From 038601cc714b1fe66d7bf8b3763b344c89749a35 Mon Sep 17 00:00:00 2001 From: Alessandro Yuichi Okimoto Date: Wed, 28 Sep 2022 15:34:48 +0900 Subject: [PATCH] feat: add the initial implementation (#1) --- .bazelrc | 20 + .bazelversion | 1 + .github/workflows/pr-title-validation.yaml | 38 + .github/workflows/publish_chart.yaml | 62 + .github/workflows/release.yaml | 27 + .gitignore | 17 + .golangci.yml | 25 + BUILD.bazel | 11 + BUILD.googleapis | 21 + CHANGELOG.md | 22 + CLA.md | 26 + CONTRIBUTING.md | 7 + DEPLOYMENT.md | 3 + LICENSE | 201 + Makefile | 295 + README.md | 93 +- WORKSPACE | 144 + cmd/account/BUILD.bazel | 31 + cmd/account/account.go | 43 + cmd/auditlog/BUILD.bazel | 31 + cmd/auditlog/auditlog.go | 43 + cmd/auth/BUILD.bazel | 30 + cmd/auth/auth.go | 41 + cmd/autoops/BUILD.bazel | 35 + cmd/autoops/autoops.go | 42 + cmd/environment/BUILD.bazel | 30 + cmd/environment/environment.go | 41 + cmd/eventcounter/BUILD.bazel | 30 + cmd/eventcounter/eventcounter.go | 41 + cmd/eventpersister/BUILD.bazel | 30 + cmd/eventpersister/eventpersister.go | 41 + cmd/experiment/BUILD.bazel | 31 + cmd/experiment/experiment.go | 43 + cmd/feature/BUILD.bazel | 36 + cmd/feature/feature.go | 47 + cmd/gateway/BUILD.bazel | 30 + cmd/gateway/gateway.go | 41 + cmd/goalbatch/BUILD.bazel | 30 + cmd/goalbatch/goalbatch.go | 41 + cmd/metricsevent/BUILD.bazel | 35 + cmd/metricsevent/metricsevent.go | 42 + cmd/migration/BUILD.bazel | 29 + cmd/migration/migration.go | 41 + cmd/notification/BUILD.bazel | 32 + cmd/notification/notification.go | 44 + cmd/opsevent/BUILD.bazel | 35 + cmd/opsevent/opsevent.go | 42 + cmd/push/BUILD.bazel | 32 + cmd/push/push.go | 44 + cmd/user/BUILD.bazel | 36 + cmd/user/user.go | 44 + go.mod | 119 + go.sum | 1163 ++ hack/create-account/BUILD.bazel | 27 + hack/create-account/README.md | 14 + hack/create-account/command.go | 135 + hack/create-account/main.go | 35 + hack/create-api-key/BUILD.bazel | 27 + hack/create-api-key/README.md | 14 + 
hack/create-api-key/command.go | 104 + hack/create-api-key/main.go | 35 + hack/create-environment/BUILD.bazel | 26 + hack/create-environment/README.md | 11 + hack/create-environment/command.go | 89 + hack/create-environment/main.go | 35 + hack/create-project/BUILD.bazel | 26 + hack/create-project/README.md | 11 + hack/create-project/command.go | 111 + hack/create-project/main.go | 35 + hack/delete-e2e-data-mysql/BUILD.bazel | 24 + hack/delete-e2e-data-mysql/README.md | 13 + hack/delete-e2e-data-mysql/command.go | 152 + hack/delete-e2e-data-mysql/main.go | 35 + hack/delete-environment/BUILD.bazel | 26 + hack/delete-environment/README.md | 9 + hack/delete-environment/command.go | 82 + hack/delete-environment/main.go | 35 + hack/generate-service-token/BUILD.bazel | 26 + hack/generate-service-token/README.md | 14 + hack/generate-service-token/command.go | 90 + hack/generate-service-token/main.go | 35 + manifests/bucketeer/.helmignore | 21 + manifests/bucketeer/Chart.yaml | 5 + .../charts/account-apikey-cacher/.helmignore | 21 + .../charts/account-apikey-cacher/Chart.yaml | 5 + .../account-apikey-cacher/templates/NOTES.txt | 15 + .../templates/_helpers.tpl | 48 + .../templates/deployment.yaml | 163 + .../templates/envoy-configmap.yaml | 283 + .../account-apikey-cacher/templates/hpa.yaml | 19 + .../templates/service-cert-secret.yaml | 16 + .../templates/service-token-secret.yaml | 15 + .../templates/service.yaml | 29 + .../charts/account-apikey-cacher/values.yaml | 71 + .../bucketeer/charts/account/.helmignore | 21 + manifests/bucketeer/charts/account/Chart.yaml | 5 + .../charts/account/templates/NOTES.txt | 15 + .../charts/account/templates/_helpers.tpl | 56 + .../charts/account/templates/deployment.yaml | 163 + .../account/templates/envoy-configmap.yaml | 229 + .../charts/account/templates/hpa.yaml | 19 + .../account/templates/oauth-key-secret.yaml | 15 + .../charts/account/templates/pdb.yaml | 12 + .../templates/service-cert-secret.yaml | 16 + 
.../templates/service-token-secret.yaml | 15 + .../charts/account/templates/service.yaml | 29 + .../bucketeer/charts/account/values.yaml | 75 + .../bucketeer/charts/api-gateway/.helmignore | 21 + .../bucketeer/charts/api-gateway/Chart.yaml | 5 + .../charts/api-gateway/templates/NOTES.txt | 3 + .../charts/api-gateway/templates/_helpers.tpl | 48 + .../api-gateway/templates/backend-config.yaml | 9 + .../api-gateway/templates/deployment.yaml | 180 + .../templates/envoy-configmap.yaml | 373 + .../charts/api-gateway/templates/hpa.yaml | 19 + .../charts/api-gateway/templates/ingress.yaml | 24 + .../charts/api-gateway/templates/pdb.yaml | 12 + .../charts/api-gateway/templates/secret.yaml | 13 + .../templates/service-cert-secret.yaml | 16 + .../templates/service-token-secret.yaml | 15 + .../charts/api-gateway/templates/service.yaml | 32 + .../bucketeer/charts/api-gateway/values.yaml | 84 + .../charts/auditlog-persister/Chart.yaml | 5 + .../auditlog-persister/templates/NOTES.txt | 15 + .../auditlog-persister/templates/_helpers.tpl | 40 + .../templates/deployment.yaml | 157 + .../templates/envoy-configmap.yaml | 127 + .../auditlog-persister/templates/hpa.yaml | 19 + .../templates/service-cert-secret.yaml | 16 + .../auditlog-persister/templates/service.yaml | 29 + .../charts/auditlog-persister/values.yaml | 68 + .../bucketeer/charts/auditlog/.helmignore | 21 + .../bucketeer/charts/auditlog/Chart.yaml | 5 + .../charts/auditlog/templates/NOTES.txt | 15 + .../charts/auditlog/templates/_helpers.tpl | 56 + .../charts/auditlog/templates/deployment.yaml | 161 + .../auditlog/templates/envoy-configmap.yaml | 230 + .../charts/auditlog/templates/hpa.yaml | 19 + .../auditlog/templates/oauth-key-secret.yaml | 15 + .../charts/auditlog/templates/pdb.yaml | 12 + .../templates/service-cert-secret.yaml | 16 + .../templates/service-token-secret.yaml | 15 + .../charts/auditlog/templates/service.yaml | 29 + .../bucketeer/charts/auditlog/values.yaml | 74 + 
manifests/bucketeer/charts/auth/.helmignore | 21 + manifests/bucketeer/charts/auth/Chart.yaml | 5 + .../bucketeer/charts/auth/templates/NOTES.txt | 15 + .../charts/auth/templates/_helpers.tpl | 64 + .../charts/auth/templates/deployment.yaml | 166 + .../auth/templates/envoy-configmap.yaml | 229 + .../bucketeer/charts/auth/templates/hpa.yaml | 19 + .../auth/templates/issuer-cert-secret.yaml | 15 + .../auth/templates/oauth-key-secret.yaml | 15 + .../bucketeer/charts/auth/templates/pdb.yaml | 12 + .../auth/templates/service-cert-secret.yaml | 16 + .../auth/templates/service-token-secret.yaml | 15 + .../charts/auth/templates/service.yaml | 29 + manifests/bucketeer/charts/auth/values.yaml | 77 + .../bucketeer/charts/auto-ops/Chart.yaml | 5 + .../charts/auto-ops/templates/NOTES.txt | 15 + .../charts/auto-ops/templates/_helpers.tpl | 56 + .../charts/auto-ops/templates/deployment.yaml | 173 + .../auto-ops/templates/envoy-configmap.yaml | 393 + .../charts/auto-ops/templates/hpa.yaml | 19 + .../auto-ops/templates/oauth-key-secret.yaml | 15 + .../charts/auto-ops/templates/pdb.yaml | 12 + .../templates/service-cert-secret.yaml | 16 + .../templates/service-token-secret.yaml | 15 + .../charts/auto-ops/templates/service.yaml | 30 + .../bucketeer/charts/auto-ops/values.yaml | 82 + .../bucketeer/charts/calculator/Chart.yaml | 5 + .../charts/calculator/templates/NOTES.txt | 15 + .../charts/calculator/templates/_helpers.tpl | 48 + .../calculator/templates/deployment.yaml | 153 + .../calculator/templates/envoy-configmap.yaml | 391 + .../charts/calculator/templates/hpa.yaml | 23 + .../templates/service-cert-secret.yaml | 16 + .../templates/service-token-secret.yaml | 15 + .../charts/calculator/templates/service.yaml | 29 + .../bucketeer/charts/calculator/values.yaml | 73 + manifests/bucketeer/charts/dex/.helmignore | 21 + manifests/bucketeer/charts/dex/Chart.yaml | 5 + .../bucketeer/charts/dex/templates/NOTES.txt | 15 + .../charts/dex/templates/_helpers.tpl | 32 + 
.../charts/dex/templates/cert-secret.yaml | 13 + .../charts/dex/templates/configmap.yaml | 38 + .../charts/dex/templates/deployment.yaml | 124 + .../charts/dex/templates/envoy-configmap.yaml | 120 + .../bucketeer/charts/dex/templates/pdb.yaml | 12 + .../charts/dex/templates/secret.yaml | 13 + .../charts/dex/templates/service.yaml | 32 + manifests/bucketeer/charts/dex/values.yaml | 67 + manifests/bucketeer/charts/druid/Chart.yaml | 5 + .../druid/charts/druid-cluster/.helmignore | 21 + .../druid/charts/druid-cluster/Chart.yaml | 5 + .../charts/druid-cluster/templates/NOTES.txt | 2 + .../druid-cluster/templates/_helpers.tpl | 21 + .../charts/druid-cluster/templates/druid.yaml | 297 + .../druid/charts/druid-cluster/values.yaml | 332 + .../druid/charts/druid-operator/.helmignore | 21 + .../druid/charts/druid-operator/Chart.yaml | 5 + .../charts/druid-operator/templates/NOTES.txt | 2 + .../druid-operator/templates/_helpers.tpl | 31 + .../druid.apache.org_druids_crd.yaml | 46 + .../druid-operator/templates/operator.yaml | 46 + .../charts/druid-operator/templates/role.yaml | 83 + .../templates/role_binding.yaml | 16 + .../templates/service_account.yaml | 9 + .../druid/charts/druid-operator/values.yaml | 15 + .../charts/zookeeper-operator/Chart.yaml | 10 + .../druid/charts/zookeeper-operator/README.md | 4 + .../zookeeper-operator/templates/_helpers.tpl | 37 + .../templates/clusterrole.yaml | 45 + .../templates/clusterrolebinding.yaml | 18 + .../templates/operator.yaml | 55 + .../templates/post-install-upgrade-hooks.yaml | 121 + .../zookeeper-operator/templates/role.yaml | 45 + .../templates/rolebinding.yaml | 18 + .../templates/service_account.yaml | 17 + ...eper.pravega.io_zookeeperclusters_crd.yaml | 3714 ++++ .../charts/zookeeper-operator/values.yaml | 53 + .../charts/druid/charts/zookeeper/Chart.yaml | 10 + .../charts/druid/charts/zookeeper/README.md | 4 + .../charts/zookeeper/templates/_helpers.tpl | 37 + .../templates/post-install-upgrade-hooks.yaml | 117 + 
.../charts/zookeeper/templates/zookeeper.yaml | 148 + .../charts/druid/charts/zookeeper/values.yaml | 71 + .../charts/zookeeper/values/minikube.yaml | 8 + .../bucketeer/charts/druid/requirements.lock | 9 + .../bucketeer/charts/druid/requirements.yaml | 14 + manifests/bucketeer/charts/druid/values.yaml | 477 + .../bucketeer/charts/environment/Chart.yaml | 5 + .../charts/environment/templates/NOTES.txt | 15 + .../charts/environment/templates/_helpers.tpl | 56 + .../environment/templates/deployment.yaml | 167 + .../templates/envoy-configmap.yaml | 229 + .../charts/environment/templates/hpa.yaml | 19 + .../templates/oauth-key-secret.yaml | 15 + .../charts/environment/templates/pdb.yaml | 12 + .../templates/service-cert-secret.yaml | 16 + .../templates/service-token-secret.yaml | 15 + .../charts/environment/templates/service.yaml | 30 + .../bucketeer/charts/environment/values.yaml | 77 + .../charts/event-counter/.helmignore | 21 + .../bucketeer/charts/event-counter/Chart.yaml | 5 + .../charts/event-counter/templates/NOTES.txt | 15 + .../event-counter/templates/_helpers.tpl | 56 + .../event-counter/templates/deployment.yaml | 173 + .../templates/envoy-configmap.yaml | 337 + .../charts/event-counter/templates/hpa.yaml | 19 + .../templates/oauth-key-secret.yaml | 15 + .../charts/event-counter/templates/pdb.yaml | 12 + .../templates/service-cert-secret.yaml | 16 + .../templates/service-token-secret.yaml | 15 + .../event-counter/templates/service.yaml | 29 + .../charts/event-counter/values.yaml | 79 + .../.helmignore | 21 + .../Chart.yaml | 5 + .../templates/NOTES.txt | 0 .../templates/_helpers.tpl | 48 + .../templates/deployment.yaml | 193 + .../templates/envoy-configmap.yaml | 229 + .../templates/hpa.yaml | 19 + .../templates/service-cert-secret.yaml | 16 + .../templates/service-token-secret.yaml | 15 + .../templates/service.yaml | 29 + .../values.yaml | 86 + .../.helmignore | 21 + .../Chart.yaml | 5 + .../templates/NOTES.txt | 0 .../templates/_helpers.tpl | 48 + 
.../templates/deployment.yaml | 193 + .../templates/envoy-configmap.yaml | 229 + .../templates/hpa.yaml | 19 + .../templates/service-cert-secret.yaml | 16 + .../templates/service-token-secret.yaml | 15 + .../templates/service.yaml | 29 + .../values.yaml | 86 + .../.helmignore | 21 + .../Chart.yaml | 5 + .../templates/NOTES.txt | 0 .../templates/_helpers.tpl | 48 + .../templates/deployment.yaml | 193 + .../templates/envoy-configmap.yaml | 229 + .../templates/hpa.yaml | 19 + .../templates/service-cert-secret.yaml | 16 + .../templates/service-token-secret.yaml | 15 + .../templates/service.yaml | 29 + .../values.yaml | 86 + .../bucketeer/charts/experiment/Chart.yaml | 5 + .../charts/experiment/templates/NOTES.txt | 15 + .../charts/experiment/templates/_helpers.tpl | 56 + .../experiment/templates/deployment.yaml | 165 + .../experiment/templates/envoy-configmap.yaml | 283 + .../charts/experiment/templates/hpa.yaml | 19 + .../templates/oauth-key-secret.yaml | 15 + .../charts/experiment/templates/pdb.yaml | 12 + .../templates/service-cert-secret.yaml | 16 + .../templates/service-token-secret.yaml | 15 + .../charts/experiment/templates/service.yaml | 30 + .../bucketeer/charts/experiment/values.yaml | 76 + .../charts/feature-recorder/.helmignore | 21 + .../charts/feature-recorder/Chart.yaml | 5 + .../feature-recorder/templates/NOTES.txt | 15 + .../feature-recorder/templates/_helpers.tpl | 48 + .../templates/deployment.yaml | 159 + .../templates/envoy-configmap.yaml | 127 + .../templates/service-cert-secret.yaml | 16 + .../templates/service-token-secret.yaml | 15 + .../feature-recorder/templates/service.yaml | 29 + .../feature-recorder/templates/vpa.yaml | 19 + .../charts/feature-recorder/values.yaml | 72 + .../feature-segment-persister/.helmignore | 21 + .../feature-segment-persister/Chart.yaml | 5 + .../templates/NOTES.txt | 15 + .../templates/_helpers.tpl | 40 + .../templates/deployment.yaml | 167 + .../templates/envoy-configmap.yaml | 127 + .../templates/hpa.yaml | 19 + 
.../templates/service-cert-secret.yaml | 16 + .../templates/service.yaml | 29 + .../feature-segment-persister/values.yaml | 73 + .../charts/feature-tag-cacher/.helmignore | 21 + .../charts/feature-tag-cacher/Chart.yaml | 5 + .../feature-tag-cacher/templates/NOTES.txt | 15 + .../feature-tag-cacher/templates/_helpers.tpl | 48 + .../templates/deployment.yaml | 161 + .../templates/envoy-configmap.yaml | 229 + .../feature-tag-cacher/templates/hpa.yaml | 19 + .../templates/service-cert-secret.yaml | 16 + .../templates/service-token-secret.yaml | 15 + .../feature-tag-cacher/templates/service.yaml | 29 + .../charts/feature-tag-cacher/values.yaml | 70 + .../bucketeer/charts/feature/.helmignore | 21 + manifests/bucketeer/charts/feature/Chart.yaml | 5 + .../charts/feature/templates/NOTES.txt | 15 + .../charts/feature/templates/_helpers.tpl | 56 + .../charts/feature/templates/deployment.yaml | 179 + .../feature/templates/envoy-configmap.yaml | 283 + .../charts/feature/templates/hpa.yaml | 19 + .../feature/templates/oauth-key-secret.yaml | 15 + .../charts/feature/templates/pdb.yaml | 12 + .../templates/service-cert-secret.yaml | 16 + .../templates/service-token-secret.yaml | 15 + .../charts/feature/templates/service.yaml | 29 + .../bucketeer/charts/feature/values.yaml | 84 + .../charts/goal-batch-transformer/.helmignore | 21 + .../charts/goal-batch-transformer/Chart.yaml | 5 + .../templates/NOTES.txt | 15 + .../templates/_helpers.tpl | 48 + .../templates/deployment.yaml | 157 + .../templates/envoy-configmap.yaml | 283 + .../goal-batch-transformer/templates/hpa.yaml | 19 + .../templates/service-cert-secret.yaml | 16 + .../templates/service-token-secret.yaml | 15 + .../templates/service.yaml | 29 + .../charts/goal-batch-transformer/values.yaml | 67 + manifests/bucketeer/charts/kafka/Chart.yaml | 5 + manifests/bucketeer/charts/kafka/Makefile | 9 + .../kafka/charts/kafka-cluster/.helmignore | 21 + .../kafka/charts/kafka-cluster/Chart.yaml | 5 + 
.../charts/kafka-cluster/templates/NOTES.txt | 2 + .../kafka-cluster/templates/_helpers.tpl | 32 + .../kafka-cluster/templates/cluster.yaml | 126 + .../templates/kafkaconfigmap.yaml | 10 + .../charts/kafka-cluster/templates/topic.yaml | 20 + .../charts/kafka-cluster/templates/user.yaml | 26 + .../kafka-cluster/templates/usersecret.yaml | 16 + .../templates/zookeeperconfigmap.yaml | 10 + .../kafka/charts/kafka-cluster/values.yaml | 202 + .../charts/strimzi-kafka-operator/.helmignore | 21 + .../charts/strimzi-kafka-operator/Chart.yaml | 24 + .../charts/strimzi-kafka-operator/OWNERS | 14 + .../charts/strimzi-kafka-operator/README.md | 4 + .../crds/040-Crd-kafka.yaml | 5955 ++++++ .../crds/041-Crd-kafkaconnect.yaml | 1906 ++ .../crds/042-Crd-strimzipodset.yaml | 119 + .../crds/043-Crd-kafkatopic.yaml | 254 + .../crds/044-Crd-kafkauser.yaml | 638 + .../crds/045-Crd-kafkamirrormaker.yaml | 1183 ++ .../crds/046-Crd-kafkabridge.yaml | 1039 + .../crds/047-Crd-kafkaconnector.yaml | 110 + .../crds/048-Crd-kafkamirrormaker2.yaml | 1897 ++ .../crds/049-Crd-kafkarebalance.yaml | 108 + ...rviceAccount-strimzi-cluster-operator.yaml | 13 + ...terRole-strimzi-cluster-operator-role.yaml | 219 + ...-RoleBinding-strimzi-cluster-operator.yaml | 33 + ...terRole-strimzi-cluster-operator-role.yaml | 46 + ...rRoleBinding-strimzi-cluster-operator.yaml | 22 + .../030-ClusterRole-strimzi-kafka-broker.yaml | 23 + ...ster-operator-kafka-broker-delegation.yaml | 25 + ...1-ClusterRole-strimzi-entity-operator.yaml | 52 + ...r-operator-entity-operator-delegation.yaml | 31 + .../033-ClusterRole-strimzi-kafka-client.yaml | 24 + ...ster-operator-kafka-client-delegation.yaml | 26 + ...50-ConfigMap-strimzi-cluster-operator.yaml | 38 + ...0-Deployment-strimzi-cluster-operator.yaml | 167 + .../templates/NOTES.txt | 5 + .../templates/_helpers.tpl | 32 + .../templates/_kafka_image_map.tpl | 36 + .../charts/strimzi-kafka-operator/values.yaml | 163 + .../bucketeer/charts/kafka/requirements.lock | 6 + 
.../bucketeer/charts/kafka/requirements.yaml | 4 + manifests/bucketeer/charts/kafka/values.yaml | 215 + .../charts/metrics-event-persister/Chart.yaml | 5 + .../templates/NOTES.txt | 15 + .../templates/_helpers.tpl | 48 + .../templates/deployment.yaml | 147 + .../templates/envoy-configmap.yaml | 127 + .../templates/hpa.yaml | 19 + .../templates/service-cert-secret.yaml | 16 + .../templates/service.yaml | 29 + .../metrics-event-persister/values.yaml | 62 + .../charts/migration-mysql/Chart.yaml | 5 + .../migration-mysql/templates/NOTES.txt | 15 + .../migration-mysql/templates/_helpers.tpl | 48 + .../migration-mysql/templates/deployment.yaml | 162 + .../templates/envoy-configmap.yaml | 127 + .../charts/migration-mysql/templates/hpa.yaml | 19 + .../templates/oauth-key-secret.yaml | 15 + .../templates/service-cert-secret.yaml | 16 + .../migration-mysql/templates/service.yaml | 30 + .../charts/migration-mysql/values.yaml | 68 + .../charts/notification-sender/Chart.yaml | 5 + .../notification-sender/templates/NOTES.txt | 15 + .../templates/_helpers.tpl | 48 + .../templates/deployment.yaml | 168 + .../templates/envoy-configmap.yaml | 453 + .../templates/service-cert-secret.yaml | 16 + .../templates/service-token-secret.yaml | 15 + .../templates/service.yaml | 29 + .../charts/notification-sender/values.yaml | 67 + .../bucketeer/charts/notification/Chart.yaml | 5 + .../charts/notification/templates/NOTES.txt | 15 + .../notification/templates/_helpers.tpl | 56 + .../notification/templates/deployment.yaml | 163 + .../templates/envoy-configmap.yaml | 232 + .../charts/notification/templates/hpa.yaml | 19 + .../templates/oauth-key-secret.yaml | 15 + .../charts/notification/templates/pdb.yaml | 12 + .../templates/service-cert-secret.yaml | 16 + .../templates/service-token-secret.yaml | 15 + .../notification/templates/service.yaml | 29 + .../bucketeer/charts/notification/values.yaml | 75 + .../charts/ops-event-batch/Chart.yaml | 5 + .../ops-event-batch/templates/NOTES.txt | 15 + 
.../ops-event-batch/templates/_helpers.tpl | 48 + .../ops-event-batch/templates/deployment.yaml | 160 + .../templates/envoy-configmap.yaml | 396 + .../templates/service-cert-secret.yaml | 16 + .../templates/service-token-secret.yaml | 15 + .../ops-event-batch/templates/service.yaml | 29 + .../charts/ops-event-batch/values.yaml | 62 + .../bucketeer/charts/push-sender/Chart.yaml | 5 + .../charts/push-sender/templates/NOTES.txt | 15 + .../charts/push-sender/templates/_helpers.tpl | 48 + .../push-sender/templates/deployment.yaml | 162 + .../templates/envoy-configmap.yaml | 286 + .../templates/service-cert-secret.yaml | 16 + .../templates/service-token-secret.yaml | 15 + .../charts/push-sender/templates/service.yaml | 29 + .../bucketeer/charts/push-sender/values.yaml | 65 + manifests/bucketeer/charts/push/Chart.yaml | 5 + .../bucketeer/charts/push/templates/NOTES.txt | 15 + .../charts/push/templates/_helpers.tpl | 56 + .../charts/push/templates/deployment.yaml | 167 + .../push/templates/envoy-configmap.yaml | 341 + .../bucketeer/charts/push/templates/hpa.yaml | 19 + .../push/templates/oauth-key-secret.yaml | 15 + .../bucketeer/charts/push/templates/pdb.yaml | 12 + .../push/templates/service-cert-secret.yaml | 16 + .../push/templates/service-token-secret.yaml | 15 + .../charts/push/templates/service.yaml | 29 + manifests/bucketeer/charts/push/values.yaml | 77 + .../charts/user-persister/Chart.yaml | 5 + .../charts/user-persister/templates/NOTES.txt | 15 + .../user-persister/templates/_helpers.tpl | 48 + .../user-persister/templates/deployment.yaml | 167 + .../templates/envoy-configmap.yaml | 231 + .../charts/user-persister/templates/hpa.yaml | 19 + .../templates/service-cert-secret.yaml | 16 + .../user-persister/templates/service.yaml | 29 + .../charts/user-persister/values.yaml | 72 + manifests/bucketeer/charts/user/Chart.yaml | 5 + .../bucketeer/charts/user/templates/NOTES.txt | 15 + .../charts/user/templates/_helpers.tpl | 56 + 
.../charts/user/templates/deployment.yaml | 163 + .../user/templates/envoy-configmap.yaml | 231 + .../bucketeer/charts/user/templates/hpa.yaml | 19 + .../user/templates/oauth-key-secret.yaml | 15 + .../bucketeer/charts/user/templates/pdb.yaml | 12 + .../user/templates/service-cert-secret.yaml | 16 + .../user/templates/service-token-secret.yaml | 15 + .../charts/user/templates/service.yaml | 30 + manifests/bucketeer/charts/user/values.yaml | 75 + .../bucketeer/charts/web-gateway/.helmignore | 21 + .../bucketeer/charts/web-gateway/Chart.yaml | 5 + .../charts/web-gateway/templates/NOTES.txt | 14 + .../charts/web-gateway/templates/_helpers.tpl | 48 + .../web-gateway/templates/backend-config.yaml | 9 + .../templates/bucketeer-jp-cert-secret.yaml | 16 + .../web-gateway/templates/deployment.yaml | 171 + .../templates/envoy-configmap.yaml | 876 + .../charts/web-gateway/templates/hpa.yaml | 19 + .../charts/web-gateway/templates/pdb.yaml | 12 + .../charts/web-gateway/templates/secret.yaml | 156 + .../templates/service-cert-secret.yaml | 16 + .../charts/web-gateway/templates/service.yaml | 33 + .../bucketeer/charts/web-gateway/values.yaml | 64 + manifests/bucketeer/charts/web/.helmignore | 21 + manifests/bucketeer/charts/web/Chart.yaml | 5 + .../bucketeer/charts/web/templates/NOTES.txt | 15 + .../charts/web/templates/_helpers.tpl | 40 + .../charts/web/templates/cert-secret.yaml | 16 + .../charts/web/templates/configmap.yaml | 58 + .../charts/web/templates/deployment.yaml | 85 + .../bucketeer/charts/web/templates/hpa.yaml | 19 + .../bucketeer/charts/web/templates/pdb.yaml | 12 + .../charts/web/templates/service.yaml | 23 + manifests/bucketeer/charts/web/values.yaml | 58 + manifests/bucketeer/values.yaml | 2437 +++ pkg/account/api/BUILD.bazel | 69 + pkg/account/api/account.go | 367 + pkg/account/api/account_test.go | 548 + pkg/account/api/admin_account.go | 572 + pkg/account/api/admin_account_test.go | 679 + pkg/account/api/api.go | 206 + pkg/account/api/api_key.go | 403 + 
pkg/account/api/api_key_test.go | 465 + pkg/account/api/api_test.go | 127 + pkg/account/api/error.go | 166 + pkg/account/api/validation.go | 175 + pkg/account/api/validation_test.go | 39 + pkg/account/apikeycacher/BUILD.bazel | 29 + pkg/account/apikeycacher/apikeycacher.go | 415 + pkg/account/apikeycacher/metrics.go | 43 + pkg/account/client/BUILD.bazel | 13 + pkg/account/client/client.go | 50 + pkg/account/client/mock/BUILD.bazel | 13 + pkg/account/client/mock/client.go | 470 + pkg/account/cmd/apikeycacher/BUILD.bazel | 24 + pkg/account/cmd/apikeycacher/apikeycacher.go | 193 + pkg/account/cmd/server/BUILD.bazel | 23 + pkg/account/cmd/server/server.go | 184 + pkg/account/command/BUILD.bazel | 39 + pkg/account/command/account.go | 132 + pkg/account/command/account_test.go | 106 + pkg/account/command/admin_account.go | 103 + pkg/account/command/admin_account_test.go | 85 + pkg/account/command/api_key.go | 120 + pkg/account/command/api_key_test.go | 105 + pkg/account/command/command.go | 30 + pkg/account/domain/BUILD.bazel | 26 + pkg/account/domain/account.go | 60 + pkg/account/domain/account_test.go | 53 + pkg/account/domain/api_key.go | 70 + pkg/account/domain/api_key_test.go | 59 + pkg/account/storage/v2/BUILD.bazel | 35 + pkg/account/storage/v2/account.go | 245 + pkg/account/storage/v2/account_test.go | 300 + pkg/account/storage/v2/admin_account.go | 239 + pkg/account/storage/v2/admin_account_test.go | 288 + pkg/account/storage/v2/api_key.go | 229 + pkg/account/storage/v2/api_key_test.go | 300 + pkg/account/storage/v2/mock/BUILD.bazel | 18 + pkg/account/storage/v2/mock/account.go | 99 + pkg/account/storage/v2/mock/admin_account.go | 99 + pkg/account/storage/v2/mock/api_key.go | 99 + pkg/auditlog/api/BUILD.bazel | 52 + pkg/auditlog/api/api.go | 415 + pkg/auditlog/api/api_test.go | 243 + pkg/auditlog/api/error.go | 86 + pkg/auditlog/client/BUILD.bazel | 13 + pkg/auditlog/client/client.go | 49 + pkg/auditlog/cmd/persister/BUILD.bazel | 20 + 
pkg/auditlog/cmd/persister/persister.go | 171 + pkg/auditlog/cmd/server/BUILD.bazel | 21 + pkg/auditlog/cmd/server/server.go | 155 + pkg/auditlog/domain/BUILD.bazel | 12 + pkg/auditlog/domain/auditlog.go | 41 + pkg/auditlog/persister/BUILD.bazel | 48 + pkg/auditlog/persister/metrics.go | 43 + pkg/auditlog/persister/persister.go | 247 + pkg/auditlog/persister/persister_test.go | 156 + pkg/auditlog/storage/v2/BUILD.bazel | 34 + pkg/auditlog/storage/v2/admin_audit_log.go | 166 + .../storage/v2/admin_audit_log_test.go | 179 + pkg/auditlog/storage/v2/audit_log.go | 168 + pkg/auditlog/storage/v2/audit_log_test.go | 179 + pkg/auditlog/storage/v2/mock/BUILD.bazel | 17 + .../storage/v2/mock/admin_audit_log.go | 70 + pkg/auditlog/storage/v2/mock/audit_log.go | 70 + pkg/auth/api/BUILD.bazel | 36 + pkg/auth/api/api.go | 369 + pkg/auth/api/api_test.go | 15 + pkg/auth/api/error.go | 166 + pkg/auth/client/BUILD.bazel | 13 + pkg/auth/client/client.go | 50 + pkg/auth/client/mock/BUILD.bazel | 13 + pkg/auth/client/mock/client.go | 110 + pkg/auth/cmd/server/BUILD.bazel | 21 + pkg/auth/cmd/server/server.go | 150 + pkg/auth/oidc/BUILD.bazel | 23 + pkg/auth/oidc/oidc.go | 256 + pkg/auth/oidc/oidc_test.go | 50 + pkg/autoops/api/BUILD.bazel | 73 + pkg/autoops/api/api.go | 899 + pkg/autoops/api/api_test.go | 792 + pkg/autoops/api/error.go | 396 + pkg/autoops/api/operation.go | 100 + pkg/autoops/api/webhook.go | 522 + pkg/autoops/api/webhook_test.go | 441 + pkg/autoops/client/BUILD.bazel | 13 + pkg/autoops/client/client.go | 50 + pkg/autoops/client/mock/BUILD.bazel | 13 + pkg/autoops/client/mock/client.go | 290 + pkg/autoops/cmd/server/BUILD.bazel | 29 + pkg/autoops/cmd/server/server.go | 298 + pkg/autoops/command/BUILD.bazel | 37 + pkg/autoops/command/auto_ops_rule.go | 219 + pkg/autoops/command/auto_ops_rule_test.go | 318 + pkg/autoops/command/command.go | 30 + pkg/autoops/command/webhook.go | 141 + pkg/autoops/domain/BUILD.bazel | 32 + pkg/autoops/domain/auto_ops_rule.go | 268 + 
pkg/autoops/domain/auto_ops_rule_test.go | 296 + pkg/autoops/domain/webhook.go | 49 + pkg/autoops/domain/webhook_secret.go | 57 + pkg/autoops/storage/v2/BUILD.bazel | 30 + pkg/autoops/storage/v2/auto_ops_rule.go | 243 + pkg/autoops/storage/v2/auto_ops_rule_test.go | 258 + pkg/autoops/storage/v2/mock/BUILD.bazel | 17 + pkg/autoops/storage/v2/mock/auto_ops_rule.go | 98 + pkg/autoops/storage/v2/mock/webhook.go | 113 + pkg/autoops/storage/v2/webhook.go | 256 + pkg/autoops/webhookhandler/BUILD.bazel | 50 + pkg/autoops/webhookhandler/evaluation.go | 207 + pkg/autoops/webhookhandler/handler.go | 319 + pkg/autoops/webhookhandler/handler_test.go | 571 + .../webhookhandler/testdata/invalid-token | 1 + .../webhookhandler/testdata/valid-public.pem | 14 + .../webhookhandler/testdata/valid-token | 1 + pkg/backoff/BUILD.bazel | 26 + pkg/backoff/backoff.go | 26 + pkg/backoff/constant.go | 54 + pkg/backoff/constant_test.go | 34 + pkg/backoff/exponential.go | 66 + pkg/backoff/exponential_test.go | 40 + pkg/backoff/retry.go | 64 + pkg/cache/BUILD.bazel | 16 + pkg/cache/cache.go | 96 + pkg/cache/mock/BUILD.bazel | 9 + pkg/cache/mock/cache.go | 447 + pkg/cache/redis_cache.go | 49 + pkg/cache/testing/BUILD.bazel | 9 + pkg/cache/testing/cache.go | 65 + pkg/cache/ttl_cache.go | 90 + pkg/cache/v2/BUILD.bazel | 14 + pkg/cache/v2/redis_cache.go | 103 + pkg/cache/v3/BUILD.bazel | 41 + pkg/cache/v3/environment_api_key.go | 68 + pkg/cache/v3/experiments.go | 87 + pkg/cache/v3/features.go | 73 + pkg/cache/v3/features_test.go | 148 + pkg/cache/v3/mock/BUILD.bazel | 19 + pkg/cache/v3/mock/environment_api_key.go | 65 + pkg/cache/v3/mock/experiments.go | 65 + pkg/cache/v3/mock/features.go | 65 + pkg/cache/v3/mock/segment_users.go | 80 + pkg/cache/v3/redis_cache.go | 75 + pkg/cache/v3/segment_users.go | 121 + pkg/cache/v3/segment_users_test.go | 220 + pkg/cli/BUILD.bazel | 20 + pkg/cli/app.go | 165 + pkg/cli/cmd.go | 37 + pkg/crypto/BUILD.bazel | 15 + pkg/crypto/cloudkmscrypto.go | 60 + 
pkg/crypto/crypto.go | 22 + pkg/domainevent/BUILD.bazel | 0 pkg/domainevent/domain/BUILD.bazel | 34 + pkg/domainevent/domain/event.go | 104 + pkg/domainevent/domain/message.go | 665 + pkg/domainevent/domain/message_test.go | 71 + pkg/domainevent/domain/url.go | 80 + pkg/domainevent/domain/url_test.go | 54 + pkg/druid/BUILD.bazel | 17 + pkg/druid/mock/BUILD.bazel | 9 + pkg/druid/mock/supervisor_creator.go | 49 + pkg/druid/supervisor.go | 171 + pkg/druid/supervisor_creator.go | 124 + pkg/environment/api/BUILD.bazel | 63 + pkg/environment/api/api.go | 105 + pkg/environment/api/api_test.go | 89 + pkg/environment/api/environment.go | 388 + pkg/environment/api/environment_test.go | 529 + pkg/environment/api/error.go | 206 + pkg/environment/api/project.go | 494 + pkg/environment/api/project_test.go | 734 + pkg/environment/client/BUILD.bazel | 13 + pkg/environment/client/client.go | 50 + pkg/environment/client/mock/BUILD.bazel | 13 + pkg/environment/client/mock/client.go | 330 + pkg/environment/cmd/server/BUILD.bazel | 23 + pkg/environment/cmd/server/server.go | 187 + pkg/environment/command/BUILD.bazel | 39 + pkg/environment/command/command.go | 30 + pkg/environment/command/environment.go | 111 + pkg/environment/command/environment_test.go | 103 + pkg/environment/command/project.go | 131 + pkg/environment/command/project_test.go | 132 + pkg/environment/domain/BUILD.bazel | 22 + pkg/environment/domain/environment.go | 56 + pkg/environment/domain/environment_test.go | 52 + pkg/environment/domain/project.go | 58 + pkg/environment/domain/project_test.go | 57 + pkg/environment/storage/v2/BUILD.bazel | 33 + pkg/environment/storage/v2/environment.go | 286 + .../storage/v2/environment_test.go | 348 + pkg/environment/storage/v2/mock/BUILD.bazel | 17 + .../storage/v2/mock/environment.go | 114 + pkg/environment/storage/v2/mock/project.go | 114 + pkg/environment/storage/v2/project.go | 279 + pkg/environment/storage/v2/project_test.go | 352 + pkg/errgroup/BUILD.bazel | 16 + 
pkg/errgroup/errgroup.go | 75 + pkg/errgroup/errgroup_test.go | 73 + pkg/eventcounter/api/BUILD.bazel | 69 + pkg/eventcounter/api/api.go | 673 + pkg/eventcounter/api/api_test.go | 1006 + pkg/eventcounter/api/error.go | 146 + pkg/eventcounter/api/metrics.go | 78 + pkg/eventcounter/client/BUILD.bazel | 13 + pkg/eventcounter/client/client.go | 50 + pkg/eventcounter/client/mock/BUILD.bazel | 13 + pkg/eventcounter/client/mock/client.go | 210 + pkg/eventcounter/cmd/server/BUILD.bazel | 25 + pkg/eventcounter/cmd/server/server.go | 224 + pkg/eventcounter/domain/BUILD.bazel | 9 + pkg/eventcounter/domain/experiment_result.go | 23 + pkg/eventcounter/druid/BUILD.bazel | 33 + pkg/eventcounter/druid/mock/BUILD.bazel | 12 + pkg/eventcounter/druid/mock/querier.go | 132 + pkg/eventcounter/druid/querier.go | 650 + pkg/eventcounter/druid/querier_test.go | 153 + pkg/eventcounter/druid/query.go | 236 + pkg/eventcounter/druid/query_test.go | 693 + pkg/eventcounter/storage/v2/BUILD.bazel | 25 + .../storage/v2/experiment_result.go | 78 + .../storage/v2/experiment_result_test.go | 100 + pkg/eventcounter/storage/v2/mock/BUILD.bazel | 12 + .../storage/v2/mock/experiment_result.go | 52 + pkg/eventpersister/cmd/server/BUILD.bazel | 24 + pkg/eventpersister/cmd/server/server.go | 303 + pkg/eventpersister/datastore/BUILD.bazel | 29 + pkg/eventpersister/datastore/datastore.go | 56 + .../datastore/datastore_test.go | 77 + pkg/eventpersister/datastore/kafka.go | 137 + pkg/eventpersister/datastore/metrics.go | 59 + pkg/eventpersister/persister/BUILD.bazel | 52 + pkg/eventpersister/persister/metrics.go | 51 + pkg/eventpersister/persister/persister.go | 594 + .../persister/persister_test.go | 481 + pkg/eventpersister/storage/v2/BUILD.bazel | 13 + pkg/eventpersister/storage/v2/persister.go | 190 + pkg/experiment/api/BUILD.bazel | 64 + pkg/experiment/api/api.go | 129 + pkg/experiment/api/api_test.go | 115 + pkg/experiment/api/error.go | 208 + pkg/experiment/api/experiment.go | 611 + 
pkg/experiment/api/experiment_test.go | 699 + pkg/experiment/api/goal.go | 381 + pkg/experiment/api/goal_test.go | 435 + pkg/experiment/batch/job/BUILD.bazel | 35 + .../batch/job/experiment_status_updater.go | 221 + .../job/experiment_status_updater_test.go | 129 + pkg/experiment/batch/job/job.go | 49 + pkg/experiment/client/BUILD.bazel | 13 + pkg/experiment/client/client.go | 50 + pkg/experiment/client/mock/BUILD.bazel | 13 + pkg/experiment/client/mock/client.go | 350 + pkg/experiment/cmd/batch/BUILD.bazel | 21 + pkg/experiment/cmd/batch/batch.go | 172 + pkg/experiment/cmd/server/BUILD.bazel | 24 + pkg/experiment/cmd/server/server.go | 212 + pkg/experiment/command/BUILD.bazel | 40 + pkg/experiment/command/command.go | 30 + pkg/experiment/command/experiment.go | 189 + pkg/experiment/command/experiment_test.go | 178 + pkg/experiment/command/goal.go | 127 + pkg/experiment/command/goal_test.go | 109 + pkg/experiment/domain/BUILD.bazel | 31 + pkg/experiment/domain/experiment.go | 236 + pkg/experiment/domain/experiment_test.go | 515 + pkg/experiment/domain/goal.go | 60 + pkg/experiment/domain/goal_test.go | 64 + pkg/experiment/storage/v2/BUILD.bazel | 33 + pkg/experiment/storage/v2/experiment.go | 345 + pkg/experiment/storage/v2/experiment_test.go | 266 + pkg/experiment/storage/v2/goal.go | 295 + pkg/experiment/storage/v2/goal_test.go | 274 + pkg/experiment/storage/v2/mock/BUILD.bazel | 17 + pkg/experiment/storage/v2/mock/experiment.go | 99 + pkg/experiment/storage/v2/mock/goal.go | 99 + pkg/feature/api/BUILD.bazel | 96 + pkg/feature/api/api.go | 169 + pkg/feature/api/api_test.go | 181 + pkg/feature/api/error.go | 664 + pkg/feature/api/feature.go | 1639 ++ pkg/feature/api/feature_test.go | 2901 +++ pkg/feature/api/segment.go | 462 + pkg/feature/api/segment_test.go | 376 + pkg/feature/api/segment_user.go | 501 + pkg/feature/api/segment_user_test.go | 230 + pkg/feature/api/tag.go | 156 + pkg/feature/api/tag_test.go | 144 + pkg/feature/api/user_evaluations.go | 91 + 
pkg/feature/api/user_evaluations_test.go | 228 + pkg/feature/api/validation.go | 659 + pkg/feature/cacher/BUILD.bazel | 33 + pkg/feature/cacher/cacher.go | 309 + pkg/feature/cacher/cacher_test.go | 30 + pkg/feature/cacher/metrics.go | 43 + pkg/feature/client/BUILD.bazel | 13 + pkg/feature/client/client.go | 50 + pkg/feature/client/mock/BUILD.bazel | 13 + pkg/feature/client/mock/client.go | 630 + pkg/feature/cmd/cacher/BUILD.bazel | 23 + pkg/feature/cmd/cacher/cacher.go | 172 + pkg/feature/cmd/recorder/BUILD.bazel | 20 + pkg/feature/cmd/recorder/recorder.go | 172 + pkg/feature/cmd/segmentpersister/BUILD.bazel | 21 + pkg/feature/cmd/segmentpersister/persister.go | 221 + pkg/feature/cmd/server/BUILD.bazel | 26 + pkg/feature/cmd/server/server.go | 272 + pkg/feature/command/BUILD.bazel | 43 + pkg/feature/command/command.go | 44 + pkg/feature/command/detail.go | 89 + pkg/feature/command/eventfactory.go | 47 + pkg/feature/command/feature.go | 758 + pkg/feature/command/feature_test.go | 587 + pkg/feature/command/segment.go | 319 + pkg/feature/command/segment_test.go | 85 + pkg/feature/domain/BUILD.bazel | 48 + pkg/feature/domain/clause_evaluator.go | 279 + pkg/feature/domain/clause_evaluator_test.go | 1150 ++ pkg/feature/domain/evaluation.go | 139 + pkg/feature/domain/evaluation_test.go | 367 + pkg/feature/domain/feature.go | 873 + pkg/feature/domain/feature_last_used_info.go | 117 + .../domain/feature_last_used_info_test.go | 254 + pkg/feature/domain/feature_test.go | 1816 ++ pkg/feature/domain/rule_evaluator.go | 64 + pkg/feature/domain/rule_evaluator_test.go | 252 + pkg/feature/domain/segment.go | 209 + pkg/feature/domain/segment_evaluator.go | 60 + pkg/feature/domain/segment_test.go | 265 + pkg/feature/domain/segment_user.go | 42 + pkg/feature/domain/strategy_evaluator.go | 85 + pkg/feature/domain/tag.go | 36 + pkg/feature/domain/user_evaluations.go | 64 + pkg/feature/domain/user_evaluations_test.go | 75 + pkg/feature/recorder/BUILD.bazel | 44 + 
pkg/feature/recorder/metrics.go | 46 + pkg/feature/recorder/recorder.go | 329 + pkg/feature/recorder/recorder_test.go | 170 + pkg/feature/segmentpersister/BUILD.bazel | 56 + pkg/feature/segmentpersister/metrics.go | 43 + pkg/feature/segmentpersister/persister.go | 456 + .../segmentpersister/persister_test.go | 161 + pkg/feature/storage/BUILD.bazel | 38 + pkg/feature/storage/feature_last_used_info.go | 158 + .../storage/feature_last_used_info_test.go | 264 + pkg/feature/storage/mock/BUILD.bazel | 17 + .../storage/mock/feature_last_used_info.go | 112 + pkg/feature/storage/mock/user_evaluations.go | 66 + pkg/feature/storage/user_evaluations.go | 125 + pkg/feature/storage/user_evaluations_test.go | 220 + pkg/feature/storage/v2/BUILD.bazel | 36 + pkg/feature/storage/v2/feature.go | 467 + .../storage/v2/feature_last_used_info.go | 154 + .../storage/v2/feature_last_used_info_test.go | 32 + pkg/feature/storage/v2/feature_test.go | 32 + pkg/feature/storage/v2/mock/BUILD.bazel | 20 + pkg/feature/storage/v2/mock/feature.go | 116 + .../storage/v2/mock/feature_last_used_info.go | 66 + pkg/feature/storage/v2/mock/segment.go | 99 + pkg/feature/storage/v2/mock/segment_user.go | 84 + pkg/feature/storage/v2/mock/tag.go | 70 + pkg/feature/storage/v2/segment.go | 343 + pkg/feature/storage/v2/segment_test.go | 32 + pkg/feature/storage/v2/segment_user.go | 179 + pkg/feature/storage/v2/segment_user_test.go | 32 + pkg/feature/storage/v2/tag.go | 136 + pkg/feature/storage/v2/tag_test.go | 32 + pkg/gateway/api/BUILD.bazel | 84 + pkg/gateway/api/api.go | 1279 ++ pkg/gateway/api/api_grpc.go | 935 + pkg/gateway/api/api_grpc_test.go | 2242 +++ pkg/gateway/api/api_test.go | 2511 +++ pkg/gateway/api/grpc_validation.go | 299 + pkg/gateway/api/metrics.go | 110 + pkg/gateway/api/trackhandler.go | 275 + pkg/gateway/api/trackhandler_test.go | 243 + pkg/gateway/api/validation.go | 129 + pkg/gateway/api/validation_test.go | 478 + pkg/gateway/client/BUILD.bazel | 17 + pkg/gateway/client/client.go | 49 + 
pkg/gateway/client/credentials.go | 47 + pkg/gateway/cmd/BUILD.bazel | 26 + pkg/gateway/cmd/server.go | 350 + pkg/goalbatch/cmd/transformer/BUILD.bazel | 22 + pkg/goalbatch/cmd/transformer/transformer.go | 177 + pkg/goalbatch/transformer/BUILD.bazel | 52 + pkg/goalbatch/transformer/metrics.go | 83 + pkg/goalbatch/transformer/transformer.go | 349 + pkg/goalbatch/transformer/transformer_test.go | 356 + pkg/health/BUILD.bazel | 25 + pkg/health/grpc_health.go | 55 + pkg/health/health.go | 138 + pkg/health/health_test.go | 117 + pkg/health/rest_health.go | 41 + pkg/job/BUILD.bazel | 17 + pkg/job/job.go | 112 + pkg/job/metrics.go | 67 + pkg/kafka/BUILD.bazel | 16 + pkg/kafka/mock/BUILD.bazel | 9 + pkg/kafka/mock/topic_creator.go | 49 + pkg/kafka/topic.go | 23 + pkg/kafka/topic_creator.go | 106 + pkg/ldflags/BUILD.bazel | 8 + pkg/ldflags/ldflags.go | 20 + pkg/locale/BUILD.bazel | 34 + pkg/locale/locale.go | 39 + pkg/locale/localizedata/en.yaml | 8 + pkg/locale/localizedata/ja.yaml | 34 + pkg/locale/localizer.go | 100 + pkg/locale/localizer_test.go | 85 + pkg/locale/options.go | 53 + pkg/log/BUILD.bazel | 24 + pkg/log/field.go | 64 + pkg/log/log.go | 116 + pkg/log/log_test.go | 40 + pkg/metrics/BUILD.bazel | 24 + pkg/metrics/metrics.go | 163 + pkg/metrics/metrics_test.go | 40 + pkg/metrics/mock/BUILD.bazel | 14 + pkg/metrics/mock/metrics.go | 160 + pkg/metricsevent/cmd/persister/BUILD.bazel | 19 + pkg/metricsevent/cmd/persister/persister.go | 137 + pkg/metricsevent/persister/BUILD.bazel | 45 + pkg/metricsevent/persister/metrics.go | 55 + pkg/metricsevent/persister/persister.go | 290 + pkg/metricsevent/persister/persister_test.go | 260 + pkg/metricsevent/storage/BUILD.bazel | 29 + pkg/metricsevent/storage/event.go | 56 + pkg/metricsevent/storage/event_test.go | 38 + pkg/metricsevent/storage/metrics.go | 66 + pkg/metricsevent/storage/mock/BUILD.bazel | 9 + pkg/metricsevent/storage/mock/event.go | 83 + pkg/migration/cmd/mysqlserver/BUILD.bazel | 19 + 
pkg/migration/cmd/mysqlserver/mysqlserver.go | 122 + pkg/migration/mysql/api/BUILD.bazel | 39 + pkg/migration/mysql/api/api.go | 157 + pkg/migration/mysql/api/api_test.go | 233 + pkg/migration/mysql/migrate/BUILD.bazel | 13 + pkg/migration/mysql/migrate/migrate.go | 88 + pkg/migration/mysql/migrate/mock/BUILD.bazel | 12 + pkg/migration/mysql/migrate/mock/migrate.go | 102 + pkg/notification/api/BUILD.bazel | 61 + pkg/notification/api/admin_subscription.go | 579 + .../api/admin_subscription_test.go | 658 + pkg/notification/api/api.go | 159 + pkg/notification/api/api_test.go | 132 + pkg/notification/api/error.go | 198 + pkg/notification/api/subscription.go | 623 + pkg/notification/api/subscription_test.go | 584 + pkg/notification/client/BUILD.bazel | 13 + pkg/notification/client/client.go | 50 + pkg/notification/client/mock/BUILD.bazel | 13 + pkg/notification/client/mock/client.go | 370 + pkg/notification/cmd/sender/BUILD.bazel | 29 + pkg/notification/cmd/sender/sender.go | 343 + pkg/notification/cmd/server/BUILD.bazel | 23 + pkg/notification/cmd/server/server.go | 205 + pkg/notification/command/BUILD.bazel | 36 + .../command/admin_subscription.go | 160 + .../command/admin_subscription_test.go | 193 + pkg/notification/command/command.go | 33 + pkg/notification/command/subscription.go | 157 + pkg/notification/command/subscription_test.go | 209 + pkg/notification/domain/BUILD.bazel | 19 + pkg/notification/domain/subscription.go | 146 + pkg/notification/domain/subscription_test.go | 91 + pkg/notification/sender/BUILD.bazel | 38 + pkg/notification/sender/informer/BUILD.bazel | 9 + .../sender/informer/batch/BUILD.bazel | 15 + pkg/notification/sender/informer/batch/job.go | 115 + .../sender/informer/batch/job/BUILD.bazel | 56 + .../batch/job/experiment_running_watcher.go | 158 + .../job/experiment_running_watcher_test.go | 104 + .../informer/batch/job/feature_watcher.go | 175 + .../batch/job/feature_watcher_test.go | 134 + .../sender/informer/batch/job/job.go | 53 + 
.../informer/batch/job/mau_count_watcher.go | 214 + .../batch/job/mau_count_watcher_test.go | 214 + .../sender/informer/domainevent/BUILD.bazel | 45 + .../informer/domainevent/domain_event.go | 312 + .../informer/domainevent/domain_event_test.go | 137 + .../sender/informer/domainevent/metrics.go | 50 + pkg/notification/sender/informer/informer.go | 28 + .../sender/informer/mock/BUILD.bazel | 12 + .../sender/informer/mock/informer.go | 77 + pkg/notification/sender/metrics.go | 51 + pkg/notification/sender/mock/BUILD.bazel | 12 + pkg/notification/sender/mock/sender.go | 51 + pkg/notification/sender/notifier/BUILD.bazel | 36 + pkg/notification/sender/notifier/message.go | 62 + pkg/notification/sender/notifier/metrics.go | 52 + .../sender/notifier/mock/BUILD.bazel | 13 + .../sender/notifier/mock/notifier.go | 52 + pkg/notification/sender/notifier/notifier.go | 27 + pkg/notification/sender/notifier/slack.go | 283 + .../sender/notifier/slack_test.go | 53 + pkg/notification/sender/sender.go | 197 + pkg/notification/sender/sender_test.go | 403 + pkg/notification/storage/v2/BUILD.bazel | 33 + .../storage/v2/admin_subscription.go | 253 + .../storage/v2/admin_subscription_test.go | 344 + pkg/notification/storage/v2/mock/BUILD.bazel | 17 + .../storage/v2/mock/admin_subscription.go | 113 + .../storage/v2/mock/subscription.go | 113 + pkg/notification/storage/v2/subscription.go | 275 + .../storage/v2/subscription_test.go | 360 + pkg/opsevent/batch/executor/BUILD.bazel | 29 + pkg/opsevent/batch/executor/executor.go | 81 + pkg/opsevent/batch/executor/executor_test.go | 92 + pkg/opsevent/batch/executor/mock/BUILD.bazel | 9 + pkg/opsevent/batch/executor/mock/executor.go | 49 + pkg/opsevent/batch/job/BUILD.bazel | 58 + pkg/opsevent/batch/job/count_watcher.go | 354 + pkg/opsevent/batch/job/count_watcher_test.go | 309 + pkg/opsevent/batch/job/datetime_watcher.go | 114 + .../batch/job/datetime_watcher_test.go | 153 + pkg/opsevent/batch/job/job.go | 49 + 
pkg/opsevent/batch/targetstore/BUILD.bazel | 45 + pkg/opsevent/batch/targetstore/metrics.go | 40 + .../batch/targetstore/mock/BUILD.bazel | 13 + .../batch/targetstore/mock/targetstore.go | 164 + pkg/opsevent/batch/targetstore/targetstore.go | 273 + .../batch/targetstore/targetstore_test.go | 279 + pkg/opsevent/cmd/batch/BUILD.bazel | 26 + pkg/opsevent/cmd/batch/batch.go | 288 + pkg/opsevent/domain/BUILD.bazel | 19 + pkg/opsevent/domain/ops_count.go | 44 + pkg/opsevent/domain/ops_count_test.go | 33 + pkg/opsevent/storage/v2/BUILD.bazel | 27 + pkg/opsevent/storage/v2/mock/BUILD.bazel | 14 + pkg/opsevent/storage/v2/mock/ops_count.go | 69 + pkg/opsevent/storage/v2/ops_count.go | 134 + pkg/opsevent/storage/v2/ops_count_test.go | 144 + pkg/pubsub/BUILD.bazel | 16 + pkg/pubsub/publisher/BUILD.bazel | 19 + pkg/pubsub/publisher/metrics.go | 81 + pkg/pubsub/publisher/mock/BUILD.bazel | 12 + pkg/pubsub/publisher/mock/publisher.go | 152 + pkg/pubsub/publisher/publisher.go | 153 + pkg/pubsub/pubsub.go | 252 + pkg/pubsub/puller/BUILD.bazel | 16 + pkg/pubsub/puller/codes/BUILD.bazel | 11 + pkg/pubsub/puller/codes/code_string.go | 30 + pkg/pubsub/puller/codes/codes.go | 29 + pkg/pubsub/puller/mock/BUILD.bazel | 15 + pkg/pubsub/puller/mock/puller.go | 51 + pkg/pubsub/puller/mock/rate_limited_puller.go | 65 + pkg/pubsub/puller/puller.go | 82 + pkg/pubsub/puller/rate_limited_puller.go | 59 + pkg/push/api/BUILD.bazel | 57 + pkg/push/api/api.go | 586 + pkg/push/api/api_test.go | 488 + pkg/push/api/error.go | 186 + pkg/push/client/BUILD.bazel | 13 + pkg/push/client/client.go | 50 + pkg/push/client/mock/BUILD.bazel | 13 + pkg/push/client/mock/client.go | 130 + pkg/push/cmd/sender/BUILD.bazel | 24 + pkg/push/cmd/sender/sender.go | 234 + pkg/push/cmd/server/BUILD.bazel | 25 + pkg/push/cmd/server/server.go | 235 + pkg/push/command/BUILD.bazel | 36 + pkg/push/command/command.go | 30 + pkg/push/command/push.go | 117 + pkg/push/command/push_test.go | 165 + pkg/push/domain/BUILD.bazel | 22 + 
pkg/push/domain/push.go | 125 + pkg/push/domain/push_test.go | 181 + pkg/push/sender/BUILD.bazel | 41 + pkg/push/sender/metrics.go | 56 + pkg/push/sender/sender.go | 433 + pkg/push/sender/sender_test.go | 111 + pkg/push/storage/v2/BUILD.bazel | 27 + pkg/push/storage/v2/mock/BUILD.bazel | 14 + pkg/push/storage/v2/mock/push.go | 99 + pkg/push/storage/v2/push.go | 233 + pkg/push/storage/v2/push_test.go | 302 + pkg/redis/BUILD.bazel | 33 + pkg/redis/conn.go | 44 + pkg/redis/metrics.go | 131 + pkg/redis/redis.go | 273 + pkg/redis/redis_test.go | 163 + pkg/redis/v2/BUILD.bazel | 15 + pkg/redis/v2/redis.go | 248 + pkg/redis/v3/BUILD.bazel | 15 + pkg/redis/v3/redis.go | 355 + pkg/rest/BUILD.bazel | 39 + pkg/rest/error.go | 57 + pkg/rest/handler.go | 21 + pkg/rest/log.go | 86 + pkg/rest/log_test.go | 64 + pkg/rest/metrics.go | 80 + pkg/rest/middleware.go | 69 + pkg/rest/middleware_test.go | 109 + pkg/rest/response.go | 70 + pkg/rest/server.go | 134 + pkg/rest/server_test.go | 98 + pkg/rest/testdata/server.crt | 20 + pkg/rest/testdata/server.key | 28 + pkg/rest/testdata/service.config | 16 + pkg/role/BUILD.bazel | 31 + pkg/role/role.go | 77 + pkg/role/role_test.go | 137 + pkg/rpc/BUILD.bazel | 58 + pkg/rpc/auth.go | 69 + pkg/rpc/client/BUILD.bazel | 40 + pkg/rpc/client/client.go | 125 + pkg/rpc/client/credentials.go | 48 + pkg/rpc/client/interceptor.go | 49 + pkg/rpc/client/interceptor_test.go | 57 + pkg/rpc/client/log.go | 78 + pkg/rpc/client/metrics.go | 81 + pkg/rpc/client/request_id.go | 42 + pkg/rpc/interceptor.go | 54 + pkg/rpc/interceptor_test.go | 60 + pkg/rpc/log.go | 103 + pkg/rpc/log_test.go | 52 + pkg/rpc/metadata/BUILD.bazel | 22 + pkg/rpc/metadata/request_id.go | 61 + pkg/rpc/metadata/request_id_test.go | 105 + pkg/rpc/metrics.go | 95 + pkg/rpc/server.go | 193 + pkg/rpc/server_test.go | 236 + pkg/rpc/service.go | 21 + pkg/rpc/status/BUILD.bazel | 12 + pkg/rpc/status/status.go | 28 + pkg/rpc/testdata/server.crt | 20 + pkg/rpc/testdata/server.key | 28 + 
pkg/rpc/testdata/service.config | 16 + pkg/storage/BUILD.bazel | 8 + pkg/storage/druid/BUILD.bazel | 14 + pkg/storage/druid/broker_client.go | 67 + pkg/storage/druid/coordinator_client.go | 43 + pkg/storage/druid/druid.go | 23 + pkg/storage/druid/overlord_client.go | 43 + pkg/storage/kafka/BUILD.bazel | 17 + pkg/storage/kafka/cluster_admin.go | 61 + pkg/storage/kafka/kafka.go | 23 + pkg/storage/kafka/producer.go | 59 + pkg/storage/kafka/scram_client.go | 50 + pkg/storage/mock/BUILD.bazel | 12 + pkg/storage/mock/storage.go | 837 + pkg/storage/storage.go | 186 + pkg/storage/testing/BUILD.bazel | 15 + pkg/storage/testing/file.go | 52 + pkg/storage/testing/storage.go | 173 + pkg/storage/v2/bigtable/BUILD.bazel | 33 + pkg/storage/v2/bigtable/client.go | 248 + pkg/storage/v2/bigtable/metrics.go | 94 + pkg/storage/v2/bigtable/mock/BUILD.bazel | 12 + pkg/storage/v2/bigtable/mock/client.go | 249 + pkg/storage/v2/bigtable/request.go | 102 + pkg/storage/v2/bigtable/request_test.go | 46 + pkg/storage/v2/bigtable/rows.go | 87 + pkg/storage/v2/bigtable/rows_test.go | 141 + pkg/storage/v2/mysql/BUILD.bazel | 40 + pkg/storage/v2/mysql/client.go | 191 + pkg/storage/v2/mysql/error.go | 43 + pkg/storage/v2/mysql/error_test.go | 51 + pkg/storage/v2/mysql/json.go | 42 + pkg/storage/v2/mysql/jsonpb.go | 55 + pkg/storage/v2/mysql/metrics.go | 86 + pkg/storage/v2/mysql/mock/BUILD.bazel | 17 + pkg/storage/v2/mysql/mock/client.go | 326 + pkg/storage/v2/mysql/mock/query.go | 49 + pkg/storage/v2/mysql/mock/result.go | 202 + pkg/storage/v2/mysql/mock/transaction.go | 124 + pkg/storage/v2/mysql/query.go | 283 + pkg/storage/v2/mysql/query_test.go | 282 + pkg/storage/v2/mysql/result.go | 63 + pkg/storage/v2/mysql/transaction.go | 75 + pkg/storage/v2/postgres/BUILD.bazel | 28 + pkg/storage/v2/postgres/client.go | 141 + pkg/storage/v2/postgres/error.go | 40 + pkg/storage/v2/postgres/error_test.go | 50 + pkg/storage/v2/postgres/json.go | 42 + pkg/storage/v2/postgres/result.go | 27 + 
pkg/token/BUILD.bazel | 36 + pkg/token/idtoken.go | 57 + pkg/token/idtoken_test.go | 60 + pkg/token/signer.go | 86 + pkg/token/signer_test.go | 102 + pkg/token/testdata/invalid-private.pem | 1 + pkg/token/testdata/invalid-public.pem | 1 + pkg/token/testdata/valid-private.pem | 51 + pkg/token/testdata/valid-public.pem | 14 + pkg/token/verifier.go | 104 + pkg/token/verifier_test.go | 105 + pkg/trace/BUILD.bazel | 23 + pkg/trace/trace.go | 70 + pkg/trace/trace_test.go | 81 + pkg/user/api/BUILD.bazel | 50 + pkg/user/api/api.go | 123 + pkg/user/api/error.go | 106 + pkg/user/api/user.go | 157 + pkg/user/api/user_test.go | 151 + pkg/user/client/BUILD.bazel | 13 + pkg/user/client/client.go | 50 + pkg/user/client/mock/BUILD.bazel | 13 + pkg/user/client/mock/client.go | 90 + pkg/user/cmd/persister/BUILD.bazel | 22 + pkg/user/cmd/persister/persister.go | 211 + pkg/user/cmd/server/BUILD.bazel | 21 + pkg/user/cmd/server/server.go | 155 + pkg/user/domain/BUILD.bazel | 20 + pkg/user/domain/user.go | 66 + pkg/user/domain/user_test.go | 203 + pkg/user/persister/BUILD.bazel | 48 + pkg/user/persister/metrics.go | 55 + pkg/user/persister/persister.go | 338 + pkg/user/persister/persister_test.go | 266 + pkg/user/storage/v2/BUILD.bazel | 24 + pkg/user/storage/v2/mock/BUILD.bazel | 14 + pkg/user/storage/v2/mock/user.go | 99 + pkg/user/storage/v2/user.go | 214 + pkg/user/storage/v2/user_test.go | 32 + pkg/uuid/BUILD.bazel | 18 + pkg/uuid/uuid.go | 62 + pkg/uuid/uuid_test.go | 56 + proto/.clang-format | 1 + proto/BUILD.bazel | 0 proto/Makefile | 46 + proto/account/BUILD.bazel | 46 + proto/account/account.proto | 44 + proto/account/api_key.proto | 37 + proto/account/command.proto | 59 + proto/account/service.proto | 281 + proto/auditlog/BUILD.bazel | 51 + proto/auditlog/auditlog.proto | 35 + proto/auditlog/service.proto | 107 + proto/auth/BUILD.bazel | 35 + proto/auth/service.proto | 53 + proto/auth/token.proto | 31 + proto/autoops/BUILD.bazel | 43 + proto/autoops/auto_ops_rule.proto | 36 + 
proto/autoops/clause.proto | 60 + proto/autoops/command.proto | 83 + proto/autoops/ops_count.proto | 28 + proto/autoops/service.proto | 180 + proto/autoops/webhook.proto | 26 + proto/environment/BUILD.bazel | 41 + proto/environment/command.proto | 57 + proto/environment/environment.proto | 29 + proto/environment/project.proto | 28 + proto/environment/service.proto | 183 + proto/event/client/BUILD.bazel | 33 + proto/event/client/event.proto | 113 + proto/event/domain/BUILD.bazel | 40 + proto/event/domain/event.proto | 876 + proto/event/domain/localized_message.proto | 26 + proto/event/service/BUILD.bazel | 39 + proto/event/service/feature.proto | 30 + proto/event/service/segment.proto | 30 + proto/event/service/user.proto | 33 + proto/eventcounter/BUILD.bazel | 49 + proto/eventcounter/distribution_summary.proto | 32 + proto/eventcounter/evaluation_count.proto | 29 + proto/eventcounter/experiment_count.proto | 37 + proto/eventcounter/experiment_result.proto | 27 + proto/eventcounter/filter.proto | 25 + proto/eventcounter/goal_result.proto | 25 + proto/eventcounter/histogram.proto | 23 + proto/eventcounter/service.proto | 159 + proto/eventcounter/table.proto | 32 + proto/eventcounter/timeseries.proto | 28 + proto/eventcounter/variation_count.proto | 29 + proto/eventcounter/variation_result.proto | 47 + proto/experiment/BUILD.bazel | 46 + proto/experiment/command.proto | 70 + proto/experiment/experiment.proto | 53 + proto/experiment/goal.proto | 29 + proto/experiment/service.proto | 223 + .../google/rpc/BUILD.bazel | 31 + .../google/rpc/code.proto | 200 + .../google/rpc/error_details.proto | 214 + .../google/rpc/status.proto | 106 + .../v3.18.1/google/protobuf/BUILD.bazel | 40 + .../v3.18.1/google/protobuf/any.proto | 172 + .../v3.18.1/google/protobuf/api.proto | 222 + .../v3.18.1/google/protobuf/descriptor.proto | 925 + .../v3.18.1/google/protobuf/duration.proto | 130 + .../v3.18.1/google/protobuf/empty.proto | 66 + .../v3.18.1/google/protobuf/field_mask.proto | 259 + 
.../google/protobuf/source_context.proto | 62 + .../v3.18.1/google/protobuf/struct.proto | 109 + .../v3.18.1/google/protobuf/timestamp.proto | 161 + .../v3.18.1/google/protobuf/type.proto | 201 + .../v3.18.1/google/protobuf/wrappers.proto | 137 + proto/feature/BUILD.bazel | 57 + proto/feature/clause.proto | 38 + proto/feature/command.proto | 229 + proto/feature/evaluation.proto | 43 + proto/feature/feature.proto | 66 + proto/feature/feature_last_used_info.proto | 27 + proto/feature/prerequisite.proto | 23 + proto/feature/reason.proto | 31 + proto/feature/rule.proto | 27 + proto/feature/segment.proto | 58 + proto/feature/service.proto | 407 + proto/feature/strategy.proto | 40 + proto/feature/target.proto | 23 + proto/feature/variation.proto | 26 + proto/gateway/BUILD.bazel | 79 + proto/gateway/service.proto | 94 + proto/migration/BUILD.bazel | 32 + proto/migration/mysql_service.proto | 35 + proto/notification/BUILD.bazel | 41 + proto/notification/command.proto | 69 + proto/notification/recipient.proto | 28 + proto/notification/sender/BUILD.bazel | 38 + proto/notification/sender/notification.proto | 64 + .../sender/notification_event.proto | 29 + proto/notification/service.proto | 228 + proto/notification/subscription.proto | 49 + proto/proto.lock | 14208 ++++++++++++++ proto/proto_descriptor.bzl | 30 + proto/push/BUILD.bazel | 40 + proto/push/command.proto | 38 + proto/push/push.proto | 28 + proto/push/service.proto | 78 + proto/test/BUILD.bazel | 24 + proto/test/service.proto | 30 + proto/user/BUILD.bazel | 35 + proto/user/service.proto | 59 + proto/user/user.proto | 29 + python/.gitignore | 5 + python/Dockerfile | 25 + python/Makefile | 48 + python/README.md | 63 + python/poetry.lock | 1682 ++ python/poetry.toml | 3 + python/pyproject.toml | 38 + python/requirements-dev.txt | 71 + python/requirements.txt | 49 + python/src/cmd/calculator/main.py | 122 + .../src/lib/calculator/domain/experiment.py | 45 + .../calculator/domain/experiment_result.py | 20 + 
.../calculator/job/calculate_experiments.py | 543 + python/src/lib/calculator/job/metrics.py | 27 + python/src/lib/calculator/stats/binomial.py | 174 + python/src/lib/calculator/stats/metrics.py | 47 + .../calculator/stats/normal_inverse_gamma.py | 147 + .../storage/mysql_experiment_result.py | 57 + python/src/lib/environment/stub/stub.py | 15 + python/src/lib/eventcounter/stub/stub.py | 15 + python/src/lib/experiment/stub/stub.py | 15 + python/src/lib/health/health.py | 46 + python/src/lib/log/formatter.py | 7 + python/src/lib/log/logger.py | 31 + python/src/lib/metrics/server.py | 18 + python/src/lib/rpc/rpc.py | 36 + python/src/lib/schedule/job.py | 29 + python/src/lib/schedule/metrics.py | 31 + python/src/lib/schedule/scheduler.py | 72 + python/src/lib/signal/signal_handler.py | 34 + python/src/lib/storage/mysql/client.py | 26 + .../domain/experiment_result_test.py | 26 + .../lib/calculator/domain/experiment_test.py | 162 + .../job/calculate_experiments_test.py | 958 + .../lib/calculator/stats/binomial_test.py | 71 + .../stats/normal_inverse_gamma_test.py | 105 + remove.sh | 7 + renovate.json | 3 + repositories.bzl | 1655 ++ static-files/img/bucketeer-dashboard.png | Bin 0 -> 223095 bytes test/e2e/autoops/BUILD.bazel | 26 + test/e2e/autoops/auto_ops_test.go | 1183 ++ test/e2e/environment/BUILD.bazel | 14 + test/e2e/environment/environment_test.go | 172 + test/e2e/environment/project_test.go | 85 + test/e2e/eventcounter/BUILD.bazel | 24 + test/e2e/eventcounter/eventcounter_test.go | 2102 +++ test/e2e/experiment/BUILD.bazel | 16 + test/e2e/experiment/experiment_test.go | 775 + test/e2e/feature/BUILD.bazel | 33 + .../feature/feature_last_used_info_test.go | 187 + test/e2e/feature/feature_test.go | 1823 ++ test/e2e/feature/segment_test.go | 219 + test/e2e/feature/segment_user_test.go | 296 + test/e2e/feature/tag_test.go | 111 + test/e2e/feature/user_evaluations_test.go | 86 + test/e2e/gateway/BUILD.bazel | 27 + test/e2e/gateway/api_grpc_test.go | 444 + 
test/e2e/gateway/api_test.go | 198 + test/e2e/gateway/testdata/invalid-apikey | 1 + test/e2e/notification/BUILD.bazel | 18 + .../notification/admin_subscription_test.go | 293 + test/e2e/notification/subscription_test.go | 426 + test/e2e/push/BUILD.bazel | 15 + test/e2e/push/push_test.go | 233 + test/e2e/user/BUILD.bazel | 20 + test/e2e/user/user_test.go | 444 + test/e2e/util/BUILD.bazel | 14 + test/e2e/util/rest.go | 183 + test/util/BUILD.bazel | 16 + test/util/command.go | 25 + test/util/sort.go | 44 + tools/build/status.sh | 4 + tools/bzl/nodejs/BUILD.bazel | 0 tools/bzl/nodejs/defs.bzl | 3 + tools/bzl/nodejs/protobufjs/BUILD.bazel | 0 .../nodejs/protobufjs/ts_proto_library.bzl | 41 + tools/gen/gen.sh | 42 + tools/runner/Dockerfile | 100 + ui/web-v2/.editorconfig | 13 + ui/web-v2/.eslintignore | 1 + ui/web-v2/.eslintrc.json | 38 + ui/web-v2/.gitignore | 48 + ui/web-v2/.prettierignore | 4 + ui/web-v2/.prettierrc | 3 + ui/web-v2/BUILD.bazel | 31 + ui/web-v2/Dockerfile | 12 + ui/web-v2/Makefile | 43 + ui/web-v2/README.md | 57 + ui/web-v2/apps/.gitkeep | 0 ui/web-v2/apps/admin/.babelrc | 4 + ui/web-v2/apps/admin/.browserslistrc | 16 + ui/web-v2/apps/admin/.eslintrc.json | 55 + ui/web-v2/apps/admin/babel-jest.config.json | 14 + ui/web-v2/apps/admin/certs/.gitkeep | 0 ui/web-v2/apps/admin/jest.config.js | 13 + ui/web-v2/apps/admin/src/assets/.gitkeep | 0 ui/web-v2/apps/admin/src/assets/lang/en.json | 455 + ui/web-v2/apps/admin/src/assets/lang/ja.json | 456 + ui/web-v2/apps/admin/src/assets/logo.png | Bin 0 -> 10804 bytes .../src/components/APIKeyAddForm/index.tsx | 97 + .../admin/src/components/APIKeyList/index.tsx | 182 + .../src/components/APIKeySearch/index.tsx | 191 + .../src/components/APIKeyUpdateForm/index.tsx | 102 + .../src/components/AccountAddForm/index.tsx | 150 + .../src/components/AccountList/index.tsx | 179 + .../src/components/AccountSearch/index.tsx | 238 + .../components/AccountUpdateForm/index.tsx | 154 + .../admin/src/components/ActionMenu/index.tsx 
| 103 + .../components/AdminAccountAddForm/index.tsx | 100 + .../src/components/AdminAccountList/index.tsx | 161 + .../components/AdminAccountSearch/index.tsx | 196 + .../AdminNotificationAddForm/index.tsx | 147 + .../AdminNotificationList/index.tsx | 185 + .../AdminNotificationUpdateForm/index.tsx | 161 + .../src/components/AnalysisForm/index.tsx | 311 + .../src/components/AnalysisTable/index.tsx | 241 + .../src/components/AuditLogList/index.tsx | 142 + .../src/components/AuditLogSearch/index.tsx | 98 + .../src/components/Breadcrumbs/index.tsx | 57 + .../admin/src/components/CheckBox/index.tsx | 29 + .../src/components/CheckBoxList/index.tsx | 81 + .../src/components/ConfirmDialog/index.tsx | 59 + .../admin/src/components/CopyChip/index.tsx | 49 + .../components/CountResultBarChart/index.tsx | 57 + .../components/CountResultPieChart/index.tsx | 43 + .../src/components/CreatableSelect/index.tsx | 88 + .../src/components/DatetimePicker/index.tsx | 38 + .../src/components/DetailSkeleton/index.tsx | 19 + .../components/EnvironmentAddForm/index.tsx | 169 + .../src/components/EnvironmentList/index.tsx | 135 + .../components/EnvironmentSearch/index.tsx | 195 + .../components/EnvironmentSelect/index.tsx | 141 + .../EnvironmentUpdateForm/index.tsx | 134 + .../components/ExperimentAddForm/index.tsx | 358 + .../src/components/ExperimentList/index.tsx | 231 + .../ExperimentResultDetail/index.tsx | 129 + .../src/components/ExperimentSearch/index.tsx | 277 + .../components/ExperimentUpdateForm/index.tsx | 139 + .../src/components/FeatureAddForm/index.tsx | 238 + .../FeatureAutoOpsRulesForm/index.tsx | 679 + .../src/components/FeatureCloneForm/index.tsx | 156 + .../components/FeatureConfirmDialog/index.tsx | 119 + .../components/FeatureEvaluation/index.tsx | 92 + .../src/components/FeatureHeader/index.tsx | 115 + .../src/components/FeatureIdChip/index.tsx | 34 + .../src/components/FeatureList/index.tsx | 623 + .../components/FeatureSettingsForm/index.tsx | 122 + 
.../components/FeatureTargetingForm/index.tsx | 900 + .../FeatureVariationsForm/index.tsx | 93 + .../admin/src/components/FilterChip/index.tsx | 30 + .../src/components/FilterPopover/index.tsx | 181 + .../FilterRemoveAllButton/index.tsx | 33 + .../src/components/GoalAddForm/index.tsx | 141 + .../admin/src/components/GoalList/index.tsx | 406 + .../ConversionRateDetail/index.tsx | 46 + .../ConversionRateDistributionChart/index.tsx | 42 + .../index.tsx | 43 + .../ConversionRateTable/index.tsx | 139 + .../ConversionRateTimeseriesChart/index.tsx | 34 + .../EvaluationUserTimeseriesChart/index.tsx | 36 + .../GoalResultTable/index.tsx | 134 + .../GoalTotalTimeseriesChart/index.tsx | 37 + .../GoalUserTimeseriesChart/index.tsx | 36 + .../GoalResultDetail/Table/index.tsx | 85 + .../ValuePerUserDetail/index.tsx | 39 + .../index.tsx | 46 + .../ValuePerUserTable/index.tsx | 140 + .../ValuePerUserTimeseriesChart/index.tsx | 35 + .../ValueTotalTimeseriesChart/index.tsx | 36 + .../src/components/GoalResultDetail/index.tsx | 264 + .../src/components/GoalUpdateForm/index.tsx | 148 + .../admin/src/components/Header/index.tsx | 18 + .../src/components/HelpTextTooltip/index.tsx | 53 + .../src/components/HistogramChart/index.tsx | 64 + .../src/components/HoverPopover/index.tsx | 66 + .../src/components/ListSkeleton/index.tsx | 21 + .../admin/src/components/ListTab/index.tsx | 40 + .../apps/admin/src/components/Modal/index.tsx | 71 + .../admin/src/components/NotFound/index.tsx | 9 + .../components/NotificationAddForm/index.tsx | 147 + .../src/components/NotificationList/index.tsx | 185 + .../components/NotificationSearch/index.tsx | 182 + .../NotificationUpdateForm/index.tsx | 163 + .../admin/src/components/Overlay/index.tsx | 59 + .../admin/src/components/Pagination/index.tsx | 99 + .../src/components/ProjectAddForm/index.tsx | 108 + .../src/components/ProjectList/index.tsx | 205 + .../src/components/ProjectSearch/index.tsx | 188 + .../components/ProjectUpdateForm/index.tsx | 115 + 
.../src/components/PushAddForm/index.tsx | 142 + .../admin/src/components/PushList/index.tsx | 170 + .../admin/src/components/PushSearch/index.tsx | 102 + .../src/components/PushUpdateForm/index.tsx | 145 + .../src/components/RelativeDateText/index.tsx | 35 + .../src/components/SearchInput/index.tsx | 42 + .../src/components/SegmentAddForm/index.tsx | 214 + .../components/SegmentDeleteDialog/index.tsx | 68 + .../src/components/SegmentList/index.tsx | 228 + .../src/components/SegmentSearch/index.tsx | 190 + .../components/SegmentUpdateForm/index.tsx | 261 + .../admin/src/components/Select/index.tsx | 108 + .../admin/src/components/SideMenu/index.tsx | 271 + .../admin/src/components/SortSelect/index.tsx | 109 + .../admin/src/components/Switch/index.tsx | 65 + .../admin/src/components/TagsChips/index.tsx | 37 + .../TimeseriesAreaLineChart/index.tsx | 116 + .../components/TimeseriesLineChart/index.tsx | 62 + .../TimeseriesStackedLineChart/index.tsx | 56 + .../admin/src/components/Toasts/index.tsx | 132 + .../src/components/VariationInput/index.tsx | 318 + ui/web-v2/apps/admin/src/config/index.ts | 12 + ui/web-v2/apps/admin/src/constants/account.ts | 2 + .../admin/src/constants/adminNotification.ts | 35 + .../apps/admin/src/constants/analysis.ts | 3 + ui/web-v2/apps/admin/src/constants/apiKey.ts | 3 + .../apps/admin/src/constants/auditLog.ts | 1 + ui/web-v2/apps/admin/src/constants/autoops.ts | 1 + .../apps/admin/src/constants/colorPattern.ts | 21 + .../apps/admin/src/constants/environment.ts | 2 + .../apps/admin/src/constants/experiment.ts | 6 + ui/web-v2/apps/admin/src/constants/feature.ts | 10 + ui/web-v2/apps/admin/src/constants/goal.ts | 4 + .../apps/admin/src/constants/notification.ts | 77 + ui/web-v2/apps/admin/src/constants/project.ts | 2 + ui/web-v2/apps/admin/src/constants/push.ts | 4 + ui/web-v2/apps/admin/src/constants/routing.ts | 31 + ui/web-v2/apps/admin/src/constants/segment.ts | 5 + .../apps/admin/src/constants/variation.ts | 4 + 
ui/web-v2/apps/admin/src/cookie/index.ts | 16 + .../src/environments/environment.prod.ts | 3 + .../admin/src/environments/environment.ts | 6 + ui/web-v2/apps/admin/src/favicon.ico | Bin 0 -> 738 bytes ui/web-v2/apps/admin/src/grpc/account.ts | 238 + .../apps/admin/src/grpc/adminSubscription.ts | 278 + ui/web-v2/apps/admin/src/grpc/adminaccount.ts | 199 + ui/web-v2/apps/admin/src/grpc/apikey.ts | 186 + ui/web-v2/apps/admin/src/grpc/auditLog.ts | 125 + ui/web-v2/apps/admin/src/grpc/auth.ts | 109 + ui/web-v2/apps/admin/src/grpc/autoops.ts | 192 + ui/web-v2/apps/admin/src/grpc/environment.ts | 195 + ui/web-v2/apps/admin/src/grpc/eventcounter.ts | 184 + ui/web-v2/apps/admin/src/grpc/experiment.ts | 394 + ui/web-v2/apps/admin/src/grpc/features.ts | 369 + ui/web-v2/apps/admin/src/grpc/messages.ts | 78 + ui/web-v2/apps/admin/src/grpc/project.ts | 217 + ui/web-v2/apps/admin/src/grpc/push.ts | 134 + ui/web-v2/apps/admin/src/grpc/segments.ts | 319 + ui/web-v2/apps/admin/src/grpc/subscription.ts | 230 + ui/web-v2/apps/admin/src/grpc/utils.ts | 31 + ui/web-v2/apps/admin/src/history/index.ts | 3 + ui/web-v2/apps/admin/src/index.html | 14 + ui/web-v2/apps/admin/src/interfaces/grpc.ts | 6 + ui/web-v2/apps/admin/src/lang/index.ts | 15 + ui/web-v2/apps/admin/src/lang/messages.ts | 2306 +++ ui/web-v2/apps/admin/src/lang/yup/jp.ts | 43 + ui/web-v2/apps/admin/src/main.tsx | 25 + .../src/middlewares/thunkErrorHandler.ts | 39 + ui/web-v2/apps/admin/src/modules/accounts.ts | 230 + .../apps/admin/src/modules/adminAccounts.ts | 186 + .../admin/src/modules/adminNotifications.ts | 275 + ui/web-v2/apps/admin/src/modules/apiKeys.ts | 221 + ui/web-v2/apps/admin/src/modules/auditLogs.ts | 190 + ui/web-v2/apps/admin/src/modules/auth.ts | 169 + .../apps/admin/src/modules/autoOpsRules.ts | 180 + .../apps/admin/src/modules/environments.ts | 170 + .../src/modules/evaluationTimeseriesCount.ts | 67 + .../admin/src/modules/experimentResult.ts | 79 + .../apps/admin/src/modules/experiments.ts | 270 + 
ui/web-v2/apps/admin/src/modules/features.ts | 447 + .../apps/admin/src/modules/goalCounts.ts | 96 + ui/web-v2/apps/admin/src/modules/goals.ts | 237 + ui/web-v2/apps/admin/src/modules/index.ts | 54 + ui/web-v2/apps/admin/src/modules/me.ts | 120 + .../apps/admin/src/modules/notifications.ts | 255 + ui/web-v2/apps/admin/src/modules/projects.ts | 227 + ui/web-v2/apps/admin/src/modules/pushes.ts | 186 + ui/web-v2/apps/admin/src/modules/segments.ts | 281 + ui/web-v2/apps/admin/src/modules/toasts.ts | 45 + .../apps/admin/src/modules/userMetadata.ts | 63 + .../admin/src/pages/account/formSchema.ts | 25 + .../apps/admin/src/pages/account/index.tsx | 383 + .../src/pages/admin/account/formSchema.ts | 18 + .../admin/src/pages/admin/account/index.tsx | 325 + .../admin/src/pages/admin/auditLog/index.tsx | 146 + .../src/pages/admin/environment/formSchema.ts | 28 + .../src/pages/admin/environment/index.tsx | 292 + .../apps/admin/src/pages/admin/index.tsx | 144 + .../pages/admin/notification/formSchema.ts | 32 + .../src/pages/admin/notification/index.tsx | 425 + .../src/pages/admin/projects/formSchema.ts | 27 + .../admin/src/pages/admin/projects/index.tsx | 417 + .../admin/src/pages/analysis/formSchema.ts | 48 + .../apps/admin/src/pages/analysis/index.tsx | 90 + .../apps/admin/src/pages/apiKey/formSchema.ts | 21 + .../apps/admin/src/pages/apiKey/index.tsx | 384 + .../apps/admin/src/pages/auditLog/index.tsx | 129 + ui/web-v2/apps/admin/src/pages/auth/index.tsx | 31 + .../admin/src/pages/experiment/formSchema.ts | 71 + .../apps/admin/src/pages/experiment/index.tsx | 416 + .../apps/admin/src/pages/feature/autoops.tsx | 522 + .../apps/admin/src/pages/feature/detail.tsx | 181 + .../admin/src/pages/feature/evaluation.tsx | 42 + .../admin/src/pages/feature/experiments.tsx | 337 + .../admin/src/pages/feature/formSchema.ts | 320 + .../apps/admin/src/pages/feature/history.tsx | 126 + .../apps/admin/src/pages/feature/index.tsx | 526 + .../apps/admin/src/pages/feature/settings.tsx | 141 + 
.../admin/src/pages/feature/targeting.tsx | 633 + .../admin/src/pages/feature/variations.tsx | 294 + .../apps/admin/src/pages/goal/formSchema.ts | 25 + ui/web-v2/apps/admin/src/pages/goal/index.tsx | 368 + ui/web-v2/apps/admin/src/pages/index.tsx | 245 + .../src/pages/notification/formSchema.ts | 32 + .../admin/src/pages/notification/index.tsx | 434 + .../apps/admin/src/pages/push/formSchema.ts | 29 + ui/web-v2/apps/admin/src/pages/push/index.tsx | 350 + .../admin/src/pages/segment/formSchema.ts | 52 + .../apps/admin/src/pages/segment/index.tsx | 469 + .../apps/admin/src/pages/settings/index.tsx | 107 + ui/web-v2/apps/admin/src/polyfills.ts | 9 + ui/web-v2/apps/admin/src/postcss.config.js | 6 + .../apps/admin/src/storage/environment.ts | 39 + ui/web-v2/apps/admin/src/storage/token.ts | 42 + ui/web-v2/apps/admin/src/store/index.ts | 12 + ui/web-v2/apps/admin/src/styles/styles.css | 89 + ui/web-v2/apps/admin/src/types/account.ts | 27 + .../apps/admin/src/types/adminAccount.ts | 28 + .../apps/admin/src/types/adminNotification.ts | 29 + ui/web-v2/apps/admin/src/types/apiKey.ts | 26 + ui/web-v2/apps/admin/src/types/auditLog.ts | 22 + ui/web-v2/apps/admin/src/types/environment.ts | 28 + ui/web-v2/apps/admin/src/types/experiment.ts | 29 + ui/web-v2/apps/admin/src/types/feature.ts | 27 + ui/web-v2/apps/admin/src/types/goal.ts | 26 + ui/web-v2/apps/admin/src/types/list.ts | 8 + .../apps/admin/src/types/notification.ts | 28 + ui/web-v2/apps/admin/src/types/project.ts | 26 + ui/web-v2/apps/admin/src/types/push.ts | 25 + ui/web-v2/apps/admin/src/types/segment.ts | 26 + ui/web-v2/apps/admin/src/utils/css.ts | 3 + ui/web-v2/apps/admin/src/utils/date.ts | 5 + .../apps/admin/src/utils/search-params.ts | 14 + ui/web-v2/apps/admin/src/utils/validate.ts | 8 + ui/web-v2/apps/admin/tailwind.config.js | 26 + ui/web-v2/apps/admin/tsconfig.app.json | 25 + ui/web-v2/apps/admin/tsconfig.json | 21 + ui/web-v2/apps/admin/tsconfig.spec.json | 20 + ui/web-v2/apps/admin/webpack-config.js | 88 + 
ui/web-v2/babel.config.json | 4 + ui/web-v2/jest.config.js | 3 + ui/web-v2/jest.preset.js | 3 + ui/web-v2/libs/.gitkeep | 0 ui/web-v2/nx.json | 30 + ui/web-v2/package.json | 138 + ui/web-v2/tools/generators/.gitkeep | 0 ui/web-v2/tools/tsconfig.tools.json | 12 + ui/web-v2/tsconfig.base.json | 21 + ui/web-v2/workspace.json | 106 + ui/web-v2/yarn.lock | 15701 ++++++++++++++++ 1712 files changed, 251614 insertions(+), 1 deletion(-) create mode 100644 .bazelrc create mode 100644 .bazelversion create mode 100644 .github/workflows/pr-title-validation.yaml create mode 100644 .github/workflows/publish_chart.yaml create mode 100644 .github/workflows/release.yaml create mode 100644 .gitignore create mode 100644 .golangci.yml create mode 100644 BUILD.bazel create mode 100644 BUILD.googleapis create mode 100644 CHANGELOG.md create mode 100644 CLA.md create mode 100644 CONTRIBUTING.md create mode 100644 DEPLOYMENT.md create mode 100644 LICENSE create mode 100644 Makefile create mode 100644 WORKSPACE create mode 100644 cmd/account/BUILD.bazel create mode 100644 cmd/account/account.go create mode 100644 cmd/auditlog/BUILD.bazel create mode 100644 cmd/auditlog/auditlog.go create mode 100644 cmd/auth/BUILD.bazel create mode 100644 cmd/auth/auth.go create mode 100644 cmd/autoops/BUILD.bazel create mode 100644 cmd/autoops/autoops.go create mode 100644 cmd/environment/BUILD.bazel create mode 100644 cmd/environment/environment.go create mode 100644 cmd/eventcounter/BUILD.bazel create mode 100644 cmd/eventcounter/eventcounter.go create mode 100644 cmd/eventpersister/BUILD.bazel create mode 100644 cmd/eventpersister/eventpersister.go create mode 100644 cmd/experiment/BUILD.bazel create mode 100644 cmd/experiment/experiment.go create mode 100644 cmd/feature/BUILD.bazel create mode 100644 cmd/feature/feature.go create mode 100644 cmd/gateway/BUILD.bazel create mode 100644 cmd/gateway/gateway.go create mode 100644 cmd/goalbatch/BUILD.bazel create mode 100644 cmd/goalbatch/goalbatch.go 
create mode 100644 cmd/metricsevent/BUILD.bazel create mode 100644 cmd/metricsevent/metricsevent.go create mode 100644 cmd/migration/BUILD.bazel create mode 100644 cmd/migration/migration.go create mode 100644 cmd/notification/BUILD.bazel create mode 100644 cmd/notification/notification.go create mode 100644 cmd/opsevent/BUILD.bazel create mode 100644 cmd/opsevent/opsevent.go create mode 100644 cmd/push/BUILD.bazel create mode 100644 cmd/push/push.go create mode 100644 cmd/user/BUILD.bazel create mode 100644 cmd/user/user.go create mode 100644 go.mod create mode 100644 go.sum create mode 100644 hack/create-account/BUILD.bazel create mode 100644 hack/create-account/README.md create mode 100644 hack/create-account/command.go create mode 100644 hack/create-account/main.go create mode 100644 hack/create-api-key/BUILD.bazel create mode 100644 hack/create-api-key/README.md create mode 100644 hack/create-api-key/command.go create mode 100644 hack/create-api-key/main.go create mode 100644 hack/create-environment/BUILD.bazel create mode 100644 hack/create-environment/README.md create mode 100644 hack/create-environment/command.go create mode 100644 hack/create-environment/main.go create mode 100644 hack/create-project/BUILD.bazel create mode 100644 hack/create-project/README.md create mode 100644 hack/create-project/command.go create mode 100644 hack/create-project/main.go create mode 100644 hack/delete-e2e-data-mysql/BUILD.bazel create mode 100644 hack/delete-e2e-data-mysql/README.md create mode 100644 hack/delete-e2e-data-mysql/command.go create mode 100644 hack/delete-e2e-data-mysql/main.go create mode 100644 hack/delete-environment/BUILD.bazel create mode 100644 hack/delete-environment/README.md create mode 100644 hack/delete-environment/command.go create mode 100644 hack/delete-environment/main.go create mode 100644 hack/generate-service-token/BUILD.bazel create mode 100644 hack/generate-service-token/README.md create mode 100644 hack/generate-service-token/command.go 
create mode 100644 hack/generate-service-token/main.go create mode 100644 manifests/bucketeer/.helmignore create mode 100644 manifests/bucketeer/Chart.yaml create mode 100644 manifests/bucketeer/charts/account-apikey-cacher/.helmignore create mode 100644 manifests/bucketeer/charts/account-apikey-cacher/Chart.yaml create mode 100644 manifests/bucketeer/charts/account-apikey-cacher/templates/NOTES.txt create mode 100644 manifests/bucketeer/charts/account-apikey-cacher/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/account-apikey-cacher/templates/deployment.yaml create mode 100644 manifests/bucketeer/charts/account-apikey-cacher/templates/envoy-configmap.yaml create mode 100644 manifests/bucketeer/charts/account-apikey-cacher/templates/hpa.yaml create mode 100644 manifests/bucketeer/charts/account-apikey-cacher/templates/service-cert-secret.yaml create mode 100644 manifests/bucketeer/charts/account-apikey-cacher/templates/service-token-secret.yaml create mode 100644 manifests/bucketeer/charts/account-apikey-cacher/templates/service.yaml create mode 100644 manifests/bucketeer/charts/account-apikey-cacher/values.yaml create mode 100644 manifests/bucketeer/charts/account/.helmignore create mode 100644 manifests/bucketeer/charts/account/Chart.yaml create mode 100644 manifests/bucketeer/charts/account/templates/NOTES.txt create mode 100644 manifests/bucketeer/charts/account/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/account/templates/deployment.yaml create mode 100644 manifests/bucketeer/charts/account/templates/envoy-configmap.yaml create mode 100644 manifests/bucketeer/charts/account/templates/hpa.yaml create mode 100644 manifests/bucketeer/charts/account/templates/oauth-key-secret.yaml create mode 100644 manifests/bucketeer/charts/account/templates/pdb.yaml create mode 100644 manifests/bucketeer/charts/account/templates/service-cert-secret.yaml create mode 100644 
manifests/bucketeer/charts/account/templates/service-token-secret.yaml create mode 100644 manifests/bucketeer/charts/account/templates/service.yaml create mode 100644 manifests/bucketeer/charts/account/values.yaml create mode 100644 manifests/bucketeer/charts/api-gateway/.helmignore create mode 100644 manifests/bucketeer/charts/api-gateway/Chart.yaml create mode 100644 manifests/bucketeer/charts/api-gateway/templates/NOTES.txt create mode 100644 manifests/bucketeer/charts/api-gateway/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/api-gateway/templates/backend-config.yaml create mode 100644 manifests/bucketeer/charts/api-gateway/templates/deployment.yaml create mode 100644 manifests/bucketeer/charts/api-gateway/templates/envoy-configmap.yaml create mode 100644 manifests/bucketeer/charts/api-gateway/templates/hpa.yaml create mode 100644 manifests/bucketeer/charts/api-gateway/templates/ingress.yaml create mode 100644 manifests/bucketeer/charts/api-gateway/templates/pdb.yaml create mode 100644 manifests/bucketeer/charts/api-gateway/templates/secret.yaml create mode 100644 manifests/bucketeer/charts/api-gateway/templates/service-cert-secret.yaml create mode 100644 manifests/bucketeer/charts/api-gateway/templates/service-token-secret.yaml create mode 100644 manifests/bucketeer/charts/api-gateway/templates/service.yaml create mode 100644 manifests/bucketeer/charts/api-gateway/values.yaml create mode 100644 manifests/bucketeer/charts/auditlog-persister/Chart.yaml create mode 100644 manifests/bucketeer/charts/auditlog-persister/templates/NOTES.txt create mode 100644 manifests/bucketeer/charts/auditlog-persister/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/auditlog-persister/templates/deployment.yaml create mode 100644 manifests/bucketeer/charts/auditlog-persister/templates/envoy-configmap.yaml create mode 100644 manifests/bucketeer/charts/auditlog-persister/templates/hpa.yaml create mode 100644 
manifests/bucketeer/charts/auditlog-persister/templates/service-cert-secret.yaml create mode 100644 manifests/bucketeer/charts/auditlog-persister/templates/service.yaml create mode 100644 manifests/bucketeer/charts/auditlog-persister/values.yaml create mode 100644 manifests/bucketeer/charts/auditlog/.helmignore create mode 100644 manifests/bucketeer/charts/auditlog/Chart.yaml create mode 100644 manifests/bucketeer/charts/auditlog/templates/NOTES.txt create mode 100644 manifests/bucketeer/charts/auditlog/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/auditlog/templates/deployment.yaml create mode 100644 manifests/bucketeer/charts/auditlog/templates/envoy-configmap.yaml create mode 100644 manifests/bucketeer/charts/auditlog/templates/hpa.yaml create mode 100644 manifests/bucketeer/charts/auditlog/templates/oauth-key-secret.yaml create mode 100644 manifests/bucketeer/charts/auditlog/templates/pdb.yaml create mode 100644 manifests/bucketeer/charts/auditlog/templates/service-cert-secret.yaml create mode 100644 manifests/bucketeer/charts/auditlog/templates/service-token-secret.yaml create mode 100644 manifests/bucketeer/charts/auditlog/templates/service.yaml create mode 100644 manifests/bucketeer/charts/auditlog/values.yaml create mode 100644 manifests/bucketeer/charts/auth/.helmignore create mode 100644 manifests/bucketeer/charts/auth/Chart.yaml create mode 100644 manifests/bucketeer/charts/auth/templates/NOTES.txt create mode 100644 manifests/bucketeer/charts/auth/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/auth/templates/deployment.yaml create mode 100644 manifests/bucketeer/charts/auth/templates/envoy-configmap.yaml create mode 100644 manifests/bucketeer/charts/auth/templates/hpa.yaml create mode 100644 manifests/bucketeer/charts/auth/templates/issuer-cert-secret.yaml create mode 100644 manifests/bucketeer/charts/auth/templates/oauth-key-secret.yaml create mode 100644 manifests/bucketeer/charts/auth/templates/pdb.yaml 
create mode 100644 manifests/bucketeer/charts/auth/templates/service-cert-secret.yaml create mode 100644 manifests/bucketeer/charts/auth/templates/service-token-secret.yaml create mode 100644 manifests/bucketeer/charts/auth/templates/service.yaml create mode 100644 manifests/bucketeer/charts/auth/values.yaml create mode 100644 manifests/bucketeer/charts/auto-ops/Chart.yaml create mode 100644 manifests/bucketeer/charts/auto-ops/templates/NOTES.txt create mode 100644 manifests/bucketeer/charts/auto-ops/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/auto-ops/templates/deployment.yaml create mode 100644 manifests/bucketeer/charts/auto-ops/templates/envoy-configmap.yaml create mode 100644 manifests/bucketeer/charts/auto-ops/templates/hpa.yaml create mode 100644 manifests/bucketeer/charts/auto-ops/templates/oauth-key-secret.yaml create mode 100644 manifests/bucketeer/charts/auto-ops/templates/pdb.yaml create mode 100644 manifests/bucketeer/charts/auto-ops/templates/service-cert-secret.yaml create mode 100644 manifests/bucketeer/charts/auto-ops/templates/service-token-secret.yaml create mode 100644 manifests/bucketeer/charts/auto-ops/templates/service.yaml create mode 100644 manifests/bucketeer/charts/auto-ops/values.yaml create mode 100644 manifests/bucketeer/charts/calculator/Chart.yaml create mode 100644 manifests/bucketeer/charts/calculator/templates/NOTES.txt create mode 100644 manifests/bucketeer/charts/calculator/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/calculator/templates/deployment.yaml create mode 100644 manifests/bucketeer/charts/calculator/templates/envoy-configmap.yaml create mode 100644 manifests/bucketeer/charts/calculator/templates/hpa.yaml create mode 100644 manifests/bucketeer/charts/calculator/templates/service-cert-secret.yaml create mode 100644 manifests/bucketeer/charts/calculator/templates/service-token-secret.yaml create mode 100644 manifests/bucketeer/charts/calculator/templates/service.yaml 
create mode 100644 manifests/bucketeer/charts/calculator/values.yaml create mode 100644 manifests/bucketeer/charts/dex/.helmignore create mode 100644 manifests/bucketeer/charts/dex/Chart.yaml create mode 100644 manifests/bucketeer/charts/dex/templates/NOTES.txt create mode 100644 manifests/bucketeer/charts/dex/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/dex/templates/cert-secret.yaml create mode 100644 manifests/bucketeer/charts/dex/templates/configmap.yaml create mode 100644 manifests/bucketeer/charts/dex/templates/deployment.yaml create mode 100644 manifests/bucketeer/charts/dex/templates/envoy-configmap.yaml create mode 100644 manifests/bucketeer/charts/dex/templates/pdb.yaml create mode 100644 manifests/bucketeer/charts/dex/templates/secret.yaml create mode 100644 manifests/bucketeer/charts/dex/templates/service.yaml create mode 100644 manifests/bucketeer/charts/dex/values.yaml create mode 100644 manifests/bucketeer/charts/druid/Chart.yaml create mode 100644 manifests/bucketeer/charts/druid/charts/druid-cluster/.helmignore create mode 100644 manifests/bucketeer/charts/druid/charts/druid-cluster/Chart.yaml create mode 100644 manifests/bucketeer/charts/druid/charts/druid-cluster/templates/NOTES.txt create mode 100644 manifests/bucketeer/charts/druid/charts/druid-cluster/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/druid/charts/druid-cluster/templates/druid.yaml create mode 100644 manifests/bucketeer/charts/druid/charts/druid-cluster/values.yaml create mode 100644 manifests/bucketeer/charts/druid/charts/druid-operator/.helmignore create mode 100644 manifests/bucketeer/charts/druid/charts/druid-operator/Chart.yaml create mode 100644 manifests/bucketeer/charts/druid/charts/druid-operator/templates/NOTES.txt create mode 100644 manifests/bucketeer/charts/druid/charts/druid-operator/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/druid/charts/druid-operator/templates/druid.apache.org_druids_crd.yaml 
create mode 100644 manifests/bucketeer/charts/druid/charts/druid-operator/templates/operator.yaml create mode 100644 manifests/bucketeer/charts/druid/charts/druid-operator/templates/role.yaml create mode 100644 manifests/bucketeer/charts/druid/charts/druid-operator/templates/role_binding.yaml create mode 100644 manifests/bucketeer/charts/druid/charts/druid-operator/templates/service_account.yaml create mode 100644 manifests/bucketeer/charts/druid/charts/druid-operator/values.yaml create mode 100644 manifests/bucketeer/charts/druid/charts/zookeeper-operator/Chart.yaml create mode 100644 manifests/bucketeer/charts/druid/charts/zookeeper-operator/README.md create mode 100644 manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/clusterrole.yaml create mode 100644 manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/clusterrolebinding.yaml create mode 100644 manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/operator.yaml create mode 100644 manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/post-install-upgrade-hooks.yaml create mode 100644 manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/role.yaml create mode 100644 manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/rolebinding.yaml create mode 100644 manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/service_account.yaml create mode 100644 manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/zookeeper.pravega.io_zookeeperclusters_crd.yaml create mode 100644 manifests/bucketeer/charts/druid/charts/zookeeper-operator/values.yaml create mode 100644 manifests/bucketeer/charts/druid/charts/zookeeper/Chart.yaml create mode 100644 manifests/bucketeer/charts/druid/charts/zookeeper/README.md create mode 100644 manifests/bucketeer/charts/druid/charts/zookeeper/templates/_helpers.tpl create 
mode 100644 manifests/bucketeer/charts/druid/charts/zookeeper/templates/post-install-upgrade-hooks.yaml create mode 100644 manifests/bucketeer/charts/druid/charts/zookeeper/templates/zookeeper.yaml create mode 100644 manifests/bucketeer/charts/druid/charts/zookeeper/values.yaml create mode 100644 manifests/bucketeer/charts/druid/charts/zookeeper/values/minikube.yaml create mode 100644 manifests/bucketeer/charts/druid/requirements.lock create mode 100644 manifests/bucketeer/charts/druid/requirements.yaml create mode 100644 manifests/bucketeer/charts/druid/values.yaml create mode 100644 manifests/bucketeer/charts/environment/Chart.yaml create mode 100644 manifests/bucketeer/charts/environment/templates/NOTES.txt create mode 100644 manifests/bucketeer/charts/environment/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/environment/templates/deployment.yaml create mode 100644 manifests/bucketeer/charts/environment/templates/envoy-configmap.yaml create mode 100644 manifests/bucketeer/charts/environment/templates/hpa.yaml create mode 100644 manifests/bucketeer/charts/environment/templates/oauth-key-secret.yaml create mode 100644 manifests/bucketeer/charts/environment/templates/pdb.yaml create mode 100644 manifests/bucketeer/charts/environment/templates/service-cert-secret.yaml create mode 100644 manifests/bucketeer/charts/environment/templates/service-token-secret.yaml create mode 100644 manifests/bucketeer/charts/environment/templates/service.yaml create mode 100644 manifests/bucketeer/charts/environment/values.yaml create mode 100644 manifests/bucketeer/charts/event-counter/.helmignore create mode 100644 manifests/bucketeer/charts/event-counter/Chart.yaml create mode 100644 manifests/bucketeer/charts/event-counter/templates/NOTES.txt create mode 100644 manifests/bucketeer/charts/event-counter/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/event-counter/templates/deployment.yaml create mode 100644 
manifests/bucketeer/charts/event-counter/templates/envoy-configmap.yaml create mode 100644 manifests/bucketeer/charts/event-counter/templates/hpa.yaml create mode 100644 manifests/bucketeer/charts/event-counter/templates/oauth-key-secret.yaml create mode 100644 manifests/bucketeer/charts/event-counter/templates/pdb.yaml create mode 100644 manifests/bucketeer/charts/event-counter/templates/service-cert-secret.yaml create mode 100644 manifests/bucketeer/charts/event-counter/templates/service-token-secret.yaml create mode 100644 manifests/bucketeer/charts/event-counter/templates/service.yaml create mode 100644 manifests/bucketeer/charts/event-counter/values.yaml create mode 100644 manifests/bucketeer/charts/event-persister-evaluation-events-kafka/.helmignore create mode 100644 manifests/bucketeer/charts/event-persister-evaluation-events-kafka/Chart.yaml create mode 100644 manifests/bucketeer/charts/event-persister-evaluation-events-kafka/templates/NOTES.txt create mode 100644 manifests/bucketeer/charts/event-persister-evaluation-events-kafka/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/event-persister-evaluation-events-kafka/templates/deployment.yaml create mode 100644 manifests/bucketeer/charts/event-persister-evaluation-events-kafka/templates/envoy-configmap.yaml create mode 100644 manifests/bucketeer/charts/event-persister-evaluation-events-kafka/templates/hpa.yaml create mode 100644 manifests/bucketeer/charts/event-persister-evaluation-events-kafka/templates/service-cert-secret.yaml create mode 100644 manifests/bucketeer/charts/event-persister-evaluation-events-kafka/templates/service-token-secret.yaml create mode 100644 manifests/bucketeer/charts/event-persister-evaluation-events-kafka/templates/service.yaml create mode 100644 manifests/bucketeer/charts/event-persister-evaluation-events-kafka/values.yaml create mode 100644 manifests/bucketeer/charts/event-persister-goal-events-kafka/.helmignore create mode 100644 
manifests/bucketeer/charts/event-persister-goal-events-kafka/Chart.yaml create mode 100644 manifests/bucketeer/charts/event-persister-goal-events-kafka/templates/NOTES.txt create mode 100644 manifests/bucketeer/charts/event-persister-goal-events-kafka/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/event-persister-goal-events-kafka/templates/deployment.yaml create mode 100644 manifests/bucketeer/charts/event-persister-goal-events-kafka/templates/envoy-configmap.yaml create mode 100644 manifests/bucketeer/charts/event-persister-goal-events-kafka/templates/hpa.yaml create mode 100644 manifests/bucketeer/charts/event-persister-goal-events-kafka/templates/service-cert-secret.yaml create mode 100644 manifests/bucketeer/charts/event-persister-goal-events-kafka/templates/service-token-secret.yaml create mode 100644 manifests/bucketeer/charts/event-persister-goal-events-kafka/templates/service.yaml create mode 100644 manifests/bucketeer/charts/event-persister-goal-events-kafka/values.yaml create mode 100644 manifests/bucketeer/charts/event-persister-user-events-kafka/.helmignore create mode 100644 manifests/bucketeer/charts/event-persister-user-events-kafka/Chart.yaml create mode 100644 manifests/bucketeer/charts/event-persister-user-events-kafka/templates/NOTES.txt create mode 100644 manifests/bucketeer/charts/event-persister-user-events-kafka/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/event-persister-user-events-kafka/templates/deployment.yaml create mode 100644 manifests/bucketeer/charts/event-persister-user-events-kafka/templates/envoy-configmap.yaml create mode 100644 manifests/bucketeer/charts/event-persister-user-events-kafka/templates/hpa.yaml create mode 100644 manifests/bucketeer/charts/event-persister-user-events-kafka/templates/service-cert-secret.yaml create mode 100644 manifests/bucketeer/charts/event-persister-user-events-kafka/templates/service-token-secret.yaml create mode 100644 
manifests/bucketeer/charts/event-persister-user-events-kafka/templates/service.yaml create mode 100644 manifests/bucketeer/charts/event-persister-user-events-kafka/values.yaml create mode 100644 manifests/bucketeer/charts/experiment/Chart.yaml create mode 100644 manifests/bucketeer/charts/experiment/templates/NOTES.txt create mode 100644 manifests/bucketeer/charts/experiment/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/experiment/templates/deployment.yaml create mode 100644 manifests/bucketeer/charts/experiment/templates/envoy-configmap.yaml create mode 100644 manifests/bucketeer/charts/experiment/templates/hpa.yaml create mode 100644 manifests/bucketeer/charts/experiment/templates/oauth-key-secret.yaml create mode 100644 manifests/bucketeer/charts/experiment/templates/pdb.yaml create mode 100644 manifests/bucketeer/charts/experiment/templates/service-cert-secret.yaml create mode 100644 manifests/bucketeer/charts/experiment/templates/service-token-secret.yaml create mode 100644 manifests/bucketeer/charts/experiment/templates/service.yaml create mode 100644 manifests/bucketeer/charts/experiment/values.yaml create mode 100644 manifests/bucketeer/charts/feature-recorder/.helmignore create mode 100644 manifests/bucketeer/charts/feature-recorder/Chart.yaml create mode 100644 manifests/bucketeer/charts/feature-recorder/templates/NOTES.txt create mode 100644 manifests/bucketeer/charts/feature-recorder/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/feature-recorder/templates/deployment.yaml create mode 100644 manifests/bucketeer/charts/feature-recorder/templates/envoy-configmap.yaml create mode 100644 manifests/bucketeer/charts/feature-recorder/templates/service-cert-secret.yaml create mode 100644 manifests/bucketeer/charts/feature-recorder/templates/service-token-secret.yaml create mode 100644 manifests/bucketeer/charts/feature-recorder/templates/service.yaml create mode 100644 
manifests/bucketeer/charts/feature-recorder/templates/vpa.yaml create mode 100644 manifests/bucketeer/charts/feature-recorder/values.yaml create mode 100644 manifests/bucketeer/charts/feature-segment-persister/.helmignore create mode 100644 manifests/bucketeer/charts/feature-segment-persister/Chart.yaml create mode 100644 manifests/bucketeer/charts/feature-segment-persister/templates/NOTES.txt create mode 100644 manifests/bucketeer/charts/feature-segment-persister/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/feature-segment-persister/templates/deployment.yaml create mode 100644 manifests/bucketeer/charts/feature-segment-persister/templates/envoy-configmap.yaml create mode 100644 manifests/bucketeer/charts/feature-segment-persister/templates/hpa.yaml create mode 100644 manifests/bucketeer/charts/feature-segment-persister/templates/service-cert-secret.yaml create mode 100644 manifests/bucketeer/charts/feature-segment-persister/templates/service.yaml create mode 100644 manifests/bucketeer/charts/feature-segment-persister/values.yaml create mode 100644 manifests/bucketeer/charts/feature-tag-cacher/.helmignore create mode 100644 manifests/bucketeer/charts/feature-tag-cacher/Chart.yaml create mode 100644 manifests/bucketeer/charts/feature-tag-cacher/templates/NOTES.txt create mode 100644 manifests/bucketeer/charts/feature-tag-cacher/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/feature-tag-cacher/templates/deployment.yaml create mode 100644 manifests/bucketeer/charts/feature-tag-cacher/templates/envoy-configmap.yaml create mode 100644 manifests/bucketeer/charts/feature-tag-cacher/templates/hpa.yaml create mode 100644 manifests/bucketeer/charts/feature-tag-cacher/templates/service-cert-secret.yaml create mode 100644 manifests/bucketeer/charts/feature-tag-cacher/templates/service-token-secret.yaml create mode 100644 manifests/bucketeer/charts/feature-tag-cacher/templates/service.yaml create mode 100644 
manifests/bucketeer/charts/feature-tag-cacher/values.yaml create mode 100644 manifests/bucketeer/charts/feature/.helmignore create mode 100644 manifests/bucketeer/charts/feature/Chart.yaml create mode 100644 manifests/bucketeer/charts/feature/templates/NOTES.txt create mode 100644 manifests/bucketeer/charts/feature/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/feature/templates/deployment.yaml create mode 100644 manifests/bucketeer/charts/feature/templates/envoy-configmap.yaml create mode 100644 manifests/bucketeer/charts/feature/templates/hpa.yaml create mode 100644 manifests/bucketeer/charts/feature/templates/oauth-key-secret.yaml create mode 100644 manifests/bucketeer/charts/feature/templates/pdb.yaml create mode 100644 manifests/bucketeer/charts/feature/templates/service-cert-secret.yaml create mode 100644 manifests/bucketeer/charts/feature/templates/service-token-secret.yaml create mode 100644 manifests/bucketeer/charts/feature/templates/service.yaml create mode 100644 manifests/bucketeer/charts/feature/values.yaml create mode 100644 manifests/bucketeer/charts/goal-batch-transformer/.helmignore create mode 100644 manifests/bucketeer/charts/goal-batch-transformer/Chart.yaml create mode 100644 manifests/bucketeer/charts/goal-batch-transformer/templates/NOTES.txt create mode 100644 manifests/bucketeer/charts/goal-batch-transformer/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/goal-batch-transformer/templates/deployment.yaml create mode 100644 manifests/bucketeer/charts/goal-batch-transformer/templates/envoy-configmap.yaml create mode 100644 manifests/bucketeer/charts/goal-batch-transformer/templates/hpa.yaml create mode 100644 manifests/bucketeer/charts/goal-batch-transformer/templates/service-cert-secret.yaml create mode 100644 manifests/bucketeer/charts/goal-batch-transformer/templates/service-token-secret.yaml create mode 100644 manifests/bucketeer/charts/goal-batch-transformer/templates/service.yaml create mode 
100644 manifests/bucketeer/charts/goal-batch-transformer/values.yaml create mode 100644 manifests/bucketeer/charts/kafka/Chart.yaml create mode 100644 manifests/bucketeer/charts/kafka/Makefile create mode 100644 manifests/bucketeer/charts/kafka/charts/kafka-cluster/.helmignore create mode 100644 manifests/bucketeer/charts/kafka/charts/kafka-cluster/Chart.yaml create mode 100644 manifests/bucketeer/charts/kafka/charts/kafka-cluster/templates/NOTES.txt create mode 100644 manifests/bucketeer/charts/kafka/charts/kafka-cluster/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/kafka/charts/kafka-cluster/templates/cluster.yaml create mode 100644 manifests/bucketeer/charts/kafka/charts/kafka-cluster/templates/kafkaconfigmap.yaml create mode 100644 manifests/bucketeer/charts/kafka/charts/kafka-cluster/templates/topic.yaml create mode 100644 manifests/bucketeer/charts/kafka/charts/kafka-cluster/templates/user.yaml create mode 100644 manifests/bucketeer/charts/kafka/charts/kafka-cluster/templates/usersecret.yaml create mode 100644 manifests/bucketeer/charts/kafka/charts/kafka-cluster/templates/zookeeperconfigmap.yaml create mode 100644 manifests/bucketeer/charts/kafka/charts/kafka-cluster/values.yaml create mode 100644 manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/.helmignore create mode 100644 manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/Chart.yaml create mode 100644 manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/OWNERS create mode 100644 manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/README.md create mode 100644 manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/040-Crd-kafka.yaml create mode 100644 manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/041-Crd-kafkaconnect.yaml create mode 100644 manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/042-Crd-strimzipodset.yaml create mode 100644 
manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/043-Crd-kafkatopic.yaml create mode 100644 manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/044-Crd-kafkauser.yaml create mode 100644 manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/045-Crd-kafkamirrormaker.yaml create mode 100644 manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/046-Crd-kafkabridge.yaml create mode 100644 manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/047-Crd-kafkaconnector.yaml create mode 100644 manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/048-Crd-kafkamirrormaker2.yaml create mode 100644 manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/049-Crd-kafkarebalance.yaml create mode 100644 manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/010-ServiceAccount-strimzi-cluster-operator.yaml create mode 100644 manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/020-ClusterRole-strimzi-cluster-operator-role.yaml create mode 100644 manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/020-RoleBinding-strimzi-cluster-operator.yaml create mode 100644 manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/021-ClusterRole-strimzi-cluster-operator-role.yaml create mode 100644 manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/021-ClusterRoleBinding-strimzi-cluster-operator.yaml create mode 100644 manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/030-ClusterRole-strimzi-kafka-broker.yaml create mode 100644 manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/030-ClusterRoleBinding-strimzi-cluster-operator-kafka-broker-delegation.yaml create mode 100644 manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/031-ClusterRole-strimzi-entity-operator.yaml create mode 100644 
manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/031-RoleBinding-strimzi-cluster-operator-entity-operator-delegation.yaml create mode 100644 manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/033-ClusterRole-strimzi-kafka-client.yaml create mode 100644 manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/033-ClusterRoleBinding-strimzi-cluster-operator-kafka-client-delegation.yaml create mode 100644 manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/050-ConfigMap-strimzi-cluster-operator.yaml create mode 100644 manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/060-Deployment-strimzi-cluster-operator.yaml create mode 100644 manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/NOTES.txt create mode 100644 manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/_kafka_image_map.tpl create mode 100644 manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/values.yaml create mode 100644 manifests/bucketeer/charts/kafka/requirements.lock create mode 100644 manifests/bucketeer/charts/kafka/requirements.yaml create mode 100644 manifests/bucketeer/charts/kafka/values.yaml create mode 100644 manifests/bucketeer/charts/metrics-event-persister/Chart.yaml create mode 100644 manifests/bucketeer/charts/metrics-event-persister/templates/NOTES.txt create mode 100644 manifests/bucketeer/charts/metrics-event-persister/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/metrics-event-persister/templates/deployment.yaml create mode 100644 manifests/bucketeer/charts/metrics-event-persister/templates/envoy-configmap.yaml create mode 100644 manifests/bucketeer/charts/metrics-event-persister/templates/hpa.yaml create mode 100644 manifests/bucketeer/charts/metrics-event-persister/templates/service-cert-secret.yaml 
create mode 100644 manifests/bucketeer/charts/metrics-event-persister/templates/service.yaml create mode 100644 manifests/bucketeer/charts/metrics-event-persister/values.yaml create mode 100644 manifests/bucketeer/charts/migration-mysql/Chart.yaml create mode 100644 manifests/bucketeer/charts/migration-mysql/templates/NOTES.txt create mode 100644 manifests/bucketeer/charts/migration-mysql/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/migration-mysql/templates/deployment.yaml create mode 100644 manifests/bucketeer/charts/migration-mysql/templates/envoy-configmap.yaml create mode 100644 manifests/bucketeer/charts/migration-mysql/templates/hpa.yaml create mode 100644 manifests/bucketeer/charts/migration-mysql/templates/oauth-key-secret.yaml create mode 100644 manifests/bucketeer/charts/migration-mysql/templates/service-cert-secret.yaml create mode 100644 manifests/bucketeer/charts/migration-mysql/templates/service.yaml create mode 100644 manifests/bucketeer/charts/migration-mysql/values.yaml create mode 100644 manifests/bucketeer/charts/notification-sender/Chart.yaml create mode 100644 manifests/bucketeer/charts/notification-sender/templates/NOTES.txt create mode 100644 manifests/bucketeer/charts/notification-sender/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/notification-sender/templates/deployment.yaml create mode 100644 manifests/bucketeer/charts/notification-sender/templates/envoy-configmap.yaml create mode 100644 manifests/bucketeer/charts/notification-sender/templates/service-cert-secret.yaml create mode 100644 manifests/bucketeer/charts/notification-sender/templates/service-token-secret.yaml create mode 100644 manifests/bucketeer/charts/notification-sender/templates/service.yaml create mode 100644 manifests/bucketeer/charts/notification-sender/values.yaml create mode 100644 manifests/bucketeer/charts/notification/Chart.yaml create mode 100644 manifests/bucketeer/charts/notification/templates/NOTES.txt create mode 
100644 manifests/bucketeer/charts/notification/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/notification/templates/deployment.yaml create mode 100644 manifests/bucketeer/charts/notification/templates/envoy-configmap.yaml create mode 100644 manifests/bucketeer/charts/notification/templates/hpa.yaml create mode 100644 manifests/bucketeer/charts/notification/templates/oauth-key-secret.yaml create mode 100644 manifests/bucketeer/charts/notification/templates/pdb.yaml create mode 100644 manifests/bucketeer/charts/notification/templates/service-cert-secret.yaml create mode 100644 manifests/bucketeer/charts/notification/templates/service-token-secret.yaml create mode 100644 manifests/bucketeer/charts/notification/templates/service.yaml create mode 100644 manifests/bucketeer/charts/notification/values.yaml create mode 100644 manifests/bucketeer/charts/ops-event-batch/Chart.yaml create mode 100644 manifests/bucketeer/charts/ops-event-batch/templates/NOTES.txt create mode 100644 manifests/bucketeer/charts/ops-event-batch/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/ops-event-batch/templates/deployment.yaml create mode 100644 manifests/bucketeer/charts/ops-event-batch/templates/envoy-configmap.yaml create mode 100644 manifests/bucketeer/charts/ops-event-batch/templates/service-cert-secret.yaml create mode 100644 manifests/bucketeer/charts/ops-event-batch/templates/service-token-secret.yaml create mode 100644 manifests/bucketeer/charts/ops-event-batch/templates/service.yaml create mode 100644 manifests/bucketeer/charts/ops-event-batch/values.yaml create mode 100644 manifests/bucketeer/charts/push-sender/Chart.yaml create mode 100644 manifests/bucketeer/charts/push-sender/templates/NOTES.txt create mode 100644 manifests/bucketeer/charts/push-sender/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/push-sender/templates/deployment.yaml create mode 100644 
manifests/bucketeer/charts/push-sender/templates/envoy-configmap.yaml create mode 100644 manifests/bucketeer/charts/push-sender/templates/service-cert-secret.yaml create mode 100644 manifests/bucketeer/charts/push-sender/templates/service-token-secret.yaml create mode 100644 manifests/bucketeer/charts/push-sender/templates/service.yaml create mode 100644 manifests/bucketeer/charts/push-sender/values.yaml create mode 100644 manifests/bucketeer/charts/push/Chart.yaml create mode 100644 manifests/bucketeer/charts/push/templates/NOTES.txt create mode 100644 manifests/bucketeer/charts/push/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/push/templates/deployment.yaml create mode 100644 manifests/bucketeer/charts/push/templates/envoy-configmap.yaml create mode 100644 manifests/bucketeer/charts/push/templates/hpa.yaml create mode 100644 manifests/bucketeer/charts/push/templates/oauth-key-secret.yaml create mode 100644 manifests/bucketeer/charts/push/templates/pdb.yaml create mode 100644 manifests/bucketeer/charts/push/templates/service-cert-secret.yaml create mode 100644 manifests/bucketeer/charts/push/templates/service-token-secret.yaml create mode 100644 manifests/bucketeer/charts/push/templates/service.yaml create mode 100644 manifests/bucketeer/charts/push/values.yaml create mode 100644 manifests/bucketeer/charts/user-persister/Chart.yaml create mode 100644 manifests/bucketeer/charts/user-persister/templates/NOTES.txt create mode 100644 manifests/bucketeer/charts/user-persister/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/user-persister/templates/deployment.yaml create mode 100644 manifests/bucketeer/charts/user-persister/templates/envoy-configmap.yaml create mode 100644 manifests/bucketeer/charts/user-persister/templates/hpa.yaml create mode 100644 manifests/bucketeer/charts/user-persister/templates/service-cert-secret.yaml create mode 100644 manifests/bucketeer/charts/user-persister/templates/service.yaml create mode 
100644 manifests/bucketeer/charts/user-persister/values.yaml create mode 100644 manifests/bucketeer/charts/user/Chart.yaml create mode 100644 manifests/bucketeer/charts/user/templates/NOTES.txt create mode 100644 manifests/bucketeer/charts/user/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/user/templates/deployment.yaml create mode 100644 manifests/bucketeer/charts/user/templates/envoy-configmap.yaml create mode 100644 manifests/bucketeer/charts/user/templates/hpa.yaml create mode 100644 manifests/bucketeer/charts/user/templates/oauth-key-secret.yaml create mode 100644 manifests/bucketeer/charts/user/templates/pdb.yaml create mode 100644 manifests/bucketeer/charts/user/templates/service-cert-secret.yaml create mode 100644 manifests/bucketeer/charts/user/templates/service-token-secret.yaml create mode 100644 manifests/bucketeer/charts/user/templates/service.yaml create mode 100644 manifests/bucketeer/charts/user/values.yaml create mode 100644 manifests/bucketeer/charts/web-gateway/.helmignore create mode 100644 manifests/bucketeer/charts/web-gateway/Chart.yaml create mode 100644 manifests/bucketeer/charts/web-gateway/templates/NOTES.txt create mode 100644 manifests/bucketeer/charts/web-gateway/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/web-gateway/templates/backend-config.yaml create mode 100644 manifests/bucketeer/charts/web-gateway/templates/bucketeer-jp-cert-secret.yaml create mode 100644 manifests/bucketeer/charts/web-gateway/templates/deployment.yaml create mode 100644 manifests/bucketeer/charts/web-gateway/templates/envoy-configmap.yaml create mode 100644 manifests/bucketeer/charts/web-gateway/templates/hpa.yaml create mode 100644 manifests/bucketeer/charts/web-gateway/templates/pdb.yaml create mode 100644 manifests/bucketeer/charts/web-gateway/templates/secret.yaml create mode 100644 manifests/bucketeer/charts/web-gateway/templates/service-cert-secret.yaml create mode 100644 
manifests/bucketeer/charts/web-gateway/templates/service.yaml create mode 100644 manifests/bucketeer/charts/web-gateway/values.yaml create mode 100644 manifests/bucketeer/charts/web/.helmignore create mode 100644 manifests/bucketeer/charts/web/Chart.yaml create mode 100644 manifests/bucketeer/charts/web/templates/NOTES.txt create mode 100644 manifests/bucketeer/charts/web/templates/_helpers.tpl create mode 100644 manifests/bucketeer/charts/web/templates/cert-secret.yaml create mode 100644 manifests/bucketeer/charts/web/templates/configmap.yaml create mode 100644 manifests/bucketeer/charts/web/templates/deployment.yaml create mode 100644 manifests/bucketeer/charts/web/templates/hpa.yaml create mode 100644 manifests/bucketeer/charts/web/templates/pdb.yaml create mode 100644 manifests/bucketeer/charts/web/templates/service.yaml create mode 100644 manifests/bucketeer/charts/web/values.yaml create mode 100644 manifests/bucketeer/values.yaml create mode 100644 pkg/account/api/BUILD.bazel create mode 100644 pkg/account/api/account.go create mode 100644 pkg/account/api/account_test.go create mode 100644 pkg/account/api/admin_account.go create mode 100644 pkg/account/api/admin_account_test.go create mode 100644 pkg/account/api/api.go create mode 100644 pkg/account/api/api_key.go create mode 100644 pkg/account/api/api_key_test.go create mode 100644 pkg/account/api/api_test.go create mode 100644 pkg/account/api/error.go create mode 100644 pkg/account/api/validation.go create mode 100644 pkg/account/api/validation_test.go create mode 100644 pkg/account/apikeycacher/BUILD.bazel create mode 100644 pkg/account/apikeycacher/apikeycacher.go create mode 100644 pkg/account/apikeycacher/metrics.go create mode 100644 pkg/account/client/BUILD.bazel create mode 100644 pkg/account/client/client.go create mode 100644 pkg/account/client/mock/BUILD.bazel create mode 100644 pkg/account/client/mock/client.go create mode 100644 pkg/account/cmd/apikeycacher/BUILD.bazel create mode 100644 
pkg/account/cmd/apikeycacher/apikeycacher.go create mode 100644 pkg/account/cmd/server/BUILD.bazel create mode 100644 pkg/account/cmd/server/server.go create mode 100644 pkg/account/command/BUILD.bazel create mode 100644 pkg/account/command/account.go create mode 100644 pkg/account/command/account_test.go create mode 100644 pkg/account/command/admin_account.go create mode 100644 pkg/account/command/admin_account_test.go create mode 100644 pkg/account/command/api_key.go create mode 100644 pkg/account/command/api_key_test.go create mode 100644 pkg/account/command/command.go create mode 100644 pkg/account/domain/BUILD.bazel create mode 100644 pkg/account/domain/account.go create mode 100644 pkg/account/domain/account_test.go create mode 100644 pkg/account/domain/api_key.go create mode 100644 pkg/account/domain/api_key_test.go create mode 100644 pkg/account/storage/v2/BUILD.bazel create mode 100644 pkg/account/storage/v2/account.go create mode 100644 pkg/account/storage/v2/account_test.go create mode 100644 pkg/account/storage/v2/admin_account.go create mode 100644 pkg/account/storage/v2/admin_account_test.go create mode 100644 pkg/account/storage/v2/api_key.go create mode 100644 pkg/account/storage/v2/api_key_test.go create mode 100644 pkg/account/storage/v2/mock/BUILD.bazel create mode 100644 pkg/account/storage/v2/mock/account.go create mode 100644 pkg/account/storage/v2/mock/admin_account.go create mode 100644 pkg/account/storage/v2/mock/api_key.go create mode 100644 pkg/auditlog/api/BUILD.bazel create mode 100644 pkg/auditlog/api/api.go create mode 100644 pkg/auditlog/api/api_test.go create mode 100644 pkg/auditlog/api/error.go create mode 100644 pkg/auditlog/client/BUILD.bazel create mode 100644 pkg/auditlog/client/client.go create mode 100644 pkg/auditlog/cmd/persister/BUILD.bazel create mode 100644 pkg/auditlog/cmd/persister/persister.go create mode 100644 pkg/auditlog/cmd/server/BUILD.bazel create mode 100644 pkg/auditlog/cmd/server/server.go create mode 
100644 pkg/auditlog/domain/BUILD.bazel create mode 100644 pkg/auditlog/domain/auditlog.go create mode 100644 pkg/auditlog/persister/BUILD.bazel create mode 100644 pkg/auditlog/persister/metrics.go create mode 100644 pkg/auditlog/persister/persister.go create mode 100644 pkg/auditlog/persister/persister_test.go create mode 100644 pkg/auditlog/storage/v2/BUILD.bazel create mode 100644 pkg/auditlog/storage/v2/admin_audit_log.go create mode 100644 pkg/auditlog/storage/v2/admin_audit_log_test.go create mode 100644 pkg/auditlog/storage/v2/audit_log.go create mode 100644 pkg/auditlog/storage/v2/audit_log_test.go create mode 100644 pkg/auditlog/storage/v2/mock/BUILD.bazel create mode 100644 pkg/auditlog/storage/v2/mock/admin_audit_log.go create mode 100644 pkg/auditlog/storage/v2/mock/audit_log.go create mode 100644 pkg/auth/api/BUILD.bazel create mode 100644 pkg/auth/api/api.go create mode 100644 pkg/auth/api/api_test.go create mode 100644 pkg/auth/api/error.go create mode 100644 pkg/auth/client/BUILD.bazel create mode 100644 pkg/auth/client/client.go create mode 100644 pkg/auth/client/mock/BUILD.bazel create mode 100644 pkg/auth/client/mock/client.go create mode 100644 pkg/auth/cmd/server/BUILD.bazel create mode 100644 pkg/auth/cmd/server/server.go create mode 100644 pkg/auth/oidc/BUILD.bazel create mode 100644 pkg/auth/oidc/oidc.go create mode 100644 pkg/auth/oidc/oidc_test.go create mode 100644 pkg/autoops/api/BUILD.bazel create mode 100644 pkg/autoops/api/api.go create mode 100644 pkg/autoops/api/api_test.go create mode 100644 pkg/autoops/api/error.go create mode 100644 pkg/autoops/api/operation.go create mode 100644 pkg/autoops/api/webhook.go create mode 100644 pkg/autoops/api/webhook_test.go create mode 100644 pkg/autoops/client/BUILD.bazel create mode 100644 pkg/autoops/client/client.go create mode 100644 pkg/autoops/client/mock/BUILD.bazel create mode 100644 pkg/autoops/client/mock/client.go create mode 100644 pkg/autoops/cmd/server/BUILD.bazel create mode 100644 
pkg/autoops/cmd/server/server.go create mode 100644 pkg/autoops/command/BUILD.bazel create mode 100644 pkg/autoops/command/auto_ops_rule.go create mode 100644 pkg/autoops/command/auto_ops_rule_test.go create mode 100644 pkg/autoops/command/command.go create mode 100644 pkg/autoops/command/webhook.go create mode 100644 pkg/autoops/domain/BUILD.bazel create mode 100644 pkg/autoops/domain/auto_ops_rule.go create mode 100644 pkg/autoops/domain/auto_ops_rule_test.go create mode 100644 pkg/autoops/domain/webhook.go create mode 100644 pkg/autoops/domain/webhook_secret.go create mode 100644 pkg/autoops/storage/v2/BUILD.bazel create mode 100644 pkg/autoops/storage/v2/auto_ops_rule.go create mode 100644 pkg/autoops/storage/v2/auto_ops_rule_test.go create mode 100644 pkg/autoops/storage/v2/mock/BUILD.bazel create mode 100644 pkg/autoops/storage/v2/mock/auto_ops_rule.go create mode 100644 pkg/autoops/storage/v2/mock/webhook.go create mode 100644 pkg/autoops/storage/v2/webhook.go create mode 100644 pkg/autoops/webhookhandler/BUILD.bazel create mode 100644 pkg/autoops/webhookhandler/evaluation.go create mode 100644 pkg/autoops/webhookhandler/handler.go create mode 100644 pkg/autoops/webhookhandler/handler_test.go create mode 100644 pkg/autoops/webhookhandler/testdata/invalid-token create mode 100644 pkg/autoops/webhookhandler/testdata/valid-public.pem create mode 100644 pkg/autoops/webhookhandler/testdata/valid-token create mode 100644 pkg/backoff/BUILD.bazel create mode 100644 pkg/backoff/backoff.go create mode 100644 pkg/backoff/constant.go create mode 100644 pkg/backoff/constant_test.go create mode 100644 pkg/backoff/exponential.go create mode 100644 pkg/backoff/exponential_test.go create mode 100644 pkg/backoff/retry.go create mode 100644 pkg/cache/BUILD.bazel create mode 100644 pkg/cache/cache.go create mode 100644 pkg/cache/mock/BUILD.bazel create mode 100644 pkg/cache/mock/cache.go create mode 100644 pkg/cache/redis_cache.go create mode 100644 
pkg/cache/testing/BUILD.bazel create mode 100644 pkg/cache/testing/cache.go create mode 100644 pkg/cache/ttl_cache.go create mode 100644 pkg/cache/v2/BUILD.bazel create mode 100644 pkg/cache/v2/redis_cache.go create mode 100644 pkg/cache/v3/BUILD.bazel create mode 100644 pkg/cache/v3/environment_api_key.go create mode 100644 pkg/cache/v3/experiments.go create mode 100644 pkg/cache/v3/features.go create mode 100644 pkg/cache/v3/features_test.go create mode 100644 pkg/cache/v3/mock/BUILD.bazel create mode 100644 pkg/cache/v3/mock/environment_api_key.go create mode 100644 pkg/cache/v3/mock/experiments.go create mode 100644 pkg/cache/v3/mock/features.go create mode 100644 pkg/cache/v3/mock/segment_users.go create mode 100644 pkg/cache/v3/redis_cache.go create mode 100644 pkg/cache/v3/segment_users.go create mode 100644 pkg/cache/v3/segment_users_test.go create mode 100644 pkg/cli/BUILD.bazel create mode 100644 pkg/cli/app.go create mode 100644 pkg/cli/cmd.go create mode 100644 pkg/crypto/BUILD.bazel create mode 100644 pkg/crypto/cloudkmscrypto.go create mode 100644 pkg/crypto/crypto.go create mode 100644 pkg/domainevent/BUILD.bazel create mode 100644 pkg/domainevent/domain/BUILD.bazel create mode 100644 pkg/domainevent/domain/event.go create mode 100644 pkg/domainevent/domain/message.go create mode 100644 pkg/domainevent/domain/message_test.go create mode 100644 pkg/domainevent/domain/url.go create mode 100644 pkg/domainevent/domain/url_test.go create mode 100644 pkg/druid/BUILD.bazel create mode 100644 pkg/druid/mock/BUILD.bazel create mode 100644 pkg/druid/mock/supervisor_creator.go create mode 100644 pkg/druid/supervisor.go create mode 100644 pkg/druid/supervisor_creator.go create mode 100644 pkg/environment/api/BUILD.bazel create mode 100644 pkg/environment/api/api.go create mode 100644 pkg/environment/api/api_test.go create mode 100644 pkg/environment/api/environment.go create mode 100644 pkg/environment/api/environment_test.go create mode 100644 
pkg/environment/api/error.go create mode 100644 pkg/environment/api/project.go create mode 100644 pkg/environment/api/project_test.go create mode 100644 pkg/environment/client/BUILD.bazel create mode 100644 pkg/environment/client/client.go create mode 100644 pkg/environment/client/mock/BUILD.bazel create mode 100644 pkg/environment/client/mock/client.go create mode 100644 pkg/environment/cmd/server/BUILD.bazel create mode 100644 pkg/environment/cmd/server/server.go create mode 100644 pkg/environment/command/BUILD.bazel create mode 100644 pkg/environment/command/command.go create mode 100644 pkg/environment/command/environment.go create mode 100644 pkg/environment/command/environment_test.go create mode 100644 pkg/environment/command/project.go create mode 100644 pkg/environment/command/project_test.go create mode 100644 pkg/environment/domain/BUILD.bazel create mode 100644 pkg/environment/domain/environment.go create mode 100644 pkg/environment/domain/environment_test.go create mode 100644 pkg/environment/domain/project.go create mode 100644 pkg/environment/domain/project_test.go create mode 100644 pkg/environment/storage/v2/BUILD.bazel create mode 100644 pkg/environment/storage/v2/environment.go create mode 100644 pkg/environment/storage/v2/environment_test.go create mode 100644 pkg/environment/storage/v2/mock/BUILD.bazel create mode 100644 pkg/environment/storage/v2/mock/environment.go create mode 100644 pkg/environment/storage/v2/mock/project.go create mode 100644 pkg/environment/storage/v2/project.go create mode 100644 pkg/environment/storage/v2/project_test.go create mode 100644 pkg/errgroup/BUILD.bazel create mode 100644 pkg/errgroup/errgroup.go create mode 100644 pkg/errgroup/errgroup_test.go create mode 100644 pkg/eventcounter/api/BUILD.bazel create mode 100644 pkg/eventcounter/api/api.go create mode 100644 pkg/eventcounter/api/api_test.go create mode 100644 pkg/eventcounter/api/error.go create mode 100644 pkg/eventcounter/api/metrics.go create mode 100644 
pkg/eventcounter/client/BUILD.bazel create mode 100644 pkg/eventcounter/client/client.go create mode 100644 pkg/eventcounter/client/mock/BUILD.bazel create mode 100644 pkg/eventcounter/client/mock/client.go create mode 100644 pkg/eventcounter/cmd/server/BUILD.bazel create mode 100644 pkg/eventcounter/cmd/server/server.go create mode 100644 pkg/eventcounter/domain/BUILD.bazel create mode 100644 pkg/eventcounter/domain/experiment_result.go create mode 100644 pkg/eventcounter/druid/BUILD.bazel create mode 100644 pkg/eventcounter/druid/mock/BUILD.bazel create mode 100644 pkg/eventcounter/druid/mock/querier.go create mode 100644 pkg/eventcounter/druid/querier.go create mode 100644 pkg/eventcounter/druid/querier_test.go create mode 100644 pkg/eventcounter/druid/query.go create mode 100644 pkg/eventcounter/druid/query_test.go create mode 100644 pkg/eventcounter/storage/v2/BUILD.bazel create mode 100644 pkg/eventcounter/storage/v2/experiment_result.go create mode 100644 pkg/eventcounter/storage/v2/experiment_result_test.go create mode 100644 pkg/eventcounter/storage/v2/mock/BUILD.bazel create mode 100644 pkg/eventcounter/storage/v2/mock/experiment_result.go create mode 100644 pkg/eventpersister/cmd/server/BUILD.bazel create mode 100644 pkg/eventpersister/cmd/server/server.go create mode 100644 pkg/eventpersister/datastore/BUILD.bazel create mode 100644 pkg/eventpersister/datastore/datastore.go create mode 100644 pkg/eventpersister/datastore/datastore_test.go create mode 100644 pkg/eventpersister/datastore/kafka.go create mode 100644 pkg/eventpersister/datastore/metrics.go create mode 100644 pkg/eventpersister/persister/BUILD.bazel create mode 100644 pkg/eventpersister/persister/metrics.go create mode 100644 pkg/eventpersister/persister/persister.go create mode 100644 pkg/eventpersister/persister/persister_test.go create mode 100644 pkg/eventpersister/storage/v2/BUILD.bazel create mode 100644 pkg/eventpersister/storage/v2/persister.go create mode 100644 
pkg/experiment/api/BUILD.bazel create mode 100644 pkg/experiment/api/api.go create mode 100644 pkg/experiment/api/api_test.go create mode 100644 pkg/experiment/api/error.go create mode 100644 pkg/experiment/api/experiment.go create mode 100644 pkg/experiment/api/experiment_test.go create mode 100644 pkg/experiment/api/goal.go create mode 100644 pkg/experiment/api/goal_test.go create mode 100644 pkg/experiment/batch/job/BUILD.bazel create mode 100644 pkg/experiment/batch/job/experiment_status_updater.go create mode 100644 pkg/experiment/batch/job/experiment_status_updater_test.go create mode 100644 pkg/experiment/batch/job/job.go create mode 100644 pkg/experiment/client/BUILD.bazel create mode 100644 pkg/experiment/client/client.go create mode 100644 pkg/experiment/client/mock/BUILD.bazel create mode 100644 pkg/experiment/client/mock/client.go create mode 100644 pkg/experiment/cmd/batch/BUILD.bazel create mode 100644 pkg/experiment/cmd/batch/batch.go create mode 100644 pkg/experiment/cmd/server/BUILD.bazel create mode 100644 pkg/experiment/cmd/server/server.go create mode 100644 pkg/experiment/command/BUILD.bazel create mode 100644 pkg/experiment/command/command.go create mode 100644 pkg/experiment/command/experiment.go create mode 100644 pkg/experiment/command/experiment_test.go create mode 100644 pkg/experiment/command/goal.go create mode 100644 pkg/experiment/command/goal_test.go create mode 100644 pkg/experiment/domain/BUILD.bazel create mode 100644 pkg/experiment/domain/experiment.go create mode 100644 pkg/experiment/domain/experiment_test.go create mode 100644 pkg/experiment/domain/goal.go create mode 100644 pkg/experiment/domain/goal_test.go create mode 100644 pkg/experiment/storage/v2/BUILD.bazel create mode 100644 pkg/experiment/storage/v2/experiment.go create mode 100644 pkg/experiment/storage/v2/experiment_test.go create mode 100644 pkg/experiment/storage/v2/goal.go create mode 100644 pkg/experiment/storage/v2/goal_test.go create mode 100644 
pkg/experiment/storage/v2/mock/BUILD.bazel create mode 100644 pkg/experiment/storage/v2/mock/experiment.go create mode 100644 pkg/experiment/storage/v2/mock/goal.go create mode 100644 pkg/feature/api/BUILD.bazel create mode 100644 pkg/feature/api/api.go create mode 100644 pkg/feature/api/api_test.go create mode 100644 pkg/feature/api/error.go create mode 100644 pkg/feature/api/feature.go create mode 100644 pkg/feature/api/feature_test.go create mode 100644 pkg/feature/api/segment.go create mode 100644 pkg/feature/api/segment_test.go create mode 100644 pkg/feature/api/segment_user.go create mode 100644 pkg/feature/api/segment_user_test.go create mode 100644 pkg/feature/api/tag.go create mode 100644 pkg/feature/api/tag_test.go create mode 100644 pkg/feature/api/user_evaluations.go create mode 100644 pkg/feature/api/user_evaluations_test.go create mode 100644 pkg/feature/api/validation.go create mode 100644 pkg/feature/cacher/BUILD.bazel create mode 100644 pkg/feature/cacher/cacher.go create mode 100644 pkg/feature/cacher/cacher_test.go create mode 100644 pkg/feature/cacher/metrics.go create mode 100644 pkg/feature/client/BUILD.bazel create mode 100644 pkg/feature/client/client.go create mode 100644 pkg/feature/client/mock/BUILD.bazel create mode 100644 pkg/feature/client/mock/client.go create mode 100644 pkg/feature/cmd/cacher/BUILD.bazel create mode 100644 pkg/feature/cmd/cacher/cacher.go create mode 100644 pkg/feature/cmd/recorder/BUILD.bazel create mode 100644 pkg/feature/cmd/recorder/recorder.go create mode 100644 pkg/feature/cmd/segmentpersister/BUILD.bazel create mode 100644 pkg/feature/cmd/segmentpersister/persister.go create mode 100644 pkg/feature/cmd/server/BUILD.bazel create mode 100644 pkg/feature/cmd/server/server.go create mode 100644 pkg/feature/command/BUILD.bazel create mode 100644 pkg/feature/command/command.go create mode 100644 pkg/feature/command/detail.go create mode 100644 pkg/feature/command/eventfactory.go create mode 100644 
pkg/feature/command/feature.go create mode 100644 pkg/feature/command/feature_test.go create mode 100644 pkg/feature/command/segment.go create mode 100644 pkg/feature/command/segment_test.go create mode 100644 pkg/feature/domain/BUILD.bazel create mode 100644 pkg/feature/domain/clause_evaluator.go create mode 100644 pkg/feature/domain/clause_evaluator_test.go create mode 100644 pkg/feature/domain/evaluation.go create mode 100644 pkg/feature/domain/evaluation_test.go create mode 100644 pkg/feature/domain/feature.go create mode 100644 pkg/feature/domain/feature_last_used_info.go create mode 100644 pkg/feature/domain/feature_last_used_info_test.go create mode 100644 pkg/feature/domain/feature_test.go create mode 100644 pkg/feature/domain/rule_evaluator.go create mode 100644 pkg/feature/domain/rule_evaluator_test.go create mode 100644 pkg/feature/domain/segment.go create mode 100644 pkg/feature/domain/segment_evaluator.go create mode 100644 pkg/feature/domain/segment_test.go create mode 100644 pkg/feature/domain/segment_user.go create mode 100644 pkg/feature/domain/strategy_evaluator.go create mode 100644 pkg/feature/domain/tag.go create mode 100644 pkg/feature/domain/user_evaluations.go create mode 100644 pkg/feature/domain/user_evaluations_test.go create mode 100644 pkg/feature/recorder/BUILD.bazel create mode 100644 pkg/feature/recorder/metrics.go create mode 100644 pkg/feature/recorder/recorder.go create mode 100644 pkg/feature/recorder/recorder_test.go create mode 100644 pkg/feature/segmentpersister/BUILD.bazel create mode 100644 pkg/feature/segmentpersister/metrics.go create mode 100644 pkg/feature/segmentpersister/persister.go create mode 100644 pkg/feature/segmentpersister/persister_test.go create mode 100644 pkg/feature/storage/BUILD.bazel create mode 100644 pkg/feature/storage/feature_last_used_info.go create mode 100644 pkg/feature/storage/feature_last_used_info_test.go create mode 100644 pkg/feature/storage/mock/BUILD.bazel create mode 100644 
pkg/feature/storage/mock/feature_last_used_info.go create mode 100644 pkg/feature/storage/mock/user_evaluations.go create mode 100644 pkg/feature/storage/user_evaluations.go create mode 100644 pkg/feature/storage/user_evaluations_test.go create mode 100644 pkg/feature/storage/v2/BUILD.bazel create mode 100644 pkg/feature/storage/v2/feature.go create mode 100644 pkg/feature/storage/v2/feature_last_used_info.go create mode 100644 pkg/feature/storage/v2/feature_last_used_info_test.go create mode 100644 pkg/feature/storage/v2/feature_test.go create mode 100644 pkg/feature/storage/v2/mock/BUILD.bazel create mode 100644 pkg/feature/storage/v2/mock/feature.go create mode 100644 pkg/feature/storage/v2/mock/feature_last_used_info.go create mode 100644 pkg/feature/storage/v2/mock/segment.go create mode 100644 pkg/feature/storage/v2/mock/segment_user.go create mode 100644 pkg/feature/storage/v2/mock/tag.go create mode 100644 pkg/feature/storage/v2/segment.go create mode 100644 pkg/feature/storage/v2/segment_test.go create mode 100644 pkg/feature/storage/v2/segment_user.go create mode 100644 pkg/feature/storage/v2/segment_user_test.go create mode 100644 pkg/feature/storage/v2/tag.go create mode 100644 pkg/feature/storage/v2/tag_test.go create mode 100644 pkg/gateway/api/BUILD.bazel create mode 100644 pkg/gateway/api/api.go create mode 100644 pkg/gateway/api/api_grpc.go create mode 100644 pkg/gateway/api/api_grpc_test.go create mode 100644 pkg/gateway/api/api_test.go create mode 100644 pkg/gateway/api/grpc_validation.go create mode 100644 pkg/gateway/api/metrics.go create mode 100644 pkg/gateway/api/trackhandler.go create mode 100644 pkg/gateway/api/trackhandler_test.go create mode 100644 pkg/gateway/api/validation.go create mode 100644 pkg/gateway/api/validation_test.go create mode 100644 pkg/gateway/client/BUILD.bazel create mode 100644 pkg/gateway/client/client.go create mode 100644 pkg/gateway/client/credentials.go create mode 100644 pkg/gateway/cmd/BUILD.bazel create mode 
100644 pkg/gateway/cmd/server.go create mode 100644 pkg/goalbatch/cmd/transformer/BUILD.bazel create mode 100644 pkg/goalbatch/cmd/transformer/transformer.go create mode 100644 pkg/goalbatch/transformer/BUILD.bazel create mode 100644 pkg/goalbatch/transformer/metrics.go create mode 100644 pkg/goalbatch/transformer/transformer.go create mode 100644 pkg/goalbatch/transformer/transformer_test.go create mode 100644 pkg/health/BUILD.bazel create mode 100644 pkg/health/grpc_health.go create mode 100644 pkg/health/health.go create mode 100644 pkg/health/health_test.go create mode 100644 pkg/health/rest_health.go create mode 100644 pkg/job/BUILD.bazel create mode 100644 pkg/job/job.go create mode 100644 pkg/job/metrics.go create mode 100644 pkg/kafka/BUILD.bazel create mode 100644 pkg/kafka/mock/BUILD.bazel create mode 100644 pkg/kafka/mock/topic_creator.go create mode 100644 pkg/kafka/topic.go create mode 100644 pkg/kafka/topic_creator.go create mode 100644 pkg/ldflags/BUILD.bazel create mode 100644 pkg/ldflags/ldflags.go create mode 100644 pkg/locale/BUILD.bazel create mode 100644 pkg/locale/locale.go create mode 100644 pkg/locale/localizedata/en.yaml create mode 100644 pkg/locale/localizedata/ja.yaml create mode 100644 pkg/locale/localizer.go create mode 100644 pkg/locale/localizer_test.go create mode 100644 pkg/locale/options.go create mode 100644 pkg/log/BUILD.bazel create mode 100644 pkg/log/field.go create mode 100644 pkg/log/log.go create mode 100644 pkg/log/log_test.go create mode 100644 pkg/metrics/BUILD.bazel create mode 100644 pkg/metrics/metrics.go create mode 100644 pkg/metrics/metrics_test.go create mode 100644 pkg/metrics/mock/BUILD.bazel create mode 100644 pkg/metrics/mock/metrics.go create mode 100644 pkg/metricsevent/cmd/persister/BUILD.bazel create mode 100644 pkg/metricsevent/cmd/persister/persister.go create mode 100644 pkg/metricsevent/persister/BUILD.bazel create mode 100644 pkg/metricsevent/persister/metrics.go create mode 100644 
pkg/metricsevent/persister/persister.go create mode 100644 pkg/metricsevent/persister/persister_test.go create mode 100644 pkg/metricsevent/storage/BUILD.bazel create mode 100644 pkg/metricsevent/storage/event.go create mode 100644 pkg/metricsevent/storage/event_test.go create mode 100644 pkg/metricsevent/storage/metrics.go create mode 100644 pkg/metricsevent/storage/mock/BUILD.bazel create mode 100644 pkg/metricsevent/storage/mock/event.go create mode 100644 pkg/migration/cmd/mysqlserver/BUILD.bazel create mode 100644 pkg/migration/cmd/mysqlserver/mysqlserver.go create mode 100644 pkg/migration/mysql/api/BUILD.bazel create mode 100644 pkg/migration/mysql/api/api.go create mode 100644 pkg/migration/mysql/api/api_test.go create mode 100644 pkg/migration/mysql/migrate/BUILD.bazel create mode 100644 pkg/migration/mysql/migrate/migrate.go create mode 100644 pkg/migration/mysql/migrate/mock/BUILD.bazel create mode 100644 pkg/migration/mysql/migrate/mock/migrate.go create mode 100644 pkg/notification/api/BUILD.bazel create mode 100644 pkg/notification/api/admin_subscription.go create mode 100644 pkg/notification/api/admin_subscription_test.go create mode 100644 pkg/notification/api/api.go create mode 100644 pkg/notification/api/api_test.go create mode 100644 pkg/notification/api/error.go create mode 100644 pkg/notification/api/subscription.go create mode 100644 pkg/notification/api/subscription_test.go create mode 100644 pkg/notification/client/BUILD.bazel create mode 100644 pkg/notification/client/client.go create mode 100644 pkg/notification/client/mock/BUILD.bazel create mode 100644 pkg/notification/client/mock/client.go create mode 100644 pkg/notification/cmd/sender/BUILD.bazel create mode 100644 pkg/notification/cmd/sender/sender.go create mode 100644 pkg/notification/cmd/server/BUILD.bazel create mode 100644 pkg/notification/cmd/server/server.go create mode 100644 pkg/notification/command/BUILD.bazel create mode 100644 pkg/notification/command/admin_subscription.go 
create mode 100644 pkg/notification/command/admin_subscription_test.go create mode 100644 pkg/notification/command/command.go create mode 100644 pkg/notification/command/subscription.go create mode 100644 pkg/notification/command/subscription_test.go create mode 100644 pkg/notification/domain/BUILD.bazel create mode 100644 pkg/notification/domain/subscription.go create mode 100644 pkg/notification/domain/subscription_test.go create mode 100644 pkg/notification/sender/BUILD.bazel create mode 100644 pkg/notification/sender/informer/BUILD.bazel create mode 100644 pkg/notification/sender/informer/batch/BUILD.bazel create mode 100644 pkg/notification/sender/informer/batch/job.go create mode 100644 pkg/notification/sender/informer/batch/job/BUILD.bazel create mode 100644 pkg/notification/sender/informer/batch/job/experiment_running_watcher.go create mode 100644 pkg/notification/sender/informer/batch/job/experiment_running_watcher_test.go create mode 100644 pkg/notification/sender/informer/batch/job/feature_watcher.go create mode 100644 pkg/notification/sender/informer/batch/job/feature_watcher_test.go create mode 100644 pkg/notification/sender/informer/batch/job/job.go create mode 100644 pkg/notification/sender/informer/batch/job/mau_count_watcher.go create mode 100644 pkg/notification/sender/informer/batch/job/mau_count_watcher_test.go create mode 100644 pkg/notification/sender/informer/domainevent/BUILD.bazel create mode 100644 pkg/notification/sender/informer/domainevent/domain_event.go create mode 100644 pkg/notification/sender/informer/domainevent/domain_event_test.go create mode 100644 pkg/notification/sender/informer/domainevent/metrics.go create mode 100644 pkg/notification/sender/informer/informer.go create mode 100644 pkg/notification/sender/informer/mock/BUILD.bazel create mode 100644 pkg/notification/sender/informer/mock/informer.go create mode 100644 pkg/notification/sender/metrics.go create mode 100644 pkg/notification/sender/mock/BUILD.bazel create mode 
100644 pkg/notification/sender/mock/sender.go create mode 100644 pkg/notification/sender/notifier/BUILD.bazel create mode 100644 pkg/notification/sender/notifier/message.go create mode 100644 pkg/notification/sender/notifier/metrics.go create mode 100644 pkg/notification/sender/notifier/mock/BUILD.bazel create mode 100644 pkg/notification/sender/notifier/mock/notifier.go create mode 100644 pkg/notification/sender/notifier/notifier.go create mode 100644 pkg/notification/sender/notifier/slack.go create mode 100644 pkg/notification/sender/notifier/slack_test.go create mode 100644 pkg/notification/sender/sender.go create mode 100644 pkg/notification/sender/sender_test.go create mode 100644 pkg/notification/storage/v2/BUILD.bazel create mode 100644 pkg/notification/storage/v2/admin_subscription.go create mode 100644 pkg/notification/storage/v2/admin_subscription_test.go create mode 100644 pkg/notification/storage/v2/mock/BUILD.bazel create mode 100644 pkg/notification/storage/v2/mock/admin_subscription.go create mode 100644 pkg/notification/storage/v2/mock/subscription.go create mode 100644 pkg/notification/storage/v2/subscription.go create mode 100644 pkg/notification/storage/v2/subscription_test.go create mode 100644 pkg/opsevent/batch/executor/BUILD.bazel create mode 100644 pkg/opsevent/batch/executor/executor.go create mode 100644 pkg/opsevent/batch/executor/executor_test.go create mode 100644 pkg/opsevent/batch/executor/mock/BUILD.bazel create mode 100644 pkg/opsevent/batch/executor/mock/executor.go create mode 100644 pkg/opsevent/batch/job/BUILD.bazel create mode 100644 pkg/opsevent/batch/job/count_watcher.go create mode 100644 pkg/opsevent/batch/job/count_watcher_test.go create mode 100644 pkg/opsevent/batch/job/datetime_watcher.go create mode 100644 pkg/opsevent/batch/job/datetime_watcher_test.go create mode 100644 pkg/opsevent/batch/job/job.go create mode 100644 pkg/opsevent/batch/targetstore/BUILD.bazel create mode 100644 
pkg/opsevent/batch/targetstore/metrics.go create mode 100644 pkg/opsevent/batch/targetstore/mock/BUILD.bazel create mode 100644 pkg/opsevent/batch/targetstore/mock/targetstore.go create mode 100644 pkg/opsevent/batch/targetstore/targetstore.go create mode 100644 pkg/opsevent/batch/targetstore/targetstore_test.go create mode 100644 pkg/opsevent/cmd/batch/BUILD.bazel create mode 100644 pkg/opsevent/cmd/batch/batch.go create mode 100644 pkg/opsevent/domain/BUILD.bazel create mode 100644 pkg/opsevent/domain/ops_count.go create mode 100644 pkg/opsevent/domain/ops_count_test.go create mode 100644 pkg/opsevent/storage/v2/BUILD.bazel create mode 100644 pkg/opsevent/storage/v2/mock/BUILD.bazel create mode 100644 pkg/opsevent/storage/v2/mock/ops_count.go create mode 100644 pkg/opsevent/storage/v2/ops_count.go create mode 100644 pkg/opsevent/storage/v2/ops_count_test.go create mode 100644 pkg/pubsub/BUILD.bazel create mode 100644 pkg/pubsub/publisher/BUILD.bazel create mode 100644 pkg/pubsub/publisher/metrics.go create mode 100644 pkg/pubsub/publisher/mock/BUILD.bazel create mode 100644 pkg/pubsub/publisher/mock/publisher.go create mode 100644 pkg/pubsub/publisher/publisher.go create mode 100644 pkg/pubsub/pubsub.go create mode 100644 pkg/pubsub/puller/BUILD.bazel create mode 100644 pkg/pubsub/puller/codes/BUILD.bazel create mode 100644 pkg/pubsub/puller/codes/code_string.go create mode 100644 pkg/pubsub/puller/codes/codes.go create mode 100644 pkg/pubsub/puller/mock/BUILD.bazel create mode 100644 pkg/pubsub/puller/mock/puller.go create mode 100644 pkg/pubsub/puller/mock/rate_limited_puller.go create mode 100644 pkg/pubsub/puller/puller.go create mode 100644 pkg/pubsub/puller/rate_limited_puller.go create mode 100644 pkg/push/api/BUILD.bazel create mode 100644 pkg/push/api/api.go create mode 100644 pkg/push/api/api_test.go create mode 100644 pkg/push/api/error.go create mode 100644 pkg/push/client/BUILD.bazel create mode 100644 pkg/push/client/client.go create mode 100644 
pkg/push/client/mock/BUILD.bazel create mode 100644 pkg/push/client/mock/client.go create mode 100644 pkg/push/cmd/sender/BUILD.bazel create mode 100644 pkg/push/cmd/sender/sender.go create mode 100644 pkg/push/cmd/server/BUILD.bazel create mode 100644 pkg/push/cmd/server/server.go create mode 100644 pkg/push/command/BUILD.bazel create mode 100644 pkg/push/command/command.go create mode 100644 pkg/push/command/push.go create mode 100644 pkg/push/command/push_test.go create mode 100644 pkg/push/domain/BUILD.bazel create mode 100644 pkg/push/domain/push.go create mode 100644 pkg/push/domain/push_test.go create mode 100644 pkg/push/sender/BUILD.bazel create mode 100644 pkg/push/sender/metrics.go create mode 100644 pkg/push/sender/sender.go create mode 100644 pkg/push/sender/sender_test.go create mode 100644 pkg/push/storage/v2/BUILD.bazel create mode 100644 pkg/push/storage/v2/mock/BUILD.bazel create mode 100644 pkg/push/storage/v2/mock/push.go create mode 100644 pkg/push/storage/v2/push.go create mode 100644 pkg/push/storage/v2/push_test.go create mode 100644 pkg/redis/BUILD.bazel create mode 100644 pkg/redis/conn.go create mode 100644 pkg/redis/metrics.go create mode 100644 pkg/redis/redis.go create mode 100644 pkg/redis/redis_test.go create mode 100644 pkg/redis/v2/BUILD.bazel create mode 100644 pkg/redis/v2/redis.go create mode 100644 pkg/redis/v3/BUILD.bazel create mode 100644 pkg/redis/v3/redis.go create mode 100644 pkg/rest/BUILD.bazel create mode 100644 pkg/rest/error.go create mode 100644 pkg/rest/handler.go create mode 100644 pkg/rest/log.go create mode 100644 pkg/rest/log_test.go create mode 100644 pkg/rest/metrics.go create mode 100644 pkg/rest/middleware.go create mode 100644 pkg/rest/middleware_test.go create mode 100644 pkg/rest/response.go create mode 100644 pkg/rest/server.go create mode 100644 pkg/rest/server_test.go create mode 100644 pkg/rest/testdata/server.crt create mode 100644 pkg/rest/testdata/server.key create mode 100644 
pkg/rest/testdata/service.config create mode 100644 pkg/role/BUILD.bazel create mode 100644 pkg/role/role.go create mode 100644 pkg/role/role_test.go create mode 100644 pkg/rpc/BUILD.bazel create mode 100644 pkg/rpc/auth.go create mode 100644 pkg/rpc/client/BUILD.bazel create mode 100644 pkg/rpc/client/client.go create mode 100644 pkg/rpc/client/credentials.go create mode 100644 pkg/rpc/client/interceptor.go create mode 100644 pkg/rpc/client/interceptor_test.go create mode 100644 pkg/rpc/client/log.go create mode 100644 pkg/rpc/client/metrics.go create mode 100644 pkg/rpc/client/request_id.go create mode 100644 pkg/rpc/interceptor.go create mode 100644 pkg/rpc/interceptor_test.go create mode 100644 pkg/rpc/log.go create mode 100644 pkg/rpc/log_test.go create mode 100644 pkg/rpc/metadata/BUILD.bazel create mode 100644 pkg/rpc/metadata/request_id.go create mode 100644 pkg/rpc/metadata/request_id_test.go create mode 100644 pkg/rpc/metrics.go create mode 100644 pkg/rpc/server.go create mode 100644 pkg/rpc/server_test.go create mode 100644 pkg/rpc/service.go create mode 100644 pkg/rpc/status/BUILD.bazel create mode 100644 pkg/rpc/status/status.go create mode 100644 pkg/rpc/testdata/server.crt create mode 100644 pkg/rpc/testdata/server.key create mode 100644 pkg/rpc/testdata/service.config create mode 100644 pkg/storage/BUILD.bazel create mode 100644 pkg/storage/druid/BUILD.bazel create mode 100644 pkg/storage/druid/broker_client.go create mode 100644 pkg/storage/druid/coordinator_client.go create mode 100644 pkg/storage/druid/druid.go create mode 100644 pkg/storage/druid/overlord_client.go create mode 100644 pkg/storage/kafka/BUILD.bazel create mode 100644 pkg/storage/kafka/cluster_admin.go create mode 100644 pkg/storage/kafka/kafka.go create mode 100644 pkg/storage/kafka/producer.go create mode 100644 pkg/storage/kafka/scram_client.go create mode 100644 pkg/storage/mock/BUILD.bazel create mode 100644 pkg/storage/mock/storage.go create mode 100644 pkg/storage/storage.go 
create mode 100644 pkg/storage/testing/BUILD.bazel create mode 100644 pkg/storage/testing/file.go create mode 100644 pkg/storage/testing/storage.go create mode 100644 pkg/storage/v2/bigtable/BUILD.bazel create mode 100644 pkg/storage/v2/bigtable/client.go create mode 100644 pkg/storage/v2/bigtable/metrics.go create mode 100644 pkg/storage/v2/bigtable/mock/BUILD.bazel create mode 100644 pkg/storage/v2/bigtable/mock/client.go create mode 100644 pkg/storage/v2/bigtable/request.go create mode 100644 pkg/storage/v2/bigtable/request_test.go create mode 100644 pkg/storage/v2/bigtable/rows.go create mode 100644 pkg/storage/v2/bigtable/rows_test.go create mode 100644 pkg/storage/v2/mysql/BUILD.bazel create mode 100644 pkg/storage/v2/mysql/client.go create mode 100644 pkg/storage/v2/mysql/error.go create mode 100644 pkg/storage/v2/mysql/error_test.go create mode 100644 pkg/storage/v2/mysql/json.go create mode 100644 pkg/storage/v2/mysql/jsonpb.go create mode 100644 pkg/storage/v2/mysql/metrics.go create mode 100644 pkg/storage/v2/mysql/mock/BUILD.bazel create mode 100644 pkg/storage/v2/mysql/mock/client.go create mode 100644 pkg/storage/v2/mysql/mock/query.go create mode 100644 pkg/storage/v2/mysql/mock/result.go create mode 100644 pkg/storage/v2/mysql/mock/transaction.go create mode 100644 pkg/storage/v2/mysql/query.go create mode 100644 pkg/storage/v2/mysql/query_test.go create mode 100644 pkg/storage/v2/mysql/result.go create mode 100644 pkg/storage/v2/mysql/transaction.go create mode 100644 pkg/storage/v2/postgres/BUILD.bazel create mode 100644 pkg/storage/v2/postgres/client.go create mode 100644 pkg/storage/v2/postgres/error.go create mode 100644 pkg/storage/v2/postgres/error_test.go create mode 100644 pkg/storage/v2/postgres/json.go create mode 100644 pkg/storage/v2/postgres/result.go create mode 100644 pkg/token/BUILD.bazel create mode 100644 pkg/token/idtoken.go create mode 100644 pkg/token/idtoken_test.go create mode 100644 pkg/token/signer.go create mode 100644 
pkg/token/signer_test.go create mode 100644 pkg/token/testdata/invalid-private.pem create mode 100644 pkg/token/testdata/invalid-public.pem create mode 100644 pkg/token/testdata/valid-private.pem create mode 100644 pkg/token/testdata/valid-public.pem create mode 100644 pkg/token/verifier.go create mode 100644 pkg/token/verifier_test.go create mode 100644 pkg/trace/BUILD.bazel create mode 100644 pkg/trace/trace.go create mode 100644 pkg/trace/trace_test.go create mode 100644 pkg/user/api/BUILD.bazel create mode 100644 pkg/user/api/api.go create mode 100644 pkg/user/api/error.go create mode 100644 pkg/user/api/user.go create mode 100644 pkg/user/api/user_test.go create mode 100644 pkg/user/client/BUILD.bazel create mode 100644 pkg/user/client/client.go create mode 100644 pkg/user/client/mock/BUILD.bazel create mode 100644 pkg/user/client/mock/client.go create mode 100644 pkg/user/cmd/persister/BUILD.bazel create mode 100644 pkg/user/cmd/persister/persister.go create mode 100644 pkg/user/cmd/server/BUILD.bazel create mode 100644 pkg/user/cmd/server/server.go create mode 100644 pkg/user/domain/BUILD.bazel create mode 100644 pkg/user/domain/user.go create mode 100644 pkg/user/domain/user_test.go create mode 100644 pkg/user/persister/BUILD.bazel create mode 100644 pkg/user/persister/metrics.go create mode 100644 pkg/user/persister/persister.go create mode 100644 pkg/user/persister/persister_test.go create mode 100644 pkg/user/storage/v2/BUILD.bazel create mode 100644 pkg/user/storage/v2/mock/BUILD.bazel create mode 100644 pkg/user/storage/v2/mock/user.go create mode 100644 pkg/user/storage/v2/user.go create mode 100644 pkg/user/storage/v2/user_test.go create mode 100644 pkg/uuid/BUILD.bazel create mode 100644 pkg/uuid/uuid.go create mode 100644 pkg/uuid/uuid_test.go create mode 100644 proto/.clang-format create mode 100644 proto/BUILD.bazel create mode 100644 proto/Makefile create mode 100644 proto/account/BUILD.bazel create mode 100644 proto/account/account.proto create 
mode 100644 proto/account/api_key.proto create mode 100644 proto/account/command.proto create mode 100644 proto/account/service.proto create mode 100644 proto/auditlog/BUILD.bazel create mode 100644 proto/auditlog/auditlog.proto create mode 100644 proto/auditlog/service.proto create mode 100644 proto/auth/BUILD.bazel create mode 100644 proto/auth/service.proto create mode 100644 proto/auth/token.proto create mode 100644 proto/autoops/BUILD.bazel create mode 100644 proto/autoops/auto_ops_rule.proto create mode 100644 proto/autoops/clause.proto create mode 100644 proto/autoops/command.proto create mode 100644 proto/autoops/ops_count.proto create mode 100644 proto/autoops/service.proto create mode 100644 proto/autoops/webhook.proto create mode 100644 proto/environment/BUILD.bazel create mode 100644 proto/environment/command.proto create mode 100644 proto/environment/environment.proto create mode 100644 proto/environment/project.proto create mode 100644 proto/environment/service.proto create mode 100644 proto/event/client/BUILD.bazel create mode 100644 proto/event/client/event.proto create mode 100644 proto/event/domain/BUILD.bazel create mode 100644 proto/event/domain/event.proto create mode 100644 proto/event/domain/localized_message.proto create mode 100644 proto/event/service/BUILD.bazel create mode 100644 proto/event/service/feature.proto create mode 100644 proto/event/service/segment.proto create mode 100644 proto/event/service/user.proto create mode 100644 proto/eventcounter/BUILD.bazel create mode 100644 proto/eventcounter/distribution_summary.proto create mode 100644 proto/eventcounter/evaluation_count.proto create mode 100644 proto/eventcounter/experiment_count.proto create mode 100644 proto/eventcounter/experiment_result.proto create mode 100644 proto/eventcounter/filter.proto create mode 100644 proto/eventcounter/goal_result.proto create mode 100644 proto/eventcounter/histogram.proto create mode 100644 proto/eventcounter/service.proto create mode 100644 
proto/eventcounter/table.proto create mode 100644 proto/eventcounter/timeseries.proto create mode 100644 proto/eventcounter/variation_count.proto create mode 100644 proto/eventcounter/variation_result.proto create mode 100644 proto/experiment/BUILD.bazel create mode 100644 proto/experiment/command.proto create mode 100644 proto/experiment/experiment.proto create mode 100644 proto/experiment/goal.proto create mode 100644 proto/experiment/service.proto create mode 100644 proto/external/googleapis/googleapis/83e756a66b80b072bd234abcfe89edf459090974/google/rpc/BUILD.bazel create mode 100644 proto/external/googleapis/googleapis/83e756a66b80b072bd234abcfe89edf459090974/google/rpc/code.proto create mode 100644 proto/external/googleapis/googleapis/83e756a66b80b072bd234abcfe89edf459090974/google/rpc/error_details.proto create mode 100644 proto/external/googleapis/googleapis/83e756a66b80b072bd234abcfe89edf459090974/google/rpc/status.proto create mode 100644 proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/BUILD.bazel create mode 100644 proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/any.proto create mode 100644 proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/api.proto create mode 100644 proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/descriptor.proto create mode 100644 proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/duration.proto create mode 100644 proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/empty.proto create mode 100644 proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/field_mask.proto create mode 100644 proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/source_context.proto create mode 100644 proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/struct.proto create mode 100644 proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/timestamp.proto create mode 100644 
proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/type.proto create mode 100644 proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/wrappers.proto create mode 100644 proto/feature/BUILD.bazel create mode 100644 proto/feature/clause.proto create mode 100644 proto/feature/command.proto create mode 100644 proto/feature/evaluation.proto create mode 100644 proto/feature/feature.proto create mode 100644 proto/feature/feature_last_used_info.proto create mode 100644 proto/feature/prerequisite.proto create mode 100644 proto/feature/reason.proto create mode 100644 proto/feature/rule.proto create mode 100644 proto/feature/segment.proto create mode 100644 proto/feature/service.proto create mode 100644 proto/feature/strategy.proto create mode 100644 proto/feature/target.proto create mode 100644 proto/feature/variation.proto create mode 100644 proto/gateway/BUILD.bazel create mode 100644 proto/gateway/service.proto create mode 100644 proto/migration/BUILD.bazel create mode 100644 proto/migration/mysql_service.proto create mode 100644 proto/notification/BUILD.bazel create mode 100644 proto/notification/command.proto create mode 100644 proto/notification/recipient.proto create mode 100644 proto/notification/sender/BUILD.bazel create mode 100644 proto/notification/sender/notification.proto create mode 100644 proto/notification/sender/notification_event.proto create mode 100644 proto/notification/service.proto create mode 100644 proto/notification/subscription.proto create mode 100644 proto/proto.lock create mode 100644 proto/proto_descriptor.bzl create mode 100644 proto/push/BUILD.bazel create mode 100644 proto/push/command.proto create mode 100644 proto/push/push.proto create mode 100644 proto/push/service.proto create mode 100644 proto/test/BUILD.bazel create mode 100644 proto/test/service.proto create mode 100644 proto/user/BUILD.bazel create mode 100644 proto/user/service.proto create mode 100644 proto/user/user.proto create mode 100644 
python/.gitignore create mode 100644 python/Dockerfile create mode 100644 python/Makefile create mode 100644 python/README.md create mode 100644 python/poetry.lock create mode 100644 python/poetry.toml create mode 100644 python/pyproject.toml create mode 100644 python/requirements-dev.txt create mode 100644 python/requirements.txt create mode 100644 python/src/cmd/calculator/main.py create mode 100644 python/src/lib/calculator/domain/experiment.py create mode 100644 python/src/lib/calculator/domain/experiment_result.py create mode 100644 python/src/lib/calculator/job/calculate_experiments.py create mode 100644 python/src/lib/calculator/job/metrics.py create mode 100644 python/src/lib/calculator/stats/binomial.py create mode 100644 python/src/lib/calculator/stats/metrics.py create mode 100644 python/src/lib/calculator/stats/normal_inverse_gamma.py create mode 100644 python/src/lib/calculator/storage/mysql_experiment_result.py create mode 100644 python/src/lib/environment/stub/stub.py create mode 100644 python/src/lib/eventcounter/stub/stub.py create mode 100644 python/src/lib/experiment/stub/stub.py create mode 100644 python/src/lib/health/health.py create mode 100644 python/src/lib/log/formatter.py create mode 100644 python/src/lib/log/logger.py create mode 100644 python/src/lib/metrics/server.py create mode 100644 python/src/lib/rpc/rpc.py create mode 100644 python/src/lib/schedule/job.py create mode 100644 python/src/lib/schedule/metrics.py create mode 100644 python/src/lib/schedule/scheduler.py create mode 100644 python/src/lib/signal/signal_handler.py create mode 100644 python/src/lib/storage/mysql/client.py create mode 100644 python/tests/lib/calculator/domain/experiment_result_test.py create mode 100644 python/tests/lib/calculator/domain/experiment_test.py create mode 100644 python/tests/lib/calculator/job/calculate_experiments_test.py create mode 100644 python/tests/lib/calculator/stats/binomial_test.py create mode 100644 
python/tests/lib/calculator/stats/normal_inverse_gamma_test.py create mode 100755 remove.sh create mode 100644 renovate.json create mode 100644 repositories.bzl create mode 100644 static-files/img/bucketeer-dashboard.png create mode 100644 test/e2e/autoops/BUILD.bazel create mode 100644 test/e2e/autoops/auto_ops_test.go create mode 100644 test/e2e/environment/BUILD.bazel create mode 100644 test/e2e/environment/environment_test.go create mode 100644 test/e2e/environment/project_test.go create mode 100644 test/e2e/eventcounter/BUILD.bazel create mode 100644 test/e2e/eventcounter/eventcounter_test.go create mode 100644 test/e2e/experiment/BUILD.bazel create mode 100644 test/e2e/experiment/experiment_test.go create mode 100644 test/e2e/feature/BUILD.bazel create mode 100644 test/e2e/feature/feature_last_used_info_test.go create mode 100644 test/e2e/feature/feature_test.go create mode 100644 test/e2e/feature/segment_test.go create mode 100644 test/e2e/feature/segment_user_test.go create mode 100644 test/e2e/feature/tag_test.go create mode 100644 test/e2e/feature/user_evaluations_test.go create mode 100644 test/e2e/gateway/BUILD.bazel create mode 100644 test/e2e/gateway/api_grpc_test.go create mode 100644 test/e2e/gateway/api_test.go create mode 100644 test/e2e/gateway/testdata/invalid-apikey create mode 100644 test/e2e/notification/BUILD.bazel create mode 100644 test/e2e/notification/admin_subscription_test.go create mode 100644 test/e2e/notification/subscription_test.go create mode 100644 test/e2e/push/BUILD.bazel create mode 100644 test/e2e/push/push_test.go create mode 100644 test/e2e/user/BUILD.bazel create mode 100644 test/e2e/user/user_test.go create mode 100644 test/e2e/util/BUILD.bazel create mode 100644 test/e2e/util/rest.go create mode 100644 test/util/BUILD.bazel create mode 100644 test/util/command.go create mode 100644 test/util/sort.go create mode 100755 tools/build/status.sh create mode 100644 tools/bzl/nodejs/BUILD.bazel create mode 100644 
tools/bzl/nodejs/defs.bzl create mode 100644 tools/bzl/nodejs/protobufjs/BUILD.bazel create mode 100644 tools/bzl/nodejs/protobufjs/ts_proto_library.bzl create mode 100644 tools/gen/gen.sh create mode 100644 tools/runner/Dockerfile create mode 100644 ui/web-v2/.editorconfig create mode 100644 ui/web-v2/.eslintignore create mode 100644 ui/web-v2/.eslintrc.json create mode 100644 ui/web-v2/.gitignore create mode 100644 ui/web-v2/.prettierignore create mode 100644 ui/web-v2/.prettierrc create mode 100644 ui/web-v2/BUILD.bazel create mode 100644 ui/web-v2/Dockerfile create mode 100644 ui/web-v2/Makefile create mode 100644 ui/web-v2/README.md create mode 100644 ui/web-v2/apps/.gitkeep create mode 100644 ui/web-v2/apps/admin/.babelrc create mode 100644 ui/web-v2/apps/admin/.browserslistrc create mode 100644 ui/web-v2/apps/admin/.eslintrc.json create mode 100644 ui/web-v2/apps/admin/babel-jest.config.json create mode 100644 ui/web-v2/apps/admin/certs/.gitkeep create mode 100644 ui/web-v2/apps/admin/jest.config.js create mode 100644 ui/web-v2/apps/admin/src/assets/.gitkeep create mode 100644 ui/web-v2/apps/admin/src/assets/lang/en.json create mode 100644 ui/web-v2/apps/admin/src/assets/lang/ja.json create mode 100644 ui/web-v2/apps/admin/src/assets/logo.png create mode 100644 ui/web-v2/apps/admin/src/components/APIKeyAddForm/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/APIKeyList/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/APIKeySearch/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/APIKeyUpdateForm/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/AccountAddForm/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/AccountList/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/AccountSearch/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/AccountUpdateForm/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/ActionMenu/index.tsx create mode 100644 
ui/web-v2/apps/admin/src/components/AdminAccountAddForm/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/AdminAccountList/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/AdminAccountSearch/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/AdminNotificationAddForm/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/AdminNotificationList/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/AdminNotificationUpdateForm/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/AnalysisForm/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/AnalysisTable/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/AuditLogList/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/AuditLogSearch/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/Breadcrumbs/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/CheckBox/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/CheckBoxList/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/ConfirmDialog/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/CopyChip/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/CountResultBarChart/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/CountResultPieChart/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/CreatableSelect/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/DatetimePicker/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/DetailSkeleton/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/EnvironmentAddForm/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/EnvironmentList/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/EnvironmentSearch/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/EnvironmentSelect/index.tsx create mode 100644 
ui/web-v2/apps/admin/src/components/EnvironmentUpdateForm/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/ExperimentAddForm/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/ExperimentList/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/ExperimentResultDetail/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/ExperimentSearch/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/ExperimentUpdateForm/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/FeatureAddForm/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/FeatureAutoOpsRulesForm/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/FeatureCloneForm/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/FeatureConfirmDialog/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/FeatureEvaluation/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/FeatureHeader/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/FeatureIdChip/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/FeatureList/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/FeatureSettingsForm/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/FeatureTargetingForm/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/FeatureVariationsForm/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/FilterChip/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/FilterPopover/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/FilterRemoveAllButton/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/GoalAddForm/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/GoalList/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/GoalResultDetail/ConversionRateDetail/index.tsx create mode 100644 
ui/web-v2/apps/admin/src/components/GoalResultDetail/ConversionRateDistributionChart/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/GoalResultDetail/ConversionRateDistributionTimeseriesChart/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/GoalResultDetail/ConversionRateTable/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/GoalResultDetail/ConversionRateTimeseriesChart/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/GoalResultDetail/EvaluationUserTimeseriesChart/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/GoalResultDetail/GoalResultTable/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/GoalResultDetail/GoalTotalTimeseriesChart/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/GoalResultDetail/GoalUserTimeseriesChart/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/GoalResultDetail/Table/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/GoalResultDetail/ValuePerUserDetail/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/GoalResultDetail/ValuePerUserDistributionTimeseriesChart/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/GoalResultDetail/ValuePerUserTable/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/GoalResultDetail/ValuePerUserTimeseriesChart/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/GoalResultDetail/ValueTotalTimeseriesChart/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/GoalResultDetail/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/GoalUpdateForm/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/Header/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/HelpTextTooltip/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/HistogramChart/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/HoverPopover/index.tsx create mode 100644 
ui/web-v2/apps/admin/src/components/ListSkeleton/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/ListTab/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/Modal/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/NotFound/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/NotificationAddForm/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/NotificationList/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/NotificationSearch/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/NotificationUpdateForm/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/Overlay/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/Pagination/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/ProjectAddForm/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/ProjectList/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/ProjectSearch/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/ProjectUpdateForm/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/PushAddForm/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/PushList/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/PushSearch/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/PushUpdateForm/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/RelativeDateText/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/SearchInput/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/SegmentAddForm/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/SegmentDeleteDialog/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/SegmentList/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/SegmentSearch/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/SegmentUpdateForm/index.tsx create mode 100644 
ui/web-v2/apps/admin/src/components/Select/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/SideMenu/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/SortSelect/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/Switch/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/TagsChips/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/TimeseriesAreaLineChart/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/TimeseriesLineChart/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/TimeseriesStackedLineChart/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/Toasts/index.tsx create mode 100644 ui/web-v2/apps/admin/src/components/VariationInput/index.tsx create mode 100644 ui/web-v2/apps/admin/src/config/index.ts create mode 100644 ui/web-v2/apps/admin/src/constants/account.ts create mode 100644 ui/web-v2/apps/admin/src/constants/adminNotification.ts create mode 100644 ui/web-v2/apps/admin/src/constants/analysis.ts create mode 100644 ui/web-v2/apps/admin/src/constants/apiKey.ts create mode 100644 ui/web-v2/apps/admin/src/constants/auditLog.ts create mode 100644 ui/web-v2/apps/admin/src/constants/autoops.ts create mode 100644 ui/web-v2/apps/admin/src/constants/colorPattern.ts create mode 100644 ui/web-v2/apps/admin/src/constants/environment.ts create mode 100644 ui/web-v2/apps/admin/src/constants/experiment.ts create mode 100644 ui/web-v2/apps/admin/src/constants/feature.ts create mode 100644 ui/web-v2/apps/admin/src/constants/goal.ts create mode 100644 ui/web-v2/apps/admin/src/constants/notification.ts create mode 100644 ui/web-v2/apps/admin/src/constants/project.ts create mode 100644 ui/web-v2/apps/admin/src/constants/push.ts create mode 100644 ui/web-v2/apps/admin/src/constants/routing.ts create mode 100644 ui/web-v2/apps/admin/src/constants/segment.ts create mode 100644 ui/web-v2/apps/admin/src/constants/variation.ts create mode 100644 
ui/web-v2/apps/admin/src/cookie/index.ts create mode 100644 ui/web-v2/apps/admin/src/environments/environment.prod.ts create mode 100644 ui/web-v2/apps/admin/src/environments/environment.ts create mode 100644 ui/web-v2/apps/admin/src/favicon.ico create mode 100644 ui/web-v2/apps/admin/src/grpc/account.ts create mode 100644 ui/web-v2/apps/admin/src/grpc/adminSubscription.ts create mode 100644 ui/web-v2/apps/admin/src/grpc/adminaccount.ts create mode 100644 ui/web-v2/apps/admin/src/grpc/apikey.ts create mode 100644 ui/web-v2/apps/admin/src/grpc/auditLog.ts create mode 100644 ui/web-v2/apps/admin/src/grpc/auth.ts create mode 100644 ui/web-v2/apps/admin/src/grpc/autoops.ts create mode 100644 ui/web-v2/apps/admin/src/grpc/environment.ts create mode 100644 ui/web-v2/apps/admin/src/grpc/eventcounter.ts create mode 100644 ui/web-v2/apps/admin/src/grpc/experiment.ts create mode 100644 ui/web-v2/apps/admin/src/grpc/features.ts create mode 100644 ui/web-v2/apps/admin/src/grpc/messages.ts create mode 100644 ui/web-v2/apps/admin/src/grpc/project.ts create mode 100644 ui/web-v2/apps/admin/src/grpc/push.ts create mode 100644 ui/web-v2/apps/admin/src/grpc/segments.ts create mode 100644 ui/web-v2/apps/admin/src/grpc/subscription.ts create mode 100644 ui/web-v2/apps/admin/src/grpc/utils.ts create mode 100644 ui/web-v2/apps/admin/src/history/index.ts create mode 100644 ui/web-v2/apps/admin/src/index.html create mode 100644 ui/web-v2/apps/admin/src/interfaces/grpc.ts create mode 100644 ui/web-v2/apps/admin/src/lang/index.ts create mode 100644 ui/web-v2/apps/admin/src/lang/messages.ts create mode 100644 ui/web-v2/apps/admin/src/lang/yup/jp.ts create mode 100644 ui/web-v2/apps/admin/src/main.tsx create mode 100644 ui/web-v2/apps/admin/src/middlewares/thunkErrorHandler.ts create mode 100644 ui/web-v2/apps/admin/src/modules/accounts.ts create mode 100644 ui/web-v2/apps/admin/src/modules/adminAccounts.ts create mode 100644 ui/web-v2/apps/admin/src/modules/adminNotifications.ts create mode 
100644 ui/web-v2/apps/admin/src/modules/apiKeys.ts create mode 100644 ui/web-v2/apps/admin/src/modules/auditLogs.ts create mode 100644 ui/web-v2/apps/admin/src/modules/auth.ts create mode 100644 ui/web-v2/apps/admin/src/modules/autoOpsRules.ts create mode 100644 ui/web-v2/apps/admin/src/modules/environments.ts create mode 100644 ui/web-v2/apps/admin/src/modules/evaluationTimeseriesCount.ts create mode 100644 ui/web-v2/apps/admin/src/modules/experimentResult.ts create mode 100644 ui/web-v2/apps/admin/src/modules/experiments.ts create mode 100644 ui/web-v2/apps/admin/src/modules/features.ts create mode 100644 ui/web-v2/apps/admin/src/modules/goalCounts.ts create mode 100644 ui/web-v2/apps/admin/src/modules/goals.ts create mode 100644 ui/web-v2/apps/admin/src/modules/index.ts create mode 100644 ui/web-v2/apps/admin/src/modules/me.ts create mode 100644 ui/web-v2/apps/admin/src/modules/notifications.ts create mode 100644 ui/web-v2/apps/admin/src/modules/projects.ts create mode 100644 ui/web-v2/apps/admin/src/modules/pushes.ts create mode 100644 ui/web-v2/apps/admin/src/modules/segments.ts create mode 100644 ui/web-v2/apps/admin/src/modules/toasts.ts create mode 100644 ui/web-v2/apps/admin/src/modules/userMetadata.ts create mode 100644 ui/web-v2/apps/admin/src/pages/account/formSchema.ts create mode 100644 ui/web-v2/apps/admin/src/pages/account/index.tsx create mode 100644 ui/web-v2/apps/admin/src/pages/admin/account/formSchema.ts create mode 100644 ui/web-v2/apps/admin/src/pages/admin/account/index.tsx create mode 100644 ui/web-v2/apps/admin/src/pages/admin/auditLog/index.tsx create mode 100644 ui/web-v2/apps/admin/src/pages/admin/environment/formSchema.ts create mode 100644 ui/web-v2/apps/admin/src/pages/admin/environment/index.tsx create mode 100644 ui/web-v2/apps/admin/src/pages/admin/index.tsx create mode 100644 ui/web-v2/apps/admin/src/pages/admin/notification/formSchema.ts create mode 100644 ui/web-v2/apps/admin/src/pages/admin/notification/index.tsx create mode 
100644 ui/web-v2/apps/admin/src/pages/admin/projects/formSchema.ts create mode 100644 ui/web-v2/apps/admin/src/pages/admin/projects/index.tsx create mode 100644 ui/web-v2/apps/admin/src/pages/analysis/formSchema.ts create mode 100644 ui/web-v2/apps/admin/src/pages/analysis/index.tsx create mode 100644 ui/web-v2/apps/admin/src/pages/apiKey/formSchema.ts create mode 100644 ui/web-v2/apps/admin/src/pages/apiKey/index.tsx create mode 100644 ui/web-v2/apps/admin/src/pages/auditLog/index.tsx create mode 100644 ui/web-v2/apps/admin/src/pages/auth/index.tsx create mode 100644 ui/web-v2/apps/admin/src/pages/experiment/formSchema.ts create mode 100644 ui/web-v2/apps/admin/src/pages/experiment/index.tsx create mode 100644 ui/web-v2/apps/admin/src/pages/feature/autoops.tsx create mode 100644 ui/web-v2/apps/admin/src/pages/feature/detail.tsx create mode 100644 ui/web-v2/apps/admin/src/pages/feature/evaluation.tsx create mode 100644 ui/web-v2/apps/admin/src/pages/feature/experiments.tsx create mode 100644 ui/web-v2/apps/admin/src/pages/feature/formSchema.ts create mode 100644 ui/web-v2/apps/admin/src/pages/feature/history.tsx create mode 100644 ui/web-v2/apps/admin/src/pages/feature/index.tsx create mode 100644 ui/web-v2/apps/admin/src/pages/feature/settings.tsx create mode 100644 ui/web-v2/apps/admin/src/pages/feature/targeting.tsx create mode 100644 ui/web-v2/apps/admin/src/pages/feature/variations.tsx create mode 100644 ui/web-v2/apps/admin/src/pages/goal/formSchema.ts create mode 100644 ui/web-v2/apps/admin/src/pages/goal/index.tsx create mode 100644 ui/web-v2/apps/admin/src/pages/index.tsx create mode 100644 ui/web-v2/apps/admin/src/pages/notification/formSchema.ts create mode 100644 ui/web-v2/apps/admin/src/pages/notification/index.tsx create mode 100644 ui/web-v2/apps/admin/src/pages/push/formSchema.ts create mode 100644 ui/web-v2/apps/admin/src/pages/push/index.tsx create mode 100644 ui/web-v2/apps/admin/src/pages/segment/formSchema.ts create mode 100644 
ui/web-v2/apps/admin/src/pages/segment/index.tsx create mode 100644 ui/web-v2/apps/admin/src/pages/settings/index.tsx create mode 100644 ui/web-v2/apps/admin/src/polyfills.ts create mode 100644 ui/web-v2/apps/admin/src/postcss.config.js create mode 100644 ui/web-v2/apps/admin/src/storage/environment.ts create mode 100644 ui/web-v2/apps/admin/src/storage/token.ts create mode 100644 ui/web-v2/apps/admin/src/store/index.ts create mode 100644 ui/web-v2/apps/admin/src/styles/styles.css create mode 100644 ui/web-v2/apps/admin/src/types/account.ts create mode 100644 ui/web-v2/apps/admin/src/types/adminAccount.ts create mode 100644 ui/web-v2/apps/admin/src/types/adminNotification.ts create mode 100644 ui/web-v2/apps/admin/src/types/apiKey.ts create mode 100644 ui/web-v2/apps/admin/src/types/auditLog.ts create mode 100644 ui/web-v2/apps/admin/src/types/environment.ts create mode 100644 ui/web-v2/apps/admin/src/types/experiment.ts create mode 100644 ui/web-v2/apps/admin/src/types/feature.ts create mode 100644 ui/web-v2/apps/admin/src/types/goal.ts create mode 100644 ui/web-v2/apps/admin/src/types/list.ts create mode 100644 ui/web-v2/apps/admin/src/types/notification.ts create mode 100644 ui/web-v2/apps/admin/src/types/project.ts create mode 100644 ui/web-v2/apps/admin/src/types/push.ts create mode 100644 ui/web-v2/apps/admin/src/types/segment.ts create mode 100644 ui/web-v2/apps/admin/src/utils/css.ts create mode 100644 ui/web-v2/apps/admin/src/utils/date.ts create mode 100644 ui/web-v2/apps/admin/src/utils/search-params.ts create mode 100644 ui/web-v2/apps/admin/src/utils/validate.ts create mode 100644 ui/web-v2/apps/admin/tailwind.config.js create mode 100644 ui/web-v2/apps/admin/tsconfig.app.json create mode 100644 ui/web-v2/apps/admin/tsconfig.json create mode 100644 ui/web-v2/apps/admin/tsconfig.spec.json create mode 100644 ui/web-v2/apps/admin/webpack-config.js create mode 100644 ui/web-v2/babel.config.json create mode 100644 ui/web-v2/jest.config.js create mode 100644 
ui/web-v2/jest.preset.js create mode 100644 ui/web-v2/libs/.gitkeep create mode 100644 ui/web-v2/nx.json create mode 100644 ui/web-v2/package.json create mode 100644 ui/web-v2/tools/generators/.gitkeep create mode 100644 ui/web-v2/tools/tsconfig.tools.json create mode 100644 ui/web-v2/tsconfig.base.json create mode 100644 ui/web-v2/workspace.json create mode 100644 ui/web-v2/yarn.lock diff --git a/.bazelrc b/.bazelrc new file mode 100644 index 000000000..6e69250ae --- /dev/null +++ b/.bazelrc @@ -0,0 +1,20 @@ +# Taken from Kubernetes + +# Show us information about failures. +build --verbose_failures +test --test_output=errors + +# Make /tmp hermetic. +build --sandbox_tmpfs_path=/tmp + +# Ensure that Bazel never runs as root, which can cause unit tests to fail. +# This flag requires Bazel 0.5.0+ +build --sandbox_fake_username + +# Enable go race detection. +test --features=race + +# Prevent TS worker from trying to expand the `includes` section in tsconfig.json. +# It would find the "test/*.ts" reference when compiling //src:src, and the FileCache will then error +# when TS attempts to read one of these files that doesn't belong in the compilation. 
+build --worker_sandboxing diff --git a/.bazelversion b/.bazelversion new file mode 100644 index 000000000..ac14c3dfa --- /dev/null +++ b/.bazelversion @@ -0,0 +1 @@ +5.1.1 diff --git a/.github/workflows/pr-title-validation.yaml b/.github/workflows/pr-title-validation.yaml new file mode 100644 index 000000000..3ee3cf945 --- /dev/null +++ b/.github/workflows/pr-title-validation.yaml @@ -0,0 +1,38 @@ +name: "pr-title-validation" + +on: + pull_request_target: + types: + - opened + - edited + - synchronize + +jobs: + validate_pr_title: + name: Validate PR title + runs-on: ubuntu-latest + steps: + - uses: amannn/action-semantic-pull-request@v4 + with: + # Use the following release types to match the same rules in the PR title lint + # https://github.com/googleapis/release-please/blob/main/src/changelog-notes.ts#L42-L55 + types: | + feat + fix + perf + deps + revert + docs + style + chore + refactor + test + build + ci + subjectPattern: ^(?![A-Z]).+$ + subjectPatternError: | + The subject "{subject}" found in the pull request title "{title}" + didn't match the configured pattern. Please ensure that the subject + doesn't start with an uppercase character. + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/publish_chart.yaml b/.github/workflows/publish_chart.yaml new file mode 100644 index 000000000..30dea299f --- /dev/null +++ b/.github/workflows/publish_chart.yaml @@ -0,0 +1,62 @@ +name: publish-chart + +on: + workflow_dispatch: + inputs: + bucketeer_version: + description: "Bucketeer version" + required: false + push: + branches: + - main + tags: + - "v*" + paths: + - "!**/**.md" + - "CHANGELOG.md" # DO NOT remove this line. Otherwise, the publish chart won't start. 
+ +env: + REGISTRY: ghcr.io + HELM_VERSION: 3.8.2 + +jobs: + artifacts: + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + checks: read + steps: + - name: Checkout + uses: actions/checkout@v3 + with: + fetch-depth: 0 + # This is a workaround to ensure the publish_chart won't start before the release workflow. + # Because the version is based on the tag, if the publish_chart starts before + # the release workflow, it will create a chart with an old version. + - name: Wait for release note to succeed + uses: lewagon/wait-on-check-action@v1.0.0 + with: + ref: ${{ github.ref }} + # DO NOT CHANGE the check-name. This name is based on the workflow name defined in the release.yaml + check-name: "Release Please" + repo-token: ${{ secrets.GITHUB_TOKEN }} + wait-interval: 10 + - name: Determine version + run: | + if [ ! -z ${{ github.event.inputs.bucketeer_version }} ]; then + echo "BUCKETEER_VERSION=${{ github.event.inputs.bucketeer_version }}" >> $GITHUB_ENV + else + echo "BUCKETEER_VERSION=$(git describe --tags --always --abbrev=7)" >> $GITHUB_ENV + fi + - name: Install helm + uses: Azure/setup-helm@v1 + with: + version: ${{ env.HELM_VERSION }} + - name: Login to OCI using Helm + run: | + echo "${{ secrets.GITHUB_TOKEN }}" | helm registry login ${{ env.REGISTRY }} --username ${{ github.repository_owner }} --password-stdin + - name: Publish helm chart + run: | + make build-chart VERSION=${{ env.BUCKETEER_VERSION }} + helm push .artifacts/bucketeer-${{ env.BUCKETEER_VERSION }}.tgz oci://${{ env.REGISTRY }}/bucketeer-io/chart diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml new file mode 100644 index 000000000..57c6997f0 --- /dev/null +++ b/.github/workflows/release.yaml @@ -0,0 +1,27 @@ +name: release + +on: + workflow_dispatch: + push: + branches: + - main + +jobs: + release_please: + name: Release Please + runs-on: ubuntu-latest + steps: + - uses: google-github-actions/release-please-action@v3.5.0 + with: + 
changelog-types: | + [ + {"type":"build","section":"Build System","hidden":false}, + {"type":"chore","section":"Miscellaneous","hidden":false}, + {"type":"feat","section":"Features","hidden":false}, + {"type":"fix","section":"Bug Fixes","hidden":false}, + {"type":"perf","section":"Performance Improvements","hidden":false}, + {"type":"revert","section":"Reverts","hidden":false} + ] + release-type: simple + package-name: release-please-action + bump-minor-pre-major: true diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..65cc707e1 --- /dev/null +++ b/.gitignore @@ -0,0 +1,17 @@ +.artifacts +/build +/bazel-bin +/bazel-bucketeer +/bazel-genfiles +/bazel-out +/bazel-proto +/bazel-testlogs +.DS_Store +/vendor +/proto/**/*.pb.go +/proto/**/*.py +/proto/swift_output +node_modules +telepresence.log +venv +*.swp diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 000000000..0a8189e2c --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,25 @@ +run: + tests: false +linters-settings: + goimports: + # put imports beginning with prefix after 3rd-party packages; + # it's a comma-separated list of prefixes + local-prefixes: github.com/bucketeer-io/bucketeer +linters: + disable-all: true + enable: + - deadcode + - errcheck + - gosimple + - govet + - ineffassign + # Since we got the following error, staticcheck is disabled. After solving it, we'll enable it. + # pkg/account/apikeycacher/apikeycacher.go:284:12: SA1019: ptypes.UnmarshalAny is deprecated: Call the any.UnmarshalTo method instead. 
(staticcheck) + # - staticcheck + - structcheck + - typecheck + - unused + - varcheck + - gofmt + - goimports + - lll diff --git a/BUILD.bazel b/BUILD.bazel new file mode 100644 index 000000000..3eb235f3b --- /dev/null +++ b/BUILD.bazel @@ -0,0 +1,11 @@ +load("@bazel_gazelle//:def.bzl", "gazelle") + +# gazelle:exclude vendor +# gazelle:exclude proto/external +# gazelle:go_naming_convention go_default_library + +gazelle( + name = "gazelle", + command = "fix", + prefix = "github.com/bucketeer-io/bucketeer", +) diff --git a/BUILD.googleapis b/BUILD.googleapis new file mode 100644 index 000000000..18fd63893 --- /dev/null +++ b/BUILD.googleapis @@ -0,0 +1,21 @@ +package(default_visibility = ["//visibility:public"]) + +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +proto_library( + name = "api_proto", + srcs = [ + "google/api/http.proto", + "google/api/annotations.proto", + ], + deps = ["@com_google_protobuf//:descriptor_proto"], +) + +go_proto_library( + name = "api_go_proto", + importpath = "google/api", + proto = ":api_proto", + deps = [ + "@com_github_golang_protobuf//protoc-gen-go/descriptor:go_default_library", + ], +) \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 000000000..fdc4a9548 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,22 @@ +# Changelog + +## [0.1.0](https://github.com/bucketeer-io/bucketeer/compare/v0.0.0...v0.1.0) (2022-09-25) + + +### Features + +* add initial implementation ([#1](https://github.com/bucketeer-io/bucketeer/issues/1)) ([2ddbb2c](https://github.com/bucketeer-io/bucketeer/commit/2ddbb2c455a99cbce30a6e6da0b3859fdcc4b919)) + +## [0.1.1](https://github.com/bucketeer-io/bucketeer/compare/v0.1.0...v0.1.1) (2022-09-25) + + +### Bug Fixes + +* publish chart workflow not triggering ([5f73004](https://github.com/bucketeer-io/bucketeer/commit/5f7300484cb20ac5084960185a18b4ffe7160e1f)) + +## [0.1.0](https://github.com/bucketeer-io/bucketeer/compare/v0.0.0...v0.1.0) (2022-09-25) + + 
+### Features + +* add initial implementation ([#1](https://github.com/bucketeer-io/bucketeer/issues/1)) ([47bdcec](https://github.com/bucketeer-io/bucketeer/commit/47bdcec22d4237fcc2b16b42198b9f1290e48ad0)) diff --git a/CLA.md b/CLA.md new file mode 100644 index 000000000..3bc87862b --- /dev/null +++ b/CLA.md @@ -0,0 +1,26 @@ +## Bucketeer Project - Individual Contributor License Agreement + +Thank you for your interest in Bucketeer Project. In order to clarify the intellectual property license granted with Contributions from any person or entity, the Project must have a Contributor License Agreement ("CLA") on file that has been signed by each Contributor, indicating agreement to the license terms below. This license is for your protection as a Contributor as well as the protection of the Project and its users; it does not change your rights to use your own Contributions for any other purpose. + +You accept and agree to the following terms and conditions for Your present and future Contributions submitted to the Project. In return, the Project shall not use Your Contributions in a way that is contrary to the public benefit. Except for the license granted herein to the Project and recipients of software distributed by the Project, You reserve all right, title, and interest in and to Your Contributions. + +1. Definitions. + + "You" (or "Your") shall mean the copyright owner or legal entity authorized by the copyright owner that is making this Agreement with the Project. For legal entities, the entity making a Contribution and all other entities that control, are controlled by, or are under common control with that entity are considered to be a single Contributor. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "Contribution" shall mean any original work of authorship, including any modifications or additions to an existing work, that is intentionally submitted by You to the Project for inclusion in, or documentation of, any of the products owned or managed by the Project (the "Work"). For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Project or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Project for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by You as "Not a Contribution." + +2. Grant of Copyright License. Subject to the terms and conditions of this Agreement, You hereby grant to the Project and to recipients of software distributed by the Project a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, sublicense, and distribute Your Contributions and such derivative works. + +3. Grant of Patent License. Subject to the terms and conditions of this Agreement, You hereby grant to the Project and to recipients of software distributed by the Project a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by You that are necessarily infringed by Your Contribution(s) alone or by combination of Your Contribution(s) with the Work to which such Contribution(s) was submitted. 
If any entity institutes patent litigation against You or any other entity (including a cross-claim or counterclaim in a lawsuit) alleging that your Contribution, or the Work to which you have contributed, constitutes direct or contributory patent infringement, then any patent licenses granted to that entity under this Agreement for that Contribution or Work shall terminate as of the date such litigation is filed. + +4. You represent that you are legally entitled to grant the above license. If your employer(s) has rights to intellectual property that you create that includes your Contributions, you represent that you have received permission to make Contributions on behalf of that employer, that your employer has waived such rights for your Contributions to the Project, or that your employer has executed a separate Corporate CLA with the Project. + +5. You represent that each of Your Contributions is Your original creation (see section 7 for submissions on behalf of others). You represent that Your Contribution submissions include complete details of any third-party license or other restriction (including, but not limited to, related patents and trademarks) of which you are personally aware and which are associated with any part of Your Contributions. + +6. You are not expected to provide support for Your Contributions, except to the extent You desire to provide support. You may provide support for free, for a fee, or not at all. Unless required by applicable law or agreed to in writing, You provide Your Contributions on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. + +7. 
Should You wish to submit work that is not Your original creation, You may submit it to the Project separately from any Contribution, identifying the complete details of its source and of any license or other restriction (including, but not limited to, related patents, trademarks, and license agreements) of which you are personally aware, and conspicuously marking the work as "Submitted on behalf of a third-party: [named here]". + +8. You agree to notify the Project of any facts or circumstances of which you become aware that would make these representations inaccurate in any respect. + diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000..690da637c --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,7 @@ +# Contributing to Bucketeer + +We would ❤️ for you to contribute to Bucketeer and help make it better! Anyone can use, improve, and enjoy it! + +## Become a contributor + +Please follow our contribution guide [here](https://docs.bucketeer.io/contribution-guide). \ No newline at end of file diff --git a/DEPLOYMENT.md b/DEPLOYMENT.md new file mode 100644 index 000000000..810bf05eb --- /dev/null +++ b/DEPLOYMENT.md @@ -0,0 +1,3 @@ +# Deployment + +The deployment documentation is still in a work-in-progress state. We will update it soon. Stay tuned! diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..c7b29d0cf --- /dev/null +++ b/Makefile @@ -0,0 +1,295 @@ +############################# +# Variables +############################# + +# set output directory to CI cache path via environment variables +BZLFLAGS = +BUILD_FLAGS = +ifdef IS_CI + BAZEL_OUTPUT_BASE ?= ../bazel-cache + BZLFLAGS += --output_base ${BAZEL_OUTPUT_BASE} + BUILD_FLAGS += --action_env=DOCKER_HOST --remote_cache=${BAZEL_REMOTE_CACHE} --google_credentials=${BAZEL_REMOTE_CACHE_CREDENTIALS} +endif + +DELETED_PACKAGES := //proto/external/googleapis/googleapis/83e756a66b80b072bd234abcfe89edf459090974/google/rpc,//proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf +LOCAL_IMPORT_PATH := github.com/bucketeer-io/bucketeer + +############################# +# All +############################# + +.PHONY: shutdown +shutdown: + bazelisk shutdown + +############################# +# Run make commands on docker container +############################# + +# E.g. make docker-run CMD=proto-go +RUNNER_IMAGE = ghcr.io/bucketeer-io/bucketeer-runner:0.1.0 +DOCKER_REPO_PATH = /go/src/github.com/bucketeer-io/bucketeer +DOCKER_RUN_CMD = docker run -it --rm -v ${PWD}:${DOCKER_REPO_PATH} -w ${DOCKER_REPO_PATH} ${RUNNER_IMAGE} +.PHONY: docker-run +docker-run: + eval ${DOCKER_RUN_CMD} make $$CMD + +############################# +# Go +############################# + +.PHONY: all +all: gazelle gofmt-check proto-check update-repos-check lint build test + +# protoc-gen-go should be same version as https://github.com/bazelbuild/rules_go/blob/master/go/private/repositories.bzl +.PHONY: local-deps +local-deps: + mkdir -p ~/go-tools; \ + cd ~/go-tools; \ + if [ ! 
-e go.mod ]; then go mod init go-tools; fi; \ + go install golang.org/x/tools/cmd/goimports@latest; \ + go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.39.0; \ + go install github.com/golang/mock/mockgen@v1.6.0; \ + go install github.com/golang/protobuf/protoc-gen-go@v1.5.2; \ + go install github.com/nilslice/protolock/...@v0.15.0; \ + go install github.com/bazelbuild/buildtools/buildifier@5.1.0; \ + go get github.com/googleapis/googleapis; + +.PHONY: gazelle +gazelle: proto-go + bazelisk run ${BUILD_FLAGS} //:gazelle + +.PHONY: lint +lint: proto-go vendor + golangci-lint run ./cmd/... ./pkg/... ./hack/... ./test/... + +.PHONY: build +build: + bazelisk ${BZLFLAGS} build ${BUILD_FLAGS} --deleted_packages=${DELETED_PACKAGES} --workspace_status_command=$${PWD}/tools/build/status.sh \ + -k -- //cmd/... //pkg/... //proto/... //hack/... //test/... + +.PHONY: test +test: + bazelisk ${BZLFLAGS} test ${BUILD_FLAGS} -- //cmd/... //pkg/... //hack/... + +.PHONY: gofmt +gofmt: + goimports -local ${LOCAL_IMPORT_PATH} -w \ + $$(find . -path "./vendor" -prune -o -path "./proto" -prune -o -path "./bazel-cache" -prune -o -path "./bazel-proto" -prune -o -type f -name '*.go' -print) + +.PHONY: gofmt-check +gofmt-check: + test -z "$$(goimports -local ${LOCAL_IMPORT_PATH} -d \ + $$(find . 
-path "./vendor" -prune -o -path "./proto" -prune -o -path "./bazel-cache" -prune -o -path "./bazel-proto" -prune -o -type f -name '*.go' -print))" + +.PHONY: proto-check +proto-check: + make -C proto check + +.PHONY: proto-fmt +proto-fmt: + make -C proto fmt + +.PHONY: proto-fmt-check +proto-fmt-check: + make -C proto fmt-check + +.PHONY: proto-lock-check +proto-lock-check: + make -C proto lock-check + +.PHONY: proto-lock-commit +proto-lock-commit: + make -C proto lock-commit + +.PHONY: proto-lock-commit-force +proto-lock-commit-force: + make -C proto lock-commit-force + +.PHONY: proto-go +proto-go: + make -C proto go + +.PHONY: mockgen +mockgen: proto-go + find ./pkg -path "*mock*.go" -type f -delete + go generate -run="mockgen" ./pkg/... + make gofmt + +.PHONY: vendor +vendor: tidy-deps + go mod vendor + +.PHONY: update-repos +update-repos: tidy-deps + bazelisk run ${BUILD_FLAGS} //:gazelle -- update-repos -from_file=go.mod -to_macro=repositories.bzl%go_repositories -prune=true + +.PHONY: update-repos-check +update-repos-check: update-repos diff-check + +.PHONY: diff-check +diff-check: + test -z "$$(git diff --name-only)" + +.PHONY: tidy-deps +tidy-deps: + go mod tidy + +############################# +# UI/WEB +############################# + +.PHONY: build-ui-web-v2 +build-ui-web-v2: + bazelisk ${BZLFLAGS} build ${BUILD_FLAGS} -k --action_env=RELEASE_CHANNEL=prod -- //ui/web-v2:bundle + +.PHONY: build-ui-web-v2-image +build-ui-web-v2-image: + bazelisk ${BZLFLAGS} run ${BUILD_FLAGS} -k --action_env=RELEASE_CHANNEL=prod -- //ui/web-v2:bundle-image + +############################# +# Charts +############################# + +.PHONY: build-chart +build-chart: VERSION ?= $(shell git describe --tags --always --dirty --abbrev=7) +build-chart: + mkdir -p .artifacts + helm package manifests/bucketeer --version $(VERSION) --app-version $(VERSION) --dependency-update --destination .artifacts + +############################# +# Dev tool +############################# + 
+.PHONY: buildifier +buildifier: + buildifier --lint=fix \ + --warnings=-function-docstring,-function-docstring-header,-function-docstring-args,-function-docstring-return,-module-docstring,-skylark-docstring,-rule-impl-return \ + $$(find . -type d -name node_modules -prune -or -type f \( -iname '*.bazel' -or -iname '*.bzl' \) -print) + +.PHONY: buildifier-check +buildifier-check: + buildifier --mode=check --lint=warn \ + --warnings=-function-docstring,-function-docstring-header,-function-docstring-args,-function-docstring-return,-module-docstring,-skylark-docstring,-rule-impl-return \ + $$(find . -type d -name node_modules -prune -or -type f \( -iname '*.bazel' -or -iname '*.bzl' \) -print) + +############################# +# E2E for backend +############################# + +.PHONY: delete-e2e-data-mysql +delete-e2e-data-mysql: + bazelisk ${BZLFLAGS} run ${BUILD_FLAGS} //hack/delete-e2e-data-mysql:delete-e2e-data-mysql -- delete \ + --mysql-user=${MYSQL_USER} \ + --mysql-pass=${MYSQL_PASS} \ + --mysql-host=mysql-${ENV}.bucketeer.private \ + --mysql-port=3306 \ + --mysql-db-name=master \ + --test-id=${TEST_ID} \ + --no-profile \ + --no-gcp-trace-enabled + +.PHONY: generate-service-token +generate-service-token: + bazelisk ${BZLFLAGS} run ${BUILD_FLAGS} //hack/generate-service-token:generate-service-token -- generate \ + --issuer=${ISSUER} \ + --sub=service \ + --audience=bucketeer \ + --email=${EMAIL} \ + --role=OWNER \ + --key=${OAUTH_KEY_PATH} \ + --output=${SERVICE_TOKEN_PATH} \ + --no-profile \ + --no-gcp-trace-enabled + +.PHONY: create-api-key +create-api-key: + bazelisk ${BZLFLAGS} run ${BUILD_FLAGS} //hack/create-api-key:create-api-key -- create \ + --cert=${WEB_GATEWAY_CERT_PATH} \ + --web-gateway=${WEB_GATEWAY_URL}:443 \ + --service-token=${SERVICE_TOKEN_PATH} \ + --name=$$(date +%s) \ + --role=SDK \ + --output=${API_KEY_PATH} \ + --environment-namespace=${ENVIRONMENT_NAMESPACE} \ + --no-profile \ + --no-gcp-trace-enabled + +.PHONY: e2e-l4 +e2e-l4: + 
bazelisk ${BZLFLAGS} test ${BUILD_FLAGS} \ + --cache_test_results=no \ + --test_output=all \ + --test_timeout=500 \ + --verbose_test_summary \ + --test_arg=--web-gateway-addr=${WEB_GATEWAY_URL} \ + --test_arg=--web-gateway-port=443 \ + --test_arg=--web-gateway-cert=${WEB_GATEWAY_CERT_PATH} \ + --test_arg=--api-key=${API_KEY_PATH} \ + --test_arg=--gateway-addr=${GATEWAY_URL} \ + --test_arg=--gateway-port=9000 \ + --test_arg=--gateway-cert=${GATEWAY_CERT_PATH} \ + --test_arg=--service-token=${SERVICE_TOKEN_PATH} \ + --test_arg=--environment-namespace=${ENVIRONMENT_NAMESPACE} \ + --test_arg=--test-id=${TEST_ID} \ + //test/e2e/autoops:go_default_test //test/e2e/environment:go_default_test //test/e2e/feature:go_default_test //test/e2e/experiment:go_default_test //test/e2e/gateway:go_default_test //test/e2e/eventcounter:go_default_test //test/e2e/user:go_default_test //test/e2e/push:go_default_test //test/e2e/notification:go_default_test + +.PHONY: e2e +e2e: + bazelisk ${BZLFLAGS} test ${BUILD_FLAGS} \ + --cache_test_results=no \ + --test_output=all \ + --test_timeout=500 \ + --verbose_test_summary \ + --test_arg=--web-gateway-addr=${WEB_GATEWAY_URL} \ + --test_arg=--web-gateway-port=443 \ + --test_arg=--web-gateway-cert=${WEB_GATEWAY_CERT_PATH} \ + --test_arg=--api-key=${API_KEY_PATH} \ + --test_arg=--gateway-addr=${GATEWAY_URL} \ + --test_arg=--gateway-port=443 \ + --test_arg=--gateway-cert=${GATEWAY_CERT_PATH} \ + --test_arg=--service-token=${SERVICE_TOKEN_PATH} \ + --test_arg=--environment-namespace=${ENVIRONMENT_NAMESPACE} \ + --test_arg=--test-id=${TEST_ID} \ + //test/e2e/autoops:go_default_test //test/e2e/environment:go_default_test //test/e2e/feature:go_default_test //test/e2e/experiment:go_default_test //test/e2e/gateway:go_default_test //test/e2e/eventcounter:go_default_test //test/e2e/user:go_default_test //test/e2e/push:go_default_test //test/e2e/notification:go_default_test + +############################# +# Chores +############################# + +.PHONY: 
docker-gen +docker-gen: + rm -fr bazel-proto + cp -r $$(bazel info | grep bazel-bin | sed -E 's/bazel-bin: (.+)/\1/')/proto bazel-proto + docker run -it --rm \ + -v ${PWD}:/go/src/github.com/bucketeer-io/bucketeer \ + -w /go/src/github.com/bucketeer-io/bucketeer \ + --env DIR=/go/src/github.com/bucketeer-io/bucketeer \ + --env DESCRIPTOR_PATH=/go/src/github.com/bucketeer-io/bucketeer/bazel-proto \ + ghcr.io/bucketeer-io/bucketeer-runner:0.1.0 \ + bash tools/gen/gen.sh + +.PHONY: remove-bazel-output +remove-bazel-output: + bazelisk clean --expunge + +.PHONY: disable-expired-trial-projects +disable-expired-trial-projects: + bazelisk ${BZLFLAGS} run ${BUILD_FLAGS} //hack/disable-expired-trial-projects:disable-expired-trial-projects -- disable \ + --cert=${WEB_GATEWAY_CERT_PATH} \ + --service-token=${SERVICE_TOKEN_PATH} \ + --web-gateway=${WEB_GATEWAY_ADDR}:443 \ + --no-profile \ + --no-gcp-trace-enabled + +.PHONY: delete-user-data +delete-user-data: + bazelisk ${BZLFLAGS} run ${BUILD_FLAGS} //hack/delete-user-data:delete-user-data -- delete \ + --mysql-host=${MYSQL_HOST} \ + --mysql-port=${MYSQL_PORT} \ + --mysql-user=${MYSQL_USER} \ + --mysql-pass=${MYSQL_PASS} \ + --mysql-db-name=${MYSQL_DB_NAME} \ + --target-period=${TARGET_PERIOD} \ + --no-profile \ + --no-gcp-trace-enabled diff --git a/README.md b/README.md index 3ee91a24b..a2d7cb780 100644 --- a/README.md +++ b/README.md @@ -1 +1,92 @@ -# bucketeer \ No newline at end of file +

Maximize Development with Minimum Risk

+ +![Bucketeer Dashboard](/static-files/img/bucketeer-dashboard.png) + +[Bucketeer](https://bucketeer.io) is an open-source platform created by CyberAgent to help teams make better decisions, reduce deployment lead time and release risk through feature flags. +Bucketeer offers advanced features, such as dark launch and staged rollouts, that perform limited releases based on user attributes, devices, and other segments. + +We are preparing and making the installation process simpler so anyone can easily use it. Stay tuned for weekly updates! + +## Bucketeer Features + +### Feature Flags + +Feature Flags are a software development tool that ensures an efficient, low-risk release cycle by enabling or disabling features in real time without deploying new code.
+Bucketeer offers advanced features, such as dark launch and staged rollouts, that perform limited releases based on user attributes, devices, and other segments. + +With feature flags, you can continuously deploy new features that are still in development without making them visible to the users.
+This makes it possible to separate the deployment from the release, which allows teams to manage the feature's entire lifecycle. + +### A/B Testing + +A/B testing is an experimentation process to compare one or multiple versions of an application. It helps your team analyze what performs better and make better data-driven decisions without relying on intuition or personal experience. + +Bucketeer uses the Bayesian algorithm to analyze which variable of your A/B test is likely to perform better. Because it requires a smaller sample size, you can get results faster with lower experimentation costs than a Frequentist algorithm. + +### Trunk-based Development + +Trunk-based development reduces lead time, speeding up the process from code review to release with the use of feature flags. + +Developers can implement a new feature by disabling the flag and deploying it to the main branch at any time.
+This helps prevent merge conflicts caused by long-lived branches and reduces code review costs. + +This practice is essential for large teams to ensure that a shared branch is always releasable without delaying the QA time and affecting the user. + +### Supported SDKs + +Bucketeer supports various languages and runtimes, with more coming soon! + +#### Client-side + +- Android +- iOS +- Javascript +- Flutter + +#### Server-side + +- Go +- NodeJS + +## Documentation + +See our [documentation](https://docs.bucketeer.io). + +**Note**: The documentation is still a work in progress. We will update it every week. Stay tuned! + +## License + +Apache License 2.0, see [LICENSE](https://github.com/bucketeer-io/bucketeer/blob/master/LICENSE). + +## Contributing + +We would ❤️ for you to contribute to Bucketeer and help improve it! Anyone can use and enjoy it! + +Please follow our contribution guide [here](https://docs.bucketeer.io/contribution-guide/). + +Big thanks to all Bucketeer contributors who have helped us since the beginning! 
+ + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/WORKSPACE b/WORKSPACE new file mode 100644 index 000000000..e0434a493 --- /dev/null +++ b/WORKSPACE @@ -0,0 +1,144 @@ +workspace( + name = "bucketeer", + managed_directories = { + "@npm": ["node_modules"], + }, +) + +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") +load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository") + +############################################################################### +# Go +############################################################################### +http_archive( + name = "io_bazel_rules_go", + sha256 = "f2dcd210c7095febe54b804bb1cd3a58fe8435a909db2ec04e31542631cf715c", + urls = [ + "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.31.0/rules_go-v0.31.0.zip", + "https://github.com/bazelbuild/rules_go/releases/download/v0.31.0/rules_go-v0.31.0.zip", + ], +) + +load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies") + +go_rules_dependencies() + +load("@io_bazel_rules_go//extras:embed_data_deps.bzl", "go_embed_data_dependencies") + +go_embed_data_dependencies() + +go_register_toolchains(version = "1.17.2") + +http_archive( + name = "bazel_gazelle", + sha256 = "62ca106be173579c0a167deb23358fdfe71ffa1e4cfdddf5582af26520f1c66f", + urls = [ + "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.23.0/bazel-gazelle-v0.23.0.tar.gz", + "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.23.0/bazel-gazelle-v0.23.0.tar.gz", + ], +) + +############################################################################### +# Dependencies +############################################################################### +load("//:repositories.bzl", "go_repositories") + +# gazelle:repository_macro repositories.bzl%go_repositories +go_repositories() + +load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies") + +gazelle_dependencies() + 
+############################################################################### +# NodeJS +############################################################################### +http_archive( + name = "build_bazel_rules_nodejs", + sha256 = "ee3280a7f58aa5c1caa45cb9e08cbb8f4d74300848c508374daf37314d5390d6", + urls = ["https://github.com/bazelbuild/rules_nodejs/releases/download/5.5.1/rules_nodejs-5.5.1.tar.gz"], +) + +http_archive( + name = "rules_nodejs", + sha256 = "77cbc1989562c5b2268b293573deff30984ef06b129b40c36eff764af702fe2f", + urls = ["https://github.com/bazelbuild/rules_nodejs/releases/download/5.5.1/rules_nodejs-core-5.5.1.tar.gz"], +) + +load( + "@build_bazel_rules_nodejs//:index.bzl", + "node_repositories", + "yarn_install", +) + +node_repositories( + node_version = "16.15.1", + yarn_version = "1.22.19", +) + +yarn_install( + name = "npm-v2", + package_json = "//ui/web-v2:package.json", + yarn_lock = "//ui/web-v2:yarn.lock", +) + +############################################################################### +# Protobuf +############################################################################### +http_archive( + name = "com_google_protobuf", + sha256 = "b8ab9bbdf0c6968cf20060794bc61e231fae82aaf69d6e3577c154181991f576", + strip_prefix = "protobuf-3.18.1", + urls = ["https://github.com/protocolbuffers/protobuf/releases/download/v3.18.1/protobuf-all-3.18.1.tar.gz"], +) + +load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps") + +protobuf_deps() + +# Googleapis +http_archive( + name = "com_github_googleapis_googleapis", + build_file = "@//:BUILD.googleapis", + sha256 = "963c1126e22cfbc68a96d32f40fb76fad5ba51755a61e98c8cdfdfccd6d3354a", + strip_prefix = "googleapis-83e756a66b80b072bd234abcfe89edf459090974/", + urls = ["https://github.com/googleapis/googleapis/archive/83e756a66b80b072bd234abcfe89edf459090974.zip"], +) + +############################################################################### +# Docker 
+############################################################################### +http_archive( + name = "io_bazel_rules_docker", + sha256 = "92779d3445e7bdc79b961030b996cb0c91820ade7ffa7edca69273f404b085d5", + strip_prefix = "rules_docker-0.20.0", + urls = ["https://github.com/bazelbuild/rules_docker/releases/download/v0.20.0/rules_docker-v0.20.0.tar.gz"], +) + +load( + "@io_bazel_rules_docker//repositories:repositories.bzl", + container_repositories = "repositories", +) + +container_repositories() + +load("@io_bazel_rules_docker//repositories:deps.bzl", container_deps = "deps") + +container_deps() + +load("@io_bazel_rules_docker//container:container.bzl", "container_pull") + +container_pull( + name = "bucketeer-web-nginx", + registry = "ghcr.io", + repository = "bucketeer-io/bucketeer-web-nginx", + tag = "1.13.11-alpine", +) + +load( + "@io_bazel_rules_docker//go:image.bzl", + _go_image_repos = "repositories", +) + +_go_image_repos() diff --git a/cmd/account/BUILD.bazel b/cmd/account/BUILD.bazel new file mode 100644 index 000000000..0e7ab03bc --- /dev/null +++ b/cmd/account/BUILD.bazel @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") +load("@io_bazel_rules_docker//go:image.bzl", "go_image") + +go_library( + name = "go_default_library", + srcs = ["account.go"], + importpath = "github.com/bucketeer-io/bucketeer/cmd/account", + visibility = ["//visibility:private"], + deps = [ + "//pkg/account/cmd/apikeycacher:go_default_library", + "//pkg/account/cmd/server:go_default_library", + "//pkg/cli:go_default_library", + ], +) + +go_binary( + name = "account", + embed = [":go_default_library"], + pure = "on", + visibility = ["//visibility:public"], +) + +go_image( + name = "account_image", + binary = ":account", +) + +alias( + name = "account_image_tar", + actual = ":account_image.tar", +) diff --git a/cmd/account/account.go b/cmd/account/account.go new file mode 100644 index 000000000..b659a2282 --- /dev/null +++ b/cmd/account/account.go 
@@ -0,0 +1,43 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "log" + + "github.com/bucketeer-io/bucketeer/pkg/account/cmd/apikeycacher" + "github.com/bucketeer-io/bucketeer/pkg/account/cmd/server" + "github.com/bucketeer-io/bucketeer/pkg/cli" +) + +var ( + name = "bucketeer-account" + version = "" + build = "" +) + +func main() { + app := cli.NewApp(name, "A/B Testing Microservice", version, build) + registerCommands(app) + err := app.Run() + if err != nil { + log.Fatal(err) + } +} + +func registerCommands(app *cli.App) { + server.RegisterCommand(app, app) + apikeycacher.RegisterCommand(app, app) +} diff --git a/cmd/auditlog/BUILD.bazel b/cmd/auditlog/BUILD.bazel new file mode 100644 index 000000000..b804ee170 --- /dev/null +++ b/cmd/auditlog/BUILD.bazel @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") +load("@io_bazel_rules_docker//go:image.bzl", "go_image") + +go_library( + name = "go_default_library", + srcs = ["auditlog.go"], + importpath = "github.com/bucketeer-io/bucketeer/cmd/auditlog", + visibility = ["//visibility:private"], + deps = [ + "//pkg/auditlog/cmd/persister:go_default_library", + "//pkg/auditlog/cmd/server:go_default_library", + "//pkg/cli:go_default_library", + ], +) + +go_binary( + name = "auditlog", + embed = [":go_default_library"], + pure = "on", + visibility = ["//visibility:public"], +) + +go_image( + name = 
"auditlog_image", + binary = ":auditlog", +) + +alias( + name = "auditlog_image_tar", + actual = ":auditlog_image.tar", +) diff --git a/cmd/auditlog/auditlog.go b/cmd/auditlog/auditlog.go new file mode 100644 index 000000000..68f95f95b --- /dev/null +++ b/cmd/auditlog/auditlog.go @@ -0,0 +1,43 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "log" + + "github.com/bucketeer-io/bucketeer/pkg/auditlog/cmd/persister" + "github.com/bucketeer-io/bucketeer/pkg/auditlog/cmd/server" + "github.com/bucketeer-io/bucketeer/pkg/cli" +) + +var ( + name = "bucketeer-audit-log" + version = "" + build = "" +) + +func main() { + app := cli.NewApp(name, "A/B Testing Microservice", version, build) + registerCommands(app) + err := app.Run() + if err != nil { + log.Fatal(err) + } +} + +func registerCommands(app *cli.App) { + server.RegisterCommand(app, app) + persister.RegisterCommand(app, app) +} diff --git a/cmd/auth/BUILD.bazel b/cmd/auth/BUILD.bazel new file mode 100644 index 000000000..0604059c8 --- /dev/null +++ b/cmd/auth/BUILD.bazel @@ -0,0 +1,30 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") +load("@io_bazel_rules_docker//go:image.bzl", "go_image") + +go_library( + name = "go_default_library", + srcs = ["auth.go"], + importpath = "github.com/bucketeer-io/bucketeer/cmd/auth", + visibility = ["//visibility:private"], + deps = [ + "//pkg/auth/cmd/server:go_default_library", + 
"//pkg/cli:go_default_library", + ], +) + +go_binary( + name = "auth", + embed = [":go_default_library"], + pure = "on", + visibility = ["//visibility:public"], +) + +go_image( + name = "auth_image", + binary = ":auth", +) + +alias( + name = "auth_image_tar", + actual = ":auth_image.tar", +) diff --git a/cmd/auth/auth.go b/cmd/auth/auth.go new file mode 100644 index 000000000..847036152 --- /dev/null +++ b/cmd/auth/auth.go @@ -0,0 +1,41 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "log" + + "github.com/bucketeer-io/bucketeer/pkg/auth/cmd/server" + "github.com/bucketeer-io/bucketeer/pkg/cli" +) + +var ( + name = "bucketeer-auth" + version = "" + build = "" +) + +func main() { + app := cli.NewApp(name, "A/B Testing Microservice", version, build) + registerCommands(app) + err := app.Run() + if err != nil { + log.Fatal(err) + } +} + +func registerCommands(app *cli.App) { + server.RegisterCommand(app, app) +} diff --git a/cmd/autoops/BUILD.bazel b/cmd/autoops/BUILD.bazel new file mode 100644 index 000000000..953ef8f13 --- /dev/null +++ b/cmd/autoops/BUILD.bazel @@ -0,0 +1,35 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") +load("@io_bazel_rules_docker//go:image.bzl", "go_image") + +go_library( + name = "go_default_library", + srcs = ["autoops.go"], + importpath = "github.com/bucketeer-io/bucketeer/cmd/autoops", + visibility = ["//visibility:private"], + deps = [ + "//pkg/autoops/cmd/server:go_default_library", + "//pkg/cli:go_default_library", + "//pkg/ldflags:go_default_library", + ], +) + +go_binary( + name = "autoops", + embed = [":go_default_library"], + pure = "on", + visibility = ["//visibility:public"], + x_defs = { + "github.com/bucketeer-io/bucketeer/pkg/ldflags.Hash": "{HASH}", + "github.com/bucketeer-io/bucketeer/pkg/ldflags.BuildDate": "{BUILDDATE}", + }, +) + +go_image( + name = "autoops_image", + binary = ":autoops", +) + +alias( + name = "autoops_image_tar", + actual = ":autoops_image.tar", +) diff --git a/cmd/autoops/autoops.go b/cmd/autoops/autoops.go new file mode 100644 index 000000000..ca94fb173 --- /dev/null +++ b/cmd/autoops/autoops.go @@ -0,0 +1,42 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "log" + + "github.com/bucketeer-io/bucketeer/pkg/autoops/cmd/server" + "github.com/bucketeer-io/bucketeer/pkg/cli" + "github.com/bucketeer-io/bucketeer/pkg/ldflags" +) + +var ( + name = "bucketeer-auto-ops" + version = ldflags.BuildDate + build = ldflags.Hash +) + +func main() { + app := cli.NewApp(name, "A/B Testing Microservice", version, build) + registerCommands(app) + err := app.Run() + if err != nil { + log.Fatal(err) + } +} + +func registerCommands(app *cli.App) { + server.RegisterServerCommand(app, app) +} diff --git a/cmd/environment/BUILD.bazel b/cmd/environment/BUILD.bazel new file mode 100644 index 000000000..31e079056 --- /dev/null +++ b/cmd/environment/BUILD.bazel @@ -0,0 +1,30 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") +load("@io_bazel_rules_docker//go:image.bzl", "go_image") + +go_library( + name = "go_default_library", + srcs = ["environment.go"], + importpath = "github.com/bucketeer-io/bucketeer/cmd/environment", + visibility = ["//visibility:private"], + deps = [ + "//pkg/cli:go_default_library", + "//pkg/environment/cmd/server:go_default_library", + ], +) + +go_binary( + name = "environment", + embed = [":go_default_library"], + pure = "on", + visibility = ["//visibility:public"], +) + +go_image( + name = "environment_image", + binary = ":environment", +) + +alias( + name = "environment_image_tar", + actual = ":environment_image.tar", +) diff --git a/cmd/environment/environment.go b/cmd/environment/environment.go new file mode 100644 index 000000000..e6c46696b --- 
/dev/null +++ b/cmd/environment/environment.go @@ -0,0 +1,41 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "log" + + "github.com/bucketeer-io/bucketeer/pkg/cli" + "github.com/bucketeer-io/bucketeer/pkg/environment/cmd/server" +) + +var ( + name = "bucketeer-environment" + version = "" + build = "" +) + +func main() { + app := cli.NewApp(name, "A/B Testing Microservice", version, build) + registerCommands(app) + err := app.Run() + if err != nil { + log.Fatal(err) + } +} + +func registerCommands(app *cli.App) { + server.RegisterServerCommand(app, app) +} diff --git a/cmd/eventcounter/BUILD.bazel b/cmd/eventcounter/BUILD.bazel new file mode 100644 index 000000000..27c1b01c9 --- /dev/null +++ b/cmd/eventcounter/BUILD.bazel @@ -0,0 +1,30 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") +load("@io_bazel_rules_docker//go:image.bzl", "go_image") + +go_library( + name = "go_default_library", + srcs = ["eventcounter.go"], + importpath = "github.com/bucketeer-io/bucketeer/cmd/eventcounter", + visibility = ["//visibility:private"], + deps = [ + "//pkg/cli:go_default_library", + "//pkg/eventcounter/cmd/server:go_default_library", + ], +) + +go_binary( + name = "eventcounter", + embed = [":go_default_library"], + pure = "on", + visibility = ["//visibility:public"], +) + +go_image( + name = "eventcounter_image", + binary = ":eventcounter", +) + +alias( + name = 
"eventcounter_image_tar", + actual = ":eventcounter_image.tar", +) diff --git a/cmd/eventcounter/eventcounter.go b/cmd/eventcounter/eventcounter.go new file mode 100644 index 000000000..e3254446e --- /dev/null +++ b/cmd/eventcounter/eventcounter.go @@ -0,0 +1,41 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "log" + + "github.com/bucketeer-io/bucketeer/pkg/cli" + "github.com/bucketeer-io/bucketeer/pkg/eventcounter/cmd/server" +) + +var ( + name = "bucketeer-event-counter" + version = "" + build = "" +) + +func main() { + app := cli.NewApp(name, "A/B Testing Microservice", version, build) + registerCommands(app) + err := app.Run() + if err != nil { + log.Fatal(err) + } +} + +func registerCommands(app *cli.App) { + server.RegisterCommand(app, app) +} diff --git a/cmd/eventpersister/BUILD.bazel b/cmd/eventpersister/BUILD.bazel new file mode 100644 index 000000000..8b3824d93 --- /dev/null +++ b/cmd/eventpersister/BUILD.bazel @@ -0,0 +1,30 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") +load("@io_bazel_rules_docker//go:image.bzl", "go_image") + +go_library( + name = "go_default_library", + srcs = ["eventpersister.go"], + importpath = "github.com/bucketeer-io/bucketeer/cmd/eventpersister", + visibility = ["//visibility:private"], + deps = [ + "//pkg/cli:go_default_library", + "//pkg/eventpersister/cmd/server:go_default_library", + ], +) + +go_binary( + name = 
"eventpersister", + embed = [":go_default_library"], + pure = "on", + visibility = ["//visibility:public"], +) + +go_image( + name = "eventpersister_image", + binary = ":eventpersister", +) + +alias( + name = "eventpersister_image_tar", + actual = ":eventpersister_image.tar", +) diff --git a/cmd/eventpersister/eventpersister.go b/cmd/eventpersister/eventpersister.go new file mode 100644 index 000000000..1aeb82675 --- /dev/null +++ b/cmd/eventpersister/eventpersister.go @@ -0,0 +1,41 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "log" + + "github.com/bucketeer-io/bucketeer/pkg/cli" + "github.com/bucketeer-io/bucketeer/pkg/eventpersister/cmd/server" +) + +var ( + name = "bucketeer-event-persister" + version = "" + build = "" +) + +func main() { + app := cli.NewApp(name, "A/B Testing Microservice", version, build) + registerCommands(app) + err := app.Run() + if err != nil { + log.Fatal(err) + } +} + +func registerCommands(app *cli.App) { + server.RegisterServerCommand(app, app) +} diff --git a/cmd/experiment/BUILD.bazel b/cmd/experiment/BUILD.bazel new file mode 100644 index 000000000..2d058ffd1 --- /dev/null +++ b/cmd/experiment/BUILD.bazel @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") +load("@io_bazel_rules_docker//go:image.bzl", "go_image") + +go_library( + name = "go_default_library", + srcs = ["experiment.go"], + importpath = "github.com/bucketeer-io/bucketeer/cmd/experiment", + visibility = ["//visibility:private"], + deps = [ + "//pkg/cli:go_default_library", + "//pkg/experiment/cmd/batch:go_default_library", + "//pkg/experiment/cmd/server:go_default_library", + ], +) + +go_binary( + name = "experiment", + embed = [":go_default_library"], + pure = "on", + visibility = ["//visibility:public"], +) + +go_image( + name = "experiment_image", + binary = ":experiment", +) + +alias( + name = "experiment_image_tar", + actual = ":experiment_image.tar", +) diff --git a/cmd/experiment/experiment.go b/cmd/experiment/experiment.go new file mode 100644 index 000000000..9a8a986e5 --- /dev/null +++ b/cmd/experiment/experiment.go @@ -0,0 +1,43 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "log" + + "github.com/bucketeer-io/bucketeer/pkg/cli" + "github.com/bucketeer-io/bucketeer/pkg/experiment/cmd/batch" + "github.com/bucketeer-io/bucketeer/pkg/experiment/cmd/server" +) + +var ( + name = "bucketeer-experiment" + version = "" + build = "" +) + +func main() { + app := cli.NewApp(name, "A/B Testing Microservice", version, build) + registerCommands(app) + err := app.Run() + if err != nil { + log.Fatal(err) + } +} + +func registerCommands(app *cli.App) { + batch.RegisterCommand(app, app) + server.RegisterCommand(app, app) +} diff --git a/cmd/feature/BUILD.bazel b/cmd/feature/BUILD.bazel new file mode 100644 index 000000000..0e448a799 --- /dev/null +++ b/cmd/feature/BUILD.bazel @@ -0,0 +1,36 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") +load("@io_bazel_rules_docker//go:image.bzl", "go_image") +#load("@io_bazel_rules_docker//container:bundle.bzl", "container_bundle") + +go_library( + name = "go_default_library", + srcs = ["feature.go"], + importpath = "github.com/bucketeer-io/bucketeer/cmd/feature", + visibility = ["//visibility:private"], + deps = [ + "//pkg/cli:go_default_library", + "//pkg/feature/cmd/cacher:go_default_library", + "//pkg/feature/cmd/recorder:go_default_library", + "//pkg/feature/cmd/segmentpersister:go_default_library", + "//pkg/feature/cmd/server:go_default_library", + ], +) + +go_binary( + name = "feature", + embed = [":go_default_library"], + pure = "on", + visibility = ["//visibility:public"], +) + +go_image( + name = "feature_image", + binary = ":feature", +) + 
+# needed to make bazel build //cmd/feature:... build the docker image which has rule //cmd/feature:feature.tar +# it seems it is not built because nothing depends on it. +alias( + name = "feature_image_tar", + actual = ":feature_image.tar", +) diff --git a/cmd/feature/feature.go b/cmd/feature/feature.go new file mode 100644 index 000000000..5eac744b0 --- /dev/null +++ b/cmd/feature/feature.go @@ -0,0 +1,47 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "log" + + "github.com/bucketeer-io/bucketeer/pkg/cli" + cacher "github.com/bucketeer-io/bucketeer/pkg/feature/cmd/cacher" + "github.com/bucketeer-io/bucketeer/pkg/feature/cmd/recorder" + "github.com/bucketeer-io/bucketeer/pkg/feature/cmd/segmentpersister" + "github.com/bucketeer-io/bucketeer/pkg/feature/cmd/server" +) + +var ( + name = "bucketeer-feature" + version = "" + build = "" +) + +func main() { + app := cli.NewApp(name, "A/B Testing Microservice", version, build) + registerCommands(app) + err := app.Run() + if err != nil { + log.Fatal(err) + } +} + +func registerCommands(app *cli.App) { + server.RegisterServerCommand(app, app) + cacher.RegisterCommand(app, app) + recorder.RegisterCommand(app, app) + segmentpersister.RegisterCommand(app, app) +} diff --git a/cmd/gateway/BUILD.bazel b/cmd/gateway/BUILD.bazel new file mode 100644 index 000000000..ce88bb2cd --- /dev/null +++ b/cmd/gateway/BUILD.bazel @@ -0,0 +1,30 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") +load("@io_bazel_rules_docker//go:image.bzl", "go_image") + +go_library( + name = "go_default_library", + srcs = ["gateway.go"], + importpath = "github.com/bucketeer-io/bucketeer/cmd/gateway", + visibility = ["//visibility:private"], + deps = [ + "//pkg/cli:go_default_library", + "//pkg/gateway/cmd:go_default_library", + ], +) + +go_binary( + name = "gateway", + embed = [":go_default_library"], + pure = "on", + visibility = ["//visibility:public"], +) + +go_image( + name = "gateway_image", + binary = ":gateway", +) + +alias( + name = "gateway_image_tar", + actual = ":gateway_image.tar", +) diff --git a/cmd/gateway/gateway.go b/cmd/gateway/gateway.go new file mode 100644 index 000000000..d70a6b224 --- /dev/null +++ b/cmd/gateway/gateway.go @@ -0,0 +1,41 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "log" + + "github.com/bucketeer-io/bucketeer/pkg/cli" + "github.com/bucketeer-io/bucketeer/pkg/gateway/cmd" +) + +var ( + name = "bucketeer-gateway" + version = "" + build = "" +) + +func main() { + app := cli.NewApp(name, "A/B Testing Microservice", version, build) + registerCommands(app) + err := app.Run() + if err != nil { + log.Fatal(err) + } +} + +func registerCommands(app *cli.App) { + cmd.RegisterCommand(app, app) +} diff --git a/cmd/goalbatch/BUILD.bazel b/cmd/goalbatch/BUILD.bazel new file mode 100644 index 000000000..a5bc11a82 --- /dev/null +++ b/cmd/goalbatch/BUILD.bazel @@ -0,0 +1,30 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") +load("@io_bazel_rules_docker//go:image.bzl", "go_image") + +go_library( + name = "go_default_library", + srcs = ["goalbatch.go"], + importpath = "github.com/bucketeer-io/bucketeer/cmd/goalbatch", + visibility = ["//visibility:private"], + deps = [ + "//pkg/cli:go_default_library", + "//pkg/goalbatch/cmd/transformer:go_default_library", + ], +) + +go_binary( + name = "goalbatch", + embed = [":go_default_library"], + pure = "on", + visibility = ["//visibility:public"], +) + +go_image( + name = "goalbatch_image", + binary = ":goalbatch", +) + +alias( + name = "goalbatch_image_tar", + actual = ":goalbatch_image.tar", +) diff --git a/cmd/goalbatch/goalbatch.go b/cmd/goalbatch/goalbatch.go new file mode 100644 index 000000000..2ee6ac7a6 --- /dev/null +++ b/cmd/goalbatch/goalbatch.go @@ -0,0 +1,41 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "log" + + "github.com/bucketeer-io/bucketeer/pkg/cli" + "github.com/bucketeer-io/bucketeer/pkg/goalbatch/cmd/transformer" +) + +var ( + name = "bucketeer-goal-batch" + version = "" + build = "" +) + +func main() { + app := cli.NewApp(name, "A/B Testing Microservice", version, build) + registerCommands(app) + err := app.Run() + if err != nil { + log.Fatal(err) + } +} + +func registerCommands(app *cli.App) { + transformer.RegisterCommand(app, app) +} diff --git a/cmd/metricsevent/BUILD.bazel b/cmd/metricsevent/BUILD.bazel new file mode 100644 index 000000000..d408b1b68 --- /dev/null +++ b/cmd/metricsevent/BUILD.bazel @@ -0,0 +1,35 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") +load("@io_bazel_rules_docker//go:image.bzl", "go_image") + +go_library( + name = "go_default_library", + srcs = ["metricsevent.go"], + importpath = "github.com/bucketeer-io/bucketeer/cmd/metricsevent", + visibility = ["//visibility:private"], + deps = [ + "//pkg/cli:go_default_library", + "//pkg/ldflags:go_default_library", + "//pkg/metricsevent/cmd/persister:go_default_library", + ], +) + +go_binary( + name = "metricsevent", + embed = [":go_default_library"], + pure = "on", + visibility = ["//visibility:public"], + x_defs = { + "github.com/bucketeer-io/bucketeer/pkg/ldflags.Hash": "{HASH}", + "github.com/bucketeer-io/bucketeer/pkg/ldflags.BuildDate": "{BUILDDATE}", + }, +) + +go_image( 
+ name = "metricsevent_image", + binary = ":metricsevent", +) + +alias( + name = "metricsevent_image_tar", + actual = ":metricsevent_image.tar", +) diff --git a/cmd/metricsevent/metricsevent.go b/cmd/metricsevent/metricsevent.go new file mode 100644 index 000000000..dbb777599 --- /dev/null +++ b/cmd/metricsevent/metricsevent.go @@ -0,0 +1,42 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "log" + + "github.com/bucketeer-io/bucketeer/pkg/cli" + "github.com/bucketeer-io/bucketeer/pkg/ldflags" + "github.com/bucketeer-io/bucketeer/pkg/metricsevent/cmd/persister" +) + +var ( + name = "bucketeer-metrics-event" + version = ldflags.BuildDate + build = ldflags.Hash +) + +func main() { + app := cli.NewApp(name, "A/B Testing Microservice", version, build) + registerCommands(app) + err := app.Run() + if err != nil { + log.Fatal(err) + } +} + +func registerCommands(app *cli.App) { + persister.RegisterCommand(app, app) +} diff --git a/cmd/migration/BUILD.bazel b/cmd/migration/BUILD.bazel new file mode 100644 index 000000000..599932a1e --- /dev/null +++ b/cmd/migration/BUILD.bazel @@ -0,0 +1,29 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") +load("@io_bazel_rules_docker//go:image.bzl", "go_image") + +go_library( + name = "go_default_library", + srcs = ["migration.go"], + importpath = "github.com/bucketeer-io/bucketeer/cmd/migration", + visibility = ["//visibility:private"], + 
deps = [ + "//pkg/cli:go_default_library", + "//pkg/migration/cmd/mysqlserver:go_default_library", + ], +) + +go_binary( + name = "migration", + embed = [":go_default_library"], + visibility = ["//visibility:public"], +) + +go_image( + name = "migration_image", + binary = ":migration", +) + +alias( + name = "migration_image_tar", + actual = ":migration_image.tar", +) diff --git a/cmd/migration/migration.go b/cmd/migration/migration.go new file mode 100644 index 000000000..c73a6a91c --- /dev/null +++ b/cmd/migration/migration.go @@ -0,0 +1,41 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "log" + + "github.com/bucketeer-io/bucketeer/pkg/cli" + "github.com/bucketeer-io/bucketeer/pkg/migration/cmd/mysqlserver" +) + +var ( + name = "bucketeer-migration" + version = "" + build = "" +) + +func main() { + app := cli.NewApp(name, "A/B Testing Microservice", version, build) + registerCommands(app) + err := app.Run() + if err != nil { + log.Fatal(err) + } +} + +func registerCommands(app *cli.App) { + mysqlserver.RegisterServerCommand(app, app) +} diff --git a/cmd/notification/BUILD.bazel b/cmd/notification/BUILD.bazel new file mode 100644 index 000000000..d253d0504 --- /dev/null +++ b/cmd/notification/BUILD.bazel @@ -0,0 +1,32 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") +load("@io_bazel_rules_docker//go:image.bzl", "go_image") + +go_library( + name = "go_default_library", + srcs = ["notification.go"], + importpath = "github.com/bucketeer-io/bucketeer/cmd/notification", + visibility = ["//visibility:private"], + deps = [ + "//pkg/cli:go_default_library", + "//pkg/ldflags:go_default_library", + "//pkg/notification/cmd/sender:go_default_library", + "//pkg/notification/cmd/server:go_default_library", + ], +) + +go_binary( + name = "notification", + embed = [":go_default_library"], + pure = "on", + visibility = ["//visibility:public"], +) + +go_image( + name = "notification_image", + binary = ":notification", +) + +alias( + name = "notification_image_tar", + actual = ":notification_image.tar", +) diff --git a/cmd/notification/notification.go b/cmd/notification/notification.go new file mode 100644 index 000000000..497faccbb --- /dev/null +++ b/cmd/notification/notification.go @@ -0,0 +1,44 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "log" + + "github.com/bucketeer-io/bucketeer/pkg/cli" + "github.com/bucketeer-io/bucketeer/pkg/ldflags" + "github.com/bucketeer-io/bucketeer/pkg/notification/cmd/sender" + "github.com/bucketeer-io/bucketeer/pkg/notification/cmd/server" +) + +var ( + name = "bucketeer-notification" + version = ldflags.BuildDate + build = ldflags.Hash +) + +func main() { + app := cli.NewApp(name, "A/B Testing Microservice", version, build) + registerCommands(app) + err := app.Run() + if err != nil { + log.Fatal(err) + } +} + +func registerCommands(app *cli.App) { + sender.RegisterCommand(app, app) + server.RegisterCommand(app, app) +} diff --git a/cmd/opsevent/BUILD.bazel b/cmd/opsevent/BUILD.bazel new file mode 100644 index 000000000..25f37293c --- /dev/null +++ b/cmd/opsevent/BUILD.bazel @@ -0,0 +1,35 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") +load("@io_bazel_rules_docker//go:image.bzl", "go_image") + +go_library( + name = "go_default_library", + srcs = ["opsevent.go"], + importpath = "github.com/bucketeer-io/bucketeer/cmd/opsevent", + visibility = ["//visibility:private"], + deps = [ + "//pkg/cli:go_default_library", + "//pkg/ldflags:go_default_library", + "//pkg/opsevent/cmd/batch:go_default_library", + ], +) + +go_binary( + name = "opsevent", + embed = [":go_default_library"], + pure = "on", + visibility = ["//visibility:public"], + x_defs = { + "github.com/bucketeer-io/bucketeer/pkg/ldflags.Hash": "{HASH}", + "github.com/bucketeer-io/bucketeer/pkg/ldflags.BuildDate": "{BUILDDATE}", + }, +) + +go_image( + 
name = "opsevent_image", + binary = ":opsevent", +) + +alias( + name = "opsevent_image_tar", + actual = ":opsevent_image.tar", +) diff --git a/cmd/opsevent/opsevent.go b/cmd/opsevent/opsevent.go new file mode 100644 index 000000000..8600e51bf --- /dev/null +++ b/cmd/opsevent/opsevent.go @@ -0,0 +1,42 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "log" + + "github.com/bucketeer-io/bucketeer/pkg/cli" + "github.com/bucketeer-io/bucketeer/pkg/ldflags" + "github.com/bucketeer-io/bucketeer/pkg/opsevent/cmd/batch" +) + +var ( + name = "bucketeer-ops-event" + version = ldflags.BuildDate + build = ldflags.Hash +) + +func main() { + app := cli.NewApp(name, "A/B Testing Microservice", version, build) + registerCommands(app) + err := app.Run() + if err != nil { + log.Fatal(err) + } +} + +func registerCommands(app *cli.App) { + batch.RegisterCommand(app, app) +} diff --git a/cmd/push/BUILD.bazel b/cmd/push/BUILD.bazel new file mode 100644 index 000000000..9dfaa5c5b --- /dev/null +++ b/cmd/push/BUILD.bazel @@ -0,0 +1,32 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") +load("@io_bazel_rules_docker//go:image.bzl", "go_image") + +go_library( + name = "go_default_library", + srcs = ["push.go"], + importpath = "github.com/bucketeer-io/bucketeer/cmd/push", + visibility = ["//visibility:private"], + deps = [ + "//pkg/cli:go_default_library", + "//pkg/ldflags:go_default_library", + 
"//pkg/push/cmd/sender:go_default_library", + "//pkg/push/cmd/server:go_default_library", + ], +) + +go_binary( + name = "push", + embed = [":go_default_library"], + pure = "on", + visibility = ["//visibility:public"], +) + +go_image( + name = "push_image", + binary = ":push", +) + +alias( + name = "push_image_tar", + actual = ":push_image.tar", +) diff --git a/cmd/push/push.go b/cmd/push/push.go new file mode 100644 index 000000000..0d12fbce7 --- /dev/null +++ b/cmd/push/push.go @@ -0,0 +1,44 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "log" + + "github.com/bucketeer-io/bucketeer/pkg/cli" + "github.com/bucketeer-io/bucketeer/pkg/ldflags" + "github.com/bucketeer-io/bucketeer/pkg/push/cmd/sender" + "github.com/bucketeer-io/bucketeer/pkg/push/cmd/server" +) + +var ( + name = "bucketeer-push" + version = ldflags.BuildDate + build = ldflags.Hash +) + +func main() { + app := cli.NewApp(name, "A/B Testing Microservice", version, build) + registerCommands(app) + err := app.Run() + if err != nil { + log.Fatal(err) + } +} + +func registerCommands(app *cli.App) { + server.RegisterCommand(app, app) + sender.RegisterCommand(app, app) +} diff --git a/cmd/user/BUILD.bazel b/cmd/user/BUILD.bazel new file mode 100644 index 000000000..2ccd3d360 --- /dev/null +++ b/cmd/user/BUILD.bazel @@ -0,0 +1,36 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") +load("@io_bazel_rules_docker//go:image.bzl", "go_image") + +go_library( + name = "go_default_library", + srcs = ["user.go"], + importpath = "github.com/bucketeer-io/bucketeer/cmd/user", + visibility = ["//visibility:private"], + deps = [ + "//pkg/cli:go_default_library", + "//pkg/ldflags:go_default_library", + "//pkg/user/cmd/persister:go_default_library", + "//pkg/user/cmd/server:go_default_library", + ], +) + +go_binary( + name = "user", + embed = [":go_default_library"], + pure = "on", + visibility = ["//visibility:public"], + x_defs = { + "github.com/bucketeer-io/bucketeer/pkg/ldflags.Hash": "{HASH}", + "github.com/bucketeer-io/bucketeer/pkg/ldflags.BuildDate": "{BUILDDATE}", + }, +) + +go_image( + name = "user_image", + binary = ":user", +) + +alias( + name = "user_image_tar", + actual = ":user_image.tar", +) diff --git a/cmd/user/user.go b/cmd/user/user.go new file mode 100644 index 000000000..9ade3ae7c --- /dev/null +++ b/cmd/user/user.go @@ -0,0 +1,44 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "log" + + "github.com/bucketeer-io/bucketeer/pkg/cli" + "github.com/bucketeer-io/bucketeer/pkg/ldflags" + "github.com/bucketeer-io/bucketeer/pkg/user/cmd/persister" + "github.com/bucketeer-io/bucketeer/pkg/user/cmd/server" +) + +var ( + name = "bucketeer-user" + version = ldflags.BuildDate + build = ldflags.Hash +) + +func main() { + app := cli.NewApp(name, "A/B Testing Microservice", version, build) + registerCommands(app) + err := app.Run() + if err != nil { + log.Fatal(err) + } +} + +func registerCommands(app *cli.App) { + persister.RegisterCommand(app, app) + server.RegisterCommand(app, app) +} diff --git a/go.mod b/go.mod new file mode 100644 index 000000000..b26f3448f --- /dev/null +++ b/go.mod @@ -0,0 +1,119 @@ +module github.com/bucketeer-io/bucketeer + +go 1.17 + +require ( + cloud.google.com/go/alloydbconn v0.2.1 + cloud.google.com/go/bigtable v1.0.0 + cloud.google.com/go/kms v1.4.0 + cloud.google.com/go/profiler v0.3.0 + cloud.google.com/go/pubsub v1.3.1 + contrib.go.opencensus.io/exporter/stackdriver v0.8.0 + github.com/Shopify/sarama v1.27.0 + github.com/VividCortex/mysqlerr v1.0.0 + github.com/blang/semver v3.5.1+incompatible + github.com/ca-dp/godruid v0.0.0-20210401093507-918893fdd0d7 + github.com/coreos/go-oidc v2.1.0+incompatible + github.com/go-redis/redis v6.15.2+incompatible + github.com/go-sql-driver/mysql v1.6.0 + github.com/golang-migrate/migrate/v4 v4.11.0 
+ github.com/golang/mock v1.6.0 + github.com/golang/protobuf v1.5.2 + github.com/gomodule/redigo v2.0.0+incompatible + github.com/itchyny/gojq v0.12.5 + github.com/lib/pq v1.10.2 + github.com/mna/redisc v1.1.2 + github.com/nicksnyder/go-i18n/v2 v2.2.0 + github.com/prometheus/client_golang v1.2.1 + github.com/robfig/cron v0.0.0-20171101201047-2315d5715e36 + github.com/slack-go/slack v0.6.4 + github.com/stretchr/testify v1.7.0 + github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c + go.opencensus.io v0.23.0 + go.uber.org/zap v1.13.0 + golang.org/x/oauth2 v0.0.0-20220808172628-8227340efae7 + golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f + golang.org/x/text v0.3.7 + golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9 + google.golang.org/genproto v0.0.0-20220812140447-cec7f5303424 + google.golang.org/grpc v1.48.0 + google.golang.org/protobuf v1.28.1 + gopkg.in/alecthomas/kingpin.v2 v2.2.6 + gopkg.in/square/go-jose.v2 v2.4.0 + gopkg.in/yaml.v2 v2.3.0 +) + +require ( + cloud.google.com/go v0.103.0 // indirect + cloud.google.com/go/compute v1.8.0 // indirect + cloud.google.com/go/iam v0.3.0 // indirect + cloud.google.com/go/monitoring v1.6.0 // indirect + cloud.google.com/go/storage v1.25.0 // indirect + cloud.google.com/go/trace v1.2.0 // indirect + github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect + github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 // indirect + github.com/aws/aws-sdk-go v1.17.7 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.1.1 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/eapache/go-resiliency v1.2.0 // indirect + github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect + github.com/eapache/queue v1.1.0 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/snappy v0.0.3 // indirect + github.com/google/go-cmp v0.5.8 // indirect + 
github.com/google/go-github v17.0.0+incompatible // indirect + github.com/google/go-querystring v1.0.0 // indirect + github.com/google/pprof v0.0.0-20220412212628-83db2b799d1f // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect + github.com/googleapis/gax-go/v2 v2.5.1 // indirect + github.com/gorilla/websocket v1.2.0 // indirect + github.com/hashicorp/errwrap v1.0.0 // indirect + github.com/hashicorp/go-multierror v1.1.0 // indirect + github.com/hashicorp/go-uuid v1.0.2 // indirect + github.com/itchyny/timefmt-go v0.1.3 // indirect + github.com/jackc/chunkreader/v2 v2.0.1 // indirect + github.com/jackc/pgconn v1.12.1 // indirect + github.com/jackc/pgio v1.0.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgproto3/v2 v2.3.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect + github.com/jackc/pgtype v1.11.0 // indirect + github.com/jackc/pgx/v4 v4.16.1 // indirect + github.com/jcmturner/gofork v1.0.0 // indirect + github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af // indirect + github.com/klauspost/compress v1.10.10 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/onsi/ginkgo v1.10.1 // indirect + github.com/onsi/gomega v1.7.0 // indirect + github.com/pierrec/lz4 v2.5.2+incompatible // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/pquerna/cachecontrol v0.1.0 // indirect + github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 // indirect + github.com/prometheus/common v0.7.0 // indirect + github.com/prometheus/procfs v0.0.5 // indirect + github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect + github.com/xdg/stringprep v1.0.0 // indirect + go.uber.org/atomic v1.6.0 // indirect + go.uber.org/multierr v1.5.0 // indirect + golang.org/x/crypto 
v0.0.0-20210711020723-a769d52b0f97 // indirect + golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect + golang.org/x/net v0.0.0-20220812174116-3211cb980234 // indirect + golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab // indirect + golang.org/x/tools v0.1.5 // indirect + golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect + google.golang.org/api v0.92.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect + gopkg.in/jcmturner/dnsutils.v1 v1.0.1 // indirect + gopkg.in/jcmturner/gokrb5.v7 v7.5.0 // indirect + gopkg.in/jcmturner/rpc.v1 v1.1.0 // indirect + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect +) + +replace ( + google.golang.org/grpc v1.36.0 => google.golang.org/grpc v1.29.1 + k8s.io/code-generator => k8s.io/code-generator v0.0.0-20191003035328-700b1226c0bd +) diff --git a/go.sum b/go.sum new file mode 100644 index 000000000..7bf35413a --- /dev/null +++ b/go.sum @@ -0,0 +1,1163 @@ +cloud.google.com/go v0.23.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod 
h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= 
+cloud.google.com/go v0.103.0 h1:YXtxp9ymmZjlGzxV7VrYQ8aaQuAgcqxSy6YhDX4I458= +cloud.google.com/go v0.103.0/go.mod h1:vwLx1nqLrzLX/fpwSMOXmFIqBOyHsvHbnAdbGSJ+mKk= +cloud.google.com/go/alloydbconn v0.2.1 h1:p46IfIof8huGKXzQXKqI/11M36EAbrt7DjxMh3yNNvE= +cloud.google.com/go/alloydbconn v0.2.1/go.mod h1:C7OMWtEDDHA4OCUKXCvvVTprIgdRNJr2239DGMgz8TA= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigtable v1.0.0 h1:2DCxzxiuoWubL6J0yl4rUtFtIJAX566mcefQS3xy6us= +cloud.google.com/go/bigtable v1.0.0/go.mod h1:N+NeT8ICfOM1Ek4CeP03mSESb2x+qLkSC0+CRBEsvAA= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.8.0 h1:NLtR56/eKx9K1s2Tw/4hec2vsU1S3WeKRMj8HXbBo6E= +cloud.google.com/go/compute v1.8.0/go.mod h1:boQ44qJsMqZjKzzsEkoJWQGj4h8ygmyk17UArClWzmg= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= 
+cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= +cloud.google.com/go/iam v0.3.0 h1:exkAomrVUuzx9kWFI1wm3KI0uoDeUFPB4kKGzx6x+Gc= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/kms v1.4.0 h1:iElbfoE61VeLhnZcGOltqL8HIly8Nhbe5t6JlH9GXjo= +cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= +cloud.google.com/go/monitoring v1.6.0 h1:+x5AA2mFkiHK/ySN6NWKbeKBV+Z/DN+h51kBzcW08zU= +cloud.google.com/go/monitoring v1.6.0/go.mod h1:w+OY1TYCk4MtvY7WfEHlIp5mP8SV/gDSqOsvGhVa2KM= +cloud.google.com/go/profiler v0.3.0 h1:R6y/xAeifaUXxd2x6w+jIwKxoKl8Cv5HJvcvASTPWJo= +cloud.google.com/go/profiler v0.3.0/go.mod h1:9wYk9eY4iZHsev8TQb61kh3wiOiSyz/xOYixWPzweCU= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1 h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/spanner v1.2.0/go.mod h1:LfwGAsK42Yz8IeLsd/oagGFBqTXt3xVWtm8/KD2vrEI= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.25.0 
h1:D2Dn0PslpK7Z3B2AvuUHyIC762bDbGJdlmQlCBR71os= +cloud.google.com/go/storage v1.25.0/go.mod h1:Qys4JU+jeup3QnuKKAosWuxrD95C4MSqxfVDnSirDsI= +cloud.google.com/go/trace v1.2.0 h1:oIaB4KahkIUOpLSAAjEJ8y2desbjY/x/RfP4O3KAtTI= +cloud.google.com/go/trace v1.2.0/go.mod h1:Wc8y/uYyOhPy12KEnXG9XGrvfMz5F5SrYecQlbW1rwM= +contrib.go.opencensus.io/exporter/stackdriver v0.8.0 h1:HBofEuVSbRgTTAQmE8y9skbElwGgBs1ecH7fxJE7Nrg= +contrib.go.opencensus.io/exporter/stackdriver v0.8.0/go.mod h1:hNe5qQofPbg6bLQY5wHCvQ7o+2E5P8PkegEuQ+MyRw0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU= +github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/ClickHouse/clickhouse-go v1.3.12/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= +github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= 
+github.com/Shopify/sarama v1.27.0 h1:tqo2zmyzPf1+gwTTwhI6W+EXDw4PVSczynpHKFtVAmo= +github.com/Shopify/sarama v1.27.0/go.mod h1:aCdj6ymI8uyPEux1JJ9gcaDT6cinjGhNCAhs54taSUo= +github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/VividCortex/mysqlerr v1.0.0 h1:5pZ2TZA+YnzPgzBfiUWGqWmKDVNBdrkf9g+DNe1Tiq8= +github.com/VividCortex/mysqlerr v1.0.0/go.mod h1:xERx8E4tBhLvpjzdUyQiSfUxeMcATEQrflDAfXsqcAE= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/aws/aws-sdk-go v1.15.31/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/aws/aws-sdk-go v1.17.7 h1:/4+rDPe0W95KBmNGYCG+NUvdL8ssPYBMxL+aSCg6nIA= +github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= 
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= +github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4= +github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/ca-dp/godruid v0.0.0-20210401093507-918893fdd0d7 h1:aWiNqopVwXEC1+HzANRIMYWXVsiU5LDAVDL6XMpkJJ0= +github.com/ca-dp/godruid v0.0.0-20210401093507-918893fdd0d7/go.mod h1:r1eDVkcZjba2oTmlHP73BYjH+G9lRkjQ6CtzNSdPpiY= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod 
h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= +github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/cockroachdb/cockroach-go v0.0.0-20190925194419-606b3d062051/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= +github.com/containerd/containerd v1.3.3 h1:LoIzb5y9x5l8VKAlyrbusNPXqBY0+kviRloxFUMFwKc= +github.com/containerd/containerd v1.3.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/coreos/go-oidc v2.1.0+incompatible h1:sdJrfw8akMnCuUlaZU3tE/uYXFgfqom8DBE9so9EBsM= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty 
v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= +github.com/dhui/dktest v0.3.2 h1:nZSDcnkpbotzT/nEHNsO+JCKY8i1Qoki1AYOpeLRb6M= +github.com/dhui/dktest v0.3.2/go.mod h1:l1/ib23a/CmxAe7yixtrYPc8Iy90Zy2udyaHINM5p58= +github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v1.4.2-0.20200213202729-31a86c4ab209 h1:tmV+YbYOUAYDmAiamzhRKqQXaAUyUY2xVt27Rv7rCzA= +github.com/docker/docker v1.4.2-0.20200213202729-31a86c4ab209/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q= +github.com/eapache/go-resiliency v1.2.0/go.mod 
h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/frankban/quicktest 
v1.10.0 h1:Gfh+GAJZOAoKZsIZeZbdn2JF10kN1XHNvjsvQK8gVkE= +github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsouza/fake-gcs-server v1.17.0/go.mod h1:D1rTE4YCyHFNa99oyJJ5HyclvN/0uQR+pM/VdlL83bw= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-redis/redis v6.15.2+incompatible h1:9SpNVG76gr6InJGxoZ6IuuxaCOQwDAhzyXg+Bs+0Sb4= +github.com/go-redis/redis v6.15.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack 
v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-test/deep v1.0.4 h1:u2CU3YKy9I2pmu9pX0eq50wCgjfGIt539SqR7FbHiho= +github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM= +github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0= +github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang-migrate/migrate/v4 v4.11.0 h1:uqtd0ysK5WyBQ/T1K2uDIooJV0o2Obt6uPwP062DupQ= +github.com/golang-migrate/migrate/v4 v4.11.0/go.mod h1:nqbpDbckcYjsCD5I8q5+NI9Tkk7SVcmaF40Ax1eAWhg= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod 
h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= 
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0= +github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod 
h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20220412212628-83db2b799d1f h1:VrKTY4lquiy1oJzVZgXrauku9Jx9P+POv/gTLakG4Wk= +github.com/google/pprof v0.0.0-20220412212628-83db2b799d1f/go.mod h1:Pt31oes+eGImORns3McJn8zHefuQl2rG8l6xQjGYB4U= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod 
h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0 h1:zO8WHNx/MYiAKJ3d5spxZXZE6KHmIQGQcAzwUzV7qQw= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU= +github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1 h1:kBRZU0PSuI7PspsSb/ChWoVResUcwNVIdpB049pKTiw= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v1.2.0 h1:VJtLvh6VQym50czpZzx07z/kw9EgAxI3x1ZB8taTMQQ= +github.com/gorilla/websocket v1.2.0/go.mod 
h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= +github.com/itchyny/go-flags v1.5.0/go.mod h1:lenkYuCobuxLBAd/HGFE4LRoW8D3B6iXRQfWYJ+MNbA= +github.com/itchyny/gojq v0.12.5 h1:6SJ1BQ1VAwJAlIvLSIZmqHP/RUEq3qfVWvsRxrqhsD0= +github.com/itchyny/gojq v0.12.5/go.mod h1:3e1hZXv+Kwvdp6V9HXpVrvddiHVApi5EDZwS+zLFeiE= +github.com/itchyny/timefmt-go v0.1.3 h1:7M3LGVDsqcd0VZH2U+x393obrzZisp7C0uEe921iRkU= +github.com/itchyny/timefmt-go v0.1.3/go.mod h1:0osSSCQSASBJMsIZnhAaF1C2fCBTJZXrnj37mG8/c+A= 
+github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0= +github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= +github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= +github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= +github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= +github.com/jackc/pgconn v1.3.2/go.mod h1:LvCquS3HbBKwgl7KbX9KyqEIumJAbm1UMcTvGaIf3bM= +github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= +github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= +github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgconn v1.12.1 h1:rsDFzIpRk7xT4B8FufgpCCeyjdNpKyghZeSefViE5W8= +github.com/jackc/pgconn v1.12.1/go.mod h1:ZkhRC59Llhrq3oSfrikvwQ5NaxYExr6twkdkMLaKono= +github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= +github.com/jackc/pgpassfile v1.0.0 
h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A= +github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= +github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.3.0 h1:brH0pCGBDkBW07HWlN/oSBXrmo3WB0UvZd1pIuDcL8Y= +github.com/jackc/pgproto3/v2 v2.3.0/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= +github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= +github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= +github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= +github.com/jackc/pgtype v1.11.0 
h1:u4uiGPz/1hryuXzyaBhSk6dnIyyG2683olG2OV+UUgs= +github.com/jackc/pgtype v1.11.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= +github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= +github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= +github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= +github.com/jackc/pgx/v4 v4.16.1 h1:JzTglcal01DrghUqt+PmzWsZx/Yh7SC/CTQmSBMTd0Y= +github.com/jackc/pgx/v4 v4.16.1/go.mod h1:SIhx0D5hoADaiXZVyv+3gSm3LCIIINTVO0PficsvWGQ= +github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.2.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= +github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= 
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.10.10 h1:a/y8CglcM7gLGYmlbP/stPE5sR3hbhFRUjCBfd/0B3I= +github.com/klauspost/compress v1.10.10/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq 
v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8= +github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/markbates/pkger v0.15.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mna/redisc v1.1.2 h1:cup2P6113vVbzNCuYOQtIctHFBGlvGvJi8+GGFPYobU= +github.com/mna/redisc v1.1.2/go.mod h1:GXeOb7zyYKiT+K8MKdIiJvuv7MfhDoQGcuzfiJQmqQI= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod 
h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA= +github.com/neo4j-drivers/gobolt v1.7.4/go.mod h1:O9AUbip4Dgre+CD3p40dnMD4a4r52QBIfblg5k7CTbE= +github.com/neo4j/neo4j-go-driver v1.7.4/go.mod h1:aPO0vVr+WnhEJne+FgFjfsjzAnssPFLucHgGZ76Zb/U= +github.com/nicksnyder/go-i18n/v2 v2.2.0 h1:MNXbyPvd141JJqlU6gJKrczThxJy+kdCNivxZpBQFkw= +github.com/nicksnyder/go-i18n/v2 v2.2.0/go.mod h1:4OtLfzqyAxsscyCb//3gfqSvBc81gImX91LrZzczN1o= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/image-spec v1.0.1 
h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= +github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc= +github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.2.1 h1:JnMpQc6ppsNgw9QPAGF6Dod479itz7lvlsMzzNayLOI= +github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= 
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.5 h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQlL8= +github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= +github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/robfig/cron v0.0.0-20171101201047-2315d5715e36 h1:7ELV9kd3xWoNnXRvGWOMrPiBz/6W47lSwikPlnvMTV8= +github.com/robfig/cron v0.0.0-20171101201047-2315d5715e36/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= +github.com/rogpeppe/fastuuid v1.2.0/go.mod 
h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/slack-go/slack v0.6.4 h1:cxOqFgM5RW6mdEyDqAJutFk3qiORK9oHRKi5bPqkY9o= +github.com/slack-go/slack v0.6.4/go.mod h1:sGRjv3w+ERAUMMMbldHObQPBcNSyVB7KLKYfnwUFBfw= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify 
v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs= +github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk= +github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0= +github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= +gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE= +go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0= +go.opencensus.io v0.20.1/go.mod 
h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0 h1:nR6NoDBgAf67s68NhaXbsojM+2gxp3S1hWkHDl27pVU= +go.uber.org/zap v1.13.0/go.mod 
h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod 
h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200213203834-85f925bdd4d0/go.mod h1:IX6Eufr4L0ErOUlzqX/aFlHqsiKZRbV42Kb69e9VsTE= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod 
h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200528225125-3c3fba18258b/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net 
v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220812174116-3211cb980234 h1:RDqmgfe7SvlMWoqC3xwQ2blLO3fcWcxMa3eBLRdRW7E= +golang.org/x/net v0.0.0-20220812174116-3211cb980234/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/oauth2 v0.0.0-20180603041954-1e0a3fa8ba9a/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod 
h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220722155238-128564f6959c/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220808172628-8227340efae7 h1:dtndE8FcEta75/4kHF3AbpuWzV6f1LjnLrM4pe2SZrw= +golang.org/x/oauth2 v0.0.0-20220808172628-8227340efae7/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab 
h1:2QkjZIsXupsJbJIdSjjUOgWK3aEtzyuh2mPt3l/CkeU= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9 h1:ftMN5LMiBFjbzleLqtoBZk7KdJwhuybIU+FckUHgoyQ= +golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200128002243-345141a36859/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools 
v0.0.0-20200213224642-88e652f7a869/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod 
h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f h1:uF6paiQQebLeSXkrTqHqz0MXhXXS1KgF41eUdBNvxK0= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/api v0.0.0-20180603000442-8e296ef26005/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod 
h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod 
h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.86.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.88.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.91.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.92.0 h1:8JHk7q/+rJla+iRsWj9FQ9/wjv2M1SKtpKSdmLhxPT0= +google.golang.org/api v0.92.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/appengine v1.0.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.1.0/go.mod 
h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180601223552-81158efcc9f2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto 
v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200128133413-58ce757ed39b/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= 
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod 
h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto 
v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= 
+google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220720214146-176da50484ac/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220804142021-4e6b2dfa6612/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220808131553-a91ffa7f803e/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220812140447-cec7f5303424 h1:zZnTt15U44/Txe/9cN/tVbteBkPMiyXK48hPsKRmqj4= +google.golang.org/genproto v0.0.0-20220812140447-cec7f5303424/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= 
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod 
h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw= +gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= +gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM= +gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= +gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI= +gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= +gopkg.in/jcmturner/gokrb5.v7 v7.5.0 
h1:a9tsXlIDD9SKxotJMK3niV7rPZAJeX2aD/0yg3qlIrg= +gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= +gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= +gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= +gopkg.in/square/go-jose.v2 v2.4.0 h1:0kXPskUMGAXXWJlP05ktEMOV0vmzFQUWw6d+aZJQU8A= +gopkg.in/square/go-jose.v2 v2.4.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200601152816-913338de1bd2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +modernc.org/b v1.0.0/go.mod h1:uZWcZfRj1BpYzfN9JTerzlNUnnPsV9O2ZA8JsRcubNg= +modernc.org/db v1.0.0/go.mod h1:kYD/cO29L/29RM0hXYl4i3+Q5VojL31kTUVpVJDw0s8= +modernc.org/file v1.0.0/go.mod h1:uqEokAEn1u6e+J45e54dsEA/pw4o7zLrA2GwyntZzjw= +modernc.org/fileutil v1.0.0/go.mod h1:JHsWpkrk/CnVV1H/eGlFf85BEpfkrp56ro8nojIq9Q8= +modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= +modernc.org/internal v1.0.0/go.mod h1:VUD/+JAkhCpvkUitlEOnhpVxCgsBI90oTzSCRcqQVSM= +modernc.org/lldb v1.0.0/go.mod h1:jcRvJGWfCGodDZz8BPwiKMJxGJngQ/5DrRapkQnLob8= +modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= +modernc.org/ql v1.0.0/go.mod h1:xGVyrLIatPcO2C1JvI/Co8c0sr6y91HKFNy4pt9JXEY= +modernc.org/sortutil v1.1.0/go.mod h1:ZyL98OQHJgH9IEfN71VsamvJgrtRX9Dj2gX+vH86L1k= +modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= +modernc.org/zappy v1.0.0/go.mod h1:hHe+oGahLVII/aTTyWK/b53VDHMAGCBYYeZ9sn83HC4= +rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/hack/create-account/BUILD.bazel b/hack/create-account/BUILD.bazel new file mode 100644 index 000000000..d209021b3 --- 
/dev/null +++ b/hack/create-account/BUILD.bazel @@ -0,0 +1,27 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "command.go", + "main.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/hack/create-account", + visibility = ["//visibility:private"], + deps = [ + "//pkg/account/client:go_default_library", + "//pkg/cli:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/rpc/client:go_default_library", + "//proto/account:go_default_library", + "@in_gopkg_alecthomas_kingpin_v2//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_binary( + name = "create-account", + embed = [":go_default_library"], + pure = "on", + visibility = ["//visibility:public"], +) diff --git a/hack/create-account/README.md b/hack/create-account/README.md new file mode 100644 index 000000000..a60e099fb --- /dev/null +++ b/hack/create-account/README.md @@ -0,0 +1,14 @@ +## Run Command + +``` +bazelisk run //hack/create-account:create-account -- create \ + --cert=full-path-to-certificate \ + --web-gateway=web-gateway-address \ + --service-token=full-path-to-service-token-file \ + --email=email \ + --role=role \ + --environment-namespace=environment-namespace \ + --is-admin(optional) \ + --no-profile \ + --no-gcp-trace-enabled +``` diff --git a/hack/create-account/command.go b/hack/create-account/command.go new file mode 100644 index 000000000..9738d31b6 --- /dev/null +++ b/hack/create-account/command.go @@ -0,0 +1,135 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "context" + "errors" + "time" + + "go.uber.org/zap" + kingpin "gopkg.in/alecthomas/kingpin.v2" + + accountclient "github.com/bucketeer-io/bucketeer/pkg/account/client" + "github.com/bucketeer-io/bucketeer/pkg/cli" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" +) + +type command struct { + *kingpin.CmdClause + certPath *string + serviceTokenPath *string + webGatewayAddress *string + email *string + role *string + environmentNamespace *string + isAdmin *bool +} + +func registerCommand(r cli.CommandRegistry, p cli.ParentCommand) *command { + cmd := p.Command("create", "Create a new account") + command := &command{ + CmdClause: cmd, + certPath: cmd.Flag("cert", "Path to TLS certificate.").Required().String(), + serviceTokenPath: cmd.Flag("service-token", "Path to service token file.").Required().String(), + webGatewayAddress: cmd.Flag("web-gateway", "Address of web-gateway.").Required().String(), + email: cmd.Flag("email", "The email of an account.").Required().String(), + role: cmd.Flag("role", "The role of an account.").Required().Enum("VIEWER", "EDITOR", "OWNER"), + environmentNamespace: cmd.Flag( + "environment-namespace", + "The environment namespace for Datestore namespace", + ).Required().String(), + isAdmin: cmd.Flag("is-admin", "Is an account admin or not.").Default("false").Bool(), + } + r.RegisterCommand(command) + return command +} + +func (c *command) Run(ctx context.Context, metrics 
metrics.Metrics, logger *zap.Logger) error { + client, err := createAccountClient(*c.webGatewayAddress, *c.certPath, *c.serviceTokenPath, logger) + if err != nil { + logger.Error("Failed to create account client", zap.Error(err)) + return err + } + role, ok := accountproto.Account_Role_value[*c.role] + if !ok { + logger.Error("Wrong role parameter", zap.String("role", *c.role)) + return errors.New("wrong role parameter") + } + if *c.isAdmin { + err := c.createAdminAccount(ctx, client, accountproto.Account_Role(role)) + if err != nil { + logger.Error("Failed to create admin account", zap.Error(err)) + return err + } + logger.Info("Admin account created") + return nil + } + err = c.createAccount(ctx, client, accountproto.Account_Role(role)) + if err != nil { + logger.Error("Failed to create account", zap.Error(err), + zap.String("environmentNamespace", *c.environmentNamespace)) + return err + } + logger.Info("Account created") + return nil +} + +func (c *command) createAdminAccount( + ctx context.Context, + client accountclient.Client, + role accountproto.Account_Role, +) error { + req := &accountproto.CreateAdminAccountRequest{ + Command: &accountproto.CreateAdminAccountCommand{Email: *c.email}, + } + if _, err := client.CreateAdminAccount(ctx, req); err != nil { + return err + } + return nil +} + +func (c *command) createAccount( + ctx context.Context, + client accountclient.Client, + role accountproto.Account_Role, +) error { + req := &accountproto.CreateAccountRequest{ + Command: &accountproto.CreateAccountCommand{ + Email: *c.email, + Role: role, + }, + EnvironmentNamespace: *c.environmentNamespace, + } + if _, err := client.CreateAccount(ctx, req); err != nil { + return err + } + return nil +} + +func createAccountClient(addr, cert, serviceToken string, logger *zap.Logger) (accountclient.Client, error) { + creds, err := client.NewPerRPCCredentials(serviceToken) + if err != nil { + return nil, err + } + return accountclient.NewClient(addr, cert, + 
client.WithPerRPCCredentials(creds), + client.WithDialTimeout(10*time.Second), + client.WithBlock(), + client.WithLogger(logger), + ) +} diff --git a/hack/create-account/main.go b/hack/create-account/main.go new file mode 100644 index 000000000..bd63aff33 --- /dev/null +++ b/hack/create-account/main.go @@ -0,0 +1,35 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "log" + + "github.com/bucketeer-io/bucketeer/pkg/cli" +) + +var ( + name = "create-account" + version = "" + build = "" +) + +func main() { + app := cli.NewApp(name, "Bucketeer tool", version, build) + registerCommand(app, app) + if err := app.Run(); err != nil { + log.Fatal(err) + } +} diff --git a/hack/create-api-key/BUILD.bazel b/hack/create-api-key/BUILD.bazel new file mode 100644 index 000000000..be9af84fa --- /dev/null +++ b/hack/create-api-key/BUILD.bazel @@ -0,0 +1,27 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "command.go", + "main.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/hack/create-api-key", + visibility = ["//visibility:private"], + deps = [ + "//pkg/account/client:go_default_library", + "//pkg/cli:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/rpc/client:go_default_library", + "//proto/account:go_default_library", + "@in_gopkg_alecthomas_kingpin_v2//:go_default_library", + 
"@org_uber_go_zap//:go_default_library", + ], +) + +go_binary( + name = "create-api-key", + embed = [":go_default_library"], + pure = "on", + visibility = ["//visibility:public"], +) diff --git a/hack/create-api-key/README.md b/hack/create-api-key/README.md new file mode 100644 index 000000000..6995e9a66 --- /dev/null +++ b/hack/create-api-key/README.md @@ -0,0 +1,14 @@ +## Run Command + +``` +bazelisk run //hack/create-api-key:create-api-key -- create \ + --cert=full-path-to-certificate \ + --web-gateway=web-gateway-address \ + --service-token=full-path-to-service-token-file \ + --name=key-name \ + --role=key-role \ + --output=full-path-to-output-file \ + --environment-namespace=environment-namespace \ + --no-profile \ + --no-gcp-trace-enabled +``` diff --git a/hack/create-api-key/command.go b/hack/create-api-key/command.go new file mode 100644 index 000000000..b4e9aa5a6 --- /dev/null +++ b/hack/create-api-key/command.go @@ -0,0 +1,104 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "context" + "errors" + "io/ioutil" + "time" + + "go.uber.org/zap" + kingpin "gopkg.in/alecthomas/kingpin.v2" + + accountclient "github.com/bucketeer-io/bucketeer/pkg/account/client" + "github.com/bucketeer-io/bucketeer/pkg/cli" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" +) + +type command struct { + *kingpin.CmdClause + certPath *string + serviceTokenPath *string + webGatewayAddress *string + name *string + role *string + output *string + environmentNamespace *string +} + +func registerCommand(r cli.CommandRegistry, p cli.ParentCommand) *command { + cmd := p.Command("create", "Create a new api key") + command := &command{ + CmdClause: cmd, + certPath: cmd.Flag("cert", "Path to TLS certificate.").Required().String(), + serviceTokenPath: cmd.Flag("service-token", "Path to service token file.").Required().String(), + webGatewayAddress: cmd.Flag("web-gateway", "Address of web-gateway.").Required().String(), + name: cmd.Flag("name", "The name of key.").Required().String(), + role: cmd.Flag("role", "The role of key.").Default("SDK").Enum("SDK", "SERVICE"), + output: cmd.Flag("output", "Path of file to write api key.").Required().String(), + environmentNamespace: cmd.Flag( + "environment-namespace", + "The environment namespace to store api key", + ).Required().String(), + } + r.RegisterCommand(command) + return command +} + +func (c *command) Run(ctx context.Context, metrics metrics.Metrics, logger *zap.Logger) error { + client, err := createAccountClient(*c.webGatewayAddress, *c.certPath, *c.serviceTokenPath, logger) + if err != nil { + logger.Error("Failed to create account client", zap.Error(err)) + return err + } + role, ok := accountproto.APIKey_Role_value[*c.role] + if !ok { + logger.Error("Wrong role parameter", zap.String("role", *c.role)) + return errors.New("wrong role parameter") + } + resp, err := 
client.CreateAPIKey(ctx, &accountproto.CreateAPIKeyRequest{ + Command: &accountproto.CreateAPIKeyCommand{ + Name: *c.name, + Role: accountproto.APIKey_Role(role), + }, + EnvironmentNamespace: *c.environmentNamespace, + }) + if err != nil { + logger.Error("Failed to create api key", zap.Error(err)) + return err + } + if err := ioutil.WriteFile(*c.output, []byte(resp.ApiKey.Id), 0644); err != nil { + logger.Error("Failed to write key to file", zap.Error(err), zap.String("output", *c.output)) + return err + } + logger.Info("Key generated") + return nil +} + +func createAccountClient(addr, cert, serviceToken string, logger *zap.Logger) (accountclient.Client, error) { + creds, err := client.NewPerRPCCredentials(serviceToken) + if err != nil { + return nil, err + } + return accountclient.NewClient(addr, cert, + client.WithPerRPCCredentials(creds), + client.WithDialTimeout(30*time.Second), + client.WithBlock(), + client.WithLogger(logger), + ) +} diff --git a/hack/create-api-key/main.go b/hack/create-api-key/main.go new file mode 100644 index 000000000..7564ea445 --- /dev/null +++ b/hack/create-api-key/main.go @@ -0,0 +1,35 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "log" + + "github.com/bucketeer-io/bucketeer/pkg/cli" +) + +var ( + name = "create-api-key" + version = "" + build = "" +) + +func main() { + app := cli.NewApp(name, "Bucketeer tool", version, build) + registerCommand(app, app) + if err := app.Run(); err != nil { + log.Fatal(err) + } +} diff --git a/hack/create-environment/BUILD.bazel b/hack/create-environment/BUILD.bazel new file mode 100644 index 000000000..7b4381919 --- /dev/null +++ b/hack/create-environment/BUILD.bazel @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "command.go", + "main.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/hack/create-environment", + visibility = ["//visibility:private"], + deps = [ + "//pkg/cli:go_default_library", + "//pkg/environment/client:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/rpc/client:go_default_library", + "//proto/environment:go_default_library", + "@in_gopkg_alecthomas_kingpin_v2//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_binary( + name = "create-environment", + embed = [":go_default_library"], + visibility = ["//visibility:public"], +) diff --git a/hack/create-environment/README.md b/hack/create-environment/README.md new file mode 100644 index 000000000..d99f4c678 --- /dev/null +++ b/hack/create-environment/README.md @@ -0,0 +1,11 @@ +## Run Command + +``` +bazelisk run //hack/create-environment:create-environment -- create \ + --cert=full-path-to-certificate \ + --web-gateway=web-gateway-address \ + --service-token=full-path-to-service-token-file \ + --id=environment-id \ + --description=optional-environment-description \ + --project-id=project-id +``` diff --git a/hack/create-environment/command.go b/hack/create-environment/command.go new file mode 100644 index 000000000..bf10ee3c3 --- /dev/null +++ b/hack/create-environment/command.go @@ -0,0 +1,89 @@ +// Copyright 
2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "context" + "time" + + "go.uber.org/zap" + kingpin "gopkg.in/alecthomas/kingpin.v2" + + "github.com/bucketeer-io/bucketeer/pkg/cli" + environmentclient "github.com/bucketeer-io/bucketeer/pkg/environment/client" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + environmentproto "github.com/bucketeer-io/bucketeer/proto/environment" +) + +type command struct { + *kingpin.CmdClause + certPath *string + serviceTokenPath *string + webGatewayAddress *string + id *string + description *string + projectID *string +} + +func registerCommand(r cli.CommandRegistry, p cli.ParentCommand) *command { + cmd := p.Command("create", "Create a new environment") + command := &command{ + CmdClause: cmd, + certPath: cmd.Flag("cert", "Path to TLS certificate.").Required().String(), + serviceTokenPath: cmd.Flag("service-token", "Path to service token file.").Required().String(), + webGatewayAddress: cmd.Flag("web-gateway", "Address of web-gateway.").Required().String(), + id: cmd.Flag("id", "Id of an environment.").Required().String(), + description: cmd.Flag("description", "(optional) Description of an environment.").String(), + projectID: cmd.Flag("project-id", "Project Id of an environment.").Required().String(), + } + r.RegisterCommand(command) + return command +} + +func (c *command) Run(ctx context.Context, metrics 
metrics.Metrics, logger *zap.Logger) error { + client, err := createEnvironmentClient(*c.webGatewayAddress, *c.certPath, *c.serviceTokenPath, logger) + if err != nil { + logger.Error("Failed to create environment client", zap.Error(err)) + return err + } + defer client.Close() + req := &environmentproto.CreateEnvironmentRequest{ + Command: &environmentproto.CreateEnvironmentCommand{ + Id: *c.id, + Description: *c.description, + ProjectId: *c.projectID, + }, + } + if _, err = client.CreateEnvironment(ctx, req); err != nil { + logger.Error("Failed to create environment", zap.Error(err)) + return err + } + logger.Info("Environment created") + return nil +} + +func createEnvironmentClient(addr, cert, serviceToken string, logger *zap.Logger) (environmentclient.Client, error) { + creds, err := client.NewPerRPCCredentials(serviceToken) + if err != nil { + return nil, err + } + return environmentclient.NewClient(addr, cert, + client.WithPerRPCCredentials(creds), + client.WithDialTimeout(10*time.Second), + client.WithBlock(), + client.WithLogger(logger), + ) +} diff --git a/hack/create-environment/main.go b/hack/create-environment/main.go new file mode 100644 index 000000000..ad3585285 --- /dev/null +++ b/hack/create-environment/main.go @@ -0,0 +1,35 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "log" + + "github.com/bucketeer-io/bucketeer/pkg/cli" +) + +var ( + name = "create-environment" + version = "" + build = "" +) + +func main() { + app := cli.NewApp(name, "Bucketeer tool", version, build) + registerCommand(app, app) + if err := app.Run(); err != nil { + log.Fatal(err) + } +} diff --git a/hack/create-project/BUILD.bazel b/hack/create-project/BUILD.bazel new file mode 100644 index 000000000..3e32ccad8 --- /dev/null +++ b/hack/create-project/BUILD.bazel @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "command.go", + "main.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/hack/create-project", + visibility = ["//visibility:private"], + deps = [ + "//pkg/cli:go_default_library", + "//pkg/environment/client:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/rpc/client:go_default_library", + "//proto/environment:go_default_library", + "@in_gopkg_alecthomas_kingpin_v2//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_binary( + name = "create-project", + embed = [":go_default_library"], + visibility = ["//visibility:public"], +) diff --git a/hack/create-project/README.md b/hack/create-project/README.md new file mode 100644 index 000000000..08e860fca --- /dev/null +++ b/hack/create-project/README.md @@ -0,0 +1,11 @@ +## Run Command + +``` +bazelisk run //hack/create-project:create-project -- create \ + --cert=full-path-to-certificate \ + --web-gateway=web-gateway-address \ + --service-token=full-path-to-service-token-file \ + --id=project-id \ + --description=optional-project-description \ + --create-environments(optional) +``` diff --git a/hack/create-project/command.go b/hack/create-project/command.go new file mode 100644 index 000000000..635fa8b16 --- /dev/null +++ b/hack/create-project/command.go @@ -0,0 +1,111 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "context" + "fmt" + "time" + + "go.uber.org/zap" + kingpin "gopkg.in/alecthomas/kingpin.v2" + + "github.com/bucketeer-io/bucketeer/pkg/cli" + environmentclient "github.com/bucketeer-io/bucketeer/pkg/environment/client" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + environmentproto "github.com/bucketeer-io/bucketeer/proto/environment" +) + +type command struct { + *kingpin.CmdClause + certPath *string + serviceTokenPath *string + webGatewayAddress *string + id *string + description *string + createEnvironments *bool +} + +func registerCommand(r cli.CommandRegistry, p cli.ParentCommand) *command { + cmd := p.Command("create", "Create a new project") + command := &command{ + CmdClause: cmd, + certPath: cmd.Flag("cert", "Path to TLS certificate.").Required().String(), + serviceTokenPath: cmd.Flag("service-token", "Path to service token file.").Required().String(), + webGatewayAddress: cmd.Flag("web-gateway", "Address of web-gateway.").Required().String(), + id: cmd.Flag("id", "Id of a project.").Required().String(), + description: cmd.Flag("description", "(optional) Description of a project.").String(), + createEnvironments: cmd.Flag("create-environments", "create environments or not").Bool(), + } + r.RegisterCommand(command) + return command +} + +func (c *command) Run(ctx context.Context, metrics metrics.Metrics, logger 
*zap.Logger) error { + client, err := createEnvironmentClient(*c.webGatewayAddress, *c.certPath, *c.serviceTokenPath, logger) + if err != nil { + logger.Error("Failed to create environment client", zap.Error(err)) + return err + } + // create project + req := &environmentproto.CreateProjectRequest{ + Command: &environmentproto.CreateProjectCommand{ + Id: *c.id, + Description: *c.description, + }, + } + if _, err = client.CreateProject(ctx, req); err != nil { + logger.Error("Failed to create project", zap.Error(err)) + return err + } + logger.Info(fmt.Sprintf("%s project created", *c.id)) + // create environments (optional) + if *c.createEnvironments { + envIDs := []string{ + fmt.Sprintf("%s-development", *c.id), + fmt.Sprintf("%s-staging", *c.id), + fmt.Sprintf("%s-production", *c.id), + } + for _, envID := range envIDs { + req := &environmentproto.CreateEnvironmentRequest{ + Command: &environmentproto.CreateEnvironmentCommand{ + Id: envID, + ProjectId: *c.id, + }, + } + if _, err = client.CreateEnvironment(ctx, req); err != nil { + logger.Error("Failed to create environment", zap.Error(err)) + return err + } + logger.Info(fmt.Sprintf("%s environment created", envID)) + } + } + logger.Info("Succeeded") + return nil +} + +func createEnvironmentClient(addr, cert, serviceToken string, logger *zap.Logger) (environmentclient.Client, error) { + creds, err := client.NewPerRPCCredentials(serviceToken) + if err != nil { + return nil, err + } + return environmentclient.NewClient(addr, cert, + client.WithPerRPCCredentials(creds), + client.WithDialTimeout(10*time.Second), + client.WithBlock(), + client.WithLogger(logger), + ) +} diff --git a/hack/create-project/main.go b/hack/create-project/main.go new file mode 100644 index 000000000..cf46ffcb6 --- /dev/null +++ b/hack/create-project/main.go @@ -0,0 +1,35 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "log" + + "github.com/bucketeer-io/bucketeer/pkg/cli" +) + +var ( + name = "create-project" + version = "" + build = "" +) + +func main() { + app := cli.NewApp(name, "Bucketeer tool", version, build) + registerCommand(app, app) + if err := app.Run(); err != nil { + log.Fatal(err) + } +} diff --git a/hack/delete-e2e-data-mysql/BUILD.bazel b/hack/delete-e2e-data-mysql/BUILD.bazel new file mode 100644 index 000000000..6b3737874 --- /dev/null +++ b/hack/delete-e2e-data-mysql/BUILD.bazel @@ -0,0 +1,24 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "command.go", + "main.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/hack/delete-e2e-data-mysql", + visibility = ["//visibility:private"], + deps = [ + "//pkg/cli:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "@in_gopkg_alecthomas_kingpin_v2//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_binary( + name = "delete-e2e-data-mysql", + embed = [":go_default_library"], + visibility = ["//visibility:public"], +) diff --git a/hack/delete-e2e-data-mysql/README.md b/hack/delete-e2e-data-mysql/README.md new file mode 100644 index 000000000..b6a8e00e1 --- /dev/null +++ b/hack/delete-e2e-data-mysql/README.md @@ -0,0 +1,13 @@ +## Run Command + +``` +bazelisk run 
//hack/delete-e2e-data-mysql:delete-e2e-data-mysql -- delete \ + --mysql-user= \ + --mysql-pass= \ + --mysql-host= \ + --mysql-port= \ + --mysql-db-name= \ + --test-id= \ # optional + --no-profile \ + --no-gcp-trace-enabled +``` diff --git a/hack/delete-e2e-data-mysql/command.go b/hack/delete-e2e-data-mysql/command.go new file mode 100644 index 000000000..d6b9f4e0b --- /dev/null +++ b/hack/delete-e2e-data-mysql/command.go @@ -0,0 +1,152 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "context" + "fmt" + "time" + + "go.uber.org/zap" + "gopkg.in/alecthomas/kingpin.v2" + + "github.com/bucketeer-io/bucketeer/pkg/cli" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" +) + +const ( + envNamespace = "e2e" + prefixTestName = "e2e-test" +) + +var ( + targetEntities = []*mysqlE2EInfo{ + {table: "subscription", targetField: "name"}, + {table: "experiment_result", targetField: ""}, + {table: "push", targetField: "name"}, + {table: "ops_count", targetField: ""}, + {table: "auto_ops_rule", targetField: "feature_id"}, + {table: "segment_user", targetField: "user_id"}, + {table: "segment", targetField: "name"}, + {table: "goal", targetField: "id"}, + {table: "experiment", targetField: "feature_id"}, + {table: "tag", targetField: ""}, + {table: "feature", targetField: "id"}, + {table: "webhook", targetField: "name"}, + } +) + +type mysqlE2EInfo struct { + table string + targetField string +} + +type command struct { + *kingpin.CmdClause + mysqlUser *string + mysqlPass *string + mysqlHost *string + mysqlPort *int + mysqlDBName *string + testID *string +} + +func registerCommand(r cli.CommandRegistry, p cli.ParentCommand) *command { + cmd := p.Command("delete", "delete e2e data") + command := &command{ + CmdClause: cmd, + mysqlUser: cmd.Flag("mysql-user", "MySQL user.").Required().String(), + mysqlPass: cmd.Flag("mysql-pass", "MySQL password.").Required().String(), + mysqlHost: cmd.Flag("mysql-host", "MySQL host.").Required().String(), + mysqlPort: cmd.Flag("mysql-port", "MySQL port.").Required().Int(), + mysqlDBName: cmd.Flag("mysql-db-name", "MySQL database name.").Required().String(), + testID: cmd.Flag("test-id", "Test ID.").String(), + } + r.RegisterCommand(command) + return command +} + +func (c *command) Run(ctx context.Context, metrics metrics.Metrics, logger *zap.Logger) error { + client, err := c.createMySQLClient(ctx, logger) + if err != nil { + 
logger.Error("Failed to create mysql client", zap.Error(err)) + return err + } + defer client.Close() + for _, target := range targetEntities { + if err := c.deleteData(ctx, client, target); err != nil { + logger.Error("Failed to delete data", zap.Error(err), zap.String("table", target.table)) + return err + } + } + logger.Info("Done") + return nil +} + +func (c *command) createMySQLClient( + ctx context.Context, + logger *zap.Logger, +) (mysql.Client, error) { + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + return mysql.NewClient( + ctx, + *c.mysqlUser, *c.mysqlPass, *c.mysqlHost, + *c.mysqlPort, + *c.mysqlDBName, + mysql.WithLogger(logger), + ) +} + +func (c *command) deleteData(ctx context.Context, client mysql.Client, target *mysqlE2EInfo) error { + query, args := c.constructDeleteQuery(target) + _, err := client.ExecContext( + ctx, + query, + args..., + ) + if err != nil { + return err + } + return nil +} + +func (c *command) constructDeleteQuery(target *mysqlE2EInfo) (query string, args []interface{}) { + if target.targetField != "" && *c.testID != "" { + query = fmt.Sprintf(` + DELETE FROM + %s + WHERE + environment_namespace = ? AND + %s LIKE ? + `, target.table, target.targetField) + args = []interface{}{ + envNamespace, + prefixTestName + "-" + *c.testID + "%", + } + return + } + query = fmt.Sprintf(` + DELETE FROM + %s + WHERE + environment_namespace = ? + `, target.table) + args = []interface{}{ + envNamespace, + } + return +} diff --git a/hack/delete-e2e-data-mysql/main.go b/hack/delete-e2e-data-mysql/main.go new file mode 100644 index 000000000..bd3a2adc2 --- /dev/null +++ b/hack/delete-e2e-data-mysql/main.go @@ -0,0 +1,35 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "log" + + "github.com/bucketeer-io/bucketeer/pkg/cli" +) + +var ( + name = "delete" + version = "" + build = "" +) + +func main() { + app := cli.NewApp(name, "Bucketeer tool", version, build) + registerCommand(app, app) + if err := app.Run(); err != nil { + log.Fatal(err) + } +} diff --git a/hack/delete-environment/BUILD.bazel b/hack/delete-environment/BUILD.bazel new file mode 100644 index 000000000..6bba3fbc0 --- /dev/null +++ b/hack/delete-environment/BUILD.bazel @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "command.go", + "main.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/hack/delete-environment", + visibility = ["//visibility:private"], + deps = [ + "//pkg/cli:go_default_library", + "//pkg/environment/client:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/rpc/client:go_default_library", + "//proto/environment:go_default_library", + "@in_gopkg_alecthomas_kingpin_v2//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_binary( + name = "delete-environment", + embed = [":go_default_library"], + visibility = ["//visibility:public"], +) diff --git a/hack/delete-environment/README.md b/hack/delete-environment/README.md new file mode 100644 index 000000000..7dfeb66aa --- /dev/null +++ b/hack/delete-environment/README.md @@ -0,0 +1,9 @@ +## Run Command + +``` +bazelisk run //hack/delete-environment:delete-environment -- delete \ + --cert=full-path-to-certificate \ + 
--web-gateway=web-gateway-address \ + --service-token=full-path-to-service-token-file \ + --id=environment-id +``` diff --git a/hack/delete-environment/command.go b/hack/delete-environment/command.go new file mode 100644 index 000000000..90b9e3ec7 --- /dev/null +++ b/hack/delete-environment/command.go @@ -0,0 +1,82 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "context" + "time" + + "go.uber.org/zap" + kingpin "gopkg.in/alecthomas/kingpin.v2" + + "github.com/bucketeer-io/bucketeer/pkg/cli" + environmentclient "github.com/bucketeer-io/bucketeer/pkg/environment/client" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + environmentproto "github.com/bucketeer-io/bucketeer/proto/environment" +) + +type command struct { + *kingpin.CmdClause + certPath *string + serviceTokenPath *string + webGatewayAddress *string + id *string +} + +func registerCommand(r cli.CommandRegistry, p cli.ParentCommand) *command { + cmd := p.Command("delete", "Delete an environment") + command := &command{ + CmdClause: cmd, + certPath: cmd.Flag("cert", "Path to TLS certificate.").Required().String(), + serviceTokenPath: cmd.Flag("service-token", "Path to service token file.").Required().String(), + webGatewayAddress: cmd.Flag("web-gateway", "Address of web-gateway.").Required().String(), + id: cmd.Flag("id", "Id of an environment.").Required().String(), + }
r.RegisterCommand(command) + return command +} + +func (c *command) Run(ctx context.Context, metrics metrics.Metrics, logger *zap.Logger) error { + client, err := createEnvironmentClient(*c.webGatewayAddress, *c.certPath, *c.serviceTokenPath, logger) + if err != nil { + logger.Error("Failed to create environment client", zap.Error(err)) + return err + } + defer client.Close() + req := &environmentproto.DeleteEnvironmentRequest{ + Id: *c.id, + Command: &environmentproto.DeleteEnvironmentCommand{}, + } + if _, err = client.DeleteEnvironment(ctx, req); err != nil { + logger.Error("Failed to delete environment", zap.Error(err)) + return err + } + logger.Info("Environment deleted") + return nil +} + +func createEnvironmentClient(addr, cert, serviceToken string, logger *zap.Logger) (environmentclient.Client, error) { + creds, err := client.NewPerRPCCredentials(serviceToken) + if err != nil { + return nil, err + } + return environmentclient.NewClient(addr, cert, + client.WithPerRPCCredentials(creds), + client.WithDialTimeout(10*time.Second), + client.WithBlock(), + client.WithLogger(logger), + ) +} diff --git a/hack/delete-environment/main.go b/hack/delete-environment/main.go new file mode 100644 index 000000000..e1915225b --- /dev/null +++ b/hack/delete-environment/main.go @@ -0,0 +1,35 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "log" + + "github.com/bucketeer-io/bucketeer/pkg/cli" +) + +var ( + name = "delete-environment" + version = "" + build = "" +) + +func main() { + app := cli.NewApp(name, "Bucketeer tool", version, build) + registerCommand(app, app) + if err := app.Run(); err != nil { + log.Fatal(err) + } +} diff --git a/hack/generate-service-token/BUILD.bazel b/hack/generate-service-token/BUILD.bazel new file mode 100644 index 000000000..725a86cc9 --- /dev/null +++ b/hack/generate-service-token/BUILD.bazel @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "command.go", + "main.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/hack/generate-service-token", + visibility = ["//visibility:private"], + deps = [ + "//pkg/cli:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/token:go_default_library", + "//proto/account:go_default_library", + "@in_gopkg_alecthomas_kingpin_v2//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_binary( + name = "generate-service-token", + embed = [":go_default_library"], + pure = "on", + visibility = ["//visibility:public"], +) diff --git a/hack/generate-service-token/README.md b/hack/generate-service-token/README.md new file mode 100644 index 000000000..b606f1512 --- /dev/null +++ b/hack/generate-service-token/README.md @@ -0,0 +1,14 @@ +## Run Command + +``` +bazelisk run //hack/generate-service-token:generate-service-token -- generate \ + --issuer=dex-issuer \ + --sub=sub \ + --audience=client-id-set-in-dex-config \ + --email=email \ + --role=role \ + --key=full-path-to-private-key \ + --output=full-path-to-output-file \ + --no-profile \ + --no-gcp-trace-enabled +``` diff --git a/hack/generate-service-token/command.go b/hack/generate-service-token/command.go new file mode 100644 index 000000000..c8b5721cb --- /dev/null +++ b/hack/generate-service-token/command.go @@ -0,0 +1,90 
@@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "context" + "errors" + "io/ioutil" + "time" + + "go.uber.org/zap" + kingpin "gopkg.in/alecthomas/kingpin.v2" + + "github.com/bucketeer-io/bucketeer/pkg/cli" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/token" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" +) + +type command struct { + *kingpin.CmdClause + keyPath *string + issuer *string + sub *string + audience *string + email *string + role *string + output *string +} + +func registerCommand(r cli.CommandRegistry, p cli.ParentCommand) *command { + cmd := p.Command("generate", "Generate a new service token") + command := &command{ + CmdClause: cmd, + keyPath: cmd.Flag("key", "Path to the private keys.").Required().String(), + issuer: cmd.Flag("issuer", "Issuer url set in dex config.").Required().String(), + sub: cmd.Flag("sub", "Subject id.").Required().String(), + audience: cmd.Flag("audience", "Client id set in dex config.").Required().String(), + email: cmd.Flag("email", "Email will be set in token.").Required().String(), + role: cmd.Flag("role", "Role will be set in token.").Default("VIEWER").Enum("VIEWER", "EDITOR", "OWNER"), + output: cmd.Flag("output", "Path of file to write service token.").Required().String(), + } + r.RegisterCommand(command) + return command +} + +func (c *command) Run(ctx context.Context, 
metrics metrics.Metrics, logger *zap.Logger) error { + signer, err := token.NewSigner(*c.keyPath) + if err != nil { + logger.Error("Failed to create signer", zap.Error(err)) + return err + } + role, ok := accountproto.Account_Role_value[*c.role] + if !ok { + logger.Error("Wrong role parameter", zap.String("role", *c.role)) + return errors.New("wrong role parameter") + } + idToken := &token.IDToken{ + Issuer: *c.issuer, + Subject: *c.sub, + Audience: *c.audience, + Expiry: time.Now().AddDate(100, 0, 0), + IssuedAt: time.Now(), + Email: *c.email, + AdminRole: accountproto.Account_Role(role), + } + signedIDToken, err := signer.Sign(idToken) + if err != nil { + logger.Error("Failed to sign token", zap.Error(err)) + return err + } + if err := ioutil.WriteFile(*c.output, []byte(signedIDToken), 0600); err != nil { + logger.Error("Failed to write token to file", zap.Error(err), zap.String("output", *c.output)) + return err + } + logger.Info("Token generated") + return nil +} diff --git a/hack/generate-service-token/main.go b/hack/generate-service-token/main.go new file mode 100644 index 000000000..17f62540d --- /dev/null +++ b/hack/generate-service-token/main.go @@ -0,0 +1,35 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "log" + + "github.com/bucketeer-io/bucketeer/pkg/cli" +) + +var ( + name = "generate-service-token" + version = "" + build = "" +) + +func main() { + app := cli.NewApp(name, "Bucketeer tool", version, build) + registerCommand(app, app) + if err := app.Run(); err != nil { + log.Fatal(err) + } +} diff --git a/manifests/bucketeer/.helmignore b/manifests/bucketeer/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/manifests/bucketeer/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/manifests/bucketeer/Chart.yaml b/manifests/bucketeer/Chart.yaml new file mode 100644 index 000000000..c1af713d8 --- /dev/null +++ b/manifests/bucketeer/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +name: bucketeer +description: A Helm chart for bucketeer +version: 0.0.0 +appVersion: 0.0.0 diff --git a/manifests/bucketeer/charts/account-apikey-cacher/.helmignore b/manifests/bucketeer/charts/account-apikey-cacher/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/manifests/bucketeer/charts/account-apikey-cacher/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/manifests/bucketeer/charts/account-apikey-cacher/Chart.yaml b/manifests/bucketeer/charts/account-apikey-cacher/Chart.yaml new file mode 100644 index 000000000..d1bfb1516 --- /dev/null +++ b/manifests/bucketeer/charts/account-apikey-cacher/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for bucketeer-account-apikey-cacher +name: account-apikey-cacher +version: 1.0.0 diff --git a/manifests/bucketeer/charts/account-apikey-cacher/templates/NOTES.txt b/manifests/bucketeer/charts/account-apikey-cacher/templates/NOTES.txt new file mode 100644 index 000000000..0dcfdca2e --- /dev/null +++ b/manifests/bucketeer/charts/account-apikey-cacher/templates/NOTES.txt @@ -0,0 +1,15 @@ +1. Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "account-apikey-cacher.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "account-apikey-cacher.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "account-apikey-cacher.fullname" . 
}} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "account-apikey-cacher.name" . }},release={{ template "account-apikey-cacher.fullname" . }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/manifests/bucketeer/charts/account-apikey-cacher/templates/_helpers.tpl b/manifests/bucketeer/charts/account-apikey-cacher/templates/_helpers.tpl new file mode 100644 index 000000000..d6e56733e --- /dev/null +++ b/manifests/bucketeer/charts/account-apikey-cacher/templates/_helpers.tpl @@ -0,0 +1,48 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "account-apikey-cacher.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "account-apikey-cacher.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "account-apikey-cacher.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "service-cert-secret" -}} +{{- if .Values.tls.service.secret }} +{{- printf "%s" .Values.tls.service.secret -}} +{{- else -}} +{{ template "account-apikey-cacher.fullname" . }}-service-cert +{{- end -}} +{{- end -}} + +{{- define "service-token-secret" -}} +{{- if .Values.serviceToken.secret }} +{{- printf "%s" .Values.serviceToken.secret -}} +{{- else -}} +{{ template "account-apikey-cacher.fullname" . }}-service-token +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/manifests/bucketeer/charts/account-apikey-cacher/templates/deployment.yaml b/manifests/bucketeer/charts/account-apikey-cacher/templates/deployment.yaml new file mode 100644 index 000000000..30c5cc7b6 --- /dev/null +++ b/manifests/bucketeer/charts/account-apikey-cacher/templates/deployment.yaml @@ -0,0 +1,163 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "account-apikey-cacher.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "account-apikey-cacher.name" . }} + chart: {{ template "account-apikey-cacher.chart" . }} + release: {{ template "account-apikey-cacher.fullname" . }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "account-apikey-cacher.name" . }} + release: {{ template "account-apikey-cacher.fullname" . }} + template: + metadata: + labels: + app: {{ template "account-apikey-cacher.name" . }} + release: {{ template "account-apikey-cacher.fullname" . }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/envoy-configmap.yaml") . | sha256sum }} + spec: + {{- with .Values.global.image.imagePullSecrets }} + imagePullSecrets: {{- toYaml . 
| nindent 8 }} + {{- end }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + volumes: + - name: envoy-config + configMap: + name: {{ template "account-apikey-cacher.fullname" . }}-envoy-config + - name: service-cert-secret + secret: + secretName: {{ template "service-cert-secret" . }} + - name: service-token-secret + secret: + secretName: {{ template "service-token-secret" . }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: ["apikey-cacher"] + env: + - name: BUCKETEER_ACCOUNT_PROJECT + value: "{{ .Values.env.project }}" + - name: BUCKETEER_ACCOUNT_ACCOUNT_SERVICE + value: "{{ .Values.env.accountService }}" + - name: BUCKETEER_ACCOUNT_ENVIRONMENT_SERVICE + value: "{{ .Values.env.environmentService }}" + - name: BUCKETEER_ACCOUNT_TOPIC + value: "{{ .Values.env.topic }}" + - name: BUCKETEER_ACCOUNT_SUBSCRIPTION + value: "{{ .Values.env.subscription }}" + - name: BUCKETEER_ACCOUNT_MAX_MPS + value: "{{ .Values.env.maxMps }}" + - name: BUCKETEER_ACCOUNT_NUM_WORKERS + value: "{{ .Values.env.numWorkers }}" + - name: BUCKETEER_ACCOUNT_FLUSH_SIZE + value: "{{ .Values.env.flushSize }}" + - name: BUCKETEER_ACCOUNT_FLUSH_INTERVAL + value: "{{ .Values.env.flushInterval }}" + - name: BUCKETEER_ACCOUNT_PULLER_NUM_GOROUTINES + value: "{{ .Values.env.pullerNumGoroutines }}" + - name: BUCKETEER_ACCOUNT_PULLER_MAX_OUTSTANDING_MESSAGES + value: "{{ .Values.env.pullerMaxOutstandingMessages }}" + - name: BUCKETEER_ACCOUNT_PULLER_MAX_OUTSTANDING_BYTES + value: "{{ .Values.env.pullerMaxOutstandingBytes }}" + - name: BUCKETEER_ACCOUNT_REDIS_SERVER_NAME + value: "{{ .Values.env.redis.serverName }}" + - name: BUCKETEER_ACCOUNT_REDIS_ADDR + value: "{{ .Values.env.redis.addr }}" + - name: BUCKETEER_ACCOUNT_PORT + value: "{{ .Values.env.port }}" + - name: BUCKETEER_ACCOUNT_METRICS_PORT + value: "{{ 
.Values.env.metricsPort }}" + - name: BUCKETEER_ACCOUNT_LOG_LEVEL + value: "{{ .Values.env.logLevel }}" + - name: BUCKETEER_ACCOUNT_SERVICE_TOKEN + value: /usr/local/service-token/token + - name: BUCKETEER_ACCOUNT_CERT + value: /usr/local/certs/service/tls.crt + - name: BUCKETEER_ACCOUNT_KEY + value: /usr/local/certs/service/tls.key + volumeMounts: + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + - name: service-token-secret + mountPath: /usr/local/service-token + readOnly: true + ports: + - name: service + containerPort: {{ .Values.env.port }} + - name: metrics + containerPort: {{ .Values.env.metricsPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: service + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: service + scheme: HTTPS + resources: +{{ toYaml .Values.resources | indent 12 }} + - name: envoy + image: "{{ .Values.envoy.image.repository }}:{{ .Values.envoy.image.tag }}" + imagePullPolicy: {{ .Values.envoy.image.pullPolicy }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-c" + - "while [ $(netstat -plunt | grep tcp | grep -v envoy | wc -l) -ne 0 ]; do sleep 1; done;" + command: ["envoy"] + args: + - "-c" + - "/usr/local/conf/config.yaml" + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + - name: envoy-config + mountPath: /usr/local/conf/ + readOnly: true + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + ports: + - name: envoy + containerPort: {{ .Values.envoy.port }} + - name: admin + containerPort: {{ .Values.envoy.adminPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + 
failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + resources: +{{ toYaml .Values.envoy.resources | indent 12 }} + strategy: + type: RollingUpdate diff --git a/manifests/bucketeer/charts/account-apikey-cacher/templates/envoy-configmap.yaml b/manifests/bucketeer/charts/account-apikey-cacher/templates/envoy-configmap.yaml new file mode 100644 index 000000000..49e26f1ac --- /dev/null +++ b/manifests/bucketeer/charts/account-apikey-cacher/templates/envoy-configmap.yaml @@ -0,0 +1,283 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "account-apikey-cacher.fullname" . }}-envoy-config + namespace: {{ .Values.namespace }} + labels: + app: {{ template "account-apikey-cacher.name" . }} + chart: {{ template "account-apikey-cacher.chart" . }} + release: {{ template "account-apikey-cacher.fullname" . 
}} + heritage: {{ .Release.Service }} +data: + config.yaml: |- + admin: + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 + static_resources: + clusters: + - name: account-apikey-cacher + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + lb_policy: round_robin + load_assignment: + cluster_name: account-apikey-cacher + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 9090 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + - name: account + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + ignore_health_on_host_removal: true + lb_policy: round_robin + load_assignment: + cluster_name: account + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: account.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + 
tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + - name: environment + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + lb_policy: round_robin + load_assignment: + cluster_name: environment + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: environment.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + listeners: + - name: ingress + address: + socket_address: + address: 0.0.0.0 + port_value: 9000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + 
name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + account-apikey-cacher: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: ingress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: / + route: + cluster: account-apikey-cacher + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: ingress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true + - name: egress + address: + socket_address: + address: 127.0.0.1 + port_value: 9001 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + account: + value: 25 + 
environment: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: egress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.account.AccountService + route: + cluster: account + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.environment.EnvironmentService + route: + cluster: environment + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: egress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true diff --git a/manifests/bucketeer/charts/account-apikey-cacher/templates/hpa.yaml b/manifests/bucketeer/charts/account-apikey-cacher/templates/hpa.yaml new file mode 100644 index 000000000..392607cdd --- /dev/null +++ b/manifests/bucketeer/charts/account-apikey-cacher/templates/hpa.yaml @@ -0,0 +1,19 @@ +{{ if .Values.hpa.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "account-apikey-cacher.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "account-apikey-cacher.fullname" . 
}} + minReplicas: {{ .Values.hpa.minReplicas }} + maxReplicas: {{ .Values.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpa.metrics.cpu.targetAverageUtilization }} +{{ end }} diff --git a/manifests/bucketeer/charts/account-apikey-cacher/templates/service-cert-secret.yaml b/manifests/bucketeer/charts/account-apikey-cacher/templates/service-cert-secret.yaml new file mode 100644 index 000000000..f3092cb76 --- /dev/null +++ b/manifests/bucketeer/charts/account-apikey-cacher/templates/service-cert-secret.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.tls.service.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "account-apikey-cacher.fullname" . }}-service-cert + namespace: {{ .Values.namespace }} + labels: + app: {{ template "account-apikey-cacher.name" . }} + chart: {{ template "account-apikey-cacher.chart" . }} + release: {{ template "account-apikey-cacher.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + tls.crt: {{ required "Service TLS certificate is required" .Values.tls.service.cert | b64enc | quote }} + tls.key: {{ required "Service TLS key is required" .Values.tls.service.key | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/account-apikey-cacher/templates/service-token-secret.yaml b/manifests/bucketeer/charts/account-apikey-cacher/templates/service-token-secret.yaml new file mode 100644 index 000000000..d56c7e4c3 --- /dev/null +++ b/manifests/bucketeer/charts/account-apikey-cacher/templates/service-token-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.serviceToken.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "account-apikey-cacher.fullname" . }}-service-token + namespace: {{ .Values.namespace }} + labels: + app: {{ template "account-apikey-cacher.name" . }} + chart: {{ template "account-apikey-cacher.chart" . }} + release: {{ template "account-apikey-cacher.fullname" . 
}} + heritage: {{ .Release.Service }} +type: Opaque +data: + token: {{ required "Service token is required" .Values.serviceToken.token | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/account-apikey-cacher/templates/service.yaml b/manifests/bucketeer/charts/account-apikey-cacher/templates/service.yaml new file mode 100644 index 000000000..c40457bb8 --- /dev/null +++ b/manifests/bucketeer/charts/account-apikey-cacher/templates/service.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "account-apikey-cacher.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "account-apikey-cacher.name" . }} + chart: {{ template "account-apikey-cacher.chart" . }} + release: {{ template "account-apikey-cacher.fullname" . }} + heritage: {{ .Release.Service }} + envoy: "true" + metrics: "true" +spec: + type: {{ .Values.service.type }} + clusterIP: {{ .Values.service.clusterIP }} + ports: + - name: service + port: {{ .Values.service.externalPort }} + targetPort: envoy + protocol: TCP + - name: metrics + port: {{ .Values.env.metricsPort }} + protocol: TCP + - name: admin + port: {{ .Values.envoy.adminPort }} + protocol: TCP + selector: + app: {{ template "account-apikey-cacher.name" . }} + release: {{ template "account-apikey-cacher.fullname" . 
}} diff --git a/manifests/bucketeer/charts/account-apikey-cacher/values.yaml b/manifests/bucketeer/charts/account-apikey-cacher/values.yaml new file mode 100644 index 000000000..0aca95330 --- /dev/null +++ b/manifests/bucketeer/charts/account-apikey-cacher/values.yaml @@ -0,0 +1,71 @@ +image: + repository: ghcr.io/bucketeer-io/bucketeer-account + pullPolicy: IfNotPresent + +fullnameOverride: "account-apikey-cacher" + +namespace: + +env: + project: + accountService: localhost:9001 + environmentService: localhost:9001 + maxMps: "1000" + numWorkers: 2 + flushSize: 100 + flushInterval: 2s + pullerNumGoroutines: 5 + pullerMaxOutstandingMessages: "1000" + pullerMaxOutstandingBytes: "1000000000" + redis: + serverName: + addr: + logLevel: info + port: 9090 + metricsPort: 9002 + topic: + subscription: + +affinity: {} + +nodeSelector: {} + +hpa: + enabled: + minReplicas: + maxReplicas: + metrics: + cpu: + targetAverageUtilization: + +tls: + service: + secret: + cert: + key: + +serviceToken: + secret: + token: + +envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + +service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + +health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + +resources: {} diff --git a/manifests/bucketeer/charts/account/.helmignore b/manifests/bucketeer/charts/account/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/manifests/bucketeer/charts/account/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/manifests/bucketeer/charts/account/Chart.yaml b/manifests/bucketeer/charts/account/Chart.yaml new file mode 100644 index 000000000..6322acaf2 --- /dev/null +++ b/manifests/bucketeer/charts/account/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for bucketeer-account +name: account +version: 1.0.0 diff --git a/manifests/bucketeer/charts/account/templates/NOTES.txt b/manifests/bucketeer/charts/account/templates/NOTES.txt new file mode 100644 index 000000000..17aa0e5e8 --- /dev/null +++ b/manifests/bucketeer/charts/account/templates/NOTES.txt @@ -0,0 +1,15 @@ +1. Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "account.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "account.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "account.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "account.name" . }},release={{ template "account.fullname" . 
}}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/manifests/bucketeer/charts/account/templates/_helpers.tpl b/manifests/bucketeer/charts/account/templates/_helpers.tpl new file mode 100644 index 000000000..8772aa560 --- /dev/null +++ b/manifests/bucketeer/charts/account/templates/_helpers.tpl @@ -0,0 +1,56 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "account.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "account.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "account.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "service-cert-secret" -}} +{{- if .Values.tls.service.secret }} +{{- printf "%s" .Values.tls.service.secret -}} +{{- else -}} +{{ template "account.fullname" . }}-service-cert +{{- end -}} +{{- end -}} + +{{- define "oauth-key-secret" -}} +{{- if .Values.oauth.key.secret }} +{{- printf "%s" .Values.oauth.key.secret -}} +{{- else -}} +{{ template "account.fullname" . 
}}-oauth-key +{{- end -}} +{{- end -}} + +{{- define "service-token-secret" -}} +{{- if .Values.serviceToken.secret }} +{{- printf "%s" .Values.serviceToken.secret -}} +{{- else -}} +{{ template "account.fullname" . }}-service-token +{{- end -}} +{{- end -}} diff --git a/manifests/bucketeer/charts/account/templates/deployment.yaml b/manifests/bucketeer/charts/account/templates/deployment.yaml new file mode 100644 index 000000000..0f3b13fd6 --- /dev/null +++ b/manifests/bucketeer/charts/account/templates/deployment.yaml @@ -0,0 +1,163 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "account.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "account.name" . }} + chart: {{ template "account.chart" . }} + release: {{ template "account.fullname" . }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "account.name" . }} + release: {{ template "account.fullname" . }} + template: + metadata: + labels: + app: {{ template "account.name" . }} + release: {{ template "account.fullname" . }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/envoy-configmap.yaml") . | sha256sum }} + spec: + {{- with .Values.global.image.imagePullSecrets }} + imagePullSecrets: {{- toYaml . | nindent 8 }} + {{- end }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + volumes: + - name: envoy-config + configMap: + name: {{ template "account.fullname" . }}-envoy-config + - name: service-cert-secret + secret: + secretName: {{ template "service-cert-secret" . }} + - name: service-token-secret + secret: + secretName: {{ template "service-token-secret" . }} + - name: oauth-key-secret + secret: + secretName: {{ template "oauth-key-secret" . 
}} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: ["server"] + env: + - name: BUCKETEER_ACCOUNT_PROJECT + value: "{{ .Values.env.project }}" + - name: BUCKETEER_ACCOUNT_MYSQL_USER + value: "{{ .Values.env.mysqlUser }}" + - name: BUCKETEER_ACCOUNT_MYSQL_PASS + value: "{{ .Values.env.mysqlPass }}" + - name: BUCKETEER_ACCOUNT_MYSQL_HOST + value: "{{ .Values.env.mysqlHost }}" + - name: BUCKETEER_ACCOUNT_MYSQL_PORT + value: "{{ .Values.env.mysqlPort }}" + - name: BUCKETEER_ACCOUNT_MYSQL_DB_NAME + value: "{{ .Values.env.mysqlDbName }}" + - name: BUCKETEER_ACCOUNT_TOPIC + value: "{{ .Values.env.topic }}" + - name: BUCKETEER_ACCOUNT_ENVIRONMENT_SERVICE + value: "{{ .Values.env.environmentService }}" + - name: BUCKETEER_ACCOUNT_PORT + value: "{{ .Values.env.port }}" + - name: BUCKETEER_ACCOUNT_METRICS_PORT + value: "{{ .Values.env.metricsPort }}" + - name: BUCKETEER_ACCOUNT_LOG_LEVEL + value: "{{ .Values.env.logLevel }}" + - name: BUCKETEER_ACCOUNT_OAUTH_CLIENT_ID + value: "{{ .Values.oauth.clientId }}" + - name: BUCKETEER_ACCOUNT_OAUTH_ISSUER + value: "{{ .Values.oauth.issuer }}" + - name: BUCKETEER_ACCOUNT_OAUTH_KEY + value: /usr/local/oauth-key/public.pem + - name: BUCKETEER_ACCOUNT_CERT + value: /usr/local/certs/service/tls.crt + - name: BUCKETEER_ACCOUNT_KEY + value: /usr/local/certs/service/tls.key + - name: BUCKETEER_ACCOUNT_SERVICE_TOKEN + value: /usr/local/service-token/token + volumeMounts: + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + - name: service-token-secret + mountPath: /usr/local/service-token + readOnly: true + - name: oauth-key-secret + mountPath: /usr/local/oauth-key + readOnly: true + ports: + - name: service + containerPort: {{ .Values.env.port }} + - name: metrics + containerPort: {{ .Values.env.metricsPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds 
}} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: service + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: service + scheme: HTTPS + resources: +{{ toYaml .Values.resources | indent 12 }} + - name: envoy + image: "{{ .Values.envoy.image.repository }}:{{ .Values.envoy.image.tag }}" + imagePullPolicy: {{ .Values.envoy.image.pullPolicy }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-c" + - "while [ $(netstat -plunt | grep tcp | grep -v envoy | wc -l) -ne 0 ]; do sleep 1; done;" + command: ["envoy"] + args: + - "-c" + - "/usr/local/conf/config.yaml" + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + - name: envoy-config + mountPath: /usr/local/conf/ + readOnly: true + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + ports: + - name: envoy + containerPort: {{ .Values.envoy.port }} + - name: admin + containerPort: {{ .Values.envoy.adminPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + resources: +{{ toYaml .Values.envoy.resources | indent 12 }} + strategy: + type: RollingUpdate diff --git a/manifests/bucketeer/charts/account/templates/envoy-configmap.yaml b/manifests/bucketeer/charts/account/templates/envoy-configmap.yaml new file mode 100644 index 000000000..e89de69f4 --- /dev/null +++ b/manifests/bucketeer/charts/account/templates/envoy-configmap.yaml @@ -0,0 +1,229 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "account.fullname" . 
}}-envoy-config + namespace: {{ .Values.namespace }} + labels: + app: {{ template "account.name" . }} + chart: {{ template "account.chart" . }} + release: {{ template "account.fullname" . }} + heritage: {{ .Release.Service }} +data: + config.yaml: |- + admin: + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 + static_resources: + clusters: + - name: account + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + lb_policy: round_robin + load_assignment: + cluster_name: account + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 9090 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + - name: environment + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + ignore_health_on_host_removal: true + lb_policy: round_robin + load_assignment: + cluster_name: environment + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: environment.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: 
envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + listeners: + - address: + socket_address: + address: 0.0.0.0 + port_value: 9000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + account: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: ingress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: / + route: + cluster: account + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: ingress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': 
type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true + name: ingress + - address: + socket_address: + address: 127.0.0.1 + port_value: 9001 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + environment: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: egress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.environment.EnvironmentService + route: + cluster: environment + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: egress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true + name: egress diff --git 
a/manifests/bucketeer/charts/account/templates/hpa.yaml b/manifests/bucketeer/charts/account/templates/hpa.yaml new file mode 100644 index 000000000..c430d88cc --- /dev/null +++ b/manifests/bucketeer/charts/account/templates/hpa.yaml @@ -0,0 +1,19 @@ +{{ if .Values.hpa.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "account.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "account.fullname" . }} + minReplicas: {{ .Values.hpa.minReplicas }} + maxReplicas: {{ .Values.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpa.metrics.cpu.targetAverageUtilization }} +{{ end }} diff --git a/manifests/bucketeer/charts/account/templates/oauth-key-secret.yaml b/manifests/bucketeer/charts/account/templates/oauth-key-secret.yaml new file mode 100644 index 000000000..8de413d9c --- /dev/null +++ b/manifests/bucketeer/charts/account/templates/oauth-key-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.oauth.key.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "account.fullname" . }}-oauth-key + namespace: {{ .Values.namespace }} + labels: + app: {{ template "account.name" . }} + chart: {{ template "account.chart" . }} + release: {{ template "account.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + public.pem: {{ required "OAuth key is required" .Values.oauth.key.public | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/account/templates/pdb.yaml b/manifests/bucketeer/charts/account/templates/pdb.yaml new file mode 100644 index 000000000..5c79961cb --- /dev/null +++ b/manifests/bucketeer/charts/account/templates/pdb.yaml @@ -0,0 +1,12 @@ +{{ if .Values.pdb.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "account.fullname" . 
}} + namespace: {{ .Values.namespace }} +spec: + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + selector: + matchLabels: + app: {{ template "account.name" . }} +{{ end }} diff --git a/manifests/bucketeer/charts/account/templates/service-cert-secret.yaml b/manifests/bucketeer/charts/account/templates/service-cert-secret.yaml new file mode 100644 index 000000000..e756d4d58 --- /dev/null +++ b/manifests/bucketeer/charts/account/templates/service-cert-secret.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.tls.service.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "account.fullname" . }}-service-cert + namespace: {{ .Values.namespace }} + labels: + app: {{ template "account.name" . }} + chart: {{ template "account.chart" . }} + release: {{ template "account.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + tls.crt: {{ required "Service TLS certificate is required" .Values.tls.service.cert | b64enc | quote }} + tls.key: {{ required "Service TLS key is required" .Values.tls.service.key | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/account/templates/service-token-secret.yaml b/manifests/bucketeer/charts/account/templates/service-token-secret.yaml new file mode 100644 index 000000000..ce64d8795 --- /dev/null +++ b/manifests/bucketeer/charts/account/templates/service-token-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.serviceToken.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "account.fullname" . }}-service-token + namespace: {{ .Values.namespace }} + labels: + app: {{ template "account.name" . }} + chart: {{ template "account.chart" . }} + release: {{ template "account.fullname" . 
}} + heritage: {{ .Release.Service }} +type: Opaque +data: + token: {{ required "Service token is required" .Values.serviceToken.token | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/account/templates/service.yaml b/manifests/bucketeer/charts/account/templates/service.yaml new file mode 100644 index 000000000..178c80be8 --- /dev/null +++ b/manifests/bucketeer/charts/account/templates/service.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "account.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "account.name" . }} + chart: {{ template "account.chart" . }} + release: {{ template "account.fullname" . }} + heritage: {{ .Release.Service }} + envoy: "true" + metrics: "true" +spec: + type: {{ .Values.service.type }} + clusterIP: {{ .Values.service.clusterIP }} + ports: + - name: service + port: {{ .Values.service.externalPort }} + targetPort: envoy + protocol: TCP + - name: metrics + port: {{ .Values.env.metricsPort }} + protocol: TCP + - name: admin + port: {{ .Values.envoy.adminPort }} + protocol: TCP + selector: + app: {{ template "account.name" . }} + release: {{ template "account.fullname" . 
}} diff --git a/manifests/bucketeer/charts/account/values.yaml b/manifests/bucketeer/charts/account/values.yaml new file mode 100644 index 000000000..d52270e52 --- /dev/null +++ b/manifests/bucketeer/charts/account/values.yaml @@ -0,0 +1,75 @@ +image: + repository: ghcr.io/bucketeer-io/bucketeer-account + pullPolicy: IfNotPresent + +fullnameOverride: "account" + +namespace: + +env: + project: + mysqlUser: + mysqlPass: + mysqlHost: + mysqlPort: 3306 + mysqlDbName: + topic: + environmentService: localhost:9001 + logLevel: info + port: 9090 + metricsPort: 9002 + +affinity: {} + +nodeSelector: {} + +pdb: + enabled: + maxUnavailable: 50% + +hpa: + enabled: + minReplicas: + maxReplicas: + metrics: + cpu: + targetAverageUtilization: + +tls: + service: + secret: + cert: + key: + +serviceToken: + secret: + token: + +oauth: + key: + secret: + public: + clientId: + issuer: + +envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + +service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + +health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + +resources: {} diff --git a/manifests/bucketeer/charts/api-gateway/.helmignore b/manifests/bucketeer/charts/api-gateway/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/manifests/bucketeer/charts/api-gateway/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/manifests/bucketeer/charts/api-gateway/Chart.yaml b/manifests/bucketeer/charts/api-gateway/Chart.yaml new file mode 100644 index 000000000..e40832feb --- /dev/null +++ b/manifests/bucketeer/charts/api-gateway/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for bucketeer-gateway (L7) +name: api-gateway +version: 1.0.0 diff --git a/manifests/bucketeer/charts/api-gateway/templates/NOTES.txt b/manifests/bucketeer/charts/api-gateway/templates/NOTES.txt new file mode 100644 index 000000000..5678f3ca5 --- /dev/null +++ b/manifests/bucketeer/charts/api-gateway/templates/NOTES.txt @@ -0,0 +1,3 @@ +export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "api-gateway.fullname" . }}) +export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") +echo http://$NODE_IP:$NODE_PORT \ No newline at end of file diff --git a/manifests/bucketeer/charts/api-gateway/templates/_helpers.tpl b/manifests/bucketeer/charts/api-gateway/templates/_helpers.tpl new file mode 100644 index 000000000..7ad54712e --- /dev/null +++ b/manifests/bucketeer/charts/api-gateway/templates/_helpers.tpl @@ -0,0 +1,48 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "api-gateway.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "api-gateway.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "api-gateway.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "service-cert-secret" -}} +{{- if .Values.tls.service.secret }} +{{- printf "%s" .Values.tls.service.secret -}} +{{- else -}} +{{ template "api-gateway.fullname" . }}-service-cert +{{- end -}} +{{- end -}} + +{{- define "service-token-secret" -}} +{{- if .Values.serviceToken.secret }} +{{- printf "%s" .Values.serviceToken.secret -}} +{{- else -}} +{{ template "api-gateway.fullname" . }}-service-token +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/manifests/bucketeer/charts/api-gateway/templates/backend-config.yaml b/manifests/bucketeer/charts/api-gateway/templates/backend-config.yaml new file mode 100644 index 000000000..57a544b62 --- /dev/null +++ b/manifests/bucketeer/charts/api-gateway/templates/backend-config.yaml @@ -0,0 +1,9 @@ +apiVersion: cloud.google.com/v1beta1 +kind: BackendConfig +metadata: + name: {{ template "api-gateway.fullname" . 
}} + namespace: {{ .Values.namespace }} +spec: + timeoutSec: 40 + connectionDraining: + drainingTimeoutSec: 60 diff --git a/manifests/bucketeer/charts/api-gateway/templates/deployment.yaml b/manifests/bucketeer/charts/api-gateway/templates/deployment.yaml new file mode 100644 index 000000000..235e59cc4 --- /dev/null +++ b/manifests/bucketeer/charts/api-gateway/templates/deployment.yaml @@ -0,0 +1,180 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "api-gateway.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "api-gateway.name" . }} + chart: {{ template "api-gateway.chart" . }} + release: {{ template "api-gateway.fullname" . }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "api-gateway.name" . }} + release: {{ template "api-gateway.fullname" . }} + template: + metadata: + labels: + app: {{ template "api-gateway.name" . }} + release: {{ template "api-gateway.fullname" . }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/envoy-configmap.yaml") . | sha256sum }} + spec: + {{- with .Values.global.image.imagePullSecrets }} + imagePullSecrets: {{- toYaml . | nindent 8 }} + {{- end }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + volumes: + - name: envoy-config + configMap: + name: {{ template "api-gateway.fullname" . }}-envoy-config + - name: secret + secret: + secretName: {{ template "api-gateway.fullname" . }} + - name: service-cert-secret + secret: + secretName: {{ template "service-cert-secret" . }} + - name: service-token-secret + secret: + secretName: {{ template "service-token-secret" . 
}} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: ["server"] + env: + - name: BUCKETEER_GATEWAY_PROJECT + value: "{{ .Values.env.project }}" + - name: BUCKETEER_GATEWAY_BIGTABLE_INSTANCE + value: "{{ .Values.env.bigtableInstance }}" + - name: BUCKETEER_GATEWAY_GOAL_TOPIC + value: "{{ .Values.env.goalTopic }}" + - name: BUCKETEER_GATEWAY_GOAL_BATCH_TOPIC + value: "{{ .Values.env.goalBatchTopic }}" + - name: BUCKETEER_GATEWAY_EVALUATION_TOPIC + value: "{{ .Values.env.evaluationTopic }}" + - name: BUCKETEER_GATEWAY_USER_TOPIC + value: "{{ .Values.env.userTopic }}" + - name: BUCKETEER_GATEWAY_METRICS_TOPIC + value: "{{ .Values.env.metricsTopic }}" + - name: BUCKETEER_GATEWAY_PUBLISH_NUM_GOROUTINES + value: "{{ .Values.env.publishNumGoroutines }}" + - name: BUCKETEER_GATEWAY_PUBLISH_TIMEOUT + value: "{{ .Values.env.publishTimeout }}" + - name: BUCKETEER_GATEWAY_REDIS_SERVER_NAME + value: "{{ .Values.env.redis.serverName }}" + - name: BUCKETEER_GATEWAY_REDIS_ADDR + value: "{{ .Values.env.redis.addr }}" + - name: BUCKETEER_GATEWAY_REDIS_POOL_MAX_IDLE + value: "{{ .Values.env.redis.poolMaxIdle }}" + - name: BUCKETEER_GATEWAY_REDIS_POOL_MAX_ACTIVE + value: "{{ .Values.env.redis.poolMaxActive }}" + - name: BUCKETEER_GATEWAY_OLDEST_EVENT_TIMESTAMP + value: "{{ .Values.env.oldestEventTimestamp }}" + - name: BUCKETEER_GATEWAY_FURTHEST_EVENT_TIMESTAMP + value: "{{ .Values.env.furthestEventTimestamp }}" + - name: BUCKETEER_GATEWAY_FEATURE_SERVICE + value: "{{ .Values.env.featureService }}" + - name: BUCKETEER_GATEWAY_ACCOUNT_SERVICE + value: "{{ .Values.env.accountService }}" + - name: BUCKETEER_GATEWAY_PORT + value: "{{ .Values.env.port }}" + - name: BUCKETEER_GATEWAY_METRICS_PORT + value: "{{ .Values.env.metricsPort }}" + - name: BUCKETEER_GATEWAY_LOG_LEVEL + value: "{{ .Values.env.logLevel }}" + - name: BUCKETEER_GATEWAY_TRACE_SAMPLING_PROBABILITY + 
value: "{{ .Values.env.traceSamplingProbability }}" + - name: BUCKETEER_GATEWAY_SERVICE_TOKEN + value: /usr/local/service-token/token + - name: BUCKETEER_GATEWAY_CERT + value: /usr/local/certs/service/tls.crt + - name: BUCKETEER_GATEWAY_KEY + value: /usr/local/certs/service/tls.key + volumeMounts: + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + - name: service-token-secret + mountPath: /usr/local/service-token + readOnly: true + ports: + - name: service + containerPort: {{ .Values.env.port }} + - name: metrics + containerPort: {{ .Values.env.metricsPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: service + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: service + scheme: HTTPS + resources: +{{ toYaml .Values.resources | indent 12 }} + - name: envoy + image: "{{ .Values.envoy.image.repository }}:{{ .Values.envoy.image.tag }}" + imagePullPolicy: {{ .Values.envoy.image.pullPolicy }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-c" + - "while [ $(netstat -plunt | grep tcp | grep -v envoy | wc -l) -ne 0 ]; do sleep 1; done;" + command: ["envoy"] + args: + - "-c" + - "/usr/local/conf/config.yaml" + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + # Forces Envoy free up unused memory + - name: TCMALLOC_RELEASE_RATE + value: "100.0" + volumeMounts: + - name: envoy-config + mountPath: /usr/local/conf + readOnly: true + - name: secret + mountPath: /usr/local/secret + readOnly: true + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + ports: + - name: envoy + containerPort: {{ .Values.envoy.port }} + - name: admin + containerPort: {{ .Values.envoy.adminPort }} + livenessProbe: + 
initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + resources: +{{ toYaml .Values.envoy.resources | indent 12 }} + strategy: + type: RollingUpdate diff --git a/manifests/bucketeer/charts/api-gateway/templates/envoy-configmap.yaml b/manifests/bucketeer/charts/api-gateway/templates/envoy-configmap.yaml new file mode 100644 index 000000000..b27a8fa7b --- /dev/null +++ b/manifests/bucketeer/charts/api-gateway/templates/envoy-configmap.yaml @@ -0,0 +1,373 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "api-gateway.fullname" . }}-envoy-config + namespace: {{ .Values.namespace }} + labels: + app: {{ template "api-gateway.name" . }} + chart: {{ template "api-gateway.chart" . }} + release: {{ template "api-gateway.fullname" . }} + heritage: {{ .Release.Service }} +data: + config.yaml: |- + admin: + access_log: + - name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: "/dev/stdout" + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 + static_resources: + clusters: + - name: api-gateway + connect_timeout: 5s + ignore_health_on_host_removal: true + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + circuit_breakers: + thresholds: + - priority: DEFAULT + max_retries: 3 + # we don't want to break a circuit by number of request, so set a large number. 
+ max_pending_requests: 100000000 + max_requests: 100000000 + load_assignment: + cluster_name: api-gateway + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 9090 + dns_lookup_family: V4_ONLY + lb_policy: round_robin + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: ["h2"] + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + type: strict_dns + - name: api-gateway-rest-v1 + connect_timeout: 5s + ignore_health_on_host_removal: true + health_checks: + - http_health_check: + path: /v1/gateway/health + codec_client_type: 1 # http2.0 + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + circuit_breakers: + thresholds: + - priority: DEFAULT + max_retries: 3 + # we don't want to break a circuit by number of request, so set a large number. 
+ max_pending_requests: 100000000 + max_requests: 100000000 + load_assignment: + cluster_name: api-gateway-rest-v1 + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 8000 + dns_lookup_family: V4_ONLY + lb_policy: round_robin + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: ["h2"] + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + type: strict_dns + - name: feature + connect_timeout: 5s + ignore_health_on_host_removal: true + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + circuit_breakers: + thresholds: + - priority: DEFAULT + max_retries: 3 + # we don't want to break a circuit by number of request, so set a large number. 
+ max_pending_requests: 100000000 + max_requests: 100000000 + load_assignment: + cluster_name: feature + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: feature.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + dns_lookup_family: V4_ONLY + lb_policy: least_request + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: ["h2"] + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + type: strict_dns + - name: account + connect_timeout: 5s + ignore_health_on_host_removal: true + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + circuit_breakers: + thresholds: + - priority: DEFAULT + max_retries: 3 + # we don't want to break a circuit by number of request, so set a large number. 
+ max_pending_requests: 100000000 + max_requests: 100000000 + load_assignment: + cluster_name: account + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: account.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + dns_lookup_family: V4_ONLY + lb_policy: round_robin + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: ["h2"] + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + type: strict_dns + listeners: + - name: ingress + address: + socket_address: + address: 0.0.0.0 + port_value: 9000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + access_log: + name: envoy.access_loggers.file + typed_config: + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + common_http_protocol_options: + # set longer timeout than lb session timeout (600s) + idle_timeout: 620s + http_filters: + - name: envoy.filters.http.health_check + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + api-gateway: + value: 25 + pass_through_mode: false + headers: + - name: :path + string_match: + exact: /health + - name: envoy.filters.http.cors + - name: envoy.filters.http.grpc_web + - name: envoy.filters.http.grpc_json_transcoder + 
typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.grpc_json_transcoder.v3.GrpcJsonTranscoder + proto_descriptor: /usr/local/secret/gateway_proto_descriptor.pb + services: + - bucketeer.gateway.Gateway + print_options: + always_print_primitive_fields: true + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - name: ingress_services + domains: + - "*" + cors: + allow_origin_string_match: + - prefix: "*" + allow_headers: "content-type, x-grpc-web, authorization" + allow_methods: "GET,POST" + allow_credentials: true + max_age: "86400" + routes: + - match: + prefix: /v1/gateway + headers: + - name: content-type + string_match: + exact: application/json + route: + cluster: api-gateway-rest-v1 + timeout: 15s + retry_policy: + retry_on: 5xx + num_retries: 3 + - match: + prefix: / + route: + cluster: api-gateway + timeout: 15s + retry_policy: + retry_on: 5xx + num_retries: 3 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + require_client_certificate: true + common_tls_context: + alpn_protocols: ["h2"] + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + - name: egress + address: + socket_address: + address: 127.0.0.1 + port_value: 9001 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: egress_http + access_log: + name: envoy.access_loggers.file + typed_config: + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + 
cluster_min_healthy_percentages: + account: + value: 25 + feature: + value: 25 + pass_through_mode: false + headers: + - name: :path + string_match: + exact: /health + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - name: egress_services + domains: + - "*" + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.feature.FeatureService + route: + cluster: feature + timeout: 15s + retry_policy: + retry_on: 5xx + num_retries: 3 + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.account.AccountService + route: + cluster: account + timeout: 15s + retry_policy: + retry_on: 5xx + num_retries: 3 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + require_client_certificate: true + common_tls_context: + alpn_protocols: ["h2"] + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key diff --git a/manifests/bucketeer/charts/api-gateway/templates/hpa.yaml b/manifests/bucketeer/charts/api-gateway/templates/hpa.yaml new file mode 100644 index 000000000..1fa1a9234 --- /dev/null +++ b/manifests/bucketeer/charts/api-gateway/templates/hpa.yaml @@ -0,0 +1,19 @@ +{{ if .Values.hpa.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "api-gateway.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "api-gateway.fullname" . 
}} + minReplicas: {{ .Values.hpa.minReplicas }} + maxReplicas: {{ .Values.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpa.metrics.cpu.targetAverageUtilization }} +{{ end }} diff --git a/manifests/bucketeer/charts/api-gateway/templates/ingress.yaml b/manifests/bucketeer/charts/api-gateway/templates/ingress.yaml new file mode 100644 index 000000000..60e503b29 --- /dev/null +++ b/manifests/bucketeer/charts/api-gateway/templates/ingress.yaml @@ -0,0 +1,24 @@ +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ template "api-gateway.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "api-gateway.name" . }} + chart: {{ template "api-gateway.chart" . }} + release: {{ template "api-gateway.fullname" . }} + heritage: {{ .Release.Service }} + annotations: + kubernetes.io/ingress.class: "gce" + kubernetes.io/ingress.allow-http: "false" + kubernetes.io/ingress.global-static-ip-name: {{ .Values.ingress.staticIPName }} +spec: + tls: +{{ toYaml .Values.tls.bucketeerJP.secrets | indent 4 }} + rules: + - host: {{ .Values.ingress.host }} + http: + paths: + - backend: + serviceName: {{ template "api-gateway.fullname" . }} + servicePort: {{ .Values.service.externalPort }} diff --git a/manifests/bucketeer/charts/api-gateway/templates/pdb.yaml b/manifests/bucketeer/charts/api-gateway/templates/pdb.yaml new file mode 100644 index 000000000..bfa7ff1bf --- /dev/null +++ b/manifests/bucketeer/charts/api-gateway/templates/pdb.yaml @@ -0,0 +1,12 @@ +{{ if .Values.pdb.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "api-gateway.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + selector: + matchLabels: + app: {{ template "api-gateway.name" . 
}} +{{ end }} diff --git a/manifests/bucketeer/charts/api-gateway/templates/secret.yaml b/manifests/bucketeer/charts/api-gateway/templates/secret.yaml new file mode 100644 index 000000000..f0c923209 --- /dev/null +++ b/manifests/bucketeer/charts/api-gateway/templates/secret.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "api-gateway.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "api-gateway.name" . }} + chart: {{ template "api-gateway.chart" . }} + release: {{ template "api-gateway.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + gateway_proto_descriptor.pb: {{ required "Envoy descriptor is required" .Values.envoy.descriptor | quote }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/api-gateway/templates/service-cert-secret.yaml b/manifests/bucketeer/charts/api-gateway/templates/service-cert-secret.yaml new file mode 100644 index 000000000..721ae1130 --- /dev/null +++ b/manifests/bucketeer/charts/api-gateway/templates/service-cert-secret.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.tls.service.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "api-gateway.fullname" . }}-service-cert + namespace: {{ .Values.namespace }} + labels: + app: {{ template "api-gateway.name" . }} + chart: {{ template "api-gateway.chart" . }} + release: {{ template "api-gateway.fullname" . 
}} + heritage: {{ .Release.Service }} +type: Opaque +data: + tls.crt: {{ required "Service TLS certificate is required" .Values.tls.service.cert | b64enc | quote }} + tls.key: {{ required "Service TLS key is required" .Values.tls.service.key | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/api-gateway/templates/service-token-secret.yaml b/manifests/bucketeer/charts/api-gateway/templates/service-token-secret.yaml new file mode 100644 index 000000000..7c0d1527d --- /dev/null +++ b/manifests/bucketeer/charts/api-gateway/templates/service-token-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.serviceToken.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "api-gateway.fullname" . }}-service-token + namespace: {{ .Values.namespace }} + labels: + app: {{ template "api-gateway.name" . }} + chart: {{ template "api-gateway.chart" . }} + release: {{ template "api-gateway.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + token: {{ required "Service token is required" .Values.serviceToken.token | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/api-gateway/templates/service.yaml b/manifests/bucketeer/charts/api-gateway/templates/service.yaml new file mode 100644 index 000000000..9d992751b --- /dev/null +++ b/manifests/bucketeer/charts/api-gateway/templates/service.yaml @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "api-gateway.fullname" . }} + namespace: {{ .Values.namespace }} + annotations: + cloud.google.com/app-protocols: '{"service":"HTTP2"}' + cloud.google.com/neg: '{"ingress": true}' + beta.cloud.google.com/backend-config: '{"default": "{{ template "api-gateway.fullname" . }}"}' + labels: + app: {{ template "api-gateway.name" . }} + chart: {{ template "api-gateway.chart" . }} + release: {{ template "api-gateway.fullname" . 
}} + heritage: {{ .Release.Service }} + envoy: "true" + metrics: "true" +spec: + type: NodePort + ports: + - name: service + port: {{ .Values.service.externalPort }} + targetPort: {{ .Values.envoy.port }} + protocol: TCP + - name: metrics + port: {{ .Values.env.metricsPort }} + protocol: TCP + - name: admin + port: {{ .Values.envoy.adminPort }} + protocol: TCP + selector: + app: {{ template "api-gateway.name" . }} + release: {{ template "api-gateway.fullname" . }} diff --git a/manifests/bucketeer/charts/api-gateway/values.yaml b/manifests/bucketeer/charts/api-gateway/values.yaml new file mode 100644 index 000000000..2f8d5b725 --- /dev/null +++ b/manifests/bucketeer/charts/api-gateway/values.yaml @@ -0,0 +1,84 @@ +image: + repository: ghcr.io/bucketeer-io/bucketeer-gateway + pullPolicy: IfNotPresent + +fullnameOverride: "api-gateway" + +namespace: + +env: + project: + bigtableInstance: + goalTopic: + goalBatchTopic: + evaluationTopic: + userTopic: + metricsTopic: + publishNumGoroutines: 200 + publishTimeout: 1m + redis: + serverName: + poolMaxIdle: 50 + poolMaxActive: 200 + addr: + oldestEventTimestamp: + furthestEventTimestamp: + logLevel: info + port: 9090 + metricsPort: 9002 + featureService: localhost:9001 + accountService: localhost:9001 + traceSamplingProbability: 0.0001 + +affinity: {} + +nodeSelector: {} + +pdb: + enabled: + maxUnavailable: 20% + +hpa: + enabled: + minReplicas: + maxReplicas: + metrics: + cpu: + targetAverageUtilization: + +tls: + bucketeerJP: + secrets: + service: + secret: + cert: + key: + +serviceToken: + secret: + token: + +envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + descriptor: 
"CocFChVnb29nbGUvYXBpL2h0dHAucHJvdG8SCmdvb2dsZS5hcGkieQoESHR0cBIqCgVydWxlcxgBIAMoCzIULmdvb2dsZS5hcGkuSHR0cFJ1bGVSBXJ1bGVzEkUKH2Z1bGx5X2RlY29kZV9yZXNlcnZlZF9leHBhbnNpb24YAiABKAhSHGZ1bGx5RGVjb2RlUmVzZXJ2ZWRFeHBhbnNpb24itQIKCEh0dHBSdWxlEhoKCHNlbGVjdG9yGAEgASgJUghzZWxlY3RvchISCgNnZXQYAiABKAlIAFIDZ2V0EhIKA3B1dBgDIAEoCUgAUgNwdXQSFAoEcG9zdBgEIAEoCUgAUgRwb3N0EhgKBmRlbGV0ZRgFIAEoCUgAUgZkZWxldGUSFgoFcGF0Y2gYBiABKAlIAFIFcGF0Y2gSNwoGY3VzdG9tGAggASgLMh0uZ29vZ2xlLmFwaS5DdXN0b21IdHRwUGF0dGVybkgAUgZjdXN0b20SEgoEYm9keRgHIAEoCVIEYm9keRJFChNhZGRpdGlvbmFsX2JpbmRpbmdzGAsgAygLMhQuZ29vZ2xlLmFwaS5IdHRwUnVsZVISYWRkaXRpb25hbEJpbmRpbmdzQgkKB3BhdHRlcm4iOwoRQ3VzdG9tSHR0cFBhdHRlcm4SEgoEa2luZBgBIAEoCVIEa2luZBISCgRwYXRoGAIgASgJUgRwYXRoQmoKDmNvbS5nb29nbGUuYXBpQglIdHRwUHJvdG9QAVpBZ29vZ2xlLmdvbGFuZy5vcmcvZ2VucHJvdG8vZ29vZ2xlYXBpcy9hcGkvYW5ub3RhdGlvbnM7YW5ub3RhdGlvbnP4AQGiAgRHQVBJYgZwcm90bzMKwzsKIGdvb2dsZS9wcm90b2J1Zi9kZXNjcmlwdG9yLnByb3RvEg9nb29nbGUucHJvdG9idWYiTQoRRmlsZURlc2NyaXB0b3JTZXQSOAoEZmlsZRgBIAMoCzIkLmdvb2dsZS5wcm90b2J1Zi5GaWxlRGVzY3JpcHRvclByb3RvUgRmaWxlIuQEChNGaWxlRGVzY3JpcHRvclByb3RvEhIKBG5hbWUYASABKAlSBG5hbWUSGAoHcGFja2FnZRgCIAEoCVIHcGFja2FnZRIeCgpkZXBlbmRlbmN5GAMgAygJUgpkZXBlbmRlbmN5EisKEXB1YmxpY19kZXBlbmRlbmN5GAogAygFUhBwdWJsaWNEZXBlbmRlbmN5EicKD3dlYWtfZGVwZW5kZW5jeRgLIAMoBVIOd2Vha0RlcGVuZGVuY3kSQwoMbWVzc2FnZV90eXBlGAQgAygLMiAuZ29vZ2xlLnByb3RvYnVmLkRlc2NyaXB0b3JQcm90b1ILbWVzc2FnZVR5cGUSQQoJZW51bV90eXBlGAUgAygLMiQuZ29vZ2xlLnByb3RvYnVmLkVudW1EZXNjcmlwdG9yUHJvdG9SCGVudW1UeXBlEkEKB3NlcnZpY2UYBiADKAsyJy5nb29nbGUucHJvdG9idWYuU2VydmljZURlc2NyaXB0b3JQcm90b1IHc2VydmljZRJDCglleHRlbnNpb24YByADKAsyJS5nb29nbGUucHJvdG9idWYuRmllbGREZXNjcmlwdG9yUHJvdG9SCWV4dGVuc2lvbhI2CgdvcHRpb25zGAggASgLMhwuZ29vZ2xlLnByb3RvYnVmLkZpbGVPcHRpb25zUgdvcHRpb25zEkkKEHNvdXJjZV9jb2RlX2luZm8YCSABKAsyHy5nb29nbGUucHJvdG9idWYuU291cmNlQ29kZUluZm9SDnNvdXJjZUNvZGVJbmZvEhYKBnN5bnRheBgMIAEoCVIGc3ludGF4IrkGCg9EZXNjcmlwdG9yUHJvdG8SEgoEbmFtZRgBIAEoCVIEbmFtZRI7CgVmaWVsZBgCIAMoCzIlLmdvb2dsZS5wcm90b2J1Zi5GaWVsZERlc2NyaXB0b3JQcm90b1IFZmllbGQ
SQwoJZXh0ZW5zaW9uGAYgAygLMiUuZ29vZ2xlLnByb3RvYnVmLkZpZWxkRGVzY3JpcHRvclByb3RvUglleHRlbnNpb24SQQoLbmVzdGVkX3R5cGUYAyADKAsyIC5nb29nbGUucHJvdG9idWYuRGVzY3JpcHRvclByb3RvUgpuZXN0ZWRUeXBlEkEKCWVudW1fdHlwZRgEIAMoCzIkLmdvb2dsZS5wcm90b2J1Zi5FbnVtRGVzY3JpcHRvclByb3RvUghlbnVtVHlwZRJYCg9leHRlbnNpb25fcmFuZ2UYBSADKAsyLy5nb29nbGUucHJvdG9idWYuRGVzY3JpcHRvclByb3RvLkV4dGVuc2lvblJhbmdlUg5leHRlbnNpb25SYW5nZRJECgpvbmVvZl9kZWNsGAggAygLMiUuZ29vZ2xlLnByb3RvYnVmLk9uZW9mRGVzY3JpcHRvclByb3RvUglvbmVvZkRlY2wSOQoHb3B0aW9ucxgHIAEoCzIfLmdvb2dsZS5wcm90b2J1Zi5NZXNzYWdlT3B0aW9uc1IHb3B0aW9ucxJVCg5yZXNlcnZlZF9yYW5nZRgJIAMoCzIuLmdvb2dsZS5wcm90b2J1Zi5EZXNjcmlwdG9yUHJvdG8uUmVzZXJ2ZWRSYW5nZVINcmVzZXJ2ZWRSYW5nZRIjCg1yZXNlcnZlZF9uYW1lGAogAygJUgxyZXNlcnZlZE5hbWUaegoORXh0ZW5zaW9uUmFuZ2USFAoFc3RhcnQYASABKAVSBXN0YXJ0EhAKA2VuZBgCIAEoBVIDZW5kEkAKB29wdGlvbnMYAyABKAsyJi5nb29nbGUucHJvdG9idWYuRXh0ZW5zaW9uUmFuZ2VPcHRpb25zUgdvcHRpb25zGjcKDVJlc2VydmVkUmFuZ2USFAoFc3RhcnQYASABKAVSBXN0YXJ0EhAKA2VuZBgCIAEoBVIDZW5kInwKFUV4dGVuc2lvblJhbmdlT3B0aW9ucxJYChR1bmludGVycHJldGVkX29wdGlvbhjnByADKAsyJC5nb29nbGUucHJvdG9idWYuVW5pbnRlcnByZXRlZE9wdGlvblITdW5pbnRlcnByZXRlZE9wdGlvbioJCOgHEICAgIACIsEGChRGaWVsZERlc2NyaXB0b3JQcm90bxISCgRuYW1lGAEgASgJUgRuYW1lEhYKBm51bWJlchgDIAEoBVIGbnVtYmVyEkEKBWxhYmVsGAQgASgOMisuZ29vZ2xlLnByb3RvYnVmLkZpZWxkRGVzY3JpcHRvclByb3RvLkxhYmVsUgVsYWJlbBI+CgR0eXBlGAUgASgOMiouZ29vZ2xlLnByb3RvYnVmLkZpZWxkRGVzY3JpcHRvclByb3RvLlR5cGVSBHR5cGUSGwoJdHlwZV9uYW1lGAYgASgJUgh0eXBlTmFtZRIaCghleHRlbmRlZRgCIAEoCVIIZXh0ZW5kZWUSIwoNZGVmYXVsdF92YWx1ZRgHIAEoCVIMZGVmYXVsdFZhbHVlEh8KC29uZW9mX2luZGV4GAkgASgFUgpvbmVvZkluZGV4EhsKCWpzb25fbmFtZRgKIAEoCVIIanNvbk5hbWUSNwoHb3B0aW9ucxgIIAEoCzIdLmdvb2dsZS5wcm90b2J1Zi5GaWVsZE9wdGlvbnNSB29wdGlvbnMSJwoPcHJvdG8zX29wdGlvbmFsGBEgASgIUg5wcm90bzNPcHRpb25hbCK2AgoEVHlwZRIPCgtUWVBFX0RPVUJMRRABEg4KClRZUEVfRkxPQVQQAhIOCgpUWVBFX0lOVDY0EAMSDwoLVFlQRV9VSU5UNjQQBBIOCgpUWVBFX0lOVDMyEAUSEAoMVFlQRV9GSVhFRDY0EAYSEAoMVFlQRV9GSVhFRDMyEAcSDQoJVFlQRV9CT09MEAgSDwoLVFlQRV9TVFJJTkcQCRIOCgpUWVBFX0dST1VQEAoSEAoMVFlQRV9NRVNTQUdFEAs
SDgoKVFlQRV9CWVRFUxAMEg8KC1RZUEVfVUlOVDMyEA0SDQoJVFlQRV9FTlVNEA4SEQoNVFlQRV9TRklYRUQzMhAPEhEKDVRZUEVfU0ZJWEVENjQQEBIPCgtUWVBFX1NJTlQzMhAREg8KC1RZUEVfU0lOVDY0EBIiQwoFTGFiZWwSEgoOTEFCRUxfT1BUSU9OQUwQARISCg5MQUJFTF9SRVFVSVJFRBACEhIKDkxBQkVMX1JFUEVBVEVEEAMiYwoUT25lb2ZEZXNjcmlwdG9yUHJvdG8SEgoEbmFtZRgBIAEoCVIEbmFtZRI3CgdvcHRpb25zGAIgASgLMh0uZ29vZ2xlLnByb3RvYnVmLk9uZW9mT3B0aW9uc1IHb3B0aW9ucyLjAgoTRW51bURlc2NyaXB0b3JQcm90bxISCgRuYW1lGAEgASgJUgRuYW1lEj8KBXZhbHVlGAIgAygLMikuZ29vZ2xlLnByb3RvYnVmLkVudW1WYWx1ZURlc2NyaXB0b3JQcm90b1IFdmFsdWUSNgoHb3B0aW9ucxgDIAEoCzIcLmdvb2dsZS5wcm90b2J1Zi5FbnVtT3B0aW9uc1IHb3B0aW9ucxJdCg5yZXNlcnZlZF9yYW5nZRgEIAMoCzI2Lmdvb2dsZS5wcm90b2J1Zi5FbnVtRGVzY3JpcHRvclByb3RvLkVudW1SZXNlcnZlZFJhbmdlUg1yZXNlcnZlZFJhbmdlEiMKDXJlc2VydmVkX25hbWUYBSADKAlSDHJlc2VydmVkTmFtZRo7ChFFbnVtUmVzZXJ2ZWRSYW5nZRIUCgVzdGFydBgBIAEoBVIFc3RhcnQSEAoDZW5kGAIgASgFUgNlbmQigwEKGEVudW1WYWx1ZURlc2NyaXB0b3JQcm90bxISCgRuYW1lGAEgASgJUgRuYW1lEhYKBm51bWJlchgCIAEoBVIGbnVtYmVyEjsKB29wdGlvbnMYAyABKAsyIS5nb29nbGUucHJvdG9idWYuRW51bVZhbHVlT3B0aW9uc1IHb3B0aW9ucyKnAQoWU2VydmljZURlc2NyaXB0b3JQcm90bxISCgRuYW1lGAEgASgJUgRuYW1lEj4KBm1ldGhvZBgCIAMoCzImLmdvb2dsZS5wcm90b2J1Zi5NZXRob2REZXNjcmlwdG9yUHJvdG9SBm1ldGhvZBI5CgdvcHRpb25zGAMgASgLMh8uZ29vZ2xlLnByb3RvYnVmLlNlcnZpY2VPcHRpb25zUgdvcHRpb25zIokCChVNZXRob2REZXNjcmlwdG9yUHJvdG8SEgoEbmFtZRgBIAEoCVIEbmFtZRIdCgppbnB1dF90eXBlGAIgASgJUglpbnB1dFR5cGUSHwoLb3V0cHV0X3R5cGUYAyABKAlSCm91dHB1dFR5cGUSOAoHb3B0aW9ucxgEIAEoCzIeLmdvb2dsZS5wcm90b2J1Zi5NZXRob2RPcHRpb25zUgdvcHRpb25zEjAKEGNsaWVudF9zdHJlYW1pbmcYBSABKAg6BWZhbHNlUg9jbGllbnRTdHJlYW1pbmcSMAoQc2VydmVyX3N0cmVhbWluZxgGIAEoCDoFZmFsc2VSD3NlcnZlclN0cmVhbWluZyKRCQoLRmlsZU9wdGlvbnMSIQoMamF2YV9wYWNrYWdlGAEgASgJUgtqYXZhUGFja2FnZRIwChRqYXZhX291dGVyX2NsYXNzbmFtZRgIIAEoCVISamF2YU91dGVyQ2xhc3NuYW1lEjUKE2phdmFfbXVsdGlwbGVfZmlsZXMYCiABKAg6BWZhbHNlUhFqYXZhTXVsdGlwbGVGaWxlcxJECh1qYXZhX2dlbmVyYXRlX2VxdWFsc19hbmRfaGFzaBgUIAEoCEICGAFSGWphdmFHZW5lcmF0ZUVxdWFsc0FuZEhhc2gSOgoWamF2YV9zdHJpbmdfY2hlY2tfdXRmOBgbIAEoCDoFZmFsc2VSE2phdmFTdHJpbmdDaGV
ja1V0ZjgSUwoMb3B0aW1pemVfZm9yGAkgASgOMikuZ29vZ2xlLnByb3RvYnVmLkZpbGVPcHRpb25zLk9wdGltaXplTW9kZToFU1BFRURSC29wdGltaXplRm9yEh0KCmdvX3BhY2thZ2UYCyABKAlSCWdvUGFja2FnZRI1ChNjY19nZW5lcmljX3NlcnZpY2VzGBAgASgIOgVmYWxzZVIRY2NHZW5lcmljU2VydmljZXMSOQoVamF2YV9nZW5lcmljX3NlcnZpY2VzGBEgASgIOgVmYWxzZVITamF2YUdlbmVyaWNTZXJ2aWNlcxI1ChNweV9nZW5lcmljX3NlcnZpY2VzGBIgASgIOgVmYWxzZVIRcHlHZW5lcmljU2VydmljZXMSNwoUcGhwX2dlbmVyaWNfc2VydmljZXMYKiABKAg6BWZhbHNlUhJwaHBHZW5lcmljU2VydmljZXMSJQoKZGVwcmVjYXRlZBgXIAEoCDoFZmFsc2VSCmRlcHJlY2F0ZWQSLgoQY2NfZW5hYmxlX2FyZW5hcxgfIAEoCDoEdHJ1ZVIOY2NFbmFibGVBcmVuYXMSKgoRb2JqY19jbGFzc19wcmVmaXgYJCABKAlSD29iamNDbGFzc1ByZWZpeBIpChBjc2hhcnBfbmFtZXNwYWNlGCUgASgJUg9jc2hhcnBOYW1lc3BhY2USIQoMc3dpZnRfcHJlZml4GCcgASgJUgtzd2lmdFByZWZpeBIoChBwaHBfY2xhc3NfcHJlZml4GCggASgJUg5waHBDbGFzc1ByZWZpeBIjCg1waHBfbmFtZXNwYWNlGCkgASgJUgxwaHBOYW1lc3BhY2USNAoWcGhwX21ldGFkYXRhX25hbWVzcGFjZRgsIAEoCVIUcGhwTWV0YWRhdGFOYW1lc3BhY2USIQoMcnVieV9wYWNrYWdlGC0gASgJUgtydWJ5UGFja2FnZRJYChR1bmludGVycHJldGVkX29wdGlvbhjnByADKAsyJC5nb29nbGUucHJvdG9idWYuVW5pbnRlcnByZXRlZE9wdGlvblITdW5pbnRlcnByZXRlZE9wdGlvbiI6CgxPcHRpbWl6ZU1vZGUSCQoFU1BFRUQQARINCglDT0RFX1NJWkUQAhIQCgxMSVRFX1JVTlRJTUUQAyoJCOgHEICAgIACSgQIJhAnIuMCCg5NZXNzYWdlT3B0aW9ucxI8ChdtZXNzYWdlX3NldF93aXJlX2Zvcm1hdBgBIAEoCDoFZmFsc2VSFG1lc3NhZ2VTZXRXaXJlRm9ybWF0EkwKH25vX3N0YW5kYXJkX2Rlc2NyaXB0b3JfYWNjZXNzb3IYAiABKAg6BWZhbHNlUhxub1N0YW5kYXJkRGVzY3JpcHRvckFjY2Vzc29yEiUKCmRlcHJlY2F0ZWQYAyABKAg6BWZhbHNlUgpkZXByZWNhdGVkEhsKCW1hcF9lbnRyeRgHIAEoCFIIbWFwRW50cnkSWAoUdW5pbnRlcnByZXRlZF9vcHRpb24Y5wcgAygLMiQuZ29vZ2xlLnByb3RvYnVmLlVuaW50ZXJwcmV0ZWRPcHRpb25SE3VuaW50ZXJwcmV0ZWRPcHRpb24qCQjoBxCAgICAAkoECAQQBUoECAUQBkoECAYQB0oECAgQCUoECAkQCiLiAwoMRmllbGRPcHRpb25zEkEKBWN0eXBlGAEgASgOMiMuZ29vZ2xlLnByb3RvYnVmLkZpZWxkT3B0aW9ucy5DVHlwZToGU1RSSU5HUgVjdHlwZRIWCgZwYWNrZWQYAiABKAhSBnBhY2tlZBJHCgZqc3R5cGUYBiABKA4yJC5nb29nbGUucHJvdG9idWYuRmllbGRPcHRpb25zLkpTVHlwZToJSlNfTk9STUFMUgZqc3R5cGUSGQoEbGF6eRgFIAEoCDoFZmFsc2VSBGxhenkSJQoKZGVwcmVjYXRlZBgDIAEoCDoFZmFsc2VSCmRlcHJlY2F0ZWQSGQo
Ed2VhaxgKIAEoCDoFZmFsc2VSBHdlYWsSWAoUdW5pbnRlcnByZXRlZF9vcHRpb24Y5wcgAygLMiQuZ29vZ2xlLnByb3RvYnVmLlVuaW50ZXJwcmV0ZWRPcHRpb25SE3VuaW50ZXJwcmV0ZWRPcHRpb24iLwoFQ1R5cGUSCgoGU1RSSU5HEAASCAoEQ09SRBABEhAKDFNUUklOR19QSUVDRRACIjUKBkpTVHlwZRINCglKU19OT1JNQUwQABINCglKU19TVFJJTkcQARINCglKU19OVU1CRVIQAioJCOgHEICAgIACSgQIBBAFInMKDE9uZW9mT3B0aW9ucxJYChR1bmludGVycHJldGVkX29wdGlvbhjnByADKAsyJC5nb29nbGUucHJvdG9idWYuVW5pbnRlcnByZXRlZE9wdGlvblITdW5pbnRlcnByZXRlZE9wdGlvbioJCOgHEICAgIACIsABCgtFbnVtT3B0aW9ucxIfCgthbGxvd19hbGlhcxgCIAEoCFIKYWxsb3dBbGlhcxIlCgpkZXByZWNhdGVkGAMgASgIOgVmYWxzZVIKZGVwcmVjYXRlZBJYChR1bmludGVycHJldGVkX29wdGlvbhjnByADKAsyJC5nb29nbGUucHJvdG9idWYuVW5pbnRlcnByZXRlZE9wdGlvblITdW5pbnRlcnByZXRlZE9wdGlvbioJCOgHEICAgIACSgQIBRAGIp4BChBFbnVtVmFsdWVPcHRpb25zEiUKCmRlcHJlY2F0ZWQYASABKAg6BWZhbHNlUgpkZXByZWNhdGVkElgKFHVuaW50ZXJwcmV0ZWRfb3B0aW9uGOcHIAMoCzIkLmdvb2dsZS5wcm90b2J1Zi5VbmludGVycHJldGVkT3B0aW9uUhN1bmludGVycHJldGVkT3B0aW9uKgkI6AcQgICAgAIinAEKDlNlcnZpY2VPcHRpb25zEiUKCmRlcHJlY2F0ZWQYISABKAg6BWZhbHNlUgpkZXByZWNhdGVkElgKFHVuaW50ZXJwcmV0ZWRfb3B0aW9uGOcHIAMoCzIkLmdvb2dsZS5wcm90b2J1Zi5VbmludGVycHJldGVkT3B0aW9uUhN1bmludGVycHJldGVkT3B0aW9uKgkI6AcQgICAgAIi4AIKDU1ldGhvZE9wdGlvbnMSJQoKZGVwcmVjYXRlZBghIAEoCDoFZmFsc2VSCmRlcHJlY2F0ZWQScQoRaWRlbXBvdGVuY3lfbGV2ZWwYIiABKA4yLy5nb29nbGUucHJvdG9idWYuTWV0aG9kT3B0aW9ucy5JZGVtcG90ZW5jeUxldmVsOhNJREVNUE9URU5DWV9VTktOT1dOUhBpZGVtcG90ZW5jeUxldmVsElgKFHVuaW50ZXJwcmV0ZWRfb3B0aW9uGOcHIAMoCzIkLmdvb2dsZS5wcm90b2J1Zi5VbmludGVycHJldGVkT3B0aW9uUhN1bmludGVycHJldGVkT3B0aW9uIlAKEElkZW1wb3RlbmN5TGV2ZWwSFwoTSURFTVBPVEVOQ1lfVU5LTk9XThAAEhMKD05PX1NJREVfRUZGRUNUUxABEg4KCklERU1QT1RFTlQQAioJCOgHEICAgIACIpoDChNVbmludGVycHJldGVkT3B0aW9uEkEKBG5hbWUYAiADKAsyLS5nb29nbGUucHJvdG9idWYuVW5pbnRlcnByZXRlZE9wdGlvbi5OYW1lUGFydFIEbmFtZRIpChBpZGVudGlmaWVyX3ZhbHVlGAMgASgJUg9pZGVudGlmaWVyVmFsdWUSLAoScG9zaXRpdmVfaW50X3ZhbHVlGAQgASgEUhBwb3NpdGl2ZUludFZhbHVlEiwKEm5lZ2F0aXZlX2ludF92YWx1ZRgFIAEoA1IQbmVnYXRpdmVJbnRWYWx1ZRIhCgxkb3VibGVfdmFsdWUYBiABKAFSC2RvdWJsZVZhbHVlEiEKDHN0cmluZ192YWx1ZRg
HIAEoDFILc3RyaW5nVmFsdWUSJwoPYWdncmVnYXRlX3ZhbHVlGAggASgJUg5hZ2dyZWdhdGVWYWx1ZRpKCghOYW1lUGFydBIbCgluYW1lX3BhcnQYASACKAlSCG5hbWVQYXJ0EiEKDGlzX2V4dGVuc2lvbhgCIAIoCFILaXNFeHRlbnNpb24ipwIKDlNvdXJjZUNvZGVJbmZvEkQKCGxvY2F0aW9uGAEgAygLMiguZ29vZ2xlLnByb3RvYnVmLlNvdXJjZUNvZGVJbmZvLkxvY2F0aW9uUghsb2NhdGlvbhrOAQoITG9jYXRpb24SFgoEcGF0aBgBIAMoBUICEAFSBHBhdGgSFgoEc3BhbhgCIAMoBUICEAFSBHNwYW4SKQoQbGVhZGluZ19jb21tZW50cxgDIAEoCVIPbGVhZGluZ0NvbW1lbnRzEisKEXRyYWlsaW5nX2NvbW1lbnRzGAQgASgJUhB0cmFpbGluZ0NvbW1lbnRzEjoKGWxlYWRpbmdfZGV0YWNoZWRfY29tbWVudHMYBiADKAlSF2xlYWRpbmdEZXRhY2hlZENvbW1lbnRzItEBChFHZW5lcmF0ZWRDb2RlSW5mbxJNCgphbm5vdGF0aW9uGAEgAygLMi0uZ29vZ2xlLnByb3RvYnVmLkdlbmVyYXRlZENvZGVJbmZvLkFubm90YXRpb25SCmFubm90YXRpb24abQoKQW5ub3RhdGlvbhIWCgRwYXRoGAEgAygFQgIQAVIEcGF0aBIfCgtzb3VyY2VfZmlsZRgCIAEoCVIKc291cmNlRmlsZRIUCgViZWdpbhgDIAEoBVIFYmVnaW4SEAoDZW5kGAQgASgFUgNlbmRCfgoTY29tLmdvb2dsZS5wcm90b2J1ZkIQRGVzY3JpcHRvclByb3Rvc0gBWi1nb29nbGUuZ29sYW5nLm9yZy9wcm90b2J1Zi90eXBlcy9kZXNjcmlwdG9ycGL4AQGiAgNHUEKqAhpHb29nbGUuUHJvdG9idWYuUmVmbGVjdGlvbgqoAgocZ29vZ2xlL2FwaS9hbm5vdGF0aW9ucy5wcm90bxIKZ29vZ2xlLmFwaRoVZ29vZ2xlL2FwaS9odHRwLnByb3RvGiBnb29nbGUvcHJvdG9idWYvZGVzY3JpcHRvci5wcm90bzpLCgRodHRwEh4uZ29vZ2xlLnByb3RvYnVmLk1ldGhvZE9wdGlvbnMYsMq8IiABKAsyFC5nb29nbGUuYXBpLkh0dHBSdWxlUgRodHRwQm4KDmNvbS5nb29nbGUuYXBpQhBBbm5vdGF0aW9uc1Byb3RvUAFaQWdvb2dsZS5nb2xhbmcub3JnL2dlbnByb3RvL2dvb2dsZWFwaXMvYXBpL2Fubm90YXRpb25zO2Fubm90YXRpb25zogIER0FQSWIGcHJvdG8zCrkEChVwcm90by91c2VyL3VzZXIucHJvdG8SDmJ1Y2tldGVlci51c2VyIt4DCgRVc2VyEg4KAmlkGAEgASgJUgJpZBIyCgRkYXRhGAIgAygLMh4uYnVja2V0ZWVyLnVzZXIuVXNlci5EYXRhRW50cnlSBGRhdGESRQoLdGFnZ2VkX2RhdGEYAyADKAsyJC5idWNrZXRlZXIudXNlci5Vc2VyLlRhZ2dlZERhdGFFbnRyeVIKdGFnZ2VkRGF0YRIbCglsYXN0X3NlZW4YBCABKANSCGxhc3RTZWVuEh0KCmNyZWF0ZWRfYXQYBSABKANSCWNyZWF0ZWRBdBp8CgREYXRhEjoKBXZhbHVlGAEgAygLMiQuYnVja2V0ZWVyLnVzZXIuVXNlci5EYXRhLlZhbHVlRW50cnlSBXZhbHVlGjgKClZhbHVlRW50cnkSEAoDa2V5GAEgASgJUgNrZXkSFAoFdmFsdWUYAiABKAlSBXZhbHVlOgI4ARo3CglEYXRhRW50cnkSEAoDa2V5GAEgASgJUgNrZXkSFAoFdmFsdWUYAiABKAlSBXZhbHV
lOgI4ARpYCg9UYWdnZWREYXRhRW50cnkSEAoDa2V5GAEgASgJUgNrZXkSLwoFdmFsdWUYAiABKAsyGS5idWNrZXRlZXIudXNlci5Vc2VyLkRhdGFSBXZhbHVlOgI4AUInWiVnaXRodWIuY29tL2NhLWRwL2J1Y2tldGVlci9wcm90by91c2VyYgZwcm90bzMKzwEKHXByb3RvL2ZlYXR1cmUvdmFyaWF0aW9uLnByb3RvEhFidWNrZXRlZXIuZmVhdHVyZSJnCglWYXJpYXRpb24SDgoCaWQYASABKAlSAmlkEhQKBXZhbHVlGAIgASgJUgV2YWx1ZRISCgRuYW1lGAMgASgJUgRuYW1lEiAKC2Rlc2NyaXB0aW9uGAQgASgJUgtkZXNjcmlwdGlvbkIqWihnaXRodWIuY29tL2NhLWRwL2J1Y2tldGVlci9wcm90by9mZWF0dXJlYgZwcm90bzMKlwIKGnByb3RvL2ZlYXR1cmUvcmVhc29uLnByb3RvEhFidWNrZXRlZXIuZmVhdHVyZSKxAQoGUmVhc29uEjIKBHR5cGUYASABKA4yHi5idWNrZXRlZXIuZmVhdHVyZS5SZWFzb24uVHlwZVIEdHlwZRIXCgdydWxlX2lkGAIgASgJUgZydWxlSWQiWgoEVHlwZRIKCgZUQVJHRVQQABIICgRSVUxFEAESCwoHREVGQVVMVBADEgoKBkNMSUVOVBAEEhEKDU9GRl9WQVJJQVRJT04QBRIQCgxQUkVSRVFVSVNJVEUQBkIqWihnaXRodWIuY29tL2NhLWRwL2J1Y2tldGVlci9wcm90by9mZWF0dXJlYgZwcm90bzMKkQUKHnByb3RvL2ZlYXR1cmUvZXZhbHVhdGlvbi5wcm90bxIRYnVja2V0ZWVyLmZlYXR1cmUaHXByb3RvL2ZlYXR1cmUvdmFyaWF0aW9uLnByb3RvGhpwcm90by9mZWF0dXJlL3JlYXNvbi5wcm90byK8AgoKRXZhbHVhdGlvbhIOCgJpZBgBIAEoCVICaWQSHQoKZmVhdHVyZV9pZBgCIAEoCVIJZmVhdHVyZUlkEicKD2ZlYXR1cmVfdmVyc2lvbhgDIAEoBVIOZmVhdHVyZVZlcnNpb24SFwoHdXNlcl9pZBgEIAEoCVIGdXNlcklkEiEKDHZhcmlhdGlvbl9pZBgFIAEoCVILdmFyaWF0aW9uSWQSPgoJdmFyaWF0aW9uGAYgASgLMhwuYnVja2V0ZWVyLmZlYXR1cmUuVmFyaWF0aW9uQgIYAVIJdmFyaWF0aW9uEjEKBnJlYXNvbhgHIAEoCzIZLmJ1Y2tldGVlci5mZWF0dXJlLlJlYXNvblIGcmVhc29uEicKD3ZhcmlhdGlvbl92YWx1ZRgIIAEoCVIOdmFyaWF0aW9uVmFsdWUirQEKD1VzZXJFdmFsdWF0aW9ucxIOCgJpZBgBIAEoCVICaWQSPwoLZXZhbHVhdGlvbnMYAiADKAsyHS5idWNrZXRlZXIuZmVhdHVyZS5FdmFsdWF0aW9uUgtldmFsdWF0aW9ucxIdCgpjcmVhdGVkX2F0GAMgASgDUgljcmVhdGVkQXQiKgoFU3RhdGUSCgoGUVVFVUVEEAASCwoHUEFSVElBTBABEggKBEZVTEwQAkIqWihnaXRodWIuY29tL2NhLWRwL2J1Y2tldGVlci9wcm90by9mZWF0dXJlYgZwcm90bzMK5AEKGWdvb2dsZS9wcm90b2J1Zi9hbnkucHJvdG8SD2dvb2dsZS5wcm90b2J1ZiI2CgNBbnkSGQoIdHlwZV91cmwYASABKAlSB3R5cGVVcmwSFAoFdmFsdWUYAiABKAxSBXZhbHVlQnYKE2NvbS5nb29nbGUucHJvdG9idWZCCEFueVByb3RvUAFaLGdvb2dsZS5nb2xhbmcub3JnL3Byb3RvYnVmL3R5cGVzL2tub3duL2FueXBiogIDR1BCqgIeR29vZ2xlLlByb3R
vYnVmLldlbGxLbm93blR5cGVzYgZwcm90bzMK+wEKHmdvb2dsZS9wcm90b2J1Zi9kdXJhdGlvbi5wcm90bxIPZ29vZ2xlLnByb3RvYnVmIjoKCER1cmF0aW9uEhgKB3NlY29uZHMYASABKANSB3NlY29uZHMSFAoFbmFub3MYAiABKAVSBW5hbm9zQoMBChNjb20uZ29vZ2xlLnByb3RvYnVmQg1EdXJhdGlvblByb3RvUAFaMWdvb2dsZS5nb2xhbmcub3JnL3Byb3RvYnVmL3R5cGVzL2tub3duL2R1cmF0aW9ucGL4AQGiAgNHUEKqAh5Hb29nbGUuUHJvdG9idWYuV2VsbEtub3duVHlwZXNiBnByb3RvMwq9EgoecHJvdG8vZXZlbnQvY2xpZW50L2V2ZW50LnByb3RvEhZidWNrZXRlZXIuZXZlbnQuY2xpZW50Ghlnb29nbGUvcHJvdG9idWYvYW55LnByb3RvGh5nb29nbGUvcHJvdG9idWYvZHVyYXRpb24ucHJvdG8aHnByb3RvL2ZlYXR1cmUvZXZhbHVhdGlvbi5wcm90bxoacHJvdG8vZmVhdHVyZS9yZWFzb24ucHJvdG8aFXByb3RvL3VzZXIvdXNlci5wcm90byJ4CgVFdmVudBIOCgJpZBgBIAEoCVICaWQSKgoFZXZlbnQYAiABKAsyFC5nb29nbGUucHJvdG9idWYuQW55UgVldmVudBIzChVlbnZpcm9ubWVudF9uYW1lc3BhY2UYAyABKAlSFGVudmlyb25tZW50TmFtZXNwYWNlIuECCg9FdmFsdWF0aW9uRXZlbnQSHAoJdGltZXN0YW1wGAEgASgDUgl0aW1lc3RhbXASHQoKZmVhdHVyZV9pZBgCIAEoCVIJZmVhdHVyZUlkEicKD2ZlYXR1cmVfdmVyc2lvbhgDIAEoBVIOZmVhdHVyZVZlcnNpb24SFwoHdXNlcl9pZBgEIAEoCVIGdXNlcklkEiEKDHZhcmlhdGlvbl9pZBgFIAEoCVILdmFyaWF0aW9uSWQSKAoEdXNlchgGIAEoCzIULmJ1Y2tldGVlci51c2VyLlVzZXJSBHVzZXISMQoGcmVhc29uGAcgASgLMhkuYnVja2V0ZWVyLmZlYXR1cmUuUmVhc29uUgZyZWFzb24SEAoDdGFnGAggASgJUgN0YWcSPQoJc291cmNlX2lkGAkgASgOMiAuYnVja2V0ZWVyLmV2ZW50LmNsaWVudC5Tb3VyY2VJZFIIc291cmNlSWQisQIKCUdvYWxFdmVudBIcCgl0aW1lc3RhbXAYASABKANSCXRpbWVzdGFtcBIXCgdnb2FsX2lkGAIgASgJUgZnb2FsSWQSFwoHdXNlcl9pZBgDIAEoCVIGdXNlcklkEhQKBXZhbHVlGAQgASgBUgV2YWx1ZRIoCgR1c2VyGAUgASgLMhQuYnVja2V0ZWVyLnVzZXIuVXNlclIEdXNlchJDCgtldmFsdWF0aW9ucxgGIAMoCzIdLmJ1Y2tldGVlci5mZWF0dXJlLkV2YWx1YXRpb25CAhgBUgtldmFsdWF0aW9ucxIQCgN0YWcYByABKAlSA3RhZxI9Cglzb3VyY2VfaWQYCCABKA4yIC5idWNrZXRlZXIuZXZlbnQuY2xpZW50LlNvdXJjZUlkUghzb3VyY2VJZCJYCgxNZXRyaWNzRXZlbnQSHAoJdGltZXN0YW1wGAEgASgDUgl0aW1lc3RhbXASKgoFZXZlbnQYAiABKAsyFC5nb29nbGUucHJvdG9idWYuQW55UgVldmVudCLyAQogR2V0RXZhbHVhdGlvbkxhdGVuY3lNZXRyaWNzRXZlbnQSXAoGbGFiZWxzGAEgAygLMkQuYnVja2V0ZWVyLmV2ZW50LmNsaWVudC5HZXRFdmFsdWF0aW9uTGF0ZW5jeU1ldHJpY3NFdmVudC5MYWJlbHNFbnRyeVIGbGFiZWxzEjUKCGR1cmF0aW9
uGAIgASgLMhkuZ29vZ2xlLnByb3RvYnVmLkR1cmF0aW9uUghkdXJhdGlvbho5CgtMYWJlbHNFbnRyeRIQCgNrZXkYASABKAlSA2tleRIUCgV2YWx1ZRgCIAEoCVIFdmFsdWU6AjgBItIBCh1HZXRFdmFsdWF0aW9uU2l6ZU1ldHJpY3NFdmVudBJZCgZsYWJlbHMYASADKAsyQS5idWNrZXRlZXIuZXZlbnQuY2xpZW50LkdldEV2YWx1YXRpb25TaXplTWV0cmljc0V2ZW50LkxhYmVsc0VudHJ5UgZsYWJlbHMSGwoJc2l6ZV9ieXRlGAIgASgFUghzaXplQnl0ZRo5CgtMYWJlbHNFbnRyeRIQCgNrZXkYASABKAlSA2tleRIUCgV2YWx1ZRgCIAEoCVIFdmFsdWU6AjgBIjEKHVRpbWVvdXRFcnJvckNvdW50TWV0cmljc0V2ZW50EhAKA3RhZxgBIAEoCVIDdGFnIjIKHkludGVybmFsRXJyb3JDb3VudE1ldHJpY3NFdmVudBIQCgN0YWcYASABKAlSA3RhZyLFAQoIT3BzRXZlbnQSHAoJdGltZXN0YW1wGAEgASgDUgl0aW1lc3RhbXASHQoKZmVhdHVyZV9pZBgCIAEoCVIJZmVhdHVyZUlkEicKD2ZlYXR1cmVfdmVyc2lvbhgDIAEoBVIOZmVhdHVyZVZlcnNpb24SIQoMdmFyaWF0aW9uX2lkGAQgASgJUgt2YXJpYXRpb25JZBIXCgdnb2FsX2lkGAUgASgJUgZnb2FsSWQSFwoHdXNlcl9pZBgGIAEoCVIGdXNlcklkIpQBCg5Hb2FsQmF0Y2hFdmVudBIXCgd1c2VyX2lkGAEgASgJUgZ1c2VySWQSaQoadXNlcl9nb2FsX2V2ZW50c19vdmVyX3RhZ3MYAiADKAsyLS5idWNrZXRlZXIuZXZlbnQuY2xpZW50LlVzZXJHb2FsRXZlbnRzT3ZlclRhZ1IWdXNlckdvYWxFdmVudHNPdmVyVGFncyJ6ChVVc2VyR29hbEV2ZW50c092ZXJUYWcSEAoDdGFnGAEgASgJUgN0YWcSTwoQdXNlcl9nb2FsX2V2ZW50cxgCIAMoCzIlLmJ1Y2tldGVlci5ldmVudC5jbGllbnQuVXNlckdvYWxFdmVudFIOdXNlckdvYWxFdmVudHMiXAoNVXNlckdvYWxFdmVudBIcCgl0aW1lc3RhbXAYASABKANSCXRpbWVzdGFtcBIXCgdnb2FsX2lkGAIgASgJUgZnb2FsSWQSFAoFdmFsdWUYAyABKAFSBXZhbHVlKmYKCFNvdXJjZUlkEgsKB1VOS05PV04QABILCgdBTkRST0lEEAESBwoDSU9TEAISBwoDV0VCEAMSDgoKR09BTF9CQVRDSBAEEg0KCUdPX1NFUlZFUhAFEg8KC05PREVfU0VSVkVSEAZCL1otZ2l0aHViLmNvbS9jYS1kcC9idWNrZXRlZXIvcHJvdG8vZXZlbnQvY2xpZW50YgZwcm90bzMKqA4KG3Byb3RvL2dhdGV3YXkvc2VydmljZS5wcm90bxIRYnVja2V0ZWVyLmdhdGV3YXkaHGdvb2dsZS9hcGkvYW5ub3RhdGlvbnMucHJvdG8aFXByb3RvL3VzZXIvdXNlci5wcm90bxoecHJvdG8vZmVhdHVyZS9ldmFsdWF0aW9uLnByb3RvGh5wcm90by9ldmVudC9jbGllbnQvZXZlbnQucHJvdG8iDQoLUGluZ1JlcXVlc3QiIgoMUGluZ1Jlc3BvbnNlEhIKBHRpbWUYASABKANSBHRpbWUi5QEKFUdldEV2YWx1YXRpb25zUmVxdWVzdBIQCgN0YWcYASABKAlSA3RhZxIoCgR1c2VyGAIgASgLMhQuYnVja2V0ZWVyLnVzZXIuVXNlclIEdXNlchIuChN1c2VyX2V2YWx1YXRpb25zX2lkGAMgASgJUhF1c2VyRXZhbHVhdGlvbnN
JZBIhCgpmZWF0dXJlX2lkGAQgASgJQgIYAVIJZmVhdHVyZUlkEj0KCXNvdXJjZV9pZBgFIAEoDjIgLmJ1Y2tldGVlci5ldmVudC5jbGllbnQuU291cmNlSWRSCHNvdXJjZUlkIs4BChZHZXRFdmFsdWF0aW9uc1Jlc3BvbnNlEj4KBXN0YXRlGAEgASgOMiguYnVja2V0ZWVyLmZlYXR1cmUuVXNlckV2YWx1YXRpb25zLlN0YXRlUgVzdGF0ZRJECgtldmFsdWF0aW9ucxgCIAEoCzIiLmJ1Y2tldGVlci5mZWF0dXJlLlVzZXJFdmFsdWF0aW9uc1ILZXZhbHVhdGlvbnMSLgoTdXNlcl9ldmFsdWF0aW9uc19pZBgDIAEoCVIRdXNlckV2YWx1YXRpb25zSWQisAEKFEdldEV2YWx1YXRpb25SZXF1ZXN0EhAKA3RhZxgBIAEoCVIDdGFnEigKBHVzZXIYAiABKAsyFC5idWNrZXRlZXIudXNlci5Vc2VyUgR1c2VyEh0KCmZlYXR1cmVfaWQYAyABKAlSCWZlYXR1cmVJZBI9Cglzb3VyY2VfaWQYBCABKA4yIC5idWNrZXRlZXIuZXZlbnQuY2xpZW50LlNvdXJjZUlkUghzb3VyY2VJZCJWChVHZXRFdmFsdWF0aW9uUmVzcG9uc2USPQoKZXZhbHVhdGlvbhgBIAEoCzIdLmJ1Y2tldGVlci5mZWF0dXJlLkV2YWx1YXRpb25SCmV2YWx1YXRpb24iTgoVUmVnaXN0ZXJFdmVudHNSZXF1ZXN0EjUKBmV2ZW50cxgBIAMoCzIdLmJ1Y2tldGVlci5ldmVudC5jbGllbnQuRXZlbnRSBmV2ZW50cyKUAgoWUmVnaXN0ZXJFdmVudHNSZXNwb25zZRJNCgZlcnJvcnMYASADKAsyNS5idWNrZXRlZXIuZ2F0ZXdheS5SZWdpc3RlckV2ZW50c1Jlc3BvbnNlLkVycm9yc0VudHJ5UgZlcnJvcnMaPwoFRXJyb3ISHAoJcmV0cmlhYmxlGAEgASgIUglyZXRyaWFibGUSGAoHbWVzc2FnZRgCIAEoCVIHbWVzc2FnZRpqCgtFcnJvcnNFbnRyeRIQCgNrZXkYASABKAlSA2tleRJFCgV2YWx1ZRgCIAEoCzIvLmJ1Y2tldGVlci5nYXRld2F5LlJlZ2lzdGVyRXZlbnRzUmVzcG9uc2UuRXJyb3JSBXZhbHVlOgI4ATLuAwoHR2F0ZXdheRJZCgRQaW5nEh4uYnVja2V0ZWVyLmdhdGV3YXkuUGluZ1JlcXVlc3QaHy5idWNrZXRlZXIuZ2F0ZXdheS5QaW5nUmVzcG9uc2UiEILT5JMCCiIFL3Bpbmc6ASoSggEKDkdldEV2YWx1YXRpb25zEiguYnVja2V0ZWVyLmdhdGV3YXkuR2V0RXZhbHVhdGlvbnNSZXF1ZXN0GikuYnVja2V0ZWVyLmdhdGV3YXkuR2V0RXZhbHVhdGlvbnNSZXNwb25zZSIbgtPkkwIVIhAvZ2V0X2V2YWx1YXRpb25zOgEqEn4KDUdldEV2YWx1YXRpb24SJy5idWNrZXRlZXIuZ2F0ZXdheS5HZXRFdmFsdWF0aW9uUmVxdWVzdBooLmJ1Y2tldGVlci5nYXRld2F5LkdldEV2YWx1YXRpb25SZXNwb25zZSIagtPkkwIUIg8vZ2V0X2V2YWx1YXRpb246ASoSggEKDlJlZ2lzdGVyRXZlbnRzEiguYnVja2V0ZWVyLmdhdGV3YXkuUmVnaXN0ZXJFdmVudHNSZXF1ZXN0GikuYnVja2V0ZWVyLmdhdGV3YXkuUmVnaXN0ZXJFdmVudHNSZXNwb25zZSIbgtPkkwIVIhAvcmVnaXN0ZXJfZXZlbnRzOgEqQipaKGdpdGh1Yi5jb20vY2EtZHAvYnVja2V0ZWVyL3Byb3RvL2dhdGV3YXliBnByb3RvMw==" + config: + 
port: 9000 + adminPort: 8001 + resources: {} + +service: + externalPort: 9000 + +ingress: + host: + staticIPName: + +health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + +resources: {} diff --git a/manifests/bucketeer/charts/auditlog-persister/Chart.yaml b/manifests/bucketeer/charts/auditlog-persister/Chart.yaml new file mode 100644 index 000000000..dd065c8e6 --- /dev/null +++ b/manifests/bucketeer/charts/auditlog-persister/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for bucketeer-auditlog-persister +name: auditlog-persister +version: 1.0.0 diff --git a/manifests/bucketeer/charts/auditlog-persister/templates/NOTES.txt b/manifests/bucketeer/charts/auditlog-persister/templates/NOTES.txt new file mode 100644 index 000000000..e1af821e6 --- /dev/null +++ b/manifests/bucketeer/charts/auditlog-persister/templates/NOTES.txt @@ -0,0 +1,15 @@ +1. Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "auditlog-persister.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "auditlog-persister.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "auditlog-persister.fullname" . 
}} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "auditlog-persister.name" . }},release={{ template "auditlog-persister.fullname" . }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/manifests/bucketeer/charts/auditlog-persister/templates/_helpers.tpl b/manifests/bucketeer/charts/auditlog-persister/templates/_helpers.tpl new file mode 100644 index 000000000..3043aba06 --- /dev/null +++ b/manifests/bucketeer/charts/auditlog-persister/templates/_helpers.tpl @@ -0,0 +1,40 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "auditlog-persister.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "auditlog-persister.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "auditlog-persister.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "service-cert-secret" -}} +{{- if .Values.tls.service.secret }} +{{- printf "%s" .Values.tls.service.secret -}} +{{- else -}} +{{ template "auditlog-persister.fullname" . }}-service-cert +{{- end -}} +{{- end -}} diff --git a/manifests/bucketeer/charts/auditlog-persister/templates/deployment.yaml b/manifests/bucketeer/charts/auditlog-persister/templates/deployment.yaml new file mode 100644 index 000000000..78afd07c9 --- /dev/null +++ b/manifests/bucketeer/charts/auditlog-persister/templates/deployment.yaml @@ -0,0 +1,157 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "auditlog-persister.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "auditlog-persister.name" . }} + chart: {{ template "auditlog-persister.chart" . }} + release: {{ template "auditlog-persister.fullname" . }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "auditlog-persister.name" . }} + release: {{ template "auditlog-persister.fullname" . }} + template: + metadata: + labels: + app: {{ template "auditlog-persister.name" . }} + release: {{ template "auditlog-persister.fullname" . }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/envoy-configmap.yaml") . | sha256sum }} + spec: + {{- with .Values.global.image.imagePullSecrets }} + imagePullSecrets: {{- toYaml . | nindent 8 }} + {{- end }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + volumes: + - name: envoy-config + configMap: + name: {{ template "auditlog-persister.fullname" . }}-envoy-config + - name: service-cert-secret + secret: + secretName: {{ template "service-cert-secret" . 
}} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: ["persister"] + env: + - name: BUCKETEER_AUDIT_LOG_PROJECT + value: "{{ .Values.env.project }}" + - name: BUCKETEER_AUDIT_LOG_MYSQL_USER + value: "{{ .Values.env.mysqlUser }}" + - name: BUCKETEER_AUDIT_LOG_MYSQL_PASS + value: "{{ .Values.env.mysqlPass }}" + - name: BUCKETEER_AUDIT_LOG_MYSQL_HOST + value: "{{ .Values.env.mysqlHost }}" + - name: BUCKETEER_AUDIT_LOG_MYSQL_PORT + value: "{{ .Values.env.mysqlPort }}" + - name: BUCKETEER_AUDIT_LOG_MYSQL_DB_NAME + value: "{{ .Values.env.mysqlDbName }}" + - name: BUCKETEER_AUDIT_LOG_TOPIC + value: "{{ .Values.env.topic }}" + - name: BUCKETEER_AUDIT_LOG_SUBSCRIPTION + value: "{{ .Values.env.subscription }}" + - name: BUCKETEER_AUDIT_LOG_MAX_MPS + value: "{{ .Values.env.maxMps }}" + - name: BUCKETEER_AUDIT_LOG_NUM_WORKERS + value: "{{ .Values.env.numWorkers }}" + - name: BUCKETEER_AUDIT_LOG_FLUSH_SIZE + value: "{{ .Values.env.flushSize }}" + - name: BUCKETEER_AUDIT_LOG_FLUSH_INTERVAL + value: "{{ .Values.env.flushInterval }}" + - name: BUCKETEER_AUDIT_LOG_PULLER_NUM_GOROUTINES + value: "{{ .Values.env.pullerNumGoroutines }}" + - name: BUCKETEER_AUDIT_LOG_PULLER_MAX_OUTSTANDING_MESSAGES + value: "{{ .Values.env.pullerMaxOutstandingMessages }}" + - name: BUCKETEER_AUDIT_LOG_PULLER_MAX_OUTSTANDING_BYTES + value: "{{ .Values.env.pullerMaxOutstandingBytes }}" + - name: BUCKETEER_AUDIT_LOG_PORT + value: "{{ .Values.env.port }}" + - name: BUCKETEER_AUDIT_LOG_METRICS_PORT + value: "{{ .Values.env.metricsPort }}" + - name: BUCKETEER_AUDIT_LOG_LOG_LEVEL + value: "{{ .Values.env.logLevel }}" + - name: BUCKETEER_AUDIT_LOG_CERT + value: /usr/local/certs/service/tls.crt + - name: BUCKETEER_AUDIT_LOG_KEY + value: /usr/local/certs/service/tls.key + volumeMounts: + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + ports: + - name: 
service + containerPort: {{ .Values.env.port }} + - name: metrics + containerPort: {{ .Values.env.metricsPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: service + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: service + scheme: HTTPS + resources: +{{ toYaml .Values.resources | indent 12 }} + - name: envoy + image: "{{ .Values.envoy.image.repository }}:{{ .Values.envoy.image.tag }}" + imagePullPolicy: {{ .Values.envoy.image.pullPolicy }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-c" + - "while [ $(netstat -plunt | grep tcp | grep -v envoy | wc -l) -ne 0 ]; do sleep 1; done;" + command: ["envoy"] + args: + - "-c" + - "/usr/local/conf/config.yaml" + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + - name: envoy-config + mountPath: /usr/local/conf/ + readOnly: true + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + ports: + - name: envoy + containerPort: {{ .Values.envoy.port }} + - name: admin + containerPort: {{ .Values.envoy.adminPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + resources: +{{ toYaml .Values.envoy.resources | indent 12 }} + strategy: + type: RollingUpdate diff --git a/manifests/bucketeer/charts/auditlog-persister/templates/envoy-configmap.yaml b/manifests/bucketeer/charts/auditlog-persister/templates/envoy-configmap.yaml new file mode 100644 
index 000000000..3c54f8ea2 --- /dev/null +++ b/manifests/bucketeer/charts/auditlog-persister/templates/envoy-configmap.yaml @@ -0,0 +1,127 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "auditlog-persister.fullname" . }}-envoy-config + namespace: {{ .Values.namespace }} + labels: + app: {{ template "auditlog-persister.name" . }} + chart: {{ template "auditlog-persister.chart" . }} + release: {{ template "auditlog-persister.fullname" . }} + heritage: {{ .Release.Service }} +data: + config.yaml: |- + admin: + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 + static_resources: + clusters: + - name: auditlog-persister + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + ignore_health_on_host_removal: true + lb_policy: round_robin + load_assignment: + cluster_name: auditlog-persister + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 9090 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + listeners: + - name: ingress + address: + socket_address: + address: 0.0.0.0 + port_value: 9000 + 
filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + auditlog-persister: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: ingress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: / + route: + cluster: auditlog-persister + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: ingress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true diff --git a/manifests/bucketeer/charts/auditlog-persister/templates/hpa.yaml b/manifests/bucketeer/charts/auditlog-persister/templates/hpa.yaml new file mode 100644 index 000000000..3a3a1b4ee --- /dev/null +++ b/manifests/bucketeer/charts/auditlog-persister/templates/hpa.yaml @@ -0,0 +1,19 @@ +{{ if .Values.hpa.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "auditlog-persister.fullname" . 
}} + namespace: {{ .Values.namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "auditlog-persister.fullname" . }} + minReplicas: {{ .Values.hpa.minReplicas }} + maxReplicas: {{ .Values.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpa.metrics.cpu.targetAverageUtilization }} +{{ end }} diff --git a/manifests/bucketeer/charts/auditlog-persister/templates/service-cert-secret.yaml b/manifests/bucketeer/charts/auditlog-persister/templates/service-cert-secret.yaml new file mode 100644 index 000000000..5b115c7f7 --- /dev/null +++ b/manifests/bucketeer/charts/auditlog-persister/templates/service-cert-secret.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.tls.service.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "auditlog-persister.fullname" . }}-service-cert + namespace: {{ .Values.namespace }} + labels: + app: {{ template "auditlog-persister.name" . }} + chart: {{ template "auditlog-persister.chart" . }} + release: {{ template "auditlog-persister.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + tls.crt: {{ required "Service TLS certificate is required" .Values.tls.service.cert | b64enc | quote }} + tls.key: {{ required "Service TLS key is required" .Values.tls.service.key | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/auditlog-persister/templates/service.yaml b/manifests/bucketeer/charts/auditlog-persister/templates/service.yaml new file mode 100644 index 000000000..5ae8d980a --- /dev/null +++ b/manifests/bucketeer/charts/auditlog-persister/templates/service.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "auditlog-persister.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "auditlog-persister.name" . }} + chart: {{ template "auditlog-persister.chart" . 
}} + release: {{ template "auditlog-persister.fullname" . }} + heritage: {{ .Release.Service }} + envoy: "true" + metrics: "true" +spec: + type: {{ .Values.service.type }} + clusterIP: {{ .Values.service.clusterIP }} + ports: + - name: service + port: {{ .Values.service.externalPort }} + targetPort: envoy + protocol: TCP + - name: metrics + port: {{ .Values.env.metricsPort }} + protocol: TCP + - name: admin + port: {{ .Values.envoy.adminPort }} + protocol: TCP + selector: + app: {{ template "auditlog-persister.name" . }} + release: {{ template "auditlog-persister.fullname" . }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/auditlog-persister/values.yaml b/manifests/bucketeer/charts/auditlog-persister/values.yaml new file mode 100644 index 000000000..24d6c3530 --- /dev/null +++ b/manifests/bucketeer/charts/auditlog-persister/values.yaml @@ -0,0 +1,68 @@ +image: + repository: ghcr.io/bucketeer-io/bucketeer-auditlog + pullPolicy: IfNotPresent + +fullnameOverride: "auditlog-persister" + +namespace: + +env: + project: + mysqlUser: + mysqlPass: + mysqlHost: + mysqlPort: 3306 + mysqlDbName: + topic: + subscription: + maxMps: "1000" + numWorkers: 2 + flushSize: 100 + flushInterval: 2s + pullerNumGoroutines: 5 + pullerMaxOutstandingMessages: "1000" + pullerMaxOutstandingBytes: "1000000000" + logLevel: info + port: 9090 + metricsPort: 9002 + +affinity: {} + +nodeSelector: {} + +hpa: + enabled: + namespace: + minReplicas: + maxReplicas: + metrics: + cpu: + targetAverageUtilization: + +envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + +tls: + service: + secret: + cert: + key: + +service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + +health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + +resources: {} diff --git a/manifests/bucketeer/charts/auditlog/.helmignore b/manifests/bucketeer/charts/auditlog/.helmignore new 
file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/manifests/bucketeer/charts/auditlog/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/manifests/bucketeer/charts/auditlog/Chart.yaml b/manifests/bucketeer/charts/auditlog/Chart.yaml new file mode 100644 index 000000000..88556b96c --- /dev/null +++ b/manifests/bucketeer/charts/auditlog/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for bucketeer-auditlog +name: auditlog +version: 1.0.0 diff --git a/manifests/bucketeer/charts/auditlog/templates/NOTES.txt b/manifests/bucketeer/charts/auditlog/templates/NOTES.txt new file mode 100644 index 000000000..6c2136763 --- /dev/null +++ b/manifests/bucketeer/charts/auditlog/templates/NOTES.txt @@ -0,0 +1,15 @@ +1. Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "auditlog.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "auditlog.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "auditlog.fullname" . 
}} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "auditlog.name" . }},release={{ template "auditlog.fullname" . }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/manifests/bucketeer/charts/auditlog/templates/_helpers.tpl b/manifests/bucketeer/charts/auditlog/templates/_helpers.tpl new file mode 100644 index 000000000..35c2a4de1 --- /dev/null +++ b/manifests/bucketeer/charts/auditlog/templates/_helpers.tpl @@ -0,0 +1,56 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "auditlog.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "auditlog.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "auditlog.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "service-cert-secret" -}} +{{- if .Values.tls.service.secret }} +{{- printf "%s" .Values.tls.service.secret -}} +{{- else -}} +{{ template "auditlog.fullname" . }}-service-cert +{{- end -}} +{{- end -}} + +{{- define "oauth-key-secret" -}} +{{- if .Values.oauth.key.secret }} +{{- printf "%s" .Values.oauth.key.secret -}} +{{- else -}} +{{ template "auditlog.fullname" . }}-oauth-key +{{- end -}} +{{- end -}} + +{{- define "service-token-secret" -}} +{{- if .Values.serviceToken.secret }} +{{- printf "%s" .Values.serviceToken.secret -}} +{{- else -}} +{{ template "auditlog.fullname" . }}-service-token +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/manifests/bucketeer/charts/auditlog/templates/deployment.yaml b/manifests/bucketeer/charts/auditlog/templates/deployment.yaml new file mode 100644 index 000000000..d6a76b7a3 --- /dev/null +++ b/manifests/bucketeer/charts/auditlog/templates/deployment.yaml @@ -0,0 +1,161 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "auditlog.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "auditlog.name" . }} + chart: {{ template "auditlog.chart" . }} + release: {{ template "auditlog.fullname" . }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "auditlog.name" . }} + release: {{ template "auditlog.fullname" . }} + template: + metadata: + labels: + app: {{ template "auditlog.name" . }} + release: {{ template "auditlog.fullname" . }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/envoy-configmap.yaml") . | sha256sum }} + spec: + {{- with .Values.global.image.imagePullSecrets }} + imagePullSecrets: {{- toYaml . 
| nindent 8 }} + {{- end }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + volumes: + - name: envoy-config + configMap: + name: {{ template "auditlog.fullname" . }}-envoy-config + - name: service-cert-secret + secret: + secretName: {{ template "service-cert-secret" . }} + - name: oauth-key-secret + secret: + secretName: {{ template "oauth-key-secret" . }} + - name: service-token-secret + secret: + secretName: {{ template "service-token-secret" . }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: ["server"] + env: + - name: BUCKETEER_AUDIT_LOG_PROJECT + value: "{{ .Values.env.project }}" + - name: BUCKETEER_AUDIT_LOG_MYSQL_USER + value: "{{ .Values.env.mysqlUser }}" + - name: BUCKETEER_AUDIT_LOG_MYSQL_PASS + value: "{{ .Values.env.mysqlPass }}" + - name: BUCKETEER_AUDIT_LOG_MYSQL_HOST + value: "{{ .Values.env.mysqlHost }}" + - name: BUCKETEER_AUDIT_LOG_MYSQL_PORT + value: "{{ .Values.env.mysqlPort }}" + - name: BUCKETEER_AUDIT_LOG_MYSQL_DB_NAME + value: "{{ .Values.env.mysqlDbName }}" + - name: BUCKETEER_AUDIT_LOG_ACCOUNT_SERVICE + value: "{{ .Values.env.accountService }}" + - name: BUCKETEER_AUDIT_LOG_PORT + value: "{{ .Values.env.port }}" + - name: BUCKETEER_AUDIT_LOG_METRICS_PORT + value: "{{ .Values.env.metricsPort }}" + - name: BUCKETEER_AUDIT_LOG_LOG_LEVEL + value: "{{ .Values.env.logLevel }}" + - name: BUCKETEER_AUDIT_LOG_OAUTH_CLIENT_ID + value: "{{ .Values.oauth.clientId }}" + - name: BUCKETEER_AUDIT_LOG_OAUTH_ISSUER + value: "{{ .Values.oauth.issuer }}" + - name: BUCKETEER_AUDIT_LOG_OAUTH_KEY + value: /usr/local/oauth-key/public.pem + - name: BUCKETEER_AUDIT_LOG_CERT + value: /usr/local/certs/service/tls.crt + - name: BUCKETEER_AUDIT_LOG_KEY + value: /usr/local/certs/service/tls.key + - name: BUCKETEER_AUDIT_LOG_SERVICE_TOKEN + value: 
/usr/local/service-token/token + volumeMounts: + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + - name: oauth-key-secret + mountPath: /usr/local/oauth-key + readOnly: true + - name: service-token-secret + mountPath: /usr/local/service-token + readOnly: true + ports: + - name: service + containerPort: {{ .Values.env.port }} + - name: metrics + containerPort: {{ .Values.env.metricsPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: service + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: service + scheme: HTTPS + resources: +{{ toYaml .Values.resources | indent 12 }} + - name: envoy + image: "{{ .Values.envoy.image.repository }}:{{ .Values.envoy.image.tag }}" + imagePullPolicy: {{ .Values.envoy.image.pullPolicy }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-c" + - "while [ $(netstat -plunt | grep tcp | grep -v envoy | wc -l) -ne 0 ]; do sleep 1; done;" + command: ["envoy"] + args: + - "-c" + - "/usr/local/conf/config.yaml" + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + - name: envoy-config + mountPath: /usr/local/conf/ + readOnly: true + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + ports: + - name: envoy + containerPort: {{ .Values.envoy.port }} + - name: admin + containerPort: {{ .Values.envoy.adminPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + 
port: envoy + scheme: HTTPS + resources: +{{ toYaml .Values.envoy.resources | indent 12 }} + strategy: + type: RollingUpdate diff --git a/manifests/bucketeer/charts/auditlog/templates/envoy-configmap.yaml b/manifests/bucketeer/charts/auditlog/templates/envoy-configmap.yaml new file mode 100644 index 000000000..42feff948 --- /dev/null +++ b/manifests/bucketeer/charts/auditlog/templates/envoy-configmap.yaml @@ -0,0 +1,230 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "auditlog.fullname" . }}-envoy-config + namespace: {{ .Values.namespace }} + labels: + app: {{ template "auditlog.name" . }} + chart: {{ template "auditlog.chart" . }} + release: {{ template "auditlog.fullname" . }} + heritage: {{ .Release.Service }} +data: + config.yaml: |- + admin: + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 + static_resources: + clusters: + - name: auditlog + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + ignore_health_on_host_removal: true + lb_policy: round_robin + load_assignment: + cluster_name: auditlog + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 9090 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + 
healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + - name: account + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + ignore_health_on_host_removal: true + lb_policy: round_robin + load_assignment: + cluster_name: account + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: account.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + listeners: + - name: ingress + address: + socket_address: + address: 0.0.0.0 + port_value: 9000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + auditlog: + value: 25 + headers: + - name: :path + string_match: 
+ exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: ingress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: / + route: + cluster: auditlog + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: ingress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true + - name: egress + address: + socket_address: + address: 127.0.0.1 + port_value: 9001 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + account: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: egress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.account.AccountService + route: + cluster: account + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: egress_http + 
stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true + diff --git a/manifests/bucketeer/charts/auditlog/templates/hpa.yaml b/manifests/bucketeer/charts/auditlog/templates/hpa.yaml new file mode 100644 index 000000000..0177db1b5 --- /dev/null +++ b/manifests/bucketeer/charts/auditlog/templates/hpa.yaml @@ -0,0 +1,19 @@ +{{ if .Values.hpa.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "auditlog.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "auditlog.fullname" . }} + minReplicas: {{ .Values.hpa.minReplicas }} + maxReplicas: {{ .Values.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpa.metrics.cpu.targetAverageUtilization }} +{{ end }} diff --git a/manifests/bucketeer/charts/auditlog/templates/oauth-key-secret.yaml b/manifests/bucketeer/charts/auditlog/templates/oauth-key-secret.yaml new file mode 100644 index 000000000..2cfc5bad4 --- /dev/null +++ b/manifests/bucketeer/charts/auditlog/templates/oauth-key-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.oauth.key.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "auditlog.fullname" . }}-oauth-key + namespace: {{ .Values.namespace }} + labels: + app: {{ template "auditlog.name" . }} + chart: {{ template "auditlog.chart" . }} + release: {{ template "auditlog.fullname" . 
}} + heritage: {{ .Release.Service }} +type: Opaque +data: + public.pem: {{ required "OAuth key is required" .Values.oauth.key.public | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/auditlog/templates/pdb.yaml b/manifests/bucketeer/charts/auditlog/templates/pdb.yaml new file mode 100644 index 000000000..0b28b1797 --- /dev/null +++ b/manifests/bucketeer/charts/auditlog/templates/pdb.yaml @@ -0,0 +1,12 @@ +{{ if .Values.pdb.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "auditlog.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + selector: + matchLabels: + app: {{ template "auditlog.name" . }} +{{ end }} diff --git a/manifests/bucketeer/charts/auditlog/templates/service-cert-secret.yaml b/manifests/bucketeer/charts/auditlog/templates/service-cert-secret.yaml new file mode 100644 index 000000000..79524cc26 --- /dev/null +++ b/manifests/bucketeer/charts/auditlog/templates/service-cert-secret.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.tls.service.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "auditlog.fullname" . }}-service-cert + namespace: {{ .Values.namespace }} + labels: + app: {{ template "auditlog.name" . }} + chart: {{ template "auditlog.chart" . }} + release: {{ template "auditlog.fullname" . 
}} + heritage: {{ .Release.Service }} +type: Opaque +data: + tls.crt: {{ required "Service TLS certificate is required" .Values.tls.service.cert | b64enc | quote }} + tls.key: {{ required "Service TLS key is required" .Values.tls.service.key | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/auditlog/templates/service-token-secret.yaml b/manifests/bucketeer/charts/auditlog/templates/service-token-secret.yaml new file mode 100644 index 000000000..fc3224ca1 --- /dev/null +++ b/manifests/bucketeer/charts/auditlog/templates/service-token-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.serviceToken.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "auditlog.fullname" . }}-service-token + namespace: {{ .Values.namespace }} + labels: + app: {{ template "auditlog.name" . }} + chart: {{ template "auditlog.chart" . }} + release: {{ template "auditlog.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + token: {{ required "Service token is required" .Values.serviceToken.token | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/auditlog/templates/service.yaml b/manifests/bucketeer/charts/auditlog/templates/service.yaml new file mode 100644 index 000000000..ae7321ce7 --- /dev/null +++ b/manifests/bucketeer/charts/auditlog/templates/service.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "auditlog.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "auditlog.name" . }} + chart: {{ template "auditlog.chart" . }} + release: {{ template "auditlog.fullname" . 
}} + heritage: {{ .Release.Service }} + envoy: "true" + metrics: "true" +spec: + type: {{ .Values.service.type }} + clusterIP: {{ .Values.service.clusterIP }} + ports: + - name: service + port: {{ .Values.service.externalPort }} + targetPort: envoy + protocol: TCP + - name: metrics + port: {{ .Values.env.metricsPort }} + protocol: TCP + - name: admin + port: {{ .Values.envoy.adminPort }} + protocol: TCP + selector: + app: {{ template "auditlog.name" . }} + release: {{ template "auditlog.fullname" . }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/auditlog/values.yaml b/manifests/bucketeer/charts/auditlog/values.yaml new file mode 100644 index 000000000..33ec975d6 --- /dev/null +++ b/manifests/bucketeer/charts/auditlog/values.yaml @@ -0,0 +1,74 @@ +image: + repository: ghcr.io/bucketeer-io/bucketeer-auditlog + pullPolicy: IfNotPresent + +fullnameOverride: "auditlog" + +namespace: + +env: + project: + mysqlUser: + mysqlPass: + mysqlHost: + mysqlPort: 3306 + mysqlDbName: + accountService: localhost:9001 + logLevel: info + port: 9090 + metricsPort: 9002 + +affinity: {} + +nodeSelector: {} + +pdb: + enabled: + maxUnavailable: 50% + +hpa: + enabled: + minReplicas: + maxReplicas: + metrics: + cpu: + targetAverageUtilization: + +tls: + service: + secret: + cert: + key: + +oauth: + key: + secret: + public: + clientId: + issuer: + +serviceToken: + secret: + token: + +envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + +service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + +health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + +resources: {} diff --git a/manifests/bucketeer/charts/auth/.helmignore b/manifests/bucketeer/charts/auth/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/manifests/bucketeer/charts/auth/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. 
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/manifests/bucketeer/charts/auth/Chart.yaml b/manifests/bucketeer/charts/auth/Chart.yaml new file mode 100644 index 000000000..ad65edddd --- /dev/null +++ b/manifests/bucketeer/charts/auth/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for bucketeer-auth +name: auth +version: 1.0.0 diff --git a/manifests/bucketeer/charts/auth/templates/NOTES.txt b/manifests/bucketeer/charts/auth/templates/NOTES.txt new file mode 100644 index 000000000..87bd69377 --- /dev/null +++ b/manifests/bucketeer/charts/auth/templates/NOTES.txt @@ -0,0 +1,15 @@ +1. Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "auth.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "auth.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "auth.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "auth.name" . }},release={{ template "auth.fullname" . 
}}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/manifests/bucketeer/charts/auth/templates/_helpers.tpl b/manifests/bucketeer/charts/auth/templates/_helpers.tpl new file mode 100644 index 000000000..c4ad4e546 --- /dev/null +++ b/manifests/bucketeer/charts/auth/templates/_helpers.tpl @@ -0,0 +1,64 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "auth.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "auth.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "auth.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "service-cert-secret" -}} +{{- if .Values.tls.service.secret }} +{{- printf "%s" .Values.tls.service.secret -}} +{{- else -}} +{{ template "auth.fullname" . }}-service-cert +{{- end -}} +{{- end -}} + +{{- define "issuer-cert-secret" -}} +{{- if .Values.tls.issuer.secret }} +{{- printf "%s" .Values.tls.issuer.secret -}} +{{- else -}} +{{ template "auth.fullname" . 
}}-issuer-cert +{{- end -}} +{{- end -}} + +{{- define "oauth-key-secret" -}} +{{- if .Values.oauth.key.secret }} +{{- printf "%s" .Values.oauth.key.secret -}} +{{- else -}} +{{ template "auth.fullname" . }}-oauth-key +{{- end -}} +{{- end -}} + +{{- define "service-token-secret" -}} +{{- if .Values.serviceToken.secret }} +{{- printf "%s" .Values.serviceToken.secret -}} +{{- else -}} +{{ template "auth.fullname" . }}-service-token +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/manifests/bucketeer/charts/auth/templates/deployment.yaml b/manifests/bucketeer/charts/auth/templates/deployment.yaml new file mode 100644 index 000000000..52fa3a2dd --- /dev/null +++ b/manifests/bucketeer/charts/auth/templates/deployment.yaml @@ -0,0 +1,166 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "auth.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "auth.name" . }} + chart: {{ template "auth.chart" . }} + release: {{ template "auth.fullname" . }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "auth.name" . }} + release: {{ template "auth.fullname" . }} + template: + metadata: + labels: + app: {{ template "auth.name" . }} + release: {{ template "auth.fullname" . }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/envoy-configmap.yaml") . | sha256sum }} + spec: + {{- with .Values.global.image.imagePullSecrets }} + imagePullSecrets: {{- toYaml . | nindent 8 }} + {{- end }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + volumes: + - name: envoy-config + configMap: + name: {{ template "auth.fullname" . }}-envoy-config + - name: service-cert-secret + secret: + secretName: {{ template "service-cert-secret" . }} + - name: issuer-cert-secret + secret: + secretName: {{ template "issuer-cert-secret" . 
}} + - name: oauth-key-secret + secret: + secretName: {{ template "oauth-key-secret" . }} + - name: service-token-secret + secret: + secretName: {{ template "service-token-secret" . }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: ["server"] + env: + - name: BUCKETEER_AUTH_ACCOUNT_SERVICE + value: "{{ .Values.env.accountService }}" + - name: BUCKETEER_AUTH_EMAIL_FILTER + value: "{{ .Values.env.emailFilter }}" + - name: BUCKETEER_AUTH_PORT + value: "{{ .Values.env.port }}" + - name: BUCKETEER_AUTH_METRICS_PORT + value: "{{ .Values.env.metricsPort }}" + - name: BUCKETEER_AUTH_LOG_LEVEL + value: "{{ .Values.env.logLevel }}" + - name: BUCKETEER_AUTH_OAUTH_CLIENT_ID + value: "{{ .Values.oauth.clientId }}" + - name: BUCKETEER_AUTH_OAUTH_CLIENT_SECRET + value: "{{ .Values.oauth.clientSecret }}" + - name: BUCKETEER_AUTH_OAUTH_REDIRECT_URLS + value: | +{{ .Values.oauth.redirectUrls | indent 16 }} + - name: BUCKETEER_AUTH_OAUTH_ISSUER + value: "{{ .Values.oauth.issuer }}" + - name: BUCKETEER_AUTH_OAUTH_PRIVATE_KEY + value: /usr/local/oauth-key/private.pem + - name: BUCKETEER_AUTH_SERVICE_TOKEN + value: /usr/local/service-token/token + - name: BUCKETEER_AUTH_CERT + value: /usr/local/certs/service/tls.crt + - name: BUCKETEER_AUTH_KEY + value: /usr/local/certs/service/tls.key + - name: BUCKETEER_AUTH_OAUTH_ISSUER_CERT + value: /usr/local/certs/issuer/tls.crt + - name: BUCKETEER_AUTH_WEBHOOK_KMS_RESOURCE_NAME + value: "{{ .Values.webhook.kmsResourceName }}" + volumeMounts: + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + - name: issuer-cert-secret + mountPath: /usr/local/certs/issuer + readOnly: true + - name: oauth-key-secret + mountPath: /usr/local/oauth-key + readOnly: true + - name: service-token-secret + mountPath: /usr/local/service-token + readOnly: true + ports: + - name: service + containerPort: {{ 
.Values.env.port }} + - name: metrics + containerPort: {{ .Values.env.metricsPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: service + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: service + scheme: HTTPS + resources: +{{ toYaml .Values.resources | indent 12 }} + - name: envoy + image: "{{ .Values.envoy.image.repository }}:{{ .Values.envoy.image.tag }}" + imagePullPolicy: {{ .Values.envoy.image.pullPolicy }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-c" + - "while [ $(netstat -plunt | grep tcp | grep -v envoy | wc -l) -ne 0 ]; do sleep 1; done;" + command: ["envoy"] + args: + - "-c" + - "/usr/local/conf/config.yaml" + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + - name: envoy-config + mountPath: /usr/local/conf/ + readOnly: true + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + ports: + - name: envoy + containerPort: {{ .Values.envoy.port }} + - name: admin + containerPort: {{ .Values.envoy.adminPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + resources: +{{ toYaml .Values.envoy.resources | indent 12 }} + strategy: + type: RollingUpdate diff --git a/manifests/bucketeer/charts/auth/templates/envoy-configmap.yaml b/manifests/bucketeer/charts/auth/templates/envoy-configmap.yaml new file mode 100644 index 000000000..6faf16ecb --- /dev/null +++ 
b/manifests/bucketeer/charts/auth/templates/envoy-configmap.yaml @@ -0,0 +1,229 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "auth.fullname" . }}-envoy-config + namespace: {{ .Values.namespace }} + labels: + app: {{ template "auth.name" . }} + chart: {{ template "auth.chart" . }} + release: {{ template "auth.fullname" . }} + heritage: {{ .Release.Service }} +data: + config.yaml: |- + admin: + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 + static_resources: + clusters: + - name: auth + connect_timeout: 5s + dns_lookup_family: V4_ONLY + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + lb_policy: round_robin + load_assignment: + cluster_name: auth + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 9090 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + type: strict_dns + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + - name: account + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + ignore_health_on_host_removal: true + lb_policy: round_robin + load_assignment: + cluster_name: account + endpoints: + - lb_endpoints: + - endpoint: + address: + 
socket_address: + address: account.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + listeners: + - name: ingress + address: + socket_address: + address: 0.0.0.0 + port_value: 9000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + auth: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: ingress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: / + route: + cluster: auth + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: 
ingress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true + - name: egress + address: + socket_address: + address: 127.0.0.1 + port_value: 9001 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + account: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: egress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.account.AccountService + route: + cluster: account + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: egress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + 
require_client_certificate: true diff --git a/manifests/bucketeer/charts/auth/templates/hpa.yaml b/manifests/bucketeer/charts/auth/templates/hpa.yaml new file mode 100644 index 000000000..ef44760c3 --- /dev/null +++ b/manifests/bucketeer/charts/auth/templates/hpa.yaml @@ -0,0 +1,19 @@ +{{ if .Values.hpa.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "auth.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "auth.fullname" . }} + minReplicas: {{ .Values.hpa.minReplicas }} + maxReplicas: {{ .Values.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpa.metrics.cpu.targetAverageUtilization }} +{{ end }} diff --git a/manifests/bucketeer/charts/auth/templates/issuer-cert-secret.yaml b/manifests/bucketeer/charts/auth/templates/issuer-cert-secret.yaml new file mode 100644 index 000000000..0bb7e482f --- /dev/null +++ b/manifests/bucketeer/charts/auth/templates/issuer-cert-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.tls.issuer.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "auth.fullname" . }}-issuer-cert + namespace: {{ .Values.namespace }} + labels: + app: {{ template "auth.name" . }} + chart: {{ template "auth.chart" . }} + release: {{ template "auth.fullname" . 
}} + heritage: {{ .Release.Service }} +type: Opaque +data: + tls.crt: {{ required "Issuer TLS certificate is required" .Values.tls.issuer.cert | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/auth/templates/oauth-key-secret.yaml b/manifests/bucketeer/charts/auth/templates/oauth-key-secret.yaml new file mode 100644 index 000000000..643656534 --- /dev/null +++ b/manifests/bucketeer/charts/auth/templates/oauth-key-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.oauth.key.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "auth.fullname" . }}-oauth-key + namespace: {{ .Values.namespace }} + labels: + app: {{ template "auth.name" . }} + chart: {{ template "auth.chart" . }} + release: {{ template "auth.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + private.pem: {{ required "OAuth private key is required" .Values.oauth.key.private | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/auth/templates/pdb.yaml b/manifests/bucketeer/charts/auth/templates/pdb.yaml new file mode 100644 index 000000000..087cd1b3f --- /dev/null +++ b/manifests/bucketeer/charts/auth/templates/pdb.yaml @@ -0,0 +1,12 @@ +{{ if .Values.pdb.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "auth.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + selector: + matchLabels: + app: {{ template "auth.name" . }} +{{ end }} diff --git a/manifests/bucketeer/charts/auth/templates/service-cert-secret.yaml b/manifests/bucketeer/charts/auth/templates/service-cert-secret.yaml new file mode 100644 index 000000000..13e06d538 --- /dev/null +++ b/manifests/bucketeer/charts/auth/templates/service-cert-secret.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.tls.service.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "auth.fullname" . 
}}-service-cert + namespace: {{ .Values.namespace }} + labels: + app: {{ template "auth.name" . }} + chart: {{ template "auth.chart" . }} + release: {{ template "auth.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + tls.crt: {{ required "Service TLS certificate is required" .Values.tls.service.cert | b64enc | quote }} + tls.key: {{ required "Service TLS key is required" .Values.tls.service.key | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/auth/templates/service-token-secret.yaml b/manifests/bucketeer/charts/auth/templates/service-token-secret.yaml new file mode 100644 index 000000000..2e92d691e --- /dev/null +++ b/manifests/bucketeer/charts/auth/templates/service-token-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.serviceToken.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "auth.fullname" . }}-service-token + namespace: {{ .Values.namespace }} + labels: + app: {{ template "auth.name" . }} + chart: {{ template "auth.chart" . }} + release: {{ template "auth.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + token: {{ required "Service token is required" .Values.serviceToken.token | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/auth/templates/service.yaml b/manifests/bucketeer/charts/auth/templates/service.yaml new file mode 100644 index 000000000..2481364af --- /dev/null +++ b/manifests/bucketeer/charts/auth/templates/service.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "auth.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "auth.name" . }} + chart: {{ template "auth.chart" . }} + release: {{ template "auth.fullname" . 
}} + heritage: {{ .Release.Service }} + envoy: "true" + metrics: "true" +spec: + type: {{ .Values.service.type }} + clusterIP: {{ .Values.service.clusterIP }} + ports: + - name: service + port: {{ .Values.service.externalPort }} + targetPort: envoy + protocol: TCP + - name: metrics + port: {{ .Values.env.metricsPort }} + protocol: TCP + - name: admin + port: {{ .Values.envoy.adminPort }} + protocol: TCP + selector: + app: {{ template "auth.name" . }} + release: {{ template "auth.fullname" . }} diff --git a/manifests/bucketeer/charts/auth/values.yaml b/manifests/bucketeer/charts/auth/values.yaml new file mode 100644 index 000000000..6e8da9657 --- /dev/null +++ b/manifests/bucketeer/charts/auth/values.yaml @@ -0,0 +1,77 @@ +image: + repository: ghcr.io/bucketeer-io/bucketeer-auth + pullPolicy: IfNotPresent + +fullnameOverride: "auth" + +namespace: + +env: + accountService: localhost:9001 + emailFilter: + logLevel: info + port: 9090 + metricsPort: 9002 + +affinity: {} + +nodeSelector: {} + +pdb: + enabled: + maxUnavailable: 50% + +hpa: + enabled: + minReplicas: + maxReplicas: + metrics: + cpu: + targetAverageUtilization: + +tls: + service: + secret: + cert: + key: + issuer: + secret: + cert: + +oauth: + key: + secret: + private: + clientId: + clientSecret: + redirectUrls: + issuer: + +serviceToken: + secret: + token: + +webhook: + kmsResourceName: + +envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + +service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + +health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + +resources: {} diff --git a/manifests/bucketeer/charts/auto-ops/Chart.yaml b/manifests/bucketeer/charts/auto-ops/Chart.yaml new file mode 100644 index 000000000..edbd94463 --- /dev/null +++ b/manifests/bucketeer/charts/auto-ops/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for 
bucketeer-auto-ops +name: auto-ops +version: 1.0.0 diff --git a/manifests/bucketeer/charts/auto-ops/templates/NOTES.txt b/manifests/bucketeer/charts/auto-ops/templates/NOTES.txt new file mode 100644 index 000000000..a536f5dbb --- /dev/null +++ b/manifests/bucketeer/charts/auto-ops/templates/NOTES.txt @@ -0,0 +1,15 @@ +1. Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "auto-ops.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "auto-ops.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "auto-ops.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "auto-ops.name" . }},release={{ template "auto-ops.fullname" . }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/manifests/bucketeer/charts/auto-ops/templates/_helpers.tpl b/manifests/bucketeer/charts/auto-ops/templates/_helpers.tpl new file mode 100644 index 000000000..e0e6819e4 --- /dev/null +++ b/manifests/bucketeer/charts/auto-ops/templates/_helpers.tpl @@ -0,0 +1,56 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "auto-ops.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "auto-ops.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "auto-ops.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "service-cert-secret" -}} +{{- if .Values.tls.service.secret }} +{{- printf "%s" .Values.tls.service.secret -}} +{{- else -}} +{{ template "auto-ops.fullname" . }}-service-cert +{{- end -}} +{{- end -}} + +{{- define "oauth-key-secret" -}} +{{- if .Values.oauth.key.secret }} +{{- printf "%s" .Values.oauth.key.secret -}} +{{- else -}} +{{ template "auto-ops.fullname" . }}-oauth-key +{{- end -}} +{{- end -}} + +{{- define "service-token-secret" -}} +{{- if .Values.serviceToken.secret }} +{{- printf "%s" .Values.serviceToken.secret -}} +{{- else -}} +{{ template "auto-ops.fullname" . 
}}-service-token +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/manifests/bucketeer/charts/auto-ops/templates/deployment.yaml b/manifests/bucketeer/charts/auto-ops/templates/deployment.yaml new file mode 100644 index 000000000..f956219b7 --- /dev/null +++ b/manifests/bucketeer/charts/auto-ops/templates/deployment.yaml @@ -0,0 +1,173 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "auto-ops.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "auto-ops.name" . }} + chart: {{ template "auto-ops.chart" . }} + release: {{ template "auto-ops.fullname" . }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "auto-ops.name" . }} + release: {{ template "auto-ops.fullname" . }} + template: + metadata: + labels: + app: {{ template "auto-ops.name" . }} + release: {{ template "auto-ops.fullname" . }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/envoy-configmap.yaml") . | sha256sum }} + spec: + {{- with .Values.global.image.imagePullSecrets }} + imagePullSecrets: {{- toYaml . | nindent 8 }} + {{- end }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + volumes: + - name: envoy-config + configMap: + name: {{ template "auto-ops.fullname" . }}-envoy-config + - name: service-cert-secret + secret: + secretName: {{ template "service-cert-secret" . }} + - name: oauth-key-secret + secret: + secretName: {{ template "oauth-key-secret" . }} + - name: service-token-secret + secret: + secretName: {{ template "service-token-secret" . 
}} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: ["server"] + env: + - name: BUCKETEER_AUTO_OPS_PROJECT + value: "{{ .Values.env.project }}" + - name: BUCKETEER_AUTO_OPS_MYSQL_USER + value: "{{ .Values.env.mysqlUser }}" + - name: BUCKETEER_AUTO_OPS_MYSQL_PASS + value: "{{ .Values.env.mysqlPass }}" + - name: BUCKETEER_AUTO_OPS_MYSQL_HOST + value: "{{ .Values.env.mysqlHost }}" + - name: BUCKETEER_AUTO_OPS_MYSQL_PORT + value: "{{ .Values.env.mysqlPort }}" + - name: BUCKETEER_AUTO_OPS_MYSQL_DB_NAME + value: "{{ .Values.env.mysqlDbName }}" + - name: BUCKETEER_AUTO_OPS_DOMAIN_EVENT_TOPIC + value: "{{ .Values.env.domainEventTopic }}" + - name: BUCKETEER_AUTO_OPS_ACCOUNT_SERVICE + value: "{{ .Values.env.accountService }}" + - name: BUCKETEER_AUTO_OPS_FEATURE_SERVICE + value: "{{ .Values.env.featureService }}" + - name: BUCKETEER_AUTO_OPS_EXPERIMENT_SERVICE + value: "{{ .Values.env.experimentService }}" + - name: BUCKETEER_AUTO_OPS_AUTH_SERVICE + value: "{{ .Values.env.authService }}" + - name: BUCKETEER_AUTO_OPS_PORT + value: "{{ .Values.env.port }}" + - name: BUCKETEER_AUTO_OPS_METRICS_PORT + value: "{{ .Values.env.metricsPort }}" + - name: BUCKETEER_AUTO_OPS_LOG_LEVEL + value: "{{ .Values.env.logLevel }}" + - name: BUCKETEER_AUTO_OPS_OAUTH_CLIENT_ID + value: "{{ .Values.oauth.clientId }}" + - name: BUCKETEER_AUTO_OPS_OAUTH_ISSUER + value: "{{ .Values.oauth.issuer }}" + - name: BUCKETEER_AUTO_OPS_OAUTH_KEY + value: /usr/local/oauth-key/public.pem + - name: BUCKETEER_AUTO_OPS_CERT + value: /usr/local/certs/service/tls.crt + - name: BUCKETEER_AUTO_OPS_KEY + value: /usr/local/certs/service/tls.key + - name: BUCKETEER_AUTO_OPS_SERVICE_TOKEN + value: /usr/local/service-token/token + - name: BUCKETEER_AUTO_OPS_WEBHOOK_BASE_URL + value: "{{ .Values.webhook.baseURL }}" + - name: BUCKETEER_AUTO_OPS_WEBHOOK_KMS_RESOURCE_NAME + value: "{{ 
.Values.webhook.kmsResourceName }}" + volumeMounts: + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + - name: oauth-key-secret + mountPath: /usr/local/oauth-key + readOnly: true + - name: service-token-secret + mountPath: /usr/local/service-token + readOnly: true + ports: + - name: service + containerPort: {{ .Values.env.port }} + - name: metrics + containerPort: {{ .Values.env.metricsPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: service + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: service + scheme: HTTPS + resources: +{{ toYaml .Values.resources | indent 12 }} + - name: envoy + image: "{{ .Values.envoy.image.repository }}:{{ .Values.envoy.image.tag }}" + imagePullPolicy: {{ .Values.envoy.image.pullPolicy }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-c" + - "while [ $(netstat -plunt | grep tcp | grep -v envoy | wc -l) -ne 0 ]; do sleep 1; done;" + command: ["envoy"] + args: + - "-c" + - "/usr/local/conf/config.yaml" + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + - name: envoy-config + mountPath: /usr/local/conf/ + readOnly: true + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + ports: + - name: envoy + containerPort: {{ .Values.envoy.port }} + - name: admin + containerPort: {{ .Values.envoy.adminPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health 
+ port: envoy + scheme: HTTPS + resources: +{{ toYaml .Values.envoy.resources | indent 12 }} + strategy: + type: RollingUpdate diff --git a/manifests/bucketeer/charts/auto-ops/templates/envoy-configmap.yaml b/manifests/bucketeer/charts/auto-ops/templates/envoy-configmap.yaml new file mode 100644 index 000000000..7f23e6057 --- /dev/null +++ b/manifests/bucketeer/charts/auto-ops/templates/envoy-configmap.yaml @@ -0,0 +1,393 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "auto-ops.fullname" . }}-envoy-config + namespace: {{ .Values.namespace }} + labels: + app: {{ template "auto-ops.name" . }} + chart: {{ template "auto-ops.chart" . }} + release: {{ template "auto-ops.fullname" . }} + heritage: {{ .Release.Service }} +data: + config.yaml: |- + admin: + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 + static_resources: + clusters: + - name: auto-ops + type: strict_dns + dns_lookup_family: V4_ONLY + connect_timeout: 5s + lb_policy: round_robin + load_assignment: + cluster_name: auto-ops + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 9090 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + 
interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + - name: account + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + lb_policy: round_robin + load_assignment: + cluster_name: account + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: account.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + - name: auth + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + lb_policy: round_robin + load_assignment: + cluster_name: auth + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: auth.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': 
type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + - name: experiment + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + lb_policy: round_robin + load_assignment: + cluster_name: experiment + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: experiment.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + - name: feature + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + lb_policy: round_robin + load_assignment: + cluster_name: feature + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: feature.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - 
certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + listeners: + - name: ingress + address: + socket_address: + address: 0.0.0.0 + port_value: 9000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + auto-ops: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: ingress_services + routes: + - match: + headers: + - name: content-type + string_match: + safe_regex: + google_re2: {} + regex: ^application\/(grpc|json)$ + prefix: / + route: + cluster: auto-ops + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: ingress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - 
h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true + - name: egress + address: + socket_address: + address: 127.0.0.1 + port_value: 9001 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + account: + value: 25 + auth: + value: 25 + experiment: + value: 25 + feature: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: egress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.account.AccountService + route: + cluster: account + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.auth.AuthService + route: + cluster: auth + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.experiment.ExperimentService + route: + cluster: experiment + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.feature.FeatureService + route: + cluster: 
feature + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: egress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true diff --git a/manifests/bucketeer/charts/auto-ops/templates/hpa.yaml b/manifests/bucketeer/charts/auto-ops/templates/hpa.yaml new file mode 100644 index 000000000..857e7a46a --- /dev/null +++ b/manifests/bucketeer/charts/auto-ops/templates/hpa.yaml @@ -0,0 +1,19 @@ +{{ if .Values.hpa.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "auto-ops.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "auto-ops.fullname" . }} + minReplicas: {{ .Values.hpa.minReplicas }} + maxReplicas: {{ .Values.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpa.metrics.cpu.targetAverageUtilization }} +{{ end }} diff --git a/manifests/bucketeer/charts/auto-ops/templates/oauth-key-secret.yaml b/manifests/bucketeer/charts/auto-ops/templates/oauth-key-secret.yaml new file mode 100644 index 000000000..dbffc49ac --- /dev/null +++ b/manifests/bucketeer/charts/auto-ops/templates/oauth-key-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.oauth.key.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "auto-ops.fullname" . }}-oauth-key + namespace: {{ .Values.namespace }} + labels: + app: {{ template "auto-ops.name" . }} + chart: {{ template "auto-ops.chart" . }} + release: {{ template "auto-ops.fullname" . 
}} + heritage: {{ .Release.Service }} +type: Opaque +data: + public.pem: {{ required "OAuth key is required" .Values.oauth.key.public | b64enc | quote }} +{{- end }} diff --git a/manifests/bucketeer/charts/auto-ops/templates/pdb.yaml b/manifests/bucketeer/charts/auto-ops/templates/pdb.yaml new file mode 100644 index 000000000..7f815ceb4 --- /dev/null +++ b/manifests/bucketeer/charts/auto-ops/templates/pdb.yaml @@ -0,0 +1,12 @@ +{{ if .Values.pdb.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "auto-ops.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + selector: + matchLabels: + app: {{ template "auto-ops.name" . }} +{{ end }} diff --git a/manifests/bucketeer/charts/auto-ops/templates/service-cert-secret.yaml b/manifests/bucketeer/charts/auto-ops/templates/service-cert-secret.yaml new file mode 100644 index 000000000..2d24822c7 --- /dev/null +++ b/manifests/bucketeer/charts/auto-ops/templates/service-cert-secret.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.tls.service.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "auto-ops.fullname" . }}-service-cert + namespace: {{ .Values.namespace }} + labels: + app: {{ template "auto-ops.name" . }} + chart: {{ template "auto-ops.chart" . }} + release: {{ template "auto-ops.fullname" . 
}} + heritage: {{ .Release.Service }} +type: Opaque +data: + tls.crt: {{ required "Service TLS certificate is required" .Values.tls.service.cert | b64enc | quote }} + tls.key: {{ required "Service TLS key is required" .Values.tls.service.key | b64enc | quote }} +{{- end }} diff --git a/manifests/bucketeer/charts/auto-ops/templates/service-token-secret.yaml b/manifests/bucketeer/charts/auto-ops/templates/service-token-secret.yaml new file mode 100644 index 000000000..035794047 --- /dev/null +++ b/manifests/bucketeer/charts/auto-ops/templates/service-token-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.serviceToken.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "auto-ops.fullname" . }}-service-token + namespace: {{ .Values.namespace }} + labels: + app: {{ template "auto-ops.name" . }} + chart: {{ template "auto-ops.chart" . }} + release: {{ template "auto-ops.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + token: {{ required "Service token is required" .Values.serviceToken.token | b64enc | quote }} +{{- end }} diff --git a/manifests/bucketeer/charts/auto-ops/templates/service.yaml b/manifests/bucketeer/charts/auto-ops/templates/service.yaml new file mode 100644 index 000000000..fa7f97a32 --- /dev/null +++ b/manifests/bucketeer/charts/auto-ops/templates/service.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "auto-ops.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "auto-ops.name" . }} + chart: {{ template "auto-ops.chart" . }} + release: {{ template "auto-ops.fullname" . 
}} + heritage: {{ .Release.Service }} + envoy: "true" + metrics: "true" +spec: + type: {{ .Values.service.type }} + clusterIP: {{ .Values.service.clusterIP }} + ports: + - name: service + port: {{ .Values.service.externalPort }} + targetPort: envoy + protocol: TCP + - name: metrics + port: {{ .Values.env.metricsPort }} + protocol: TCP + - name: admin + port: {{ .Values.envoy.adminPort }} + protocol: TCP + selector: + app: {{ template "auto-ops.name" . }} + release: {{ template "auto-ops.fullname" . }} + diff --git a/manifests/bucketeer/charts/auto-ops/values.yaml b/manifests/bucketeer/charts/auto-ops/values.yaml new file mode 100644 index 000000000..fd2d8a99c --- /dev/null +++ b/manifests/bucketeer/charts/auto-ops/values.yaml @@ -0,0 +1,82 @@ +image: + repository: ghcr.io/bucketeer-io/bucketeer-auto-ops + pullPolicy: IfNotPresent + +fullnameOverride: "auto-ops" + +namespace: + +env: + project: + mysqlUser: + mysqlPass: + mysqlHost: + mysqlPort: 3306 + mysqlDbName: + logLevel: info + port: 9090 + metricsPort: 9002 + domainEventTopic: + accountService: localhost:9001 + featureService: localhost:9001 + experimentService: localhost:9001 + authService: localhost:9001 + +webhook: + baseURL: + kmsResourceName: + +affinity: {} + +nodeSelector: {} + +pdb: + enabled: + maxUnavailable: 50% + +hpa: + enabled: true + minReplicas: + maxReplicas: + metrics: + cpu: + targetAverageUtilization: 75 + +tls: + service: + secret: + cert: + key: + +oauth: + key: + secret: + public: + clientId: + issuer: + +serviceToken: + secret: + token: + +envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + +service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + +health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + +resources: {} diff --git a/manifests/bucketeer/charts/calculator/Chart.yaml b/manifests/bucketeer/charts/calculator/Chart.yaml new file mode 100644 
index 000000000..86616c357 --- /dev/null +++ b/manifests/bucketeer/charts/calculator/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for bucketeer-calculator +name: calculator +version: 1.0.0 diff --git a/manifests/bucketeer/charts/calculator/templates/NOTES.txt b/manifests/bucketeer/charts/calculator/templates/NOTES.txt new file mode 100644 index 000000000..f2910ce52 --- /dev/null +++ b/manifests/bucketeer/charts/calculator/templates/NOTES.txt @@ -0,0 +1,15 @@ +1. Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "calculator.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "calculator.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "calculator.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "calculator.name" . }},release={{ template "calculator.fullname" . 
}}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/manifests/bucketeer/charts/calculator/templates/_helpers.tpl b/manifests/bucketeer/charts/calculator/templates/_helpers.tpl new file mode 100644 index 000000000..6a89a5e92 --- /dev/null +++ b/manifests/bucketeer/charts/calculator/templates/_helpers.tpl @@ -0,0 +1,48 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "calculator.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "calculator.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "calculator.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "service-cert-secret" -}} +{{- if .Values.tls.service.secret }} +{{- printf "%s" .Values.tls.service.secret -}} +{{- else -}} +{{ template "calculator.fullname" . }}-service-cert +{{- end -}} +{{- end -}} + +{{- define "service-token-secret" -}} +{{- if .Values.serviceToken.secret }} +{{- printf "%s" .Values.serviceToken.secret -}} +{{- else -}} +{{ template "calculator.fullname" . 
}}-service-token +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/manifests/bucketeer/charts/calculator/templates/deployment.yaml b/manifests/bucketeer/charts/calculator/templates/deployment.yaml new file mode 100644 index 000000000..a35ad7cec --- /dev/null +++ b/manifests/bucketeer/charts/calculator/templates/deployment.yaml @@ -0,0 +1,153 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "calculator.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "calculator.name" . }} + chart: {{ template "calculator.chart" . }} + release: {{ template "calculator.fullname" . }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app: {{ template "calculator.name" . }} + release: {{ template "calculator.fullname" . }} + template: + metadata: + labels: + app: {{ template "calculator.name" . }} + release: {{ template "calculator.fullname" . }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/envoy-configmap.yaml") . | sha256sum }} + spec: + {{- with .Values.global.image.imagePullSecrets }} + imagePullSecrets: {{- toYaml . | nindent 8 }} + {{- end }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + volumes: + - name: envoy-config + configMap: + name: {{ template "calculator.fullname" . }}-envoy-config + - name: service-cert-secret + secret: + secretName: {{ template "service-cert-secret" . }} + - name: service-token-secret + secret: + secretName: {{ template "service-token-secret" . 
}} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: BUCKETEER_CALCULATOR_PROJECT + value: "{{ .Values.env.project }}" + - name: BUCKETEER_CALCULATOR_MYSQL_USER + value: "{{ .Values.env.mysqlUser }}" + - name: BUCKETEER_CALCULATOR_MYSQL_PASS + value: "{{ .Values.env.mysqlPass }}" + - name: BUCKETEER_CALCULATOR_MYSQL_HOST + value: "{{ .Values.env.mysqlHost }}" + - name: BUCKETEER_CALCULATOR_MYSQL_PORT + value: "{{ .Values.env.mysqlPort }}" + - name: BUCKETEER_CALCULATOR_MYSQL_DB_NAME + value: "{{ .Values.env.mysqlDbName }}" + - name: BUCKETEER_CALCULATOR_ENVIRONMENT_SERVICE + value: "{{ .Values.env.environmentService }}" + - name: BUCKETEER_CALCULATOR_EXPERIMENT_SERVICE + value: "{{ .Values.env.experimentService }}" + - name: BUCKETEER_CALCULATOR_EVENT_COUNTER_SERVICE + value: "{{ .Values.env.eventCounterService }}" + - name: BUCKETEER_CALCULATOR_PORT + value: "{{ .Values.env.port }}" + - name: BUCKETEER_CALCULATOR_METRICS_PORT + value: "{{ .Values.env.metricsPort }}" + - name: BUCKETEER_CALCULATOR_LOG_LEVEL + value: "{{ .Values.env.logLevel }}" + - name: BUCKETEER_CALCULATOR_SERVICE_TOKEN + value: /usr/local/service-token/token + - name: BUCKETEER_CALCULATOR_CERT + value: /usr/local/certs/service/tls.crt + - name: BUCKETEER_CALCULATOR_KEY + value: /usr/local/certs/service/tls.key + - name: BUCKETEER_CALCULATOR_TRACE_SAMPLING_PROBABILITY + value: "{{ .Values.env.traceSamplingProbability }}" + volumeMounts: + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + - name: service-token-secret + mountPath: /usr/local/service-token + readOnly: true + ports: + - name: service + containerPort: {{ .Values.env.port }} + - name: metrics + containerPort: {{ .Values.env.metricsPort }} + readinessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=127.0.0.1:9090", "-tls", "-tls-no-verify"] + periodSeconds: {{ 
.Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + livenessProbe: + exec: + command: ["/bin/grpc_health_probe", "-addr=127.0.0.1:9090", "-tls", "-tls-no-verify"] + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + resources: +{{ toYaml .Values.resources | indent 12 }} + - name: envoy + image: "{{ .Values.envoy.image.repository }}:{{ .Values.envoy.image.tag }}" + imagePullPolicy: {{ .Values.envoy.image.pullPolicy }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-c" + - "while [ $(netstat -plunt | grep tcp | grep -v envoy | wc -l) -ne 0 ]; do sleep 1; done;" + command: ["envoy"] + args: + - "-c" + - "/usr/local/conf/config.yaml" + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + - name: envoy-config + mountPath: /usr/local/conf/ + readOnly: true + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + ports: + - name: envoy + containerPort: {{ .Values.envoy.port }} + - name: admin + containerPort: {{ .Values.envoy.adminPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + resources: +{{ toYaml .Values.envoy.resources | indent 12 }} + strategy: + type: RollingUpdate diff --git a/manifests/bucketeer/charts/calculator/templates/envoy-configmap.yaml b/manifests/bucketeer/charts/calculator/templates/envoy-configmap.yaml new file mode 100644 index 000000000..7674aca7f --- /dev/null +++ 
b/manifests/bucketeer/charts/calculator/templates/envoy-configmap.yaml @@ -0,0 +1,391 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "calculator.fullname" . }}-envoy-config + namespace: {{ .Values.namespace }} + labels: + app: {{ template "calculator.name" . }} + chart: {{ template "calculator.chart" . }} + release: {{ template "calculator.fullname" . }} + heritage: {{ .Release.Service }} +data: + config.yaml: |- + admin: + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 + static_resources: + clusters: + - name: calculator + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + lb_policy: round_robin + load_assignment: + cluster_name: calculator + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 9090 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + - connect_timeout: 5s + dns_lookup_family: V4_ONLY + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + 
unhealthy_threshold: 2 + ignore_health_on_host_removal: true + lb_policy: round_robin + load_assignment: + cluster_name: environment + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: environment.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + name: environment + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + type: strict_dns + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + - name: feature + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + lb_policy: round_robin + load_assignment: + cluster_name: feature + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: feature.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + 
ignore_health_on_host_removal: true + - name: experiment + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + lb_policy: round_robin + load_assignment: + cluster_name: experiment + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: experiment.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + - name: event-counter-server + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + lb_policy: round_robin + load_assignment: + cluster_name: event-counter-server + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: event-counter.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': 
type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + listeners: + - name: ingress + address: + socket_address: + address: 0.0.0.0 + port_value: 9000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + calculator: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: ingress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: / + route: + cluster: calculator + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: ingress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true + - name: egress + address: + socket_address: + address: 127.0.0.1 + port_value: 
9001 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + environment: + value: 25 + event-counter-server: + value: 25 + experiment: + value: 25 + feature: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: egress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.environment.EnvironmentService + route: + cluster: environment + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.feature.FeatureService + route: + cluster: feature + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.experiment.ExperimentService + route: + cluster: experiment + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.eventcounter.EventCounterService + route: + cluster: event-counter-server + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: egress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + 
'@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true diff --git a/manifests/bucketeer/charts/calculator/templates/hpa.yaml b/manifests/bucketeer/charts/calculator/templates/hpa.yaml new file mode 100644 index 000000000..9ddf15445 --- /dev/null +++ b/manifests/bucketeer/charts/calculator/templates/hpa.yaml @@ -0,0 +1,23 @@ +{{ if .Values.hpa.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "calculator.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "calculator.fullname" . }} + minReplicas: {{ .Values.hpa.minReplicas }} + maxReplicas: {{ .Values.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.hpa.metrics.memory.targetAverageUtilization }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpa.metrics.cpu.targetAverageUtilization }} +{{ end }} diff --git a/manifests/bucketeer/charts/calculator/templates/service-cert-secret.yaml b/manifests/bucketeer/charts/calculator/templates/service-cert-secret.yaml new file mode 100644 index 000000000..8e486104a --- /dev/null +++ b/manifests/bucketeer/charts/calculator/templates/service-cert-secret.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.tls.service.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "calculator.fullname" . }}-service-cert + namespace: {{ .Values.namespace }} + labels: + app: {{ template "calculator.name" . }} + chart: {{ template "calculator.chart" . }} + release: {{ template "calculator.fullname" . 
}} + heritage: {{ .Release.Service }} +type: Opaque +data: + tls.crt: {{ required "Service TLS certificate is required" .Values.tls.service.cert | b64enc | quote }} + tls.key: {{ required "Service TLS key is required" .Values.tls.service.key | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/calculator/templates/service-token-secret.yaml b/manifests/bucketeer/charts/calculator/templates/service-token-secret.yaml new file mode 100644 index 000000000..bb3567de0 --- /dev/null +++ b/manifests/bucketeer/charts/calculator/templates/service-token-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.serviceToken.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "calculator.fullname" . }}-service-token + namespace: {{ .Values.namespace }} + labels: + app: {{ template "calculator.name" . }} + chart: {{ template "calculator.chart" . }} + release: {{ template "calculator.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + token: {{ required "Service token is required" .Values.serviceToken.token | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/calculator/templates/service.yaml b/manifests/bucketeer/charts/calculator/templates/service.yaml new file mode 100644 index 000000000..f0e79a133 --- /dev/null +++ b/manifests/bucketeer/charts/calculator/templates/service.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "calculator.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "calculator.name" . }} + chart: {{ template "calculator.chart" . }} + release: {{ template "calculator.fullname" . 
}} + heritage: {{ .Release.Service }} + envoy: "true" + metrics: "true" +spec: + type: {{ .Values.service.type }} + clusterIP: {{ .Values.service.clusterIP }} + ports: + - name: service + port: {{ .Values.service.externalPort }} + targetPort: envoy + protocol: TCP + - name: metrics + port: {{ .Values.env.metricsPort }} + protocol: TCP + - name: admin + port: {{ .Values.envoy.adminPort }} + protocol: TCP + selector: + app: {{ template "calculator.name" . }} + release: {{ template "calculator.fullname" . }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/calculator/values.yaml b/manifests/bucketeer/charts/calculator/values.yaml new file mode 100644 index 000000000..c88b1ccef --- /dev/null +++ b/manifests/bucketeer/charts/calculator/values.yaml @@ -0,0 +1,73 @@ +replicaCount: 1 + +image: + repository: ghcr.io/bucketeer-io/bucketeer-calculator + pullPolicy: IfNotPresent + +fullnameOverride: "calculator" + +namespace: + +env: + project: + mysqlUser: + mysqlPass: + mysqlHost: + mysqlPort: 3306 + mysqlDbName: + environmentService: localhost:9001 + experimentService: localhost:9001 + eventCounterService: localhost:9001 + logLevel: info + port: 9090 + metricsPort: 9002 + traceSamplingProbability: 0.001 + +affinity: {} + +nodeSelector: + +hpa: + enabled: + minReplicas: + maxReplicas: + metrics: + cpu: + targetAverageUtilization: + memory: + targetAverageUtilization: + +envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + +tls: + service: + secret: + cert: + key: + +serviceToken: + secret: + token: + +service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + +health: + periodSeconds: 10 + failureThreshold: 10 + # It is necessary to wait for the model compilation to be done. + # The duration is up to resources. + # cf. 
CPU: 500m, MEM: 3Gi -> 280sec + initialDelaySeconds: + +resources: {} diff --git a/manifests/bucketeer/charts/dex/.helmignore b/manifests/bucketeer/charts/dex/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/manifests/bucketeer/charts/dex/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/manifests/bucketeer/charts/dex/Chart.yaml b/manifests/bucketeer/charts/dex/Chart.yaml new file mode 100644 index 000000000..742d1fe57 --- /dev/null +++ b/manifests/bucketeer/charts/dex/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Dex +name: dex +version: 0.1.0 diff --git a/manifests/bucketeer/charts/dex/templates/NOTES.txt b/manifests/bucketeer/charts/dex/templates/NOTES.txt new file mode 100644 index 000000000..4bbd13541 --- /dev/null +++ b/manifests/bucketeer/charts/dex/templates/NOTES.txt @@ -0,0 +1,15 @@ +1. Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "dex.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "dex.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "dex.fullname" . 
}} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "dex.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/manifests/bucketeer/charts/dex/templates/_helpers.tpl b/manifests/bucketeer/charts/dex/templates/_helpers.tpl new file mode 100644 index 000000000..fa47488c6 --- /dev/null +++ b/manifests/bucketeer/charts/dex/templates/_helpers.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "dex.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "dex.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "dex.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/manifests/bucketeer/charts/dex/templates/cert-secret.yaml b/manifests/bucketeer/charts/dex/templates/cert-secret.yaml new file mode 100644 index 000000000..846ba3f29 --- /dev/null +++ b/manifests/bucketeer/charts/dex/templates/cert-secret.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "dex.fullname" . }}-cert + namespace: {{ .Values.namespace }} + labels: + app: {{ template "dex.name" . }} + chart: {{ template "dex.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +data: + tls.crt: {{ required "TLS certificate is required" .Values.global.tls.cert | b64enc | quote }} + tls.key: {{ required "TLS key is required" .Values.global.tls.key | b64enc | quote }} diff --git a/manifests/bucketeer/charts/dex/templates/configmap.yaml b/manifests/bucketeer/charts/dex/templates/configmap.yaml new file mode 100644 index 000000000..10c29465e --- /dev/null +++ b/manifests/bucketeer/charts/dex/templates/configmap.yaml @@ -0,0 +1,38 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "dex.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "dex.name" . }} + chart: {{ template "dex.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +data: + config.yaml: |- + issuer: {{ .Values.config.issuer }} + storage: + type: kubernetes + config: + inCluster: true + web: + https: 0.0.0.0:{{ .Values.service.dexPort }} + tlsCert: /etc/dex/tls/tls.crt + tlsKey: /etc/dex/tls/tls.key + connectors: + - type: oidc + id: google + name: Google + config: + issuer: {{ .Values.config.google.issuer }} + clientID: $GOOGLE_CLIENT_ID + clientSecret: $GOOGLE_CLIENT_SECRET + redirectURI: {{ .Values.config.google.redirectURI }} + staticClients: + - id: {{ .Values.config.client.id }} + name: {{ .Values.config.client.name }} + secret: {{ .Values.config.client.secret }} + redirectURIs: + {{- range .Values.config.client.redirectURIs }} + - {{ . }} + {{- end }} diff --git a/manifests/bucketeer/charts/dex/templates/deployment.yaml b/manifests/bucketeer/charts/dex/templates/deployment.yaml new file mode 100644 index 000000000..cdddc91a2 --- /dev/null +++ b/manifests/bucketeer/charts/dex/templates/deployment.yaml @@ -0,0 +1,124 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "dex.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "dex.name" . }} + chart: {{ template "dex.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app: {{ template "dex.name" . }} + release: {{ .Release.Name }} + template: + metadata: + labels: + app: {{ template "dex.name" . 
}} + release: {{ .Release.Name }} + spec: + affinity: {{ toYaml .Values.affinity | nindent 8 }} + nodeSelector: {{ toYaml .Values.nodeSelector | indent 8 }} + containers: + - name: envoy + image: "{{ .Values.envoy.image.repository }}:{{ .Values.envoy.image.tag }}" + imagePullPolicy: {{ .Values.envoy.image.pullPolicy }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-c" + - "while [ $(netstat -plunt | grep tcp | grep -v envoy | wc -l) -ne 0 ]; do sleep 1; done;" + command: ["envoy"] + args: + - "-c" + - "/usr/local/conf/config.yaml" + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + - name: envoy-config + mountPath: /usr/local/conf/ + readOnly: true + - name: cert + mountPath: /etc/dex/tls + readOnly: true + resources: +{{ toYaml .Values.envoy.resources | indent 12 }} + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-c" + - "sleep 10;" + args: + - serve + - /etc/dex/config/config.yaml + env: + - name: GOOGLE_CLIENT_ID + valueFrom: + secretKeyRef: + name: {{ template "dex.fullname" . }} + key: google-client-id + - name: GOOGLE_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: {{ template "dex.fullname" . 
}} + key: google-client-secret + - name: KUBERNETES_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: config + mountPath: /etc/dex/config + - name: secret + mountPath: /etc/dex/secret + readOnly: true + - name: cert + mountPath: /etc/dex/tls + readOnly: true + - name: db + mountPath: /etc/dex/db + ports: + - name: https + containerPort: {{ .Values.service.internalPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: {{ .Values.service.internalPort }} + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: {{ .Values.service.internalPort }} + scheme: HTTPS + resources: +{{ toYaml .Values.resources | indent 12 }} + volumes: + - name: config + configMap: + name: {{ template "dex.fullname" . }} + - name: secret + secret: + secretName: {{ template "dex.fullname" . }} + - name: cert + secret: + secretName: {{ template "dex.fullname" . }}-cert + - name: envoy-config + configMap: + name: {{ template "dex.fullname" . }}-envoy-config + - name: db + emptyDir: {} diff --git a/manifests/bucketeer/charts/dex/templates/envoy-configmap.yaml b/manifests/bucketeer/charts/dex/templates/envoy-configmap.yaml new file mode 100644 index 000000000..ba73df2bd --- /dev/null +++ b/manifests/bucketeer/charts/dex/templates/envoy-configmap.yaml @@ -0,0 +1,120 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "dex.fullname" . }}-envoy-config + namespace: {{ .Values.namespace }} + labels: + app: {{ template "dex.name" . }} + chart: {{ template "dex.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +data: + config.yaml: |- + admin: + access_log: + - name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: "/dev/stdout" + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 + static_resources: + clusters: + - name: dex + type: strict_dns + connect_timeout: 5s + ignore_health_on_host_removal: true + dns_lookup_family: V4_ONLY + lb_policy: round_robin + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + load_assignment: + cluster_name: dex + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 5556 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /etc/dex/tls/tls.crt + private_key: + filename: /etc/dex/tls/tls.key + health_checks: + - http_health_check: + path: /dex/healthz + codec_client_type: HTTP2 + healthy_threshold: 2 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + listeners: + - name: ingress + address: + socket_address: + address: 0.0.0.0 + port_value: 9000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: 
envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + dex: + value: 100 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - "*" + name: ingress_services + routes: + - match: + prefix: / + route: + cluster: dex + stat_prefix: ingress_http + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /etc/dex/tls/tls.crt + private_key: + filename: /etc/dex/tls/tls.key + require_client_certificate: true diff --git a/manifests/bucketeer/charts/dex/templates/pdb.yaml b/manifests/bucketeer/charts/dex/templates/pdb.yaml new file mode 100644 index 000000000..9ba0eded5 --- /dev/null +++ b/manifests/bucketeer/charts/dex/templates/pdb.yaml @@ -0,0 +1,12 @@ +{{ if .Values.pdb.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "dex.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + selector: + matchLabels: + app: {{ template "dex.name" . }} +{{ end }} diff --git a/manifests/bucketeer/charts/dex/templates/secret.yaml b/manifests/bucketeer/charts/dex/templates/secret.yaml new file mode 100644 index 000000000..5fc007d0b --- /dev/null +++ b/manifests/bucketeer/charts/dex/templates/secret.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "dex.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "dex.name" . }} + chart: {{ template "dex.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +data: + google-client-id: {{ .Values.config.google.clientID | b64enc | quote }} + google-client-secret: {{ .Values.config.google.clientSecret | b64enc | quote }} diff --git a/manifests/bucketeer/charts/dex/templates/service.yaml b/manifests/bucketeer/charts/dex/templates/service.yaml new file mode 100644 index 000000000..753d37503 --- /dev/null +++ b/manifests/bucketeer/charts/dex/templates/service.yaml @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "dex.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "dex.name" . }} + chart: {{ template "dex.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + envoy: "true" +spec: + type: {{ .Values.service.type }} + {{- if .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + ports: + - port: {{ .Values.service.dexPort }} + targetPort: {{ .Values.service.dexPort }} + protocol: TCP + name: {{ .Values.service.name }} + - port: {{ .Values.service.externalPort }} + targetPort: {{ .Values.service.internalPort }} + protocol: TCP + name: envoy + - port: {{ .Values.envoy.adminPort }} + targetPort: {{ .Values.envoy.adminPort }} + protocol: TCP + name: admin + selector: + app: {{ template "dex.name" . 
}} + release: {{ .Release.Name }} diff --git a/manifests/bucketeer/charts/dex/values.yaml b/manifests/bucketeer/charts/dex/values.yaml new file mode 100644 index 000000000..79887f971 --- /dev/null +++ b/manifests/bucketeer/charts/dex/values.yaml @@ -0,0 +1,67 @@ +replicaCount: 2 + +image: + repository: dexidp/dex + tag: v2.27.0 + pullPolicy: IfNotPresent + +namespace: + +affinity: {} + +nodeSelector: {} + +pdb: + enabled: + maxUnavailable: 50% + +service: + name: dex + type: ClusterIP + clusterIP: None + externalPort: 9000 + internalPort: 9000 + dexPort: 5556 + +tls: + cert: + key: + +envoy: + adminPort: 8001 + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + resources: + limits: + cpu: 50m + memory: 64Mi + requests: + cpu: 50m + memory: 64Mi + +health: + initialDelaySeconds: 10 + +resources: + limits: + cpu: 100m + memory: 64Mi + requests: + cpu: 100m + memory: 64Mi + +config: + issuer: + client: + id: + name: + secret: + redirectURIs: + google: + issuer: + clientID: + clientSecret: + redirectURIs: diff --git a/manifests/bucketeer/charts/druid/Chart.yaml b/manifests/bucketeer/charts/druid/Chart.yaml new file mode 100644 index 000000000..119c7a6d4 --- /dev/null +++ b/manifests/bucketeer/charts/druid/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for bucketeer-druid +name: druid +version: 1.0.0 diff --git a/manifests/bucketeer/charts/druid/charts/druid-cluster/.helmignore b/manifests/bucketeer/charts/druid/charts/druid-cluster/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/manifests/bucketeer/charts/druid/charts/druid-cluster/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/manifests/bucketeer/charts/druid/charts/druid-cluster/Chart.yaml b/manifests/bucketeer/charts/druid/charts/druid-cluster/Chart.yaml new file mode 100644 index 000000000..e1ebf6423 --- /dev/null +++ b/manifests/bucketeer/charts/druid/charts/druid-cluster/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: druid-cluster +version: 0.1.0 diff --git a/manifests/bucketeer/charts/druid/charts/druid-cluster/templates/NOTES.txt b/manifests/bucketeer/charts/druid/charts/druid-cluster/templates/NOTES.txt new file mode 100644 index 000000000..b39cd3dbc --- /dev/null +++ b/manifests/bucketeer/charts/druid/charts/druid-cluster/templates/NOTES.txt @@ -0,0 +1,2 @@ +The Druid Operator has been installed. Check its status by running: + kubectl --namespace {{ .Release.Namespace }} get pods -l "app={{ template "druid-cluster.name" . }},release={{ .Release.Name }}" diff --git a/manifests/bucketeer/charts/druid/charts/druid-cluster/templates/_helpers.tpl b/manifests/bucketeer/charts/druid/charts/druid-cluster/templates/_helpers.tpl new file mode 100644 index 000000000..9fa93fb92 --- /dev/null +++ b/manifests/bucketeer/charts/druid/charts/druid-cluster/templates/_helpers.tpl @@ -0,0 +1,21 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "druid-cluster.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "druid-cluster.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/manifests/bucketeer/charts/druid/charts/druid-cluster/templates/druid.yaml b/manifests/bucketeer/charts/druid/charts/druid-cluster/templates/druid.yaml new file mode 100644 index 000000000..5fa8e169c --- /dev/null +++ b/manifests/bucketeer/charts/druid/charts/druid-cluster/templates/druid.yaml @@ -0,0 +1,297 @@ +{{- if .Values.global.druid.enabled }} +apiVersion: "druid.apache.org/v1alpha1" +kind: "Druid" +metadata: + name: cluster + namespace: {{ .Values.namespace }} + annotations: + "helm.sh/resource-policy": keep +spec: + imagePullSecrets: {{ toYaml .Values.spec.imagePullSecrets | nindent 4 }} + image: {{ .Values.spec.image }} + startScript: /druid.sh + securityContext: + fsGroup: 1000 + runAsUser: 1000 + runAsGroup: 1000 + nodeSelector: {{ toYaml .Values.spec.nodeSelector | nindent 4 }} + services: + - spec: + type: ClusterIP + clusterIP: None + commonConfigMountPath: "/opt/druid/conf/druid/cluster/_common" + jvm.options: {{ toYaml .Values.spec.jvmOptions | nindent 4 }} + log4j.config: {{ toYaml .Values.spec.log4jConfig | nindent 4 }} + common.runtime.properties: {{ toYaml .Values.spec.commonRuntimeProperties | nindent 4 }} + deepStorage: + spec: + properties: {{ toYaml .Values.spec.deepStorage.spec.properties | nindent 6 }} + type: default + metadataStore: + spec: + properties: {{ toYaml .Values.spec.metadataStore.spec.properties | nindent 6 }} + type: default + zookeeper: + spec: + properties: {{ toYaml .Values.spec.zookeeper.spec.properties | nindent 6 }} + type: default + env: {{ toYaml .Values.spec.env | nindent 4 }} + volumeMounts: {{ toYaml .Values.spec.volumeMounts | nindent 4 }} + volumes: {{ toYaml 
.Values.spec.volumes | nindent 4 }} + + nodes: + brokers: + nodeType: "broker" + nodeConfigMountPath: "/opt/druid/conf/druid/cluster/query/broker" + runtime.properties: {{ toYaml .Values.spec.nodes.brokers.runtimeProperties | nindent 8 }} + extra.jvm.options: {{ toYaml .Values.spec.nodes.brokers.extraJvmOptions | nindent 8 }} + druid.port: 8088 + volumeMounts: + - mountPath: /druid/data + name: data-volume + volumes: + - name: data-volume + emptyDir: {} + affinity: {{ toYaml .Values.spec.nodes.brokers.affinity | nindent 8 }} + resources: {{ toYaml .Values.spec.nodes.brokers.resources | nindent 8 }} + podDisruptionBudgetSpec: + maxUnavailable: {{ .Values.spec.nodes.brokers.podDisruptionBudgetSpec.maxUnavailable }} + replicas: {{ .Values.spec.nodes.brokers.hpAutoscaler.minReplicas }} + hpAutoscaler: + minReplicas: {{ .Values.spec.nodes.brokers.hpAutoscaler.minReplicas }} + maxReplicas: {{ .Values.spec.nodes.brokers.hpAutoscaler.maxReplicas }} + scaleTargetRef: + apiVersion: apps/v1 + kind: StatefulSet + name: druid-cluster-brokers + metrics: {{ toYaml .Values.spec.nodes.brokers.hpAutoscaler.metrics | nindent 10 }} + + coordinators: + nodeType: "coordinator" + nodeConfigMountPath: "/opt/druid/conf/druid/cluster/master/coordinator-overlord" + runtime.properties: {{ toYaml .Values.spec.nodes.coordinators.runtimeProperties | nindent 8 }} + extra.jvm.options: {{ toYaml .Values.spec.nodes.coordinators.extraJvmOptions | nindent 8 }} + druid.port: 8088 + volumeMounts: + - mountPath: /druid/data + name: data-volume + volumes: + - name: data-volume + emptyDir: {} + replicas: {{ .Values.spec.nodes.coordinators.hpAutoscaler.minReplicas }} + resources: {{ toYaml .Values.spec.nodes.coordinators.resources | nindent 8 }} + affinity: {{ toYaml .Values.spec.nodes.coordinators.affinity | nindent 8 }} + podDisruptionBudgetSpec: + maxUnavailable: {{ .Values.spec.nodes.coordinators.podDisruptionBudgetSpec.maxUnavailable }} + hpAutoscaler: + minReplicas: {{ 
.Values.spec.nodes.coordinators.hpAutoscaler.minReplicas }} + maxReplicas: {{ .Values.spec.nodes.coordinators.hpAutoscaler.maxReplicas }} + scaleTargetRef: + apiVersion: apps/v1 + kind: StatefulSet + name: druid-cluster-coordinators + metrics: {{ toYaml .Values.spec.nodes.coordinators.hpAutoscaler.metrics | nindent 10 }} + + overlords: + nodeType: overlord + druid.port: 8081 + nodeConfigMountPath: /opt/druid/conf/druid/cluster/master/overlord + runtime.properties: {{ toYaml .Values.spec.nodes.overlords.runtimeProperties | nindent 8 }} + extra.jvm.options: {{ toYaml .Values.spec.nodes.overlords.extraJvmOptions | nindent 8 }} + services: + - metadata: + name: "%s" + spec: + clusterIP: None + ports: + - + name: tcp-service-port + port: 8081 + targetPort: 8081 + type: ClusterIP + volumeClaimTemplates: + - metadata: + name: data-volume + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi + storageClassName: standard + volumeMounts: + - mountPath: /druid/data + name: data-volume + livenessProbe: + initialDelaySeconds: 30 + httpGet: + path: /status/health + port: 8081 + readinessProbe: + initialDelaySeconds: 30 + httpGet: + path: /status/health + port: 8081 + resources: {{ toYaml .Values.spec.nodes.overlords.resources | nindent 8 }} + affinity: {{ toYaml .Values.spec.nodes.overlords.affinity | nindent 8 }} + podDisruptionBudgetSpec: + maxUnavailable: {{ .Values.spec.nodes.overlords.podDisruptionBudgetSpec.maxUnavailable }} + replicas: {{ .Values.spec.nodes.overlords.hpAutoscaler.minReplicas }} + hpAutoscaler: + minReplicas: {{ .Values.spec.nodes.overlords.hpAutoscaler.minReplicas }} + maxReplicas: {{ .Values.spec.nodes.overlords.hpAutoscaler.maxReplicas }} + scaleTargetRef: + apiVersion: apps/v1 + kind: StatefulSet + name: druid-cluster-overlords + metrics: {{ toYaml .Values.spec.nodes.overlords.hpAutoscaler.metrics | nindent 10 }} + + historicals: + nodeType: "historical" + nodeConfigMountPath: "/opt/druid/conf/druid/cluster/data/historical" + 
runtime.properties: {{ toYaml .Values.spec.nodes.historicals.runtimeProperties | nindent 8 }} + extra.jvm.options: {{ toYaml .Values.spec.nodes.historicals.extraJvmOptions | nindent 8 }} + druid.port: 8088 + volumeClaimTemplates: {{ toYaml .Values.spec.nodes.historicals.volumeClaimTemplates | nindent 8 }} + volumeMounts: {{ toYaml .Values.spec.nodes.historicals.volumeMounts | nindent 8 }} + resources: {{ toYaml .Values.spec.nodes.historicals.resources | nindent 8 }} + podDisruptionBudgetSpec: + maxUnavailable: {{ .Values.spec.nodes.overlords.podDisruptionBudgetSpec.maxUnavailable }} + replicas: {{ .Values.spec.nodes.historicals.hpAutoscaler.minReplicas }} + hpAutoscaler: + minReplicas: {{ .Values.spec.nodes.historicals.hpAutoscaler.minReplicas }} + maxReplicas: {{ .Values.spec.nodes.historicals.hpAutoscaler.maxReplicas }} + scaleTargetRef: + apiVersion: apps/v1 + kind: StatefulSet + name: druid-cluster-historicals + metrics: {{ toYaml .Values.spec.nodes.historicals.hpAutoscaler.metrics | nindent 10 }} + + middlemanagers: + nodeType: middleManager + nodeConfigMountPath: /opt/druid/conf/druid/cluster/data/middleManager + runtime.properties: {{ toYaml .Values.spec.nodes.middlemanagers.runtimeProperties | nindent 8 }} + extra.jvm.options: {{ toYaml .Values.spec.nodes.middlemanagers.extraJvmOptions | nindent 8 }} + druid.port: 8091 + ports: + - containerPort: 8100 + name: peon-0-pt + - containerPort: 8101 + name: peon-1-pt + - containerPort: 8102 + name: peon-2-pt + - containerPort: 8103 + name: peon-3-pt + - containerPort: 8104 + name: peon-4-pt + - containerPort: 8105 + name: peon-5-pt + - containerPort: 8106 + name: peon-6-pt + - containerPort: 8107 + name: peon-7-pt + - containerPort: 8108 + name: peon-8-pt + - containerPort: 8109 + name: peon-9-pt + services: + - spec: + clusterIP: None + ports: + - name: tcp-service-port + port: 8091 + targetPort: 8091 + - name: peon-port-0 + port: 8100 + targetPort: 8100 + - name: peon-port-1 + port: 8101 + targetPort: 8101 + - 
name: peon-port-2 + port: 8102 + targetPort: 8102 + - name: peon-port-3 + port: 8103 + targetPort: 8103 + - name: peon-port-4 + port: 8104 + targetPort: 8104 + - name: peon-port-5 + port: 8105 + targetPort: 8105 + - name: peon-port-6 + port: 8106 + targetPort: 8106 + - name: peon-port-7 + port: 8107 + targetPort: 8107 + - name: peon-port-8 + port: 8108 + targetPort: 8108 + - name: peon-port-9 + port: 8109 + targetPort: 8109 + type: ClusterIP + volumeClaimTemplates: {{ toYaml .Values.spec.nodes.middlemanagers.volumeClaimTemplates | nindent 8 }} + volumeMounts: + - mountPath: /druid/data + name: data-volume + livenessProbe: + initialDelaySeconds: 30 + httpGet: + path: /status/health + port: 8091 + readinessProbe: + initialDelaySeconds: 30 + httpGet: + path: /status/health + port: 8091 + resources: {{ toYaml .Values.spec.nodes.middlemanagers.resources | nindent 8 }} + podDisruptionBudgetSpec: + maxUnavailable: {{ .Values.spec.nodes.middlemanagers.podDisruptionBudgetSpec.maxUnavailable }} + replicas: {{ .Values.spec.nodes.middlemanagers.hpAutoscaler.minReplicas }} + hpAutoscaler: + minReplicas: {{ .Values.spec.nodes.middlemanagers.hpAutoscaler.minReplicas }} + maxReplicas: {{ .Values.spec.nodes.middlemanagers.hpAutoscaler.maxReplicas }} + scaleTargetRef: + apiVersion: apps/v1 + kind: StatefulSet + name: druid-cluster-middlemanagers + metrics: {{ toYaml .Values.spec.nodes.middlemanagers.hpAutoscaler.metrics | nindent 10 }} + + routers: + nodeType: router + nodeConfigMountPath: /opt/druid/conf/druid/cluster/query/router + runtime.properties: {{ toYaml .Values.spec.nodes.routers.runtimeProperties | nindent 8 }} + extra.jvm.options: {{ toYaml .Values.spec.nodes.routers.extraJvmOptions | nindent 8 }} + druid.port: 8888 + services: + - metadata: + name: "%s" + spec: + clusterIP: None + ports: + - name: tcp-service-port + port: 8888 + targetPort: 8888 + type: ClusterIP + volumeMounts: + - mountPath: /druid/data + name: data-volume + volumes: + - name: data-volume + emptyDir: 
{} + resources: {{ toYaml .Values.spec.nodes.routers.resources | nindent 8 }} + affinity: {{ toYaml .Values.spec.nodes.routers.affinity | nindent 8 }} + podDisruptionBudgetSpec: + maxUnavailable: {{ .Values.spec.nodes.routers.podDisruptionBudgetSpec.maxUnavailable }} + replicas: {{ .Values.spec.nodes.routers.hpAutoscaler.minReplicas }} + hpAutoscaler: + minReplicas: {{ .Values.spec.nodes.routers.hpAutoscaler.minReplicas }} + maxReplicas: {{ .Values.spec.nodes.routers.hpAutoscaler.maxReplicas }} + scaleTargetRef: + apiVersion: apps/v1 + kind: StatefulSet + name: druid-cluster-routers + metrics: {{ toYaml .Values.spec.nodes.routers.hpAutoscaler.metrics | nindent 10 }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/druid/charts/druid-cluster/values.yaml b/manifests/bucketeer/charts/druid/charts/druid-cluster/values.yaml new file mode 100644 index 000000000..6a8f1171b --- /dev/null +++ b/manifests/bucketeer/charts/druid/charts/druid-cluster/values.yaml @@ -0,0 +1,332 @@ +namespace: +spec: + imagePullSecrets: + image: ghcr.io/bucketeer-io/druid:0.5.0 + nodeSelector: {} + jvmOptions: |- + -server + -XX:MaxDirectMemorySize=10240g + -Duser.timezone=UTC + -Dfile.encoding=UTF-8 + -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager + -Dorg.jboss.logging.provider=slf4j + -Dnet.spy.log.LoggerImpl=net.spy.memcached.compat.log.SLF4JLogger + -Dlog4j.shutdownCallbackRegistry=org.apache.druid.common.config.Log4jShutdown + -Dlog4j.shutdownHookEnabled=true + -XX:+UseG1GC + -XX:MaxGCPauseMillis=200 + -XX:+ExitOnOutOfMemoryError + log4jConfig: |- + + + + + + + + + + + + + + commonRuntimeProperties: | + druid.indexing.doubleStorage=double + + # Extensions + druid.extensions.loadList=["druid-basic-security","druid-datasketches","druid-distinctcount","druid-google-extensions","druid-kafka-indexing-service","druid-stats","mysql-metadata-storage"] + + # Service discovery + druid.router.defaultBrokerServiceName=druid/broker + 
druid.selectors.indexing.serviceName=druid/overlord + druid.selectors.coordinator.serviceName=druid/coordinator + + druid.sql.enable=true + + # Authenticator + druid.auth.authenticatorChain=["BasicMetadataAuthenticator"] + druid.auth.authenticator.BasicMetadataAuthenticator.type=basic + druid.auth.authenticator.BasicMetadataAuthenticator.initialAdminPassword=password + druid.auth.authenticator.BasicMetadataAuthenticator.initialInternalClientPassword=password + druid.auth.authenticator.BasicMetadataAuthenticator.credentialsValidator.type=metadata + druid.auth.authenticator.BasicMetadataAuthenticator.skipOnFailure=false + druid.auth.authenticator.BasicMetadataAuthenticator.authorizerName=BasicMetadataAuthorizer + # Escalator + druid.escalator.type=basic + druid.escalator.internalClientUsername=username + druid.escalator.internalClientPassword=password + druid.escalator.authorizerName=BasicMetadataAuthorizer + # Authorizer + druid.auth.authorizers=["BasicMetadataAuthorizer"] + druid.auth.authorizer.BasicMetadataAuthorizer.type=basic + + # Monitoring + druid.monitoring.monitors=[] + druid.emitter.http.recipientBaseUrl=http://druid-exporter.monitoring.svc.cluster.local:8080/druid + druid.emitter=http + deepStorage: + spec: + properties: |- + druid.storage.type=google + druid.google.bucket=example-druid-deep-storage + type: default + metadataStore: + spec: + properties: |- + druid.metadata.storage.type=mysql + druid.metadata.storage.connector.connectURI=jdbc:mysql://druid-mysql.example.com/druid + druid.metadata.storage.connector.user=user + druid.metadata.storage.connector.password=password + druid.metadata.storage.connector.createTables=true + type: default + zookeeper: + spec: + properties: |- + druid.zk.service.host=druid-zookeeper-client.druid.svc.cluster.local + druid.zk.paths.base=/druid + type: default + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /var/secrets/google/token + volumeMounts: + - name: google-cloud-key + mountPath: /var/secrets/google + 
volumes: + - name: google-cloud-key + secret: + secretName: druid-gcp-sa-key + + nodes: + brokers: + runtimeProperties: | + druid.service=druid/broker + # HTTP server threads + druid.broker.http.numConnections=5 + druid.server.http.numThreads=10 + # Processing threads and buffers + druid.processing.buffer.sizeBytes=1000 + druid.processing.numMergeBuffers=1 + druid.processing.numThreads=1 + + # Monitoring + druid.monitoring.monitors=["org.apache.druid.server.metrics.QueryCountStatsMonitor"] + extraJvmOptions: |- + -Xmx1G + -Xms1G + resources: {} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + nodeSpecUniqueStr: druid-cluster-brokers + podDisruptionBudgetSpec: + maxUnavailable: 1 + hpAutoscaler: + minReplicas: 1 + maxReplicas: 1 + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 50 + + coordinators: + runtimeProperties: | + druid.service=druid/coordinator + # HTTP server threads + druid.coordinator.startDelay=PT30S + druid.coordinator.period=PT30S + # Configure this coordinator to also run as Overlord + druid.coordinator.asOverlord.enable=false + # druid.coordinator.asOverlord.overlordService=druid/overlord + druid.indexer.queue.startDelay=PT30S + druid.indexer.runner.type=local + extraJvmOptions: |- + -Xmx1G + -Xms1G + resources: {} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + nodeSpecUniqueStr: druid-cluster-coordinators + podDisruptionBudgetSpec: + maxUnavailable: 1 + hpAutoscaler: + minReplicas: 2 + maxReplicas: 2 + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 60 + + overlords: + nodeConfigMountPath: /opt/druid/conf/druid/cluster/master/overlord + runtimeProperties: |- + druid.service=druid/overlord + druid.indexer.queue.startDelay=PT30S + druid.indexer.runner.type=remote + 
druid.indexer.storage.type=metadata + + # Monitoring + druid.monitoring.monitors=["org.apache.druid.server.metrics.TaskCountStatsMonitor"] + extraJvmOptions: |- + -Xmx4G + -Xms4G + resources: {} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + nodeSpecUniqueStr: druid-cluster-overlords + podDisruptionBudgetSpec: + maxUnavailable: 1 + hpAutoscaler: + maxReplicas: 2 + minReplicas: 2 + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 60 + - type: Resource + resource: + name: memory + targetAverageUtilization: 60 + + historicals: + runtimeProperties: | + druid.service=druid/historical + druid.server.http.numThreads=5 + + # Memory tuning and resource limits for groupBy v2 + druid.processing.buffer.sizeBytes=1000 + druid.query.groupBy.maxOnDiskStorage=100000 + + druid.processing.numMergeBuffers=1 + druid.processing.numThreads=1 + # Segment storage + druid.segmentCache.locations=[{\"path\":\"/druid/data/segments\",\"maxSize\":10737418240}] + druid.server.maxSize=10737418240 + + # Monitoring + druid.monitoring.monitors=["org.apache.druid.server.metrics.HistoricalMetricsMonitor"] + extraJvmOptions: |- + -Xmx1G + -Xms1G + volumeClaimTemplates: {} + volumeMounts: + - mountPath: /druid/data + name: data-volume + resources: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + nodeSpecUniqueStr: druid-cluster-historicals + podDisruptionBudgetSpec: + maxUnavailable: 1 + hpAutoscaler: + maxReplicas: 1 + minReplicas: 1 + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 60 + - type: Resource + resource: + name: memory + targetAverageUtilization: 60 + + middlemanagers: + runtimeProperties: | + druid.service=druid/middleManager + druid.worker.capacity=3 + druid.server.http.numThreads=10 + 
druid.processing.buffer.sizebytes=536870912 + # Resources for peons + druid.indexer.runner.javaOpts=-server -Xms1G -Xmx1G -XX:MaxDirectMemorySize=10g -Duser.timezone=UTC -Dfile.encoding=UTF-8 -Djava.io.tmpdir=/druid/data/tmp -XX:+UnlockDiagnosticVMOptions -XX:+PrintSafepointStatistics -XX:PrintSafepointStatisticsCount=1 -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCApplicationStoppedTime -XX:+PrintGCApplicationConcurrentTime -XX:+ExitOnOutOfMemoryError -XX:+HeapDumpOnOutOfMemoryError -XX:+UseG1GC + druid.indexer.task.baseTaskDir=/druid/data/baseTaskDir + # Peon properties + druid.indexer.fork.property.druid.processing.numThreads=1 + druid.indexer.fork.property.druid.processing.numMergeBuffers=2 + druid.indexer.fork.property.druid.processing.buffer.sizeBytes=536870912 + extraJvmOptions: |- + -Xmx4G + -Xms4G + volumeClaimTemplates: + - metadata: + name: data-volume + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi + storageClassName: standard + resources: {} + podDisruptionBudgetSpec: + maxUnavailable: 1 + hpAutoscaler: + maxReplicas: 1 + minReplicas: 1 + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 60 + - type: Resource + resource: + name: memory + targetAverageUtilization: 60 + + routers: + nodeConfigMountPath: /opt/druid/conf/druid/cluster/query/router + runtimeProperties: | + druid.service=druid/router + druid.plaintextPort=8888 + # HTTP proxy + druid.router.http.numConnections=50 + druid.router.http.readTimeout=PT5M + druid.router.http.numMaxThreads=100 + druid.server.http.numThreads=100 + # Service discovery + druid.router.defaultBrokerServiceName=druid/broker + druid.router.coordinatorServiceName=druid/coordinator + # Management proxy to coordinator / overlord: required for unified web console. 
+ druid.router.managementProxy.enabled=true + extraJvmOptions: |- + -Xmx512m + -Xms512m + resources: {} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + nodeSpecUniqueStr: druid-cluster-routers + podDisruptionBudgetSpec: + maxUnavailable: 1 + hpAutoscaler: + maxReplicas: 1 + minReplicas: 1 + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 60 + - type: Resource + resource: + name: memory + targetAverageUtilization: 60 diff --git a/manifests/bucketeer/charts/druid/charts/druid-operator/.helmignore b/manifests/bucketeer/charts/druid/charts/druid-operator/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/manifests/bucketeer/charts/druid/charts/druid-operator/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/manifests/bucketeer/charts/druid/charts/druid-operator/Chart.yaml b/manifests/bucketeer/charts/druid/charts/druid-operator/Chart.yaml new file mode 100644 index 000000000..a29260295 --- /dev/null +++ b/manifests/bucketeer/charts/druid/charts/druid-operator/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: druid-operator +version: 0.1.0 diff --git a/manifests/bucketeer/charts/druid/charts/druid-operator/templates/NOTES.txt b/manifests/bucketeer/charts/druid/charts/druid-operator/templates/NOTES.txt new file mode 100644 index 000000000..3dc82ec5b --- /dev/null +++ b/manifests/bucketeer/charts/druid/charts/druid-operator/templates/NOTES.txt @@ -0,0 +1,2 @@ +The Druid Operator has been installed. 
Check its status by running: + kubectl --namespace {{ .Release.Namespace }} get pods -l "app={{ template "druid-operator.name" . }},release={{ .Release.Name }}" diff --git a/manifests/bucketeer/charts/druid/charts/druid-operator/templates/_helpers.tpl b/manifests/bucketeer/charts/druid/charts/druid-operator/templates/_helpers.tpl new file mode 100644 index 000000000..32b56d6d1 --- /dev/null +++ b/manifests/bucketeer/charts/druid/charts/druid-operator/templates/_helpers.tpl @@ -0,0 +1,31 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "druid-operator.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "druid-operator.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +Create chart name and version as used by the chart label. 
+*/}} +{{- define "druid-operator.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} \ No newline at end of file diff --git a/manifests/bucketeer/charts/druid/charts/druid-operator/templates/druid.apache.org_druids_crd.yaml b/manifests/bucketeer/charts/druid/charts/druid-operator/templates/druid.apache.org_druids_crd.yaml new file mode 100644 index 000000000..0177777e8 --- /dev/null +++ b/manifests/bucketeer/charts/druid/charts/druid-operator/templates/druid.apache.org_druids_crd.yaml @@ -0,0 +1,46 @@ +{{- if .Values.global.druid.enabled }} +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: druids.druid.apache.org + annotations: + "helm.sh/resource-policy": keep +spec: + group: druid.apache.org + names: + kind: Druid + listKind: DruidList + plural: druids + singular: druid + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: Druid is the Schema for the druids API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DruidSpec defines the desired state of Druid + type: object + status: + description: DruidStatus defines the observed state of Druid + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/druid/charts/druid-operator/templates/operator.yaml b/manifests/bucketeer/charts/druid/charts/druid-operator/templates/operator.yaml new file mode 100644 index 000000000..4e42515eb --- /dev/null +++ b/manifests/bucketeer/charts/druid/charts/druid-operator/templates/operator.yaml @@ -0,0 +1,46 @@ +{{- if .Values.global.druid.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "druid-operator.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "druid-operator.name" . }} + chart: {{ template "druid-operator.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + "helm.sh/resource-policy": keep +spec: + replicas: 1 + selector: + matchLabels: + name: druid-operator + template: + metadata: + labels: + name: druid-operator + spec: + serviceAccountName: druid-operator + containers: + - name: druid-operator + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + command: + - druid-operator + imagePullPolicy: Always + env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OPERATOR_NAME + value: "druid-operator" + resources: {{ toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: {{- toYaml . 
| nindent 8 }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/druid/charts/druid-operator/templates/role.yaml b/manifests/bucketeer/charts/druid/charts/druid-operator/templates/role.yaml new file mode 100644 index 000000000..e85ada1d4 --- /dev/null +++ b/manifests/bucketeer/charts/druid/charts/druid-operator/templates/role.yaml @@ -0,0 +1,83 @@ +{{- if .Values.global.druid.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: druid-operator + namespace: {{ .Values.namespace }} + annotations: + "helm.sh/resource-policy": keep +rules: +- apiGroups: + - "" + resources: + - pods + - services + - services/finalizers + - endpoints + - persistentvolumeclaims + - events + - configmaps + - secrets + verbs: + - '*' +- apiGroups: + - apps + resources: + - deployments + - daemonsets + - replicasets + - statefulsets + verbs: + - '*' +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - '*' +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - '*' +- apiGroups: + - extensions + resources: + - ingresses + verbs: + - '*' +- apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - get + - create +- apiGroups: + - apps + resourceNames: + - druid-operator + resources: + - deployments/finalizers + verbs: + - update +- apiGroups: + - "" + resources: + - pods + verbs: + - get +- apiGroups: + - apps + resources: + - replicasets + verbs: + - get +- apiGroups: + - druid.apache.org + resources: + - '*' + verbs: + - '*' +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/druid/charts/druid-operator/templates/role_binding.yaml b/manifests/bucketeer/charts/druid/charts/druid-operator/templates/role_binding.yaml new file mode 100644 index 000000000..d107d483a --- /dev/null +++ b/manifests/bucketeer/charts/druid/charts/druid-operator/templates/role_binding.yaml @@ -0,0 +1,16 @@ +{{- if .Values.global.druid.enabled 
}} +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: druid-operator + namespace: {{ .Values.namespace }} + annotations: + "helm.sh/resource-policy": keep +subjects: +- kind: ServiceAccount + name: druid-operator +roleRef: + kind: Role + name: druid-operator + apiGroup: rbac.authorization.k8s.io +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/druid/charts/druid-operator/templates/service_account.yaml b/manifests/bucketeer/charts/druid/charts/druid-operator/templates/service_account.yaml new file mode 100644 index 000000000..e87728a03 --- /dev/null +++ b/manifests/bucketeer/charts/druid/charts/druid-operator/templates/service_account.yaml @@ -0,0 +1,9 @@ +{{- if .Values.global.druid.enabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: druid-operator + namespace: {{ .Values.namespace }} + annotations: + "helm.sh/resource-policy": keep +{{- end }} diff --git a/manifests/bucketeer/charts/druid/charts/druid-operator/values.yaml b/manifests/bucketeer/charts/druid/charts/druid-operator/values.yaml new file mode 100644 index 000000000..20ff81df1 --- /dev/null +++ b/manifests/bucketeer/charts/druid/charts/druid-operator/values.yaml @@ -0,0 +1,15 @@ +namespace: +fullnameOverride: +image: + repository: druidio/druid-operator + tag: + +nodeSelector: {} + +resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi diff --git a/manifests/bucketeer/charts/druid/charts/zookeeper-operator/Chart.yaml b/manifests/bucketeer/charts/druid/charts/zookeeper-operator/Chart.yaml new file mode 100644 index 000000000..0bb5a7b6c --- /dev/null +++ b/manifests/bucketeer/charts/druid/charts/zookeeper-operator/Chart.yaml @@ -0,0 +1,10 @@ +apiVersion: v2 +appVersion: 0.2.9 +description: Zookeeper Operator Helm chart for Kubernetes +home: https://github.com/pravega/zookeeper-operator +icon: https://zookeeper.apache.org/images/zookeeper_small.gif +keywords: +- zookeeper +- storage +name: 
zookeeper-operator +version: 0.2.9 diff --git a/manifests/bucketeer/charts/druid/charts/zookeeper-operator/README.md b/manifests/bucketeer/charts/druid/charts/zookeeper-operator/README.md new file mode 100644 index 000000000..1f690c5e6 --- /dev/null +++ b/manifests/bucketeer/charts/druid/charts/zookeeper-operator/README.md @@ -0,0 +1,4 @@ +# Zookeeper Operator Helm Chart + +This chart is the custom version of [pravega/zookeeper](https://github.com/pravega/zookeeper-operator/tree/master/charts/zookeeper). +The version is written [here](../../requirements.yaml). \ No newline at end of file diff --git a/manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/_helpers.tpl b/manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/_helpers.tpl new file mode 100644 index 000000000..a3dff43e2 --- /dev/null +++ b/manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/_helpers.tpl @@ -0,0 +1,37 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "zookeeper-operator.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "zookeeper-operator.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "zookeeper-operator.commonLabels" -}} +app.kubernetes.io/name: {{ include "zookeeper-operator.name" . 
}} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" +{{- end -}} diff --git a/manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/clusterrole.yaml b/manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/clusterrole.yaml new file mode 100644 index 000000000..aa580ab9f --- /dev/null +++ b/manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/clusterrole.yaml @@ -0,0 +1,45 @@ +{{- if .Values.global.druid.enabled }} +{{- if .Values.rbac.create }} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "zookeeper-operator.fullname" . }} + labels: +{{ include "zookeeper-operator.commonLabels" . | indent 4 }} +rules: +- apiGroups: + - zookeeper.pravega.io + resources: + - "*" + verbs: + - "*" +- apiGroups: + - "" + resources: + - pods + - services + - endpoints + - persistentvolumeclaims + - events + - configmaps + - secrets + - serviceaccounts + verbs: + - "*" +- apiGroups: + - apps + resources: + - deployments + - daemonsets + - replicasets + - statefulsets + verbs: + - "*" +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - "*" +{{- end }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/clusterrolebinding.yaml b/manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/clusterrolebinding.yaml new file mode 100644 index 000000000..3f19083c4 --- /dev/null +++ b/manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/clusterrolebinding.yaml @@ -0,0 +1,18 @@ +{{- if .Values.global.druid.enabled }} +{{- if .Values.rbac.create }} +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "zookeeper-operator.fullname" . 
}} + labels: +{{ include "zookeeper-operator.commonLabels" . | indent 4 }} +subjects: +- kind: ServiceAccount + name: {{ .Values.serviceAccount.name }} + namespace: {{ .Values.namespace }} +roleRef: + kind: ClusterRole + name: {{ template "zookeeper-operator.fullname" . }} + apiGroup: rbac.authorization.k8s.io +{{- end }} +{{- end }} diff --git a/manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/operator.yaml b/manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/operator.yaml new file mode 100644 index 000000000..585725ba0 --- /dev/null +++ b/manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/operator.yaml @@ -0,0 +1,55 @@ +{{- if .Values.global.druid.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "zookeeper-operator.fullname" . }} + namespace: {{ .Values.namespace }} + labels: +{{ include "zookeeper-operator.commonLabels" . | indent 4 }} +spec: + replicas: 1 + selector: + matchLabels: + name: {{ template "zookeeper-operator.fullname" . }} + template: + metadata: + labels: + name: {{ template "zookeeper-operator.fullname" . }} + component: zookeeper-operator + spec: + serviceAccountName: {{ .Values.serviceAccount.name }} + containers: + - name: {{ template "zookeeper-operator.fullname" . }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: 6000 + name: metrics + command: + - zookeeper-operator + env: + - name: WATCH_NAMESPACE + value: "{{ .Values.watchNamespace }}" + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OPERATOR_NAME + value: {{ template "zookeeper-operator.fullname" . 
}} + {{- if .Values.resources }} + resources: +{{ toYaml .Values.resources | indent 10 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} +{{- end }} diff --git a/manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/post-install-upgrade-hooks.yaml b/manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/post-install-upgrade-hooks.yaml new file mode 100644 index 000000000..23274fe70 --- /dev/null +++ b/manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/post-install-upgrade-hooks.yaml @@ -0,0 +1,121 @@ +{{- if .Values.global.druid.enabled }} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "zookeeper-operator.fullname" . }}-post-install-upgrade + namespace: {{ .Values.namespace }} + annotations: + "helm.sh/hook": post-install, post-upgrade + "helm.sh/hook-weight": "1" + "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation, hook-failed +rules: +- apiGroups: + - zookeeper.pravega.io + resources: + - "*" + verbs: + - get +- apiGroups: + - extensions + resources: + - "deployments" + verbs: + - get +--- + +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "zookeeper-operator.fullname" . }}-post-install-upgrade + namespace: {{ .Values.namespace }} + annotations: + "helm.sh/hook": post-install, post-upgrade + "helm.sh/hook-weight": "1" + "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation, hook-failed +subjects: +- kind: ServiceAccount + name: {{ template "zookeeper-operator.fullname" . }}-post-install-upgrade + namespace: {{.Values.namespace}} +roleRef: + kind: Role + name: {{ template "zookeeper-operator.fullname" . 
}}-post-install-upgrade + apiGroup: rbac.authorization.k8s.io + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "zookeeper-operator.fullname" . }}-post-install-upgrade + namespace: {{ .Values.namespace }} + annotations: + "helm.sh/hook": post-install, post-upgrade + "helm.sh/hook-weight": "1" + "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation, hook-failed + +--- + +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "zookeeper-operator.fullname" . }}-post-install-upgrade + namespace: {{ .Values.namespace }} + annotations: + "helm.sh/hook": post-install, post-upgrade + "helm.sh/hook-weight": "1" + "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation, hook-failed +data: + validations.sh: | + #!/bin/sh + set -e + sleep 30 + + if [ -z "$(kubectl api-resources | grep ZookeeperCluster)" ]; then + exit 1 + fi +--- + +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "zookeeper-operator.fullname" . }}-post-install-upgrade + namespace: {{ .Values.namespace }} + annotations: + "helm.sh/hook": post-install, post-upgrade + "helm.sh/hook-weight": "2" + "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation, hook-failed +spec: + backoffLimit: {{ .Values.hooks.backoffLimit }} + template: + metadata: + name: {{ template "zookeeper-operator.fullname" . }}-post-install-upgrade + spec: + serviceAccountName: {{ template "zookeeper-operator.fullname" . }}-post-install-upgrade + restartPolicy: Never + containers: + - name: post-install-upgrade-job + image: "{{ .Values.hooks.image.repository }}:{{ .Values.hooks.image.tag }}" + command: + - /scripts/validations.sh + volumeMounts: + - name: sh + mountPath: /scripts + readOnly: true + volumes: + - name: sh + configMap: + name: {{ template "zookeeper-operator.fullname" . 
}}-post-install-upgrade + defaultMode: 0555 + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/role.yaml b/manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/role.yaml new file mode 100644 index 000000000..d27ff6772 --- /dev/null +++ b/manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/role.yaml @@ -0,0 +1,45 @@ +{{- if .Values.global.druid.enabled }} +{{- if .Values.rbac.create }} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "zookeeper-operator.fullname" . }} + namespace: {{ .Values.namespace }} + labels: +{{ include "zookeeper-operator.commonLabels" . 
| indent 4 }} +rules: +- apiGroups: + - zookeeper.pravega.io + resources: + - "*" + verbs: + - "*" +- apiGroups: + - "" + resources: + - pods + - services + - endpoints + - persistentvolumeclaims + - events + - configmaps + - secrets + verbs: + - "*" +- apiGroups: + - apps + resources: + - deployments + - daemonsets + - replicasets + - statefulsets + verbs: + - "*" +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - "*" +{{- end }} +{{- end }} diff --git a/manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/rolebinding.yaml b/manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/rolebinding.yaml new file mode 100644 index 000000000..0b341fd71 --- /dev/null +++ b/manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/rolebinding.yaml @@ -0,0 +1,18 @@ +{{- if .Values.global.druid.enabled }} +{{- if .Values.rbac.create }} +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "zookeeper-operator.fullname" . }} + namespace: {{ .Values.namespace }} + labels: +{{ include "zookeeper-operator.commonLabels" . | indent 4 }} +subjects: +- kind: ServiceAccount + name: {{ .Values.serviceAccount.name }} +roleRef: + kind: Role + name: {{ template "zookeeper-operator.fullname" . 
}} + apiGroup: rbac.authorization.k8s.io +{{- end }} +{{- end }} diff --git a/manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/service_account.yaml b/manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/service_account.yaml new file mode 100644 index 000000000..6532b6ec5 --- /dev/null +++ b/manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/service_account.yaml @@ -0,0 +1,17 @@ +{{- if .Values.global.druid.enabled }} +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +{{- if or .Values.global.imagePullSecrets .Values.serviceAccount.imagePullSecrets }} +imagePullSecrets: +{{- range (default .Values.global.imagePullSecrets .Values.serviceAccount.imagePullSecrets) }} + - name: {{ . }} +{{- end }} +{{- end }} +metadata: + name: {{ .Values.serviceAccount.name }} + namespace: {{ .Values.namespace }} + labels: +{{ include "zookeeper-operator.commonLabels" . | indent 4 }} +{{- end }} +{{- end }} diff --git a/manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/zookeeper.pravega.io_zookeeperclusters_crd.yaml b/manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/zookeeper.pravega.io_zookeeperclusters_crd.yaml new file mode 100644 index 000000000..484bf539d --- /dev/null +++ b/manifests/bucketeer/charts/druid/charts/zookeeper-operator/templates/zookeeper.pravega.io_zookeeperclusters_crd.yaml @@ -0,0 +1,3714 @@ +{{- if .Values.global.druid.enabled }} +{{- if .Values.crd.create }} +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: zookeeperclusters.zookeeper.pravega.io +spec: + additionalPrinterColumns: + - JSONPath: .spec.replicas + description: The number of ZooKeeper servers in the ensemble + name: Replicas + type: integer + - JSONPath: .status.readyReplicas + description: The number of ZooKeeper servers in the ensemble that are in a Ready + state + name: Ready Replicas + type: integer + - JSONPath: 
.status.currentVersion + description: The current Zookeeper version + name: Version + type: string + - JSONPath: .spec.image.tag + description: The desired Zookeeper version + name: Desired Version + type: string + - JSONPath: .status.internalClientEndpoint + description: Client endpoint internal to cluster network + name: Internal Endpoint + type: string + - JSONPath: .status.externalClientEndpoint + description: Client endpoint external to cluster network via LoadBalancer + name: External Endpoint + type: string + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + group: zookeeper.pravega.io + names: + kind: ZookeeperCluster + listKind: ZookeeperClusterList + plural: zookeeperclusters + shortNames: + - zk + singular: zookeepercluster + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: ZookeeperCluster is the Schema for the zookeeperclusters API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ZookeeperClusterSpec defines the desired state of ZookeeperCluster + properties: + config: + description: Conf is the zookeeper configuration, which will be used + to generate the static zookeeper configuration. If no configuration + is provided required default values will be provided, and optional + values will be excluded. 
+ properties: + autoPurgePurgeInterval: + description: "The time interval in hours for which the purge task + has to be triggered \n Disabled by default" + type: integer + autoPurgeSnapRetainCount: + description: "Retain the snapshots according to retain count \n + The default value is 3" + type: integer + commitLogCount: + description: "Zookeeper maintains an in-memory list of last committed + requests for fast synchronization with followers \n The default + value is 500" + type: integer + globalOutstandingLimit: + description: "Clients can submit requests faster than ZooKeeper + can process them, especially if there are a lot of clients. Zookeeper + will throttle Clients so that requests won't exceed global outstanding + limit. \n The default value is 1000" + type: integer + initLimit: + description: "InitLimit is the amount of time, in ticks, to allow + followers to connect and sync to a leader. \n Default value is + 10." + type: integer + maxClientCnxns: + description: "Limits the number of concurrent connections that a + single client, identified by IP address, may make to a single + member of the ZooKeeper ensemble. \n The default value is 60" + type: integer + maxCnxns: + description: "Limits the total number of concurrent connections + that can be made to a zookeeper server \n The default value is + 0, indicating no limit" + type: integer + maxSessionTimeout: + description: "The maximum session timeout in milliseconds that the + server will allow the client to negotiate. 
\n The default value + is 40000" + type: integer + minSessionTimeout: + description: "The minimum session timeout in milliseconds that the + server will allow the client to negotiate \n The default value + is 4000" + type: integer + preAllocSize: + description: "To avoid seeks ZooKeeper allocates space in the transaction + log file in blocks of preAllocSize kilobytes \n The default value + is 64M" + type: integer + quorumListenOnAllIPs: + description: "QuorumListenOnAllIPs when set to true the ZooKeeper + server will listen for connections from its peers on all available + IP addresses, and not only the address configured in the server + list of the configuration file. It affects the connections handling + the ZAB protocol and the Fast Leader Election protocol. \n The + default value is false." + type: boolean + snapCount: + description: "ZooKeeper records its transactions using snapshots + and a transaction log The number of transactions recorded in the + transaction log before a snapshot can be taken is determined by + snapCount \n The default value is 100,000" + type: integer + snapSizeLimitInKb: + description: "Snapshot size limit in Kb \n The default value is 4GB" + type: integer + syncLimit: + description: "SyncLimit is the amount of time, in ticks, to allow + followers to sync with Zookeeper. \n The default value is 2." + type: integer + tickTime: + description: "TickTime is the length of a single tick, which is + the basic time unit used by Zookeeper, as measured in milliseconds + \n The default value is 2000." + type: integer + type: object + containers: + description: Containers defines to support multi containers + items: + description: A single application container that you want to run within + a pod. + properties: + args: + description: 'Arguments to the entrypoint. The docker image''s + CMD is used if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. 
If a variable + cannot be resolved, the reference in the input string will be + unchanged. The $(VAR_NAME) syntax can be escaped with a double + $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + regardless of whether the variable exists or not. Cannot be + updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The + docker image''s ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. The $(VAR_NAME) syntax + can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references + will never be expanded, regardless of whether the variable exists + or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a + variable cannot be resolved, the reference in the input + string will be unchanged. The $(VAR_NAME) syntax can be + escaped with a double $$, ie: $$(VAR_NAME). Escaped references + will never be expanded, regardless of whether the variable + exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. 
+ Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, metadata.labels, metadata.annotations, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. 
Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must be a + C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key will + take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set of + ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each key + in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Docker image name. 
More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take in + response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. More info: + https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should + be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', + etc) won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. 
+ HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: 'TCPSocket specifies an action involving + a TCP port. TCP hooks not yet supported TODO: implement + a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. The handler is not called if the container + crashes or exits. The reason for termination is passed to + the handler. The Pod''s termination grace period countdown + begins before the PreStop hooked is executed. Regardless + of the outcome of the handler, the container will eventually + terminate within the Pod''s termination grace period. Other + management of the container blocks until the hook completes + or until the termination grace period is reached. 
More info: + https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should + be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', + etc) won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: 'TCPSocket specifies an action involving + a TCP port. 
TCP hooks not yet supported TODO: implement + a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. Each + container in a pod must have a unique name (DNS_LABEL). Cannot + be updated. + type: string + ports: + description: List of ports to expose from the container. Exposing + a port here gives the system additional information about the + network connections a container uses, but is primarily informational. + Not specifying a port here DOES NOT prevent that port from being + exposed. Any port which is listening on the default "0.0.0.0" + address inside a container will be accessible from the network. + Cannot be updated. + items: + description: ContainerPort represents a network port in a single + container. + properties: + containerPort: + description: Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, + this must be a valid port number, 0 < x < 65536. If HostNetwork + is specified, this must match ContainerPort. Most containers + do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in a pod must + have a unique name. Name for the port that can be referred + to by services. + type: string + protocol: + description: Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". 
+ type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + {{- if semverCompare "< 1.18-0" .Capabilities.KubeVersion.GitVersion }} + - protocol + {{- end }} + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. Container + will be removed from service endpoints if the probe fails. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + securityContext: + description: 'Security options the pod should run with. More info: + https://kubernetes.io/docs/concepts/policy/security-context/ + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a + process can gain more privileges than its parent process. 
+ This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the + container runtime. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in + privileged containers are essentially equivalent to root + on the host. Defaults to false. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use + for the containers. The default is DefaultProcMount which + uses the container runtime defaults for readonly paths and + masked paths. This requires the ProcMountType feature flag + to be enabled. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. + Default is false. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail + to start the container if it does. If unset or false, no + such validation will be performed. May also be set in PodSecurityContext. 
If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata if + unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. This field is alpha-level + and is only honored by servers that enable the WindowsGMSA + feature flag. 
+ type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. This field is alpha-level + and is only honored by servers that enable the WindowsGMSA + feature flag. + type: string + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. This field is beta-level and may be + disabled with the WindowsRunAsUserName feature flag. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until + this completes successfully. If this probe fails, the Pod will + be restarted, just as if the livenessProbe failed. This can + be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. This + cannot be updated. This is an alpha feature enabled by the StartupProbe + feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. 
+ items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. 
+ format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for + stdin in the container runtime. If this is not set, reads from + stdin in the container will always result in EOF. Default is + false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin + channel after it has been opened by a single attach. When stdin + is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container + start, is empty until the first client attaches to stdin, and + then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container + is restarted. If this flag is false, a container processes that + reads from stdin will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s + termination message will be written is mounted into the container''s + filesystem. Message written is intended to be brief final status, + such as an assertion failure message. 
Will be truncated by the + node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. Defaults to /dev/termination-log. + Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. + File will use the contents of terminationMessagePath to populate + the container status message on both success and failure. FallbackToLogsOnError + will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever + is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. This is a beta feature. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within + a container. + properties: + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other way + around. When not set, MountPropagationNone is used. This + field is beta in 1.10. 
+ type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which might be + configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + domainName: + description: External host name appended for dns annotation + type: string + ephemeral: + description: Ephemeral is the configuration which helps create ephemeral + storage At anypoint only one of Persistence or Ephemeral should be + present in the manifest + properties: + emptydirvolumesource: + description: EmptyDirVolumeSource is optional and this will create + the emptydir volume It has two parameters Medium and SizeLimit + which are optional as well Medium specifies What type of storage + medium should back this directory. SizeLimit specifies Total amount + of local storage required for this EmptyDir volume. + properties: + medium: + description: 'What type of storage medium should back this directory. + The default is "" which means to use the node''s default medium. + Must be an empty string (default) or Memory. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'Total amount of local storage required for this + EmptyDir volume. The size limit is also applicable for memory + medium. The maximum usage on memory medium EmptyDir would + be the minimum value between the SizeLimit specified here + and the sum of memory limits of all containers in a pod. The + default is nil which means that the limit is undefined. More + info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + image: + description: Image is the container image. default is zookeeper:0.2.7 + properties: + pullPolicy: + description: PullPolicy describes a policy for if/when to pull a + container image + enum: + - Always + - Never + - IfNotPresent + type: string + repository: + type: string + tag: + type: string + type: object + kubernetesClusterDomain: + description: Domain of the kubernetes cluster, defaults to cluster.local + type: string + labels: + additionalProperties: + type: string + description: Labels specifies the labels to attach to pods the operator + creates for the zookeeper cluster. + type: object + persistence: + description: Persistence is the configuration for zookeeper persistent + layer. PersistentVolumeClaimSpec and VolumeReclaimPolicy can be specified + in here. + properties: + reclaimPolicy: + description: VolumeReclaimPolicy is a zookeeper operator configuration. + If it's set to Delete, the corresponding PVCs will be deleted + by the operator when zookeeper cluster is deleted. The default + value is Retain. 
+ enum: + - Delete + - Retain + type: string + annotations: + additionalProperties: + type: string + description: Annotations specifies the annotations to attach to + pvc the operator creates. + type: object + spec: + description: PersistentVolumeClaimSpec is the spec to describe PVC + for the container This field is optional. If no PVC is specified + default persistentvolume will get created. + properties: + accessModes: + description: 'AccessModes contains the desired access modes + the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + enum: + - ReadWriteOnce + - ReadOnlyMany + - ReadWriteMany + type: string + type: array + dataSource: + description: This field requires the VolumeSnapshotDataSource + alpha feature gate to be enabled and currently VolumeSnapshot + is the only supported data source. If the provisioner can + support VolumeSnapshot data source, it will create a new volume + and data will be restored to the volume at the same time. + If the provisioner does not support VolumeSnapshot data source, + volume will not be created and the failure will be reported + as an event. In the future, we plan to support more data source + types and the behavior of the provisioner may change. + properties: + apiGroup: + description: APIGroup is the group for the resource being + referenced. If APIGroup is not specified, the specified + Kind must be in the core API group. For any other third-party + types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources the + volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. More info: + https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + selector: + description: A label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. 
If the operator is Exists or + DoesNotExist, the values array must be empty. This + array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required + by the claim. Value of Filesystem is implied when not included + in claim spec. This is a beta feature. + type: string + volumeName: + description: VolumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + type: object + pod: + description: Pod defines the policy to create pod for the zookeeper + cluster. Updating the Pod does not take effect on any existing pods. + properties: + affinity: + description: The scheduling constraints on pods. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the + pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the affinity expressions specified + by this field, but it may choose a node that violates + one or more of the expressions. The node that is most + preferred is the one with the greatest sum of weights, + i.e. 
for each node that meets all of the scheduling requirements + (resource request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements + of this field and adding "weight" to the sum if the node + matches the corresponding matchExpressions; the node(s) + with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). + A null preferred scheduling term matches no objects + (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with + the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + enum: + - In + - NotIn + - Exists + - DoesNotExist + - Gt + - Lt + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values + array must be empty. If the operator is + Gt or Lt, the values array must have a + single element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. 
+ properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + enum: + - In + - NotIn + - Exists + - DoesNotExist + - Gt + - Lt + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values + array must be empty. If the operator is + Gt or Lt, the values array must have a + single element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not + be scheduled onto the node. If the affinity requirements + specified by this field cease to be met at some point + during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its + node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. + The TopologySelectorTerm type implements a subset + of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. 
+ items: + description: A node selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + enum: + - In + - NotIn + - Exists + - DoesNotExist + - Gt + - Lt + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values + array must be empty. If the operator is + Gt or Lt, the values array must have a + single element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + enum: + - In + - NotIn + - Exists + - DoesNotExist + - Gt + - Lt + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values + array must be empty. If the operator is + Gt or Lt, the values array must have a + single element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the affinity expressions specified + by this field, but it may choose a node that violates + one or more of the expressions. The node that is most + preferred is the one with the greatest sum of weights, + i.e. for each node that meets all of the scheduling requirements + (resource request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements + of this field and adding "weight" to the sum if the node + has pods which matches the corresponding podAffinityTerm; + the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. 
+ enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified + namespaces, where co-located is defined as running + on a node whose value of the label with key + topologyKey matches that of any node on which + any of the selected pods is running. Empty topologyKey + is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not + be scheduled onto the node. If the affinity requirements + specified by this field cease to be met at some point + during pod execution (e.g. 
due to a pod label update), + the system may or may not try to eventually evict the + pod from its node. When there are multiple elements, the + lists of nodes corresponding to each podAffinityTerm are + intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not + co-located (anti-affinity) with, where co-located is + defined as running on a node whose value of the label + with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey matches + that of any node on which any of the selected pods + is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the anti-affinity expressions specified + by this field, but it may choose a node that violates + one or more of the expressions. The node that is most + preferred is the one with the greatest sum of weights, + i.e. for each node that meets all of the scheduling requirements + (resource request, requiredDuringScheduling anti-affinity + expressions, etc.), compute a sum by iterating through + the elements of this field and adding "weight" to the + sum if the node has pods which matches the corresponding + podAffinityTerm; the node(s) with the highest sum are + the most preferred. 
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. 
+ type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified + namespaces, where co-located is defined as running + on a node whose value of the label with key + topologyKey matches that of any node on which + any of the selected pods is running. Empty topologyKey + is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified + by this field are not met at scheduling time, the pod + will not be scheduled onto the node. If the anti-affinity + requirements specified by this field cease to be met at + some point during pod execution (e.g. due to a pod label + update), the system may or may not try to eventually evict + the pod from its node. When there are multiple elements, + the lists of nodes corresponding to each podAffinityTerm + are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not + co-located (anti-affinity) with, where co-located is + defined as running on a node whose value of the label + with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + enum: + - In + - NotIn + - Exists + - DoesNotExist + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey matches + that of any node on which any of the selected pods + is running. Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + annotations: + additionalProperties: + type: string + description: Annotations specifies the annotations to attach to + pods the operator creates. + type: object + env: + description: List of environment variables to set in the container. + This field cannot be updated. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a variable + cannot be resolved, the reference in the input string will + be unchanged. The $(VAR_NAME) syntax can be escaped with + a double $$, ie: $$(VAR_NAME). Escaped references will never + be expanded, regardless of whether the variable exists or + not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, metadata.labels, metadata.annotations, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' 
+ properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + labels: + additionalProperties: + type: string + description: Labels specifies the labels to attach to pods the operator + creates for the zookeeper cluster. + type: object + nodeSelector: + additionalProperties: + type: string + description: NodeSelector specifies a map of key-value pairs. 
For + the pod to be eligible to run on a node, the node must have each + of the indicated key-value pairs as labels. + type: object + resources: + description: Resources is the resource requirements for the container. + This field cannot be updated once the cluster is created. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + securityContext: + description: 'SecurityContext specifies the security context for + the entire pod More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context' + properties: + fsGroup: + description: "A special supplemental group that applies to all + containers in a pod. Some volume types allow the Kubelet to + change the ownership of that volume to be owned by the pod: + \n 1. The owning GID will be the FSGroup 2. The setgid bit + is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- \n If unset, + the Kubelet will not modify the ownership and permissions + of any volume." 
+ format: int64 + type: integer + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set in + SecurityContext. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence for + that container. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail to + start the container if it does. If unset or false, no such + validation will be performed. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. + properties: + level: + description: Level is SELinux level label that applies to + the container. + type: string + role: + description: Role is a SELinux role label that applies to + the container. + type: string + type: + description: Type is a SELinux type label that applies to + the container. + type: string + user: + description: User is a SELinux user label that applies to + the container. 
+ type: string + type: object + supplementalGroups: + description: A list of groups applied to the first process run + in each container, in addition to the container's primary + GID. If unspecified, no groups will be added to any container. + items: + format: int64 + type: integer + type: array + sysctls: + description: Sysctls hold a list of namespaced sysctls used + for the pod. Pods with unsupported sysctls (by the container + runtime) might fail to launch. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. This field is alpha-level + and is only honored by servers that enable the WindowsGMSA + feature flag. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA + credential spec to use. This field is alpha-level and + is only honored by servers that enable the WindowsGMSA + feature flag. + type: string + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in PodSecurityContext. + If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. 
+ This field is beta-level and may be disabled with the + WindowsRunAsUserName feature flag. + type: string + type: object + type: object + serviceAccountName: + description: Service Account to be used in pods + type: string + terminationGracePeriodSeconds: + description: TerminationGracePeriodSeconds is the amount of time + that kubernetes will give for a pod instance to shutdown normally. + The default value is 30. + format: int64 + minimum: 0 + type: integer + tolerations: + description: Tolerations specifies the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using the + matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty + means match all taint effects. When specified, allowed values + are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, + operator must be Exists; this combination means to match + all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the + value. Valid operators are Exists and Equal. Defaults to + Equal. Exists is equivalent to wildcard for value, so that + a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time + the toleration (which must be of effect NoExecute, otherwise + this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do + not evict). Zero and negative values will be treated as + 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches + to. If the operator is Exists, the value should be empty, + otherwise just a regular string. 
+ type: string + type: object + type: array + type: object + ports: + items: + description: ContainerPort represents a network port in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + minimum: 1 + maximum: 65535 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, + this must be a valid port number, 0 < x < 65536. If HostNetwork + is specified, this must match ContainerPort. Most containers + do not need this. + format: int32 + minimum: 1 + maximum: 65535 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME and unique + within the pod. Each named port in a pod must have a unique + name. Name for the port that can be referred to by services. + type: string + protocol: + description: Protocol for port. Must be UDP, TCP, or SCTP. Defaults + to "TCP". + enum: + - UDP + - TCP + - SCTP + type: string + required: + - containerPort + type: object + type: array + replicas: + description: "Replicas is the expected size of the zookeeper cluster. + The pravega-operator will eventually make the size of the running + cluster equal to the expected size. \n The valid range of size is + from 1 to 7." + format: int32 + minimum: 1 + type: integer + storageType: + description: StorageType is used to tell which type of storage we will + be using It can take either Ephemeral or persistence Default StorageType + is Persistence storage + enum: + - ephemeral + - persistence + type: string + volumes: + description: Volumes defines to support customized volumes + items: + description: Volume represents a named volume in a pod that may be + accessed by any container in the pod. 
+ properties: + awsElasticBlockStore: + description: 'AWSElasticBlockStore represents an AWS Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" if unspecified. More info: + https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from compromising + the machine' + type: string + partition: + description: 'The partition in the volume that you want to + mount. If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition + as "1". Similarly, the volume partition for /dev/sda is + "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'Specify "true" to force and set the ReadOnly + property in VolumeMounts to "true". If omitted, the default + is "false". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'Unique ID of the persistent disk resource in + AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: AzureDisk represents an Azure Data Disk mount on + the host and bind mount to the pod. + properties: + cachingMode: + description: 'Host Caching mode: None, Read Only, Read Write.' + type: string + diskName: + description: The Name of the data disk in the blob storage + type: string + diskURI: + description: The URI the data disk in the blob storage + type: string + fsType: + description: Filesystem type to mount. 
Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'Expected values Shared: multiple blob disks + per storage account Dedicated: single blob disk per storage + account Managed: azure managed data disk (only in managed + availability set). defaults to shared' + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: AzureFile represents an Azure File Service mount + on the host and bind mount to the pod. + properties: + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: the name of secret that contains Azure Storage + Account Name and Key + type: string + shareName: + description: Share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: CephFS represents a Ceph FS mount on the host that + shares a pod's lifetime + properties: + monitors: + description: 'Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'Optional: Used as the mounted root, rather than + the full Ceph tree, default is /' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. 
More + info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'Optional: SecretFile is the path to key ring + for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'Optional: SecretRef is reference to the authentication + secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'Optional: User is the rados user name, default + is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'Cinder represents a cinder volume attached and mounted + on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Examples: "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. More + info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'Optional: points to a secret object containing + parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + type: object + volumeID: + description: 'volume id used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: ConfigMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. Defaults + to 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the Data + field of the referenced ConfigMap will be projected into + the volume as a file whose name is the key and content is + the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the + ConfigMap, the volume setup will error unless it is marked + optional. Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on this file, + must be a value between 0 and 0777. If not specified, + the volume defaultMode will be used. This might be + in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode + bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map the + key to. May not be an absolute path. May not contain + the path element '..'. May not start with the string + '..'. 
+ type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its keys must + be defined + type: boolean + type: object + csi: + description: CSI (Container Storage Interface) represents storage + that is handled by an external CSI driver (Alpha feature). + properties: + driver: + description: Driver is the name of the CSI driver that handles + this volume. Consult with your admin for the correct name + as registered in the cluster. + type: string + fsType: + description: Filesystem type to mount. Ex. "ext4", "xfs", + "ntfs". If not provided, the empty value is passed to the + associated CSI driver which will determine the default filesystem + to apply. + type: string + nodePublishSecretRef: + description: NodePublishSecretRef is a reference to the secret + object containing sensitive information to pass to the CSI + driver to complete the CSI NodePublishVolume and NodeUnpublishVolume + calls. This field is optional, and may be empty if no secret + is required. If the secret object contains more than one + secret, all secret references are passed. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + readOnly: + description: Specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: VolumeAttributes stores driver-specific properties + that are passed to the CSI driver. Consult your driver's + documentation for supported values. 
+ type: object + required: + - driver + type: object + downwardAPI: + description: DownwardAPI represents downward API about the pod + that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. Defaults + to 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits to use on this file, + must be a value between 0 and 0777. If not specified, + the volume defaultMode will be used. This might be + in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode + bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path name + of the file to be created. Must not be absolute or + contain the ''..'' path. Must be utf-8 encoded. The + first item of the relative path must not start with + ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + requests.cpu and requests.memory) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'EmptyDir represents a temporary directory that shares + a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'What type of storage medium should back this + directory. The default is "" which means to use the node''s + default medium. Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'Total amount of local storage required for this + EmptyDir volume. The size limit is also applicable for memory + medium. The maximum usage on memory medium EmptyDir would + be the minimum value between the SizeLimit specified here + and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + fc: + description: FC represents a Fibre Channel resource that is attached + to a kubelet's host machine and then exposed to the pod. + properties: + fsType: + description: 'Filesystem type to mount. 
Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + TODO: how do we prevent errors in the filesystem from compromising + the machine' + type: string + lun: + description: 'Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'Optional: FC target worldwide names (WWNs)' + items: + type: string + type: array + wwids: + description: 'Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be + set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: FlexVolume represents a generic volume resource that + is provisioned/attached using an exec based plugin. + properties: + driver: + description: Driver is the name of the driver to use for this + volume. + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". The default filesystem depends on FlexVolume + script. + type: string + options: + additionalProperties: + type: string + description: 'Optional: Extra command options if any.' + type: object + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'Optional: SecretRef is reference to the secret + object containing sensitive information to pass to the plugin + scripts. This may be empty if no secret object is specified. + If the secret object contains more than one secret, all + secrets are passed to the plugin scripts.' + properties: + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + required: + - driver + type: object + flocker: + description: Flocker represents a Flocker volume attached to a + kubelet's host machine. This depends on the Flocker control + service being running + properties: + datasetName: + description: Name of the dataset stored as metadata -> name + on the dataset for Flocker should be considered as deprecated + type: string + datasetUUID: + description: UUID of the dataset. This is unique identifier + of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'GCEPersistentDisk represents a GCE Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" if unspecified. More info: + https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from compromising + the machine' + type: string + partition: + description: 'The partition in the volume that you want to + mount. If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition + as "1". Similarly, the volume partition for /dev/sda is + "0" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'Unique name of the PD resource in GCE. Used + to identify the disk in GCE. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'GitRepo represents a git repository at a particular + revision. DEPRECATED: GitRepo is deprecated. To provision a + container with a git repo, mount an EmptyDir into an InitContainer + that clones the repo using git, then mount the EmptyDir into + the Pod''s container.' + properties: + directory: + description: Target directory name. Must not contain or start + with '..'. If '.' is supplied, the volume directory will + be the git repository. Otherwise, if specified, the volume + will contain the git repository in the subdirectory with + the given name. + type: string + repository: + description: Repository URL + type: string + revision: + description: Commit hash for the specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'Glusterfs represents a Glusterfs mount on the host + that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'EndpointsName is the endpoint name that details + Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'Path is the Glusterfs volume path. More info: + https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'ReadOnly here will force the Glusterfs volume + to be mounted with read-only permissions. Defaults to false. 
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'HostPath represents a pre-existing file or directory + on the host machine that is directly exposed to the container. + This is generally used for system agents or other privileged + things that are allowed to see the host machine. Most containers + will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- TODO(jonesdl) We need to restrict who can use host directory + mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'Path of the directory on the host. If the path + is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'Type for HostPath Volume Defaults to "" More + info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'ISCSI represents an ISCSI Disk resource that is + attached to a kubelet''s host machine and then exposed to the + pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: whether support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: whether support iSCSI Session CHAP authentication + type: boolean + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" if unspecified. More info: + https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from compromising + the machine' + type: string + initiatorName: + description: Custom iSCSI Initiator Name. 
If initiatorName + is specified with iscsiInterface simultaneously, new iSCSI + interface : will be created + for the connection. + type: string + iqn: + description: Target iSCSI Qualified Name. + type: string + iscsiInterface: + description: iSCSI Interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: iSCSI Target Portal List. The portal is either + an IP or ip_addr:port if the port is other than default + (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: CHAP Secret for iSCSI target and initiator authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + targetPortal: + description: iSCSI Target Portal. The Portal is either an + IP or ip_addr:port if the port is other than default (typically + TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'Volume''s name. Must be a DNS_LABEL and unique within + the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'NFS represents an NFS mount on the host that shares + a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'Path that is exported by the NFS server. More + info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'ReadOnly here will force the NFS export to be + mounted with read-only permissions. Defaults to false. 
More + info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'Server is the hostname or IP address of the + NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'PersistentVolumeClaimVolumeSource represents a reference + to a PersistentVolumeClaim in the same namespace. More info: + https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'ClaimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: PhotonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: ID that identifies Photon Controller persistent + disk + type: string + required: + - pdID + type: object + portworxVolume: + description: PortworxVolume represents a portworx volume attached + and mounted on kubelets host machine + properties: + fsType: + description: FSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + volumeID: + description: VolumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: Items for all in one resources secrets, configmaps, + and downward API + properties: + defaultMode: + description: Mode bits to use on created files by default. + Must be a value between 0 and 0777. Directories within the + path are not affected by this setting. This might be in + conflict with other options that affect the file mode, like + fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: list of volume projections + items: + description: Projection that may be projected along with + other supported volume types + properties: + configMap: + description: information about the configMap data to + project + properties: + items: + description: If unspecified, each key-value pair + in the Data field of the referenced ConfigMap + will be projected into the volume as a file whose + name is the key and content is the value. If specified, + the listed keys will be projected into the specified + paths, and unlisted keys will not be present. + If a key is specified which is not present in + the ConfigMap, the volume setup will error unless + it is marked optional. Paths must be relative + and may not contain the '..' path or start with + '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on + this file, must be a value between 0 and + 0777. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can be + other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. 
May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + keys must be defined + type: boolean + type: object + downwardAPI: + description: information about the downwardAPI data + to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing the + pod field + properties: + fieldRef: + description: 'Required: Selects a field of + the pod: only annotations, labels, name + and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits to use on + this file, must be a value between 0 and + 0777. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can be + other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path. + Must be utf-8 encoded. The first item of + the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, requests.cpu and requests.memory) + are currently supported.' 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: information about the secret data to project + properties: + items: + description: If unspecified, each key-value pair + in the Data field of the referenced Secret will + be projected into the volume as a file whose name + is the key and content is the value. If specified, + the listed keys will be projected into the specified + paths, and unlisted keys will not be present. + If a key is specified which is not present in + the Secret, the volume setup will error unless + it is marked optional. Paths must be relative + and may not contain the '..' path or start with + '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on + this file, must be a value between 0 and + 0777. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can be + other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + type: object + serviceAccountToken: + description: information about the serviceAccountToken + data to project + properties: + audience: + description: Audience is the intended audience of + the token. A recipient of a token must identify + itself with an identifier specified in the audience + of the token, and otherwise should reject the + token. The audience defaults to the identifier + of the apiserver. + type: string + expirationSeconds: + description: ExpirationSeconds is the requested + duration of validity of the service account token. + As the token approaches expiration, the kubelet + volume plugin will proactively rotate the service + account token. The kubelet will start trying to + rotate the token if the token is older than 80 + percent of its time to live or if the token is + older than 24 hours.Defaults to 1 hour and must + be at least 10 minutes. + format: int64 + type: integer + path: + description: Path is the path relative to the mount + point of the file to project the token into. + type: string + required: + - path + type: object + type: object + type: array + required: + - sources + type: object + quobyte: + description: Quobyte represents a Quobyte mount on the host that + shares a pod's lifetime + properties: + group: + description: Group to map volume access to Default is no group + type: string + readOnly: + description: ReadOnly here will force the Quobyte volume to + be mounted with read-only permissions. Defaults to false. 
+ type: boolean + registry: + description: Registry represents a single or multiple Quobyte + Registry services specified as a string as host:port pair + (multiple entries are separated with commas) which acts + as the central registry for volumes + type: string + tenant: + description: Tenant owning the given Quobyte volume in the + Backend Used with dynamically provisioned Quobyte volumes, + value is set by the plugin + type: string + user: + description: User to map volume access to Defaults to serivceaccount + user + type: string + volume: + description: Volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'RBD represents a Rados Block Device mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" if unspecified. More info: + https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from compromising + the machine' + type: string + image: + description: 'The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'Keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'The rados pool name. Default is rbd. 
More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'SecretRef is name of the authentication secret + for RBDUser. If provided overrides keyring. Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'The rados user name. Default is admin. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: ScaleIO represents a ScaleIO persistent volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: The host address of the ScaleIO API Gateway. + type: string + protectionDomain: + description: The name of the ScaleIO Protection Domain for + the configured storage. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef references to the secret for ScaleIO + user and other sensitive information. If this is not provided, + Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + type: object + sslEnabled: + description: Flag to enable/disable SSL communication with + Gateway, default false + type: boolean + storageMode: + description: Indicates whether the storage for a volume should + be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned. + type: string + storagePool: + description: The ScaleIO Storage Pool associated with the + protection domain. + type: string + system: + description: The name of the storage system as configured + in ScaleIO. + type: string + volumeName: + description: The name of a volume already created in the ScaleIO + system that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'Secret represents a secret that should populate + this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. Defaults + to 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the Data + field of the referenced Secret will be projected into the + volume as a file whose name is the key and content is the + value. If specified, the listed keys will be projected into + the specified paths, and unlisted keys will not be present. + If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. + Paths must be relative and may not contain the '..' path + or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. 
+ type: string + mode: + description: 'Optional: mode bits to use on this file, + must be a value between 0 and 0777. If not specified, + the volume defaultMode will be used. This might be + in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode + bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map the + key to. May not be an absolute path. May not contain + the path element '..'. May not start with the string + '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: Specify whether the Secret or its keys must be + defined + type: boolean + secretName: + description: 'Name of the secret in the pod''s namespace to + use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: StorageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef specifies the secret to use for obtaining + the StorageOS API credentials. If not specified, default + values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeName: + description: VolumeName is the human-readable name of the + StorageOS volume. Volume names are only unique within a + namespace. 
+ type: string + volumeNamespace: + description: VolumeNamespace specifies the scope of the volume + within StorageOS. If no namespace is specified then the + Pod's namespace will be used. This allows the Kubernetes + name scoping to be mirrored within StorageOS for tighter + integration. Set VolumeName to any name to override the + default behaviour. Set to "default" if you are not using + namespaces within StorageOS. Namespaces that do not pre-exist + within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: VsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: Storage Policy Based Management (SPBM) profile + ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: Storage Policy Based Management (SPBM) profile + name. + type: string + volumePath: + description: Path that identifies vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + type: object + status: + description: ZookeeperClusterStatus defines the observed state of ZookeeperCluster + properties: + conditions: + description: Conditions list all the applied conditions + items: + description: ClusterCondition shows the current condition of a Zookeeper + cluster. Comply with k8s API conventions + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. + type: string + lastUpdateTime: + description: The last time this condition was updated. + type: string + message: + description: A human readable message indicating details about + the transition. 
externalClientEndpoint: + description: ExternalClientEndpoint is the external client IP and port + type: string + internalClientEndpoint: + description: InternalClientEndpoint is the internal client IP and port + type: string + members: + description: Members is the zookeeper members in the cluster + properties: + ready: + items: + type: string + nullable: true + type: array + unready: + items: + type: string + nullable: true + type: array + type: object + metaRootCreated: + type: boolean + readyReplicas: + description: ReadyReplicas is the number of ready replicas + in the cluster + format: int32 + type: integer + replicas: + description: Replicas is the number of desired replicas in + the cluster + format: int32 + type: integer
+ imagePullSecrets: [] + # - private-registry-key + +image: + repository: pravega/zookeeper-operator + tag: 0.2.9 + pullPolicy: IfNotPresent + +## Install RBAC roles and bindings. +rbac: + create: true + +## Service account name and whether to create it. +serviceAccount: + create: true + name: zookeeper-operator + ## Optionally specify an array of imagePullSecrets. Will override the global parameter if set + # imagePullSecrets: + +## Whether to create the CRD. +crd: + create: true + +## Specifies which namespace(s) the Operator should watch over. +## Default: An empty string means all namespaces. +## Multiple namespaces can be configured using a comma separated list of namespaces +watchNamespace: "" + +## Operator pod resources +resources: {} + # limits: + # cpu: 2 + # memory: 256Mi + # requests: + # cpu: 1 + # memory: 128Mi + +# Scheduling constraints +nodeSelector: {} +affinity: {} +tolerations: [] + +hooks: + backoffLimit: 10 + image: + repository: lachlanevenson/k8s-kubectl + tag: v1.16.10 diff --git a/manifests/bucketeer/charts/druid/charts/zookeeper/Chart.yaml b/manifests/bucketeer/charts/druid/charts/zookeeper/Chart.yaml new file mode 100644 index 000000000..c63c00d6a --- /dev/null +++ b/manifests/bucketeer/charts/druid/charts/zookeeper/Chart.yaml @@ -0,0 +1,10 @@ +apiVersion: v2 +appVersion: 0.2.9 +description: Zookeeper Helm chart for Kubernetes +home: https://github.com/apache/zookeeper +icon: https://zookeeper.apache.org/images/zookeeper_small.gif +keywords: +- zookeeper +- storage +name: zookeeper +version: 0.2.9 diff --git a/manifests/bucketeer/charts/druid/charts/zookeeper/README.md b/manifests/bucketeer/charts/druid/charts/zookeeper/README.md new file mode 100644 index 000000000..3c64b32b7 --- /dev/null +++ b/manifests/bucketeer/charts/druid/charts/zookeeper/README.md @@ -0,0 +1,4 @@ +# Zookeeper Helm Chart + +This chart is the custom version of 
+The version is written [here](../../requirements.yaml).
}} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" +{{- end -}} diff --git a/manifests/bucketeer/charts/druid/charts/zookeeper/templates/post-install-upgrade-hooks.yaml b/manifests/bucketeer/charts/druid/charts/zookeeper/templates/post-install-upgrade-hooks.yaml new file mode 100644 index 000000000..e1936283e --- /dev/null +++ b/manifests/bucketeer/charts/druid/charts/zookeeper/templates/post-install-upgrade-hooks.yaml @@ -0,0 +1,117 @@ +{{- if .Values.global.druid.enabled }} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "zookeeper.fullname" . }}-post-install-upgrade + namespace: {{ .Values.namespace }} + annotations: + "helm.sh/hook": post-install, post-upgrade + "helm.sh/hook-weight": "1" + "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation, hook-failed +rules: +- apiGroups: + - zookeeper.pravega.io + resources: + - "*" + verbs: + - get +--- + +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "zookeeper.fullname" . }}-post-install-upgrade + namespace: {{ .Values.namespace }} + annotations: + "helm.sh/hook": post-install, post-upgrade + "helm.sh/hook-weight": "1" + "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation, hook-failed +subjects: +- kind: ServiceAccount + name: {{ template "zookeeper.fullname" . }}-post-install-upgrade + namespace: {{.Values.namespace}} +roleRef: + kind: Role + name: {{ template "zookeeper.fullname" . }}-post-install-upgrade + apiGroup: rbac.authorization.k8s.io + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "zookeeper.fullname" . 
+      echo "ZookeeperCluster replicas: $replicas"
}}-post-install-upgrade + spec: + serviceAccountName: {{ template "zookeeper.fullname" . }}-post-install-upgrade + restartPolicy: Never + containers: + - name: post-install-upgrade-job + image: "{{ .Values.hooks.image.repository }}:{{ .Values.hooks.image.tag }}" + command: + - /scripts/validations.sh + volumeMounts: + - name: sh + mountPath: /scripts + readOnly: true + volumes: + - name: sh + configMap: + name: {{ template "zookeeper.fullname" . }}-post-install-upgrade + defaultMode: 0555 +{{- end }} diff --git a/manifests/bucketeer/charts/druid/charts/zookeeper/templates/zookeeper.yaml b/manifests/bucketeer/charts/druid/charts/zookeeper/templates/zookeeper.yaml new file mode 100644 index 000000000..fb2ba78ae --- /dev/null +++ b/manifests/bucketeer/charts/druid/charts/zookeeper/templates/zookeeper.yaml @@ -0,0 +1,148 @@ +{{- if .Values.global.druid.enabled }} +{{- $storageType := .Values.storageType | default "persistence" -}} +apiVersion: "zookeeper.pravega.io/v1beta1" +kind: "ZookeeperCluster" +metadata: + name: {{ template "zookeeper.fullname" . }} + namespace: {{ .Values.namespace }} + labels: +{{ include "zookeeper.commonLabels" . 
| indent 4 }} +spec: + replicas: {{ .Values.replicas }} + image: + repository: {{ .Values.image.repository }} + tag: {{ .Values.image.tag }} + pullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.domainName }} + domainName: {{ .Values.domainName }} + {{- end }} + kubernetesClusterDomain: {{ default "cluster.local" .Values.kubernetesClusterDomain }} + {{- if .Values.containers }} + containers: +{{ toYaml .Values.containers | indent 4 }} + {{- end }} + {{- if .Values.volumes }} + volumes: +{{ toYaml .Values.volumes | indent 4 }} + {{- end }} + labels: +{{ toYaml .Values.labels | indent 4 }} + ports: +{{ toYaml .Values.ports | indent 4 }} + pod: + {{- if .Values.pod.labels }} + labels: +{{ toYaml .Values.pod.labels | indent 6 }} + {{- end }} + {{- if .Values.pod.nodeSelector }} + nodeSelector: +{{ toYaml .Values.pod.nodeSelector | indent 6 }} + {{- end }} + {{- if .Values.pod.affinity }} + affinity: +{{ toYaml .Values.pod.affinity | indent 6 }} + {{- end }} + {{- if .Values.pod.resources }} + resources: +{{ toYaml .Values.pod.resources | indent 6 }} + {{- end }} + {{- if .Values.pod.tolerations }} + tolerations: +{{ toYaml .Values.pod.tolerations | indent 6 }} + {{- end }} + {{- if .Values.pod.env }} + env: +{{ toYaml .Values.pod.env | indent 6 }} + {{- end }} + {{- if .Values.pod.annotations }} + annotations: +{{ toYaml .Values.pod.annotations | indent 6 }} + {{- end }} + {{- if .Values.pod.securityContext }} + securityContext: +{{ toYaml .Values.pod.securityContext | indent 6 }} + {{- end }} + {{- if .Values.pod.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.pod.terminationGracePeriodSeconds }} + {{- end }} + serviceAccountName: {{ default "zookeeper" .Values.pod.serviceAccountName }} + {{- if .Values.config }} + config: + {{- if .Values.config.initLimit }} + initLimit: {{ .Values.config.initLimit }} + {{- end }} + {{- if .Values.config.tickTime }} + tickTime: {{ .Values.config.tickTime }} + {{- end }} + {{- if 
{{- if .Values.config.snapSizeLimitInKb }} +    snapSizeLimitInKb: {{ .Values.config.snapSizeLimitInKb }} +    {{- end }} +    {{- if .Values.config.maxCnxns }} +    maxCnxns: {{ .Values.config.maxCnxns }} +    {{- end }} +    {{- if .Values.config.maxClientCnxns }} +    maxClientCnxns: {{ .Values.config.maxClientCnxns }} +    {{- end }} +    {{- if .Values.config.minSessionTimeout }} +    minSessionTimeout: {{ .Values.config.minSessionTimeout }} +    {{- end }} +    {{- if .Values.config.maxSessionTimeout }} +    maxSessionTimeout: {{ .Values.config.maxSessionTimeout }} +    {{- end }} +    {{- if .Values.config.autoPurgeSnapRetainCount }} +    autoPurgeSnapRetainCount: {{ .Values.config.autoPurgeSnapRetainCount }} +    {{- end }}
.Values.persistence.annotations }} + annotations: +{{ toYaml .Values.persistence.annotations | indent 6 }} + {{- end }} + {{- if or .Values.persistence.storageClassName .Values.persistence.volumeSize }} + spec: + {{- if .Values.persistence.storageClassName }} + storageClassName: {{ .Values.persistence.storageClassName }} + {{- end }} + {{- if .Values.persistence.volumeSize }} + resources: + requests: + storage: {{ .Values.persistence.volumeSize }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/druid/charts/zookeeper/values.yaml b/manifests/bucketeer/charts/druid/charts/zookeeper/values.yaml new file mode 100644 index 000000000..912a424d6 --- /dev/null +++ b/manifests/bucketeer/charts/druid/charts/zookeeper/values.yaml @@ -0,0 +1,71 @@ +replicas: 3 + +fullnameOverride: + +image: + repository: pravega/zookeeper + tag: 0.2.9 + pullPolicy: IfNotPresent + +domainName: +labels: {} +ports: [] +kubernetesClusterDomain: "cluster.local" + +pod: + # labels: {} + # nodeSelector: {} + # affinity: {} + # resources: {} + # tolerations: [] + # env: [] + # annotations: {} + # securityContext: {} + # terminationGracePeriodSeconds: 30 + serviceAccountName: zookeeper + +config: {} + # initLimit: 10 + # tickTime: 2000 + # syncLimit: 2 + # globalOutstandingLimit: 1000 + # preAllocSize: 65536 + # snapCount: 100000 + # commitLogCount: 500 + # snapSizeLimitInKb: 4194304 + # maxCnxns: 0 + # maxClientCnxns: 60 + # minSessionTimeout: 4000 + # maxSessionTimeout: 40000 + # autoPurgeSnapRetainCount: 3 + # autoPurgePurgeInterval: 1 + # quorumListenOnAllIPs: false + +## configure the storage type +## accepted values : persistence/ephemeral +## default option is persistence +storageType: persistence + +persistence: + storageClassName: + ## specifying reclaim policy for PersistentVolumes + ## accepted values - Delete / Retain + reclaimPolicy: Delete + annotations: {} + volumeSize: 20Gi + +ephemeral: + emptydirvolumesource: + ## 
+    # FIXME: The current tgz file is custom version of 0.2.9 that cherry picks the commit below. +    # Please update to the latest version when it is released.
+    image: ghcr.io/bucketeer-io/druid:0.5.0
druid.auth.authenticator.BasicMetadataAuthenticator.credentialsValidator.type=metadata + druid.auth.authenticator.BasicMetadataAuthenticator.skipOnFailure=false + druid.auth.authenticator.BasicMetadataAuthenticator.authorizerName=BasicMetadataAuthorizer + # Escalator + druid.escalator.type=basic + druid.escalator.internalClientUsername=username + druid.escalator.internalClientPassword=password + druid.escalator.authorizerName=BasicMetadataAuthorizer + # Authorizer + druid.auth.authorizers=["BasicMetadataAuthorizer"] + druid.auth.authorizer.BasicMetadataAuthorizer.type=basic + + # Monitoring + druid.monitoring.monitors=[] + druid.emitter.http.recipientBaseUrl=http://druid-exporter.monitoring.svc.cluster.local:8080/druid + druid.emitter=http + deepStorage: + spec: + properties: |- + druid.storage.type=google + druid.google.bucket=example-druid-deep-storage + type: default + metadataStore: + spec: + properties: |- + druid.metadata.storage.type=mysql + druid.metadata.storage.connector.connectURI=jdbc:mysql://druid-mysql.example.com/druid + druid.metadata.storage.connector.user=user + druid.metadata.storage.connector.password=password + druid.metadata.storage.connector.createTables=true + type: default + zookeeper: + spec: + properties: |- + druid.zk.service.host=druid-zookeeper-client.druid.svc.cluster.local + druid.zk.paths.base=/druid + type: default + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /var/secrets/google/token + volumeMounts: + - name: google-cloud-key + mountPath: /var/secrets/google + volumes: + - name: google-cloud-key + secret: + secretName: druid-gcp-sa-key + + nodes: + brokers: + runtimeProperties: | + druid.service=druid/broker + # HTTP server threads + druid.broker.http.numConnections=5 + druid.server.http.numThreads=10 + # Processing threads and buffers + druid.processing.buffer.sizeBytes=1000 + druid.processing.numMergeBuffers=1 + druid.processing.numThreads=1 + + # Monitoring + 
druid.monitoring.monitors=["org.apache.druid.server.metrics.QueryCountStatsMonitor"] + extraJvmOptions: |- + -Xmx1G + -Xms1G + resources: {} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + nodeSpecUniqueStr: druid-cluster-brokers + podDisruptionBudgetSpec: + maxUnavailable: 1 + hpAutoscaler: + minReplicas: 1 + maxReplicas: 1 + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 50 + + coordinators: + runtimeProperties: | + druid.service=druid/coordinator + # HTTP server threads + druid.coordinator.startDelay=PT30S + druid.coordinator.period=PT30S + # Configure this coordinator to also run as Overlord + druid.coordinator.asOverlord.enable=false + # druid.coordinator.asOverlord.overlordService=druid/overlord + druid.indexer.queue.startDelay=PT30S + druid.indexer.runner.type=local + extraJvmOptions: |- + -Xmx1G + -Xms1G + resources: {} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + nodeSpecUniqueStr: druid-cluster-coordinators + podDisruptionBudgetSpec: + maxUnavailable: 1 + hpAutoscaler: + minReplicas: 2 + maxReplicas: 2 + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 60 + + overlords: + nodeConfigMountPath: /opt/druid/conf/druid/cluster/master/overlord + runtimeProperties: |- + druid.service=druid/overlord + druid.indexer.queue.startDelay=PT30S + druid.indexer.runner.type=remote + druid.indexer.storage.type=metadata + + # Monitoring + druid.monitoring.monitors=["org.apache.druid.server.metrics.TaskCountStatsMonitor"] + extraJvmOptions: |- + -Xmx4G + -Xms4G + resources: {} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + nodeSpecUniqueStr: druid-cluster-overlords + 
+      druid.processing.buffer.sizeBytes=536870912
-XX:+PrintGCApplicationConcurrentTime -XX:+ExitOnOutOfMemoryError -XX:+HeapDumpOnOutOfMemoryError -XX:+UseG1GC + druid.indexer.task.baseTaskDir=/druid/data/baseTaskDir + # Peon properties + druid.indexer.fork.property.druid.processing.numThreads=1 + druid.indexer.fork.property.druid.processing.numMergeBuffers=2 + druid.indexer.fork.property.druid.processing.buffer.sizeBytes=536870912 + extraJvmOptions: |- + -Xmx4G + -Xms4G + volumeClaimTemplates: + - metadata: + name: data-volume + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi + storageClassName: standard + resources: {} + podDisruptionBudgetSpec: + maxUnavailable: 1 + hpAutoscaler: + maxReplicas: 1 + minReplicas: 1 + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 60 + - type: Resource + resource: + name: memory + targetAverageUtilization: 60 + + routers: + nodeConfigMountPath: /opt/druid/conf/druid/cluster/query/router + runtimeProperties: | + druid.service=druid/router + druid.plaintextPort=8888 + # HTTP proxy + druid.router.http.numConnections=50 + druid.router.http.readTimeout=PT5M + druid.router.http.numMaxThreads=100 + druid.server.http.numThreads=100 + # Service discovery + druid.router.defaultBrokerServiceName=druid/broker + druid.router.coordinatorServiceName=druid/coordinator + # Management proxy to coordinator / overlord: required for unified web console. 
+ druid.router.managementProxy.enabled=true + extraJvmOptions: |- + -Xmx512m + -Xms512m + resources: {} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + nodeSpecUniqueStr: druid-cluster-routers + podDisruptionBudgetSpec: + maxUnavailable: 1 + hpAutoscaler: + maxReplicas: 1 + minReplicas: 1 + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 60 + - type: Resource + resource: + name: memory + targetAverageUtilization: 60 + +druid-operator: + fullnameOverride: druid-operator + namespace: + image: + repository: druidio/druid-operator + tag: + + nodeSelector: {} + + resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi + +zookeeper-operator: + global: + imagePullSecrets: + namespace: + fullnameOverride: "druid-zookeeper-operator" + annotations: + "helm.sh/resource-policy": keep + + image: + repository: ghcr.io/bucketeer-io/pravega/zookeeper-operator + tag: 0.2.9-13 + + watchNamespace: "druid" + + resources: + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 200m + memory: 256Mi + + nodeSelector: + cloud.google.com/gke-nodepool: druid-pool + +zookeeper: + namespace: + fullnameOverride: "druid-zookeeper" + replicas: 3 + + image: + repository: pravega/zookeeper + tag: 0.2.9 + pullPolicy: IfNotPresent + + domainName: + labels: {} + ports: [] + kubernetesClusterDomain: "cluster.local" + probes: + readiness: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 3 + successThreshold: 1 + timeoutSeconds: 10 + liveness: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 3 + timeoutSeconds: 10 + + pod: + # labels: {} + nodeSelector: + cloud.google.com/gke-nodepool: druid-pool + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + release: druid-zookeeper + resources: + limits: 
+ cpu: 200m + memory: 512Mi + requests: + cpu: 200m + memory: 512Mi + # tolerations: [] + env: + - name: SERVER_JVMFLAGS + value: "-Djute.maxbuffer=10485760" + annotations: + "helm.sh/resource-policy": keep + # securityContext: {} + terminationGracePeriodSeconds: 30 + serviceAccountName: zookeeper + # imagePullSecrets: [] + + config: + {} + # initLimit: 10 + # tickTime: 2000 + # syncLimit: 2 + # globalOutstandingLimit: 1000 + # preAllocSize: 65536 + # snapCount: 100000 + # commitLogCount: 500 + # snapSizeLimitInKb: 4194304 + # maxCnxns: 0 + # maxClientCnxns: 60 + # minSessionTimeout: 4000 + # maxSessionTimeout: 40000 + # autoPurgeSnapRetainCount: 3 + # autoPurgePurgeInterval: 1 + # quorumListenOnAllIPs: false + + ## configure the storage type + ## accepted values : persistence/ephemeral + ## default option is persistence + storageType: persistence + + persistence: + storageClassName: standard + ## specifying reclaim policy for PersistentVolumes + ## accepted values - Delete / Retain + reclaimPolicy: Retain + volumeSize: 5Gi + + ephemeral: + emptydirvolumesource: + ## specifying Medium for emptydirvolumesource + ## accepted values - ""/Memory + medium: "" + sizeLimit: 20Gi + + hooks: + image: + repository: lachlanevenson/k8s-kubectl + tag: v1.16.10 + backoffLimit: 10 + + containers: [] + volumes: [] diff --git a/manifests/bucketeer/charts/environment/Chart.yaml b/manifests/bucketeer/charts/environment/Chart.yaml new file mode 100644 index 000000000..f3c3be378 --- /dev/null +++ b/manifests/bucketeer/charts/environment/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for bucketeer-environment +name: environment +version: 1.0.0 diff --git a/manifests/bucketeer/charts/environment/templates/NOTES.txt b/manifests/bucketeer/charts/environment/templates/NOTES.txt new file mode 100644 index 000000000..f64beda6c --- /dev/null +++ b/manifests/bucketeer/charts/environment/templates/NOTES.txt @@ -0,0 +1,15 @@ +1. 
Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "environment.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of it by running 'kubectl get svc -w {{ template "environment.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "environment.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "environment.name" . }},release={{ template "environment.fullname" . }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/manifests/bucketeer/charts/environment/templates/_helpers.tpl b/manifests/bucketeer/charts/environment/templates/_helpers.tpl new file mode 100644 index 000000000..ee4d33aea --- /dev/null +++ b/manifests/bucketeer/charts/environment/templates/_helpers.tpl @@ -0,0 +1,56 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "environment.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "environment.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "environment.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "service-cert-secret" -}} +{{- if .Values.tls.service.secret }} +{{- printf "%s" .Values.tls.service.secret -}} +{{- else -}} +{{ template "environment.fullname" . }}-service-cert +{{- end -}} +{{- end -}} + +{{- define "oauth-key-secret" -}} +{{- if .Values.oauth.key.secret }} +{{- printf "%s" .Values.oauth.key.secret -}} +{{- else -}} +{{ template "environment.fullname" . }}-oauth-key +{{- end -}} +{{- end -}} + +{{- define "service-token-secret" -}} +{{- if .Values.serviceToken.secret }} +{{- printf "%s" .Values.serviceToken.secret -}} +{{- else -}} +{{ template "environment.fullname" . }}-service-token +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/manifests/bucketeer/charts/environment/templates/deployment.yaml b/manifests/bucketeer/charts/environment/templates/deployment.yaml new file mode 100644 index 000000000..d0dd7b506 --- /dev/null +++ b/manifests/bucketeer/charts/environment/templates/deployment.yaml @@ -0,0 +1,167 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "environment.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "environment.name" . }} + chart: {{ template "environment.chart" . }} + release: {{ template "environment.fullname" . 
}} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "environment.name" . }} + release: {{ template "environment.fullname" . }} + template: + metadata: + labels: + app: {{ template "environment.name" . }} + release: {{ template "environment.fullname" . }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/envoy-configmap.yaml") . | sha256sum }} + spec: + {{- with .Values.global.image.imagePullSecrets }} + imagePullSecrets: {{- toYaml . | nindent 8 }} + {{- end }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + volumes: + - name: envoy-config + configMap: + name: {{ template "environment.fullname" . }}-envoy-config + - name: service-cert-secret + secret: + secretName: {{ template "service-cert-secret" . }} + - name: oauth-key-secret + secret: + secretName: {{ template "oauth-key-secret" . }} + - name: service-token-secret + secret: + secretName: {{ template "service-token-secret" . 
}} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: ["server"] + env: + - name: BUCKETEER_ENVIRONMENT_PROJECT + value: "{{ .Values.env.project }}" + - name: BUCKETEER_ENVIRONMENT_MYSQL_USER + value: "{{ .Values.env.mysqlUser }}" + - name: BUCKETEER_ENVIRONMENT_MYSQL_PASS + value: "{{ .Values.env.mysqlPass }}" + - name: BUCKETEER_ENVIRONMENT_MYSQL_HOST + value: "{{ .Values.env.mysqlHost }}" + - name: BUCKETEER_ENVIRONMENT_MYSQL_PORT + value: "{{ .Values.env.mysqlPort }}" + - name: BUCKETEER_ENVIRONMENT_MYSQL_DB_NAME + value: "{{ .Values.env.mysqlDbName }}" + - name: BUCKETEER_ENVIRONMENT_DEFAULT_DATASET + value: "{{ .Values.env.defaultDataset }}" + - name: BUCKETEER_ENVIRONMENT_LOCATION + value: "{{ .Values.env.location }}" + - name: BUCKETEER_ENVIRONMENT_DOMAIN_EVENT_TOPIC + value: "{{ .Values.env.domainEventTopic }}" + - name: BUCKETEER_ENVIRONMENT_ACCOUNT_SERVICE + value: "{{ .Values.env.accountService }}" + - name: BUCKETEER_ENVIRONMENT_PORT + value: "{{ .Values.env.port }}" + - name: BUCKETEER_ENVIRONMENT_METRICS_PORT + value: "{{ .Values.env.metricsPort }}" + - name: BUCKETEER_ENVIRONMENT_LOG_LEVEL + value: "{{ .Values.env.logLevel }}" + - name: BUCKETEER_ENVIRONMENT_OAUTH_CLIENT_ID + value: "{{ .Values.oauth.clientId }}" + - name: BUCKETEER_ENVIRONMENT_OAUTH_ISSUER + value: "{{ .Values.oauth.issuer }}" + - name: BUCKETEER_ENVIRONMENT_OAUTH_KEY + value: /usr/local/oauth-key/public.pem + - name: BUCKETEER_ENVIRONMENT_CERT + value: /usr/local/certs/service/tls.crt + - name: BUCKETEER_ENVIRONMENT_KEY + value: /usr/local/certs/service/tls.key + - name: BUCKETEER_ENVIRONMENT_SERVICE_TOKEN + value: /usr/local/service-token/token + volumeMounts: + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + - name: oauth-key-secret + mountPath: /usr/local/oauth-key + readOnly: true + - name: service-token-secret + 
mountPath: /usr/local/service-token + readOnly: true + ports: + - name: service + containerPort: {{ .Values.env.port }} + - name: metrics + containerPort: {{ .Values.env.metricsPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: service + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: service + scheme: HTTPS + resources: +{{ toYaml .Values.resources | indent 12 }} + - name: envoy + image: "{{ .Values.envoy.image.repository }}:{{ .Values.envoy.image.tag }}" + imagePullPolicy: {{ .Values.envoy.image.pullPolicy }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-c" + - "while [ $(netstat -plunt | grep tcp | grep -v envoy | wc -l) -ne 0 ]; do sleep 1; done;" + command: ["envoy"] + args: + - "-c" + - "/usr/local/conf/config.yaml" + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + - name: envoy-config + mountPath: /usr/local/conf/ + readOnly: true + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + ports: + - name: envoy + containerPort: {{ .Values.envoy.port }} + - name: admin + containerPort: {{ .Values.envoy.adminPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + resources: +{{ toYaml .Values.envoy.resources | indent 12 }} + strategy: + type: RollingUpdate diff --git a/manifests/bucketeer/charts/environment/templates/envoy-configmap.yaml 
b/manifests/bucketeer/charts/environment/templates/envoy-configmap.yaml new file mode 100644 index 000000000..4e6ed886c --- /dev/null +++ b/manifests/bucketeer/charts/environment/templates/envoy-configmap.yaml @@ -0,0 +1,229 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "environment.fullname" . }}-envoy-config + namespace: {{ .Values.namespace }} + labels: + app: {{ template "environment.name" . }} + chart: {{ template "environment.chart" . }} + release: {{ template "environment.fullname" . }} + heritage: {{ .Release.Service }} +data: + config.yaml: |- + admin: + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 + static_resources: + clusters: + - name: environment + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + lb_policy: round_robin + load_assignment: + cluster_name: environment + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 9090 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + - name: account + type: strict_dns + connect_timeout: 5s + 
dns_lookup_family: V4_ONLY + lb_policy: round_robin + load_assignment: + cluster_name: account + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: account.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + listeners: + - name: ingress + address: + socket_address: + address: 0.0.0.0 + port_value: 9000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + environment: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: ingress_services + routes: + - match: + 
headers: + - name: content-type + string_match: + exact: application/grpc + prefix: / + route: + cluster: environment + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 60s + stat_prefix: ingress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true + - name: egress + address: + socket_address: + address: 127.0.0.1 + port_value: 9001 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + account: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: egress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.account.AccountService + route: + cluster: account + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: egress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext 
+ common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true diff --git a/manifests/bucketeer/charts/environment/templates/hpa.yaml b/manifests/bucketeer/charts/environment/templates/hpa.yaml new file mode 100644 index 000000000..eebdcaa3a --- /dev/null +++ b/manifests/bucketeer/charts/environment/templates/hpa.yaml @@ -0,0 +1,19 @@ +{{ if .Values.hpa.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "environment.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "environment.fullname" . }} + minReplicas: {{ .Values.hpa.minReplicas }} + maxReplicas: {{ .Values.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpa.metrics.cpu.targetAverageUtilization }} +{{ end }} diff --git a/manifests/bucketeer/charts/environment/templates/oauth-key-secret.yaml b/manifests/bucketeer/charts/environment/templates/oauth-key-secret.yaml new file mode 100644 index 000000000..49d286465 --- /dev/null +++ b/manifests/bucketeer/charts/environment/templates/oauth-key-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.oauth.key.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "environment.fullname" . }}-oauth-key + namespace: {{ .Values.namespace }} + labels: + app: {{ template "environment.name" . }} + chart: {{ template "environment.chart" . }} + release: {{ template "environment.fullname" . 
}} + heritage: {{ .Release.Service }} +type: Opaque +data: + public.pem: {{ required "OAuth key is required" .Values.oauth.key.public | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/environment/templates/pdb.yaml b/manifests/bucketeer/charts/environment/templates/pdb.yaml new file mode 100644 index 000000000..09b383b7b --- /dev/null +++ b/manifests/bucketeer/charts/environment/templates/pdb.yaml @@ -0,0 +1,12 @@ +{{ if .Values.pdb.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "environment.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + selector: + matchLabels: + app: {{ template "environment.name" . }} +{{ end }} diff --git a/manifests/bucketeer/charts/environment/templates/service-cert-secret.yaml b/manifests/bucketeer/charts/environment/templates/service-cert-secret.yaml new file mode 100644 index 000000000..8daa5a383 --- /dev/null +++ b/manifests/bucketeer/charts/environment/templates/service-cert-secret.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.tls.service.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "environment.fullname" . }}-service-cert + namespace: {{ .Values.namespace }} + labels: + app: {{ template "environment.name" . }} + chart: {{ template "environment.chart" . }} + release: {{ template "environment.fullname" . 
}} + heritage: {{ .Release.Service }} +type: Opaque +data: + tls.crt: {{ required "Service TLS certificate is required" .Values.tls.service.cert | b64enc | quote }} + tls.key: {{ required "Service TLS key is required" .Values.tls.service.key | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/environment/templates/service-token-secret.yaml b/manifests/bucketeer/charts/environment/templates/service-token-secret.yaml new file mode 100644 index 000000000..c4de6376d --- /dev/null +++ b/manifests/bucketeer/charts/environment/templates/service-token-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.serviceToken.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "environment.fullname" . }}-service-token + namespace: {{ .Values.namespace }} + labels: + app: {{ template "environment.name" . }} + chart: {{ template "environment.chart" . }} + release: {{ template "environment.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + token: {{ required "Service token is required" .Values.serviceToken.token | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/environment/templates/service.yaml b/manifests/bucketeer/charts/environment/templates/service.yaml new file mode 100644 index 000000000..2b82b0d20 --- /dev/null +++ b/manifests/bucketeer/charts/environment/templates/service.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "environment.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "environment.name" . }} + chart: {{ template "environment.chart" . }} + release: {{ template "environment.fullname" . 
}} + heritage: {{ .Release.Service }} + envoy: "true" + metrics: "true" +spec: + type: {{ .Values.service.type }} + clusterIP: {{ .Values.service.clusterIP }} + ports: + - name: service + port: {{ .Values.service.externalPort }} + targetPort: envoy + protocol: TCP + - name: metrics + port: {{ .Values.env.metricsPort }} + protocol: TCP + - name: admin + port: {{ .Values.envoy.adminPort }} + protocol: TCP + selector: + app: {{ template "environment.name" . }} + release: {{ template "environment.fullname" . }} + \ No newline at end of file diff --git a/manifests/bucketeer/charts/environment/values.yaml b/manifests/bucketeer/charts/environment/values.yaml new file mode 100644 index 000000000..137931575 --- /dev/null +++ b/manifests/bucketeer/charts/environment/values.yaml @@ -0,0 +1,77 @@ +image: + repository: ghcr.io/bucketeer-io/bucketeer-environment + pullPolicy: IfNotPresent + +fullnameOverride: "environment" + +namespace: + +env: + project: + mysqlUser: + mysqlPass: + mysqlHost: + mysqlPort: 3306 + mysqlDbName: + defaultDataset: + location: asia-northeast1 + domainEventTopic: + accountService: localhost:9001 + logLevel: info + port: 9090 + metricsPort: 9002 + +affinity: {} + +nodeSelector: {} + +pdb: + enabled: + maxUnavailable: 50% + +hpa: + enabled: + minReplicas: + maxReplicas: + metrics: + cpu: + targetAverageUtilization: + +tls: + service: + secret: + cert: + key: + +oauth: + key: + secret: + public: + clientId: + issuer: + +serviceToken: + secret: + token: + +envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + +service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + +health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + +resources: {} diff --git a/manifests/bucketeer/charts/event-counter/.helmignore b/manifests/bucketeer/charts/event-counter/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ 
b/manifests/bucketeer/charts/event-counter/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/manifests/bucketeer/charts/event-counter/Chart.yaml b/manifests/bucketeer/charts/event-counter/Chart.yaml new file mode 100644 index 000000000..e2369a2b2 --- /dev/null +++ b/manifests/bucketeer/charts/event-counter/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for bucketeer-event-counter +name: event-counter +version: 1.0.0 diff --git a/manifests/bucketeer/charts/event-counter/templates/NOTES.txt b/manifests/bucketeer/charts/event-counter/templates/NOTES.txt new file mode 100644 index 000000000..6b120bdf4 --- /dev/null +++ b/manifests/bucketeer/charts/event-counter/templates/NOTES.txt @@ -0,0 +1,15 @@ +1. Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "event-counter.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of it by running 'kubectl get svc -w {{ template "event-counter.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "event-counter.fullname" . 
}} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "event-counter.name" . }},release={{ template "event-counter.fullname" . }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/manifests/bucketeer/charts/event-counter/templates/_helpers.tpl b/manifests/bucketeer/charts/event-counter/templates/_helpers.tpl new file mode 100644 index 000000000..81bb59b54 --- /dev/null +++ b/manifests/bucketeer/charts/event-counter/templates/_helpers.tpl @@ -0,0 +1,56 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "event-counter.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "event-counter.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "event-counter.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "service-cert-secret" -}} +{{- if .Values.tls.service.secret }} +{{- printf "%s" .Values.tls.service.secret -}} +{{- else -}} +{{ template "event-counter.fullname" . }}-service-cert +{{- end -}} +{{- end -}} + +{{- define "oauth-key-secret" -}} +{{- if .Values.oauth.key.secret }} +{{- printf "%s" .Values.oauth.key.secret -}} +{{- else -}} +{{ template "event-counter.fullname" . }}-oauth-key +{{- end -}} +{{- end -}} + +{{- define "service-token-secret" -}} +{{- if .Values.serviceToken.secret }} +{{- printf "%s" .Values.serviceToken.secret -}} +{{- else -}} +{{ template "event-counter.fullname" . }}-service-token +{{- end -}} +{{- end -}} diff --git a/manifests/bucketeer/charts/event-counter/templates/deployment.yaml b/manifests/bucketeer/charts/event-counter/templates/deployment.yaml new file mode 100644 index 000000000..d036e5185 --- /dev/null +++ b/manifests/bucketeer/charts/event-counter/templates/deployment.yaml @@ -0,0 +1,173 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "event-counter.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "event-counter.name" . }} + chart: {{ template "event-counter.chart" . }} + release: {{ template "event-counter.fullname" . }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "event-counter.name" . }} + release: {{ template "event-counter.fullname" . }} + template: + metadata: + labels: + app: {{ template "event-counter.name" . }} + release: {{ template "event-counter.fullname" . }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/envoy-configmap.yaml") . | sha256sum }} + spec: + {{- with .Values.global.image.imagePullSecrets }} + imagePullSecrets: {{- toYaml . 
| nindent 8 }} + {{- end }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + volumes: + - name: envoy-config + configMap: + name: {{ template "event-counter.fullname" . }}-envoy-config + - name: service-cert-secret + secret: + secretName: {{ template "service-cert-secret" . }} + - name: oauth-key-secret + secret: + secretName: {{ template "oauth-key-secret" . }} + - name: service-token-secret + secret: + secretName: {{ template "service-token-secret" . }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: ["server"] + env: + - name: BUCKETEER_EVENT_COUNTER_PROJECT + value: "{{ .Values.env.project }}" + - name: BUCKETEER_EVENT_COUNTER_EXPERIMENT_SERVICE + value: "{{ .Values.env.experimentService }}" + - name: BUCKETEER_EVENT_COUNTER_FEATURE_SERVICE + value: "{{ .Values.env.featureService }}" + - name: BUCKETEER_EVENT_COUNTER_ACCOUNT_SERVICE + value: "{{ .Values.env.accountService }}" + {{ if .Values.env.druidPassword }} + - name: BUCKETEER_EVENT_COUNTER_DRUID_URL + value: "{{ .Values.env.druidUrl }}" + - name: BUCKETEER_EVENT_COUNTER_DRUID_DATASOURCE_PREFIX + value: "{{ .Values.env.druidDatasourcePrefix }}" + - name: BUCKETEER_EVENT_COUNTER_DRUID_USERNAME + value: "{{ .Values.env.druidUsername }}" + - name: BUCKETEER_EVENT_COUNTER_DRUID_PASSWORD + value: "{{ .Values.env.druidPassword }}" + {{ end }} + - name: BUCKETEER_EVENT_COUNTER_MYSQL_USER + value: "{{ .Values.env.mysqlUser }}" + - name: BUCKETEER_EVENT_COUNTER_MYSQL_PASS + value: "{{ .Values.env.mysqlPass }}" + - name: BUCKETEER_EVENT_COUNTER_MYSQL_HOST + value: "{{ .Values.env.mysqlHost }}" + - name: BUCKETEER_EVENT_COUNTER_MYSQL_PORT + value: "{{ .Values.env.mysqlPort }}" + - name: BUCKETEER_EVENT_COUNTER_MYSQL_DB_NAME + value: "{{ .Values.env.mysqlDbName }}" + - name: BUCKETEER_EVENT_COUNTER_PORT + value: "{{ 
.Values.env.port }}" + - name: BUCKETEER_EVENT_COUNTER_METRICS_PORT + value: "{{ .Values.env.metricsPort }}" + - name: BUCKETEER_EVENT_COUNTER_SERVICE_TOKEN + value: /usr/local/service-token/token + - name: BUCKETEER_EVENT_COUNTER_OAUTH_CLIENT_ID + value: "{{ .Values.oauth.clientId }}" + - name: BUCKETEER_EVENT_COUNTER_OAUTH_ISSUER + value: "{{ .Values.oauth.issuer }}" + - name: BUCKETEER_EVENT_COUNTER_OAUTH_KEY + value: /usr/local/oauth-key/public.pem + - name: BUCKETEER_EVENT_COUNTER_CERT + value: /usr/local/certs/service/tls.crt + - name: BUCKETEER_EVENT_COUNTER_KEY + value: /usr/local/certs/service/tls.key + volumeMounts: + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + - name: service-token-secret + mountPath: /usr/local/service-token + readOnly: true + - name: oauth-key-secret + mountPath: /usr/local/oauth-key + readOnly: true + ports: + - name: service + containerPort: {{ .Values.env.port }} + - name: metrics + containerPort: {{ .Values.env.metricsPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: service + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: service + scheme: HTTPS + resources: +{{ toYaml .Values.resources | indent 12 }} + - name: envoy + image: "{{ .Values.envoy.image.repository }}:{{ .Values.envoy.image.tag }}" + imagePullPolicy: {{ .Values.envoy.image.pullPolicy }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-c" + - "while [ $(netstat -plunt | grep tcp | grep -v envoy | wc -l) -ne 0 ]; do sleep 1; done;" + command: ["envoy"] + args: + - "-c" + - "/usr/local/conf/config.yaml" + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + - name: envoy-config + mountPath: /usr/local/conf/ + 
readOnly: true + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + ports: + - name: envoy + containerPort: {{ .Values.envoy.port }} + - name: admin + containerPort: {{ .Values.envoy.adminPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + resources: +{{ toYaml .Values.envoy.resources | indent 12 }} + strategy: + type: RollingUpdate diff --git a/manifests/bucketeer/charts/event-counter/templates/envoy-configmap.yaml b/manifests/bucketeer/charts/event-counter/templates/envoy-configmap.yaml new file mode 100644 index 000000000..8daa7a2c7 --- /dev/null +++ b/manifests/bucketeer/charts/event-counter/templates/envoy-configmap.yaml @@ -0,0 +1,337 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "event-counter.fullname" . }}-envoy-config + namespace: {{ .Values.namespace }} + labels: + app: {{ template "event-counter.name" . }} + chart: {{ template "event-counter.chart" . }} + release: {{ template "event-counter.fullname" . 
}} + heritage: {{ .Release.Service }} +data: + config.yaml: |- + admin: + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 + static_resources: + clusters: + - name: event-counter + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + lb_policy: round_robin + load_assignment: + cluster_name: event-counter + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 9090 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + - name: experiment + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + lb_policy: round_robin + load_assignment: + cluster_name: experiment + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: experiment.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + 
filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + - name: feature + connect_timeout: 5s + dns_lookup_family: V4_ONLY + lb_policy: round_robin + load_assignment: + cluster_name: feature + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: feature.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + type: strict_dns + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + - name: account + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + lb_policy: round_robin + load_assignment: + cluster_name: account + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: account.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: 
envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + listeners: + - name: ingress + address: + socket_address: + address: 0.0.0.0 + port_value: 9000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + event-counter: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: ingress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: / + route: + cluster: event-counter + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 3600s + stat_prefix: ingress_http + stream_idle_timeout: 3600s + transport_socket: + name: 
envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true + - name: egress + address: + socket_address: + address: 127.0.0.1 + port_value: 9001 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + account: + value: 25 + experiment: + value: 25 + feature: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: egress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.experiment.ExperimentService + route: + cluster: experiment + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.feature.FeatureService + route: + cluster: feature + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.account.AccountService + route: + cluster: account + retry_policy: + num_retries: 3 + retry_on: 
5xx + timeout: 15s + stat_prefix: egress_http + stream_idle_timeout: 3600s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true diff --git a/manifests/bucketeer/charts/event-counter/templates/hpa.yaml b/manifests/bucketeer/charts/event-counter/templates/hpa.yaml new file mode 100644 index 000000000..96ee74b4c --- /dev/null +++ b/manifests/bucketeer/charts/event-counter/templates/hpa.yaml @@ -0,0 +1,19 @@ +{{ if .Values.hpa.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "event-counter.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "event-counter.fullname" . }} + minReplicas: {{ .Values.hpa.minReplicas }} + maxReplicas: {{ .Values.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpa.metrics.cpu.targetAverageUtilization }} +{{ end }} diff --git a/manifests/bucketeer/charts/event-counter/templates/oauth-key-secret.yaml b/manifests/bucketeer/charts/event-counter/templates/oauth-key-secret.yaml new file mode 100644 index 000000000..71329ceae --- /dev/null +++ b/manifests/bucketeer/charts/event-counter/templates/oauth-key-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.oauth.key.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "event-counter.fullname" . }}-oauth-key + namespace: {{ .Values.namespace }} + labels: + app: {{ template "event-counter.name" . }} + chart: {{ template "event-counter.chart" . }} + release: {{ template "event-counter.fullname" . 
}} + heritage: {{ .Release.Service }} +type: Opaque +data: + public.pem: {{ required "OAuth public key is required" .Values.oauth.key.public | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/event-counter/templates/pdb.yaml b/manifests/bucketeer/charts/event-counter/templates/pdb.yaml new file mode 100644 index 000000000..640bcef32 --- /dev/null +++ b/manifests/bucketeer/charts/event-counter/templates/pdb.yaml @@ -0,0 +1,12 @@ +{{ if .Values.pdb.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "event-counter.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + selector: + matchLabels: + app: {{ template "event-counter.name" . }} +{{ end }} diff --git a/manifests/bucketeer/charts/event-counter/templates/service-cert-secret.yaml b/manifests/bucketeer/charts/event-counter/templates/service-cert-secret.yaml new file mode 100644 index 000000000..3468856bc --- /dev/null +++ b/manifests/bucketeer/charts/event-counter/templates/service-cert-secret.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.tls.service.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "event-counter.fullname" . }}-service-cert + namespace: {{ .Values.namespace }} + labels: + app: {{ template "event-counter.name" . }} + chart: {{ template "event-counter.chart" . }} + release: {{ template "event-counter.fullname" . 
}} + heritage: {{ .Release.Service }} +type: Opaque +data: + tls.crt: {{ required "Service TLS certificate is required" .Values.tls.service.cert | b64enc | quote }} + tls.key: {{ required "Service TLS key is required" .Values.tls.service.key | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/event-counter/templates/service-token-secret.yaml b/manifests/bucketeer/charts/event-counter/templates/service-token-secret.yaml new file mode 100644 index 000000000..de52281ca --- /dev/null +++ b/manifests/bucketeer/charts/event-counter/templates/service-token-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.serviceToken.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "event-counter.fullname" . }}-service-token + namespace: {{ .Values.namespace }} + labels: + app: {{ template "event-counter.name" . }} + chart: {{ template "event-counter.chart" . }} + release: {{ template "event-counter.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + token: {{ required "Service token is required" .Values.serviceToken.token | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/event-counter/templates/service.yaml b/manifests/bucketeer/charts/event-counter/templates/service.yaml new file mode 100644 index 000000000..5f80b8167 --- /dev/null +++ b/manifests/bucketeer/charts/event-counter/templates/service.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "event-counter.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "event-counter.name" . }} + chart: {{ template "event-counter.chart" . }} + release: {{ template "event-counter.fullname" . 
}} + heritage: {{ .Release.Service }} + envoy: "true" + metrics: "true" +spec: + type: {{ .Values.service.type }} + clusterIP: {{ .Values.service.clusterIP }} + ports: + - name: service + port: {{ .Values.service.externalPort }} + targetPort: envoy + protocol: TCP + - name: metrics + port: {{ .Values.env.metricsPort }} + protocol: TCP + - name: admin + port: {{ .Values.envoy.adminPort }} + protocol: TCP + selector: + app: {{ template "event-counter.name" . }} + release: {{ template "event-counter.fullname" . }} diff --git a/manifests/bucketeer/charts/event-counter/values.yaml b/manifests/bucketeer/charts/event-counter/values.yaml new file mode 100644 index 000000000..d2adbb824 --- /dev/null +++ b/manifests/bucketeer/charts/event-counter/values.yaml @@ -0,0 +1,79 @@ +image: + repository: ghcr.io/bucketeer-io/bucketeer-event-counter + pullPolicy: IfNotPresent + +fullnameOverride: "event-counter" + +namespace: + +env: + project: + experimentService: localhost:9001 + featureService: localhost:9001 + accountService: localhost:9001 + druidUrl: + druidDatasourcePrefix: + druidUsername: + druidPassword: + mysqlUser: + mysqlPass: + mysqlHost: + mysqlPort: 3306 + mysqlDbName: + port: 9090 + metricsPort: 9002 + +affinity: {} + +nodeSelector: {} + +pdb: + enabled: + maxUnavailable: 50% + +hpa: + enabled: + minReplicas: + maxReplicas: + metrics: + cpu: + targetAverageUtilization: + +tls: + service: + secret: + cert: + key: + +serviceToken: + secret: + token: + +oauth: + key: + secret: + public: + clientId: + issuer: + +envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + +service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + +health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + +resources: {} diff --git a/manifests/bucketeer/charts/event-persister-evaluation-events-kafka/.helmignore 
b/manifests/bucketeer/charts/event-persister-evaluation-events-kafka/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/manifests/bucketeer/charts/event-persister-evaluation-events-kafka/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/manifests/bucketeer/charts/event-persister-evaluation-events-kafka/Chart.yaml b/manifests/bucketeer/charts/event-persister-evaluation-events-kafka/Chart.yaml new file mode 100644 index 000000000..801425633 --- /dev/null +++ b/manifests/bucketeer/charts/event-persister-evaluation-events-kafka/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for bucketeer-event-persister-evaluation-events-kafka +name: event-persister-evaluation-events-kafka +version: 1.0.0 diff --git a/manifests/bucketeer/charts/event-persister-evaluation-events-kafka/templates/NOTES.txt b/manifests/bucketeer/charts/event-persister-evaluation-events-kafka/templates/NOTES.txt new file mode 100644 index 000000000..e69de29bb diff --git a/manifests/bucketeer/charts/event-persister-evaluation-events-kafka/templates/_helpers.tpl b/manifests/bucketeer/charts/event-persister-evaluation-events-kafka/templates/_helpers.tpl new file mode 100644 index 000000000..689916cbd --- /dev/null +++ b/manifests/bucketeer/charts/event-persister-evaluation-events-kafka/templates/_helpers.tpl @@ -0,0 +1,48 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "event-persister-evaluation-events-kafka.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "event-persister-evaluation-events-kafka.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "event-persister-evaluation-events-kafka.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "service-cert-secret" -}} +{{- if .Values.tls.service.secret }} +{{- printf "%s" .Values.tls.service.secret -}} +{{- else -}} +{{ template "event-persister-evaluation-events-kafka.fullname" . }}-service-cert +{{- end -}} +{{- end -}} + +{{- define "service-token-secret" -}} +{{- if .Values.serviceToken.secret }} +{{- printf "%s" .Values.serviceToken.secret -}} +{{- else -}} +{{ template "event-persister-evaluation-events-kafka.fullname" . 
}}-service-token +{{- end -}} +{{- end -}} diff --git a/manifests/bucketeer/charts/event-persister-evaluation-events-kafka/templates/deployment.yaml b/manifests/bucketeer/charts/event-persister-evaluation-events-kafka/templates/deployment.yaml new file mode 100644 index 000000000..05ecbd9b6 --- /dev/null +++ b/manifests/bucketeer/charts/event-persister-evaluation-events-kafka/templates/deployment.yaml @@ -0,0 +1,193 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "event-persister-evaluation-events-kafka.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "event-persister-evaluation-events-kafka.fullname" . }} + chart: {{ template "event-persister-evaluation-events-kafka.chart" . }} + release: {{ template "event-persister-evaluation-events-kafka.fullname" . }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "event-persister-evaluation-events-kafka.name" . }} + release: {{ template "event-persister-evaluation-events-kafka.fullname" . }} + template: + metadata: + labels: + app: {{ template "event-persister-evaluation-events-kafka.name" . }} + release: {{ template "event-persister-evaluation-events-kafka.fullname" . }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/envoy-configmap.yaml") . | sha256sum }} + spec: + {{- with .Values.global.image.imagePullSecrets }} + imagePullSecrets: {{- toYaml . | nindent 8 }} + {{- end }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + volumes: + - name: envoy-config + configMap: + name: {{ template "event-persister-evaluation-events-kafka.fullname" . }}-envoy-config + - name: service-cert-secret + secret: + secretName: {{ template "service-cert-secret" . }} + - name: service-token-secret + secret: + secretName: {{ template "service-token-secret" . 
}} + containers: + - name: "event-persister" + image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: ["server"] + env: + - name: BUCKETEER_EVENT_PERSISTER_PROJECT + value: "{{ .Values.env.project }}" + - name: BUCKETEER_EVENT_PERSISTER_FEATURE_SERVICE + value: "{{ .Values.env.featureService }}" + - name: BUCKETEER_EVENT_PERSISTER_BIGTABLE_INSTANCE + value: "{{ .Values.env.bigtableInstance }}" + - name: BUCKETEER_EVENT_PERSISTER_LOCATION + value: "{{ .Values.env.location }}" + - name: BUCKETEER_EVENT_PERSISTER_TOPIC + value: "{{ .Values.env.topic }}" + - name: BUCKETEER_EVENT_PERSISTER_SUBSCRIPTION + value: "{{ .Values.env.subscription }}" + - name: BUCKETEER_EVENT_PERSISTER_WRITER + value: "{{ .Values.env.writer }}" + {{ if .Values.env.kafkaPassword }} + - name: BUCKETEER_EVENT_PERSISTER_KAFKA_URL + value: "{{ .Values.env.kafkaUrl }}" + - name: BUCKETEER_EVENT_PERSISTER_KAFKA_TOPIC_PREFIX + value: "{{ .Values.env.kafkaTopicPrefix }}" + - name: BUCKETEER_EVENT_PERSISTER_KAFKA_TOPIC_DATA_TYPE + value: "{{ .Values.env.kafkaTopicDataType }}" + - name: BUCKETEER_EVENT_PERSISTER_KAFKA_USERNAME + value: "{{ .Values.env.kafkaUsername }}" + - name: BUCKETEER_EVENT_PERSISTER_KAFKA_PASSWORD + value: "{{ .Values.env.kafkaPassword }}" + {{ end }} + - name: BUCKETEER_EVENT_PERSISTER_PORT + value: "{{ .Values.env.port }}" + - name: BUCKETEER_EVENT_PERSISTER_METRICS_PORT + value: "{{ .Values.env.metricsPort }}" + - name: BUCKETEER_EVENT_PERSISTER_LOG_LEVEL + value: "{{ .Values.env.logLevel }}" + - name: BUCKETEER_EVENT_PERSISTER_MAX_MPS + value: "{{ .Values.env.maxMps }}" + - name: BUCKETEER_EVENT_PERSISTER_NUM_WORKERS + value: "{{ .Values.env.numWorkers }}" + - name: BUCKETEER_EVENT_PERSISTER_NUM_WRITERS + value: "{{ .Values.env.numWriters }}" + - name: BUCKETEER_EVENT_PERSISTER_FLUSH_SIZE + value: "{{ .Values.env.flushSize }}" + - name: BUCKETEER_EVENT_PERSISTER_FLUSH_INTERVAL + value: "{{ 
.Values.env.flushInterval }}" + - name: BUCKETEER_EVENT_PERSISTER_PULLER_NUM_GOROUTINES + value: "{{ .Values.env.pullerNumGoroutines }}" + - name: BUCKETEER_EVENT_PERSISTER_PULLER_MAX_OUTSTANDING_MESSAGES + value: "{{ .Values.env.pullerMaxOutstandingMessages }}" + - name: BUCKETEER_EVENT_PERSISTER_PULLER_MAX_OUTSTANDING_BYTES + value: "{{ .Values.env.pullerMaxOutstandingBytes }}" + - name: BUCKETEER_EVENT_PERSISTER_REDIS_SERVER_NAME + value: "{{ .Values.env.redis.serverName }}" + - name: BUCKETEER_EVENT_PERSISTER_REDIS_ADDR + value: "{{ .Values.env.redis.addr }}" + - name: BUCKETEER_EVENT_PERSISTER_CERT + value: /usr/local/certs/service/tls.crt + - name: BUCKETEER_EVENT_PERSISTER_KEY + value: /usr/local/certs/service/tls.key + - name: BUCKETEER_EVENT_PERSISTER_SERVICE_TOKEN + value: /usr/local/service-token/token + - name: BUCKETEER_EVENT_PERSISTER_ALLOYDB_REGION + value: "{{ .Values.env.alloyDBRegion }}" + - name: BUCKETEER_EVENT_PERSISTER_ALLOYDB_CLUSTER_ID + value: "{{ .Values.env.alloyDBClusterId}}" + - name: BUCKETEER_EVENT_PERSISTER_ALLOYDB_INSTANCE_ID + value: "{{ .Values.env.alloyDBInstanceId }}" + - name: BUCKETEER_EVENT_PERSISTER_ALLOYDB_USER + value: "{{ .Values.env.alloyDBUser }}" + - name: BUCKETEER_EVENT_PERSISTER_ALLOYDB_PASS + value: "{{ .Values.env.alloyDBPass }}" + - name: BUCKETEER_EVENT_PERSISTER_ALLOYDB_NAME + value: "{{ .Values.env.alloyDBName }}" + volumeMounts: + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + - name: service-token-secret + mountPath: /usr/local/service-token + readOnly: true + ports: + - name: service + containerPort: {{ .Values.env.port }} + - name: metrics + containerPort: {{ .Values.env.metricsPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: service + scheme: HTTPS + readinessProbe: + 
initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: service + scheme: HTTPS + resources: +{{ toYaml .Values.resources | indent 12 }} + - name: envoy + image: "{{ .Values.envoy.image.repository }}:{{ .Values.envoy.image.tag }}" + imagePullPolicy: {{ .Values.envoy.image.pullPolicy }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-c" + - "while [ $(netstat -plunt | grep tcp | grep -v envoy | wc -l) -ne 0 ]; do sleep 1; done;" + command: ["envoy"] + args: + - "-c" + - "/usr/local/conf/config.yaml" + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + - name: envoy-config + mountPath: /usr/local/conf/ + readOnly: true + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + ports: + - name: envoy + containerPort: {{ .Values.envoy.port }} + - name: admin + containerPort: {{ .Values.envoy.adminPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + resources: +{{ toYaml .Values.envoy.resources | indent 12 }} + strategy: + type: RollingUpdate diff --git a/manifests/bucketeer/charts/event-persister-evaluation-events-kafka/templates/envoy-configmap.yaml b/manifests/bucketeer/charts/event-persister-evaluation-events-kafka/templates/envoy-configmap.yaml new file mode 100644 index 000000000..3d952d0d3 --- /dev/null +++ b/manifests/bucketeer/charts/event-persister-evaluation-events-kafka/templates/envoy-configmap.yaml @@ -0,0 +1,229 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "event-persister-evaluation-events-kafka.fullname" . 
}}-envoy-config + namespace: {{ .Values.namespace }} + labels: + app: {{ template "event-persister-evaluation-events-kafka.fullname" . }} + chart: {{ template "event-persister-evaluation-events-kafka.chart" . }} + release: {{ template "event-persister-evaluation-events-kafka.fullname" . }} + heritage: {{ .Release.Service }} +data: + config.yaml: |- + admin: + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 + static_resources: + clusters: + - name: event-persister-evaluation-events-kafka + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + lb_policy: round_robin + load_assignment: + cluster_name: event-persister-evaluation-events-kafka + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 9090 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + - name: feature + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + lb_policy: round_robin + load_assignment: + cluster_name: feature + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + 
address: feature.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + listeners: + - name: ingress + address: + socket_address: + address: 0.0.0.0 + port_value: 9000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + event-persister-evaluation-events-kafka: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: ingress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: / + route: + cluster: 
event-persister-evaluation-events-kafka + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: ingress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true + - name: egress + address: + socket_address: + address: 127.0.0.1 + port_value: 9001 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + feature: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: egress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.feature.FeatureService + route: + cluster: feature + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: egress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - 
certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true diff --git a/manifests/bucketeer/charts/event-persister-evaluation-events-kafka/templates/hpa.yaml b/manifests/bucketeer/charts/event-persister-evaluation-events-kafka/templates/hpa.yaml new file mode 100644 index 000000000..4f394cec4 --- /dev/null +++ b/manifests/bucketeer/charts/event-persister-evaluation-events-kafka/templates/hpa.yaml @@ -0,0 +1,19 @@ +{{ if .Values.hpa.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "event-persister-evaluation-events-kafka.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "event-persister-evaluation-events-kafka.fullname" . }} + minReplicas: {{ .Values.hpa.minReplicas }} + maxReplicas: {{ .Values.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpa.metrics.cpu.targetAverageUtilization }} +{{ end }} diff --git a/manifests/bucketeer/charts/event-persister-evaluation-events-kafka/templates/service-cert-secret.yaml b/manifests/bucketeer/charts/event-persister-evaluation-events-kafka/templates/service-cert-secret.yaml new file mode 100644 index 000000000..e2bdf9a9f --- /dev/null +++ b/manifests/bucketeer/charts/event-persister-evaluation-events-kafka/templates/service-cert-secret.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.tls.service.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "event-persister-evaluation-events-kafka.fullname" . }}-service-cert + namespace: {{ .Values.namespace }} + labels: + app: {{ template "event-persister-evaluation-events-kafka.fullname" . }} + chart: {{ template "event-persister-evaluation-events-kafka.chart" . }} + release: {{ template "event-persister-evaluation-events-kafka.fullname" . 
}} + heritage: {{ .Release.Service }} +type: Opaque +data: + tls.crt: {{ required "Service TLS certificate is required" .Values.tls.service.cert | b64enc | quote }} + tls.key: {{ required "Service TLS key is required" .Values.tls.service.key | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/event-persister-evaluation-events-kafka/templates/service-token-secret.yaml b/manifests/bucketeer/charts/event-persister-evaluation-events-kafka/templates/service-token-secret.yaml new file mode 100644 index 000000000..eb6c4027d --- /dev/null +++ b/manifests/bucketeer/charts/event-persister-evaluation-events-kafka/templates/service-token-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.serviceToken.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "event-persister-evaluation-events-kafka.fullname" . }}-service-token + namespace: {{ .Values.namespace }} + labels: + app: {{ template "event-persister-evaluation-events-kafka.name" . }} + chart: {{ template "event-persister-evaluation-events-kafka.chart" . }} + release: {{ template "event-persister-evaluation-events-kafka.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + token: {{ required "Service token is required" .Values.serviceToken.token | b64enc | quote }} +{{- end }} diff --git a/manifests/bucketeer/charts/event-persister-evaluation-events-kafka/templates/service.yaml b/manifests/bucketeer/charts/event-persister-evaluation-events-kafka/templates/service.yaml new file mode 100644 index 000000000..84f29194c --- /dev/null +++ b/manifests/bucketeer/charts/event-persister-evaluation-events-kafka/templates/service.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "event-persister-evaluation-events-kafka.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "event-persister-evaluation-events-kafka.fullname" . 
}} + chart: {{ template "event-persister-evaluation-events-kafka.chart" . }} + release: {{ template "event-persister-evaluation-events-kafka.fullname" . }} + heritage: {{ .Release.Service }} + envoy: "true" + metrics: "true" +spec: + type: {{ .Values.service.type }} + clusterIP: {{ .Values.service.clusterIP }} + ports: + - name: service + port: {{ .Values.service.externalPort }} + targetPort: envoy + protocol: TCP + - name: metrics + port: {{ .Values.env.metricsPort }} + protocol: TCP + - name: admin + port: {{ .Values.envoy.adminPort }} + protocol: TCP + selector: + app: {{ template "event-persister-evaluation-events-kafka.name" . }} + release: {{ template "event-persister-evaluation-events-kafka.fullname" . }} diff --git a/manifests/bucketeer/charts/event-persister-evaluation-events-kafka/values.yaml b/manifests/bucketeer/charts/event-persister-evaluation-events-kafka/values.yaml new file mode 100644 index 000000000..4bad3d352 --- /dev/null +++ b/manifests/bucketeer/charts/event-persister-evaluation-events-kafka/values.yaml @@ -0,0 +1,86 @@ +image: + repository: ghcr.io/bucketeer-io/bucketeer-event-persister + pullPolicy: IfNotPresent + +nameOverride: "event-persister" +fullnameOverride: "event-persister-evaluation-events-kafka" + +namespace: + +env: + project: + featureService: localhost:9001 + bigtableInstance: + location: asia-northeast1 + topic: + subscription: + writer: kafka + kafkaUrl: kafka-kafka-bootstrap.kafka.svc.cluster.local:9092 + kafkaTopicPrefix: + kafkaTopicDataType: evaluation-events + kafkaUsername: service + kafkaPassword: + logLevel: info + port: 9090 + metricsPort: 9002 + maxMps: "1000" + numWorkers: 5 + numWriters: 2 + flushSize: 100 + flushInterval: 2s + redis: + serverName: + addr: + pullerNumGoroutines: 5 + pullerMaxOutstandingMessages: "1000" + pullerMaxOutstandingBytes: "1000000000" + alloyDBRegion: + alloyDBClusterId: + alloyDBInstanceId: + alloyDBUser: + alloyDBPass: + alloyDBName: + +affinity: {} + +nodeSelector: {} + +hpa: + 
enabled: + minReplicas: + maxReplicas: + metrics: + cpu: + targetAverageUtilization: + +envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + +tls: + service: + secret: + cert: + key: + +serviceToken: + secret: + token: + +health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + +resources: {} + +service: + type: ClusterIP + clusterIP: None + externalPort: 9000 diff --git a/manifests/bucketeer/charts/event-persister-goal-events-kafka/.helmignore b/manifests/bucketeer/charts/event-persister-goal-events-kafka/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/manifests/bucketeer/charts/event-persister-goal-events-kafka/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/manifests/bucketeer/charts/event-persister-goal-events-kafka/Chart.yaml b/manifests/bucketeer/charts/event-persister-goal-events-kafka/Chart.yaml new file mode 100644 index 000000000..9dbca527f --- /dev/null +++ b/manifests/bucketeer/charts/event-persister-goal-events-kafka/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for bucketeer-event-persister-goal-events-kafka +name: event-persister-goal-events-kafka +version: 1.0.0 diff --git a/manifests/bucketeer/charts/event-persister-goal-events-kafka/templates/NOTES.txt b/manifests/bucketeer/charts/event-persister-goal-events-kafka/templates/NOTES.txt new file mode 100644 index 000000000..e69de29bb diff --git a/manifests/bucketeer/charts/event-persister-goal-events-kafka/templates/_helpers.tpl 
b/manifests/bucketeer/charts/event-persister-goal-events-kafka/templates/_helpers.tpl new file mode 100644 index 000000000..26173e928 --- /dev/null +++ b/manifests/bucketeer/charts/event-persister-goal-events-kafka/templates/_helpers.tpl @@ -0,0 +1,48 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "event-persister-goal-events-kafka.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "event-persister-goal-events-kafka.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "event-persister-goal-events-kafka.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "service-cert-secret" -}} +{{- if .Values.tls.service.secret }} +{{- printf "%s" .Values.tls.service.secret -}} +{{- else -}} +{{ template "event-persister-goal-events-kafka.fullname" . }}-service-cert +{{- end -}} +{{- end -}} + +{{- define "service-token-secret" -}} +{{- if .Values.serviceToken.secret }} +{{- printf "%s" .Values.serviceToken.secret -}} +{{- else -}} +{{ template "event-persister-goal-events-kafka.fullname" . 
}}-service-token +{{- end -}} +{{- end -}} diff --git a/manifests/bucketeer/charts/event-persister-goal-events-kafka/templates/deployment.yaml b/manifests/bucketeer/charts/event-persister-goal-events-kafka/templates/deployment.yaml new file mode 100644 index 000000000..5c85f660b --- /dev/null +++ b/manifests/bucketeer/charts/event-persister-goal-events-kafka/templates/deployment.yaml @@ -0,0 +1,193 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "event-persister-goal-events-kafka.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "event-persister-goal-events-kafka.fullname" . }} + chart: {{ template "event-persister-goal-events-kafka.chart" . }} + release: {{ template "event-persister-goal-events-kafka.fullname" . }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "event-persister-goal-events-kafka.name" . }} + release: {{ template "event-persister-goal-events-kafka.fullname" . }} + template: + metadata: + labels: + app: {{ template "event-persister-goal-events-kafka.name" . }} + release: {{ template "event-persister-goal-events-kafka.fullname" . }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/envoy-configmap.yaml") . | sha256sum }} + spec: + {{- with .Values.global.image.imagePullSecrets }} + imagePullSecrets: {{- toYaml . | nindent 8 }} + {{- end }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + volumes: + - name: envoy-config + configMap: + name: {{ template "event-persister-goal-events-kafka.fullname" . }}-envoy-config + - name: service-cert-secret + secret: + secretName: {{ template "service-cert-secret" . }} + - name: service-token-secret + secret: + secretName: {{ template "service-token-secret" . 
}} + containers: + - name: "event-persister" + image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: ["server"] + env: + - name: BUCKETEER_EVENT_PERSISTER_PROJECT + value: "{{ .Values.env.project }}" + - name: BUCKETEER_EVENT_PERSISTER_FEATURE_SERVICE + value: "{{ .Values.env.featureService }}" + - name: BUCKETEER_EVENT_PERSISTER_BIGTABLE_INSTANCE + value: "{{ .Values.env.bigtableInstance }}" + - name: BUCKETEER_EVENT_PERSISTER_LOCATION + value: "{{ .Values.env.location }}" + - name: BUCKETEER_EVENT_PERSISTER_TOPIC + value: "{{ .Values.env.topic }}" + - name: BUCKETEER_EVENT_PERSISTER_SUBSCRIPTION + value: "{{ .Values.env.subscription }}" + - name: BUCKETEER_EVENT_PERSISTER_WRITER + value: "{{ .Values.env.writer }}" + {{ if .Values.env.kafkaPassword }} + - name: BUCKETEER_EVENT_PERSISTER_KAFKA_URL + value: "{{ .Values.env.kafkaUrl }}" + - name: BUCKETEER_EVENT_PERSISTER_KAFKA_TOPIC_PREFIX + value: "{{ .Values.env.kafkaTopicPrefix }}" + - name: BUCKETEER_EVENT_PERSISTER_KAFKA_TOPIC_DATA_TYPE + value: "{{ .Values.env.kafkaTopicDataType }}" + - name: BUCKETEER_EVENT_PERSISTER_KAFKA_USERNAME + value: "{{ .Values.env.kafkaUsername }}" + - name: BUCKETEER_EVENT_PERSISTER_KAFKA_PASSWORD + value: "{{ .Values.env.kafkaPassword }}" + {{ end }} + - name: BUCKETEER_EVENT_PERSISTER_PORT + value: "{{ .Values.env.port }}" + - name: BUCKETEER_EVENT_PERSISTER_METRICS_PORT + value: "{{ .Values.env.metricsPort }}" + - name: BUCKETEER_EVENT_PERSISTER_LOG_LEVEL + value: "{{ .Values.env.logLevel }}" + - name: BUCKETEER_EVENT_PERSISTER_MAX_MPS + value: "{{ .Values.env.maxMps }}" + - name: BUCKETEER_EVENT_PERSISTER_NUM_WORKERS + value: "{{ .Values.env.numWorkers }}" + - name: BUCKETEER_EVENT_PERSISTER_NUM_WRITERS + value: "{{ .Values.env.numWriters }}" + - name: BUCKETEER_EVENT_PERSISTER_FLUSH_SIZE + value: "{{ .Values.env.flushSize }}" + - name: BUCKETEER_EVENT_PERSISTER_FLUSH_INTERVAL + value: "{{ 
.Values.env.flushInterval }}" + - name: BUCKETEER_EVENT_PERSISTER_PULLER_NUM_GOROUTINES + value: "{{ .Values.env.pullerNumGoroutines }}" + - name: BUCKETEER_EVENT_PERSISTER_PULLER_MAX_OUTSTANDING_MESSAGES + value: "{{ .Values.env.pullerMaxOutstandingMessages }}" + - name: BUCKETEER_EVENT_PERSISTER_PULLER_MAX_OUTSTANDING_BYTES + value: "{{ .Values.env.pullerMaxOutstandingBytes }}" + - name: BUCKETEER_EVENT_PERSISTER_REDIS_SERVER_NAME + value: "{{ .Values.env.redis.serverName }}" + - name: BUCKETEER_EVENT_PERSISTER_REDIS_ADDR + value: "{{ .Values.env.redis.addr }}" + - name: BUCKETEER_EVENT_PERSISTER_CERT + value: /usr/local/certs/service/tls.crt + - name: BUCKETEER_EVENT_PERSISTER_KEY + value: /usr/local/certs/service/tls.key + - name: BUCKETEER_EVENT_PERSISTER_SERVICE_TOKEN + value: /usr/local/service-token/token + - name: BUCKETEER_EVENT_PERSISTER_ALLOYDB_REGION + value: "{{ .Values.env.alloyDBRegion }}" + - name: BUCKETEER_EVENT_PERSISTER_ALLOYDB_CLUSTER_ID + value: "{{ .Values.env.alloyDBClusterId}}" + - name: BUCKETEER_EVENT_PERSISTER_ALLOYDB_INSTANCE_ID + value: "{{ .Values.env.alloyDBInstanceId }}" + - name: BUCKETEER_EVENT_PERSISTER_ALLOYDB_USER + value: "{{ .Values.env.alloyDBUser }}" + - name: BUCKETEER_EVENT_PERSISTER_ALLOYDB_PASS + value: "{{ .Values.env.alloyDBPass }}" + - name: BUCKETEER_EVENT_PERSISTER_ALLOYDB_NAME + value: "{{ .Values.env.alloyDBName }}" + volumeMounts: + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + - name: service-token-secret + mountPath: /usr/local/service-token + readOnly: true + ports: + - name: service + containerPort: {{ .Values.env.port }} + - name: metrics + containerPort: {{ .Values.env.metricsPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: service + scheme: HTTPS + readinessProbe: + 
initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: service + scheme: HTTPS + resources: +{{ toYaml .Values.resources | indent 12 }} + - name: envoy + image: "{{ .Values.envoy.image.repository }}:{{ .Values.envoy.image.tag }}" + imagePullPolicy: {{ .Values.envoy.image.pullPolicy }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-c" + - "while [ $(netstat -plunt | grep tcp | grep -v envoy | wc -l) -ne 0 ]; do sleep 1; done;" + command: ["envoy"] + args: + - "-c" + - "/usr/local/conf/config.yaml" + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + - name: envoy-config + mountPath: /usr/local/conf/ + readOnly: true + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + ports: + - name: envoy + containerPort: {{ .Values.envoy.port }} + - name: admin + containerPort: {{ .Values.envoy.adminPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + resources: +{{ toYaml .Values.envoy.resources | indent 12 }} + strategy: + type: RollingUpdate diff --git a/manifests/bucketeer/charts/event-persister-goal-events-kafka/templates/envoy-configmap.yaml b/manifests/bucketeer/charts/event-persister-goal-events-kafka/templates/envoy-configmap.yaml new file mode 100644 index 000000000..9ac0986a4 --- /dev/null +++ b/manifests/bucketeer/charts/event-persister-goal-events-kafka/templates/envoy-configmap.yaml @@ -0,0 +1,229 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "event-persister-goal-events-kafka.fullname" . 
}}-envoy-config + namespace: {{ .Values.namespace }} + labels: + app: {{ template "event-persister-goal-events-kafka.fullname" . }} + chart: {{ template "event-persister-goal-events-kafka.chart" . }} + release: {{ template "event-persister-goal-events-kafka.fullname" . }} + heritage: {{ .Release.Service }} +data: + config.yaml: |- + admin: + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 + static_resources: + clusters: + - name: event-persister-goal-events-kafka + type: strict_dns + lb_policy: round_robin + connect_timeout: 5s + dns_lookup_family: V4_ONLY + load_assignment: + cluster_name: event-persister-goal-events-kafka + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 9090 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + - name: feature + type: strict_dns + lb_policy: round_robin + connect_timeout: 5s + dns_lookup_family: V4_ONLY + load_assignment: + cluster_name: feature + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: feature.{{ 
.Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + listeners: + - name: ingress + address: + socket_address: + address: 0.0.0.0 + port_value: 9000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + event-persister-goal-events-kafka: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: ingress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: / + route: + cluster: event-persister-goal-events-kafka + retry_policy: + 
num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: ingress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true + - name: egress + address: + socket_address: + address: 127.0.0.1 + port_value: 9001 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + feature: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: egress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.feature.FeatureService + route: + cluster: feature + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: egress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + 
private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true diff --git a/manifests/bucketeer/charts/event-persister-goal-events-kafka/templates/hpa.yaml b/manifests/bucketeer/charts/event-persister-goal-events-kafka/templates/hpa.yaml new file mode 100644 index 000000000..17b971d85 --- /dev/null +++ b/manifests/bucketeer/charts/event-persister-goal-events-kafka/templates/hpa.yaml @@ -0,0 +1,19 @@ +{{ if .Values.hpa.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "event-persister-goal-events-kafka.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "event-persister-goal-events-kafka.fullname" . }} + minReplicas: {{ .Values.hpa.minReplicas }} + maxReplicas: {{ .Values.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpa.metrics.cpu.targetAverageUtilization }} +{{ end }} diff --git a/manifests/bucketeer/charts/event-persister-goal-events-kafka/templates/service-cert-secret.yaml b/manifests/bucketeer/charts/event-persister-goal-events-kafka/templates/service-cert-secret.yaml new file mode 100644 index 000000000..e295e4db7 --- /dev/null +++ b/manifests/bucketeer/charts/event-persister-goal-events-kafka/templates/service-cert-secret.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.tls.service.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "event-persister-goal-events-kafka.fullname" . }}-service-cert + namespace: {{ .Values.namespace }} + labels: + app: {{ template "event-persister-goal-events-kafka.fullname" . }} + chart: {{ template "event-persister-goal-events-kafka.chart" . }} + release: {{ template "event-persister-goal-events-kafka.fullname" . 
}} + heritage: {{ .Release.Service }} +type: Opaque +data: + tls.crt: {{ required "Service TLS certificate is required" .Values.tls.service.cert | b64enc | quote }} + tls.key: {{ required "Service TLS key is required" .Values.tls.service.key | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/event-persister-goal-events-kafka/templates/service-token-secret.yaml b/manifests/bucketeer/charts/event-persister-goal-events-kafka/templates/service-token-secret.yaml new file mode 100644 index 000000000..fc7b703d2 --- /dev/null +++ b/manifests/bucketeer/charts/event-persister-goal-events-kafka/templates/service-token-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.serviceToken.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "event-persister-goal-events-kafka.fullname" . }}-service-token + namespace: {{ .Values.namespace }} + labels: + app: {{ template "event-persister-goal-events-kafka.name" . }} + chart: {{ template "event-persister-goal-events-kafka.chart" . }} + release: {{ template "event-persister-goal-events-kafka.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + token: {{ required "Service token is required" .Values.serviceToken.token | b64enc | quote }} +{{- end }} diff --git a/manifests/bucketeer/charts/event-persister-goal-events-kafka/templates/service.yaml b/manifests/bucketeer/charts/event-persister-goal-events-kafka/templates/service.yaml new file mode 100644 index 000000000..c9e3ec970 --- /dev/null +++ b/manifests/bucketeer/charts/event-persister-goal-events-kafka/templates/service.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "event-persister-goal-events-kafka.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "event-persister-goal-events-kafka.fullname" . }} + chart: {{ template "event-persister-goal-events-kafka.chart" . }} + release: {{ template "event-persister-goal-events-kafka.fullname" . 
}} + heritage: {{ .Release.Service }} + envoy: "true" + metrics: "true" +spec: + type: {{ .Values.service.type }} + clusterIP: {{ .Values.service.clusterIP }} + ports: + - name: service + port: {{ .Values.service.externalPort }} + targetPort: envoy + protocol: TCP + - name: metrics + port: {{ .Values.env.metricsPort }} + protocol: TCP + - name: admin + port: {{ .Values.envoy.adminPort }} + protocol: TCP + selector: + app: {{ template "event-persister-goal-events-kafka.name" . }} + release: {{ template "event-persister-goal-events-kafka.fullname" . }} diff --git a/manifests/bucketeer/charts/event-persister-goal-events-kafka/values.yaml b/manifests/bucketeer/charts/event-persister-goal-events-kafka/values.yaml new file mode 100644 index 000000000..401d7a8a0 --- /dev/null +++ b/manifests/bucketeer/charts/event-persister-goal-events-kafka/values.yaml @@ -0,0 +1,86 @@ +image: + repository: ghcr.io/bucketeer-io/bucketeer-event-persister + pullPolicy: IfNotPresent + +nameOverride: "event-persister" +fullnameOverride: "event-persister-goal-events-kafka" + +namespace: + +env: + project: + featureService: localhost:9001 + bigtableInstance: + location: asia-northeast1 + topic: + subscription: + writer: kafka + kafkaUrl: kafka-kafka-bootstrap.kafka.svc.cluster.local:9092 + kafkaTopicPrefix: + kafkaTopicDataType: goal-events + kafkaUsername: service + kafkaPassword: + logLevel: info + port: 9090 + metricsPort: 9002 + maxMps: "1000" + numWorkers: 5 + numWriters: 2 + flushSize: 100 + flushInterval: 2s + redis: + serverName: + addr: + pullerNumGoroutines: 5 + pullerMaxOutstandingMessages: "1000" + pullerMaxOutstandingBytes: "1000000000" + alloyDBRegion: + alloyDBClusterId: + alloyDBInstanceId: + alloyDBUser: + alloyDBPass: + alloyDBName: + +affinity: {} + +nodeSelector: {} + +hpa: + enabled: + minReplicas: + maxReplicas: + metrics: + cpu: + targetAverageUtilization: + +envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 
9000 + adminPort: 8001 + resources: {} + +tls: + service: + secret: + cert: + key: + +serviceToken: + secret: + token: + +health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + +resources: {} + +service: + type: ClusterIP + clusterIP: None + externalPort: 9000 diff --git a/manifests/bucketeer/charts/event-persister-user-events-kafka/.helmignore b/manifests/bucketeer/charts/event-persister-user-events-kafka/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/manifests/bucketeer/charts/event-persister-user-events-kafka/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/manifests/bucketeer/charts/event-persister-user-events-kafka/Chart.yaml b/manifests/bucketeer/charts/event-persister-user-events-kafka/Chart.yaml new file mode 100644 index 000000000..8d7436b54 --- /dev/null +++ b/manifests/bucketeer/charts/event-persister-user-events-kafka/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for bucketeer-event-persister-user-events-kafka +name: event-persister-user-events-kafka +version: 1.0.0 diff --git a/manifests/bucketeer/charts/event-persister-user-events-kafka/templates/NOTES.txt b/manifests/bucketeer/charts/event-persister-user-events-kafka/templates/NOTES.txt new file mode 100644 index 000000000..e69de29bb diff --git a/manifests/bucketeer/charts/event-persister-user-events-kafka/templates/_helpers.tpl b/manifests/bucketeer/charts/event-persister-user-events-kafka/templates/_helpers.tpl new file mode 100644 index 000000000..81c6c0714 --- /dev/null +++ b/manifests/bucketeer/charts/event-persister-user-events-kafka/templates/_helpers.tpl 
@@ -0,0 +1,48 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "event-persister-user-events-kafka.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "event-persister-user-events-kafka.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "event-persister-user-events-kafka.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "service-cert-secret" -}} +{{- if .Values.tls.service.secret }} +{{- printf "%s" .Values.tls.service.secret -}} +{{- else -}} +{{ template "event-persister-user-events-kafka.fullname" . }}-service-cert +{{- end -}} +{{- end -}} + +{{- define "service-token-secret" -}} +{{- if .Values.serviceToken.secret }} +{{- printf "%s" .Values.serviceToken.secret -}} +{{- else -}} +{{ template "event-persister-user-events-kafka.fullname" . 
}}-service-token +{{- end -}} +{{- end -}} diff --git a/manifests/bucketeer/charts/event-persister-user-events-kafka/templates/deployment.yaml b/manifests/bucketeer/charts/event-persister-user-events-kafka/templates/deployment.yaml new file mode 100644 index 000000000..fc7987c5c --- /dev/null +++ b/manifests/bucketeer/charts/event-persister-user-events-kafka/templates/deployment.yaml @@ -0,0 +1,193 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "event-persister-user-events-kafka.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "event-persister-user-events-kafka.fullname" . }} + chart: {{ template "event-persister-user-events-kafka.chart" . }} + release: {{ template "event-persister-user-events-kafka.fullname" . }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "event-persister-user-events-kafka.name" . }} + release: {{ template "event-persister-user-events-kafka.fullname" . }} + template: + metadata: + labels: + app: {{ template "event-persister-user-events-kafka.name" . }} + release: {{ template "event-persister-user-events-kafka.fullname" . }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/envoy-configmap.yaml") . | sha256sum }} + spec: + {{- with .Values.global.image.imagePullSecrets }} + imagePullSecrets: {{- toYaml . | nindent 8 }} + {{- end }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + volumes: + - name: envoy-config + configMap: + name: {{ template "event-persister-user-events-kafka.fullname" . }}-envoy-config + - name: service-cert-secret + secret: + secretName: {{ template "service-cert-secret" . }} + - name: service-token-secret + secret: + secretName: {{ template "service-token-secret" . 
}} + containers: + - name: "event-persister" + image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: ["server"] + env: + - name: BUCKETEER_EVENT_PERSISTER_PROJECT + value: "{{ .Values.env.project }}" + - name: BUCKETEER_EVENT_PERSISTER_FEATURE_SERVICE + value: "{{ .Values.env.featureService }}" + - name: BUCKETEER_EVENT_PERSISTER_BIGTABLE_INSTANCE + value: "{{ .Values.env.bigtableInstance }}" + - name: BUCKETEER_EVENT_PERSISTER_LOCATION + value: "{{ .Values.env.location }}" + - name: BUCKETEER_EVENT_PERSISTER_TOPIC + value: "{{ .Values.env.topic }}" + - name: BUCKETEER_EVENT_PERSISTER_SUBSCRIPTION + value: "{{ .Values.env.subscription }}" + - name: BUCKETEER_EVENT_PERSISTER_WRITER + value: "{{ .Values.env.writer }}" + {{ if .Values.env.kafkaPassword }} + - name: BUCKETEER_EVENT_PERSISTER_KAFKA_URL + value: "{{ .Values.env.kafkaUrl }}" + - name: BUCKETEER_EVENT_PERSISTER_KAFKA_TOPIC_PREFIX + value: "{{ .Values.env.kafkaTopicPrefix }}" + - name: BUCKETEER_EVENT_PERSISTER_KAFKA_TOPIC_DATA_TYPE + value: "{{ .Values.env.kafkaTopicDataType }}" + - name: BUCKETEER_EVENT_PERSISTER_KAFKA_USERNAME + value: "{{ .Values.env.kafkaUsername }}" + - name: BUCKETEER_EVENT_PERSISTER_KAFKA_PASSWORD + value: "{{ .Values.env.kafkaPassword }}" + {{ end }} + - name: BUCKETEER_EVENT_PERSISTER_PORT + value: "{{ .Values.env.port }}" + - name: BUCKETEER_EVENT_PERSISTER_METRICS_PORT + value: "{{ .Values.env.metricsPort }}" + - name: BUCKETEER_EVENT_PERSISTER_LOG_LEVEL + value: "{{ .Values.env.logLevel }}" + - name: BUCKETEER_EVENT_PERSISTER_MAX_MPS + value: "{{ .Values.env.maxMps }}" + - name: BUCKETEER_EVENT_PERSISTER_NUM_WORKERS + value: "{{ .Values.env.numWorkers }}" + - name: BUCKETEER_EVENT_PERSISTER_NUM_WRITERS + value: "{{ .Values.env.numWriters }}" + - name: BUCKETEER_EVENT_PERSISTER_FLUSH_SIZE + value: "{{ .Values.env.flushSize }}" + - name: BUCKETEER_EVENT_PERSISTER_FLUSH_INTERVAL + value: "{{ 
.Values.env.flushInterval }}" + - name: BUCKETEER_EVENT_PERSISTER_PULLER_NUM_GOROUTINES + value: "{{ .Values.env.pullerNumGoroutines }}" + - name: BUCKETEER_EVENT_PERSISTER_PULLER_MAX_OUTSTANDING_MESSAGES + value: "{{ .Values.env.pullerMaxOutstandingMessages }}" + - name: BUCKETEER_EVENT_PERSISTER_PULLER_MAX_OUTSTANDING_BYTES + value: "{{ .Values.env.pullerMaxOutstandingBytes }}" + - name: BUCKETEER_EVENT_PERSISTER_REDIS_SERVER_NAME + value: "{{ .Values.env.redis.serverName }}" + - name: BUCKETEER_EVENT_PERSISTER_REDIS_ADDR + value: "{{ .Values.env.redis.addr }}" + - name: BUCKETEER_EVENT_PERSISTER_CERT + value: /usr/local/certs/service/tls.crt + - name: BUCKETEER_EVENT_PERSISTER_KEY + value: /usr/local/certs/service/tls.key + - name: BUCKETEER_EVENT_PERSISTER_SERVICE_TOKEN + value: /usr/local/service-token/token + - name: BUCKETEER_EVENT_PERSISTER_ALLOYDB_REGION + value: "{{ .Values.env.alloyDBRegion }}" + - name: BUCKETEER_EVENT_PERSISTER_ALLOYDB_CLUSTER_ID + value: "{{ .Values.env.alloyDBClusterId}}" + - name: BUCKETEER_EVENT_PERSISTER_ALLOYDB_INSTANCE_ID + value: "{{ .Values.env.alloyDBInstanceId }}" + - name: BUCKETEER_EVENT_PERSISTER_ALLOYDB_USER + value: "{{ .Values.env.alloyDBUser }}" + - name: BUCKETEER_EVENT_PERSISTER_ALLOYDB_PASS + value: "{{ .Values.env.alloyDBPass }}" + - name: BUCKETEER_EVENT_PERSISTER_ALLOYDB_NAME + value: "{{ .Values.env.alloyDBName }}" + volumeMounts: + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + - name: service-token-secret + mountPath: /usr/local/service-token + readOnly: true + ports: + - name: service + containerPort: {{ .Values.env.port }} + - name: metrics + containerPort: {{ .Values.env.metricsPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: service + scheme: HTTPS + readinessProbe: + 
initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: service + scheme: HTTPS + resources: +{{ toYaml .Values.resources | indent 12 }} + - name: envoy + image: "{{ .Values.envoy.image.repository }}:{{ .Values.envoy.image.tag }}" + imagePullPolicy: {{ .Values.envoy.image.pullPolicy }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-c" + - "while [ $(netstat -plunt | grep tcp | grep -v envoy | wc -l) -ne 0 ]; do sleep 1; done;" + command: ["envoy"] + args: + - "-c" + - "/usr/local/conf/config.yaml" + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + - name: envoy-config + mountPath: /usr/local/conf/ + readOnly: true + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + ports: + - name: envoy + containerPort: {{ .Values.envoy.port }} + - name: admin + containerPort: {{ .Values.envoy.adminPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + resources: +{{ toYaml .Values.envoy.resources | indent 12 }} + strategy: + type: RollingUpdate diff --git a/manifests/bucketeer/charts/event-persister-user-events-kafka/templates/envoy-configmap.yaml b/manifests/bucketeer/charts/event-persister-user-events-kafka/templates/envoy-configmap.yaml new file mode 100644 index 000000000..8b6f02964 --- /dev/null +++ b/manifests/bucketeer/charts/event-persister-user-events-kafka/templates/envoy-configmap.yaml @@ -0,0 +1,229 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "event-persister-user-events-kafka.fullname" . 
}}-envoy-config + namespace: {{ .Values.namespace }} + labels: + app: {{ template "event-persister-user-events-kafka.fullname" . }} + chart: {{ template "event-persister-user-events-kafka.chart" . }} + release: {{ template "event-persister-user-events-kafka.fullname" . }} + heritage: {{ .Release.Service }} +data: + config.yaml: |- + admin: + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 + static_resources: + clusters: + - name: event-persister-user-events-kafka + type: strict_dns + lb_policy: round_robin + connect_timeout: 5s + dns_lookup_family: V4_ONLY + load_assignment: + cluster_name: event-persister-user-events-kafka + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 9090 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + - name: feature + type: strict_dns + lb_policy: round_robin + dns_lookup_family: V4_ONLY + connect_timeout: 5s + load_assignment: + cluster_name: feature + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: feature.{{ 
.Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + listeners: + - name: ingress + address: + socket_address: + address: 0.0.0.0 + port_value: 9000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + event-persister-user-events-kafka: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: ingress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: / + route: + cluster: event-persister-user-events-kafka + retry_policy: + 
num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: ingress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true + - name: egress + address: + socket_address: + address: 127.0.0.1 + port_value: 9001 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + feature: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: egress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.feature.FeatureService + route: + cluster: feature + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: egress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + 
private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true diff --git a/manifests/bucketeer/charts/event-persister-user-events-kafka/templates/hpa.yaml b/manifests/bucketeer/charts/event-persister-user-events-kafka/templates/hpa.yaml new file mode 100644 index 000000000..a55d24547 --- /dev/null +++ b/manifests/bucketeer/charts/event-persister-user-events-kafka/templates/hpa.yaml @@ -0,0 +1,19 @@ +{{ if .Values.hpa.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "event-persister-user-events-kafka.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "event-persister-user-events-kafka.fullname" . }} + minReplicas: {{ .Values.hpa.minReplicas }} + maxReplicas: {{ .Values.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpa.metrics.cpu.targetAverageUtilization }} +{{ end }} diff --git a/manifests/bucketeer/charts/event-persister-user-events-kafka/templates/service-cert-secret.yaml b/manifests/bucketeer/charts/event-persister-user-events-kafka/templates/service-cert-secret.yaml new file mode 100644 index 000000000..89af763f2 --- /dev/null +++ b/manifests/bucketeer/charts/event-persister-user-events-kafka/templates/service-cert-secret.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.tls.service.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "event-persister-user-events-kafka.fullname" . }}-service-cert + namespace: {{ .Values.namespace }} + labels: + app: {{ template "event-persister-user-events-kafka.fullname" . }} + chart: {{ template "event-persister-user-events-kafka.chart" . }} + release: {{ template "event-persister-user-events-kafka.fullname" . 
}} + heritage: {{ .Release.Service }} +type: Opaque +data: + tls.crt: {{ required "Service TLS certificate is required" .Values.tls.service.cert | b64enc | quote }} + tls.key: {{ required "Service TLS key is required" .Values.tls.service.key | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/event-persister-user-events-kafka/templates/service-token-secret.yaml b/manifests/bucketeer/charts/event-persister-user-events-kafka/templates/service-token-secret.yaml new file mode 100644 index 000000000..d2d1e1fbd --- /dev/null +++ b/manifests/bucketeer/charts/event-persister-user-events-kafka/templates/service-token-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.serviceToken.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "event-persister-user-events-kafka.fullname" . }}-service-token + namespace: {{ .Values.namespace }} + labels: + app: {{ template "event-persister-user-events-kafka.name" . }} + chart: {{ template "event-persister-user-events-kafka.chart" . }} + release: {{ template "event-persister-user-events-kafka.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + token: {{ required "Service token is required" .Values.serviceToken.token | b64enc | quote }} +{{- end }} diff --git a/manifests/bucketeer/charts/event-persister-user-events-kafka/templates/service.yaml b/manifests/bucketeer/charts/event-persister-user-events-kafka/templates/service.yaml new file mode 100644 index 000000000..7263a85c4 --- /dev/null +++ b/manifests/bucketeer/charts/event-persister-user-events-kafka/templates/service.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "event-persister-user-events-kafka.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "event-persister-user-events-kafka.fullname" . }} + chart: {{ template "event-persister-user-events-kafka.chart" . }} + release: {{ template "event-persister-user-events-kafka.fullname" . 
}} + heritage: {{ .Release.Service }} + envoy: "true" + metrics: "true" +spec: + type: {{ .Values.service.type }} + clusterIP: {{ .Values.service.clusterIP }} + ports: + - name: service + port: {{ .Values.service.externalPort }} + targetPort: envoy + protocol: TCP + - name: metrics + port: {{ .Values.env.metricsPort }} + protocol: TCP + - name: admin + port: {{ .Values.envoy.adminPort }} + protocol: TCP + selector: + app: {{ template "event-persister-user-events-kafka.name" . }} + release: {{ template "event-persister-user-events-kafka.fullname" . }} diff --git a/manifests/bucketeer/charts/event-persister-user-events-kafka/values.yaml b/manifests/bucketeer/charts/event-persister-user-events-kafka/values.yaml new file mode 100644 index 000000000..2e2048737 --- /dev/null +++ b/manifests/bucketeer/charts/event-persister-user-events-kafka/values.yaml @@ -0,0 +1,86 @@ +image: + repository: ghcr.io/bucketeer-io/bucketeer-event-persister + pullPolicy: IfNotPresent + +nameOverride: "event-persister" +fullnameOverride: "event-persister-user-events-kafka" + +namespace: + +env: + project: + featureService: localhost:9001 + bigtableInstance: + location: asia-northeast1 + topic: + subscription: + writer: kafka + kafkaUrl: kafka-kafka-bootstrap.kafka.svc.cluster.local:9092 + kafkaTopicPrefix: + kafkaTopicDataType: user-events + kafkaUsername: service + kafkaPassword: + logLevel: info + port: 9090 + metricsPort: 9002 + maxMps: "1000" + numWorkers: 5 + numWriters: 2 + flushSize: 100 + flushInterval: 2s + redis: + serverName: + addr: + pullerNumGoroutines: 5 + pullerMaxOutstandingMessages: "1000" + pullerMaxOutstandingBytes: "1000000000" + alloyDBRegion: + alloyDBClusterId: + alloyDBInstanceId: + alloyDBUser: + alloyDBPass: + alloyDBName: + +affinity: {} + +nodeSelector: {} + +hpa: + enabled: + minReplicas: + maxReplicas: + metrics: + cpu: + targetAverageUtilization: + +envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 
9000 + adminPort: 8001 + resources: {} + +tls: + service: + secret: + cert: + key: + +serviceToken: + secret: + token: + +health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + +resources: {} + +service: + type: ClusterIP + clusterIP: None + externalPort: 9000 diff --git a/manifests/bucketeer/charts/experiment/Chart.yaml b/manifests/bucketeer/charts/experiment/Chart.yaml new file mode 100644 index 000000000..940c66d0a --- /dev/null +++ b/manifests/bucketeer/charts/experiment/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for bucketeer-experiment +name: experiment +version: 1.0.0 diff --git a/manifests/bucketeer/charts/experiment/templates/NOTES.txt b/manifests/bucketeer/charts/experiment/templates/NOTES.txt new file mode 100644 index 000000000..d7a32b580 --- /dev/null +++ b/manifests/bucketeer/charts/experiment/templates/NOTES.txt @@ -0,0 +1,15 @@ +1. Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "experiment.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "experiment.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "experiment.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "experiment.name" . 
}},release={{ template "experiment.fullname" . }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/manifests/bucketeer/charts/experiment/templates/_helpers.tpl b/manifests/bucketeer/charts/experiment/templates/_helpers.tpl new file mode 100644 index 000000000..922ee8bc9 --- /dev/null +++ b/manifests/bucketeer/charts/experiment/templates/_helpers.tpl @@ -0,0 +1,56 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "experiment.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "experiment.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "experiment.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "service-cert-secret" -}} +{{- if .Values.tls.service.secret }} +{{- printf "%s" .Values.tls.service.secret -}} +{{- else -}} +{{ template "experiment.fullname" . }}-service-cert +{{- end -}} +{{- end -}} + +{{- define "oauth-key-secret" -}} +{{- if .Values.oauth.key.secret }} +{{- printf "%s" .Values.oauth.key.secret -}} +{{- else -}} +{{ template "experiment.fullname" . 
}}-oauth-key +{{- end -}} +{{- end -}} + +{{- define "service-token-secret" -}} +{{- if .Values.serviceToken.secret }} +{{- printf "%s" .Values.serviceToken.secret -}} +{{- else -}} +{{ template "experiment.fullname" . }}-service-token +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/manifests/bucketeer/charts/experiment/templates/deployment.yaml b/manifests/bucketeer/charts/experiment/templates/deployment.yaml new file mode 100644 index 000000000..e210ba8d6 --- /dev/null +++ b/manifests/bucketeer/charts/experiment/templates/deployment.yaml @@ -0,0 +1,165 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "experiment.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "experiment.name" . }} + chart: {{ template "experiment.chart" . }} + release: {{ template "experiment.fullname" . }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "experiment.name" . }} + release: {{ template "experiment.fullname" . }} + template: + metadata: + labels: + app: {{ template "experiment.name" . }} + release: {{ template "experiment.fullname" . }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/envoy-configmap.yaml") . | sha256sum }} + spec: + {{- with .Values.global.image.imagePullSecrets }} + imagePullSecrets: {{- toYaml . | nindent 8 }} + {{- end }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + volumes: + - name: envoy-config + configMap: + name: {{ template "experiment.fullname" . }}-envoy-config + - name: service-cert-secret + secret: + secretName: {{ template "service-cert-secret" . }} + - name: oauth-key-secret + secret: + secretName: {{ template "oauth-key-secret" . }} + - name: service-token-secret + secret: + secretName: {{ template "service-token-secret" . 
}} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: ["server"] + env: + - name: BUCKETEER_EXPERIMENT_PROJECT + value: "{{ .Values.env.project }}" + - name: BUCKETEER_EXPERIMENT_MYSQL_USER + value: "{{ .Values.env.mysqlUser }}" + - name: BUCKETEER_EXPERIMENT_MYSQL_PASS + value: "{{ .Values.env.mysqlPass }}" + - name: BUCKETEER_EXPERIMENT_MYSQL_HOST + value: "{{ .Values.env.mysqlHost }}" + - name: BUCKETEER_EXPERIMENT_MYSQL_PORT + value: "{{ .Values.env.mysqlPort }}" + - name: BUCKETEER_EXPERIMENT_MYSQL_DB_NAME + value: "{{ .Values.env.mysqlDbName }}" + - name: BUCKETEER_EXPERIMENT_TOPIC + value: "{{ .Values.env.topic }}" + - name: BUCKETEER_EXPERIMENT_FEATURE_SERVICE + value: "{{ .Values.env.featureService }}" + - name: BUCKETEER_EXPERIMENT_ACCOUNT_SERVICE + value: "{{ .Values.env.accountService }}" + - name: BUCKETEER_EXPERIMENT_PORT + value: "{{ .Values.env.port }}" + - name: BUCKETEER_EXPERIMENT_METRICS_PORT + value: "{{ .Values.env.metricsPort }}" + - name: BUCKETEER_EXPERIMENT_LOG_LEVEL + value: "{{ .Values.env.logLevel }}" + - name: BUCKETEER_EXPERIMENT_OAUTH_CLIENT_ID + value: "{{ .Values.oauth.clientId }}" + - name: BUCKETEER_EXPERIMENT_OAUTH_ISSUER + value: "{{ .Values.oauth.issuer }}" + - name: BUCKETEER_EXPERIMENT_OAUTH_KEY + value: /usr/local/oauth-key/public.pem + - name: BUCKETEER_EXPERIMENT_CERT + value: /usr/local/certs/service/tls.crt + - name: BUCKETEER_EXPERIMENT_KEY + value: /usr/local/certs/service/tls.key + - name: BUCKETEER_EXPERIMENT_SERVICE_TOKEN + value: /usr/local/service-token/token + volumeMounts: + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + - name: oauth-key-secret + mountPath: /usr/local/oauth-key + readOnly: true + - name: service-token-secret + mountPath: /usr/local/service-token + readOnly: true + ports: + - name: service + containerPort: {{ .Values.env.port }} + - 
name: metrics + containerPort: {{ .Values.env.metricsPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: service + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: service + scheme: HTTPS + resources: +{{ toYaml .Values.resources | indent 12 }} + - name: envoy + image: "{{ .Values.envoy.image.repository }}:{{ .Values.envoy.image.tag }}" + imagePullPolicy: {{ .Values.envoy.image.pullPolicy }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-c" + - "while [ $(netstat -plunt | grep tcp | grep -v envoy | wc -l) -ne 0 ]; do sleep 1; done;" + command: ["envoy"] + args: + - "-c" + - "/usr/local/conf/config.yaml" + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + - name: envoy-config + mountPath: /usr/local/conf/ + readOnly: true + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + ports: + - name: envoy + containerPort: {{ .Values.envoy.port }} + - name: admin + containerPort: {{ .Values.envoy.adminPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + resources: +{{ toYaml .Values.envoy.resources | indent 12 }} + strategy: + type: RollingUpdate diff --git a/manifests/bucketeer/charts/experiment/templates/envoy-configmap.yaml b/manifests/bucketeer/charts/experiment/templates/envoy-configmap.yaml new file mode 100644 index 000000000..918f00afa --- /dev/null +++ 
b/manifests/bucketeer/charts/experiment/templates/envoy-configmap.yaml @@ -0,0 +1,283 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "experiment.fullname" . }}-envoy-config + namespace: {{ .Values.namespace }} + labels: + app: {{ template "experiment.name" . }} + chart: {{ template "experiment.chart" . }} + release: {{ template "experiment.fullname" . }} + heritage: {{ .Release.Service }} +data: + config.yaml: |- + admin: + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 + static_resources: + clusters: + - name: experiment + type: strict_dns + lb_policy: round_robin + connect_timeout: 5s + dns_lookup_family: V4_ONLY + load_assignment: + cluster_name: experiment + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 9090 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + - name: feature + type: strict_dns + lb_policy: round_robin + connect_timeout: 5s + dns_lookup_family: V4_ONLY + load_assignment: + cluster_name: feature + endpoints: + - lb_endpoints: + - endpoint: + 
address: + socket_address: + address: feature.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + - name: account + type: strict_dns + lb_policy: round_robin + connect_timeout: 5s + dns_lookup_family: V4_ONLY + load_assignment: + cluster_name: account + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: account.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + 
ignore_health_on_host_removal: true + listeners: + - name: ingress + address: + socket_address: + address: 0.0.0.0 + port_value: 9000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + experiment: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: ingress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: / + route: + cluster: experiment + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: ingress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true + - name: egress + address: + socket_address: + address: 127.0.0.1 + port_value: 9001 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': 
type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + account: + value: 25 + feature: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: egress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.feature.FeatureService + route: + cluster: feature + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.account.AccountService + route: + cluster: account + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: egress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true diff --git a/manifests/bucketeer/charts/experiment/templates/hpa.yaml b/manifests/bucketeer/charts/experiment/templates/hpa.yaml new file mode 100644 index 000000000..99466b0d3 --- /dev/null +++ b/manifests/bucketeer/charts/experiment/templates/hpa.yaml @@ -0,0 +1,19 @@ +{{ if .Values.hpa.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "experiment.fullname" . 
}} + namespace: {{ .Values.namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "experiment.fullname" . }} + minReplicas: {{ .Values.hpa.minReplicas }} + maxReplicas: {{ .Values.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpa.metrics.cpu.targetAverageUtilization }} +{{ end }} diff --git a/manifests/bucketeer/charts/experiment/templates/oauth-key-secret.yaml b/manifests/bucketeer/charts/experiment/templates/oauth-key-secret.yaml new file mode 100644 index 000000000..73e5f62cc --- /dev/null +++ b/manifests/bucketeer/charts/experiment/templates/oauth-key-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.oauth.key.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "experiment.fullname" . }}-oauth-key + namespace: {{ .Values.namespace }} + labels: + app: {{ template "experiment.name" . }} + chart: {{ template "experiment.chart" . }} + release: {{ template "experiment.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + public.pem: {{ required "OAuth key is required" .Values.oauth.key.public | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/experiment/templates/pdb.yaml b/manifests/bucketeer/charts/experiment/templates/pdb.yaml new file mode 100644 index 000000000..164b2ff83 --- /dev/null +++ b/manifests/bucketeer/charts/experiment/templates/pdb.yaml @@ -0,0 +1,12 @@ +{{ if .Values.pdb.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "experiment.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + selector: + matchLabels: + app: {{ template "experiment.name" . 
}} +{{ end }} diff --git a/manifests/bucketeer/charts/experiment/templates/service-cert-secret.yaml b/manifests/bucketeer/charts/experiment/templates/service-cert-secret.yaml new file mode 100644 index 000000000..ab305c79b --- /dev/null +++ b/manifests/bucketeer/charts/experiment/templates/service-cert-secret.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.tls.service.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "experiment.fullname" . }}-service-cert + namespace: {{ .Values.namespace }} + labels: + app: {{ template "experiment.name" . }} + chart: {{ template "experiment.chart" . }} + release: {{ template "experiment.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + tls.crt: {{ required "Service TLS certificate is required" .Values.tls.service.cert | b64enc | quote }} + tls.key: {{ required "Service TLS key is required" .Values.tls.service.key | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/experiment/templates/service-token-secret.yaml b/manifests/bucketeer/charts/experiment/templates/service-token-secret.yaml new file mode 100644 index 000000000..864b624a6 --- /dev/null +++ b/manifests/bucketeer/charts/experiment/templates/service-token-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.serviceToken.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "experiment.fullname" . }}-service-token + namespace: {{ .Values.namespace }} + labels: + app: {{ template "experiment.name" . }} + chart: {{ template "experiment.chart" . }} + release: {{ template "experiment.fullname" . 
}} + heritage: {{ .Release.Service }} +type: Opaque +data: + token: {{ required "Service token is required" .Values.serviceToken.token | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/experiment/templates/service.yaml b/manifests/bucketeer/charts/experiment/templates/service.yaml new file mode 100644 index 000000000..75479a30e --- /dev/null +++ b/manifests/bucketeer/charts/experiment/templates/service.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "experiment.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "experiment.name" . }} + chart: {{ template "experiment.chart" . }} + release: {{ template "experiment.fullname" . }} + heritage: {{ .Release.Service }} + envoy: "true" + metrics: "true" +spec: + type: {{ .Values.service.type }} + clusterIP: {{ .Values.service.clusterIP }} + ports: + - name: service + port: {{ .Values.service.externalPort }} + targetPort: envoy + protocol: TCP + - name: metrics + port: {{ .Values.env.metricsPort }} + protocol: TCP + - name: admin + port: {{ .Values.envoy.adminPort }} + protocol: TCP + selector: + app: {{ template "experiment.name" . }} + release: {{ template "experiment.fullname" . 
}} + \ No newline at end of file diff --git a/manifests/bucketeer/charts/experiment/values.yaml b/manifests/bucketeer/charts/experiment/values.yaml new file mode 100644 index 000000000..bc2f730aa --- /dev/null +++ b/manifests/bucketeer/charts/experiment/values.yaml @@ -0,0 +1,75 @@ +image: + repository: ghcr.io/bucketeer-io/bucketeer-experiment + pullPolicy: IfNotPresent + +fullnameOverride: "experiment" + +namespace: + +env: + project: + mysqlUser: + mysqlPass: + mysqlHost: + mysqlPort: 3306 + mysqlDbName: + logLevel: info + port: 9090 + metricsPort: 9002 + topic: + featureService: localhost:9001 + accountService: localhost:9001 + +affinity: {} + +nodeSelector: {} + +pdb: + enabled: + maxUnavailable: 50% + +hpa: + enabled: + minReplicas: + maxReplicas: + metrics: + cpu: + targetAverageUtilization: + +tls: + service: + secret: + cert: + key: + +oauth: + key: + secret: + public: + clientId: + issuer: + +serviceToken: + secret: + token: + +envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + port: 9000 + adminPort: 8001 + resources: {} + +service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + +health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + +resources: {} diff --git a/manifests/bucketeer/charts/feature-recorder/.helmignore b/manifests/bucketeer/charts/feature-recorder/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/manifests/bucketeer/charts/feature-recorder/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line.
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/manifests/bucketeer/charts/feature-recorder/Chart.yaml b/manifests/bucketeer/charts/feature-recorder/Chart.yaml new file mode 100644 index 000000000..71626f09f --- /dev/null +++ b/manifests/bucketeer/charts/feature-recorder/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for bucketeer-feature-recorder +name: feature-recorder +version: 1.0.0 diff --git a/manifests/bucketeer/charts/feature-recorder/templates/NOTES.txt b/manifests/bucketeer/charts/feature-recorder/templates/NOTES.txt new file mode 100644 index 000000000..a2c276bf8 --- /dev/null +++ b/manifests/bucketeer/charts/feature-recorder/templates/NOTES.txt @@ -0,0 +1,15 @@ +1. Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "feature-recorder.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "feature-recorder.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "feature-recorder.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "feature-recorder.name" . 
}},release={{ template "feature-recorder.fullname" . }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/manifests/bucketeer/charts/feature-recorder/templates/_helpers.tpl b/manifests/bucketeer/charts/feature-recorder/templates/_helpers.tpl new file mode 100644 index 000000000..7b5ac44bd --- /dev/null +++ b/manifests/bucketeer/charts/feature-recorder/templates/_helpers.tpl @@ -0,0 +1,48 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "feature-recorder.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "feature-recorder.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "feature-recorder.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "service-cert-secret" -}} +{{- if .Values.tls.service.secret }} +{{- printf "%s" .Values.tls.service.secret -}} +{{- else -}} +{{ template "feature-recorder.fullname" . 
}}-service-cert +{{- end -}} +{{- end -}} + +{{- define "service-token-secret" -}} +{{- if .Values.serviceToken.secret }} +{{- printf "%s" .Values.serviceToken.secret -}} +{{- else -}} +{{ template "feature-recorder.fullname" . }}-service-token +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/manifests/bucketeer/charts/feature-recorder/templates/deployment.yaml b/manifests/bucketeer/charts/feature-recorder/templates/deployment.yaml new file mode 100644 index 000000000..a2366289c --- /dev/null +++ b/manifests/bucketeer/charts/feature-recorder/templates/deployment.yaml @@ -0,0 +1,159 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "feature-recorder.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "feature-recorder.name" . }} + chart: {{ template "feature-recorder.chart" . }} + release: {{ template "feature-recorder.fullname" . }} + heritage: {{ .Release.Service }} +spec: + replicas: 1 + selector: + matchLabels: + app: {{ template "feature-recorder.name" . }} + release: {{ template "feature-recorder.fullname" . }} + template: + metadata: + labels: + app: {{ template "feature-recorder.name" . }} + release: {{ template "feature-recorder.fullname" . }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/envoy-configmap.yaml") . | sha256sum }} + spec: + {{- with .Values.global.image.imagePullSecrets }} + imagePullSecrets: {{- toYaml . | nindent 8 }} + {{- end }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + volumes: + - name: envoy-config + configMap: + name: {{ template "feature-recorder.fullname" . }}-envoy-config + - name: service-cert-secret + secret: + secretName: {{ template "service-cert-secret" . 
}} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: ["recorder"] + env: + - name: BUCKETEER_FEATURE_PROJECT + value: "{{ .Values.env.project }}" + # TODO: Remove database info once mysql migration is done + - name: BUCKETEER_FEATURE_DATABASE + value: "{{ .Values.env.database }}" + - name: BUCKETEER_FEATURE_MYSQL_USER + value: "{{ .Values.env.mysqlUser }}" + - name: BUCKETEER_FEATURE_MYSQL_PASS + value: "{{ .Values.env.mysqlPass }}" + - name: BUCKETEER_FEATURE_MYSQL_HOST + value: "{{ .Values.env.mysqlHost }}" + - name: BUCKETEER_FEATURE_MYSQL_PORT + value: "{{ .Values.env.mysqlPort }}" + - name: BUCKETEER_FEATURE_MYSQL_DB_NAME + value: "{{ .Values.env.mysqlDbName }}" + - name: BUCKETEER_FEATURE_TOPIC + value: "{{ .Values.env.topic }}" + - name: BUCKETEER_FEATURE_SUBSCRIPTION + value: "{{ .Values.env.subscription }}" + - name: BUCKETEER_FEATURE_MAX_MPS + value: "{{ .Values.env.maxMps }}" + - name: BUCKETEER_FEATURE_NUM_WORKERS + value: "{{ .Values.env.numWorkers }}" + - name: BUCKETEER_FEATURE_PULLER_NUM_GOROUTINES + value: "{{ .Values.env.pullerNumGoroutines }}" + - name: BUCKETEER_FEATURE_PULLER_MAX_OUTSTANDING_MESSAGES + value: "{{ .Values.env.pullerMaxOutstandingMessages }}" + - name: BUCKETEER_FEATURE_PULLER_MAX_OUTSTANDING_BYTES + value: "{{ .Values.env.pullerMaxOutstandingBytes }}" + - name: BUCKETEER_FEATURE_FLUSH_INTERVAL + value: "{{ .Values.env.flushInterval }}" + - name: BUCKETEER_FEATURE_PORT + value: "{{ .Values.env.port }}" + - name: BUCKETEER_FEATURE_METRICS_PORT + value: "{{ .Values.env.metricsPort }}" + - name: BUCKETEER_FEATURE_LOG_LEVEL + value: "{{ .Values.env.logLevel }}" + - name: BUCKETEER_FEATURE_CERT + value: /usr/local/certs/service/tls.crt + - name: BUCKETEER_FEATURE_KEY + value: /usr/local/certs/service/tls.key + volumeMounts: + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + 
ports: + - name: service + containerPort: {{ .Values.env.port }} + - name: metrics + containerPort: {{ .Values.env.metricsPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: service + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: service + scheme: HTTPS + resources: +{{ toYaml .Values.resources | indent 12 }} + - name: envoy + image: "{{ .Values.envoy.image.repository }}:{{ .Values.envoy.image.tag }}" + imagePullPolicy: {{ .Values.envoy.image.pullPolicy }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-c" + - "while [ $(netstat -plunt | grep tcp | grep -v envoy | wc -l) -ne 0 ]; do sleep 1; done;" + command: ["envoy"] + args: + - "-c" + - "/usr/local/conf/config.yaml" + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + - name: envoy-config + mountPath: /usr/local/conf/ + readOnly: true + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + ports: + - name: envoy + containerPort: {{ .Values.envoy.port }} + - name: admin + containerPort: {{ .Values.envoy.adminPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + resources: +{{ toYaml .Values.envoy.resources | indent 12 }} + strategy: + type: RollingUpdate diff --git a/manifests/bucketeer/charts/feature-recorder/templates/envoy-configmap.yaml b/manifests/bucketeer/charts/feature-recorder/templates/envoy-configmap.yaml new file 
mode 100644 index 000000000..00eccd02e --- /dev/null +++ b/manifests/bucketeer/charts/feature-recorder/templates/envoy-configmap.yaml @@ -0,0 +1,127 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "feature-recorder.fullname" . }}-envoy-config + namespace: {{ .Values.namespace }} + labels: + app: {{ template "feature-recorder.name" . }} + chart: {{ template "feature-recorder.chart" . }} + release: {{ template "feature-recorder.fullname" . }} + heritage: {{ .Release.Service }} +data: + config.yaml: |- + admin: + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 + static_resources: + clusters: + - name: feature-recorder + type: strict_dns + lb_policy: round_robin + connect_timeout: 5s + dns_lookup_family: V4_ONLY + load_assignment: + cluster_name: feature-recorder + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 9090 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + listeners: + - name: ingress + address: + socket_address: + address: 0.0.0.0 + port_value: 9000 + 
filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + feature-recorder: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: ingress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: / + route: + cluster: feature-recorder + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: ingress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true diff --git a/manifests/bucketeer/charts/feature-recorder/templates/service-cert-secret.yaml b/manifests/bucketeer/charts/feature-recorder/templates/service-cert-secret.yaml new file mode 100644 index 000000000..db5a252cd --- /dev/null +++ b/manifests/bucketeer/charts/feature-recorder/templates/service-cert-secret.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.tls.service.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "feature-recorder.fullname" . 
}}-service-cert + namespace: {{ .Values.namespace }} + labels: + app: {{ template "feature-recorder.name" . }} + chart: {{ template "feature-recorder.chart" . }} + release: {{ template "feature-recorder.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + tls.crt: {{ required "Service TLS certificate is required" .Values.tls.service.cert | b64enc | quote }} + tls.key: {{ required "Service TLS key is required" .Values.tls.service.key | b64enc | quote }} +{{- end }} diff --git a/manifests/bucketeer/charts/feature-recorder/templates/service-token-secret.yaml b/manifests/bucketeer/charts/feature-recorder/templates/service-token-secret.yaml new file mode 100644 index 000000000..90e282be2 --- /dev/null +++ b/manifests/bucketeer/charts/feature-recorder/templates/service-token-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.serviceToken.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "feature-recorder.fullname" . }}-service-token + namespace: {{ .Values.namespace }} + labels: + app: {{ template "feature-recorder.name" . }} + chart: {{ template "feature-recorder.chart" . }} + release: {{ template "feature-recorder.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + token: {{ required "Service token is required" .Values.serviceToken.token | b64enc | quote }} +{{- end }} diff --git a/manifests/bucketeer/charts/feature-recorder/templates/service.yaml b/manifests/bucketeer/charts/feature-recorder/templates/service.yaml new file mode 100644 index 000000000..80d5ba790 --- /dev/null +++ b/manifests/bucketeer/charts/feature-recorder/templates/service.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "feature-recorder.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "feature-recorder.name" . }} + chart: {{ template "feature-recorder.chart" . }} + release: {{ template "feature-recorder.fullname" . 
}} + heritage: {{ .Release.Service }} + envoy: "true" + metrics: "true" +spec: + type: {{ .Values.service.type }} + clusterIP: {{ .Values.service.clusterIP }} + ports: + - name: service + port: {{ .Values.service.externalPort }} + targetPort: envoy + protocol: TCP + - name: metrics + port: {{ .Values.env.metricsPort }} + protocol: TCP + - name: admin + port: {{ .Values.envoy.adminPort }} + protocol: TCP + selector: + app: {{ template "feature-recorder.name" . }} + release: {{ template "feature-recorder.fullname" . }} diff --git a/manifests/bucketeer/charts/feature-recorder/templates/vpa.yaml b/manifests/bucketeer/charts/feature-recorder/templates/vpa.yaml new file mode 100644 index 000000000..8b5ae21e3 --- /dev/null +++ b/manifests/bucketeer/charts/feature-recorder/templates/vpa.yaml @@ -0,0 +1,19 @@ +{{ if .Values.vpa.enabled }} +apiVersion: autoscaling.k8s.io/v1beta2 +kind: VerticalPodAutoscaler +metadata: + name: {{ template "feature-recorder.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + targetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "feature-recorder.fullname" . 
}} + updatePolicy: + updateMode: "{{ .Values.vpa.updateMode }}" + resourcePolicy: + containerPolicies: + - containerName: feature-recorder + minAllowed: + cpu: {{ .Values.vpa.resourcePolicy.containerPolicies.minAllowed.cpu }} +{{ end }} diff --git a/manifests/bucketeer/charts/feature-recorder/values.yaml b/manifests/bucketeer/charts/feature-recorder/values.yaml new file mode 100644 index 000000000..d4a9c535a --- /dev/null +++ b/manifests/bucketeer/charts/feature-recorder/values.yaml @@ -0,0 +1,71 @@ +image: + repository: ghcr.io/bucketeer-io/bucketeer-feature + pullPolicy: IfNotPresent + +fullnameOverride: "feature-recorder" + +namespace: + +env: + project: + featureService: localhost:9001 + database: + mysqlUser: + mysqlPass: + mysqlHost: + mysqlPort: 3306 + mysqlDbName: + maxMps: "1000" + numWorkers: "2" + pullerNumGoroutines: "5" + pullerMaxOutstandingMessages: "1000" + pullerMaxOutstandingBytes: "1000000000" + flushInterval: 1m + logLevel: info + port: 9090 + metricsPort: 9002 + topic: + subscription: + +affinity: {} + +nodeSelector: {} + +vpa: + enabled: false + updateMode: + resourcePolicy: + containerPolicies: + minAllowed: + cpu: 50m + +envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + port: 9000 + adminPort: 8001 + resources: {} + +tls: + service: + secret: + cert: + key: + +serviceToken: + secret: + token: + +service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + +health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + +resources: {} diff --git a/manifests/bucketeer/charts/feature-segment-persister/.helmignore b/manifests/bucketeer/charts/feature-segment-persister/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/manifests/bucketeer/charts/feature-segment-persister/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !).
Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/manifests/bucketeer/charts/feature-segment-persister/Chart.yaml b/manifests/bucketeer/charts/feature-segment-persister/Chart.yaml new file mode 100644 index 000000000..89d9de813 --- /dev/null +++ b/manifests/bucketeer/charts/feature-segment-persister/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for bucketeer-feature-segment-persister +name: feature-segment-persister +version: 1.0.0 diff --git a/manifests/bucketeer/charts/feature-segment-persister/templates/NOTES.txt b/manifests/bucketeer/charts/feature-segment-persister/templates/NOTES.txt new file mode 100644 index 000000000..b9a5bbd4b --- /dev/null +++ b/manifests/bucketeer/charts/feature-segment-persister/templates/NOTES.txt @@ -0,0 +1,15 @@ +1. Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "feature-segment-persister.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "feature-segment-persister.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "feature-segment-persister.fullname" . 
}} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "feature-segment-persister.name" . }},release={{ template "feature-segment-persister.fullname" . }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/manifests/bucketeer/charts/feature-segment-persister/templates/_helpers.tpl b/manifests/bucketeer/charts/feature-segment-persister/templates/_helpers.tpl new file mode 100644 index 000000000..5b3d3a1ba --- /dev/null +++ b/manifests/bucketeer/charts/feature-segment-persister/templates/_helpers.tpl @@ -0,0 +1,40 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "feature-segment-persister.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "feature-segment-persister.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "feature-segment-persister.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "service-cert-secret" -}} +{{- if .Values.tls.service.secret }} +{{- printf "%s" .Values.tls.service.secret -}} +{{- else -}} +{{ template "feature-segment-persister.fullname" . }}-service-cert +{{- end -}} +{{- end -}} diff --git a/manifests/bucketeer/charts/feature-segment-persister/templates/deployment.yaml b/manifests/bucketeer/charts/feature-segment-persister/templates/deployment.yaml new file mode 100644 index 000000000..ed42838d5 --- /dev/null +++ b/manifests/bucketeer/charts/feature-segment-persister/templates/deployment.yaml @@ -0,0 +1,167 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "feature-segment-persister.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "feature-segment-persister.name" . }} + chart: {{ template "feature-segment-persister.chart" . }} + release: {{ template "feature-segment-persister.fullname" . }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "feature-segment-persister.name" . }} + release: {{ template "feature-segment-persister.fullname" . }} + template: + metadata: + labels: + app: {{ template "feature-segment-persister.name" . }} + release: {{ template "feature-segment-persister.fullname" . }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/envoy-configmap.yaml") . | sha256sum }} + spec: + {{- with .Values.global.image.imagePullSecrets }} + imagePullSecrets: {{- toYaml . | nindent 8 }} + {{- end }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + volumes: + - name: envoy-config + configMap: + name: {{ template "feature-segment-persister.fullname" . }}-envoy-config + - name: service-cert-secret + secret: + secretName: {{ template "service-cert-secret" . 
}} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: ["segment-persister"] + env: + - name: BUCKETEER_FEATURE_PROJECT + value: "{{ .Values.env.project }}" + - name: BUCKETEER_FEATURE_MYSQL_USER + value: "{{ .Values.env.mysqlUser }}" + - name: BUCKETEER_FEATURE_MYSQL_PASS + value: "{{ .Values.env.mysqlPass }}" + - name: BUCKETEER_FEATURE_MYSQL_HOST + value: "{{ .Values.env.mysqlHost }}" + - name: BUCKETEER_FEATURE_MYSQL_PORT + value: "{{ .Values.env.mysqlPort }}" + - name: BUCKETEER_FEATURE_MYSQL_DB_NAME + value: "{{ .Values.env.mysqlDbName }}" + - name: BUCKETEER_FEATURE_BULK_SEGMENT_USERS_RECEIVED_EVENT_TOPIC + value: "{{ .Values.env.bulkSegmentUsersReceivedEventTopic }}" + - name: BUCKETEER_FEATURE_BULK_SEGMENT_USERS_RECEIVED_EVENT_SUBSCRIPTION + value: "{{ .Values.env.bulkSegmentUsersReceivedEventSubscription }}" + - name: BUCKETEER_FEATURE_DOMAIN_EVENT_TOPIC + value: "{{ .Values.env.domainEventTopic }}" + - name: BUCKETEER_FEATURE_MAX_MPS + value: "{{ .Values.env.maxMps }}" + - name: BUCKETEER_FEATURE_NUM_WORKERS + value: "{{ .Values.env.numWorkers }}" + - name: BUCKETEER_FEATURE_FLUSH_SIZE + value: "{{ .Values.env.flushSize }}" + - name: BUCKETEER_FEATURE_FLUSH_INTERVAL + value: "{{ .Values.env.flushInterval }}" + - name: BUCKETEER_FEATURE_PULLER_NUM_GOROUTINES + value: "{{ .Values.env.pullerNumGoroutines }}" + - name: BUCKETEER_FEATURE_PULLER_MAX_OUTSTANDING_MESSAGES + value: "{{ .Values.env.pullerMaxOutstandingMessages }}" + - name: BUCKETEER_FEATURE_PULLER_MAX_OUTSTANDING_BYTES + value: "{{ .Values.env.pullerMaxOutstandingBytes }}" + - name: BUCKETEER_FEATURE_REDIS_SERVER_NAME + value: "{{ .Values.env.redis.serverName }}" + - name: BUCKETEER_FEATURE_REDIS_ADDR + value: "{{ .Values.env.redis.addr }}" + - name: BUCKETEER_FEATURE_REDIS_POOL_MAX_IDLE + value: "{{ .Values.env.redis.poolMaxIdle }}" + - name: 
BUCKETEER_FEATURE_REDIS_POOL_MAX_ACTIVE + value: "{{ .Values.env.redis.poolMaxActive }}" + - name: BUCKETEER_FEATURE_PORT + value: "{{ .Values.env.port }}" + - name: BUCKETEER_FEATURE_METRICS_PORT + value: "{{ .Values.env.metricsPort }}" + - name: BUCKETEER_FEATURE_LOG_LEVEL + value: "{{ .Values.env.logLevel }}" + - name: BUCKETEER_FEATURE_CERT + value: /usr/local/certs/service/tls.crt + - name: BUCKETEER_FEATURE_KEY + value: /usr/local/certs/service/tls.key + volumeMounts: + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + ports: + - name: service + containerPort: {{ .Values.env.port }} + - name: metrics + containerPort: {{ .Values.env.metricsPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: service + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: service + scheme: HTTPS + resources: +{{ toYaml .Values.resources | indent 12 }} + - name: envoy + image: "{{ .Values.envoy.image.repository }}:{{ .Values.envoy.image.tag }}" + imagePullPolicy: {{ .Values.envoy.image.pullPolicy }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-c" + - "while [ $(netstat -plunt | grep tcp | grep -v envoy | wc -l) -ne 0 ]; do sleep 1; done;" + command: ["envoy"] + args: + - "-c" + - "/usr/local/conf/config.yaml" + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + - name: envoy-config + mountPath: /usr/local/conf/ + readOnly: true + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + ports: + - name: envoy + containerPort: {{ .Values.envoy.port }} + - name: admin + containerPort: {{ .Values.envoy.adminPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + 
periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + resources: +{{ toYaml .Values.envoy.resources | indent 12 }} + strategy: + type: RollingUpdate diff --git a/manifests/bucketeer/charts/feature-segment-persister/templates/envoy-configmap.yaml b/manifests/bucketeer/charts/feature-segment-persister/templates/envoy-configmap.yaml new file mode 100644 index 000000000..b41297a92 --- /dev/null +++ b/manifests/bucketeer/charts/feature-segment-persister/templates/envoy-configmap.yaml @@ -0,0 +1,127 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "feature-segment-persister.fullname" . }}-envoy-config + namespace: {{ .Values.namespace }} + labels: + app: {{ template "feature-segment-persister.name" . }} + chart: {{ template "feature-segment-persister.chart" . }} + release: {{ template "feature-segment-persister.fullname" . 
}} + heritage: {{ .Release.Service }} +data: + config.yaml: |- + admin: + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 + static_resources: + clusters: + - name: feature-segment-persister + type: strict_dns + lb_policy: round_robin + connect_timeout: 5s + dns_lookup_family: V4_ONLY + load_assignment: + cluster_name: feature-segment-persister + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 9090 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + listeners: + - name: ingress + address: + socket_address: + address: 0.0.0.0 + port_value: 9000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: 
envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + feature-segment-persister: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: ingress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: / + route: + cluster: feature-segment-persister + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: ingress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true diff --git a/manifests/bucketeer/charts/feature-segment-persister/templates/hpa.yaml b/manifests/bucketeer/charts/feature-segment-persister/templates/hpa.yaml new file mode 100644 index 000000000..c615948af --- /dev/null +++ b/manifests/bucketeer/charts/feature-segment-persister/templates/hpa.yaml @@ -0,0 +1,19 @@ +{{ if .Values.hpa.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "feature-segment-persister.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "feature-segment-persister.fullname" . 
}} + minReplicas: {{ .Values.hpa.minReplicas }} + maxReplicas: {{ .Values.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpa.metrics.cpu.targetAverageUtilization }} +{{ end }} diff --git a/manifests/bucketeer/charts/feature-segment-persister/templates/service-cert-secret.yaml b/manifests/bucketeer/charts/feature-segment-persister/templates/service-cert-secret.yaml new file mode 100644 index 000000000..ebcf7ca06 --- /dev/null +++ b/manifests/bucketeer/charts/feature-segment-persister/templates/service-cert-secret.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.tls.service.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "feature-segment-persister.fullname" . }}-service-cert + namespace: {{ .Values.namespace }} + labels: + app: {{ template "feature-segment-persister.name" . }} + chart: {{ template "feature-segment-persister.chart" . }} + release: {{ template "feature-segment-persister.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + tls.crt: {{ required "Service TLS certificate is required" .Values.tls.service.cert | b64enc | quote }} + tls.key: {{ required "Service TLS key is required" .Values.tls.service.key | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/feature-segment-persister/templates/service.yaml b/manifests/bucketeer/charts/feature-segment-persister/templates/service.yaml new file mode 100644 index 000000000..715c636a3 --- /dev/null +++ b/manifests/bucketeer/charts/feature-segment-persister/templates/service.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "feature-segment-persister.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "feature-segment-persister.name" . }} + chart: {{ template "feature-segment-persister.chart" . }} + release: {{ template "feature-segment-persister.fullname" . 
}} + heritage: {{ .Release.Service }} + envoy: "true" + metrics: "true" +spec: + type: {{ .Values.service.type }} + clusterIP: {{ .Values.service.clusterIP }} + ports: + - name: service + port: {{ .Values.service.externalPort }} + targetPort: envoy + protocol: TCP + - name: metrics + port: {{ .Values.env.metricsPort }} + protocol: TCP + - name: admin + port: {{ .Values.envoy.adminPort }} + protocol: TCP + selector: + app: {{ template "feature-segment-persister.name" . }} + release: {{ template "feature-segment-persister.fullname" . }} diff --git a/manifests/bucketeer/charts/feature-segment-persister/values.yaml b/manifests/bucketeer/charts/feature-segment-persister/values.yaml new file mode 100644 index 000000000..b88807f07 --- /dev/null +++ b/manifests/bucketeer/charts/feature-segment-persister/values.yaml @@ -0,0 +1,73 @@ +image: + repository: ghcr.io/bucketeer-io/bucketeer-feature + pullPolicy: IfNotPresent + +fullnameOverride: "feature-segment-persister" + +namespace: + +env: + project: + mysqlUser: + mysqlPass: + mysqlHost: + mysqlPort: 3306 + mysqlDbName: + bulkSegmentUsersReceivedEventTopic: + bulkSegmentUsersReceivedEventSubscription: + domainEventTopic: + maxMps: "100" + numWorkers: 2 + flushSize: 2 + flushInterval: 10s + pullerNumGoroutines: 5 + pullerMaxOutstandingMessages: "1000" + pullerMaxOutstandingBytes: "1000000000" + redis: + serverName: non-persistent-redis + poolMaxIdle: 50 + poolMaxActive: 200 + addr: + logLevel: info + port: 9090 + metricsPort: 9002 + +affinity: {} + +nodeSelector: {} + +hpa: + enabled: + minReplicas: + maxReplicas: + metrics: + cpu: + targetAverageUtilization: + +tls: + service: + secret: + cert: + key: + +envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + +service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + +health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + +resources: {} diff 
--git a/manifests/bucketeer/charts/feature-tag-cacher/.helmignore b/manifests/bucketeer/charts/feature-tag-cacher/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/manifests/bucketeer/charts/feature-tag-cacher/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/manifests/bucketeer/charts/feature-tag-cacher/Chart.yaml b/manifests/bucketeer/charts/feature-tag-cacher/Chart.yaml new file mode 100644 index 000000000..b10531210 --- /dev/null +++ b/manifests/bucketeer/charts/feature-tag-cacher/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for bucketeer-feature-tag-cacher +name: feature-tag-cacher +version: 1.0.0 diff --git a/manifests/bucketeer/charts/feature-tag-cacher/templates/NOTES.txt b/manifests/bucketeer/charts/feature-tag-cacher/templates/NOTES.txt new file mode 100644 index 000000000..2da762c7e --- /dev/null +++ b/manifests/bucketeer/charts/feature-tag-cacher/templates/NOTES.txt @@ -0,0 +1,15 @@ +1. Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "feature-tag-cacher.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. 
+ You can watch the status of by running 'kubectl get svc -w {{ template "feature-tag-cacher.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "feature-tag-cacher.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "feature-tag-cacher.name" . }},release={{ template "feature-tag-cacher.fullname" . }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/manifests/bucketeer/charts/feature-tag-cacher/templates/_helpers.tpl b/manifests/bucketeer/charts/feature-tag-cacher/templates/_helpers.tpl new file mode 100644 index 000000000..d962fd0c4 --- /dev/null +++ b/manifests/bucketeer/charts/feature-tag-cacher/templates/_helpers.tpl @@ -0,0 +1,48 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "feature-tag-cacher.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "feature-tag-cacher.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "feature-tag-cacher.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "service-cert-secret" -}} +{{- if .Values.tls.service.secret }} +{{- printf "%s" .Values.tls.service.secret -}} +{{- else -}} +{{ template "feature-tag-cacher.fullname" . }}-service-cert +{{- end -}} +{{- end -}} + +{{- define "service-token-secret" -}} +{{- if .Values.serviceToken.secret }} +{{- printf "%s" .Values.serviceToken.secret -}} +{{- else -}} +{{ template "feature-tag-cacher.fullname" . }}-service-token +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/manifests/bucketeer/charts/feature-tag-cacher/templates/deployment.yaml b/manifests/bucketeer/charts/feature-tag-cacher/templates/deployment.yaml new file mode 100644 index 000000000..89733f534 --- /dev/null +++ b/manifests/bucketeer/charts/feature-tag-cacher/templates/deployment.yaml @@ -0,0 +1,161 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "feature-tag-cacher.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "feature-tag-cacher.name" . }} + chart: {{ template "feature-tag-cacher.chart" . }} + release: {{ template "feature-tag-cacher.fullname" . }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "feature-tag-cacher.name" . }} + release: {{ template "feature-tag-cacher.fullname" . }} + template: + metadata: + labels: + app: {{ template "feature-tag-cacher.name" . }} + release: {{ template "feature-tag-cacher.fullname" . }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/envoy-configmap.yaml") . | sha256sum }} + spec: + {{- with .Values.global.image.imagePullSecrets }} + imagePullSecrets: {{- toYaml . 
| nindent 8 }} + {{- end }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + volumes: + - name: envoy-config + configMap: + name: {{ template "feature-tag-cacher.fullname" . }}-envoy-config + - name: service-cert-secret + secret: + secretName: {{ template "service-cert-secret" . }} + - name: service-token-secret + secret: + secretName: {{ template "service-token-secret" . }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: ["tag-cacher"] + env: + - name: BUCKETEER_FEATURE_PROJECT + value: "{{ .Values.env.project }}" + - name: BUCKETEER_FEATURE_FEATURE_SERVICE + value: "{{ .Values.env.featureService }}" + - name: BUCKETEER_FEATURE_TOPIC + value: "{{ .Values.env.topic }}" + - name: BUCKETEER_FEATURE_SUBSCRIPTION + value: "{{ .Values.env.subscription }}" + - name: BUCKETEER_FEATURE_MAX_MPS + value: "{{ .Values.env.maxMps }}" + - name: BUCKETEER_FEATURE_NUM_WORKERS + value: "{{ .Values.env.numWorkers }}" + - name: BUCKETEER_FEATURE_FLUSH_SIZE + value: "{{ .Values.env.flushSize }}" + - name: BUCKETEER_FEATURE_FLUSH_INTERVAL + value: "{{ .Values.env.flushInterval }}" + - name: BUCKETEER_FEATURE_PULLER_NUM_GOROUTINES + value: "{{ .Values.env.pullerNumGoroutines }}" + - name: BUCKETEER_FEATURE_PULLER_MAX_OUTSTANDING_MESSAGES + value: "{{ .Values.env.pullerMaxOutstandingMessages }}" + - name: BUCKETEER_FEATURE_PULLER_MAX_OUTSTANDING_BYTES + value: "{{ .Values.env.pullerMaxOutstandingBytes }}" + - name: BUCKETEER_FEATURE_REDIS_SERVER_NAME + value: "{{ .Values.env.redis.serverName }}" + - name: BUCKETEER_FEATURE_REDIS_ADDR + value: "{{ .Values.env.redis.addr }}" + - name: BUCKETEER_FEATURE_PORT + value: "{{ .Values.env.port }}" + - name: BUCKETEER_FEATURE_METRICS_PORT + value: "{{ .Values.env.metricsPort }}" + - name: BUCKETEER_FEATURE_LOG_LEVEL + value: "{{ .Values.env.logLevel }}" + - 
name: BUCKETEER_FEATURE_SERVICE_TOKEN + value: /usr/local/service-token/token + - name: BUCKETEER_FEATURE_CERT + value: /usr/local/certs/service/tls.crt + - name: BUCKETEER_FEATURE_KEY + value: /usr/local/certs/service/tls.key + volumeMounts: + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + - name: service-token-secret + mountPath: /usr/local/service-token + readOnly: true + ports: + - name: service + containerPort: {{ .Values.env.port }} + - name: metrics + containerPort: {{ .Values.env.metricsPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: service + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: service + scheme: HTTPS + resources: +{{ toYaml .Values.resources | indent 12 }} + - name: envoy + image: "{{ .Values.envoy.image.repository }}:{{ .Values.envoy.image.tag }}" + imagePullPolicy: {{ .Values.envoy.image.pullPolicy }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-c" + - "while [ $(netstat -plunt | grep tcp | grep -v envoy | wc -l) -ne 0 ]; do sleep 1; done;" + command: ["envoy"] + args: + - "-c" + - "/usr/local/conf/config.yaml" + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + - name: envoy-config + mountPath: /usr/local/conf/ + readOnly: true + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + ports: + - name: envoy + containerPort: {{ .Values.envoy.port }} + - name: admin + containerPort: {{ .Values.envoy.adminPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: envoy + scheme: 
HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + resources: +{{ toYaml .Values.envoy.resources | indent 12 }} + strategy: + type: RollingUpdate diff --git a/manifests/bucketeer/charts/feature-tag-cacher/templates/envoy-configmap.yaml b/manifests/bucketeer/charts/feature-tag-cacher/templates/envoy-configmap.yaml new file mode 100644 index 000000000..929a556b0 --- /dev/null +++ b/manifests/bucketeer/charts/feature-tag-cacher/templates/envoy-configmap.yaml @@ -0,0 +1,229 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "feature-tag-cacher.fullname" . }}-envoy-config + namespace: {{ .Values.namespace }} + labels: + app: {{ template "feature-tag-cacher.name" . }} + chart: {{ template "feature-tag-cacher.chart" . }} + release: {{ template "feature-tag-cacher.fullname" . }} + heritage: {{ .Release.Service }} +data: + config.yaml: |- + admin: + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 + static_resources: + clusters: + - name: feature-tag-cacher + type: strict_dns + lb_policy: round_robin + connect_timeout: 5s + dns_lookup_family: V4_ONLY + load_assignment: + cluster_name: feature-tag-cacher + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 9090 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': 
type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + - name: feature + type: strict_dns + lb_policy: round_robin + connect_timeout: 5s + dns_lookup_family: V4_ONLY + load_assignment: + cluster_name: feature + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: feature.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + listeners: + - name: ingress + address: + socket_address: + address: 0.0.0.0 + port_value: 9000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: 
envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + feature-tag-cacher: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: ingress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: / + route: + cluster: feature-tag-cacher + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: ingress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true + - name: egress + address: + socket_address: + address: 127.0.0.1 + port_value: 9001 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + feature: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: egress_services + routes: + - 
match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.feature.FeatureService + route: + cluster: feature + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: egress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true diff --git a/manifests/bucketeer/charts/feature-tag-cacher/templates/hpa.yaml b/manifests/bucketeer/charts/feature-tag-cacher/templates/hpa.yaml new file mode 100644 index 000000000..fb420c644 --- /dev/null +++ b/manifests/bucketeer/charts/feature-tag-cacher/templates/hpa.yaml @@ -0,0 +1,19 @@ +{{ if .Values.hpa.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "feature-tag-cacher.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "feature-tag-cacher.fullname" . }} + minReplicas: {{ .Values.hpa.minReplicas }} + maxReplicas: {{ .Values.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpa.metrics.cpu.targetAverageUtilization }} +{{ end }} diff --git a/manifests/bucketeer/charts/feature-tag-cacher/templates/service-cert-secret.yaml b/manifests/bucketeer/charts/feature-tag-cacher/templates/service-cert-secret.yaml new file mode 100644 index 000000000..9a9cba9c9 --- /dev/null +++ b/manifests/bucketeer/charts/feature-tag-cacher/templates/service-cert-secret.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.tls.service.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "feature-tag-cacher.fullname" . 
}}-service-cert + namespace: {{ .Values.namespace }} + labels: + app: {{ template "feature-tag-cacher.name" . }} + chart: {{ template "feature-tag-cacher.chart" . }} + release: {{ template "feature-tag-cacher.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + tls.crt: {{ required "Service TLS certificate is required" .Values.tls.service.cert | b64enc | quote }} + tls.key: {{ required "Service TLS key is required" .Values.tls.service.key | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/feature-tag-cacher/templates/service-token-secret.yaml b/manifests/bucketeer/charts/feature-tag-cacher/templates/service-token-secret.yaml new file mode 100644 index 000000000..6183e0fdd --- /dev/null +++ b/manifests/bucketeer/charts/feature-tag-cacher/templates/service-token-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.serviceToken.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "feature-tag-cacher.fullname" . }}-service-token + namespace: {{ .Values.namespace }} + labels: + app: {{ template "feature-tag-cacher.name" . }} + chart: {{ template "feature-tag-cacher.chart" . }} + release: {{ template "feature-tag-cacher.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + token: {{ required "Service token is required" .Values.serviceToken.token | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/feature-tag-cacher/templates/service.yaml b/manifests/bucketeer/charts/feature-tag-cacher/templates/service.yaml new file mode 100644 index 000000000..f8a1f31e9 --- /dev/null +++ b/manifests/bucketeer/charts/feature-tag-cacher/templates/service.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "feature-tag-cacher.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "feature-tag-cacher.name" . }} + chart: {{ template "feature-tag-cacher.chart" . 
}} + release: {{ template "feature-tag-cacher.fullname" . }} + heritage: {{ .Release.Service }} + envoy: "true" + metrics: "true" +spec: + type: {{ .Values.service.type }} + clusterIP: {{ .Values.service.clusterIP }} + ports: + - name: service + port: {{ .Values.service.externalPort }} + targetPort: envoy + protocol: TCP + - name: metrics + port: {{ .Values.env.metricsPort }} + protocol: TCP + - name: admin + port: {{ .Values.envoy.adminPort }} + protocol: TCP + selector: + app: {{ template "feature-tag-cacher.name" . }} + release: {{ template "feature-tag-cacher.fullname" . }} diff --git a/manifests/bucketeer/charts/feature-tag-cacher/values.yaml b/manifests/bucketeer/charts/feature-tag-cacher/values.yaml new file mode 100644 index 000000000..e21f3cc42 --- /dev/null +++ b/manifests/bucketeer/charts/feature-tag-cacher/values.yaml @@ -0,0 +1,70 @@ +image: + repository: ghcr.io/bucketeer-io/bucketeer-feature + pullPolicy: IfNotPresent + +fullnameOverride: "feature-tag-cacher" + +namespace: + +env: + project: + featureService: localhost:9001 + maxMps: "1000" + numWorkers: 2 + flushSize: 100 + flushInterval: 2s + pullerNumGoroutines: 5 + pullerMaxOutstandingMessages: "1000" + pullerMaxOutstandingBytes: "1000000000" + redis: + serverName: non-persistent-redis + addr: + logLevel: info + port: 9090 + metricsPort: 9002 + topic: + subscription: + +affinity: {} + +nodeSelector: {} + +hpa: + enabled: + minReplicas: + maxReplicas: + metrics: + cpu: + targetAverageUtilization: + +tls: + service: + secret: + cert: + key: + +serviceToken: + secret: + token: + +envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + # NOTE: keep these keys flat — templates read .Values.envoy.port / .Values.envoy.adminPort + port: 9000 + adminPort: 8001 + resources: {} + +service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + +health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + +resources: {} diff --git a/manifests/bucketeer/charts/feature/.helmignore b/manifests/bucketeer/charts/feature/.helmignore
new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/manifests/bucketeer/charts/feature/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/manifests/bucketeer/charts/feature/Chart.yaml b/manifests/bucketeer/charts/feature/Chart.yaml new file mode 100644 index 000000000..1e61d220f --- /dev/null +++ b/manifests/bucketeer/charts/feature/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for bucketeer-feature +name: feature +version: 1.0.0 diff --git a/manifests/bucketeer/charts/feature/templates/NOTES.txt b/manifests/bucketeer/charts/feature/templates/NOTES.txt new file mode 100644 index 000000000..c5d1b3a98 --- /dev/null +++ b/manifests/bucketeer/charts/feature/templates/NOTES.txt @@ -0,0 +1,15 @@ +1. Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "feature.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "feature.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "feature.fullname" . 
}} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "feature.name" . }},release={{ template "feature.fullname" . }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/manifests/bucketeer/charts/feature/templates/_helpers.tpl b/manifests/bucketeer/charts/feature/templates/_helpers.tpl new file mode 100644 index 000000000..7a1e817a7 --- /dev/null +++ b/manifests/bucketeer/charts/feature/templates/_helpers.tpl @@ -0,0 +1,56 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "feature.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "feature.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "feature.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "service-cert-secret" -}} +{{- if .Values.tls.service.secret }} +{{- printf "%s" .Values.tls.service.secret -}} +{{- else -}} +{{ template "feature.fullname" . }}-service-cert +{{- end -}} +{{- end -}} + +{{- define "oauth-key-secret" -}} +{{- if .Values.oauth.key.secret }} +{{- printf "%s" .Values.oauth.key.secret -}} +{{- else -}} +{{ template "feature.fullname" . }}-oauth-key +{{- end -}} +{{- end -}} + +{{- define "service-token-secret" -}} +{{- if .Values.serviceToken.secret }} +{{- printf "%s" .Values.serviceToken.secret -}} +{{- else -}} +{{ template "feature.fullname" . }}-service-token +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/manifests/bucketeer/charts/feature/templates/deployment.yaml b/manifests/bucketeer/charts/feature/templates/deployment.yaml new file mode 100644 index 000000000..be05798d2 --- /dev/null +++ b/manifests/bucketeer/charts/feature/templates/deployment.yaml @@ -0,0 +1,179 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "feature.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "feature.name" . }} + chart: {{ template "feature.chart" . }} + release: {{ template "feature.fullname" . }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "feature.name" . }} + release: {{ template "feature.fullname" . }} + template: + metadata: + labels: + app: {{ template "feature.name" . }} + release: {{ template "feature.fullname" . }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/envoy-configmap.yaml") . | sha256sum }} + spec: + {{- with .Values.global.image.imagePullSecrets }} + imagePullSecrets: {{- toYaml . 
| nindent 8 }} + {{- end }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + volumes: + - name: envoy-config + configMap: + name: {{ template "feature.fullname" . }}-envoy-config + - name: service-cert-secret + secret: + secretName: {{ template "service-cert-secret" . }} + - name: oauth-key-secret + secret: + secretName: {{ template "oauth-key-secret" . }} + - name: service-token-secret + secret: + secretName: {{ template "service-token-secret" . }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: ["server"] + env: + - name: BUCKETEER_FEATURE_PROJECT + value: "{{ .Values.env.project }}" + - name: BUCKETEER_FEATURE_DATABASE + value: "{{ .Values.env.database }}" + - name: BUCKETEER_FEATURE_MYSQL_USER + value: "{{ .Values.env.mysqlUser }}" + - name: BUCKETEER_FEATURE_MYSQL_PASS + value: "{{ .Values.env.mysqlPass }}" + - name: BUCKETEER_FEATURE_MYSQL_HOST + value: "{{ .Values.env.mysqlHost }}" + - name: BUCKETEER_FEATURE_MYSQL_PORT + value: "{{ .Values.env.mysqlPort }}" + - name: BUCKETEER_FEATURE_MYSQL_DB_NAME + value: "{{ .Values.env.mysqlDbName }}" + - name: BUCKETEER_FEATURE_BIGTABLE_INSTANCE + value: "{{ .Values.env.bigtableInstance }}" + - name: BUCKETEER_FEATURE_ACCOUNT_SERVICE + value: "{{ .Values.env.accountService }}" + - name: BUCKETEER_FEATURE_EXPERIMENT_SERVICE + value: "{{ .Values.env.experimentService }}" + - name: BUCKETEER_FEATURE_BULK_SEGMENT_USERS_RECEIVED_EVENT_TOPIC + value: "{{ .Values.env.bulkSegmentUsersReceivedEventTopic }}" + - name: BUCKETEER_FEATURE_DOMAIN_EVENT_TOPIC + value: "{{ .Values.env.domainEventTopic }}" + - name: BUCKETEER_FEATURE_PORT + value: "{{ .Values.env.port }}" + - name: BUCKETEER_FEATURE_METRICS_PORT + value: "{{ .Values.env.metricsPort }}" + - name: BUCKETEER_FEATURE_REDIS_SERVER_NAME + value: "{{ .Values.env.redis.serverName }}" + 
- name: BUCKETEER_FEATURE_REDIS_ADDR + value: "{{ .Values.env.redis.addr }}" + - name: BUCKETEER_FEATURE_REDIS_POOL_MAX_IDLE + value: "{{ .Values.env.redis.poolMaxIdle }}" + - name: BUCKETEER_FEATURE_REDIS_POOL_MAX_ACTIVE + value: "{{ .Values.env.redis.poolMaxActive }}" + - name: BUCKETEER_FEATURE_LOG_LEVEL + value: "{{ .Values.env.logLevel }}" + - name: BUCKETEER_FEATURE_OAUTH_CLIENT_ID + value: "{{ .Values.oauth.clientId }}" + - name: BUCKETEER_FEATURE_OAUTH_ISSUER + value: "{{ .Values.oauth.issuer }}" + - name: BUCKETEER_FEATURE_OAUTH_KEY + value: /usr/local/oauth-key/public.pem + - name: BUCKETEER_FEATURE_CERT + value: /usr/local/certs/service/tls.crt + - name: BUCKETEER_FEATURE_KEY + value: /usr/local/certs/service/tls.key + - name: BUCKETEER_FEATURE_SERVICE_TOKEN + value: /usr/local/service-token/token + volumeMounts: + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + - name: oauth-key-secret + mountPath: /usr/local/oauth-key + readOnly: true + - name: service-token-secret + mountPath: /usr/local/service-token + readOnly: true + ports: + - name: service + containerPort: {{ .Values.env.port }} + - name: metrics + containerPort: {{ .Values.env.metricsPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: service + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: service + scheme: HTTPS + resources: +{{ toYaml .Values.resources | indent 12 }} + - name: envoy + image: "{{ .Values.envoy.image.repository }}:{{ .Values.envoy.image.tag }}" + imagePullPolicy: {{ .Values.envoy.image.pullPolicy }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-c" + - "while [ $(netstat -plunt | grep tcp | grep -v envoy | wc -l) -ne 0 ]; do sleep 1; done;" + command: ["envoy"] + 
args: + - "-c" + - "/usr/local/conf/config.yaml" + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + - name: envoy-config + mountPath: /usr/local/conf/ + readOnly: true + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + ports: + - name: envoy + containerPort: {{ .Values.envoy.port }} + - name: admin + containerPort: {{ .Values.envoy.adminPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + resources: +{{ toYaml .Values.envoy.resources | indent 12 }} + strategy: + type: RollingUpdate diff --git a/manifests/bucketeer/charts/feature/templates/envoy-configmap.yaml b/manifests/bucketeer/charts/feature/templates/envoy-configmap.yaml new file mode 100644 index 000000000..002f164b5 --- /dev/null +++ b/manifests/bucketeer/charts/feature/templates/envoy-configmap.yaml @@ -0,0 +1,283 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "feature.fullname" . }}-envoy-config + namespace: {{ .Values.namespace }} + labels: + app: {{ template "feature.name" . }} + chart: {{ template "feature.chart" . }} + release: {{ template "feature.fullname" . 
}} + heritage: {{ .Release.Service }} +data: + config.yaml: |- + admin: + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 + static_resources: + clusters: + - name: feature + type: strict_dns + lb_policy: round_robin + connect_timeout: 5s + dns_lookup_family: V4_ONLY + load_assignment: + cluster_name: feature + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 9090 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + - name: account + type: strict_dns + lb_policy: round_robin + connect_timeout: 5s + dns_lookup_family: V4_ONLY + load_assignment: + cluster_name: account + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: account.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: 
/usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + - name: experiment + type: strict_dns + lb_policy: round_robin + connect_timeout: 5s + dns_lookup_family: V4_ONLY + load_assignment: + cluster_name: experiment + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: experiment.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + listeners: + - name: ingress + address: + socket_address: + address: 0.0.0.0 + port_value: 9000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: 
envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + feature: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: ingress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: / + route: + cluster: feature + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 60s + stat_prefix: ingress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true + - name: egress + address: + socket_address: + address: 127.0.0.1 + port_value: 9001 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + account: + value: 25 + experiment: + value: 25 + headers: + - name: 
:path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: egress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.account.AccountService + route: + cluster: account + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.experiment.ExperimentService + route: + cluster: experiment + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: egress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true diff --git a/manifests/bucketeer/charts/feature/templates/hpa.yaml b/manifests/bucketeer/charts/feature/templates/hpa.yaml new file mode 100644 index 000000000..ab8deaca0 --- /dev/null +++ b/manifests/bucketeer/charts/feature/templates/hpa.yaml @@ -0,0 +1,19 @@ +{{ if .Values.hpa.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "feature.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "feature.fullname" . 
}} + minReplicas: {{ .Values.hpa.minReplicas }} + maxReplicas: {{ .Values.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpa.metrics.cpu.targetAverageUtilization }} +{{ end }} diff --git a/manifests/bucketeer/charts/feature/templates/oauth-key-secret.yaml b/manifests/bucketeer/charts/feature/templates/oauth-key-secret.yaml new file mode 100644 index 000000000..5f44cc614 --- /dev/null +++ b/manifests/bucketeer/charts/feature/templates/oauth-key-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.oauth.key.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "feature.fullname" . }}-oauth-key + namespace: {{ .Values.namespace }} + labels: + app: {{ template "feature.name" . }} + chart: {{ template "feature.chart" . }} + release: {{ template "feature.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + public.pem: {{ required "OAuth key is required" .Values.oauth.key.public | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/feature/templates/pdb.yaml b/manifests/bucketeer/charts/feature/templates/pdb.yaml new file mode 100644 index 000000000..85b82849c --- /dev/null +++ b/manifests/bucketeer/charts/feature/templates/pdb.yaml @@ -0,0 +1,12 @@ +{{ if .Values.pdb.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "feature.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + selector: + matchLabels: + app: {{ template "feature.name" . 
}} +{{ end }} diff --git a/manifests/bucketeer/charts/feature/templates/service-cert-secret.yaml b/manifests/bucketeer/charts/feature/templates/service-cert-secret.yaml new file mode 100644 index 000000000..f8d39af8d --- /dev/null +++ b/manifests/bucketeer/charts/feature/templates/service-cert-secret.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.tls.service.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "feature.fullname" . }}-service-cert + namespace: {{ .Values.namespace }} + labels: + app: {{ template "feature.name" . }} + chart: {{ template "feature.chart" . }} + release: {{ template "feature.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + tls.crt: {{ required "Service TLS certificate is required" .Values.tls.service.cert | b64enc | quote }} + tls.key: {{ required "Service TLS key is required" .Values.tls.service.key | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/feature/templates/service-token-secret.yaml b/manifests/bucketeer/charts/feature/templates/service-token-secret.yaml new file mode 100644 index 000000000..501c83860 --- /dev/null +++ b/manifests/bucketeer/charts/feature/templates/service-token-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.serviceToken.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "feature.fullname" . }}-service-token + namespace: {{ .Values.namespace }} + labels: + app: {{ template "feature.name" . }} + chart: {{ template "feature.chart" . }} + release: {{ template "feature.fullname" . 
}} + heritage: {{ .Release.Service }} +type: Opaque +data: + token: {{ required "Service token is required" .Values.serviceToken.token | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/feature/templates/service.yaml b/manifests/bucketeer/charts/feature/templates/service.yaml new file mode 100644 index 000000000..0a5c7858a --- /dev/null +++ b/manifests/bucketeer/charts/feature/templates/service.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "feature.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "feature.name" . }} + chart: {{ template "feature.chart" . }} + release: {{ template "feature.fullname" . }} + heritage: {{ .Release.Service }} + envoy: "true" + metrics: "true" +spec: + type: {{ .Values.service.type }} + clusterIP: {{ .Values.service.clusterIP }} + ports: + - name: service + port: {{ .Values.service.externalPort }} + targetPort: envoy + protocol: TCP + - name: metrics + port: {{ .Values.env.metricsPort }} + protocol: TCP + - name: admin + port: {{ .Values.envoy.adminPort }} + protocol: TCP + selector: + app: {{ template "feature.name" . }} + release: {{ template "feature.fullname" . 
}} diff --git a/manifests/bucketeer/charts/feature/values.yaml b/manifests/bucketeer/charts/feature/values.yaml new file mode 100644 index 000000000..83752b02e --- /dev/null +++ b/manifests/bucketeer/charts/feature/values.yaml @@ -0,0 +1,84 @@ +image: + repository: ghcr.io/bucketeer-io/bucketeer-feature + pullPolicy: IfNotPresent + +fullnameOverride: "feature" + +namespace: + +env: + project: + database: + mysqlUser: + mysqlPass: + mysqlHost: + mysqlPort: 3306 + mysqlDbName: + bigtableInstance: + accountService: localhost:9001 + experimentService: localhost:9001 + redis: + serverName: + poolMaxIdle: 50 + poolMaxActive: 200 + addr: + logLevel: info + port: 9090 + metricsPort: 9002 + bulkSegmentUsersReceivedEventTopic: + domainEventTopic: + +affinity: {} + +nodeSelector: {} + +pdb: + enabled: + maxUnavailable: 50% + +hpa: + enabled: + minReplicas: + maxReplicas: + metrics: + cpu: + targetAverageUtilization: + +tls: + service: + secret: + cert: + key: + +serviceToken: + secret: + token: + +oauth: + key: + secret: + public: + clientId: + issuer: + +envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + # NOTE: keep these keys flat — templates read .Values.envoy.port / .Values.envoy.adminPort + port: 9000 + adminPort: 8001 + resources: {} + +service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + +health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + +resources: {} diff --git a/manifests/bucketeer/charts/goal-batch-transformer/.helmignore b/manifests/bucketeer/charts/goal-batch-transformer/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/manifests/bucketeer/charts/goal-batch-transformer/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line.
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/manifests/bucketeer/charts/goal-batch-transformer/Chart.yaml b/manifests/bucketeer/charts/goal-batch-transformer/Chart.yaml new file mode 100644 index 000000000..d9fb53c10 --- /dev/null +++ b/manifests/bucketeer/charts/goal-batch-transformer/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for bucketeer-goal-batch-transformer +name: goal-batch-transformer +version: 1.0.0 diff --git a/manifests/bucketeer/charts/goal-batch-transformer/templates/NOTES.txt b/manifests/bucketeer/charts/goal-batch-transformer/templates/NOTES.txt new file mode 100644 index 000000000..98b674f87 --- /dev/null +++ b/manifests/bucketeer/charts/goal-batch-transformer/templates/NOTES.txt @@ -0,0 +1,15 @@ +1. Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "goal-batch-transformer.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "goal-batch-transformer.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "goal-batch-transformer.fullname" . 
}} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "goal-batch-transformer.name" . }},release={{ template "goal-batch-transformer.fullname" . }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/manifests/bucketeer/charts/goal-batch-transformer/templates/_helpers.tpl b/manifests/bucketeer/charts/goal-batch-transformer/templates/_helpers.tpl new file mode 100644 index 000000000..7ca37584c --- /dev/null +++ b/manifests/bucketeer/charts/goal-batch-transformer/templates/_helpers.tpl @@ -0,0 +1,48 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "goal-batch-transformer.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "goal-batch-transformer.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "goal-batch-transformer.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "service-cert-secret" -}} +{{- if .Values.tls.service.secret }} +{{- printf "%s" .Values.tls.service.secret -}} +{{- else -}} +{{ template "goal-batch-transformer.fullname" . }}-service-cert +{{- end -}} +{{- end -}} + +{{- define "service-token-secret" -}} +{{- if .Values.serviceToken.secret }} +{{- printf "%s" .Values.serviceToken.secret -}} +{{- else -}} +{{ template "goal-batch-transformer.fullname" . }}-service-token +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/manifests/bucketeer/charts/goal-batch-transformer/templates/deployment.yaml b/manifests/bucketeer/charts/goal-batch-transformer/templates/deployment.yaml new file mode 100644 index 000000000..b918b2c23 --- /dev/null +++ b/manifests/bucketeer/charts/goal-batch-transformer/templates/deployment.yaml @@ -0,0 +1,157 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "goal-batch-transformer.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "goal-batch-transformer.name" . }} + chart: {{ template "goal-batch-transformer.chart" . }} + release: {{ template "goal-batch-transformer.fullname" . }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "goal-batch-transformer.name" . }} + release: {{ template "goal-batch-transformer.fullname" . }} + template: + metadata: + labels: + app: {{ template "goal-batch-transformer.name" . }} + release: {{ template "goal-batch-transformer.fullname" . }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/envoy-configmap.yaml") . | sha256sum }} + spec: + {{- with .Values.global.image.imagePullSecrets }} + imagePullSecrets: {{- toYaml . 
| nindent 8 }} + {{- end }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + volumes: + - name: envoy-config + configMap: + name: {{ template "goal-batch-transformer.fullname" . }}-envoy-config + - name: service-cert-secret + secret: + secretName: {{ template "service-cert-secret" . }} + - name: service-token-secret + secret: + secretName: {{ template "service-token-secret" . }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: ["transformer"] + env: + - name: BUCKETEER_GOAL_BATCH_PORT + value: "{{ .Values.env.port }}" + - name: BUCKETEER_GOAL_BATCH_METRICS_PORT + value: "{{ .Values.env.metricsPort }}" + - name: BUCKETEER_GOAL_BATCH_PROJECT + value: "{{ .Values.env.project }}" + - name: BUCKETEER_GOAL_BATCH_FEATURE_SERVICE + value: "{{ .Values.env.featureService }}" + - name: BUCKETEER_GOAL_BATCH_USER_SERVICE + value: "{{ .Values.env.userService }}" + - name: BUCKETEER_GOAL_BATCH_GOAL_BATCH_TOPIC + value: "{{ .Values.env.goalBatchTopic }}" + - name: BUCKETEER_GOAL_BATCH_GOAL_BATCH_SUBSCRIPTION + value: "{{ .Values.env.goalBatchSubscription }}" + - name: BUCKETEER_GOAL_BATCH_GOAL_TOPIC + value: "{{ .Values.env.goalTopic }}" + - name: BUCKETEER_GOAL_BATCH_MAX_MPS + value: "{{ .Values.env.maxMps }}" + - name: BUCKETEER_GOAL_BATCH_NUM_WORKERS + value: "{{ .Values.env.numWorkers }}" + - name: BUCKETEER_GOAL_BATCH_CERT + value: /usr/local/certs/service/tls.crt + - name: BUCKETEER_GOAL_BATCH_KEY + value: /usr/local/certs/service/tls.key + - name: BUCKETEER_GOAL_BATCH_SERVICE_TOKEN + value: /usr/local/service-token/token + - name: BUCKETEER_GOAL_BATCH_PULLER_NUM_GOROUTINES + value: "{{ .Values.env.pullerNumGoroutines }}" + - name: BUCKETEER_GOAL_BATCH_PULLER_MAX_OUTSTANDING_MESSAGES + value: "{{ .Values.env.pullerMaxOutstandingMessages }}" + - name: 
BUCKETEER_GOAL_BATCH_PULLER_MAX_OUTSTANDING_BYTES + value: "{{ .Values.env.pullerMaxOutstandingBytes }}" + - name: BUCKETEER_GOAL_BATCH_LOG_LEVEL + value: "{{ .Values.env.logLevel }}" + volumeMounts: + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + - name: service-token-secret + mountPath: /usr/local/service-token + readOnly: true + ports: + - name: service + containerPort: {{ .Values.env.port }} + - name: metrics + containerPort: {{ .Values.env.metricsPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: service + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: service + scheme: HTTPS + resources: +{{ toYaml .Values.resources | indent 12 }} + - name: envoy + image: "{{ .Values.envoy.image.repository }}:{{ .Values.envoy.image.tag }}" + imagePullPolicy: {{ .Values.envoy.image.pullPolicy }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-c" + - "while [ $(netstat -plunt | grep tcp | grep -v envoy | wc -l) -ne 0 ]; do sleep 1; done;" + command: ["envoy"] + args: + - "-c" + - "/usr/local/conf/config.yaml" + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + - name: envoy-config + mountPath: /usr/local/conf/ + readOnly: true + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + ports: + - name: envoy + containerPort: {{ .Values.envoy.port }} + - name: admin + containerPort: {{ .Values.envoy.adminPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + readinessProbe: + 
initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + resources: +{{ toYaml .Values.envoy.resources | indent 12 }} + strategy: + type: RollingUpdate diff --git a/manifests/bucketeer/charts/goal-batch-transformer/templates/envoy-configmap.yaml b/manifests/bucketeer/charts/goal-batch-transformer/templates/envoy-configmap.yaml new file mode 100644 index 000000000..9ca40cb83 --- /dev/null +++ b/manifests/bucketeer/charts/goal-batch-transformer/templates/envoy-configmap.yaml @@ -0,0 +1,283 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "goal-batch-transformer.fullname" . }}-envoy-config + namespace: {{ .Values.namespace }} + labels: + app: {{ template "goal-batch-transformer.name" . }} + chart: {{ template "goal-batch-transformer.chart" . }} + release: {{ template "goal-batch-transformer.fullname" . }} + heritage: {{ .Release.Service }} +data: + config.yaml: |- + admin: + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 + static_resources: + clusters: + - name: goal-batch-transformer + type: strict_dns + lb_policy: round_robin + connect_timeout: 5s + dns_lookup_family: V4_ONLY + load_assignment: + cluster_name: goal-batch-transformer + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 9090 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': 
type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + - name: feature + type: strict_dns + lb_policy: round_robin + connect_timeout: 5s + dns_lookup_family: V4_ONLY + load_assignment: + cluster_name: feature + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: feature.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + - name: user + type: strict_dns + lb_policy: round_robin + connect_timeout: 5s + dns_lookup_family: V4_ONLY + load_assignment: + cluster_name: user + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: user.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: 
+ filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + listeners: + - name: ingress + address: + socket_address: + address: 0.0.0.0 + port_value: 9000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + goal-batch-transformer: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: ingress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: / + route: + cluster: goal-batch-transformer + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: ingress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - 
certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true + - name: egress + address: + socket_address: + address: 127.0.0.1 + port_value: 9001 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + feature: + value: 25 + user: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: egress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.feature.FeatureService + route: + cluster: feature + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.user.UserService + route: + cluster: user + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: egress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true diff --git 
a/manifests/bucketeer/charts/goal-batch-transformer/templates/hpa.yaml b/manifests/bucketeer/charts/goal-batch-transformer/templates/hpa.yaml new file mode 100644 index 000000000..80d0f5831 --- /dev/null +++ b/manifests/bucketeer/charts/goal-batch-transformer/templates/hpa.yaml @@ -0,0 +1,19 @@ +{{ if .Values.hpa.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "goal-batch-transformer.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "goal-batch-transformer.fullname" . }} + minReplicas: {{ .Values.hpa.minReplicas }} + maxReplicas: {{ .Values.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpa.metrics.cpu.targetAverageUtilization }} +{{ end }} diff --git a/manifests/bucketeer/charts/goal-batch-transformer/templates/service-cert-secret.yaml b/manifests/bucketeer/charts/goal-batch-transformer/templates/service-cert-secret.yaml new file mode 100644 index 000000000..20ef79171 --- /dev/null +++ b/manifests/bucketeer/charts/goal-batch-transformer/templates/service-cert-secret.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.tls.service.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "goal-batch-transformer.fullname" . }}-service-cert + namespace: {{ .Values.namespace }} + labels: + app: {{ template "goal-batch-transformer.name" . }} + chart: {{ template "goal-batch-transformer.chart" . }} + release: {{ template "goal-batch-transformer.fullname" . 
}} + heritage: {{ .Release.Service }} +type: Opaque +data: + tls.crt: {{ required "Service TLS certificate is required" .Values.tls.service.cert | b64enc | quote }} + tls.key: {{ required "Service TLS key is required" .Values.tls.service.key | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/goal-batch-transformer/templates/service-token-secret.yaml b/manifests/bucketeer/charts/goal-batch-transformer/templates/service-token-secret.yaml new file mode 100644 index 000000000..b597ce591 --- /dev/null +++ b/manifests/bucketeer/charts/goal-batch-transformer/templates/service-token-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.serviceToken.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "goal-batch-transformer.fullname" . }}-service-token + namespace: {{ .Values.namespace }} + labels: + app: {{ template "goal-batch-transformer.name" . }} + chart: {{ template "goal-batch-transformer.chart" . }} + release: {{ template "goal-batch-transformer.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + token: {{ required "Service token is required" .Values.serviceToken.token | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/goal-batch-transformer/templates/service.yaml b/manifests/bucketeer/charts/goal-batch-transformer/templates/service.yaml new file mode 100644 index 000000000..7f244ca45 --- /dev/null +++ b/manifests/bucketeer/charts/goal-batch-transformer/templates/service.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "goal-batch-transformer.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "goal-batch-transformer.name" . }} + chart: {{ template "goal-batch-transformer.chart" . }} + release: {{ template "goal-batch-transformer.fullname" . 
}} + heritage: {{ .Release.Service }} + envoy: "true" + metrics: "true" +spec: + type: {{ .Values.service.type }} + clusterIP: {{ .Values.service.clusterIP }} + ports: + - name: service + port: {{ .Values.service.externalPort }} + targetPort: envoy + protocol: TCP + - name: metrics + port: {{ .Values.env.metricsPort }} + protocol: TCP + - name: admin + port: {{ .Values.envoy.adminPort }} + protocol: TCP + selector: + app: {{ template "goal-batch-transformer.name" . }} + release: {{ template "goal-batch-transformer.fullname" . }} diff --git a/manifests/bucketeer/charts/goal-batch-transformer/values.yaml b/manifests/bucketeer/charts/goal-batch-transformer/values.yaml new file mode 100644 index 000000000..142e0264c --- /dev/null +++ b/manifests/bucketeer/charts/goal-batch-transformer/values.yaml @@ -0,0 +1,67 @@ +image: + repository: ghcr.io/bucketeer-io/bucketeer-goal-batch + pullPolicy: IfNotPresent + +fullnameOverride: "goal-batch-transformer" + +namespace: + +env: + port: 9090 + metricsPort: 9002 + project: + featureService: localhost:9001 + userService: localhost:9001 + goalBatchTopic: + goalBatchSubscription: + goalTopic: + maxMps: 100 + numWorkers: 10 + pullerNumGoroutines: "5" + pullerMaxOutstandingMessages: "1000" + pullerMaxOutstandingBytes: "1000000000" + logLevel: info + +affinity: {} + +nodeSelector: + +hpa: + enabled: + minReplicas: + maxReplicas: + metrics: + cpu: + targetAverageUtilization: 60 + +envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + +tls: + service: + secret: + cert: + key: + +serviceToken: + secret: + token: + +service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + +health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + +resources: {} diff --git a/manifests/bucketeer/charts/kafka/Chart.yaml b/manifests/bucketeer/charts/kafka/Chart.yaml new file mode 100644 index 000000000..c914cf329 --- /dev/null 
+++ b/manifests/bucketeer/charts/kafka/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for bucketeer-kafka +name: kafka +version: 1.0.0 diff --git a/manifests/bucketeer/charts/kafka/Makefile b/manifests/bucketeer/charts/kafka/Makefile new file mode 100644 index 000000000..4761c35e8 --- /dev/null +++ b/manifests/bucketeer/charts/kafka/Makefile @@ -0,0 +1,9 @@ +VERSION := $(shell helm dep list | grep strimzi-kafka-operator | awk '{ print $$2 }') + +.PHONY: upgrade-crd +upgrade-crd: .unzip-charts + kubectl replace -f .charts/strimzi-kafka-operator/crds + +.PHONY: .unzip-charts +.unzip-charts: + tar xvf charts/strimzi-kafka-operator-helm-3-chart-${VERSION}.tgz -C charts/ diff --git a/manifests/bucketeer/charts/kafka/charts/kafka-cluster/.helmignore b/manifests/bucketeer/charts/kafka/charts/kafka-cluster/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/kafka-cluster/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/manifests/bucketeer/charts/kafka/charts/kafka-cluster/Chart.yaml b/manifests/bucketeer/charts/kafka/charts/kafka-cluster/Chart.yaml new file mode 100644 index 000000000..225834c31 --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/kafka-cluster/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: kafka-cluster +version: 0.1.0 diff --git a/manifests/bucketeer/charts/kafka/charts/kafka-cluster/templates/NOTES.txt b/manifests/bucketeer/charts/kafka/charts/kafka-cluster/templates/NOTES.txt new file mode 100644 index 000000000..d6df461c6 --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/kafka-cluster/templates/NOTES.txt @@ -0,0 +1,2 @@ +The Kafka-cluster has been installed. Check its status by running: + kubectl --namespace {{ .Release.Namespace }} get pods -l "app={{ template "kafka-cluster.name" . }},release={{ .Release.Name }}" diff --git a/manifests/bucketeer/charts/kafka/charts/kafka-cluster/templates/_helpers.tpl b/manifests/bucketeer/charts/kafka/charts/kafka-cluster/templates/_helpers.tpl new file mode 100644 index 000000000..a495c8c69 --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/kafka-cluster/templates/_helpers.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "kafka-cluster.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "kafka-cluster.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion value to use for the kafka-cluster managed k8s resources +*/}} +{{- define "kafka-cluster.apiVersion" -}} +{{- if lt .Values.image.tag "v0.12.0" }} +{{- printf "%s" "monitoring.coreos.com/v1alpha1" -}} +{{- else -}} +{{- printf "%s" "monitoring.coreos.com/v1" -}} +{{- end -}} +{{- end -}} diff --git a/manifests/bucketeer/charts/kafka/charts/kafka-cluster/templates/cluster.yaml b/manifests/bucketeer/charts/kafka/charts/kafka-cluster/templates/cluster.yaml new file mode 100644 index 000000000..f48f887e7 --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/kafka-cluster/templates/cluster.yaml @@ -0,0 +1,126 @@ +{{- if .Values.global.kafka.enabled }} +apiVersion: kafka.strimzi.io/v1beta2 +kind: Kafka +metadata: + name: {{ .Values.metadata.name }} + namespace: {{ .Values.namespace }} +spec: + kafka: + template: + pod: + metadata: + labels: + app: {{ template "kafka-cluster.name" . }} + release: {{ .Release.Name }} + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: {{ toYaml .Values.spec.kafka.affinity.nodeAffinity.matchExpressions | nindent 16 }} + {{- if .Values.spec.kafka.affinity.podAntiAffinity.enabled }} + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app: {{ template "kafka-cluster.name" . }} + release: {{ .Release.Name }} + topologyKey: kubernetes.io/hostname + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchLabels: + app: {{ template "kafka-cluster.name" . 
}} + release: {{ .Release.Name }} + topologyKey: failure-domain.beta.kubernetes.io/zone + {{- end }} + version: {{ .Values.spec.kafka.version }} + replicas: {{ .Values.spec.kafka.replicas }} + resources: {{ toYaml .Values.spec.kafka.resources | nindent 6 }} + jvmOptions: {{ toYaml .Values.spec.kafka.jvmOptions | nindent 6 }} + listeners: + - name: plain + port: 9092 + type: internal + tls: false + authentication: + type: scram-sha-512 + - name: external + port: 9094 + type: nodeport + tls: false + authentication: + type: scram-sha-512 + config: {{ toYaml .Values.spec.kafka.config | nindent 6 }} + storage: {{ toYaml .Values.spec.kafka.storage | nindent 6 }} + rack: {{ toYaml .Values.spec.kafka.rack | nindent 6 }} + metricsConfig: + type: jmxPrometheusExporter + valueFrom: + configMapKeyRef: + key: kafka-kafka-jmx-exporter-configuration.yaml + name: kafka-kafka-jmx-exporter-configuration + zookeeper: + template: + pod: + metadata: + labels: + app: {{ template "kafka-cluster.name" . }}-zookeeper + release: {{ .Release.Name }} + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: {{ toYaml .Values.spec.zookeeper.affinity.nodeAffinity.matchExpressions | nindent 16 }} + {{- if .Values.spec.zookeeper.affinity.podAntiAffinity.enabled }} + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app: {{ template "kafka-cluster.name" . }}-zookeeper + release: {{ .Release.Name }} + topologyKey: kubernetes.io/hostname + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchLabels: + app: {{ template "kafka-cluster.name" . 
}}-zookeeper + release: {{ .Release.Name }} + topologyKey: failure-domain.beta.kubernetes.io/zone + {{- end }} + replicas: {{ toYaml .Values.spec.zookeeper.replicas }} + resources: {{ toYaml .Values.spec.zookeeper.resources | nindent 6 }} + jvmOptions: {{ toYaml .Values.spec.zookeeper.jvmOptions | nindent 6 }} + storage: {{ toYaml .Values.spec.zookeeper.storage | nindent 6 }} + metricsConfig: + type: jmxPrometheusExporter + valueFrom: + configMapKeyRef: + key: kafka-zookeeper-jmx-exporter-configuration.yaml + name: kafka-zookeeper-jmx-exporter-configuration + entityOperator: + template: + pod: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: {{ toYaml .Values.spec.entityOperator.affinity.nodeAffinity.matchExpressions | nindent 16 }} + userOperator: + resources: {{ toYaml .Values.spec.entityOperator.userOperator.resources | nindent 8 }} + kafkaExporter: + topicRegex: ".*" + groupRegex: ".*" + template: + pod: + metadata: + labels: + kafka: metrics + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: {{ toYaml .Values.spec.kafkaExporter.affinity.nodeAffinity.matchExpressions | nindent 16 }} + resources: {{ toYaml .Values.spec.kafkaExporter.resources | nindent 6 }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/kafka/charts/kafka-cluster/templates/kafkaconfigmap.yaml b/manifests/bucketeer/charts/kafka/charts/kafka-cluster/templates/kafkaconfigmap.yaml new file mode 100644 index 000000000..db55946ab --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/kafka-cluster/templates/kafkaconfigmap.yaml @@ -0,0 +1,10 @@ +{{- if .Values.global.kafka.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: kafka-kafka-jmx-exporter-configuration + namespace: {{ .Values.namespace }} +data: + kafka-kafka-jmx-exporter-configuration.yaml: | +{{ toYaml .Values.spec.kafka.metrics | indent 4 }} +{{- end 
}} \ No newline at end of file diff --git a/manifests/bucketeer/charts/kafka/charts/kafka-cluster/templates/topic.yaml b/manifests/bucketeer/charts/kafka/charts/kafka-cluster/templates/topic.yaml new file mode 100644 index 000000000..6e8fd3707 --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/kafka-cluster/templates/topic.yaml @@ -0,0 +1,20 @@ +{{- if .Values.global.kafka.enabled }} +{{- if .Values.topics }} +{{- $root := . -}} +{{- range $index, $topic := .Values.topics }} +--- +apiVersion: kafka.strimzi.io/v1beta2 +kind: KafkaTopic +metadata: + name: {{ $topic.name }} + namespace: {{ $root.Values.namespace }} + labels: + strimzi.io/cluster: {{ $.Values.metadata.name }} +spec: + topicName : {{ $topic.spec.topicName }} + partitions: {{ $topic.spec.partitions }} + replicas: {{ $topic.spec.replicas }} + config: {{ toYaml $topic.spec.config | nindent 4 }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/kafka/charts/kafka-cluster/templates/user.yaml b/manifests/bucketeer/charts/kafka/charts/kafka-cluster/templates/user.yaml new file mode 100644 index 000000000..80de7daa7 --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/kafka-cluster/templates/user.yaml @@ -0,0 +1,26 @@ +{{- if .Values.global.kafka.enabled }} +{{- if .Values.users }} +{{- $root := . 
-}} +{{- range $index, $user := .Values.users }} +--- +apiVersion: kafka.strimzi.io/v1beta2 +kind: KafkaUser +metadata: + name: {{ $user.name }} + namespace: {{ $root.Values.namespace }} + labels: + strimzi.io/cluster: {{ $.Values.metadata.name }} +spec: + authentication: + type: scram-sha-512 + password: + valueFrom: + secretKeyRef: + name: {{ $user.name }} + key: password + authorization: + type: simple + acls: {{ toYaml $user.spec.authorization.acls | nindent 6 }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/kafka/charts/kafka-cluster/templates/usersecret.yaml b/manifests/bucketeer/charts/kafka/charts/kafka-cluster/templates/usersecret.yaml new file mode 100644 index 000000000..8809ecf4b --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/kafka-cluster/templates/usersecret.yaml @@ -0,0 +1,16 @@ +{{- if .Values.global.kafka.enabled }} +{{- if .Values.users }} +{{- $root := . -}} +{{- range $index, $user := .Values.users }} +--- +kind: Secret +apiVersion: v1 +metadata: + name: {{ $user.name }} + namespace: {{ $root.Values.namespace }} +type: Opaque +data: + password: {{ $user.password }} +{{- end }} +{{- end }} +{{- end }} diff --git a/manifests/bucketeer/charts/kafka/charts/kafka-cluster/templates/zookeeperconfigmap.yaml b/manifests/bucketeer/charts/kafka/charts/kafka-cluster/templates/zookeeperconfigmap.yaml new file mode 100644 index 000000000..d71bebb43 --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/kafka-cluster/templates/zookeeperconfigmap.yaml @@ -0,0 +1,10 @@ +{{- if .Values.global.kafka.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: kafka-zookeeper-jmx-exporter-configuration + namespace: {{ .Values.namespace }} +data: + kafka-zookeeper-jmx-exporter-configuration.yaml: | +{{ toYaml .Values.spec.zookeeper.metrics | indent 4 }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/kafka/charts/kafka-cluster/values.yaml 
b/manifests/bucketeer/charts/kafka/charts/kafka-cluster/values.yaml new file mode 100644 index 000000000..cc0580028 --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/kafka-cluster/values.yaml @@ -0,0 +1,202 @@ +namespace: +metadata: + name: kafka +spec: + kafka: + version: + replicas: + resources: {} + jvmOptions: {} + config: + auto.create.topics.enable: "false" + offsets.topic.replication.factor: 3 + transaction.state.log.replication.factor: 3 + transaction.state.log.min.isr: 2 + log.retention.hours: 48 + storage: {} + rack: + # This will be deprecated from 0.17.0. Instead, Use topology.kubernetes.io/zone. + topologyKey: failure-domain.beta.kubernetes.io/zone + affinity: + nodeAffinity: + matchExpressions: + - key: "pool-type" + operator: In + values: ["kafka"] + podAntiAffinity: + enabled: true + metrics: + # Inspired by config from Kafka 2.0.0 example rules: + # https://github.com/prometheus/jmx_exporter/blob/master/example_configs/kafka-2_0_0.yml + lowercaseOutputName: true + rules: + # Special cases and very specific rules + - pattern : kafka.server<type=(.+), name=(.+), clientId=(.+), topic=(.+), partition=(.*)><>Value + name: kafka_server_$1_$2 + type: GAUGE + labels: + clientId: "$3" + topic: "$4" + partition: "$5" + - pattern : kafka.server<type=(.+), name=(.+), clientId=(.+), brokerHost=(.+), brokerPort=(.+)><>Value + name: kafka_server_$1_$2 + type: GAUGE + labels: + clientId: "$3" + broker: "$4:$5" + # Some percent metrics use MeanRate attribute + # Ex) kafka.server<type=(KafkaRequestHandlerPool), name=(RequestHandlerAvgIdlePercent)><>MeanRate + - pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*><>MeanRate + name: kafka_$1_$2_$3_percent + type: GAUGE + # Generic gauges for percents + - pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*><>Value + name: kafka_$1_$2_$3_percent + type: GAUGE + - pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*, (.+)=(.+)><>Value + name: kafka_$1_$2_$3_percent + type: GAUGE + labels: + "$4": "$5" + # Generic per-second counters with 0-2 key/value pairs + - pattern: kafka.(\w+)<type=(.+), name=(.+)PerSec\w*, (.+)=(.+), (.+)=(.+)><>Count + name: kafka_$1_$2_$3_total + type: COUNTER + labels: + "$4": "$5" + "$6": "$7" + - pattern: kafka.(\w+)<type=(.+), name=(.+)PerSec\w*, (.+)=(.+)><>Count + name: kafka_$1_$2_$3_total + type: COUNTER + labels: + "$4": "$5" + - pattern: kafka.(\w+)<type=(.+), name=(.+)PerSec\w*><>Count + name: 
kafka_$1_$2_$3_total + type: COUNTER + # Generic gauges with 0-2 key/value pairs + - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+), (.+)=(.+)><>Value + name: kafka_$1_$2_$3 + type: GAUGE + labels: + "$4": "$5" + "$6": "$7" + - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+)><>Value + name: kafka_$1_$2_$3 + type: GAUGE + labels: + "$4": "$5" + - pattern: kafka.(\w+)<type=(.+), name=(.+)><>Value + name: kafka_$1_$2_$3 + type: GAUGE + # Emulate Prometheus 'Summary' metrics for the exported 'Histogram's. + # Note that these are missing the '_sum' metric! + - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+), (.+)=(.+)><>Count + name: kafka_$1_$2_$3_count + type: COUNTER + labels: + "$4": "$5" + "$6": "$7" + - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.*), (.+)=(.+)><>(\d+)thPercentile + name: kafka_$1_$2_$3 + type: GAUGE + labels: + "$4": "$5" + "$6": "$7" + quantile: "0.$8" + - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+)><>Count + name: kafka_$1_$2_$3_count + type: COUNTER + labels: + "$4": "$5" + - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.*)><>(\d+)thPercentile + name: kafka_$1_$2_$3 + type: GAUGE + labels: + "$4": "$5" + quantile: "0.$6" + - pattern: kafka.(\w+)<type=(.+), name=(.+)><>Count + name: kafka_$1_$2_$3_count + type: COUNTER + - pattern: kafka.(\w+)<type=(.+), name=(.+)><>(\d+)thPercentile + name: kafka_$1_$2_$3 + type: GAUGE + labels: + quantile: "0.$4" + zookeeper: + replicas: + resources: {} + affinity: + nodeAffinity: + matchExpressions: + - key: "pool-type" + operator: In + values: ["kafka"] + podAntiAffinity: + enabled: true + jvmOptions: {} + storage: {} + metrics: + # Inspired by Zookeeper rules + # https://github.com/prometheus/jmx_exporter/blob/master/example_configs/zookeeper.yaml + lowercaseOutputName: true + rules: + # replicated Zookeeper + - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+)><>(\\w+)" + name: "zookeeper_$2" + type: GAUGE + - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+)><>(\\w+)" + name: "zookeeper_$3" + type: GAUGE + labels: + replicaId: "$2" + - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+)><>(Packets\\w+)" + name: "zookeeper_$4" + type: COUNTER + labels: + replicaId: "$2" + memberType: "$3" + - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+)><>(\\w+)" + name: "zookeeper_$4" + type: GAUGE + labels: + replicaId: "$2" 
+ memberType: "$3" + - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+), name3=(\\w+)><>(\\w+)" + name: "zookeeper_$4_$5" + type: GAUGE + labels: + replicaId: "$2" + memberType: "$3" + # standalone Zookeeper + - pattern: "org.apache.ZooKeeperService<name0=StandaloneServer_port(\\d+)><>(\\w+)" + type: GAUGE + name: "zookeeper_$2" + - pattern: "org.apache.ZooKeeperService<name0=StandaloneServer_port(\\d+), name1=InMemoryDataTree><>(\\w+)" + type: GAUGE + name: "zookeeper_$2" + entityOperator: + affinity: + nodeAffinity: + matchExpressions: + - key: "pool-type" + operator: In + values: ["kafka"] + userOperator: + resources: + requests: + cpu: 50m + memory: 216Mi + limits: + cpu: 300m + memory: 216Mi + kafkaExporter: + affinity: + nodeAffinity: + matchExpressions: + - key: "pool-type" + operator: In + values: ["kafka"] + resources: {} + +users: {} +topics: {} diff --git a/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/.helmignore b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/Chart.yaml b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/Chart.yaml new file mode 100644 index 000000000..1f21bf6a6 --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +appVersion: 0.27.1 +description: 'Strimzi: Apache Kafka running on Kubernetes' +home: https://strimzi.io/ +icon: https://raw.githubusercontent.com/strimzi/strimzi-kafka-operator/main/documentation/logo/strimzi_logo.png +keywords: +- kafka +- queue +- stream +- event +- messaging +- datastore +- topic +maintainers: +- name: Frawless +- name: ppatierno +- name: samuel-hawker +- name: scholzj +- name: tombentley +- name: sknot-rh +name: strimzi-kafka-operator +sources: +- https://github.com/strimzi/strimzi-kafka-operator +version: 0.27.1 diff --git a/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/OWNERS b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/OWNERS new file mode 100644 index 000000000..58c4c840f --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/OWNERS @@ -0,0 +1,14 @@ +approvers: +- ppatierno +- scholzj +- tombentley +- samuel-hawker +- Frawless +- sknot-rh +reviewers: +- ppatierno +- scholzj +- tombentley +- samuel-hawker +- Frawless +- sknot-rh \ No newline at end of file diff --git a/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/README.md b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/README.md new file mode 100644 index 000000000..83bfe6de4 --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/README.md @@ -0,0 +1,4 @@ +# Strimzi: Apache Kafka on Kubernetes + +This chart is the custom version of 
[strimzi/strimzi-kafka-operator](https://github.com/strimzi/strimzi-kafka-operator/tree/main/helm-charts/helm3/strimzi-kafka-operator). +The version is written in [here](../../requirements.yaml). diff --git a/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/040-Crd-kafka.yaml b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/040-Crd-kafka.yaml new file mode 100644 index 000000000..91de5b484 --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/040-Crd-kafka.yaml @@ -0,0 +1,5955 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: kafkas.kafka.strimzi.io + labels: + app: strimzi + strimzi.io/crd-install: "true" + component: kafkas.kafka.strimzi.io-crd +spec: + group: kafka.strimzi.io + names: + kind: Kafka + listKind: KafkaList + singular: kafka + plural: kafkas + shortNames: + - k + categories: + - strimzi + scope: Namespaced + conversion: + strategy: None + versions: + - name: v1beta2 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - name: Desired Kafka replicas + description: The desired number of Kafka replicas in the cluster + jsonPath: .spec.kafka.replicas + type: integer + - name: Desired ZK replicas + description: The desired number of ZooKeeper replicas in the cluster + jsonPath: .spec.zookeeper.replicas + type: integer + - name: Ready + description: The state of the custom resource + jsonPath: .status.conditions[?(@.type=="Ready")].status + type: string + - name: Warnings + description: Warnings related to the custom resource + jsonPath: .status.conditions[?(@.type=="Warning")].status + type: string + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + kafka: + type: object + properties: + version: + type: string + description: The kafka broker version. Defaults to {DefaultKafkaVersion}. 
Consult the user documentation to understand the process required to upgrade or downgrade the version. + replicas: + type: integer + minimum: 1 + description: The number of pods in the cluster. + image: + type: string + description: The docker image for the pods. The default value depends on the configured `Kafka.spec.kafka.version`. + listeners: + type: array + minItems: 1 + items: + type: object + properties: + name: + type: string + pattern: ^[a-z0-9]{1,11}$ + description: Name of the listener. The name will be used to identify the listener and the related Kubernetes objects. The name has to be unique within given a Kafka cluster. The name can consist of lowercase characters and numbers and be up to 11 characters long. + port: + type: integer + minimum: 9092 + description: Port number used by the listener inside Kafka. The port number has to be unique within a given Kafka cluster. Allowed port numbers are 9092 and higher with the exception of ports 9404 and 9999, which are already used for Prometheus and JMX. Depending on the listener type, the port number might not be the same as the port number that connects Kafka clients. + type: + type: string + enum: + - internal + - route + - loadbalancer + - nodeport + - ingress + description: "Type of the listener. Currently the supported types are `internal`, `route`, `loadbalancer`, `nodeport` and `ingress`. \n\n* `internal` type exposes Kafka internally only within the Kubernetes cluster.\n* `route` type uses OpenShift Routes to expose Kafka.\n* `loadbalancer` type uses LoadBalancer type services to expose Kafka.\n* `nodeport` type uses NodePort type services to expose Kafka.\n* `ingress` type uses Kubernetes Nginx Ingress to expose Kafka.\n" + tls: + type: boolean + description: Enables TLS encryption on the listener. This is a required property. + authentication: + type: object + properties: + accessTokenIsJwt: + type: boolean + description: Configure whether the access token is treated as JWT. 
This must be set to `false` if the authorization server returns opaque tokens. Defaults to `true`. + checkAccessTokenType: + type: boolean + description: Configure whether the access token type check is performed or not. This should be set to `false` if the authorization server does not include 'typ' claim in JWT token. Defaults to `true`. + checkAudience: + type: boolean + description: Enable or disable audience checking. Audience checks identify the recipients of tokens. If audience checking is enabled, the OAuth Client ID also has to be configured using the `clientId` property. The Kafka broker will reject tokens that do not have its `clientId` in their `aud` (audience) claim.Default value is `false`. + checkIssuer: + type: boolean + description: Enable or disable issuer checking. By default issuer is checked using the value configured by `validIssuerUri`. Default value is `true`. + clientAudience: + type: string + description: The audience to use when making requests to the authorization server's token endpoint. Used for inter-broker authentication and for configuring OAuth 2.0 over PLAIN using the `clientId` and `secret` method. + clientId: + type: string + description: OAuth Client ID which the Kafka broker can use to authenticate against the authorization server and use the introspect endpoint URI. + clientScope: + type: string + description: The scope to use when making requests to the authorization server's token endpoint. Used for inter-broker authentication and for configuring OAuth 2.0 over PLAIN using the `clientId` and `secret` method. + clientSecret: + type: object + properties: + key: + type: string + description: The key under which the secret value is stored in the Kubernetes Secret. + secretName: + type: string + description: The name of the Kubernetes Secret containing the secret value. 
+ required: + - key + - secretName + description: Link to Kubernetes Secret containing the OAuth client secret which the Kafka broker can use to authenticate against the authorization server and use the introspect endpoint URI. + customClaimCheck: + type: string + description: JsonPath filter query to be applied to the JWT token or to the response of the introspection endpoint for additional token validation. Not set by default. + disableTlsHostnameVerification: + type: boolean + description: Enable or disable TLS hostname verification. Default value is `false`. + enableECDSA: + type: boolean + description: Enable or disable ECDSA support by installing BouncyCastle crypto provider. ECDSA support is always enabled. The BouncyCastle libraries are no longer packaged with Strimzi. Value is ignored. + enableOauthBearer: + type: boolean + description: Enable or disable OAuth authentication over SASL_OAUTHBEARER. Default value is `true`. + enablePlain: + type: boolean + description: Enable or disable OAuth authentication over SASL_PLAIN. There is no re-authentication support when this mechanism is used. Default value is `false`. + fallbackUserNameClaim: + type: string + description: The fallback username claim to be used for the user id if the claim specified by `userNameClaim` is not present. This is useful when `client_credentials` authentication only results in the client id being provided in another claim. It only takes effect if `userNameClaim` is set. + fallbackUserNamePrefix: + type: string + description: The prefix to use with the value of `fallbackUserNameClaim` to construct the user id. This only takes effect if `fallbackUserNameClaim` is true, and the value is present for the claim. Mapping usernames and client ids into the same user id space is useful in preventing name collisions. + introspectionEndpointUri: + type: string + description: URI of the token introspection endpoint which can be used to validate opaque non-JWT tokens. 
+ jwksEndpointUri: + type: string + description: URI of the JWKS certificate endpoint, which can be used for local JWT validation. + jwksExpirySeconds: + type: integer + minimum: 1 + description: Configures how often are the JWKS certificates considered valid. The expiry interval has to be at least 60 seconds longer then the refresh interval specified in `jwksRefreshSeconds`. Defaults to 360 seconds. + jwksMinRefreshPauseSeconds: + type: integer + minimum: 0 + description: The minimum pause between two consecutive refreshes. When an unknown signing key is encountered the refresh is scheduled immediately, but will always wait for this minimum pause. Defaults to 1 second. + jwksRefreshSeconds: + type: integer + minimum: 1 + description: Configures how often are the JWKS certificates refreshed. The refresh interval has to be at least 60 seconds shorter then the expiry interval specified in `jwksExpirySeconds`. Defaults to 300 seconds. + maxSecondsWithoutReauthentication: + type: integer + description: Maximum number of seconds the authenticated session remains valid without re-authentication. This enables Apache Kafka re-authentication feature, and causes sessions to expire when the access token expires. If the access token expires before max time or if max time is reached, the client has to re-authenticate, otherwise the server will drop the connection. Not set by default - the authenticated session does not expire when the access token expires. This option only applies to SASL_OAUTHBEARER authentication mechanism (when `enableOauthBearer` is `true`). + tlsTrustedCertificates: + type: array + items: + type: object + properties: + certificate: + type: string + description: The name of the file certificate in the Secret. + secretName: + type: string + description: The name of the Secret containing the certificate. + required: + - certificate + - secretName + description: Trusted certificates for TLS connection to the OAuth server. 
+ tokenEndpointUri: + type: string + description: URI of the Token Endpoint to use with SASL_PLAIN mechanism when the client authenticates with `clientId` and a `secret`. If set, the client can authenticate over SASL_PLAIN by either setting `username` to `clientId`, and setting `password` to client `secret`, or by setting `username` to account username, and `password` to access token prefixed with `$accessToken:`. If this option is not set, the `password` is always interpreted as an access token (without a prefix), and `username` as the account username (a so called 'no-client-credentials' mode). + type: + type: string + enum: + - tls + - scram-sha-512 + - oauth + description: Authentication type. `oauth` type uses SASL OAUTHBEARER Authentication. `scram-sha-512` type uses SASL SCRAM-SHA-512 Authentication. `tls` type uses TLS Client Authentication. `tls` type is supported only on TLS listeners. + userInfoEndpointUri: + type: string + description: 'URI of the User Info Endpoint to use as a fallback to obtaining the user id when the Introspection Endpoint does not return information that can be used for the user id. ' + userNameClaim: + type: string + description: Name of the claim from the JWT authentication token, Introspection Endpoint response or User Info Endpoint response which will be used to extract the user id. Defaults to `sub`. + validIssuerUri: + type: string + description: URI of the token issuer used for authentication. + validTokenType: + type: string + description: Valid value for the `token_type` attribute returned by the Introspection Endpoint. No default value, and not checked by default. + required: + - type + description: Authentication configuration for this listener. + configuration: + type: object + properties: + brokerCertChainAndKey: + type: object + properties: + certificate: + type: string + description: The name of the file certificate in the Secret. + key: + type: string + description: The name of the private key in the Secret. 
+ secretName: + type: string + description: The name of the Secret containing the certificate. + required: + - certificate + - key + - secretName + description: Reference to the `Secret` which holds the certificate and private key pair which will be used for this listener. The certificate can optionally contain the whole chain. This field can be used only with listeners with enabled TLS encryption. + externalTrafficPolicy: + type: string + enum: + - Local + - Cluster + description: Specifies whether the service routes external traffic to node-local or cluster-wide endpoints. `Cluster` may cause a second hop to another node and obscures the client source IP. `Local` avoids a second hop for LoadBalancer and Nodeport type services and preserves the client source IP (when supported by the infrastructure). If unspecified, Kubernetes will use `Cluster` as the default.This field can be used only with `loadbalancer` or `nodeport` type listener. + loadBalancerSourceRanges: + type: array + items: + type: string + description: A list of CIDR ranges (for example `10.0.0.0/8` or `130.211.204.1/32`) from which clients can connect to load balancer type listeners. If supported by the platform, traffic through the loadbalancer is restricted to the specified CIDR ranges. This field is applicable only for loadbalancer type services and is ignored if the cloud provider does not support the feature. For more information, see https://v1-17.docs.kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/. This field can be used only with `loadbalancer` type listener. + bootstrap: + type: object + properties: + alternativeNames: + type: array + items: + type: string + description: Additional alternative names for the bootstrap service. The alternative names will be added to the list of subject alternative names of the TLS certificates. + host: + type: string + description: The bootstrap host. 
This field will be used in the Ingress resource or in the Route resource to specify the desired hostname. This field can be used only with `route` (optional) or `ingress` (required) type listeners. + nodePort: + type: integer + description: Node port for the bootstrap service. This field can be used only with `nodeport` type listener. + loadBalancerIP: + type: string + description: The loadbalancer is requested with the IP address specified in this field. This feature depends on whether the underlying cloud provider supports specifying the `loadBalancerIP` when a load balancer is created. This field is ignored if the cloud provider does not support the feature.This field can be used only with `loadbalancer` type listener. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations that will be added to the `Ingress`, `Route`, or `Service` resource. You can use this field to configure DNS providers such as External DNS. This field can be used only with `loadbalancer`, `nodeport`, `route`, or `ingress` type listeners. + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels that will be added to the `Ingress`, `Route`, or `Service` resource. This field can be used only with `loadbalancer`, `nodeport`, `route`, or `ingress` type listeners. + description: Bootstrap configuration. + brokers: + type: array + items: + type: object + properties: + broker: + type: integer + description: ID of the kafka broker (broker identifier). Broker IDs start from 0 and correspond to the number of broker replicas. + advertisedHost: + type: string + description: The host name which will be used in the brokers' `advertised.brokers`. + advertisedPort: + type: integer + description: The port number which will be used in the brokers' `advertised.brokers`. + host: + type: string + description: The broker host. This field will be used in the Ingress resource or in the Route resource to specify the desired hostname. 
This field can be used only with `route` (optional) or `ingress` (required) type listeners. + nodePort: + type: integer + description: Node port for the per-broker service. This field can be used only with `nodeport` type listener. + loadBalancerIP: + type: string + description: The loadbalancer is requested with the IP address specified in this field. This feature depends on whether the underlying cloud provider supports specifying the `loadBalancerIP` when a load balancer is created. This field is ignored if the cloud provider does not support the feature.This field can be used only with `loadbalancer` type listener. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations that will be added to the `Ingress` or `Service` resource. You can use this field to configure DNS providers such as External DNS. This field can be used only with `loadbalancer`, `nodeport`, or `ingress` type listeners. + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels that will be added to the `Ingress`, `Route`, or `Service` resource. This field can be used only with `loadbalancer`, `nodeport`, `route`, or `ingress` type listeners. + required: + - broker + description: Per-broker configurations. + ipFamilyPolicy: + type: string + enum: + - SingleStack + - PreferDualStack + - RequireDualStack + description: Specifies the IP Family Policy used by the service. Available options are `SingleStack`, `PreferDualStack` and `RequireDualStack`. `SingleStack` is for a single IP family. `PreferDualStack` is for two IP families on dual-stack configured clusters or a single IP family on single-stack clusters. `RequireDualStack` fails unless there are two IP families on dual-stack configured clusters. If unspecified, Kubernetes will choose the default value based on the service type. Available on Kubernetes 1.20 and newer. 
+ ipFamilies: + type: array + items: + type: string + enum: + - IPv4 + - IPv6 + description: Specifies the IP Families used by the service. Available options are `IPv4` and `IPv6. If unspecified, Kubernetes will choose the default value based on the `ipFamilyPolicy` setting. Available on Kubernetes 1.20 and newer. + class: + type: string + description: Configures the `Ingress` class that defines which `Ingress` controller will be used. This field can be used only with `ingress` type listener. If not specified, the default Ingress controller will be used. + finalizers: + type: array + items: + type: string + description: A list of finalizers which will be configured for the `LoadBalancer` type Services created for this listener. If supported by the platform, the finalizer `service.kubernetes.io/load-balancer-cleanup` to make sure that the external load balancer is deleted together with the service.For more information, see https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#garbage-collecting-load-balancers. This field can be used only with `loadbalancer` type listeners. + maxConnectionCreationRate: + type: integer + description: The maximum connection creation rate we allow in this listener at any time. New connections will be throttled if the limit is reached. + maxConnections: + type: integer + description: The maximum number of connections we allow for this listener in the broker at any time. New connections are blocked if the limit is reached. + preferredNodePortAddressType: + type: string + enum: + - ExternalIP + - ExternalDNS + - InternalIP + - InternalDNS + - Hostname + description: |- + Defines which address type should be used as the node address. Available types are: `ExternalDNS`, `ExternalIP`, `InternalDNS`, `InternalIP` and `Hostname`. 
By default, the addresses will be used in the following order (the first one found will be used): + + * `ExternalDNS` + * `ExternalIP` + * `InternalDNS` + * `InternalIP` + * `Hostname` + + This field is used to select the preferred address type, which is checked first. If no address is found for this address type, the other types are checked in the default order. This field can only be used with `nodeport` type listener. + useServiceDnsDomain: + type: boolean + description: Configures whether the Kubernetes service DNS domain should be used or not. If set to `true`, the generated addresses will contain the service DNS domain suffix (by default `.cluster.local`, can be configured using environment variable `KUBERNETES_SERVICE_DNS_DOMAIN`). Defaults to `false`.This field can be used only with `internal` type listener. + description: Additional listener configuration. + networkPolicyPeers: + type: array + items: + type: object + properties: + ipBlock: + type: object + properties: + cidr: + type: string + except: + type: array + items: + type: string + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + podSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: List of peers which should be able to connect to this listener. Peers in this list are combined using a logical OR operation. If this field is empty or missing, all connections will be allowed for this listener. 
If this field is present and contains at least one item, the listener only allows the traffic which matches at least one item in this list. + required: + - name + - port + - type + - tls + description: Configures listeners of Kafka brokers. + config: + x-kubernetes-preserve-unknown-fields: true + type: object + description: 'Kafka broker config properties with the following prefixes cannot be set: listeners, advertised., broker., listener., host.name, port, inter.broker.listener.name, sasl., ssl., security., password., principal.builder.class, log.dir, zookeeper.connect, zookeeper.set.acl, zookeeper.ssl, zookeeper.clientCnxnSocket, authorizer., super.user, cruise.control.metrics.topic, cruise.control.metrics.reporter.bootstrap.servers (with the exception of: zookeeper.connection.timeout.ms, ssl.cipher.suites, ssl.protocol, ssl.enabled.protocols,cruise.control.metrics.topic.num.partitions, cruise.control.metrics.topic.replication.factor, cruise.control.metrics.topic.retention.ms,cruise.control.metrics.topic.auto.create.retries, cruise.control.metrics.topic.auto.create.timeout.ms,cruise.control.metrics.topic.min.insync.replicas).' + storage: + type: object + properties: + class: + type: string + description: The storage class to use for dynamic volume allocation. + deleteClaim: + type: boolean + description: Specifies if the persistent volume claim has to be deleted when the cluster is un-deployed. + id: + type: integer + minimum: 0 + description: Storage identification number. It is mandatory only for storage volumes defined in a storage of type 'jbod'. + overrides: + type: array + items: + type: object + properties: + class: + type: string + description: The storage class to use for dynamic volume allocation for this broker. + broker: + type: integer + description: Id of the kafka broker (broker identifier). + description: Overrides for individual brokers. The `overrides` field allows to specify a different configuration for different brokers. 
+ selector: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Specifies a specific persistent volume to use. It contains key:value pairs representing labels for selecting such a volume. + size: + type: string + description: When type=persistent-claim, defines the size of the persistent volume claim (i.e 1Gi). Mandatory when type=persistent-claim. + sizeLimit: + type: string + pattern: ^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ + description: When type=ephemeral, defines the total amount of local storage required for this EmptyDir volume (for example 1Gi). + type: + type: string + enum: + - ephemeral + - persistent-claim + - jbod + description: Storage type, must be either 'ephemeral', 'persistent-claim', or 'jbod'. + volumes: + type: array + items: + type: object + properties: + class: + type: string + description: The storage class to use for dynamic volume allocation. + deleteClaim: + type: boolean + description: Specifies if the persistent volume claim has to be deleted when the cluster is un-deployed. + id: + type: integer + minimum: 0 + description: Storage identification number. It is mandatory only for storage volumes defined in a storage of type 'jbod'. + overrides: + type: array + items: + type: object + properties: + class: + type: string + description: The storage class to use for dynamic volume allocation for this broker. + broker: + type: integer + description: Id of the kafka broker (broker identifier). + description: Overrides for individual brokers. The `overrides` field allows to specify a different configuration for different brokers. + selector: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Specifies a specific persistent volume to use. It contains key:value pairs representing labels for selecting such a volume. + size: + type: string + description: When type=persistent-claim, defines the size of the persistent volume claim (i.e 1Gi). Mandatory when type=persistent-claim. 
+ sizeLimit: + type: string + pattern: ^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ + description: When type=ephemeral, defines the total amount of local storage required for this EmptyDir volume (for example 1Gi). + type: + type: string + enum: + - ephemeral + - persistent-claim + description: Storage type, must be either 'ephemeral' or 'persistent-claim'. + required: + - type + description: List of volumes as Storage objects representing the JBOD disks array. + required: + - type + description: Storage configuration (disk). Cannot be updated. + authorization: + type: object + properties: + allowOnError: + type: boolean + description: Defines whether a Kafka client should be allowed or denied by default when the authorizer fails to query the Open Policy Agent, for example, when it is temporarily unavailable). Defaults to `false` - all actions will be denied. + authorizerClass: + type: string + description: Authorization implementation class, which must be available in classpath. + clientId: + type: string + description: OAuth Client ID which the Kafka client can use to authenticate against the OAuth server and use the token endpoint URI. + delegateToKafkaAcls: + type: boolean + description: Whether authorization decision should be delegated to the 'Simple' authorizer if DENIED by Keycloak Authorization Services policies. Default value is `false`. + disableTlsHostnameVerification: + type: boolean + description: Enable or disable TLS hostname verification. Default value is `false`. + expireAfterMs: + type: integer + description: The expiration of the records kept in the local cache to avoid querying the Open Policy Agent for every request. Defines how often the cached authorization decisions are reloaded from the Open Policy Agent server. In milliseconds. Defaults to `3600000`. + grantsRefreshPeriodSeconds: + type: integer + minimum: 0 + description: The time between two consecutive grants refresh runs in seconds. The default value is 60. 
+ grantsRefreshPoolSize: + type: integer + minimum: 1 + description: The number of threads to use to refresh grants for active sessions. The more threads, the more parallelism, so the sooner the job completes. However, using more threads places a heavier load on the authorization server. The default value is 5. + initialCacheCapacity: + type: integer + description: Initial capacity of the local cache used by the authorizer to avoid querying the Open Policy Agent for every request Defaults to `5000`. + maximumCacheSize: + type: integer + description: Maximum capacity of the local cache used by the authorizer to avoid querying the Open Policy Agent for every request. Defaults to `50000`. + superUsers: + type: array + items: + type: string + description: List of super users, which are user principals with unlimited access rights. + supportsAdminApi: + type: boolean + description: Indicates whether the custom authorizer supports the APIs for managing ACLs using the Kafka Admin API. Defaults to `false`. + tlsTrustedCertificates: + type: array + items: + type: object + properties: + certificate: + type: string + description: The name of the file certificate in the Secret. + secretName: + type: string + description: The name of the Secret containing the certificate. + required: + - certificate + - secretName + description: Trusted certificates for TLS connection to the OAuth server. + tokenEndpointUri: + type: string + description: Authorization server token endpoint URI. + type: + type: string + enum: + - simple + - opa + - keycloak + - custom + description: Authorization type. Currently, the supported types are `simple`, `keycloak`, `opa` and `custom`. `simple` authorization type uses Kafka's `kafka.security.authorizer.AclAuthorizer` class for authorization. `keycloak` authorization type uses Keycloak Authorization Services for authorization. 
`opa` authorization type uses Open Policy Agent based authorization.`custom` authorization type uses user-provided implementation for authorization. + url: + type: string + example: http://opa:8181/v1/data/kafka/authz/allow + description: The URL used to connect to the Open Policy Agent server. The URL has to include the policy which will be queried by the authorizer. This option is required. + required: + - type + description: Authorization configuration for Kafka brokers. + rack: + type: object + properties: + topologyKey: + type: string + example: topology.kubernetes.io/zone + description: A key that matches labels assigned to the Kubernetes cluster nodes. The value of the label is used to set the broker's `broker.rack` config and `client.rack` in Kafka Connect. + required: + - topologyKey + description: Configuration of the `broker.rack` broker config. + brokerRackInitImage: + type: string + description: The image of the init container used for initializing the `broker.rack`. + livenessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0. + periodSeconds: + type: integer + minimum: 1 + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1. + description: Pod liveness checking. 
+ readinessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0. + periodSeconds: + type: integer + minimum: 1 + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1. + description: Pod readiness checking. + jvmOptions: + type: object + properties: + "-XX": + x-kubernetes-preserve-unknown-fields: true + type: object + description: A map of -XX options to the JVM. + "-Xms": + type: string + pattern: ^[0-9]+[mMgG]?$ + description: -Xms option to to the JVM. + "-Xmx": + type: string + pattern: ^[0-9]+[mMgG]?$ + description: -Xmx option to to the JVM. + gcLoggingEnabled: + type: boolean + description: Specifies whether the Garbage Collection logging is enabled. The default is false. + javaSystemProperties: + type: array + items: + type: object + properties: + name: + type: string + description: The system property name. + value: + type: string + description: The system property value. + description: A map of additional system properties which will be passed using the `-D` option to the JVM. + description: JVM Options for pods. + jmxOptions: + type: object + properties: + authentication: + type: object + properties: + type: + type: string + enum: + - password + description: Authentication type. 
Currently the only supported types are `password`.`password` type creates a username and protected port with no TLS. + required: + - type + description: Authentication configuration for connecting to the JMX port. + description: JMX Options for Kafka brokers. + resources: + type: object + properties: + limits: + x-kubernetes-preserve-unknown-fields: true + type: object + requests: + x-kubernetes-preserve-unknown-fields: true + type: object + description: CPU and memory resources to reserve. + metricsConfig: + type: object + properties: + type: + type: string + enum: + - jmxPrometheusExporter + description: Metrics type. Only 'jmxPrometheusExporter' supported currently. + valueFrom: + type: object + properties: + configMapKeyRef: + type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean + description: Reference to the key in the ConfigMap containing the configuration. + description: ConfigMap entry where the Prometheus JMX Exporter configuration is stored. For details of the structure of this configuration, see the {JMXExporter}. + required: + - type + - valueFrom + description: Metrics configuration. + logging: + type: object + properties: + loggers: + x-kubernetes-preserve-unknown-fields: true + type: object + description: A Map from logger name to logger level. + type: + type: string + enum: + - inline + - external + description: Logging type, must be either 'inline' or 'external'. + valueFrom: + type: object + properties: + configMapKeyRef: + type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean + description: Reference to the key in the ConfigMap containing the configuration. + description: '`ConfigMap` entry where the logging configuration is stored. ' + required: + - type + description: Logging configuration for Kafka. 
+ template: + type: object + properties: + statefulset: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + podManagementPolicy: + type: string + enum: + - OrderedReady + - Parallel + description: PodManagementPolicy which will be used for this StatefulSet. Valid values are `Parallel` and `OrderedReady`. Defaults to `Parallel`. + description: Template for Kafka `StatefulSet`. + pod: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + imagePullSecrets: + type: array + items: + type: object + properties: + name: + type: string + description: List of references to secrets in the same namespace to use for pulling any of the images used by this Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` environment variable in Cluster Operator and the `imagePullSecrets` option are specified, only the `imagePullSecrets` variable is used and the `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored. 
+ securityContext: + type: object + properties: + fsGroup: + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + supplementalGroups: + type: array + items: + type: integer + sysctls: + type: array + items: + type: object + properties: + name: + type: string + value: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Configures pod-level security attributes and common container settings. + terminationGracePeriodSeconds: + type: integer + minimum: 0 + description: The grace period is the duration in seconds after the processes running in the pod are sent a termination signal, and the time when the processes are forcibly halted with a kill signal. Set this value to longer than the expected cleanup time for your process. Value must be a non-negative integer. A zero value indicates delete immediately. You might need to increase the grace period for very large Kafka clusters, so that the Kafka brokers have enough time to transfer their work to another broker before they are terminated. Defaults to 30 seconds. 
+ affinity: + type: object + properties: + nodeAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + preference: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: object + properties: + nodeSelectorTerms: + type: array + items: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + podAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + 
requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + podAntiAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + 
x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + description: The pod's affinity rules. + tolerations: + type: array + items: + type: object + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + type: integer + value: + type: string + description: The pod's tolerations. + priorityClassName: + type: string + description: The name of the priority class used to assign priority to the pods. For more information about priority classes, see {K8sPriorityClass}. + schedulerName: + type: string + description: The name of the scheduler used to dispatch this `Pod`. If not specified, the default scheduler will be used. + hostAliases: + type: array + items: + type: object + properties: + hostnames: + type: array + items: + type: string + ip: + type: string + description: The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified. + tmpDirSizeLimit: + type: string + pattern: ^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ + description: Defines the total amount (for example `1Gi`) of local storage required for temporary EmptyDir volume (`/tmp`). Default value is `1Mi`. + enableServiceLinks: + type: boolean + description: Indicates whether information about services should be injected into Pod's environment variables. 
+ topologySpreadConstraints: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + maxSkew: + type: integer + topologyKey: + type: string + whenUnsatisfiable: + type: string + description: The pod's topology spread constraints. + description: Template for Kafka `Pods`. + bootstrapService: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + ipFamilyPolicy: + type: string + enum: + - SingleStack + - PreferDualStack + - RequireDualStack + description: Specifies the IP Family Policy used by the service. Available options are `SingleStack`, `PreferDualStack` and `RequireDualStack`. `SingleStack` is for a single IP family. `PreferDualStack` is for two IP families on dual-stack configured clusters or a single IP family on single-stack clusters. `RequireDualStack` fails unless there are two IP families on dual-stack configured clusters. If unspecified, Kubernetes will choose the default value based on the service type. Available on Kubernetes 1.20 and newer. + ipFamilies: + type: array + items: + type: string + enum: + - IPv4 + - IPv6 + description: Specifies the IP Families used by the service. Available options are `IPv4` and `IPv6. 
If unspecified, Kubernetes will choose the default value based on the `ipFamilyPolicy` setting. Available on Kubernetes 1.20 and newer. + description: Template for Kafka bootstrap `Service`. + brokersService: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + ipFamilyPolicy: + type: string + enum: + - SingleStack + - PreferDualStack + - RequireDualStack + description: Specifies the IP Family Policy used by the service. Available options are `SingleStack`, `PreferDualStack` and `RequireDualStack`. `SingleStack` is for a single IP family. `PreferDualStack` is for two IP families on dual-stack configured clusters or a single IP family on single-stack clusters. `RequireDualStack` fails unless there are two IP families on dual-stack configured clusters. If unspecified, Kubernetes will choose the default value based on the service type. Available on Kubernetes 1.20 and newer. + ipFamilies: + type: array + items: + type: string + enum: + - IPv4 + - IPv6 + description: Specifies the IP Families used by the service. Available options are `IPv4` and `IPv6. If unspecified, Kubernetes will choose the default value based on the `ipFamilyPolicy` setting. Available on Kubernetes 1.20 and newer. + description: Template for Kafka broker `Service`. 
+ externalBootstrapService: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + description: Template for Kafka external bootstrap `Service`. + perPodService: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + description: Template for Kafka per-pod `Services` used for access from outside of Kubernetes. + externalBootstrapRoute: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. 
+ description: Template for Kafka external bootstrap `Route`. + perPodRoute: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + description: Template for Kafka per-pod `Routes` used for access from outside of OpenShift. + externalBootstrapIngress: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + description: Template for Kafka external bootstrap `Ingress`. + perPodIngress: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. 
+ description: Metadata applied to the resource. + description: Template for Kafka per-pod `Ingress` used for access from outside of Kubernetes. + persistentVolumeClaim: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + description: Template for all Kafka `PersistentVolumeClaims`. + podDisruptionBudget: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata to apply to the `PodDisruptionBudgetTemplate` resource. + maxUnavailable: + type: integer + minimum: 0 + description: Maximum number of unavailable pods to allow automatic Pod eviction. A Pod eviction is allowed when the `maxUnavailable` number of pods or fewer are unavailable after the eviction. Setting this value to 0 prevents all voluntary evictions, so the pods must be evicted manually. Defaults to 1. + description: Template for Kafka `PodDisruptionBudget`. 
+ kafkaContainer: + type: object + properties: + env: + type: array + items: + type: object + properties: + name: + type: string + description: The environment variable key. + value: + type: string + description: The environment variable value. + description: Environment variables which should be applied to the container. + securityContext: + type: object + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + type: object + properties: + add: + type: array + items: + type: string + drop: + type: array + items: + type: string + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Security context for the container. + description: Template for the Kafka broker container. + initContainer: + type: object + properties: + env: + type: array + items: + type: object + properties: + name: + type: string + description: The environment variable key. + value: + type: string + description: The environment variable value. + description: Environment variables which should be applied to the container. 
+ securityContext: + type: object + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + type: object + properties: + add: + type: array + items: + type: string + drop: + type: array + items: + type: string + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Security context for the container. + description: Template for the Kafka init container. + clusterCaCert: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + description: Template for Secret with Kafka Cluster certificate public key. + serviceAccount: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. 
+ annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + description: Template for the Kafka service account. + jmxSecret: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + description: Template for Secret of the Kafka Cluster JMX authentication. + clusterRoleBinding: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + description: Template for the Kafka ClusterRoleBinding. + description: Template for Kafka cluster resources. The template allows users to specify how are the `StatefulSet`, `Pods` and `Services` generated. + required: + - replicas + - listeners + - storage + description: Configuration of the Kafka cluster. 
+ zookeeper: + type: object + properties: + replicas: + type: integer + minimum: 1 + description: The number of pods in the cluster. + image: + type: string + description: The docker image for the pods. + storage: + type: object + properties: + class: + type: string + description: The storage class to use for dynamic volume allocation. + deleteClaim: + type: boolean + description: Specifies if the persistent volume claim has to be deleted when the cluster is un-deployed. + id: + type: integer + minimum: 0 + description: Storage identification number. It is mandatory only for storage volumes defined in a storage of type 'jbod'. + overrides: + type: array + items: + type: object + properties: + class: + type: string + description: The storage class to use for dynamic volume allocation for this broker. + broker: + type: integer + description: Id of the kafka broker (broker identifier). + description: Overrides for individual brokers. The `overrides` field allows to specify a different configuration for different brokers. + selector: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Specifies a specific persistent volume to use. It contains key:value pairs representing labels for selecting such a volume. + size: + type: string + description: When type=persistent-claim, defines the size of the persistent volume claim (i.e 1Gi). Mandatory when type=persistent-claim. + sizeLimit: + type: string + pattern: ^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ + description: When type=ephemeral, defines the total amount of local storage required for this EmptyDir volume (for example 1Gi). + type: + type: string + enum: + - ephemeral + - persistent-claim + description: Storage type, must be either 'ephemeral' or 'persistent-claim'. + required: + - type + description: Storage configuration (disk). Cannot be updated. + config: + x-kubernetes-preserve-unknown-fields: true + type: object + description: 'The ZooKeeper broker config. 
Properties with the following prefixes cannot be set: server., dataDir, dataLogDir, clientPort, authProvider, quorum.auth, requireClientAuthScheme, snapshot.trust.empty, standaloneEnabled, reconfigEnabled, 4lw.commands.whitelist, secureClientPort, ssl., serverCnxnFactory, sslQuorum (with the exception of: ssl.protocol, ssl.quorum.protocol, ssl.enabledProtocols, ssl.quorum.enabledProtocols, ssl.ciphersuites, ssl.quorum.ciphersuites, ssl.hostnameVerification, ssl.quorum.hostnameVerification).' + livenessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0. + periodSeconds: + type: integer + minimum: 1 + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1. + description: Pod liveness checking. + readinessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0. + periodSeconds: + type: integer + minimum: 1 + description: How often (in seconds) to perform the probe. Default to 10 seconds. 
Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1. + description: Pod readiness checking. + jvmOptions: + type: object + properties: + "-XX": + x-kubernetes-preserve-unknown-fields: true + type: object + description: A map of -XX options to the JVM. + "-Xms": + type: string + pattern: ^[0-9]+[mMgG]?$ + description: -Xms option to to the JVM. + "-Xmx": + type: string + pattern: ^[0-9]+[mMgG]?$ + description: -Xmx option to to the JVM. + gcLoggingEnabled: + type: boolean + description: Specifies whether the Garbage Collection logging is enabled. The default is false. + javaSystemProperties: + type: array + items: + type: object + properties: + name: + type: string + description: The system property name. + value: + type: string + description: The system property value. + description: A map of additional system properties which will be passed using the `-D` option to the JVM. + description: JVM Options for pods. + jmxOptions: + type: object + properties: + authentication: + type: object + properties: + type: + type: string + enum: + - password + description: Authentication type. Currently the only supported types are `password`.`password` type creates a username and protected port with no TLS. + required: + - type + description: Authentication configuration for connecting to the JMX port. + description: JMX Options for Zookeeper nodes. + resources: + type: object + properties: + limits: + x-kubernetes-preserve-unknown-fields: true + type: object + requests: + x-kubernetes-preserve-unknown-fields: true + type: object + description: CPU and memory resources to reserve. 
+ metricsConfig: + type: object + properties: + type: + type: string + enum: + - jmxPrometheusExporter + description: Metrics type. Only 'jmxPrometheusExporter' supported currently. + valueFrom: + type: object + properties: + configMapKeyRef: + type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean + description: Reference to the key in the ConfigMap containing the configuration. + description: ConfigMap entry where the Prometheus JMX Exporter configuration is stored. For details of the structure of this configuration, see the {JMXExporter}. + required: + - type + - valueFrom + description: Metrics configuration. + logging: + type: object + properties: + loggers: + x-kubernetes-preserve-unknown-fields: true + type: object + description: A Map from logger name to logger level. + type: + type: string + enum: + - inline + - external + description: Logging type, must be either 'inline' or 'external'. + valueFrom: + type: object + properties: + configMapKeyRef: + type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean + description: Reference to the key in the ConfigMap containing the configuration. + description: '`ConfigMap` entry where the logging configuration is stored. ' + required: + - type + description: Logging configuration for ZooKeeper. + template: + type: object + properties: + statefulset: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. 
+ podManagementPolicy: + type: string + enum: + - OrderedReady + - Parallel + description: PodManagementPolicy which will be used for this StatefulSet. Valid values are `Parallel` and `OrderedReady`. Defaults to `Parallel`. + description: Template for ZooKeeper `StatefulSet`. + pod: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + imagePullSecrets: + type: array + items: + type: object + properties: + name: + type: string + description: List of references to secrets in the same namespace to use for pulling any of the images used by this Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` environment variable in Cluster Operator and the `imagePullSecrets` option are specified, only the `imagePullSecrets` variable is used and the `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored. 
+ securityContext: + type: object + properties: + fsGroup: + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + supplementalGroups: + type: array + items: + type: integer + sysctls: + type: array + items: + type: object + properties: + name: + type: string + value: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Configures pod-level security attributes and common container settings. + terminationGracePeriodSeconds: + type: integer + minimum: 0 + description: The grace period is the duration in seconds after the processes running in the pod are sent a termination signal, and the time when the processes are forcibly halted with a kill signal. Set this value to longer than the expected cleanup time for your process. Value must be a non-negative integer. A zero value indicates delete immediately. You might need to increase the grace period for very large Kafka clusters, so that the Kafka brokers have enough time to transfer their work to another broker before they are terminated. Defaults to 30 seconds. 
+ affinity: + type: object + properties: + nodeAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + preference: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: object + properties: + nodeSelectorTerms: + type: array + items: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + podAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + 
requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + podAntiAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + 
x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + description: The pod's affinity rules. + tolerations: + type: array + items: + type: object + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + type: integer + value: + type: string + description: The pod's tolerations. + priorityClassName: + type: string + description: The name of the priority class used to assign priority to the pods. For more information about priority classes, see {K8sPriorityClass}. + schedulerName: + type: string + description: The name of the scheduler used to dispatch this `Pod`. If not specified, the default scheduler will be used. + hostAliases: + type: array + items: + type: object + properties: + hostnames: + type: array + items: + type: string + ip: + type: string + description: The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified. + tmpDirSizeLimit: + type: string + pattern: ^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ + description: Defines the total amount (for example `1Gi`) of local storage required for temporary EmptyDir volume (`/tmp`). Default value is `1Mi`. + enableServiceLinks: + type: boolean + description: Indicates whether information about services should be injected into Pod's environment variables. 
+ topologySpreadConstraints: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + maxSkew: + type: integer + topologyKey: + type: string + whenUnsatisfiable: + type: string + description: The pod's topology spread constraints. + description: Template for ZooKeeper `Pods`. + clientService: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + ipFamilyPolicy: + type: string + enum: + - SingleStack + - PreferDualStack + - RequireDualStack + description: Specifies the IP Family Policy used by the service. Available options are `SingleStack`, `PreferDualStack` and `RequireDualStack`. `SingleStack` is for a single IP family. `PreferDualStack` is for two IP families on dual-stack configured clusters or a single IP family on single-stack clusters. `RequireDualStack` fails unless there are two IP families on dual-stack configured clusters. If unspecified, Kubernetes will choose the default value based on the service type. Available on Kubernetes 1.20 and newer. + ipFamilies: + type: array + items: + type: string + enum: + - IPv4 + - IPv6 + description: Specifies the IP Families used by the service. Available options are `IPv4` and `IPv6. 
If unspecified, Kubernetes will choose the default value based on the `ipFamilyPolicy` setting. Available on Kubernetes 1.20 and newer. + description: Template for ZooKeeper client `Service`. + nodesService: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + ipFamilyPolicy: + type: string + enum: + - SingleStack + - PreferDualStack + - RequireDualStack + description: Specifies the IP Family Policy used by the service. Available options are `SingleStack`, `PreferDualStack` and `RequireDualStack`. `SingleStack` is for a single IP family. `PreferDualStack` is for two IP families on dual-stack configured clusters or a single IP family on single-stack clusters. `RequireDualStack` fails unless there are two IP families on dual-stack configured clusters. If unspecified, Kubernetes will choose the default value based on the service type. Available on Kubernetes 1.20 and newer. + ipFamilies: + type: array + items: + type: string + enum: + - IPv4 + - IPv6 + description: Specifies the IP Families used by the service. Available options are `IPv4` and `IPv6. If unspecified, Kubernetes will choose the default value based on the `ipFamilyPolicy` setting. Available on Kubernetes 1.20 and newer. + description: Template for ZooKeeper nodes `Service`. 
+ persistentVolumeClaim: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + description: Template for all ZooKeeper `PersistentVolumeClaims`. + podDisruptionBudget: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata to apply to the `PodDisruptionBudgetTemplate` resource. + maxUnavailable: + type: integer + minimum: 0 + description: Maximum number of unavailable pods to allow automatic Pod eviction. A Pod eviction is allowed when the `maxUnavailable` number of pods or fewer are unavailable after the eviction. Setting this value to 0 prevents all voluntary evictions, so the pods must be evicted manually. Defaults to 1. + description: Template for ZooKeeper `PodDisruptionBudget`. + zookeeperContainer: + type: object + properties: + env: + type: array + items: + type: object + properties: + name: + type: string + description: The environment variable key. + value: + type: string + description: The environment variable value. 
+ description: Environment variables which should be applied to the container. + securityContext: + type: object + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + type: object + properties: + add: + type: array + items: + type: string + drop: + type: array + items: + type: string + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Security context for the container. + description: Template for the ZooKeeper container. + serviceAccount: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + description: Template for the ZooKeeper service account. + jmxSecret: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. 
+ annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + description: Template for Secret of the Zookeeper Cluster JMX authentication. + description: Template for ZooKeeper cluster resources. The template allows users to specify how are the `StatefulSet`, `Pods` and `Services` generated. + required: + - replicas + - storage + description: Configuration of the ZooKeeper cluster. + entityOperator: + type: object + properties: + topicOperator: + type: object + properties: + watchedNamespace: + type: string + description: The namespace the Topic Operator should watch. + image: + type: string + description: The image to use for the Topic Operator. + reconciliationIntervalSeconds: + type: integer + minimum: 0 + description: Interval between periodic reconciliations. + zookeeperSessionTimeoutSeconds: + type: integer + minimum: 0 + description: Timeout for the ZooKeeper session. + startupProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0. + periodSeconds: + type: integer + minimum: 1 + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: The timeout for each attempted health check. 
Default to 5 seconds. Minimum value is 1. + description: Pod startup checking. + livenessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0. + periodSeconds: + type: integer + minimum: 1 + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1. + description: Pod liveness checking. + readinessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0. + periodSeconds: + type: integer + minimum: 1 + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1. + description: Pod readiness checking. 
+ resources: + type: object + properties: + limits: + x-kubernetes-preserve-unknown-fields: true + type: object + requests: + x-kubernetes-preserve-unknown-fields: true + type: object + description: CPU and memory resources to reserve. + topicMetadataMaxAttempts: + type: integer + minimum: 0 + description: The number of attempts at getting topic metadata. + logging: + type: object + properties: + loggers: + x-kubernetes-preserve-unknown-fields: true + type: object + description: A Map from logger name to logger level. + type: + type: string + enum: + - inline + - external + description: Logging type, must be either 'inline' or 'external'. + valueFrom: + type: object + properties: + configMapKeyRef: + type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean + description: Reference to the key in the ConfigMap containing the configuration. + description: '`ConfigMap` entry where the logging configuration is stored. ' + required: + - type + description: Logging configuration. + jvmOptions: + type: object + properties: + "-XX": + x-kubernetes-preserve-unknown-fields: true + type: object + description: A map of -XX options to the JVM. + "-Xms": + type: string + pattern: ^[0-9]+[mMgG]?$ + description: -Xms option to to the JVM. + "-Xmx": + type: string + pattern: ^[0-9]+[mMgG]?$ + description: -Xmx option to to the JVM. + gcLoggingEnabled: + type: boolean + description: Specifies whether the Garbage Collection logging is enabled. The default is false. + javaSystemProperties: + type: array + items: + type: object + properties: + name: + type: string + description: The system property name. + value: + type: string + description: The system property value. + description: A map of additional system properties which will be passed using the `-D` option to the JVM. + description: JVM Options for pods. + description: Configuration of the Topic Operator. 
+ userOperator: + type: object + properties: + watchedNamespace: + type: string + description: The namespace the User Operator should watch. + image: + type: string + description: The image to use for the User Operator. + reconciliationIntervalSeconds: + type: integer + minimum: 0 + description: Interval between periodic reconciliations. + zookeeperSessionTimeoutSeconds: + type: integer + minimum: 0 + description: Timeout for the ZooKeeper session. + secretPrefix: + type: string + description: The prefix that will be added to the KafkaUser name to be used as the Secret name. + livenessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0. + periodSeconds: + type: integer + minimum: 1 + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1. + description: Pod liveness checking. + readinessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0. 
+ periodSeconds: + type: integer + minimum: 1 + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1. + description: Pod readiness checking. + resources: + type: object + properties: + limits: + x-kubernetes-preserve-unknown-fields: true + type: object + requests: + x-kubernetes-preserve-unknown-fields: true + type: object + description: CPU and memory resources to reserve. + logging: + type: object + properties: + loggers: + x-kubernetes-preserve-unknown-fields: true + type: object + description: A Map from logger name to logger level. + type: + type: string + enum: + - inline + - external + description: Logging type, must be either 'inline' or 'external'. + valueFrom: + type: object + properties: + configMapKeyRef: + type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean + description: Reference to the key in the ConfigMap containing the configuration. + description: '`ConfigMap` entry where the logging configuration is stored. ' + required: + - type + description: Logging configuration. + jvmOptions: + type: object + properties: + "-XX": + x-kubernetes-preserve-unknown-fields: true + type: object + description: A map of -XX options to the JVM. + "-Xms": + type: string + pattern: ^[0-9]+[mMgG]?$ + description: -Xms option to to the JVM. + "-Xmx": + type: string + pattern: ^[0-9]+[mMgG]?$ + description: -Xmx option to to the JVM. + gcLoggingEnabled: + type: boolean + description: Specifies whether the Garbage Collection logging is enabled. The default is false. 
+ javaSystemProperties: + type: array + items: + type: object + properties: + name: + type: string + description: The system property name. + value: + type: string + description: The system property value. + description: A map of additional system properties which will be passed using the `-D` option to the JVM. + description: JVM Options for pods. + description: Configuration of the User Operator. + tlsSidecar: + type: object + properties: + image: + type: string + description: The docker image for the container. + livenessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0. + periodSeconds: + type: integer + minimum: 1 + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1. + description: Pod liveness checking. + logLevel: + type: string + enum: + - emerg + - alert + - crit + - err + - warning + - notice + - info + - debug + description: The log level for the TLS sidecar. Default value is `notice`. + readinessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. 
+ initialDelaySeconds: + type: integer + minimum: 0 + description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0. + periodSeconds: + type: integer + minimum: 1 + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1. + description: Pod readiness checking. + resources: + type: object + properties: + limits: + x-kubernetes-preserve-unknown-fields: true + type: object + requests: + x-kubernetes-preserve-unknown-fields: true + type: object + description: CPU and memory resources to reserve. + description: TLS sidecar configuration. + template: + type: object + properties: + deployment: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + description: Template for Entity Operator `Deployment`. + pod: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. 
+ annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + imagePullSecrets: + type: array + items: + type: object + properties: + name: + type: string + description: List of references to secrets in the same namespace to use for pulling any of the images used by this Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` environment variable in Cluster Operator and the `imagePullSecrets` option are specified, only the `imagePullSecrets` variable is used and the `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored. + securityContext: + type: object + properties: + fsGroup: + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + supplementalGroups: + type: array + items: + type: integer + sysctls: + type: array + items: + type: object + properties: + name: + type: string + value: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Configures pod-level security attributes and common container settings. + terminationGracePeriodSeconds: + type: integer + minimum: 0 + description: The grace period is the duration in seconds after the processes running in the pod are sent a termination signal, and the time when the processes are forcibly halted with a kill signal. Set this value to longer than the expected cleanup time for your process. 
Value must be a non-negative integer. A zero value indicates delete immediately. You might need to increase the grace period for very large Kafka clusters, so that the Kafka brokers have enough time to transfer their work to another broker before they are terminated. Defaults to 30 seconds. + affinity: + type: object + properties: + nodeAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + preference: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: object + properties: + nodeSelectorTerms: + type: array + items: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + podAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + 
operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + podAntiAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: 
object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + description: The pod's affinity rules. + tolerations: + type: array + items: + type: object + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + type: integer + value: + type: string + description: The pod's tolerations. + priorityClassName: + type: string + description: The name of the priority class used to assign priority to the pods. For more information about priority classes, see {K8sPriorityClass}. + schedulerName: + type: string + description: The name of the scheduler used to dispatch this `Pod`. If not specified, the default scheduler will be used. + hostAliases: + type: array + items: + type: object + properties: + hostnames: + type: array + items: + type: string + ip: + type: string + description: The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified. + tmpDirSizeLimit: + type: string + pattern: ^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ + description: Defines the total amount (for example `1Gi`) of local storage required for temporary EmptyDir volume (`/tmp`). Default value is `1Mi`. + enableServiceLinks: + type: boolean + description: Indicates whether information about services should be injected into Pod's environment variables. 
+ topologySpreadConstraints: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + maxSkew: + type: integer + topologyKey: + type: string + whenUnsatisfiable: + type: string + description: The pod's topology spread constraints. + description: Template for Entity Operator `Pods`. + topicOperatorContainer: + type: object + properties: + env: + type: array + items: + type: object + properties: + name: + type: string + description: The environment variable key. + value: + type: string + description: The environment variable value. + description: Environment variables which should be applied to the container. + securityContext: + type: object + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + type: object + properties: + add: + type: array + items: + type: string + drop: + type: array + items: + type: string + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Security context for the container. + description: Template for the Entity Topic Operator container. 
+ userOperatorContainer: + type: object + properties: + env: + type: array + items: + type: object + properties: + name: + type: string + description: The environment variable key. + value: + type: string + description: The environment variable value. + description: Environment variables which should be applied to the container. + securityContext: + type: object + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + type: object + properties: + add: + type: array + items: + type: string + drop: + type: array + items: + type: string + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Security context for the container. + description: Template for the Entity User Operator container. + tlsSidecarContainer: + type: object + properties: + env: + type: array + items: + type: object + properties: + name: + type: string + description: The environment variable key. + value: + type: string + description: The environment variable value. + description: Environment variables which should be applied to the container. 
+ securityContext: + type: object + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + type: object + properties: + add: + type: array + items: + type: string + drop: + type: array + items: + type: string + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Security context for the container. + description: Template for the Entity Operator TLS sidecar container. + serviceAccount: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + description: Template for the Entity Operator service account. + description: Template for Entity Operator resources. The template allows users to specify how is the `Deployment` and `Pods` generated. + description: Configuration of the Entity Operator. 
+ clusterCa: + type: object + properties: + generateCertificateAuthority: + type: boolean + description: If true then Certificate Authority certificates will be generated automatically. Otherwise the user will need to provide a Secret with the CA certificate. Default is true. + generateSecretOwnerReference: + type: boolean + description: If `true`, the Cluster and Client CA Secrets are configured with the `ownerReference` set to the `Kafka` resource. If the `Kafka` resource is deleted when `true`, the CA Secrets are also deleted. If `false`, the `ownerReference` is disabled. If the `Kafka` resource is deleted when `false`, the CA Secrets are retained and available for reuse. Default is `true`. + validityDays: + type: integer + minimum: 1 + description: The number of days generated certificates should be valid for. The default is 365. + renewalDays: + type: integer + minimum: 1 + description: The number of days in the certificate renewal period. This is the number of days before the a certificate expires during which renewal actions may be performed. When `generateCertificateAuthority` is true, this will cause the generation of a new certificate. When `generateCertificateAuthority` is true, this will cause extra logging at WARN level about the pending certificate expiry. Default is 30. + certificateExpirationPolicy: + type: string + enum: + - renew-certificate + - replace-key + description: How should CA certificate expiration be handled when `generateCertificateAuthority=true`. The default is for a new CA certificate to be generated reusing the existing private key. + description: Configuration of the cluster certificate authority. + clientsCa: + type: object + properties: + generateCertificateAuthority: + type: boolean + description: If true then Certificate Authority certificates will be generated automatically. Otherwise the user will need to provide a Secret with the CA certificate. Default is true. 
+ generateSecretOwnerReference: + type: boolean + description: If `true`, the Cluster and Client CA Secrets are configured with the `ownerReference` set to the `Kafka` resource. If the `Kafka` resource is deleted when `true`, the CA Secrets are also deleted. If `false`, the `ownerReference` is disabled. If the `Kafka` resource is deleted when `false`, the CA Secrets are retained and available for reuse. Default is `true`. + validityDays: + type: integer + minimum: 1 + description: The number of days generated certificates should be valid for. The default is 365. + renewalDays: + type: integer + minimum: 1 + description: The number of days in the certificate renewal period. This is the number of days before the a certificate expires during which renewal actions may be performed. When `generateCertificateAuthority` is true, this will cause the generation of a new certificate. When `generateCertificateAuthority` is true, this will cause extra logging at WARN level about the pending certificate expiry. Default is 30. + certificateExpirationPolicy: + type: string + enum: + - renew-certificate + - replace-key + description: How should CA certificate expiration be handled when `generateCertificateAuthority=true`. The default is for a new CA certificate to be generated reusing the existing private key. + description: Configuration of the clients certificate authority. + cruiseControl: + type: object + properties: + image: + type: string + description: The docker image for the pods. + tlsSidecar: + type: object + properties: + image: + type: string + description: The docker image for the container. + livenessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: The initial delay before first the health is first checked. 
Default to 15 seconds. Minimum value is 0. + periodSeconds: + type: integer + minimum: 1 + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1. + description: Pod liveness checking. + logLevel: + type: string + enum: + - emerg + - alert + - crit + - err + - warning + - notice + - info + - debug + description: The log level for the TLS sidecar. Default value is `notice`. + readinessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0. + periodSeconds: + type: integer + minimum: 1 + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1. + description: Pod readiness checking. + resources: + type: object + properties: + limits: + x-kubernetes-preserve-unknown-fields: true + type: object + requests: + x-kubernetes-preserve-unknown-fields: true + type: object + description: CPU and memory resources to reserve. 
+ description: TLS sidecar configuration. + resources: + type: object + properties: + limits: + x-kubernetes-preserve-unknown-fields: true + type: object + requests: + x-kubernetes-preserve-unknown-fields: true + type: object + description: CPU and memory resources to reserve for the Cruise Control container. + livenessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0. + periodSeconds: + type: integer + minimum: 1 + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1. + description: Pod liveness checking for the Cruise Control container. + readinessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0. + periodSeconds: + type: integer + minimum: 1 + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. 
+ successThreshold: + type: integer + minimum: 1 + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1. + description: Pod readiness checking for the Cruise Control container. + jvmOptions: + type: object + properties: + "-XX": + x-kubernetes-preserve-unknown-fields: true + type: object + description: A map of -XX options to the JVM. + "-Xms": + type: string + pattern: ^[0-9]+[mMgG]?$ + description: -Xms option to to the JVM. + "-Xmx": + type: string + pattern: ^[0-9]+[mMgG]?$ + description: -Xmx option to to the JVM. + gcLoggingEnabled: + type: boolean + description: Specifies whether the Garbage Collection logging is enabled. The default is false. + javaSystemProperties: + type: array + items: + type: object + properties: + name: + type: string + description: The system property name. + value: + type: string + description: The system property value. + description: A map of additional system properties which will be passed using the `-D` option to the JVM. + description: JVM Options for the Cruise Control container. + logging: + type: object + properties: + loggers: + x-kubernetes-preserve-unknown-fields: true + type: object + description: A Map from logger name to logger level. + type: + type: string + enum: + - inline + - external + description: Logging type, must be either 'inline' or 'external'. + valueFrom: + type: object + properties: + configMapKeyRef: + type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean + description: Reference to the key in the ConfigMap containing the configuration. + description: '`ConfigMap` entry where the logging configuration is stored. ' + required: + - type + description: Logging configuration (Log4j 2) for Cruise Control. 
+ template: + type: object + properties: + deployment: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + description: Template for Cruise Control `Deployment`. + pod: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + imagePullSecrets: + type: array + items: + type: object + properties: + name: + type: string + description: List of references to secrets in the same namespace to use for pulling any of the images used by this Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` environment variable in Cluster Operator and the `imagePullSecrets` option are specified, only the `imagePullSecrets` variable is used and the `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored. 
+ securityContext: + type: object + properties: + fsGroup: + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + supplementalGroups: + type: array + items: + type: integer + sysctls: + type: array + items: + type: object + properties: + name: + type: string + value: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Configures pod-level security attributes and common container settings. + terminationGracePeriodSeconds: + type: integer + minimum: 0 + description: The grace period is the duration in seconds after the processes running in the pod are sent a termination signal, and the time when the processes are forcibly halted with a kill signal. Set this value to longer than the expected cleanup time for your process. Value must be a non-negative integer. A zero value indicates delete immediately. You might need to increase the grace period for very large Kafka clusters, so that the Kafka brokers have enough time to transfer their work to another broker before they are terminated. Defaults to 30 seconds. 
+ affinity: + type: object + properties: + nodeAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + preference: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: object + properties: + nodeSelectorTerms: + type: array + items: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + podAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + 
requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + podAntiAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + 
x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + description: The pod's affinity rules. + tolerations: + type: array + items: + type: object + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + type: integer + value: + type: string + description: The pod's tolerations. + priorityClassName: + type: string + description: The name of the priority class used to assign priority to the pods. For more information about priority classes, see {K8sPriorityClass}. + schedulerName: + type: string + description: The name of the scheduler used to dispatch this `Pod`. If not specified, the default scheduler will be used. + hostAliases: + type: array + items: + type: object + properties: + hostnames: + type: array + items: + type: string + ip: + type: string + description: The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified. + tmpDirSizeLimit: + type: string + pattern: ^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ + description: Defines the total amount (for example `1Gi`) of local storage required for temporary EmptyDir volume (`/tmp`). Default value is `1Mi`. + enableServiceLinks: + type: boolean + description: Indicates whether information about services should be injected into Pod's environment variables. 
+ topologySpreadConstraints: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + maxSkew: + type: integer + topologyKey: + type: string + whenUnsatisfiable: + type: string + description: The pod's topology spread constraints. + description: Template for Cruise Control `Pods`. + apiService: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + ipFamilyPolicy: + type: string + enum: + - SingleStack + - PreferDualStack + - RequireDualStack + description: Specifies the IP Family Policy used by the service. Available options are `SingleStack`, `PreferDualStack` and `RequireDualStack`. `SingleStack` is for a single IP family. `PreferDualStack` is for two IP families on dual-stack configured clusters or a single IP family on single-stack clusters. `RequireDualStack` fails unless there are two IP families on dual-stack configured clusters. If unspecified, Kubernetes will choose the default value based on the service type. Available on Kubernetes 1.20 and newer. + ipFamilies: + type: array + items: + type: string + enum: + - IPv4 + - IPv6 + description: Specifies the IP Families used by the service. Available options are `IPv4` and `IPv6. 
If unspecified, Kubernetes will choose the default value based on the `ipFamilyPolicy` setting. Available on Kubernetes 1.20 and newer. + description: Template for Cruise Control API `Service`. + podDisruptionBudget: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata to apply to the `PodDisruptionBudgetTemplate` resource. + maxUnavailable: + type: integer + minimum: 0 + description: Maximum number of unavailable pods to allow automatic Pod eviction. A Pod eviction is allowed when the `maxUnavailable` number of pods or fewer are unavailable after the eviction. Setting this value to 0 prevents all voluntary evictions, so the pods must be evicted manually. Defaults to 1. + description: Template for Cruise Control `PodDisruptionBudget`. + cruiseControlContainer: + type: object + properties: + env: + type: array + items: + type: object + properties: + name: + type: string + description: The environment variable key. + value: + type: string + description: The environment variable value. + description: Environment variables which should be applied to the container. 
+ securityContext: + type: object + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + type: object + properties: + add: + type: array + items: + type: string + drop: + type: array + items: + type: string + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Security context for the container. + description: Template for the Cruise Control container. + tlsSidecarContainer: + type: object + properties: + env: + type: array + items: + type: object + properties: + name: + type: string + description: The environment variable key. + value: + type: string + description: The environment variable value. + description: Environment variables which should be applied to the container. 
+ securityContext: + type: object + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + type: object + properties: + add: + type: array + items: + type: string + drop: + type: array + items: + type: string + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Security context for the container. + description: Template for the Cruise Control TLS sidecar container. + serviceAccount: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + description: Template for the Cruise Control service account. + description: Template to specify how Cruise Control resources, `Deployments` and `Pods`, are generated. + brokerCapacity: + type: object + properties: + disk: + type: string + pattern: ^[0-9]+([.][0-9]*)?([KMGTPE]i?|e[0-9]+)?$ + description: Broker capacity for disk in bytes, for example, 100Gi. 
+ cpuUtilization: + type: integer + minimum: 0 + maximum: 100 + description: Broker capacity for CPU resource utilization as a percentage (0 - 100). + inboundNetwork: + type: string + pattern: ^[0-9]+([KMG]i?)?B/s$ + description: Broker capacity for inbound network throughput in bytes per second, for example, 10000KB/s. + outboundNetwork: + type: string + pattern: ^[0-9]+([KMG]i?)?B/s$ + description: Broker capacity for outbound network throughput in bytes per second, for example 10000KB/s. + description: The Cruise Control `brokerCapacity` configuration. + config: + x-kubernetes-preserve-unknown-fields: true + type: object + description: 'The Cruise Control configuration. For a full list of configuration options refer to https://github.com/linkedin/cruise-control/wiki/Configurations. Note that properties with the following prefixes cannot be set: bootstrap.servers, client.id, zookeeper., network., security., failed.brokers.zk.path,webserver.http., webserver.api.urlprefix, webserver.session.path, webserver.accesslog., two.step., request.reason.required,metric.reporter.sampler.bootstrap.servers, metric.reporter.topic, partition.metric.sample.store.topic, broker.metric.sample.store.topic,capacity.config.file, self.healing., ssl. (with the exception of: ssl.cipher.suites, ssl.protocol, ssl.enabled.protocols, webserver.http.cors.enabled, webserver.http.cors.origin, webserver.http.cors.exposeheaders, webserver.security.enable, webserver.ssl.enable).' + metricsConfig: + type: object + properties: + type: + type: string + enum: + - jmxPrometheusExporter + description: Metrics type. Only 'jmxPrometheusExporter' supported currently. + valueFrom: + type: object + properties: + configMapKeyRef: + type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean + description: Reference to the key in the ConfigMap containing the configuration. + description: ConfigMap entry where the Prometheus JMX Exporter configuration is stored. 
For details of the structure of this configuration, see the {JMXExporter}. + required: + - type + - valueFrom + description: Metrics configuration. + description: Configuration for Cruise Control deployment. Deploys a Cruise Control instance when specified. + jmxTrans: + type: object + properties: + image: + type: string + description: The image to use for the JmxTrans. + outputDefinitions: + type: array + items: + type: object + properties: + outputType: + type: string + description: Template for setting the format of the data that will be pushed.For more information see https://github.com/jmxtrans/jmxtrans/wiki/OutputWriters[JmxTrans OutputWriters]. + host: + type: string + description: The DNS/hostname of the remote host that the data is pushed to. + port: + type: integer + description: The port of the remote host that the data is pushed to. + flushDelayInSeconds: + type: integer + description: How many seconds the JmxTrans waits before pushing a new set of data out. + typeNames: + type: array + items: + type: string + description: Template for filtering data to be included in response to a wildcard query. For more information see https://github.com/jmxtrans/jmxtrans/wiki/Queries[JmxTrans queries]. + name: + type: string + description: Template for setting the name of the output definition. This is used to identify where to send the results of queries should be sent. + required: + - outputType + - name + description: Defines the output hosts that will be referenced later on. For more information on these properties see, xref:type-JmxTransOutputDefinitionTemplate-reference[`JmxTransOutputDefinitionTemplate` schema reference]. + logLevel: + type: string + description: Sets the logging level of the JmxTrans deployment.For more information see, https://github.com/jmxtrans/jmxtrans-agent/wiki/Troubleshooting[JmxTrans Logging Level]. 
+ kafkaQueries: + type: array + items: + type: object + properties: + targetMBean: + type: string + description: If using wildcards instead of a specific MBean then the data is gathered from multiple MBeans. Otherwise if specifying an MBean then data is gathered from that specified MBean. + attributes: + type: array + items: + type: string + description: Determine which attributes of the targeted MBean should be included. + outputs: + type: array + items: + type: string + description: List of the names of output definitions specified in the spec.kafka.jmxTrans.outputDefinitions that have defined where JMX metrics are pushed to, and in which data format. + required: + - targetMBean + - attributes + - outputs + description: Queries to send to the Kafka brokers to define what data should be read from each broker. For more information on these properties see, xref:type-JmxTransQueryTemplate-reference[`JmxTransQueryTemplate` schema reference]. + resources: + type: object + properties: + limits: + x-kubernetes-preserve-unknown-fields: true + type: object + requests: + x-kubernetes-preserve-unknown-fields: true + type: object + description: CPU and memory resources to reserve. + template: + type: object + properties: + deployment: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + description: Template for JmxTrans `Deployment`. 
+ pod: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + imagePullSecrets: + type: array + items: + type: object + properties: + name: + type: string + description: List of references to secrets in the same namespace to use for pulling any of the images used by this Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` environment variable in Cluster Operator and the `imagePullSecrets` option are specified, only the `imagePullSecrets` variable is used and the `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored. + securityContext: + type: object + properties: + fsGroup: + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + supplementalGroups: + type: array + items: + type: integer + sysctls: + type: array + items: + type: object + properties: + name: + type: string + value: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Configures pod-level security attributes and common container settings. 
+ terminationGracePeriodSeconds: + type: integer + minimum: 0 + description: The grace period is the duration in seconds after the processes running in the pod are sent a termination signal, and the time when the processes are forcibly halted with a kill signal. Set this value to longer than the expected cleanup time for your process. Value must be a non-negative integer. A zero value indicates delete immediately. You might need to increase the grace period for very large Kafka clusters, so that the Kafka brokers have enough time to transfer their work to another broker before they are terminated. Defaults to 30 seconds. + affinity: + type: object + properties: + nodeAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + preference: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: object + properties: + nodeSelectorTerms: + type: array + items: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + podAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: 
object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + podAntiAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + 
operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + description: The pod's affinity rules. + tolerations: + type: array + items: + type: object + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + type: integer + value: + type: string + description: The pod's tolerations. + priorityClassName: + type: string + description: The name of the priority class used to assign priority to the pods. For more information about priority classes, see {K8sPriorityClass}. + schedulerName: + type: string + description: The name of the scheduler used to dispatch this `Pod`. If not specified, the default scheduler will be used. + hostAliases: + type: array + items: + type: object + properties: + hostnames: + type: array + items: + type: string + ip: + type: string + description: The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified. 
+ tmpDirSizeLimit: + type: string + pattern: ^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ + description: Defines the total amount (for example `1Gi`) of local storage required for temporary EmptyDir volume (`/tmp`). Default value is `1Mi`. + enableServiceLinks: + type: boolean + description: Indicates whether information about services should be injected into Pod's environment variables. + topologySpreadConstraints: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + maxSkew: + type: integer + topologyKey: + type: string + whenUnsatisfiable: + type: string + description: The pod's topology spread constraints. + description: Template for JmxTrans `Pods`. + container: + type: object + properties: + env: + type: array + items: + type: object + properties: + name: + type: string + description: The environment variable key. + value: + type: string + description: The environment variable value. + description: Environment variables which should be applied to the container. 
+ securityContext: + type: object + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + type: object + properties: + add: + type: array + items: + type: string + drop: + type: array + items: + type: string + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Security context for the container. + description: Template for JmxTrans container. + serviceAccount: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + description: Template for the JMX Trans service account. + description: Template for JmxTrans resources. + required: + - outputDefinitions + - kafkaQueries + description: Configuration for JmxTrans. When the property is present a JmxTrans deployment is created for gathering JMX metrics from each Kafka broker. For more information see https://github.com/jmxtrans/jmxtrans[JmxTrans GitHub]. 
+ kafkaExporter: + type: object + properties: + image: + type: string + description: The docker image for the pods. + groupRegex: + type: string + description: Regular expression to specify which consumer groups to collect. Default value is `.*`. + topicRegex: + type: string + description: Regular expression to specify which topics to collect. Default value is `.*`. + resources: + type: object + properties: + limits: + x-kubernetes-preserve-unknown-fields: true + type: object + requests: + x-kubernetes-preserve-unknown-fields: true + type: object + description: CPU and memory resources to reserve. + logging: + type: string + description: 'Only log messages with the given severity or above. Valid levels: [`info`, `debug`, `trace`]. Default log level is `info`.' + enableSaramaLogging: + type: boolean + description: Enable Sarama logging, a Go client library used by the Kafka Exporter. + template: + type: object + properties: + deployment: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + description: Template for Kafka Exporter `Deployment`. + pod: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. 
+ annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + imagePullSecrets: + type: array + items: + type: object + properties: + name: + type: string + description: List of references to secrets in the same namespace to use for pulling any of the images used by this Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` environment variable in Cluster Operator and the `imagePullSecrets` option are specified, only the `imagePullSecrets` variable is used and the `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored. + securityContext: + type: object + properties: + fsGroup: + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + supplementalGroups: + type: array + items: + type: integer + sysctls: + type: array + items: + type: object + properties: + name: + type: string + value: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Configures pod-level security attributes and common container settings. + terminationGracePeriodSeconds: + type: integer + minimum: 0 + description: The grace period is the duration in seconds after the processes running in the pod are sent a termination signal, and the time when the processes are forcibly halted with a kill signal. Set this value to longer than the expected cleanup time for your process. 
Value must be a non-negative integer. A zero value indicates delete immediately. You might need to increase the grace period for very large Kafka clusters, so that the Kafka brokers have enough time to transfer their work to another broker before they are terminated. Defaults to 30 seconds. + affinity: + type: object + properties: + nodeAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + preference: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: object + properties: + nodeSelectorTerms: + type: array + items: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + podAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + 
operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + podAntiAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: 
object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + description: The pod's affinity rules. + tolerations: + type: array + items: + type: object + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + type: integer + value: + type: string + description: The pod's tolerations. + priorityClassName: + type: string + description: The name of the priority class used to assign priority to the pods. For more information about priority classes, see {K8sPriorityClass}. + schedulerName: + type: string + description: The name of the scheduler used to dispatch this `Pod`. If not specified, the default scheduler will be used. + hostAliases: + type: array + items: + type: object + properties: + hostnames: + type: array + items: + type: string + ip: + type: string + description: The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified. + tmpDirSizeLimit: + type: string + pattern: ^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ + description: Defines the total amount (for example `1Gi`) of local storage required for temporary EmptyDir volume (`/tmp`). Default value is `1Mi`. + enableServiceLinks: + type: boolean + description: Indicates whether information about services should be injected into Pod's environment variables. 
+ topologySpreadConstraints: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + maxSkew: + type: integer + topologyKey: + type: string + whenUnsatisfiable: + type: string + description: The pod's topology spread constraints. + description: Template for Kafka Exporter `Pods`. + service: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + description: Template for Kafka Exporter `Service`. + container: + type: object + properties: + env: + type: array + items: + type: object + properties: + name: + type: string + description: The environment variable key. + value: + type: string + description: The environment variable value. + description: Environment variables which should be applied to the container. 
+ securityContext: + type: object + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + type: object + properties: + add: + type: array + items: + type: string + drop: + type: array + items: + type: string + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Security context for the container. + description: Template for the Kafka Exporter container. + serviceAccount: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + description: Template for the Kafka Exporter service account. + description: Customization of deployment templates and pods. + livenessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. 
+ initialDelaySeconds: + type: integer + minimum: 0 + description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0. + periodSeconds: + type: integer + minimum: 1 + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1. + description: Pod liveness check. + readinessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0. + periodSeconds: + type: integer + minimum: 1 + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1. + description: Pod readiness check. + description: Configuration of the Kafka Exporter. Kafka Exporter can provide additional metrics, for example lag of consumer group at topic/partition. + maintenanceTimeWindows: + type: array + items: + type: string + description: A list of time windows for maintenance tasks (that is, certificates renewal). 
Each time window is defined by a cron expression. + required: + - kafka + - zookeeper + description: The specification of the Kafka and ZooKeeper clusters, and Topic Operator. + status: + type: object + properties: + conditions: + type: array + items: + type: object + properties: + type: + type: string + description: The unique identifier of a condition, used to distinguish between other conditions in the resource. + status: + type: string + description: The status of the condition, either True, False or Unknown. + lastTransitionTime: + type: string + description: Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. + reason: + type: string + description: The reason for the condition's last transition (a single word in CamelCase). + message: + type: string + description: Human-readable message indicating details about the condition's last transition. + description: List of status conditions. + observedGeneration: + type: integer + description: The generation of the CRD that was last reconciled by the operator. + listeners: + type: array + items: + type: object + properties: + type: + type: string + description: 'The type of the listener. Can be one of the following three types: `plain`, `tls`, and `external`.' + addresses: + type: array + items: + type: object + properties: + host: + type: string + description: The DNS name or IP address of the Kafka bootstrap service. + port: + type: integer + description: The port of the Kafka bootstrap service. + description: A list of the addresses for this listener. + bootstrapServers: + type: string + description: A comma-separated list of `host:port` pairs for connecting to the Kafka cluster using this listener. + certificates: + type: array + items: + type: string + description: A list of TLS certificates which can be used to verify the identity of the server when connecting to the given listener. 
Set only for `tls` and `external` listeners. + description: Addresses of the internal and external listeners. + clusterId: + type: string + description: Kafka cluster Id. + description: The status of the Kafka and ZooKeeper clusters, and Topic Operator. diff --git a/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/041-Crd-kafkaconnect.yaml b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/041-Crd-kafkaconnect.yaml new file mode 100644 index 000000000..0ca716b52 --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/041-Crd-kafkaconnect.yaml @@ -0,0 +1,1906 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: kafkaconnects.kafka.strimzi.io + labels: + app: strimzi + strimzi.io/crd-install: "true" + component: kafkaconnects.kafka.strimzi.io-crd +spec: + group: kafka.strimzi.io + names: + kind: KafkaConnect + listKind: KafkaConnectList + singular: kafkaconnect + plural: kafkaconnects + shortNames: + - kc + categories: + - strimzi + scope: Namespaced + conversion: + strategy: None + versions: + - name: v1beta2 + served: true + storage: true + subresources: + status: {} + scale: + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + labelSelectorPath: .status.labelSelector + additionalPrinterColumns: + - name: Desired replicas + description: The desired number of Kafka Connect replicas + jsonPath: .spec.replicas + type: integer + - name: Ready + description: The state of the custom resource + jsonPath: .status.conditions[?(@.type=="Ready")].status + type: string + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + version: + type: string + description: The Kafka Connect version. Defaults to {DefaultKafkaVersion}. Consult the user documentation to understand the process required to upgrade or downgrade the version. 
+ replicas: + type: integer + description: The number of pods in the Kafka Connect group. + image: + type: string + description: The docker image for the pods. + bootstrapServers: + type: string + description: Bootstrap servers to connect to. This should be given as a comma separated list of __:__ pairs. + tls: + type: object + properties: + trustedCertificates: + type: array + items: + type: object + properties: + certificate: + type: string + description: The name of the file certificate in the Secret. + secretName: + type: string + description: The name of the Secret containing the certificate. + required: + - certificate + - secretName + description: Trusted certificates for TLS connection. + description: TLS configuration. + authentication: + type: object + properties: + accessToken: + type: object + properties: + key: + type: string + description: The key under which the secret value is stored in the Kubernetes Secret. + secretName: + type: string + description: The name of the Kubernetes Secret containing the secret value. + required: + - key + - secretName + description: Link to Kubernetes Secret containing the access token which was obtained from the authorization server. + accessTokenIsJwt: + type: boolean + description: Configure whether access token should be treated as JWT. This should be set to `false` if the authorization server returns opaque tokens. Defaults to `true`. + audience: + type: string + description: OAuth audience to use when authenticating against the authorization server. Some authorization servers require the audience to be explicitly set. The possible values depend on how the authorization server is configured. By default, `audience` is not specified when performing the token endpoint request. + certificateAndKey: + type: object + properties: + certificate: + type: string + description: The name of the file certificate in the Secret. + key: + type: string + description: The name of the private key in the Secret. 
+ secretName: + type: string + description: The name of the Secret containing the certificate. + required: + - certificate + - key + - secretName + description: Reference to the `Secret` which holds the certificate and private key pair. + clientId: + type: string + description: OAuth Client ID which the Kafka client can use to authenticate against the OAuth server and use the token endpoint URI. + clientSecret: + type: object + properties: + key: + type: string + description: The key under which the secret value is stored in the Kubernetes Secret. + secretName: + type: string + description: The name of the Kubernetes Secret containing the secret value. + required: + - key + - secretName + description: Link to Kubernetes Secret containing the OAuth client secret which the Kafka client can use to authenticate against the OAuth server and use the token endpoint URI. + disableTlsHostnameVerification: + type: boolean + description: Enable or disable TLS hostname verification. Default value is `false`. + maxTokenExpirySeconds: + type: integer + description: Set or limit time-to-live of the access tokens to the specified number of seconds. This should be set if the authorization server returns opaque tokens. + passwordSecret: + type: object + properties: + password: + type: string + description: The name of the key in the Secret under which the password is stored. + secretName: + type: string + description: The name of the Secret containing the password. + required: + - password + - secretName + description: Reference to the `Secret` which holds the password. + refreshToken: + type: object + properties: + key: + type: string + description: The key under which the secret value is stored in the Kubernetes Secret. + secretName: + type: string + description: The name of the Kubernetes Secret containing the secret value. 
+ required: + - key + - secretName + description: Link to Kubernetes Secret containing the refresh token which can be used to obtain access token from the authorization server. + scope: + type: string + description: OAuth scope to use when authenticating against the authorization server. Some authorization servers require this to be set. The possible values depend on how authorization server is configured. By default `scope` is not specified when doing the token endpoint request. + tlsTrustedCertificates: + type: array + items: + type: object + properties: + certificate: + type: string + description: The name of the file certificate in the Secret. + secretName: + type: string + description: The name of the Secret containing the certificate. + required: + - certificate + - secretName + description: Trusted certificates for TLS connection to the OAuth server. + tokenEndpointUri: + type: string + description: Authorization server token endpoint URI. + type: + type: string + enum: + - tls + - scram-sha-256 + - scram-sha-512 + - plain + - oauth + description: Authentication type. Currently the only supported types are `tls`, `scram-sha-256`, `scram-sha-512`, and `plain`. `scram-sha-256` and `scram-sha-512` types use SASL SCRAM-SHA-256 and SASL SCRAM-SHA-512 Authentication, respectively. `plain` type uses SASL PLAIN Authentication. `oauth` type uses SASL OAUTHBEARER Authentication. The `tls` type uses TLS Client Authentication. The `tls` type is supported only over TLS connections. + username: + type: string + description: Username used for the authentication. + required: + - type + description: Authentication configuration for Kafka Connect. + config: + x-kubernetes-preserve-unknown-fields: true + type: object + description: 'The Kafka Connect configuration. 
Properties with the following prefixes cannot be set: ssl., sasl., security., listeners, plugin.path, rest., bootstrap.servers, consumer.interceptor.classes, producer.interceptor.classes (with the exception of: ssl.endpoint.identification.algorithm, ssl.cipher.suites, ssl.protocol, ssl.enabled.protocols).' + resources: + type: object + properties: + limits: + x-kubernetes-preserve-unknown-fields: true + type: object + requests: + x-kubernetes-preserve-unknown-fields: true + type: object + description: The maximum limits for CPU and memory resources and the requested initial resources. + livenessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0. + periodSeconds: + type: integer + minimum: 1 + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1. + description: Pod liveness checking. + readinessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0. 
+ periodSeconds: + type: integer + minimum: 1 + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1. + description: Pod readiness checking. + jvmOptions: + type: object + properties: + "-XX": + x-kubernetes-preserve-unknown-fields: true + type: object + description: A map of -XX options to the JVM. + "-Xms": + type: string + pattern: ^[0-9]+[mMgG]?$ + description: -Xms option to to the JVM. + "-Xmx": + type: string + pattern: ^[0-9]+[mMgG]?$ + description: -Xmx option to to the JVM. + gcLoggingEnabled: + type: boolean + description: Specifies whether the Garbage Collection logging is enabled. The default is false. + javaSystemProperties: + type: array + items: + type: object + properties: + name: + type: string + description: The system property name. + value: + type: string + description: The system property value. + description: A map of additional system properties which will be passed using the `-D` option to the JVM. + description: JVM Options for pods. + jmxOptions: + type: object + properties: + authentication: + type: object + properties: + type: + type: string + enum: + - password + description: Authentication type. Currently the only supported types are `password`.`password` type creates a username and protected port with no TLS. + required: + - type + description: Authentication configuration for connecting to the JMX port. + description: JMX Options. + logging: + type: object + properties: + loggers: + x-kubernetes-preserve-unknown-fields: true + type: object + description: A Map from logger name to logger level. 
+ type: + type: string + enum: + - inline + - external + description: Logging type, must be either 'inline' or 'external'. + valueFrom: + type: object + properties: + configMapKeyRef: + type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean + description: Reference to the key in the ConfigMap containing the configuration. + description: '`ConfigMap` entry where the logging configuration is stored. ' + required: + - type + description: Logging configuration for Kafka Connect. + tracing: + type: object + properties: + type: + type: string + enum: + - jaeger + description: Type of the tracing used. Currently the only supported type is `jaeger` for Jaeger tracing. + required: + - type + description: The configuration of tracing in Kafka Connect. + template: + type: object + properties: + deployment: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + deploymentStrategy: + type: string + enum: + - RollingUpdate + - Recreate + description: DeploymentStrategy which will be used for this Deployment. Valid values are `RollingUpdate` and `Recreate`. Defaults to `RollingUpdate`. + description: Template for Kafka Connect `Deployment`. + pod: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. 
Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + imagePullSecrets: + type: array + items: + type: object + properties: + name: + type: string + description: List of references to secrets in the same namespace to use for pulling any of the images used by this Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` environment variable in Cluster Operator and the `imagePullSecrets` option are specified, only the `imagePullSecrets` variable is used and the `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored. + securityContext: + type: object + properties: + fsGroup: + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + supplementalGroups: + type: array + items: + type: integer + sysctls: + type: array + items: + type: object + properties: + name: + type: string + value: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Configures pod-level security attributes and common container settings. + terminationGracePeriodSeconds: + type: integer + minimum: 0 + description: The grace period is the duration in seconds after the processes running in the pod are sent a termination signal, and the time when the processes are forcibly halted with a kill signal. 
Set this value to longer than the expected cleanup time for your process. Value must be a non-negative integer. A zero value indicates delete immediately. You might need to increase the grace period for very large Kafka clusters, so that the Kafka brokers have enough time to transfer their work to another broker before they are terminated. Defaults to 30 seconds. + affinity: + type: object + properties: + nodeAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + preference: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: object + properties: + nodeSelectorTerms: + type: array + items: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + podAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + 
type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + podAntiAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + 
requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + description: The pod's affinity rules. + tolerations: + type: array + items: + type: object + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + type: integer + value: + type: string + description: The pod's tolerations. + priorityClassName: + type: string + description: The name of the priority class used to assign priority to the pods. For more information about priority classes, see {K8sPriorityClass}. + schedulerName: + type: string + description: The name of the scheduler used to dispatch this `Pod`. If not specified, the default scheduler will be used. + hostAliases: + type: array + items: + type: object + properties: + hostnames: + type: array + items: + type: string + ip: + type: string + description: The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified. + tmpDirSizeLimit: + type: string + pattern: ^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ + description: Defines the total amount (for example `1Gi`) of local storage required for temporary EmptyDir volume (`/tmp`). Default value is `1Mi`. 
+ enableServiceLinks: + type: boolean + description: Indicates whether information about services should be injected into Pod's environment variables. + topologySpreadConstraints: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + maxSkew: + type: integer + topologyKey: + type: string + whenUnsatisfiable: + type: string + description: The pod's topology spread constraints. + description: Template for Kafka Connect `Pods`. + apiService: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + ipFamilyPolicy: + type: string + enum: + - SingleStack + - PreferDualStack + - RequireDualStack + description: Specifies the IP Family Policy used by the service. Available options are `SingleStack`, `PreferDualStack` and `RequireDualStack`. `SingleStack` is for a single IP family. `PreferDualStack` is for two IP families on dual-stack configured clusters or a single IP family on single-stack clusters. `RequireDualStack` fails unless there are two IP families on dual-stack configured clusters. If unspecified, Kubernetes will choose the default value based on the service type. Available on Kubernetes 1.20 and newer. 
+ ipFamilies: + type: array + items: + type: string + enum: + - IPv4 + - IPv6 + description: Specifies the IP Families used by the service. Available options are `IPv4` and `IPv6. If unspecified, Kubernetes will choose the default value based on the `ipFamilyPolicy` setting. Available on Kubernetes 1.20 and newer. + description: Template for Kafka Connect API `Service`. + connectContainer: + type: object + properties: + env: + type: array + items: + type: object + properties: + name: + type: string + description: The environment variable key. + value: + type: string + description: The environment variable value. + description: Environment variables which should be applied to the container. + securityContext: + type: object + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + type: object + properties: + add: + type: array + items: + type: string + drop: + type: array + items: + type: string + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Security context for the container. + description: Template for the Kafka Connect container. + initContainer: + type: object + properties: + env: + type: array + items: + type: object + properties: + name: + type: string + description: The environment variable key. + value: + type: string + description: The environment variable value. + description: Environment variables which should be applied to the container. 
+ securityContext: + type: object + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + type: object + properties: + add: + type: array + items: + type: string + drop: + type: array + items: + type: string + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Security context for the container. + description: Template for the Kafka init container. + podDisruptionBudget: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata to apply to the `PodDisruptionBudgetTemplate` resource. + maxUnavailable: + type: integer + minimum: 0 + description: Maximum number of unavailable pods to allow automatic Pod eviction. A Pod eviction is allowed when the `maxUnavailable` number of pods or fewer are unavailable after the eviction. Setting this value to 0 prevents all voluntary evictions, so the pods must be evicted manually. Defaults to 1. 
+ description: Template for Kafka Connect `PodDisruptionBudget`. + serviceAccount: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + description: Template for the Kafka Connect service account. + clusterRoleBinding: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + description: Template for the Kafka Connect ClusterRoleBinding. + buildPod: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. 
+ imagePullSecrets: + type: array + items: + type: object + properties: + name: + type: string + description: List of references to secrets in the same namespace to use for pulling any of the images used by this Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` environment variable in Cluster Operator and the `imagePullSecrets` option are specified, only the `imagePullSecrets` variable is used and the `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored. + securityContext: + type: object + properties: + fsGroup: + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + supplementalGroups: + type: array + items: + type: integer + sysctls: + type: array + items: + type: object + properties: + name: + type: string + value: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Configures pod-level security attributes and common container settings. + terminationGracePeriodSeconds: + type: integer + minimum: 0 + description: The grace period is the duration in seconds after the processes running in the pod are sent a termination signal, and the time when the processes are forcibly halted with a kill signal. Set this value to longer than the expected cleanup time for your process. Value must be a non-negative integer. A zero value indicates delete immediately. You might need to increase the grace period for very large Kafka clusters, so that the Kafka brokers have enough time to transfer their work to another broker before they are terminated. Defaults to 30 seconds. 
+ affinity: + type: object + properties: + nodeAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + preference: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: object + properties: + nodeSelectorTerms: + type: array + items: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + podAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + 
requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + podAntiAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + 
x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + description: The pod's affinity rules. + tolerations: + type: array + items: + type: object + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + type: integer + value: + type: string + description: The pod's tolerations. + priorityClassName: + type: string + description: The name of the priority class used to assign priority to the pods. For more information about priority classes, see {K8sPriorityClass}. + schedulerName: + type: string + description: The name of the scheduler used to dispatch this `Pod`. If not specified, the default scheduler will be used. + hostAliases: + type: array + items: + type: object + properties: + hostnames: + type: array + items: + type: string + ip: + type: string + description: The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified. + tmpDirSizeLimit: + type: string + pattern: ^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ + description: Defines the total amount (for example `1Gi`) of local storage required for temporary EmptyDir volume (`/tmp`). Default value is `1Mi`. + enableServiceLinks: + type: boolean + description: Indicates whether information about services should be injected into Pod's environment variables. 
+ topologySpreadConstraints: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + maxSkew: + type: integer + topologyKey: + type: string + whenUnsatisfiable: + type: string + description: The pod's topology spread constraints. + description: Template for Kafka Connect Build `Pods`. The build pod is used only on Kubernetes. + buildContainer: + type: object + properties: + env: + type: array + items: + type: object + properties: + name: + type: string + description: The environment variable key. + value: + type: string + description: The environment variable value. + description: Environment variables which should be applied to the container. + securityContext: + type: object + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + type: object + properties: + add: + type: array + items: + type: string + drop: + type: array + items: + type: string + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Security context for the container. + description: Template for the Kafka Connect Build container. The build container is used only on Kubernetes. 
+ buildConfig: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata to apply to the `PodDisruptionBudgetTemplate` resource. + pullSecret: + type: string + description: Container Registry Secret with the credentials for pulling the base image. + description: Template for the Kafka Connect BuildConfig used to build new container images. The BuildConfig is used only on OpenShift. + buildServiceAccount: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + description: Template for the Kafka Connect Build service account. + jmxSecret: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. 
Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + description: Template for Secret of the Kafka Connect Cluster JMX authentication. + description: Template for Kafka Connect and Kafka Mirror Maker 2 resources. The template allows users to specify how the `Deployment`, `Pods` and `Service` are generated. + externalConfiguration: + type: object + properties: + env: + type: array + items: + type: object + properties: + name: + type: string + description: Name of the environment variable which will be passed to the Kafka Connect pods. The name of the environment variable cannot start with `KAFKA_` or `STRIMZI_`. + valueFrom: + type: object + properties: + configMapKeyRef: + type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean + description: Reference to a key in a ConfigMap. + secretKeyRef: + type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean + description: Reference to a key in a Secret. + description: Value of the environment variable which will be passed to the Kafka Connect pods. It can be passed either as a reference to Secret or ConfigMap field. The field has to specify exactly one Secret or ConfigMap. + required: + - name + - valueFrom + description: Makes data from a Secret or ConfigMap available in the Kafka Connect pods as environment variables. + volumes: + type: array + items: + type: object + properties: + configMap: + type: object + properties: + defaultMode: + type: integer + items: + type: array + items: + type: object + properties: + key: + type: string + mode: + type: integer + path: + type: string + name: + type: string + optional: + type: boolean + description: Reference to a key in a ConfigMap. Exactly one Secret or ConfigMap has to be specified. + name: + type: string + description: Name of the volume which will be added to the Kafka Connect pods. 
+ secret: + type: object + properties: + defaultMode: + type: integer + items: + type: array + items: + type: object + properties: + key: + type: string + mode: + type: integer + path: + type: string + optional: + type: boolean + secretName: + type: string + description: Reference to a key in a Secret. Exactly one Secret or ConfigMap has to be specified. + required: + - name + description: Makes data from a Secret or ConfigMap available in the Kafka Connect pods as volumes. + description: Pass data from Secrets or ConfigMaps to the Kafka Connect pods and use them to configure connectors. + build: + type: object + properties: + output: + type: object + properties: + additionalKanikoOptions: + type: array + items: + type: string + description: 'Configures additional options which will be passed to the Kaniko executor when building the new Connect image. Allowed options are: --customPlatform, --insecure, --insecure-pull, --insecure-registry, --log-format, --log-timestamp, --registry-mirror, --reproducible, --single-snapshot, --skip-tls-verify, --skip-tls-verify-pull, --skip-tls-verify-registry, --verbosity, --snapshotMode, --use-new-run. These options will be used only on Kubernetes where the Kaniko executor is used. They will be ignored on OpenShift. The options are described in the link:https://github.com/GoogleContainerTools/kaniko[Kaniko GitHub repository^]. Changing this field does not trigger new build of the Kafka Connect image.' + image: + type: string + description: The name of the image which will be built. Required. + pushSecret: + type: string + description: Container Registry Secret with the credentials for pushing the newly built image. + type: + type: string + enum: + - docker + - imagestream + description: Output type. Must be either `docker` for pushing the newly build image to Docker compatible registry or `imagestream` for pushing the image to OpenShift ImageStream. Required. 
+ required: + - image + - type + description: Configures where should the newly built image be stored. Required. + resources: + type: object + properties: + limits: + x-kubernetes-preserve-unknown-fields: true + type: object + requests: + x-kubernetes-preserve-unknown-fields: true + type: object + description: CPU and memory resources to reserve for the build. + plugins: + type: array + items: + type: object + properties: + name: + type: string + pattern: ^[a-z0-9][-_a-z0-9]*[a-z0-9]$ + description: 'The unique name of the connector plugin. Will be used to generate the path where the connector artifacts will be stored. The name has to be unique within the KafkaConnect resource. The name has to follow the following pattern: `^[a-z][-_a-z0-9]*[a-z]$`. Required.' + artifacts: + type: array + items: + type: object + properties: + artifact: + type: string + description: Maven artifact id. Applicable to the `maven` artifact type only. + fileName: + type: string + description: Name under which the artifact will be stored. + group: + type: string + description: Maven group id. Applicable to the `maven` artifact type only. + insecure: + type: boolean + description: By default, connections using TLS are verified to check they are secure. The server certificate used must be valid, trusted, and contain the server name. By setting this option to `true`, all TLS verification is disabled and the artifact will be downloaded, even when the server is considered insecure. + repository: + type: string + description: Maven repository to download the artifact from. Applicable to the `maven` artifact type only. + sha512sum: + type: string + description: 'SHA512 checksum of the artifact. Optional. If specified, the checksum will be verified while building the new container. If not specified, the downloaded artifact will not be verified. Not applicable to the `maven` artifact type. ' + type: + type: string + enum: + - jar + - tgz + - zip + - maven + - other + description: Artifact type. 
Currently, the supported artifact types are `tgz`, `jar`, `zip`, `other` and `maven`. + url: + type: string + pattern: ^(https?|ftp)://[-a-zA-Z0-9+&@#/%?=~_|!:,.;]*[-a-zA-Z0-9+&@#/%=~_|]$ + description: URL of the artifact which will be downloaded. Strimzi does not do any security scanning of the downloaded artifacts. For security reasons, you should first verify the artifacts manually and configure the checksum verification to make sure the same artifact is used in the automated build. Required for `jar`, `zip`, `tgz` and `other` artifacts. Not applicable to the `maven` artifact type. + version: + type: string + description: Maven version number. Applicable to the `maven` artifact type only. + required: + - type + description: List of artifacts which belong to this connector plugin. Required. + required: + - name + - artifacts + description: List of connector plugins which should be added to the Kafka Connect. Required. + required: + - output + - plugins + description: Configures how the Connect container image should be built. Optional. + clientRackInitImage: + type: string + description: The image of the init container used for initializing the `client.rack`. + metricsConfig: + type: object + properties: + type: + type: string + enum: + - jmxPrometheusExporter + description: Metrics type. Only 'jmxPrometheusExporter' supported currently. + valueFrom: + type: object + properties: + configMapKeyRef: + type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean + description: Reference to the key in the ConfigMap containing the configuration. + description: ConfigMap entry where the Prometheus JMX Exporter configuration is stored. For details of the structure of this configuration, see the {JMXExporter}. + required: + - type + - valueFrom + description: Metrics configuration. 
+ rack: + type: object + properties: + topologyKey: + type: string + example: topology.kubernetes.io/zone + description: A key that matches labels assigned to the Kubernetes cluster nodes. The value of the label is used to set the broker's `broker.rack` config and `client.rack` in Kafka Connect. + required: + - topologyKey + description: Configuration of the node label which will be used as the client.rack consumer configuration. + required: + - bootstrapServers + description: The specification of the Kafka Connect cluster. + status: + type: object + properties: + conditions: + type: array + items: + type: object + properties: + type: + type: string + description: The unique identifier of a condition, used to distinguish between other conditions in the resource. + status: + type: string + description: The status of the condition, either True, False or Unknown. + lastTransitionTime: + type: string + description: Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. + reason: + type: string + description: The reason for the condition's last transition (a single word in CamelCase). + message: + type: string + description: Human-readable message indicating details about the condition's last transition. + description: List of status conditions. + observedGeneration: + type: integer + description: The generation of the CRD that was last reconciled by the operator. + url: + type: string + description: The URL of the REST API endpoint for managing and monitoring Kafka Connect connectors. + connectorPlugins: + type: array + items: + type: object + properties: + type: + type: string + description: The type of the connector plugin. The available types are `sink` and `source`. + version: + type: string + description: The version of the connector plugin. + class: + type: string + description: The class of the connector plugin. 
+ description: The list of connector plugins available in this Kafka Connect deployment. + labelSelector: + type: string + description: Label selector for pods providing this resource. + replicas: + type: integer + description: The current number of pods being used to provide this resource. + description: The status of the Kafka Connect cluster. diff --git a/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/042-Crd-strimzipodset.yaml b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/042-Crd-strimzipodset.yaml new file mode 100644 index 000000000..0b9c04b46 --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/042-Crd-strimzipodset.yaml @@ -0,0 +1,119 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: strimzipodsets.core.strimzi.io + labels: + app: strimzi + strimzi.io/crd-install: "true" + component: stirmzipodsets.core.strimzi.io-crd +spec: + group: core.strimzi.io + names: + kind: StrimziPodSet + listKind: StrimziPodSetList + singular: strimzipodset + plural: strimzipodsets + shortNames: + - sps + categories: + - strimzi + scope: Namespaced + conversion: + strategy: None + versions: + - name: v1beta2 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - name: Pods + description: Number of pods managed by the StrimziPodSet + jsonPath: .status.pods + type: integer + - name: Ready Pods + description: Number of ready pods managed by the StrimziPodSet + jsonPath: .status.readyPods + type: integer + - name: Current Pods + description: Number of up-to-date pods managed by the StrimziPodSet + jsonPath: .status.currentPods + type: integer + - name: Age + description: Age of the StrimziPodSet + jsonPath: .metadata.creationTimestamp + type: date + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + selector: + type: object + properties: + matchExpressions: + type: array + items: + 
type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Selector is a label query which matches all the pods managed by this `StrimziPodSet`. Only `matchLabels` is supported. If `matchExpressions` is set, it will be ignored. + pods: + type: array + items: + x-kubernetes-preserve-unknown-fields: true + type: object + description: The Pods managed by this StrimziPodSet. + required: + - selector + - pods + description: The specification of the StrimziPodSet. + status: + type: object + properties: + conditions: + type: array + items: + type: object + properties: + type: + type: string + description: The unique identifier of a condition, used to distinguish between other conditions in the resource. + status: + type: string + description: The status of the condition, either True, False or Unknown. + lastTransitionTime: + type: string + description: Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. + reason: + type: string + description: The reason for the condition's last transition (a single word in CamelCase). + message: + type: string + description: Human-readable message indicating details about the condition's last transition. + description: List of status conditions. + observedGeneration: + type: integer + description: The generation of the CRD that was last reconciled by the operator. + pods: + type: integer + description: Number of pods managed by the StrimziPodSet controller. + readyPods: + type: integer + description: Number of pods managed by the StrimziPodSet controller that are ready. + currentPods: + type: integer + description: Number of pods managed by the StrimziPodSet controller that have the current revision. + description: The status of the StrimziPodSet. 
diff --git a/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/043-Crd-kafkatopic.yaml b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/043-Crd-kafkatopic.yaml new file mode 100644 index 000000000..ba1aa9327 --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/043-Crd-kafkatopic.yaml @@ -0,0 +1,254 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: kafkatopics.kafka.strimzi.io + labels: + app: strimzi + strimzi.io/crd-install: "true" + component: kafkatopics.kafka.strimzi.io-crd +spec: + group: kafka.strimzi.io + names: + kind: KafkaTopic + listKind: KafkaTopicList + singular: kafkatopic + plural: kafkatopics + shortNames: + - kt + categories: + - strimzi + scope: Namespaced + conversion: + strategy: None + versions: + - name: v1beta2 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - name: Cluster + description: The name of the Kafka cluster this topic belongs to + jsonPath: .metadata.labels.strimzi\.io/cluster + type: string + - name: Partitions + description: The desired number of partitions in the topic + jsonPath: .spec.partitions + type: integer + - name: Replication factor + description: The desired number of replicas of each partition + jsonPath: .spec.replicas + type: integer + - name: Ready + description: The state of the custom resource + jsonPath: .status.conditions[?(@.type=="Ready")].status + type: string + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + partitions: + type: integer + minimum: 1 + description: The number of partitions the topic should have. This cannot be decreased after topic creation. It can be increased after topic creation, but it is important to understand the consequences that has, especially for topics with semantic partitioning. When absent this will default to the broker configuration for `num.partitions`. 
+ replicas: + type: integer + minimum: 1 + maximum: 32767 + description: The number of replicas the topic should have. When absent this will default to the broker configuration for `default.replication.factor`. + config: + x-kubernetes-preserve-unknown-fields: true + type: object + description: The topic configuration. + topicName: + type: string + description: The name of the topic. When absent this will default to the metadata.name of the topic. It is recommended to not set this unless the topic name is not a valid Kubernetes resource name. + description: The specification of the topic. + status: + type: object + properties: + conditions: + type: array + items: + type: object + properties: + type: + type: string + description: The unique identifier of a condition, used to distinguish between other conditions in the resource. + status: + type: string + description: The status of the condition, either True, False or Unknown. + lastTransitionTime: + type: string + description: Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. + reason: + type: string + description: The reason for the condition's last transition (a single word in CamelCase). + message: + type: string + description: Human-readable message indicating details about the condition's last transition. + description: List of status conditions. + observedGeneration: + type: integer + description: The generation of the CRD that was last reconciled by the operator. + topicName: + type: string + description: Topic name. + description: The status of the topic. 
+ - name: v1beta1 + served: true + storage: false + subresources: + status: {} + additionalPrinterColumns: + - name: Cluster + description: The name of the Kafka cluster this topic belongs to + jsonPath: .metadata.labels.strimzi\.io/cluster + type: string + - name: Partitions + description: The desired number of partitions in the topic + jsonPath: .spec.partitions + type: integer + - name: Replication factor + description: The desired number of replicas of each partition + jsonPath: .spec.replicas + type: integer + - name: Ready + description: The state of the custom resource + jsonPath: .status.conditions[?(@.type=="Ready")].status + type: string + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + partitions: + type: integer + minimum: 1 + description: The number of partitions the topic should have. This cannot be decreased after topic creation. It can be increased after topic creation, but it is important to understand the consequences that has, especially for topics with semantic partitioning. When absent this will default to the broker configuration for `num.partitions`. + replicas: + type: integer + minimum: 1 + maximum: 32767 + description: The number of replicas the topic should have. When absent this will default to the broker configuration for `default.replication.factor`. + config: + x-kubernetes-preserve-unknown-fields: true + type: object + description: The topic configuration. + topicName: + type: string + description: The name of the topic. When absent this will default to the metadata.name of the topic. It is recommended to not set this unless the topic name is not a valid Kubernetes resource name. + description: The specification of the topic. + status: + type: object + properties: + conditions: + type: array + items: + type: object + properties: + type: + type: string + description: The unique identifier of a condition, used to distinguish between other conditions in the resource. 
+ status: + type: string + description: The status of the condition, either True, False or Unknown. + lastTransitionTime: + type: string + description: Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. + reason: + type: string + description: The reason for the condition's last transition (a single word in CamelCase). + message: + type: string + description: Human-readable message indicating details about the condition's last transition. + description: List of status conditions. + observedGeneration: + type: integer + description: The generation of the CRD that was last reconciled by the operator. + topicName: + type: string + description: Topic name. + description: The status of the topic. + - name: v1alpha1 + served: true + storage: false + subresources: + status: {} + additionalPrinterColumns: + - name: Cluster + description: The name of the Kafka cluster this topic belongs to + jsonPath: .metadata.labels.strimzi\.io/cluster + type: string + - name: Partitions + description: The desired number of partitions in the topic + jsonPath: .spec.partitions + type: integer + - name: Replication factor + description: The desired number of replicas of each partition + jsonPath: .spec.replicas + type: integer + - name: Ready + description: The state of the custom resource + jsonPath: .status.conditions[?(@.type=="Ready")].status + type: string + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + partitions: + type: integer + minimum: 1 + description: The number of partitions the topic should have. This cannot be decreased after topic creation. It can be increased after topic creation, but it is important to understand the consequences that has, especially for topics with semantic partitioning. When absent this will default to the broker configuration for `num.partitions`. 
+ replicas: + type: integer + minimum: 1 + maximum: 32767 + description: The number of replicas the topic should have. When absent this will default to the broker configuration for `default.replication.factor`. + config: + x-kubernetes-preserve-unknown-fields: true + type: object + description: The topic configuration. + topicName: + type: string + description: The name of the topic. When absent this will default to the metadata.name of the topic. It is recommended to not set this unless the topic name is not a valid Kubernetes resource name. + description: The specification of the topic. + status: + type: object + properties: + conditions: + type: array + items: + type: object + properties: + type: + type: string + description: The unique identifier of a condition, used to distinguish between other conditions in the resource. + status: + type: string + description: The status of the condition, either True, False or Unknown. + lastTransitionTime: + type: string + description: Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. + reason: + type: string + description: The reason for the condition's last transition (a single word in CamelCase). + message: + type: string + description: Human-readable message indicating details about the condition's last transition. + description: List of status conditions. + observedGeneration: + type: integer + description: The generation of the CRD that was last reconciled by the operator. + topicName: + type: string + description: Topic name. + description: The status of the topic. 
diff --git a/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/044-Crd-kafkauser.yaml b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/044-Crd-kafkauser.yaml new file mode 100644 index 000000000..8e34f0b82 --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/044-Crd-kafkauser.yaml @@ -0,0 +1,638 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: kafkausers.kafka.strimzi.io + labels: + app: strimzi + strimzi.io/crd-install: "true" + component: kafkausers.kafka.strimzi.io-crd +spec: + group: kafka.strimzi.io + names: + kind: KafkaUser + listKind: KafkaUserList + singular: kafkauser + plural: kafkausers + shortNames: + - ku + categories: + - strimzi + scope: Namespaced + conversion: + strategy: None + versions: + - name: v1beta2 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - name: Cluster + description: The name of the Kafka cluster this user belongs to + jsonPath: .metadata.labels.strimzi\.io/cluster + type: string + - name: Authentication + description: How the user is authenticated + jsonPath: .spec.authentication.type + type: string + - name: Authorization + description: How the user is authorised + jsonPath: .spec.authorization.type + type: string + - name: Ready + description: The state of the custom resource + jsonPath: .status.conditions[?(@.type=="Ready")].status + type: string + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + authentication: + type: object + properties: + password: + type: object + properties: + valueFrom: + type: object + properties: + secretKeyRef: + type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean + description: Selects a key of a Secret in the resource's namespace. + description: Secret from which the password should be read. 
+ required: + - valueFrom + description: Specify the password for the user. If not set, a new password is generated by the User Operator. + type: + type: string + enum: + - tls + - tls-external + - scram-sha-512 + description: Authentication type. + required: + - type + description: "Authentication mechanism enabled for this Kafka user. The supported authentication mechanisms are `scram-sha-512`, `tls`, and `tls-external`. \n\n* `scram-sha-512` generates a secret with SASL SCRAM-SHA-512 credentials.\n* `tls` generates a secret with user certificate for mutual TLS authentication.\n* `tls-external` does not generate a user certificate. But prepares the user for using mutual TLS authentication using a user certificate generated outside the User Operator.\n ACLs and quotas set for this user are configured in the `CN=` format.\n\nAuthentication is optional. If authentication is not configured, no credentials are generated. ACLs and quotas set for the user are configured in the `` format suitable for SASL authentication." + authorization: + type: object + properties: + acls: + type: array + items: + type: object + properties: + host: + type: string + description: The host from which the action described in the ACL rule is allowed or denied. + operation: + type: string + enum: + - Read + - Write + - Create + - Delete + - Alter + - Describe + - ClusterAction + - AlterConfigs + - DescribeConfigs + - IdempotentWrite + - All + description: 'Operation which will be allowed or denied. Supported operations are: Read, Write, Create, Delete, Alter, Describe, ClusterAction, AlterConfigs, DescribeConfigs, IdempotentWrite and All.' + resource: + type: object + properties: + name: + type: string + description: Name of resource for which given ACL rule applies. Can be combined with `patternType` field to use prefix pattern. + patternType: + type: string + enum: + - literal + - prefix + description: Describes the pattern used in the resource field. 
The supported types are `literal` and `prefix`. With `literal` pattern type, the resource field will be used as a definition of a full name. With `prefix` pattern type, the resource name will be used only as a prefix. Default value is `literal`. + type: + type: string + enum: + - topic + - group + - cluster + - transactionalId + description: Resource type. The available resource types are `topic`, `group`, `cluster`, and `transactionalId`. + required: + - type + description: Indicates the resource for which given ACL rule applies. + type: + type: string + enum: + - allow + - deny + description: The type of the rule. Currently the only supported type is `allow`. ACL rules with type `allow` are used to allow user to execute the specified operations. Default value is `allow`. + required: + - operation + - resource + description: List of ACL rules which should be applied to this user. + type: + type: string + enum: + - simple + description: Authorization type. Currently the only supported type is `simple`. `simple` authorization type uses Kafka's `kafka.security.authorizer.AclAuthorizer` class for authorization. + required: + - acls + - type + description: Authorization rules for this Kafka user. + quotas: + type: object + properties: + consumerByteRate: + type: integer + minimum: 0 + description: A quota on the maximum bytes per-second that each client group can fetch from a broker before the clients in the group are throttled. Defined on a per-broker basis. + controllerMutationRate: + type: number + minimum: 0 + description: A quota on the rate at which mutations are accepted for the create topics request, the create partitions request and the delete topics request. The rate is accumulated by the number of partitions created or deleted. + producerByteRate: + type: integer + minimum: 0 + description: A quota on the maximum bytes per-second that each client group can publish to a broker before the clients in the group are throttled. Defined on a per-broker basis. 
+ requestPercentage: + type: integer + minimum: 0 + description: A quota on the maximum CPU utilization of each client group as a percentage of network and I/O threads. + description: Quotas on requests to control the broker resources used by clients. Network bandwidth and request rate quotas can be enforced.Kafka documentation for Kafka User quotas can be found at http://kafka.apache.org/documentation/#design_quotas. + template: + type: object + properties: + secret: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + description: Template for KafkaUser resources. The template allows users to specify how the `Secret` with password or TLS certificates is generated. + description: Template to specify how Kafka User `Secrets` are generated. + description: The specification of the user. + status: + type: object + properties: + conditions: + type: array + items: + type: object + properties: + type: + type: string + description: The unique identifier of a condition, used to distinguish between other conditions in the resource. + status: + type: string + description: The status of the condition, either True, False or Unknown. + lastTransitionTime: + type: string + description: Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. + reason: + type: string + description: The reason for the condition's last transition (a single word in CamelCase). 
+ message: + type: string + description: Human-readable message indicating details about the condition's last transition. + description: List of status conditions. + observedGeneration: + type: integer + description: The generation of the CRD that was last reconciled by the operator. + username: + type: string + description: Username. + secret: + type: string + description: The name of `Secret` where the credentials are stored. + description: The status of the Kafka User. + - name: v1beta1 + served: true + storage: false + subresources: + status: {} + additionalPrinterColumns: + - name: Cluster + description: The name of the Kafka cluster this user belongs to + jsonPath: .metadata.labels.strimzi\.io/cluster + type: string + - name: Authentication + description: How the user is authenticated + jsonPath: .spec.authentication.type + type: string + - name: Authorization + description: How the user is authorised + jsonPath: .spec.authorization.type + type: string + - name: Ready + description: The state of the custom resource + jsonPath: .status.conditions[?(@.type=="Ready")].status + type: string + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + authentication: + type: object + properties: + password: + type: object + properties: + valueFrom: + type: object + properties: + secretKeyRef: + type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean + description: Selects a key of a Secret in the resource's namespace. + description: Secret from which the password should be read. + required: + - valueFrom + description: Specify the password for the user. If not set, a new password is generated by the User Operator. + type: + type: string + enum: + - tls + - tls-external + - scram-sha-512 + description: Authentication type. + required: + - type + description: "Authentication mechanism enabled for this Kafka user. 
The supported authentication mechanisms are `scram-sha-512`, `tls`, and `tls-external`. \n\n* `scram-sha-512` generates a secret with SASL SCRAM-SHA-512 credentials.\n* `tls` generates a secret with user certificate for mutual TLS authentication.\n* `tls-external` does not generate a user certificate. But prepares the user for using mutual TLS authentication using a user certificate generated outside the User Operator.\n ACLs and quotas set for this user are configured in the `CN=` format.\n\nAuthentication is optional. If authentication is not configured, no credentials are generated. ACLs and quotas set for the user are configured in the `` format suitable for SASL authentication." + authorization: + type: object + properties: + acls: + type: array + items: + type: object + properties: + host: + type: string + description: The host from which the action described in the ACL rule is allowed or denied. + operation: + type: string + enum: + - Read + - Write + - Create + - Delete + - Alter + - Describe + - ClusterAction + - AlterConfigs + - DescribeConfigs + - IdempotentWrite + - All + description: 'Operation which will be allowed or denied. Supported operations are: Read, Write, Create, Delete, Alter, Describe, ClusterAction, AlterConfigs, DescribeConfigs, IdempotentWrite and All.' + resource: + type: object + properties: + name: + type: string + description: Name of resource for which given ACL rule applies. Can be combined with `patternType` field to use prefix pattern. + patternType: + type: string + enum: + - literal + - prefix + description: Describes the pattern used in the resource field. The supported types are `literal` and `prefix`. With `literal` pattern type, the resource field will be used as a definition of a full name. With `prefix` pattern type, the resource name will be used only as a prefix. Default value is `literal`. + type: + type: string + enum: + - topic + - group + - cluster + - transactionalId + description: Resource type. 
The available resource types are `topic`, `group`, `cluster`, and `transactionalId`. + required: + - type + description: Indicates the resource for which given ACL rule applies. + type: + type: string + enum: + - allow + - deny + description: The type of the rule. Currently the only supported type is `allow`. ACL rules with type `allow` are used to allow user to execute the specified operations. Default value is `allow`. + required: + - operation + - resource + description: List of ACL rules which should be applied to this user. + type: + type: string + enum: + - simple + description: Authorization type. Currently the only supported type is `simple`. `simple` authorization type uses Kafka's `kafka.security.authorizer.AclAuthorizer` class for authorization. + required: + - acls + - type + description: Authorization rules for this Kafka user. + quotas: + type: object + properties: + consumerByteRate: + type: integer + minimum: 0 + description: A quota on the maximum bytes per-second that each client group can fetch from a broker before the clients in the group are throttled. Defined on a per-broker basis. + controllerMutationRate: + type: number + minimum: 0 + description: A quota on the rate at which mutations are accepted for the create topics request, the create partitions request and the delete topics request. The rate is accumulated by the number of partitions created or deleted. + producerByteRate: + type: integer + minimum: 0 + description: A quota on the maximum bytes per-second that each client group can publish to a broker before the clients in the group are throttled. Defined on a per-broker basis. + requestPercentage: + type: integer + minimum: 0 + description: A quota on the maximum CPU utilization of each client group as a percentage of network and I/O threads. + description: Quotas on requests to control the broker resources used by clients. 
Network bandwidth and request rate quotas can be enforced.Kafka documentation for Kafka User quotas can be found at http://kafka.apache.org/documentation/#design_quotas. + template: + type: object + properties: + secret: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + description: Template for KafkaUser resources. The template allows users to specify how the `Secret` with password or TLS certificates is generated. + description: Template to specify how Kafka User `Secrets` are generated. + description: The specification of the user. + status: + type: object + properties: + conditions: + type: array + items: + type: object + properties: + type: + type: string + description: The unique identifier of a condition, used to distinguish between other conditions in the resource. + status: + type: string + description: The status of the condition, either True, False or Unknown. + lastTransitionTime: + type: string + description: Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. + reason: + type: string + description: The reason for the condition's last transition (a single word in CamelCase). + message: + type: string + description: Human-readable message indicating details about the condition's last transition. + description: List of status conditions. 
+ observedGeneration: + type: integer + description: The generation of the CRD that was last reconciled by the operator. + username: + type: string + description: Username. + secret: + type: string + description: The name of `Secret` where the credentials are stored. + description: The status of the Kafka User. + - name: v1alpha1 + served: true + storage: false + subresources: + status: {} + additionalPrinterColumns: + - name: Cluster + description: The name of the Kafka cluster this user belongs to + jsonPath: .metadata.labels.strimzi\.io/cluster + type: string + - name: Authentication + description: How the user is authenticated + jsonPath: .spec.authentication.type + type: string + - name: Authorization + description: How the user is authorised + jsonPath: .spec.authorization.type + type: string + - name: Ready + description: The state of the custom resource + jsonPath: .status.conditions[?(@.type=="Ready")].status + type: string + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + authentication: + type: object + properties: + password: + type: object + properties: + valueFrom: + type: object + properties: + secretKeyRef: + type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean + description: Selects a key of a Secret in the resource's namespace. + description: Secret from which the password should be read. + required: + - valueFrom + description: Specify the password for the user. If not set, a new password is generated by the User Operator. + type: + type: string + enum: + - tls + - tls-external + - scram-sha-512 + description: Authentication type. + required: + - type + description: "Authentication mechanism enabled for this Kafka user. The supported authentication mechanisms are `scram-sha-512`, `tls`, and `tls-external`. 
\n\n* `scram-sha-512` generates a secret with SASL SCRAM-SHA-512 credentials.\n* `tls` generates a secret with user certificate for mutual TLS authentication.\n* `tls-external` does not generate a user certificate. But prepares the user for using mutual TLS authentication using a user certificate generated outside the User Operator.\n ACLs and quotas set for this user are configured in the `CN=` format.\n\nAuthentication is optional. If authentication is not configured, no credentials are generated. ACLs and quotas set for the user are configured in the `` format suitable for SASL authentication." + authorization: + type: object + properties: + acls: + type: array + items: + type: object + properties: + host: + type: string + description: The host from which the action described in the ACL rule is allowed or denied. + operation: + type: string + enum: + - Read + - Write + - Create + - Delete + - Alter + - Describe + - ClusterAction + - AlterConfigs + - DescribeConfigs + - IdempotentWrite + - All + description: 'Operation which will be allowed or denied. Supported operations are: Read, Write, Create, Delete, Alter, Describe, ClusterAction, AlterConfigs, DescribeConfigs, IdempotentWrite and All.' + resource: + type: object + properties: + name: + type: string + description: Name of resource for which given ACL rule applies. Can be combined with `patternType` field to use prefix pattern. + patternType: + type: string + enum: + - literal + - prefix + description: Describes the pattern used in the resource field. The supported types are `literal` and `prefix`. With `literal` pattern type, the resource field will be used as a definition of a full name. With `prefix` pattern type, the resource name will be used only as a prefix. Default value is `literal`. + type: + type: string + enum: + - topic + - group + - cluster + - transactionalId + description: Resource type. The available resource types are `topic`, `group`, `cluster`, and `transactionalId`. 
+ required: + - type + description: Indicates the resource for which given ACL rule applies. + type: + type: string + enum: + - allow + - deny + description: The type of the rule. Currently the only supported type is `allow`. ACL rules with type `allow` are used to allow user to execute the specified operations. Default value is `allow`. + required: + - operation + - resource + description: List of ACL rules which should be applied to this user. + type: + type: string + enum: + - simple + description: Authorization type. Currently the only supported type is `simple`. `simple` authorization type uses Kafka's `kafka.security.authorizer.AclAuthorizer` class for authorization. + required: + - acls + - type + description: Authorization rules for this Kafka user. + quotas: + type: object + properties: + consumerByteRate: + type: integer + minimum: 0 + description: A quota on the maximum bytes per-second that each client group can fetch from a broker before the clients in the group are throttled. Defined on a per-broker basis. + controllerMutationRate: + type: number + minimum: 0 + description: A quota on the rate at which mutations are accepted for the create topics request, the create partitions request and the delete topics request. The rate is accumulated by the number of partitions created or deleted. + producerByteRate: + type: integer + minimum: 0 + description: A quota on the maximum bytes per-second that each client group can publish to a broker before the clients in the group are throttled. Defined on a per-broker basis. + requestPercentage: + type: integer + minimum: 0 + description: A quota on the maximum CPU utilization of each client group as a percentage of network and I/O threads. + description: Quotas on requests to control the broker resources used by clients. Network bandwidth and request rate quotas can be enforced.Kafka documentation for Kafka User quotas can be found at http://kafka.apache.org/documentation/#design_quotas. 
+ template: + type: object + properties: + secret: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + description: Template for KafkaUser resources. The template allows users to specify how the `Secret` with password or TLS certificates is generated. + description: Template to specify how Kafka User `Secrets` are generated. + description: The specification of the user. + status: + type: object + properties: + conditions: + type: array + items: + type: object + properties: + type: + type: string + description: The unique identifier of a condition, used to distinguish between other conditions in the resource. + status: + type: string + description: The status of the condition, either True, False or Unknown. + lastTransitionTime: + type: string + description: Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. + reason: + type: string + description: The reason for the condition's last transition (a single word in CamelCase). + message: + type: string + description: Human-readable message indicating details about the condition's last transition. + description: List of status conditions. + observedGeneration: + type: integer + description: The generation of the CRD that was last reconciled by the operator. + username: + type: string + description: Username. + secret: + type: string + description: The name of `Secret` where the credentials are stored. 
description: The bootstrap servers for the consumer
Consult the documentation to understand the process required to upgrade or downgrade the version. + replicas: + type: integer + minimum: 0 + description: The number of pods in the `Deployment`. + image: + type: string + description: The docker image for the pods. + consumer: + type: object + properties: + numStreams: + type: integer + minimum: 1 + description: Specifies the number of consumer stream threads to create. + offsetCommitInterval: + type: integer + description: Specifies the offset auto-commit interval in ms. Default value is 60000. + bootstrapServers: + type: string + description: A list of host:port pairs for establishing the initial connection to the Kafka cluster. + groupId: + type: string + description: A unique string that identifies the consumer group this consumer belongs to. + authentication: + type: object + properties: + accessToken: + type: object + properties: + key: + type: string + description: The key under which the secret value is stored in the Kubernetes Secret. + secretName: + type: string + description: The name of the Kubernetes Secret containing the secret value. + required: + - key + - secretName + description: Link to Kubernetes Secret containing the access token which was obtained from the authorization server. + accessTokenIsJwt: + type: boolean + description: Configure whether access token should be treated as JWT. This should be set to `false` if the authorization server returns opaque tokens. Defaults to `true`. + audience: + type: string + description: OAuth audience to use when authenticating against the authorization server. Some authorization servers require the audience to be explicitly set. The possible values depend on how the authorization server is configured. By default, `audience` is not specified when performing the token endpoint request. + certificateAndKey: + type: object + properties: + certificate: + type: string + description: The name of the file certificate in the Secret. 
+ key: + type: string + description: The name of the private key in the Secret. + secretName: + type: string + description: The name of the Secret containing the certificate. + required: + - certificate + - key + - secretName + description: Reference to the `Secret` which holds the certificate and private key pair. + clientId: + type: string + description: OAuth Client ID which the Kafka client can use to authenticate against the OAuth server and use the token endpoint URI. + clientSecret: + type: object + properties: + key: + type: string + description: The key under which the secret value is stored in the Kubernetes Secret. + secretName: + type: string + description: The name of the Kubernetes Secret containing the secret value. + required: + - key + - secretName + description: Link to Kubernetes Secret containing the OAuth client secret which the Kafka client can use to authenticate against the OAuth server and use the token endpoint URI. + disableTlsHostnameVerification: + type: boolean + description: Enable or disable TLS hostname verification. Default value is `false`. + maxTokenExpirySeconds: + type: integer + description: Set or limit time-to-live of the access tokens to the specified number of seconds. This should be set if the authorization server returns opaque tokens. + passwordSecret: + type: object + properties: + password: + type: string + description: The name of the key in the Secret under which the password is stored. + secretName: + type: string + description: The name of the Secret containing the password. + required: + - password + - secretName + description: Reference to the `Secret` which holds the password. + refreshToken: + type: object + properties: + key: + type: string + description: The key under which the secret value is stored in the Kubernetes Secret. + secretName: + type: string + description: The name of the Kubernetes Secret containing the secret value. 
Authentication type. Currently the supported types are `tls`, `scram-sha-256`, `scram-sha-512`, `plain`, and `oauth`. `scram-sha-256` and `scram-sha-512` types use SASL SCRAM-SHA-256 and SASL SCRAM-SHA-512 Authentication, respectively. `plain` type uses SASL PLAIN Authentication. `oauth` type uses SASL OAUTHBEARER Authentication. The `tls` type uses TLS Client Authentication. The `tls` type is supported only over TLS connections.
Properties with the following prefixes cannot be set: ssl., bootstrap.servers, group.id, sasl., security., interceptor.classes (with the exception of: ssl.endpoint.identification.algorithm, ssl.cipher.suites, ssl.protocol, ssl.enabled.protocols).' + tls: + type: object + properties: + trustedCertificates: + type: array + items: + type: object + properties: + certificate: + type: string + description: The name of the file certificate in the Secret. + secretName: + type: string + description: The name of the Secret containing the certificate. + required: + - certificate + - secretName + description: Trusted certificates for TLS connection. + description: TLS configuration for connecting MirrorMaker to the cluster. + required: + - bootstrapServers + - groupId + description: Configuration of source cluster. + producer: + type: object + properties: + bootstrapServers: + type: string + description: A list of host:port pairs for establishing the initial connection to the Kafka cluster. + abortOnSendFailure: + type: boolean + description: Flag to set the MirrorMaker to exit on a failed send. Default value is `true`. + authentication: + type: object + properties: + accessToken: + type: object + properties: + key: + type: string + description: The key under which the secret value is stored in the Kubernetes Secret. + secretName: + type: string + description: The name of the Kubernetes Secret containing the secret value. + required: + - key + - secretName + description: Link to Kubernetes Secret containing the access token which was obtained from the authorization server. + accessTokenIsJwt: + type: boolean + description: Configure whether access token should be treated as JWT. This should be set to `false` if the authorization server returns opaque tokens. Defaults to `true`. + audience: + type: string + description: OAuth audience to use when authenticating against the authorization server. Some authorization servers require the audience to be explicitly set. 
The possible values depend on how the authorization server is configured. By default, `audience` is not specified when performing the token endpoint request. + certificateAndKey: + type: object + properties: + certificate: + type: string + description: The name of the file certificate in the Secret. + key: + type: string + description: The name of the private key in the Secret. + secretName: + type: string + description: The name of the Secret containing the certificate. + required: + - certificate + - key + - secretName + description: Reference to the `Secret` which holds the certificate and private key pair. + clientId: + type: string + description: OAuth Client ID which the Kafka client can use to authenticate against the OAuth server and use the token endpoint URI. + clientSecret: + type: object + properties: + key: + type: string + description: The key under which the secret value is stored in the Kubernetes Secret. + secretName: + type: string + description: The name of the Kubernetes Secret containing the secret value. + required: + - key + - secretName + description: Link to Kubernetes Secret containing the OAuth client secret which the Kafka client can use to authenticate against the OAuth server and use the token endpoint URI. + disableTlsHostnameVerification: + type: boolean + description: Enable or disable TLS hostname verification. Default value is `false`. + maxTokenExpirySeconds: + type: integer + description: Set or limit time-to-live of the access tokens to the specified number of seconds. This should be set if the authorization server returns opaque tokens. + passwordSecret: + type: object + properties: + password: + type: string + description: The name of the key in the Secret under which the password is stored. + secretName: + type: string + description: The name of the Secret containing the password. + required: + - password + - secretName + description: Reference to the `Secret` which holds the password. 
Authentication type. Currently the supported types are `tls`, `scram-sha-256`, `scram-sha-512`, `plain`, and `oauth`. `scram-sha-256` and `scram-sha-512` types use SASL SCRAM-SHA-256 and SASL SCRAM-SHA-512 Authentication, respectively. `plain` type uses SASL PLAIN Authentication. `oauth` type uses SASL OAUTHBEARER Authentication. The `tls` type uses TLS Client Authentication. The `tls` type is supported only over TLS connections.
+ config: + x-kubernetes-preserve-unknown-fields: true + type: object + description: 'The MirrorMaker producer config. Properties with the following prefixes cannot be set: ssl., bootstrap.servers, sasl., security., interceptor.classes (with the exception of: ssl.endpoint.identification.algorithm, ssl.cipher.suites, ssl.protocol, ssl.enabled.protocols).' + tls: + type: object + properties: + trustedCertificates: + type: array + items: + type: object + properties: + certificate: + type: string + description: The name of the file certificate in the Secret. + secretName: + type: string + description: The name of the Secret containing the certificate. + required: + - certificate + - secretName + description: Trusted certificates for TLS connection. + description: TLS configuration for connecting MirrorMaker to the cluster. + required: + - bootstrapServers + description: Configuration of target cluster. + resources: + type: object + properties: + limits: + x-kubernetes-preserve-unknown-fields: true + type: object + requests: + x-kubernetes-preserve-unknown-fields: true + type: object + description: CPU and memory resources to reserve. + whitelist: + type: string + description: List of topics which are included for mirroring. This option allows any regular expression using Java-style regular expressions. Mirroring two topics named A and B is achieved by using the expression `A\|B`. Or, as a special case, you can mirror all topics using the regular expression `*`. You can also specify multiple regular expressions separated by commas. + include: + type: string + description: List of topics which are included for mirroring. This option allows any regular expression using Java-style regular expressions. Mirroring two topics named A and B is achieved by using the expression `A\|B`. Or, as a special case, you can mirror all topics using the regular expression `*`. You can also specify multiple regular expressions separated by commas. 
description: -Xms option to the JVM.
+ description: ConfigMap entry where the Prometheus JMX Exporter configuration is stored. For details of the structure of this configuration, see the {JMXExporter}. + required: + - type + - valueFrom + description: Metrics configuration. + tracing: + type: object + properties: + type: + type: string + enum: + - jaeger + description: Type of the tracing used. Currently the only supported type is `jaeger` for Jaeger tracing. + required: + - type + description: The configuration of tracing in Kafka MirrorMaker. + template: + type: object + properties: + deployment: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + deploymentStrategy: + type: string + enum: + - RollingUpdate + - Recreate + description: DeploymentStrategy which will be used for this Deployment. Valid values are `RollingUpdate` and `Recreate`. Defaults to `RollingUpdate`. + description: Template for Kafka MirrorMaker `Deployment`. + pod: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. 
+ description: Metadata applied to the resource. + imagePullSecrets: + type: array + items: + type: object + properties: + name: + type: string + description: List of references to secrets in the same namespace to use for pulling any of the images used by this Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` environment variable in Cluster Operator and the `imagePullSecrets` option are specified, only the `imagePullSecrets` variable is used and the `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored. + securityContext: + type: object + properties: + fsGroup: + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + supplementalGroups: + type: array + items: + type: integer + sysctls: + type: array + items: + type: object + properties: + name: + type: string + value: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Configures pod-level security attributes and common container settings. + terminationGracePeriodSeconds: + type: integer + minimum: 0 + description: The grace period is the duration in seconds after the processes running in the pod are sent a termination signal, and the time when the processes are forcibly halted with a kill signal. Set this value to longer than the expected cleanup time for your process. Value must be a non-negative integer. A zero value indicates delete immediately. You might need to increase the grace period for very large Kafka clusters, so that the Kafka brokers have enough time to transfer their work to another broker before they are terminated. 
Defaults to 30 seconds. + affinity: + type: object + properties: + nodeAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + preference: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: object + properties: + nodeSelectorTerms: + type: array + items: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + podAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + 
requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + podAntiAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + 
x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + description: The pod's affinity rules. + tolerations: + type: array + items: + type: object + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + type: integer + value: + type: string + description: The pod's tolerations. + priorityClassName: + type: string + description: The name of the priority class used to assign priority to the pods. For more information about priority classes, see {K8sPriorityClass}. + schedulerName: + type: string + description: The name of the scheduler used to dispatch this `Pod`. If not specified, the default scheduler will be used. + hostAliases: + type: array + items: + type: object + properties: + hostnames: + type: array + items: + type: string + ip: + type: string + description: The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified. + tmpDirSizeLimit: + type: string + pattern: ^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ + description: Defines the total amount (for example `1Gi`) of local storage required for temporary EmptyDir volume (`/tmp`). Default value is `1Mi`. + enableServiceLinks: + type: boolean + description: Indicates whether information about services should be injected into Pod's environment variables. 
+ topologySpreadConstraints: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + maxSkew: + type: integer + topologyKey: + type: string + whenUnsatisfiable: + type: string + description: The pod's topology spread constraints. + description: Template for Kafka MirrorMaker `Pods`. + podDisruptionBudget: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata to apply to the `PodDisruptionBudgetTemplate` resource. + maxUnavailable: + type: integer + minimum: 0 + description: Maximum number of unavailable pods to allow automatic Pod eviction. A Pod eviction is allowed when the `maxUnavailable` number of pods or fewer are unavailable after the eviction. Setting this value to 0 prevents all voluntary evictions, so the pods must be evicted manually. Defaults to 1. + description: Template for Kafka MirrorMaker `PodDisruptionBudget`. + mirrorMakerContainer: + type: object + properties: + env: + type: array + items: + type: object + properties: + name: + type: string + description: The environment variable key. + value: + type: string + description: The environment variable value. + description: Environment variables which should be applied to the container. 
+ securityContext: + type: object + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + type: object + properties: + add: + type: array + items: + type: string + drop: + type: array + items: + type: string + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Security context for the container. + description: Template for Kafka MirrorMaker container. + serviceAccount: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + description: Template for the Kafka MirrorMaker service account. + description: Template to specify how Kafka MirrorMaker resources, `Deployments` and `Pods`, are generated. + livenessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. 
description: The initial delay before the health is first checked. Default to 15 seconds. Minimum value is 0.
+ status: + type: object + properties: + conditions: + type: array + items: + type: object + properties: + type: + type: string + description: The unique identifier of a condition, used to distinguish between other conditions in the resource. + status: + type: string + description: The status of the condition, either True, False or Unknown. + lastTransitionTime: + type: string + description: Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. + reason: + type: string + description: The reason for the condition's last transition (a single word in CamelCase). + message: + type: string + description: Human-readable message indicating details about the condition's last transition. + description: List of status conditions. + observedGeneration: + type: integer + description: The generation of the CRD that was last reconciled by the operator. + labelSelector: + type: string + description: Label selector for pods providing this resource. + replicas: + type: integer + description: The current number of pods being used to provide this resource. + description: The status of Kafka MirrorMaker. 
diff --git a/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/046-Crd-kafkabridge.yaml b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/046-Crd-kafkabridge.yaml new file mode 100644 index 000000000..e0374e3e7 --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/046-Crd-kafkabridge.yaml @@ -0,0 +1,1039 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: kafkabridges.kafka.strimzi.io + labels: + app: strimzi + strimzi.io/crd-install: "true" + component: kafkabridges.kafka.strimzi.io-crd +spec: + group: kafka.strimzi.io + names: + kind: KafkaBridge + listKind: KafkaBridgeList + singular: kafkabridge + plural: kafkabridges + shortNames: + - kb + categories: + - strimzi + scope: Namespaced + conversion: + strategy: None + versions: + - name: v1beta2 + served: true + storage: true + subresources: + status: {} + scale: + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + labelSelectorPath: .status.labelSelector + additionalPrinterColumns: + - name: Desired replicas + description: The desired number of Kafka Bridge replicas + jsonPath: .spec.replicas + type: integer + - name: Bootstrap Servers + description: The boostrap servers + jsonPath: .spec.bootstrapServers + type: string + priority: 1 + - name: Ready + description: The state of the custom resource + jsonPath: .status.conditions[?(@.type=="Ready")].status + type: string + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + replicas: + type: integer + minimum: 0 + description: The number of pods in the `Deployment`. + image: + type: string + description: The docker image for the pods. + bootstrapServers: + type: string + description: A list of host:port pairs for establishing the initial connection to the Kafka cluster. 
+ tls: + type: object + properties: + trustedCertificates: + type: array + items: + type: object + properties: + certificate: + type: string + description: The name of the file certificate in the Secret. + secretName: + type: string + description: The name of the Secret containing the certificate. + required: + - certificate + - secretName + description: Trusted certificates for TLS connection. + description: TLS configuration for connecting Kafka Bridge to the cluster. + authentication: + type: object + properties: + accessToken: + type: object + properties: + key: + type: string + description: The key under which the secret value is stored in the Kubernetes Secret. + secretName: + type: string + description: The name of the Kubernetes Secret containing the secret value. + required: + - key + - secretName + description: Link to Kubernetes Secret containing the access token which was obtained from the authorization server. + accessTokenIsJwt: + type: boolean + description: Configure whether access token should be treated as JWT. This should be set to `false` if the authorization server returns opaque tokens. Defaults to `true`. + audience: + type: string + description: OAuth audience to use when authenticating against the authorization server. Some authorization servers require the audience to be explicitly set. The possible values depend on how the authorization server is configured. By default, `audience` is not specified when performing the token endpoint request. + certificateAndKey: + type: object + properties: + certificate: + type: string + description: The name of the file certificate in the Secret. + key: + type: string + description: The name of the private key in the Secret. + secretName: + type: string + description: The name of the Secret containing the certificate. + required: + - certificate + - key + - secretName + description: Reference to the `Secret` which holds the certificate and private key pair. 
+ clientId: + type: string + description: OAuth Client ID which the Kafka client can use to authenticate against the OAuth server and use the token endpoint URI. + clientSecret: + type: object + properties: + key: + type: string + description: The key under which the secret value is stored in the Kubernetes Secret. + secretName: + type: string + description: The name of the Kubernetes Secret containing the secret value. + required: + - key + - secretName + description: Link to Kubernetes Secret containing the OAuth client secret which the Kafka client can use to authenticate against the OAuth server and use the token endpoint URI. + disableTlsHostnameVerification: + type: boolean + description: Enable or disable TLS hostname verification. Default value is `false`. + maxTokenExpirySeconds: + type: integer + description: Set or limit time-to-live of the access tokens to the specified number of seconds. This should be set if the authorization server returns opaque tokens. + passwordSecret: + type: object + properties: + password: + type: string + description: The name of the key in the Secret under which the password is stored. + secretName: + type: string + description: The name of the Secret containing the password. + required: + - password + - secretName + description: Reference to the `Secret` which holds the password. + refreshToken: + type: object + properties: + key: + type: string + description: The key under which the secret value is stored in the Kubernetes Secret. + secretName: + type: string + description: The name of the Kubernetes Secret containing the secret value. + required: + - key + - secretName + description: Link to Kubernetes Secret containing the refresh token which can be used to obtain access token from the authorization server. + scope: + type: string + description: OAuth scope to use when authenticating against the authorization server. Some authorization servers require this to be set. 
The possible values depend on how authorization server is configured. By default `scope` is not specified when doing the token endpoint request. + tlsTrustedCertificates: + type: array + items: + type: object + properties: + certificate: + type: string + description: The name of the file certificate in the Secret. + secretName: + type: string + description: The name of the Secret containing the certificate. + required: + - certificate + - secretName + description: Trusted certificates for TLS connection to the OAuth server. + tokenEndpointUri: + type: string + description: Authorization server token endpoint URI. + type: + type: string + enum: + - tls + - scram-sha-256 + - scram-sha-512 + - plain + - oauth + description: Authentication type. Currently the only supported types are `tls`, `scram-sha-256`, `scram-sha-512`, and `plain`. `scram-sha-256` and `scram-sha-512` types use SASL SCRAM-SHA-256 and SASL SCRAM-SHA-512 Authentication, respectively. `plain` type uses SASL PLAIN Authentication. `oauth` type uses SASL OAUTHBEARER Authentication. The `tls` type uses TLS Client Authentication. The `tls` type is supported only over TLS connections. + username: + type: string + description: Username used for the authentication. + required: + - type + description: Authentication configuration for connecting to the cluster. + http: + type: object + properties: + port: + type: integer + minimum: 1023 + description: The port which is the server listening on. + cors: + type: object + properties: + allowedOrigins: + type: array + items: + type: string + description: List of allowed origins. Java regular expressions can be used. + allowedMethods: + type: array + items: + type: string + description: List of allowed HTTP methods. + required: + - allowedOrigins + - allowedMethods + description: CORS configuration for the HTTP Bridge. + description: The HTTP related configuration. 
+ adminClient: + type: object + properties: + config: + x-kubernetes-preserve-unknown-fields: true + type: object + description: The Kafka AdminClient configuration used for AdminClient instances created by the bridge. + description: Kafka AdminClient related configuration. + consumer: + type: object + properties: + config: + x-kubernetes-preserve-unknown-fields: true + type: object + description: 'The Kafka consumer configuration used for consumer instances created by the bridge. Properties with the following prefixes cannot be set: ssl., bootstrap.servers, group.id, sasl., security. (with the exception of: ssl.endpoint.identification.algorithm, ssl.cipher.suites, ssl.protocol, ssl.enabled.protocols).' + description: Kafka consumer related configuration. + producer: + type: object + properties: + config: + x-kubernetes-preserve-unknown-fields: true + type: object + description: 'The Kafka producer configuration used for producer instances created by the bridge. Properties with the following prefixes cannot be set: ssl., bootstrap.servers, sasl., security. (with the exception of: ssl.endpoint.identification.algorithm, ssl.cipher.suites, ssl.protocol, ssl.enabled.protocols).' + description: Kafka producer related configuration. + resources: + type: object + properties: + limits: + x-kubernetes-preserve-unknown-fields: true + type: object + requests: + x-kubernetes-preserve-unknown-fields: true + type: object + description: CPU and memory resources to reserve. + jvmOptions: + type: object + properties: + "-XX": + x-kubernetes-preserve-unknown-fields: true + type: object + description: A map of -XX options to the JVM. + "-Xms": + type: string + pattern: ^[0-9]+[mMgG]?$ + description: -Xms option to to the JVM. + "-Xmx": + type: string + pattern: ^[0-9]+[mMgG]?$ + description: -Xmx option to to the JVM. + gcLoggingEnabled: + type: boolean + description: Specifies whether the Garbage Collection logging is enabled. The default is false. 
+ javaSystemProperties: + type: array + items: + type: object + properties: + name: + type: string + description: The system property name. + value: + type: string + description: The system property value. + description: A map of additional system properties which will be passed using the `-D` option to the JVM. + description: '**Currently not supported** JVM Options for pods.' + logging: + type: object + properties: + loggers: + x-kubernetes-preserve-unknown-fields: true + type: object + description: A Map from logger name to logger level. + type: + type: string + enum: + - inline + - external + description: Logging type, must be either 'inline' or 'external'. + valueFrom: + type: object + properties: + configMapKeyRef: + type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean + description: Reference to the key in the ConfigMap containing the configuration. + description: '`ConfigMap` entry where the logging configuration is stored. ' + required: + - type + description: Logging configuration for Kafka Bridge. + enableMetrics: + type: boolean + description: Enable the metrics for the Kafka Bridge. Default is false. + livenessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0. + periodSeconds: + type: integer + minimum: 1 + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. 
+ timeoutSeconds: + type: integer + minimum: 1 + description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1. + description: Pod liveness checking. + readinessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0. + periodSeconds: + type: integer + minimum: 1 + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1. + description: Pod readiness checking. + template: + type: object + properties: + deployment: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + deploymentStrategy: + type: string + enum: + - RollingUpdate + - Recreate + description: DeploymentStrategy which will be used for this Deployment. Valid values are `RollingUpdate` and `Recreate`. 
Defaults to `RollingUpdate`. + description: Template for Kafka Bridge `Deployment`. + pod: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + imagePullSecrets: + type: array + items: + type: object + properties: + name: + type: string + description: List of references to secrets in the same namespace to use for pulling any of the images used by this Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` environment variable in Cluster Operator and the `imagePullSecrets` option are specified, only the `imagePullSecrets` variable is used and the `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored. 
+ securityContext: + type: object + properties: + fsGroup: + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + supplementalGroups: + type: array + items: + type: integer + sysctls: + type: array + items: + type: object + properties: + name: + type: string + value: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Configures pod-level security attributes and common container settings. + terminationGracePeriodSeconds: + type: integer + minimum: 0 + description: The grace period is the duration in seconds after the processes running in the pod are sent a termination signal, and the time when the processes are forcibly halted with a kill signal. Set this value to longer than the expected cleanup time for your process. Value must be a non-negative integer. A zero value indicates delete immediately. You might need to increase the grace period for very large Kafka clusters, so that the Kafka brokers have enough time to transfer their work to another broker before they are terminated. Defaults to 30 seconds. 
+ affinity: + type: object + properties: + nodeAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + preference: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: object + properties: + nodeSelectorTerms: + type: array + items: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + podAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + 
requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + podAntiAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + 
x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + description: The pod's affinity rules. + tolerations: + type: array + items: + type: object + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + type: integer + value: + type: string + description: The pod's tolerations. + priorityClassName: + type: string + description: The name of the priority class used to assign priority to the pods. For more information about priority classes, see {K8sPriorityClass}. + schedulerName: + type: string + description: The name of the scheduler used to dispatch this `Pod`. If not specified, the default scheduler will be used. + hostAliases: + type: array + items: + type: object + properties: + hostnames: + type: array + items: + type: string + ip: + type: string + description: The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified. + tmpDirSizeLimit: + type: string + pattern: ^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ + description: Defines the total amount (for example `1Gi`) of local storage required for temporary EmptyDir volume (`/tmp`). Default value is `1Mi`. + enableServiceLinks: + type: boolean + description: Indicates whether information about services should be injected into Pod's environment variables. 
+ topologySpreadConstraints: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + maxSkew: + type: integer + topologyKey: + type: string + whenUnsatisfiable: + type: string + description: The pod's topology spread constraints. + description: Template for Kafka Bridge `Pods`. + apiService: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + ipFamilyPolicy: + type: string + enum: + - SingleStack + - PreferDualStack + - RequireDualStack + description: Specifies the IP Family Policy used by the service. Available options are `SingleStack`, `PreferDualStack` and `RequireDualStack`. `SingleStack` is for a single IP family. `PreferDualStack` is for two IP families on dual-stack configured clusters or a single IP family on single-stack clusters. `RequireDualStack` fails unless there are two IP families on dual-stack configured clusters. If unspecified, Kubernetes will choose the default value based on the service type. Available on Kubernetes 1.20 and newer. + ipFamilies: + type: array + items: + type: string + enum: + - IPv4 + - IPv6 + description: Specifies the IP Families used by the service. Available options are `IPv4` and `IPv6. 
If unspecified, Kubernetes will choose the default value based on the `ipFamilyPolicy` setting. Available on Kubernetes 1.20 and newer. + description: Template for Kafka Bridge API `Service`. + podDisruptionBudget: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata to apply to the `PodDisruptionBudgetTemplate` resource. + maxUnavailable: + type: integer + minimum: 0 + description: Maximum number of unavailable pods to allow automatic Pod eviction. A Pod eviction is allowed when the `maxUnavailable` number of pods or fewer are unavailable after the eviction. Setting this value to 0 prevents all voluntary evictions, so the pods must be evicted manually. Defaults to 1. + description: Template for Kafka Bridge `PodDisruptionBudget`. + bridgeContainer: + type: object + properties: + env: + type: array + items: + type: object + properties: + name: + type: string + description: The environment variable key. + value: + type: string + description: The environment variable value. + description: Environment variables which should be applied to the container. 
+ securityContext: + type: object + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + type: object + properties: + add: + type: array + items: + type: string + drop: + type: array + items: + type: string + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Security context for the container. + description: Template for the Kafka Bridge container. + serviceAccount: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + description: Template for the Kafka Bridge service account. + description: Template for Kafka Bridge resources. The template allows users to specify how is the `Deployment` and `Pods` generated. + tracing: + type: object + properties: + type: + type: string + enum: + - jaeger + description: Type of the tracing used. Currently the only supported type is `jaeger` for Jaeger tracing. 
+ required: + - type + description: The configuration of tracing in Kafka Bridge. + required: + - bootstrapServers + description: The specification of the Kafka Bridge. + status: + type: object + properties: + conditions: + type: array + items: + type: object + properties: + type: + type: string + description: The unique identifier of a condition, used to distinguish between other conditions in the resource. + status: + type: string + description: The status of the condition, either True, False or Unknown. + lastTransitionTime: + type: string + description: Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. + reason: + type: string + description: The reason for the condition's last transition (a single word in CamelCase). + message: + type: string + description: Human-readable message indicating details about the condition's last transition. + description: List of status conditions. + observedGeneration: + type: integer + description: The generation of the CRD that was last reconciled by the operator. + url: + type: string + description: The URL at which external client applications can access the Kafka Bridge. + labelSelector: + type: string + description: Label selector for pods providing this resource. + replicas: + type: integer + description: The current number of pods being used to provide this resource. + description: The status of the Kafka Bridge. 
diff --git a/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/047-Crd-kafkaconnector.yaml b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/047-Crd-kafkaconnector.yaml new file mode 100644 index 000000000..5bbd9e208 --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/047-Crd-kafkaconnector.yaml @@ -0,0 +1,110 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: kafkaconnectors.kafka.strimzi.io + labels: + app: strimzi + strimzi.io/crd-install: "true" + component: kafkaconnectors.kafka.strimzi.io-crd +spec: + group: kafka.strimzi.io + names: + kind: KafkaConnector + listKind: KafkaConnectorList + singular: kafkaconnector + plural: kafkaconnectors + shortNames: + - kctr + categories: + - strimzi + scope: Namespaced + conversion: + strategy: None + versions: + - name: v1beta2 + served: true + storage: true + subresources: + status: {} + scale: + specReplicasPath: .spec.tasksMax + statusReplicasPath: .status.tasksMax + additionalPrinterColumns: + - name: Cluster + description: The name of the Kafka Connect cluster this connector belongs to + jsonPath: .metadata.labels.strimzi\.io/cluster + type: string + - name: Connector class + description: The class used by this connector + jsonPath: .spec.class + type: string + - name: Max Tasks + description: Maximum number of tasks + jsonPath: .spec.tasksMax + type: integer + - name: Ready + description: The state of the custom resource + jsonPath: .status.conditions[?(@.type=="Ready")].status + type: string + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + class: + type: string + description: The Class for the Kafka Connector. + tasksMax: + type: integer + minimum: 1 + description: The maximum number of tasks for the Kafka Connector. + config: + x-kubernetes-preserve-unknown-fields: true + type: object + description: 'The Kafka Connector configuration. 
The following properties cannot be set: connector.class, tasks.max.' + pause: + type: boolean + description: Whether the connector should be paused. Defaults to false. + description: The specification of the Kafka Connector. + status: + type: object + properties: + conditions: + type: array + items: + type: object + properties: + type: + type: string + description: The unique identifier of a condition, used to distinguish between other conditions in the resource. + status: + type: string + description: The status of the condition, either True, False or Unknown. + lastTransitionTime: + type: string + description: Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. + reason: + type: string + description: The reason for the condition's last transition (a single word in CamelCase). + message: + type: string + description: Human-readable message indicating details about the condition's last transition. + description: List of status conditions. + observedGeneration: + type: integer + description: The generation of the CRD that was last reconciled by the operator. + connectorStatus: + x-kubernetes-preserve-unknown-fields: true + type: object + description: The connector status, as reported by the Kafka Connect REST API. + tasksMax: + type: integer + description: The maximum number of tasks for the Kafka Connector. + topics: + type: array + items: + type: string + description: The list of topics used by the Kafka Connector. + description: The status of the Kafka Connector. 
diff --git a/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/048-Crd-kafkamirrormaker2.yaml b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/048-Crd-kafkamirrormaker2.yaml new file mode 100644 index 000000000..c9e39119f --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/048-Crd-kafkamirrormaker2.yaml @@ -0,0 +1,1897 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: kafkamirrormaker2s.kafka.strimzi.io + labels: + app: strimzi + strimzi.io/crd-install: "true" + component: kafkamirrormaker2.kafka.strimzi.io-crd +spec: + group: kafka.strimzi.io + names: + kind: KafkaMirrorMaker2 + listKind: KafkaMirrorMaker2List + singular: kafkamirrormaker2 + plural: kafkamirrormaker2s + shortNames: + - kmm2 + categories: + - strimzi + scope: Namespaced + conversion: + strategy: None + versions: + - name: v1beta2 + served: true + storage: true + subresources: + status: {} + scale: + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + labelSelectorPath: .status.labelSelector + additionalPrinterColumns: + - name: Desired replicas + description: The desired number of Kafka MirrorMaker 2.0 replicas + jsonPath: .spec.replicas + type: integer + - name: Ready + description: The state of the custom resource + jsonPath: .status.conditions[?(@.type=="Ready")].status + type: string + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + version: + type: string + description: The Kafka Connect version. Defaults to {DefaultKafkaVersion}. Consult the user documentation to understand the process required to upgrade or downgrade the version. + replicas: + type: integer + description: The number of pods in the Kafka Connect group. + image: + type: string + description: The docker image for the pods. + connectCluster: + type: string + description: The cluster alias used for Kafka Connect. 
The alias must match a cluster in the list at `spec.clusters`. + clusters: + type: array + items: + type: object + properties: + alias: + type: string + pattern: ^[a-zA-Z0-9\._\-]{1,100}$ + description: Alias used to reference the Kafka cluster. + bootstrapServers: + type: string + description: A comma-separated list of `host:port` pairs for establishing the connection to the Kafka cluster. + tls: + type: object + properties: + trustedCertificates: + type: array + items: + type: object + properties: + certificate: + type: string + description: The name of the file certificate in the Secret. + secretName: + type: string + description: The name of the Secret containing the certificate. + required: + - certificate + - secretName + description: Trusted certificates for TLS connection. + description: TLS configuration for connecting MirrorMaker 2.0 connectors to a cluster. + authentication: + type: object + properties: + accessToken: + type: object + properties: + key: + type: string + description: The key under which the secret value is stored in the Kubernetes Secret. + secretName: + type: string + description: The name of the Kubernetes Secret containing the secret value. + required: + - key + - secretName + description: Link to Kubernetes Secret containing the access token which was obtained from the authorization server. + accessTokenIsJwt: + type: boolean + description: Configure whether access token should be treated as JWT. This should be set to `false` if the authorization server returns opaque tokens. Defaults to `true`. + audience: + type: string + description: OAuth audience to use when authenticating against the authorization server. Some authorization servers require the audience to be explicitly set. The possible values depend on how the authorization server is configured. By default, `audience` is not specified when performing the token endpoint request. 
+ certificateAndKey: + type: object + properties: + certificate: + type: string + description: The name of the file certificate in the Secret. + key: + type: string + description: The name of the private key in the Secret. + secretName: + type: string + description: The name of the Secret containing the certificate. + required: + - certificate + - key + - secretName + description: Reference to the `Secret` which holds the certificate and private key pair. + clientId: + type: string + description: OAuth Client ID which the Kafka client can use to authenticate against the OAuth server and use the token endpoint URI. + clientSecret: + type: object + properties: + key: + type: string + description: The key under which the secret value is stored in the Kubernetes Secret. + secretName: + type: string + description: The name of the Kubernetes Secret containing the secret value. + required: + - key + - secretName + description: Link to Kubernetes Secret containing the OAuth client secret which the Kafka client can use to authenticate against the OAuth server and use the token endpoint URI. + disableTlsHostnameVerification: + type: boolean + description: Enable or disable TLS hostname verification. Default value is `false`. + maxTokenExpirySeconds: + type: integer + description: Set or limit time-to-live of the access tokens to the specified number of seconds. This should be set if the authorization server returns opaque tokens. + passwordSecret: + type: object + properties: + password: + type: string + description: The name of the key in the Secret under which the password is stored. + secretName: + type: string + description: The name of the Secret containing the password. + required: + - password + - secretName + description: Reference to the `Secret` which holds the password. + refreshToken: + type: object + properties: + key: + type: string + description: The key under which the secret value is stored in the Kubernetes Secret. 
+ secretName: + type: string + description: The name of the Kubernetes Secret containing the secret value. + required: + - key + - secretName + description: Link to Kubernetes Secret containing the refresh token which can be used to obtain access token from the authorization server. + scope: + type: string + description: OAuth scope to use when authenticating against the authorization server. Some authorization servers require this to be set. The possible values depend on how authorization server is configured. By default `scope` is not specified when doing the token endpoint request. + tlsTrustedCertificates: + type: array + items: + type: object + properties: + certificate: + type: string + description: The name of the file certificate in the Secret. + secretName: + type: string + description: The name of the Secret containing the certificate. + required: + - certificate + - secretName + description: Trusted certificates for TLS connection to the OAuth server. + tokenEndpointUri: + type: string + description: Authorization server token endpoint URI. + type: + type: string + enum: + - tls + - scram-sha-256 + - scram-sha-512 + - plain + - oauth + description: Authentication type. Currently the only supported types are `tls`, `scram-sha-256`, `scram-sha-512`, and `plain`. `scram-sha-256` and `scram-sha-512` types use SASL SCRAM-SHA-256 and SASL SCRAM-SHA-512 Authentication, respectively. `plain` type uses SASL PLAIN Authentication. `oauth` type uses SASL OAUTHBEARER Authentication. The `tls` type uses TLS Client Authentication. The `tls` type is supported only over TLS connections. + username: + type: string + description: Username used for the authentication. + required: + - type + description: Authentication configuration for connecting to the cluster. + config: + x-kubernetes-preserve-unknown-fields: true + type: object + description: 'The MirrorMaker 2.0 cluster config. 
Properties with the following prefixes cannot be set: ssl., sasl., security., listeners, plugin.path, rest., bootstrap.servers, consumer.interceptor.classes, producer.interceptor.classes (with the exception of: ssl.endpoint.identification.algorithm, ssl.cipher.suites, ssl.protocol, ssl.enabled.protocols).' + required: + - alias + - bootstrapServers + description: Kafka clusters for mirroring. + mirrors: + type: array + items: + type: object + properties: + sourceCluster: + type: string + description: The alias of the source cluster used by the Kafka MirrorMaker 2.0 connectors. The alias must match a cluster in the list at `spec.clusters`. + targetCluster: + type: string + description: The alias of the target cluster used by the Kafka MirrorMaker 2.0 connectors. The alias must match a cluster in the list at `spec.clusters`. + sourceConnector: + type: object + properties: + tasksMax: + type: integer + minimum: 1 + description: The maximum number of tasks for the Kafka Connector. + config: + x-kubernetes-preserve-unknown-fields: true + type: object + description: 'The Kafka Connector configuration. The following properties cannot be set: connector.class, tasks.max.' + pause: + type: boolean + description: Whether the connector should be paused. Defaults to false. + description: The specification of the Kafka MirrorMaker 2.0 source connector. + heartbeatConnector: + type: object + properties: + tasksMax: + type: integer + minimum: 1 + description: The maximum number of tasks for the Kafka Connector. + config: + x-kubernetes-preserve-unknown-fields: true + type: object + description: 'The Kafka Connector configuration. The following properties cannot be set: connector.class, tasks.max.' + pause: + type: boolean + description: Whether the connector should be paused. Defaults to false. + description: The specification of the Kafka MirrorMaker 2.0 heartbeat connector. 
+ checkpointConnector: + type: object + properties: + tasksMax: + type: integer + minimum: 1 + description: The maximum number of tasks for the Kafka Connector. + config: + x-kubernetes-preserve-unknown-fields: true + type: object + description: 'The Kafka Connector configuration. The following properties cannot be set: connector.class, tasks.max.' + pause: + type: boolean + description: Whether the connector should be paused. Defaults to false. + description: The specification of the Kafka MirrorMaker 2.0 checkpoint connector. + topicsPattern: + type: string + description: A regular expression matching the topics to be mirrored, for example, "topic1\|topic2\|topic3". Comma-separated lists are also supported. + topicsBlacklistPattern: + type: string + description: A regular expression matching the topics to exclude from mirroring. Comma-separated lists are also supported. + topicsExcludePattern: + type: string + description: A regular expression matching the topics to exclude from mirroring. Comma-separated lists are also supported. + groupsPattern: + type: string + description: A regular expression matching the consumer groups to be mirrored. Comma-separated lists are also supported. + groupsBlacklistPattern: + type: string + description: A regular expression matching the consumer groups to exclude from mirroring. Comma-separated lists are also supported. + groupsExcludePattern: + type: string + description: A regular expression matching the consumer groups to exclude from mirroring. Comma-separated lists are also supported. + required: + - sourceCluster + - targetCluster + description: Configuration of the MirrorMaker 2.0 connectors. + resources: + type: object + properties: + limits: + x-kubernetes-preserve-unknown-fields: true + type: object + requests: + x-kubernetes-preserve-unknown-fields: true + type: object + description: The maximum limits for CPU and memory resources and the requested initial resources. 
+ livenessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0. + periodSeconds: + type: integer + minimum: 1 + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1. + description: Pod liveness checking. + readinessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0. + periodSeconds: + type: integer + minimum: 1 + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1. + description: Pod readiness checking. 
+ jvmOptions: + type: object + properties: + "-XX": + x-kubernetes-preserve-unknown-fields: true + type: object + description: A map of -XX options to the JVM. + "-Xms": + type: string + pattern: ^[0-9]+[mMgG]?$ + description: -Xms option to to the JVM. + "-Xmx": + type: string + pattern: ^[0-9]+[mMgG]?$ + description: -Xmx option to to the JVM. + gcLoggingEnabled: + type: boolean + description: Specifies whether the Garbage Collection logging is enabled. The default is false. + javaSystemProperties: + type: array + items: + type: object + properties: + name: + type: string + description: The system property name. + value: + type: string + description: The system property value. + description: A map of additional system properties which will be passed using the `-D` option to the JVM. + description: JVM Options for pods. + jmxOptions: + type: object + properties: + authentication: + type: object + properties: + type: + type: string + enum: + - password + description: Authentication type. Currently the only supported types are `password`.`password` type creates a username and protected port with no TLS. + required: + - type + description: Authentication configuration for connecting to the JMX port. + description: JMX Options. + logging: + type: object + properties: + loggers: + x-kubernetes-preserve-unknown-fields: true + type: object + description: A Map from logger name to logger level. + type: + type: string + enum: + - inline + - external + description: Logging type, must be either 'inline' or 'external'. + valueFrom: + type: object + properties: + configMapKeyRef: + type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean + description: Reference to the key in the ConfigMap containing the configuration. + description: '`ConfigMap` entry where the logging configuration is stored. ' + required: + - type + description: Logging configuration for Kafka Connect. 
+ tracing: + type: object + properties: + type: + type: string + enum: + - jaeger + description: Type of the tracing used. Currently the only supported type is `jaeger` for Jaeger tracing. + required: + - type + description: The configuration of tracing in Kafka Connect. + template: + type: object + properties: + deployment: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + deploymentStrategy: + type: string + enum: + - RollingUpdate + - Recreate + description: DeploymentStrategy which will be used for this Deployment. Valid values are `RollingUpdate` and `Recreate`. Defaults to `RollingUpdate`. + description: Template for Kafka Connect `Deployment`. + pod: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + imagePullSecrets: + type: array + items: + type: object + properties: + name: + type: string + description: List of references to secrets in the same namespace to use for pulling any of the images used by this Pod. 
When the `STRIMZI_IMAGE_PULL_SECRETS` environment variable in Cluster Operator and the `imagePullSecrets` option are specified, only the `imagePullSecrets` variable is used and the `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored. + securityContext: + type: object + properties: + fsGroup: + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + supplementalGroups: + type: array + items: + type: integer + sysctls: + type: array + items: + type: object + properties: + name: + type: string + value: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Configures pod-level security attributes and common container settings. + terminationGracePeriodSeconds: + type: integer + minimum: 0 + description: The grace period is the duration in seconds after the processes running in the pod are sent a termination signal, and the time when the processes are forcibly halted with a kill signal. Set this value to longer than the expected cleanup time for your process. Value must be a non-negative integer. A zero value indicates delete immediately. You might need to increase the grace period for very large Kafka clusters, so that the Kafka brokers have enough time to transfer their work to another broker before they are terminated. Defaults to 30 seconds. 
+ affinity: + type: object + properties: + nodeAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + preference: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: object + properties: + nodeSelectorTerms: + type: array + items: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + podAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + 
requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + podAntiAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + 
x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + description: The pod's affinity rules. + tolerations: + type: array + items: + type: object + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + type: integer + value: + type: string + description: The pod's tolerations. + priorityClassName: + type: string + description: The name of the priority class used to assign priority to the pods. For more information about priority classes, see {K8sPriorityClass}. + schedulerName: + type: string + description: The name of the scheduler used to dispatch this `Pod`. If not specified, the default scheduler will be used. + hostAliases: + type: array + items: + type: object + properties: + hostnames: + type: array + items: + type: string + ip: + type: string + description: The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified. + tmpDirSizeLimit: + type: string + pattern: ^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ + description: Defines the total amount (for example `1Gi`) of local storage required for temporary EmptyDir volume (`/tmp`). Default value is `1Mi`. + enableServiceLinks: + type: boolean + description: Indicates whether information about services should be injected into Pod's environment variables. 
+ topologySpreadConstraints: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + maxSkew: + type: integer + topologyKey: + type: string + whenUnsatisfiable: + type: string + description: The pod's topology spread constraints. + description: Template for Kafka Connect `Pods`. + apiService: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + ipFamilyPolicy: + type: string + enum: + - SingleStack + - PreferDualStack + - RequireDualStack + description: Specifies the IP Family Policy used by the service. Available options are `SingleStack`, `PreferDualStack` and `RequireDualStack`. `SingleStack` is for a single IP family. `PreferDualStack` is for two IP families on dual-stack configured clusters or a single IP family on single-stack clusters. `RequireDualStack` fails unless there are two IP families on dual-stack configured clusters. If unspecified, Kubernetes will choose the default value based on the service type. Available on Kubernetes 1.20 and newer. + ipFamilies: + type: array + items: + type: string + enum: + - IPv4 + - IPv6 + description: Specifies the IP Families used by the service. Available options are `IPv4` and `IPv6. 
If unspecified, Kubernetes will choose the default value based on the `ipFamilyPolicy` setting. Available on Kubernetes 1.20 and newer. + description: Template for Kafka Connect API `Service`. + connectContainer: + type: object + properties: + env: + type: array + items: + type: object + properties: + name: + type: string + description: The environment variable key. + value: + type: string + description: The environment variable value. + description: Environment variables which should be applied to the container. + securityContext: + type: object + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + type: object + properties: + add: + type: array + items: + type: string + drop: + type: array + items: + type: string + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Security context for the container. + description: Template for the Kafka Connect container. + initContainer: + type: object + properties: + env: + type: array + items: + type: object + properties: + name: + type: string + description: The environment variable key. + value: + type: string + description: The environment variable value. + description: Environment variables which should be applied to the container. 
+ securityContext: + type: object + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + type: object + properties: + add: + type: array + items: + type: string + drop: + type: array + items: + type: string + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Security context for the container. + description: Template for the Kafka init container. + podDisruptionBudget: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata to apply to the `PodDisruptionBudgetTemplate` resource. + maxUnavailable: + type: integer + minimum: 0 + description: Maximum number of unavailable pods to allow automatic Pod eviction. A Pod eviction is allowed when the `maxUnavailable` number of pods or fewer are unavailable after the eviction. Setting this value to 0 prevents all voluntary evictions, so the pods must be evicted manually. Defaults to 1. 
+ description: Template for Kafka Connect `PodDisruptionBudget`. + serviceAccount: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + description: Template for the Kafka Connect service account. + clusterRoleBinding: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + description: Template for the Kafka Connect ClusterRoleBinding. + buildPod: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. 
+ imagePullSecrets: + type: array + items: + type: object + properties: + name: + type: string + description: List of references to secrets in the same namespace to use for pulling any of the images used by this Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` environment variable in Cluster Operator and the `imagePullSecrets` option are specified, only the `imagePullSecrets` variable is used and the `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored. + securityContext: + type: object + properties: + fsGroup: + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + supplementalGroups: + type: array + items: + type: integer + sysctls: + type: array + items: + type: object + properties: + name: + type: string + value: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Configures pod-level security attributes and common container settings. + terminationGracePeriodSeconds: + type: integer + minimum: 0 + description: The grace period is the duration in seconds after the processes running in the pod are sent a termination signal, and the time when the processes are forcibly halted with a kill signal. Set this value to longer than the expected cleanup time for your process. Value must be a non-negative integer. A zero value indicates delete immediately. You might need to increase the grace period for very large Kafka clusters, so that the Kafka brokers have enough time to transfer their work to another broker before they are terminated. Defaults to 30 seconds. 
+ affinity: + type: object + properties: + nodeAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + preference: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: object + properties: + nodeSelectorTerms: + type: array + items: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + podAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + 
requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + podAntiAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + 
x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + description: The pod's affinity rules. + tolerations: + type: array + items: + type: object + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + type: integer + value: + type: string + description: The pod's tolerations. + priorityClassName: + type: string + description: The name of the priority class used to assign priority to the pods. For more information about priority classes, see {K8sPriorityClass}. + schedulerName: + type: string + description: The name of the scheduler used to dispatch this `Pod`. If not specified, the default scheduler will be used. + hostAliases: + type: array + items: + type: object + properties: + hostnames: + type: array + items: + type: string + ip: + type: string + description: The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified. + tmpDirSizeLimit: + type: string + pattern: ^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ + description: Defines the total amount (for example `1Gi`) of local storage required for temporary EmptyDir volume (`/tmp`). Default value is `1Mi`. + enableServiceLinks: + type: boolean + description: Indicates whether information about services should be injected into Pod's environment variables. 
+ topologySpreadConstraints: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + maxSkew: + type: integer + topologyKey: + type: string + whenUnsatisfiable: + type: string + description: The pod's topology spread constraints. + description: Template for Kafka Connect Build `Pods`. The build pod is used only on Kubernetes. + buildContainer: + type: object + properties: + env: + type: array + items: + type: object + properties: + name: + type: string + description: The environment variable key. + value: + type: string + description: The environment variable value. + description: Environment variables which should be applied to the container. + securityContext: + type: object + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + type: object + properties: + add: + type: array + items: + type: string + drop: + type: array + items: + type: string + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Security context for the container. + description: Template for the Kafka Connect Build container. The build container is used only on Kubernetes. 
+ buildConfig: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + pullSecret: + type: string + description: Container Registry Secret with the credentials for pulling the base image. + description: Template for the Kafka Connect BuildConfig used to build new container images. The BuildConfig is used only on OpenShift. + buildServiceAccount: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + description: Template for the Kafka Connect Build service account. + jmxSecret: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Labels added to the resource template. Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: Annotations added to the resource template. 
Can be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. + description: Metadata applied to the resource. + description: Template for Secret of the Kafka Connect Cluster JMX authentication. + description: Template for Kafka Connect and Kafka Mirror Maker 2 resources. The template allows users to specify how the `Deployment`, `Pods` and `Service` are generated. + externalConfiguration: + type: object + properties: + env: + type: array + items: + type: object + properties: + name: + type: string + description: Name of the environment variable which will be passed to the Kafka Connect pods. The name of the environment variable cannot start with `KAFKA_` or `STRIMZI_`. + valueFrom: + type: object + properties: + configMapKeyRef: + type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean + description: Reference to a key in a ConfigMap. + secretKeyRef: + type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean + description: Reference to a key in a Secret. + description: Value of the environment variable which will be passed to the Kafka Connect pods. It can be passed either as a reference to Secret or ConfigMap field. The field has to specify exactly one Secret or ConfigMap. + required: + - name + - valueFrom + description: Makes data from a Secret or ConfigMap available in the Kafka Connect pods as environment variables. + volumes: + type: array + items: + type: object + properties: + configMap: + type: object + properties: + defaultMode: + type: integer + items: + type: array + items: + type: object + properties: + key: + type: string + mode: + type: integer + path: + type: string + name: + type: string + optional: + type: boolean + description: Reference to a key in a ConfigMap. Exactly one Secret or ConfigMap has to be specified. + name: + type: string + description: Name of the volume which will be added to the Kafka Connect pods. 
+ secret: + type: object + properties: + defaultMode: + type: integer + items: + type: array + items: + type: object + properties: + key: + type: string + mode: + type: integer + path: + type: string + optional: + type: boolean + secretName: + type: string + description: Reference to a key in a Secret. Exactly one Secret or ConfigMap has to be specified. + required: + - name + description: Makes data from a Secret or ConfigMap available in the Kafka Connect pods as volumes. + description: Pass data from Secrets or ConfigMaps to the Kafka Connect pods and use them to configure connectors. + metricsConfig: + type: object + properties: + type: + type: string + enum: + - jmxPrometheusExporter + description: Metrics type. Only 'jmxPrometheusExporter' supported currently. + valueFrom: + type: object + properties: + configMapKeyRef: + type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean + description: Reference to the key in the ConfigMap containing the configuration. + description: ConfigMap entry where the Prometheus JMX Exporter configuration is stored. For details of the structure of this configuration, see the {JMXExporter}. + required: + - type + - valueFrom + description: Metrics configuration. + required: + - connectCluster + description: The specification of the Kafka MirrorMaker 2.0 cluster. + status: + type: object + properties: + conditions: + type: array + items: + type: object + properties: + type: + type: string + description: The unique identifier of a condition, used to distinguish between other conditions in the resource. + status: + type: string + description: The status of the condition, either True, False or Unknown. + lastTransitionTime: + type: string + description: Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. 
+ reason: + type: string + description: The reason for the condition's last transition (a single word in CamelCase). + message: + type: string + description: Human-readable message indicating details about the condition's last transition. + description: List of status conditions. + observedGeneration: + type: integer + description: The generation of the CRD that was last reconciled by the operator. + url: + type: string + description: The URL of the REST API endpoint for managing and monitoring Kafka Connect connectors. + connectorPlugins: + type: array + items: + type: object + properties: + type: + type: string + description: The type of the connector plugin. The available types are `sink` and `source`. + version: + type: string + description: The version of the connector plugin. + class: + type: string + description: The class of the connector plugin. + description: The list of connector plugins available in this Kafka Connect deployment. + connectors: + type: array + items: + x-kubernetes-preserve-unknown-fields: true + type: object + description: List of MirrorMaker 2.0 connector statuses, as reported by the Kafka Connect REST API. + labelSelector: + type: string + description: Label selector for pods providing this resource. + replicas: + type: integer + description: The current number of pods being used to provide this resource. + description: The status of the Kafka MirrorMaker 2.0 cluster. 
diff --git a/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/049-Crd-kafkarebalance.yaml b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/049-Crd-kafkarebalance.yaml new file mode 100644 index 000000000..617bf5f45 --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/crds/049-Crd-kafkarebalance.yaml @@ -0,0 +1,108 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: kafkarebalances.kafka.strimzi.io + labels: + app: strimzi + strimzi.io/crd-install: "true" + component: kafkarebalances.kafka.strimzi.io-crd +spec: + group: kafka.strimzi.io + names: + kind: KafkaRebalance + listKind: KafkaRebalanceList + singular: kafkarebalance + plural: kafkarebalances + shortNames: + - kr + categories: + - strimzi + scope: Namespaced + conversion: + strategy: None + versions: + - name: v1beta2 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - name: Cluster + description: The name of the Kafka cluster this resource rebalances + jsonPath: .metadata.labels.strimzi\.io/cluster + type: string + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + goals: + type: array + items: + type: string + description: A list of goals, ordered by decreasing priority, to use for generating and executing the rebalance proposal. The supported goals are available at https://github.com/linkedin/cruise-control#goals. If an empty goals list is provided, the goals declared in the default.goals Cruise Control configuration parameter are used. + skipHardGoalCheck: + type: boolean + description: Whether to allow the hard goals specified in the Kafka CR to be skipped in optimization proposal generation. This can be useful when some of those hard goals are preventing a balance solution being found. Default is false. 
+ excludedTopics: + type: string + description: A regular expression where any matching topics will be excluded from the calculation of optimization proposals. This expression will be parsed by the java.util.regex.Pattern class; for more information on the supported format consult the documentation for that class. + concurrentPartitionMovementsPerBroker: + type: integer + minimum: 0 + description: The upper bound of ongoing partition replica movements going into/out of each broker. Default is 5. + concurrentIntraBrokerPartitionMovements: + type: integer + minimum: 0 + description: The upper bound of ongoing partition replica movements between disks within each broker. Default is 2. + concurrentLeaderMovements: + type: integer + minimum: 0 + description: The upper bound of ongoing partition leadership movements. Default is 1000. + replicationThrottle: + type: integer + minimum: 0 + description: The upper bound, in bytes per second, on the bandwidth used to move replicas. There is no limit by default. + replicaMovementStrategies: + type: array + items: + type: string + description: A list of strategy class names used to determine the execution order for the replica movements in the generated optimization proposal. By default BaseReplicaMovementStrategy is used, which will execute the replica movements in the order that they were generated. + description: The specification of the Kafka rebalance. + status: + type: object + properties: + conditions: + type: array + items: + type: object + properties: + type: + type: string + description: The unique identifier of a condition, used to distinguish between other conditions in the resource. + status: + type: string + description: The status of the condition, either True, False or Unknown. + lastTransitionTime: + type: string + description: Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. 
+ reason: + type: string + description: The reason for the condition's last transition (a single word in CamelCase). + message: + type: string + description: Human-readable message indicating details about the condition's last transition. + description: List of status conditions. + observedGeneration: + type: integer + description: The generation of the CRD that was last reconciled by the operator. + sessionId: + type: string + description: The session identifier for requests to Cruise Control pertaining to this KafkaRebalance resource. This is used by the Kafka Rebalance operator to track the status of ongoing rebalancing operations. + optimizationResult: + x-kubernetes-preserve-unknown-fields: true + type: object + description: A JSON object describing the optimization result. + description: The status of the Kafka rebalance. diff --git a/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/010-ServiceAccount-strimzi-cluster-operator.yaml b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/010-ServiceAccount-strimzi-cluster-operator.yaml new file mode 100644 index 000000000..09e40aa81 --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/010-ServiceAccount-strimzi-cluster-operator.yaml @@ -0,0 +1,13 @@ +{{- if .Values.global.kafka.enabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: strimzi-cluster-operator + namespace: {{ .Values.namespace }} + labels: + app: {{ template "strimzi.name" . }} + chart: {{ template "strimzi.chart" . 
}} + component: service-account + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- end }} diff --git a/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/020-ClusterRole-strimzi-cluster-operator-role.yaml b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/020-ClusterRole-strimzi-cluster-operator-role.yaml new file mode 100644 index 000000000..cc9c8039f --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/020-ClusterRole-strimzi-cluster-operator-role.yaml @@ -0,0 +1,219 @@ +{{- if .Values.global.kafka.enabled }} +{{- if .Values.createGlobalResources -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: strimzi-cluster-operator-namespaced + labels: + app: {{ template "strimzi.name" . }} + chart: {{ template "strimzi.chart" . }} + component: role + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +- apiGroups: + - "rbac.authorization.k8s.io" + resources: + # The cluster operator needs to access and manage rolebindings to grant Strimzi components cluster permissions + - rolebindings + verbs: + - get + - list + - watch + - create + - delete + - patch + - update +- apiGroups: + - "rbac.authorization.k8s.io" + resources: + # The cluster operator needs to access and manage roles to grant the entity operator permissions + - roles + verbs: + - get + - list + - watch + - create + - delete + - patch + - update +- apiGroups: + - "" + resources: + # The cluster operator needs to access and delete pods, this is to allow it to monitor pod health and coordinate rolling updates + - pods + # The cluster operator needs to access and manage service accounts to grant Strimzi components cluster permissions + - serviceaccounts + # The cluster operator needs to access and manage config maps for Strimzi components configuration + - configmaps + # The cluster operator needs to access and manage services and endpoints to expose Strimzi 
components to network traffic + - services + - endpoints + # The cluster operator needs to access and manage secrets to handle credentials + - secrets + # The cluster operator needs to access and manage persistent volume claims to bind them to Strimzi components for persistent data + - persistentvolumeclaims + verbs: + - get + - list + - watch + - create + - delete + - patch + - update +- apiGroups: + - "kafka.strimzi.io" + resources: + # The cluster operator runs the KafkaAssemblyOperator, which needs to access and manage Kafka resources + - kafkas + - kafkas/status + # The cluster operator runs the KafkaConnectAssemblyOperator, which needs to access and manage KafkaConnect resources + - kafkaconnects + - kafkaconnects/status + # The cluster operator runs the KafkaConnectorAssemblyOperator, which needs to access and manage KafkaConnector resources + - kafkaconnectors + - kafkaconnectors/status + # The cluster operator runs the KafkaMirrorMakerAssemblyOperator, which needs to access and manage KafkaMirrorMaker resources + - kafkamirrormakers + - kafkamirrormakers/status + # The cluster operator runs the KafkaBridgeAssemblyOperator, which needs to access and manage KafkaBridge resources + - kafkabridges + - kafkabridges/status + # The cluster operator runs the KafkaMirrorMaker2AssemblyOperator, which needs to access and manage KafkaMirrorMaker2 resources + - kafkamirrormaker2s + - kafkamirrormaker2s/status + # The cluster operator runs the KafkaRebalanceAssemblyOperator, which needs to access and manage KafkaRebalance resources + - kafkarebalances + - kafkarebalances/status + verbs: + - get + - list + - watch + - create + - delete + - patch + - update +- apiGroups: + - "core.strimzi.io" + resources: + # The cluster operator uses StrimziPodSets to manage the Kafka and ZooKeeper pods + - strimzipodsets + - strimzipodsets/status + verbs: + - get + - list + - watch + - create + - delete + - patch + - update +- apiGroups: + # The cluster operator needs the extensions api 
as the operator supports Kubernetes version 1.11+ + # apps/v1 was introduced in Kubernetes 1.14 + - "extensions" + resources: + # The cluster operator needs to access and manage deployments to run deployment based Strimzi components + - deployments + - deployments/scale + # The cluster operator needs to access replica sets to manage Strimzi components and to determine error states + - replicasets + # The cluster operator needs to access and manage replication controllers to manage replicasets + - replicationcontrollers + # The cluster operator needs to access and manage network policies to lock down communication between Strimzi components + - networkpolicies + # The cluster operator needs to access and manage ingresses which allow external access to the services in a cluster + - ingresses + verbs: + - get + - list + - watch + - create + - delete + - patch + - update +- apiGroups: + - "apps" + resources: + # The cluster operator needs to access and manage deployments to run deployment based Strimzi components + - deployments + - deployments/scale + - deployments/status + # The cluster operator needs to access and manage stateful sets to run stateful sets based Strimzi components + - statefulsets + # The cluster operator needs to access replica-sets to manage Strimzi components and to determine error states + - replicasets + verbs: + - get + - list + - watch + - create + - delete + - patch + - update +- apiGroups: + - "" + resources: + # The cluster operator needs to be able to create events and delegate permissions to do so + - events + verbs: + - create +- apiGroups: + # Kafka Connect Build on OpenShift requirement + - build.openshift.io + resources: + - buildconfigs + - buildconfigs/instantiate + - builds + verbs: + - get + - list + - watch + - create + - delete + - patch + - update +- apiGroups: + - networking.k8s.io + resources: + # The cluster operator needs to access and manage network policies to lock down communication between Strimzi components + - 
networkpolicies + # The cluster operator needs to access and manage ingresses which allow external access to the services in a cluster + - ingresses + verbs: + - get + - list + - watch + - create + - delete + - patch + - update +- apiGroups: + - route.openshift.io + resources: + # The cluster operator needs to access and manage routes to expose Strimzi components for external access + - routes + - routes/custom-host + verbs: + - get + - list + - watch + - create + - delete + - patch + - update +- apiGroups: + - policy + resources: + # The cluster operator needs to access and manage pod disruption budgets this limits the number of concurrent disruptions + # that a Strimzi component experiences, allowing for higher availability + - poddisruptionbudgets + verbs: + - get + - list + - watch + - create + - delete + - patch + - update +{{- end -}} +{{- end }} diff --git a/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/020-RoleBinding-strimzi-cluster-operator.yaml b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/020-RoleBinding-strimzi-cluster-operator.yaml new file mode 100644 index 000000000..09a056ac7 --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/020-RoleBinding-strimzi-cluster-operator.yaml @@ -0,0 +1,33 @@ +{{- if .Values.global.kafka.enabled }} +{{- $root := . -}} +{{- range .Values.watchNamespaces }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +{{- if $root.Values.watchAnyNamespace }} +kind: ClusterRoleBinding +{{- else }} +kind: RoleBinding +{{- end }} +metadata: + {{- if $root.Values.watchAnyNamespace }} + name: strimzi-cluster-operator-namespaced + {{- else }} + name: strimzi-cluster-operator + {{- end }} + namespace: {{ . 
}} + labels: + app: {{ template "strimzi.name" $root }} + chart: {{ template "strimzi.chart" $root }} + component: role-binding + release: {{ $root.Release.Name }} + heritage: {{ $root.Release.Service }} +subjects: + - kind: ServiceAccount + name: strimzi-cluster-operator + namespace: {{ $root.Values.namespace }} +roleRef: + kind: ClusterRole + name: strimzi-cluster-operator-namespaced + apiGroup: rbac.authorization.k8s.io +{{- end }} +{{- end }} diff --git a/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/021-ClusterRole-strimzi-cluster-operator-role.yaml b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/021-ClusterRole-strimzi-cluster-operator-role.yaml new file mode 100644 index 000000000..74728fa68 --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/021-ClusterRole-strimzi-cluster-operator-role.yaml @@ -0,0 +1,46 @@ +{{- if .Values.global.kafka.enabled }} +{{- if .Values.createGlobalResources -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: strimzi-cluster-operator-global + labels: + app: {{ template "strimzi.name" . }} + chart: {{ template "strimzi.chart" . 
}} + component: role + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +- apiGroups: + - "rbac.authorization.k8s.io" + resources: + # The cluster operator needs to create and manage cluster role bindings in the case of an install where a user + # has specified they want their cluster role bindings generated + - clusterrolebindings + verbs: + - get + - list + - watch + - create + - delete + - patch + - update +- apiGroups: + - storage.k8s.io + resources: + # The cluster operator requires "get" permissions to view storage class details + # This is because only a persistent volume of a supported storage class type can be resized + - storageclasses + verbs: + - get +- apiGroups: + - "" + resources: + # The cluster operator requires "list" permissions to view all nodes in a cluster + # The listing is used to determine the node addresses when NodePort access is configured + # These addresses are then exposed in the custom resource states + - nodes + verbs: + - list +{{- end -}} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/021-ClusterRoleBinding-strimzi-cluster-operator.yaml b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/021-ClusterRoleBinding-strimzi-cluster-operator.yaml new file mode 100644 index 000000000..5d978cf0d --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/021-ClusterRoleBinding-strimzi-cluster-operator.yaml @@ -0,0 +1,22 @@ +{{- if .Values.global.kafka.enabled }} +{{- if .Values.createGlobalResources -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: strimzi-cluster-operator + labels: + app: {{ template "strimzi.name" . }} + chart: {{ template "strimzi.chart" . 
}} + component: role-binding + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +subjects: + - kind: ServiceAccount + name: strimzi-cluster-operator + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: strimzi-cluster-operator-global + apiGroup: rbac.authorization.k8s.io +{{- end -}} +{{- end }} diff --git a/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/030-ClusterRole-strimzi-kafka-broker.yaml b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/030-ClusterRole-strimzi-kafka-broker.yaml new file mode 100644 index 000000000..c86e63657 --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/030-ClusterRole-strimzi-kafka-broker.yaml @@ -0,0 +1,23 @@ +{{- if .Values.global.kafka.enabled }} +{{- if .Values.createGlobalResources -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: strimzi-kafka-broker + labels: + app: {{ template "strimzi.name" . }} + chart: {{ template "strimzi.chart" . 
}} + component: broker-role + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +- apiGroups: + - "" + resources: + # The Kafka Brokers require "get" permissions to view the node they are on + # This information is used to generate a Rack ID that is used for High Availability configurations + - nodes + verbs: + - get +{{- end -}} +{{- end }} diff --git a/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/030-ClusterRoleBinding-strimzi-cluster-operator-kafka-broker-delegation.yaml b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/030-ClusterRoleBinding-strimzi-cluster-operator-kafka-broker-delegation.yaml new file mode 100644 index 000000000..f4fc45842 --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/030-ClusterRoleBinding-strimzi-cluster-operator-kafka-broker-delegation.yaml @@ -0,0 +1,25 @@ +{{- if .Values.global.kafka.enabled }} +{{- if .Values.createGlobalResources -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: strimzi-cluster-operator-kafka-broker-delegation + labels: + app: {{ template "strimzi.name" . }} + chart: {{ template "strimzi.chart" . }} + component: broker-role-binding + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +# The Kafka broker cluster role must be bound to the cluster operator service account so that it can delegate the cluster role to the Kafka brokers. +# This must be done to avoid escalating privileges which would be blocked by Kubernetes. 
+subjects: + - kind: ServiceAccount + name: strimzi-cluster-operator + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: strimzi-kafka-broker + apiGroup: rbac.authorization.k8s.io + +{{- end -}} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/031-ClusterRole-strimzi-entity-operator.yaml b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/031-ClusterRole-strimzi-entity-operator.yaml new file mode 100644 index 000000000..8b603f2a4 --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/031-ClusterRole-strimzi-entity-operator.yaml @@ -0,0 +1,52 @@ +{{- if .Values.global.kafka.enabled }} +{{- if .Values.createGlobalResources -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: strimzi-entity-operator + labels: + app: {{ template "strimzi.name" . }} + chart: {{ template "strimzi.chart" . }} + component: entity-operator-role + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +- apiGroups: + - "kafka.strimzi.io" + resources: + # The entity operator runs the KafkaTopic assembly operator, which needs to access and manage KafkaTopic resources + - kafkatopics + - kafkatopics/status + # The entity operator runs the KafkaUser assembly operator, which needs to access and manage KafkaUser resources + - kafkausers + - kafkausers/status + verbs: + - get + - list + - watch + - create + - patch + - update + - delete +- apiGroups: + - "" + resources: + - events + verbs: + # The entity operator needs to be able to create events + - create +- apiGroups: + - "" + resources: + # The entity operator user-operator needs to access and manage secrets to store generated credentials + - secrets + verbs: + - get + - list + - watch + - create + - delete + - patch + - update +{{- end -}} +{{- end }} diff --git 
a/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/031-RoleBinding-strimzi-cluster-operator-entity-operator-delegation.yaml b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/031-RoleBinding-strimzi-cluster-operator-entity-operator-delegation.yaml new file mode 100644 index 000000000..c66cffcfe --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/031-RoleBinding-strimzi-cluster-operator-entity-operator-delegation.yaml @@ -0,0 +1,31 @@ +{{- if .Values.global.kafka.enabled }} +{{- $root := . -}} +{{- range .Values.watchNamespaces }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +{{- if $root.Values.watchAnyNamespace }} +kind: ClusterRoleBinding +{{- else }} +kind: RoleBinding +{{- end }} +metadata: + name: strimzi-cluster-operator-entity-operator-delegation + namespace: {{ . }} + labels: + app: {{ template "strimzi.name" $root }} + chart: {{ template "strimzi.chart" $root }} + component: entity-operator-role-binding + release: {{ $root.Release.Name }} + heritage: {{ $root.Release.Service }} +# The Entity Operator cluster role must be bound to the cluster operator service account so that it can delegate the cluster role to the Entity Operator. +# This must be done to avoid escalating privileges which would be blocked by Kubernetes. 
+subjects: + - kind: ServiceAccount + name: strimzi-cluster-operator + namespace: {{ $root.Values.namespace }} +roleRef: + kind: ClusterRole + name: strimzi-entity-operator + apiGroup: rbac.authorization.k8s.io +{{- end }} +{{- end }} diff --git a/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/033-ClusterRole-strimzi-kafka-client.yaml b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/033-ClusterRole-strimzi-kafka-client.yaml new file mode 100644 index 000000000..869de8fae --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/033-ClusterRole-strimzi-kafka-client.yaml @@ -0,0 +1,24 @@ +{{- if .Values.global.kafka.enabled }} +{{- if .Values.createGlobalResources -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: strimzi-kafka-client + labels: + app: {{ template "strimzi.name" . }} + chart: {{ template "strimzi.chart" . }} + component: client-role + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +- apiGroups: + - "" + resources: + # The Kafka clients (Connect, Mirror Maker, etc.) 
require "get" permissions to view the node they are on + # This information is used to generate a Rack ID (client.rack option) that is used for consuming from the closest + # replicas when enabled + - nodes + verbs: + - get +{{- end -}} +{{- end }} diff --git a/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/033-ClusterRoleBinding-strimzi-cluster-operator-kafka-client-delegation.yaml b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/033-ClusterRoleBinding-strimzi-cluster-operator-kafka-client-delegation.yaml new file mode 100644 index 000000000..c2e5c202a --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/033-ClusterRoleBinding-strimzi-cluster-operator-kafka-client-delegation.yaml @@ -0,0 +1,26 @@ +{{- if .Values.global.kafka.enabled }} +{{- if .Values.createGlobalResources -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: strimzi-cluster-operator-kafka-client-delegation + labels: + app: {{ template "strimzi.name" . }} + chart: {{ template "strimzi.chart" . }} + component: client-role-binding + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +# The Kafka clients cluster role must be bound to the cluster operator service account so that it can delegate the +# cluster role to the Kafka clients using it for consuming from closest replica. +# This must be done to avoid escalating privileges which would be blocked by Kubernetes. 
+subjects: + - kind: ServiceAccount + name: strimzi-cluster-operator + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: strimzi-kafka-client + apiGroup: rbac.authorization.k8s.io + +{{- end -}} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/050-ConfigMap-strimzi-cluster-operator.yaml b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/050-ConfigMap-strimzi-cluster-operator.yaml new file mode 100644 index 000000000..29c07deed --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/050-ConfigMap-strimzi-cluster-operator.yaml @@ -0,0 +1,38 @@ +{{- if .Values.global.kafka.enabled }} +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ .Values.logConfigMap }} + namespace: {{ .Values.namespace }} + labels: + app: strimzi +data: + log4j2.properties: | + name = COConfig + monitorInterval = 30 + + appender.console.type = Console + appender.console.name = STDOUT + appender.console.layout.type = PatternLayout + appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n + + rootLogger.level = {{ default .Values.logLevel .Values.logLevelOverride }} + rootLogger.appenderRefs = stdout + rootLogger.appenderRef.console.ref = STDOUT + rootLogger.additivity = false + + # Kafka AdminClient logging is a bit noisy at INFO level + logger.kafka.name = org.apache.kafka + logger.kafka.level = WARN + logger.kafka.additivity = false + + # Zookeeper is very verbose even on INFO level -> We set it to WARN by default + logger.zookeepertrustmanager.name = org.apache.zookeeper + logger.zookeepertrustmanager.level = WARN + logger.zookeepertrustmanager.additivity = false + + # Keeps separate level for Netty logging -> to not be changed by the root logger + logger.netty.name = io.netty + logger.netty.level = INFO + logger.netty.additivity = false +{{- end }} diff --git 
a/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/060-Deployment-strimzi-cluster-operator.yaml b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/060-Deployment-strimzi-cluster-operator.yaml new file mode 100644 index 000000000..f9aadc5bb --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/060-Deployment-strimzi-cluster-operator.yaml @@ -0,0 +1,167 @@ +{{- if .Values.global.kafka.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: strimzi-cluster-operator + namespace: {{ .Values.namespace }} + labels: + app: {{ template "strimzi.name" . }} + chart: {{ template "strimzi.chart" . }} + component: deployment + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: 1 + selector: + matchLabels: + name: strimzi-cluster-operator + strimzi.io/kind: cluster-operator + template: + metadata: + labels: + name: strimzi-cluster-operator + strimzi.io/kind: cluster-operator + {{- with .Values.labels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: strimzi-cluster-operator + {{- if .Values.image.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.image.imagePullSecrets }} + {{- end }} + {{- with .Values.podSecurityContext }} + securityContext: {{ toYaml . 
| nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + volumes: + - name: strimzi-tmp + emptyDir: + medium: Memory + sizeLimit: {{ .Values.tmpDirSizeLimit }} + - name: {{ .Values.logVolume }} + configMap: + name: {{ .Values.logConfigMap }} + containers: + - name: strimzi-cluster-operator + image: {{ default .Values.defaultImageRegistry .Values.image.registry }}/{{ default .Values.defaultImageRepository .Values.image.repository}}/{{ .Values.image.name }}:{{ default .Values.defaultImageTag .Values.image.tag }} + ports: + - containerPort: 8080 + name: http + {{- if .Values.image.imagePullPolicy }} + imagePullPolicy: {{ .Values.image.imagePullPolicy | quote }} + {{- end }} + args: + - /opt/strimzi/bin/cluster_operator_run.sh + volumeMounts: + - name: strimzi-tmp + mountPath: /tmp + - name: {{ .Values.logVolume }} + mountPath: /opt/strimzi/custom-config/ + env: + - name: STRIMZI_NAMESPACE + {{- if .Values.watchAnyNamespace }} + value: "*" + {{- else }} + {{- if .Values.watchNamespaces -}} + {{- $ns := .Values.watchNamespaces -}} + {{- $ns := append $ns .Release.Namespace }} + value: "{{ join "," $ns }}" + {{- else }} + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- end }} + {{- end }} + - name: STRIMZI_FULL_RECONCILIATION_INTERVAL_MS + value: {{ .Values.fullReconciliationIntervalMs | quote }} + - name: STRIMZI_OPERATION_TIMEOUT_MS + value: {{ .Values.operationTimeoutMs | quote }} + {{- template "strimzi.kafka.image.map" . 
}} + - name: STRIMZI_DEFAULT_TOPIC_OPERATOR_IMAGE + value: {{ default .Values.defaultImageRegistry .Values.topicOperator.image.registry }}/{{ default .Values.defaultImageRepository .Values.topicOperator.image.repository }}/{{ .Values.topicOperator.image.name }}:{{ default .Values.defaultImageTag .Values.topicOperator.image.tag }} + - name: STRIMZI_DEFAULT_USER_OPERATOR_IMAGE + value: {{ default .Values.defaultImageRegistry .Values.userOperator.image.registry }}/{{ default .Values.defaultImageRepository .Values.userOperator.image.repository }}/{{ .Values.userOperator.image.name }}:{{ default .Values.defaultImageTag .Values.userOperator.image.tag }} + - name: STRIMZI_DEFAULT_KAFKA_INIT_IMAGE + value: {{ default .Values.defaultImageRegistry .Values.kafkaInit.image.registry }}/{{ default .Values.defaultImageRepository .Values.kafkaInit.image.repository }}/{{ .Values.kafkaInit.image.name }}:{{ default .Values.defaultImageTag .Values.kafkaInit.image.tag }} + - name: STRIMZI_DEFAULT_KAFKA_BRIDGE_IMAGE + value: {{ default .Values.defaultImageRegistry .Values.kafkaBridge.image.registry }}/{{ default .Values.defaultImageRepository .Values.kafkaBridge.image.repository }}/{{ .Values.kafkaBridge.image.name }}:{{ default .Values.defaultImageTag .Values.kafkaBridge.image.tag }} + - name: STRIMZI_DEFAULT_JMXTRANS_IMAGE + value: {{ default .Values.defaultImageRegistry .Values.jmxTrans.image.registry }}/{{ default .Values.defaultImageRepository .Values.jmxTrans.image.repository }}/{{ .Values.jmxTrans.image.name }}:{{ default .Values.defaultImageTag .Values.jmxTrans.image.tag }} + - name: STRIMZI_DEFAULT_KANIKO_EXECUTOR_IMAGE + value: {{ default .Values.defaultImageRegistry .Values.kanikoExecutor.image.registry }}/{{ default .Values.defaultImageRepository .Values.kanikoExecutor.image.repository }}/{{ .Values.kanikoExecutor.image.name }}:{{ default .Values.defaultImageTag .Values.kanikoExecutor.image.tag }} + - name: STRIMZI_DEFAULT_MAVEN_BUILDER + value: {{ default 
.Values.defaultImageRegistry .Values.mavenBuilder.image.registry }}/{{ default .Values.defaultImageRepository .Values.mavenBuilder.image.repository }}/{{ .Values.mavenBuilder.image.name }}:{{ default .Values.defaultImageTag .Values.mavenBuilder.image.tag }} + - name: STRIMZI_OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.image.imagePullSecrets }} + - name: STRIMZI_IMAGE_PULL_SECRETS + value: {{ .Values.image.imagePullSecrets }} + {{- end }} + {{- if .Values.image.operatorNamespaceLabels }} + - name: STRIMZI_OPERATOR_NAMESPACE_LABELS + value: {{ .Values.image.operatorNamespaceLabels }} + {{- end }} + {{- if .Values.image.imagePullPolicy }} + - name: STRIMZI_IMAGE_PULL_POLICY + value: {{ .Values.image.imagePullPolicy }} + {{- end }} + {{ if ne .Values.kubernetesServiceDnsDomain "cluster.local" }}- name: KUBERNETES_SERVICE_DNS_DOMAIN + value: {{ .Values.kubernetesServiceDnsDomain | quote }}{{ end }} + - name: STRIMZI_FEATURE_GATES + value: {{ .Values.featureGates | quote }} + {{- if .Values.labelsExclusionPattern }} + - name: STRIMZI_LABELS_EXCLUSION_PATTERN + value: {{ .Values.labelsExclusionPattern | quote }} + {{- end }} + {{- if ne .Values.generateNetworkPolicy true}} + - name: STRIMZI_NETWORK_POLICY_GENERATION + value: {{ .Values.generateNetworkPolicy | quote }} + {{- end }} + {{- if ne (int .Values.connectBuildTimeoutMs) 300000 }} + - name: STRIMZI_CONNECT_BUILD_TIMEOUT_MS + value: {{ .Values.connectBuildTimeoutMs | quote }} + {{- end }} + {{- if .Values.extraEnvs }} +{{ toYaml .Values.extraEnvs | indent 12 }} + {{- end }} + livenessProbe: + httpGet: + path: /healthy + port: http + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + readinessProbe: + httpGet: + path: /ready + port: http + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + {{- with 
.Values.securityContext }} + securityContext: {{ toYaml .| nindent 12 }} + {{- end }} + resources: +{{ toYaml .Values.resources | indent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + strategy: + type: Recreate +{{- end }} diff --git a/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/NOTES.txt b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/NOTES.txt new file mode 100644 index 000000000..fec183edc --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/NOTES.txt @@ -0,0 +1,5 @@ +Thank you for installing {{ .Chart.Name }}-{{ .Chart.Version }} + +To create a Kafka cluster refer to the following documentation. + +https://strimzi.io/docs/operators/latest/using.html#deploying-cluster-operator-helm-chart-str diff --git a/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/_helpers.tpl b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/_helpers.tpl new file mode 100644 index 000000000..d5de90351 --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/_helpers.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "strimzi.name" -}} +{{- default "strimzi" .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "strimzi.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "strimzi.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/_kafka_image_map.tpl b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/_kafka_image_map.tpl new file mode 100644 index 000000000..74d2e7a7f --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/templates/_kafka_image_map.tpl @@ -0,0 +1,36 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* This file is generated in helm-charts/Makefile */}} +{{/* DO NOT EDIT BY HAND */}} + +{{/* Generate the kafka image map */}} +{{- define "strimzi.kafka.image.map" }} + - name: STRIMZI_DEFAULT_TLS_SIDECAR_ENTITY_OPERATOR_IMAGE + value: {{ default .Values.defaultImageRegistry .Values.tlsSidecarEntityOperator.image.registry }}/{{ default .Values.defaultImageRepository .Values.tlsSidecarEntityOperator.image.repository }}/{{ .Values.tlsSidecarEntityOperator.image.name }}:{{ default .Values.defaultImageTag .Values.tlsSidecarEntityOperator.image.tagPrefix }}-kafka-3.0.0 + - name: STRIMZI_DEFAULT_KAFKA_EXPORTER_IMAGE + value: {{ default .Values.defaultImageRegistry .Values.kafkaExporter.image.registry }}/{{ default .Values.defaultImageRepository .Values.kafkaExporter.image.repository }}/{{ .Values.kafkaExporter.image.name }}:{{ default .Values.defaultImageTag .Values.kafkaExporter.image.tagPrefix }}-kafka-3.0.0 + - 
name: STRIMZI_DEFAULT_CRUISE_CONTROL_IMAGE + value: {{ default .Values.defaultImageRegistry .Values.cruiseControl.image.registry }}/{{ default .Values.defaultImageRepository .Values.cruiseControl.image.repository }}/{{ .Values.cruiseControl.image.name }}:{{ default .Values.defaultImageTag .Values.cruiseControl.image.tagPrefix }}-kafka-3.0.0 + - name: STRIMZI_DEFAULT_TLS_SIDECAR_CRUISE_CONTROL_IMAGE + value: {{ default .Values.defaultImageRegistry .Values.tlsSidecarCruiseControl.image.registry }}/{{ default .Values.defaultImageRepository .Values.tlsSidecarCruiseControl.image.repository }}/{{ .Values.tlsSidecarCruiseControl.image.name }}:{{ default .Values.defaultImageTag .Values.tlsSidecarCruiseControl.image.tagPrefix }}-kafka-3.0.0 + - name: STRIMZI_KAFKA_IMAGES + value: | + 2.8.0={{ default .Values.defaultImageRegistry .Values.kafka.image.registry }}/{{ default .Values.defaultImageRepository .Values.kafka.image.repository }}/{{ .Values.kafka.image.name }}:{{ default .Values.defaultImageTag .Values.kafka.image.tagPrefix }}-kafka-2.8.0 + 2.8.1={{ default .Values.defaultImageRegistry .Values.kafka.image.registry }}/{{ default .Values.defaultImageRepository .Values.kafka.image.repository }}/{{ .Values.kafka.image.name }}:{{ default .Values.defaultImageTag .Values.kafka.image.tagPrefix }}-kafka-2.8.1 + 3.0.0={{ default .Values.defaultImageRegistry .Values.kafka.image.registry }}/{{ default .Values.defaultImageRepository .Values.kafka.image.repository }}/{{ .Values.kafka.image.name }}:{{ default .Values.defaultImageTag .Values.kafka.image.tagPrefix }}-kafka-3.0.0 + - name: STRIMZI_KAFKA_CONNECT_IMAGES + value: | + 2.8.0={{ default .Values.defaultImageRegistry .Values.kafkaConnect.image.registry }}/{{ default .Values.defaultImageRepository .Values.kafkaConnect.image.repository }}/{{ .Values.kafkaConnect.image.name }}:{{ default .Values.defaultImageTag .Values.kafkaConnect.image.tagPrefix }}-kafka-2.8.0 + 2.8.1={{ default .Values.defaultImageRegistry 
.Values.kafkaConnect.image.registry }}/{{ default .Values.defaultImageRepository .Values.kafkaConnect.image.repository }}/{{ .Values.kafkaConnect.image.name }}:{{ default .Values.defaultImageTag .Values.kafkaConnect.image.tagPrefix }}-kafka-2.8.1 + 3.0.0={{ default .Values.defaultImageRegistry .Values.kafkaConnect.image.registry }}/{{ default .Values.defaultImageRepository .Values.kafkaConnect.image.repository }}/{{ .Values.kafkaConnect.image.name }}:{{ default .Values.defaultImageTag .Values.kafkaConnect.image.tagPrefix }}-kafka-3.0.0 + - name: STRIMZI_KAFKA_MIRROR_MAKER_IMAGES + value: | + 2.8.0={{ default .Values.defaultImageRegistry .Values.kafkaMirrorMaker.image.registry }}/{{ default .Values.defaultImageRepository .Values.kafkaMirrorMaker.image.repository }}/{{ .Values.kafkaMirrorMaker.image.name }}:{{ default .Values.defaultImageTag .Values.kafkaMirrorMaker.image.tagPrefix }}-kafka-2.8.0 + 2.8.1={{ default .Values.defaultImageRegistry .Values.kafkaMirrorMaker.image.registry }}/{{ default .Values.defaultImageRepository .Values.kafkaMirrorMaker.image.repository }}/{{ .Values.kafkaMirrorMaker.image.name }}:{{ default .Values.defaultImageTag .Values.kafkaMirrorMaker.image.tagPrefix }}-kafka-2.8.1 + 3.0.0={{ default .Values.defaultImageRegistry .Values.kafkaMirrorMaker.image.registry }}/{{ default .Values.defaultImageRepository .Values.kafkaMirrorMaker.image.repository }}/{{ .Values.kafkaMirrorMaker.image.name }}:{{ default .Values.defaultImageTag .Values.kafkaMirrorMaker.image.tagPrefix }}-kafka-3.0.0 + - name: STRIMZI_KAFKA_MIRROR_MAKER_2_IMAGES + value: | + 2.8.0={{ default .Values.defaultImageRegistry .Values.kafkaMirrorMaker2.image.registry }}/{{ default .Values.defaultImageRepository .Values.kafkaMirrorMaker2.image.repository }}/{{ .Values.kafkaMirrorMaker2.image.name }}:{{ default .Values.defaultImageTag .Values.kafkaMirrorMaker2.image.tagPrefix }}-kafka-2.8.0 + 2.8.1={{ default .Values.defaultImageRegistry .Values.kafkaMirrorMaker2.image.registry }}/{{ 
default .Values.defaultImageRepository .Values.kafkaMirrorMaker2.image.repository }}/{{ .Values.kafkaMirrorMaker2.image.name }}:{{ default .Values.defaultImageTag .Values.kafkaMirrorMaker2.image.tagPrefix }}-kafka-2.8.1 + 3.0.0={{ default .Values.defaultImageRegistry .Values.kafkaMirrorMaker2.image.registry }}/{{ default .Values.defaultImageRepository .Values.kafkaMirrorMaker2.image.repository }}/{{ .Values.kafkaMirrorMaker2.image.name }}:{{ default .Values.defaultImageTag .Values.kafkaMirrorMaker2.image.tagPrefix }}-kafka-3.0.0 +{{- end -}} diff --git a/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/values.yaml b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/values.yaml new file mode 100644 index 000000000..848142e5a --- /dev/null +++ b/manifests/bucketeer/charts/kafka/charts/strimzi-kafka-operator/values.yaml @@ -0,0 +1,163 @@ +# Default values for strimzi-kafka-operator. + +namespace: +# If you set `watchNamespaces` to the same value as ``.Release.Namespace` (e.g. `helm ... --namespace $NAMESPACE`), +# the chart will fail because duplicate RoleBindings will be attempted to be created in the same namespace +watchNamespaces: [] +watchAnyNamespace: false + +defaultImageRegistry: quay.io +defaultImageRepository: strimzi +defaultImageTag: 0.27.1 + +image: + registry: "" + repository: "" + name: operator + tag: "" +logVolume: co-config-volume +logConfigMap: strimzi-cluster-operator +logLevel: ${env:STRIMZI_LOG_LEVEL:-INFO} +fullReconciliationIntervalMs: 120000 +operationTimeoutMs: 300000 +kubernetesServiceDnsDomain: cluster.local +featureGates: "" +tmpDirSizeLimit: 1Mi + +# Example on how to configure extraEnvs +# extraEnvs: +# - name: JAVA_OPTS +# value: "-Xms=256m -Xmx=256m" + +extraEnvs: [] + +tolerations: [] +affinity: {} +annotations: {} +labels: {} +nodeSelector: {} +priorityClassName: "" + +podSecurityContext: {} +securityContext: {} + +# Docker images that operator uses to provision various components of Strimzi. 
To use your own registry prefix the +# repository name with your registry URL. +# Ex) repository: registry.xyzcorp.com/strimzi/zookeeper +zookeeper: + image: + registry: "" + repository: "" + name: kafka + tagPrefix: "" +kafka: + image: + registry: "" + repository: "" + name: kafka + tagPrefix: "" +kafkaConnect: + image: + registry: "" + repository: "" + name: kafka + tagPrefix: "" +topicOperator: + image: + registry: "" + repository: "" + name: operator + tag: "" +userOperator: + image: + registry: + repository: + name: operator + tag: "" +kafkaInit: + image: + registry: "" + repository: "" + name: operator + tag: "" +tlsSidecarEntityOperator: + image: + registry: "" + repository: "" + name: kafka + tagPrefix: "" +kafkaMirrorMaker: + image: + registry: "" + repository: "" + name: kafka + tagPrefix: "" +kafkaBridge: + image: + registry: "" + repository: + name: kafka-bridge + tag: 0.21.3 +kafkaExporter: + image: + registry: "" + repository: "" + name: kafka + tagPrefix: "" +jmxTrans: + image: + registry: "" + repository: "" + name: jmxtrans + tag: "" +kafkaMirrorMaker2: + image: + registry: "" + repository: "" + name: kafka + tagPrefix: "" +cruiseControl: + image: + registry: "" + repository: "" + name: kafka + tagPrefix: "" +tlsSidecarCruiseControl: + image: + registry: "" + repository: "" + name: kafka + tagPrefix: "" +kanikoExecutor: + image: + registry: "" + repository: "" + name: kaniko-executor + tag: "" +mavenBuilder: + image: + registry: "" + repository: "" + name: maven-builder + tag: "" +resources: + limits: + memory: 384Mi + cpu: 1000m + requests: + memory: 384Mi + cpu: 200m +livenessProbe: + initialDelaySeconds: 10 + periodSeconds: 30 +readinessProbe: + initialDelaySeconds: 10 + periodSeconds: 30 + +createGlobalResources: true +# Override the exclude pattern for exclude some labels +labelsExclusionPattern: "" +# Controls whether Strimzi generates network policy resources (By default true) +generateNetworkPolicy: true +# Override the value for Connect 
build timeout +connectBuildTimeoutMs: 300000 diff --git a/manifests/bucketeer/charts/kafka/requirements.lock b/manifests/bucketeer/charts/kafka/requirements.lock new file mode 100644 index 000000000..77bf4f9ec --- /dev/null +++ b/manifests/bucketeer/charts/kafka/requirements.lock @@ -0,0 +1,6 @@ +dependencies: +- name: strimzi-kafka-operator + repository: https://strimzi.io/charts/ + version: 0.27.1 +digest: sha256:b473351af036ceb220cb8f6112f3d92f451901e15c8838b7c6247bcbf471edba +generated: "2022-02-14T09:36:40.195905+09:00" diff --git a/manifests/bucketeer/charts/kafka/requirements.yaml b/manifests/bucketeer/charts/kafka/requirements.yaml new file mode 100644 index 000000000..07574fc2f --- /dev/null +++ b/manifests/bucketeer/charts/kafka/requirements.yaml @@ -0,0 +1,4 @@ +dependencies: + - name: strimzi-kafka-operator + repository: https://strimzi.io/charts/ + version: 0.27.1 diff --git a/manifests/bucketeer/charts/kafka/values.yaml b/manifests/bucketeer/charts/kafka/values.yaml new file mode 100644 index 000000000..b593da862 --- /dev/null +++ b/manifests/bucketeer/charts/kafka/values.yaml @@ -0,0 +1,215 @@ +kafka-cluster: + namespace: + metadata: + name: kafka + spec: + kafka: + version: + replicas: + resources: {} + jvmOptions: {} + config: + auto.create.topics.enable: "false" + offsets.topic.replication.factor: 3 + transaction.state.log.replication.factor: 3 + transaction.state.log.min.isr: 2 + log.retention.hours: 48 + storage: {} + rack: + # This will be deprecated from 0.17.0. Instead, Use topology.kubernetes.io/zone. 
+ topologyKey: failure-domain.beta.kubernetes.io/zone + affinity: + nodeAffinity: + matchExpressions: + - key: "pool-type" + operator: In + values: ["kafka"] + podAntiAffinity: + enabled: true + metrics: + # Inspired by config from Kafka 2.0.0 example rules: + # https://github.com/prometheus/jmx_exporter/blob/master/example_configs/kafka-2_0_0.yml + lowercaseOutputName: true + rules: + # Special cases and very specific rules + - pattern: kafka.server<>Value + name: kafka_server_$1_$2 + type: GAUGE + labels: + clientId: "$3" + topic: "$4" + partition: "$5" + - pattern: kafka.server<>Value + name: kafka_server_$1_$2 + type: GAUGE + labels: + clientId: "$3" + broker: "$4:$5" + # Some percent metrics use MeanRate attribute + # Ex) kafka.server<>MeanRate + - pattern: kafka.(\w+)<>MeanRate + name: kafka_$1_$2_$3_percent + type: GAUGE + # Generic gauges for percents + - pattern: kafka.(\w+)<>Value + name: kafka_$1_$2_$3_percent + type: GAUGE + - pattern: kafka.(\w+)<>Value + name: kafka_$1_$2_$3_percent + type: GAUGE + labels: + "$4": "$5" + # Generic per-second counters with 0-2 key/value pairs + - pattern: kafka.(\w+)<>Count + name: kafka_$1_$2_$3_total + type: COUNTER + labels: + "$4": "$5" + "$6": "$7" + - pattern: kafka.(\w+)<>Count + name: kafka_$1_$2_$3_total + type: COUNTER + labels: + "$4": "$5" + - pattern: kafka.(\w+)<>Count + name: kafka_$1_$2_$3_total + type: COUNTER + # Generic gauges with 0-2 key/value pairs + - pattern: kafka.(\w+)<>Value + name: kafka_$1_$2_$3 + type: GAUGE + labels: + "$4": "$5" + "$6": "$7" + - pattern: kafka.(\w+)<>Value + name: kafka_$1_$2_$3 + type: GAUGE + labels: + "$4": "$5" + - pattern: kafka.(\w+)<>Value + name: kafka_$1_$2_$3 + type: GAUGE + # Emulate Prometheus 'Summary' metrics for the exported 'Histogram's. + # Note that these are missing the '_sum' metric! 
+ - pattern: kafka.(\w+)<>Count + name: kafka_$1_$2_$3_count + type: COUNTER + labels: + "$4": "$5" + "$6": "$7" + - pattern: kafka.(\w+)<>(\d+)thPercentile + name: kafka_$1_$2_$3 + type: GAUGE + labels: + "$4": "$5" + "$6": "$7" + quantile: "0.$8" + - pattern: kafka.(\w+)<>Count + name: kafka_$1_$2_$3_count + type: COUNTER + labels: + "$4": "$5" + - pattern: kafka.(\w+)<>(\d+)thPercentile + name: kafka_$1_$2_$3 + type: GAUGE + labels: + "$4": "$5" + quantile: "0.$6" + - pattern: kafka.(\w+)<>Count + name: kafka_$1_$2_$3_count + type: COUNTER + - pattern: kafka.(\w+)<>(\d+)thPercentile + name: kafka_$1_$2_$3 + type: GAUGE + labels: + quantile: "0.$4" + zookeeper: + replicas: + resources: {} + affinity: + nodeAffinity: + matchExpressions: + - key: "pool-type" + operator: In + values: ["kafka"] + podAntiAffinity: + enabled: true + jvmOptions: {} + storage: {} + metrics: + # Inspired by Zookeeper rules + # https://github.com/prometheus/jmx_exporter/blob/master/example_configs/zookeeper.yaml + lowercaseOutputName: true + rules: + # replicated Zookeeper + - pattern: "org.apache.ZooKeeperService<>(\\w+)" + name: "zookeeper_$2" + type: GAUGE + - pattern: "org.apache.ZooKeeperService<>(\\w+)" + name: "zookeeper_$3" + type: GAUGE + labels: + replicaId: "$2" + - pattern: "org.apache.ZooKeeperService<>(Packets\\w+)" + name: "zookeeper_$4" + type: COUNTER + labels: + replicaId: "$2" + memberType: "$3" + - pattern: "org.apache.ZooKeeperService<>(\\w+)" + name: "zookeeper_$4" + type: GAUGE + labels: + replicaId: "$2" + memberType: "$3" + - pattern: "org.apache.ZooKeeperService<>(\\w+)" + name: "zookeeper_$4_$5" + type: GAUGE + labels: + replicaId: "$2" + memberType: "$3" + # standalone Zookeeper + - pattern: "org.apache.ZooKeeperService<>(\\w+)" + type: GAUGE + name: "zookeeper_$2" + - pattern: "org.apache.ZooKeeperService<>(\\w+)" + type: GAUGE + name: "zookeeper_$2" + entityOperator: + affinity: + nodeAffinity: + matchExpressions: + - key: "pool-type" + operator: In + values: 
["kafka"] + userOperator: + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 200m + memory: 512Mi + kafkaExporter: + affinity: + nodeAffinity: + matchExpressions: + - key: "pool-type" + operator: In + values: ["kafka"] + resources: {} + + users: {} + topics: {} + +strimzi-kafka-operator: + namespace: + nodeSelector: + cloud.google.com/gke-nodepool: kafka-pool + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 200m + memory: 512Mi diff --git a/manifests/bucketeer/charts/metrics-event-persister/Chart.yaml b/manifests/bucketeer/charts/metrics-event-persister/Chart.yaml new file mode 100644 index 000000000..31a93b536 --- /dev/null +++ b/manifests/bucketeer/charts/metrics-event-persister/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for bucketeer-metrics-event-persister +name: metrics-event-persister +version: 1.0.0 diff --git a/manifests/bucketeer/charts/metrics-event-persister/templates/NOTES.txt b/manifests/bucketeer/charts/metrics-event-persister/templates/NOTES.txt new file mode 100644 index 000000000..850676d4c --- /dev/null +++ b/manifests/bucketeer/charts/metrics-event-persister/templates/NOTES.txt @@ -0,0 +1,15 @@ +1. Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "metrics-event-persister.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "metrics-event-persister.fullname" . 
}}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "metrics-event-persister.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "metrics-event-persister.name" . }},release={{ template "metrics-event-persister.fullname" . }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/manifests/bucketeer/charts/metrics-event-persister/templates/_helpers.tpl b/manifests/bucketeer/charts/metrics-event-persister/templates/_helpers.tpl new file mode 100644 index 000000000..105b319ab --- /dev/null +++ b/manifests/bucketeer/charts/metrics-event-persister/templates/_helpers.tpl @@ -0,0 +1,48 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "metrics-event-persister.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "metrics-event-persister.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "metrics-event-persister.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "service-cert-secret" -}} +{{- if .Values.tls.service.secret }} +{{- printf "%s" .Values.tls.service.secret -}} +{{- else -}} +{{ template "metrics-event-persister.fullname" . }}-service-cert +{{- end -}} +{{- end -}} + +{{- define "service-token-secret" -}} +{{- if .Values.serviceToken.secret }} +{{- printf "%s" .Values.serviceToken.secret -}} +{{- else -}} +{{ template "metrics-event-persister.fullname" . }}-service-token +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/manifests/bucketeer/charts/metrics-event-persister/templates/deployment.yaml b/manifests/bucketeer/charts/metrics-event-persister/templates/deployment.yaml new file mode 100644 index 000000000..c61072314 --- /dev/null +++ b/manifests/bucketeer/charts/metrics-event-persister/templates/deployment.yaml @@ -0,0 +1,147 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "metrics-event-persister.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "metrics-event-persister.name" . }} + chart: {{ template "metrics-event-persister.chart" . }} + release: {{ template "metrics-event-persister.fullname" . }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "metrics-event-persister.name" . }} + release: {{ template "metrics-event-persister.fullname" . }} + template: + metadata: + labels: + app: {{ template "metrics-event-persister.name" . }} + release: {{ template "metrics-event-persister.fullname" . }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/envoy-configmap.yaml") . | sha256sum }} + spec: + {{- with .Values.global.image.imagePullSecrets }} + imagePullSecrets: {{- toYaml . 
| nindent 8 }} + {{- end }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + volumes: + - name: envoy-config + configMap: + name: {{ template "metrics-event-persister.fullname" . }}-envoy-config + - name: service-cert-secret + secret: + secretName: {{ template "service-cert-secret" . }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: ["persister"] + env: + - name: BUCKETEER_METRICS_EVENT_PROJECT + value: "{{ .Values.env.project }}" + - name: BUCKETEER_METRICS_EVENT_TOPIC + value: "{{ .Values.env.topic }}" + - name: BUCKETEER_METRICS_EVENT_SUBSCRIPTION + value: "{{ .Values.env.subscription }}" + - name: BUCKETEER_METRICS_EVENT_MAX_MPS + value: "{{ .Values.env.maxMps }}" + - name: BUCKETEER_METRICS_EVENT_NUM_WORKERS + value: "{{ .Values.env.numWorkers }}" + - name: BUCKETEER_METRICS_EVENT_FLUSH_SIZE + value: "{{ .Values.env.flushSize }}" + - name: BUCKETEER_METRICS_EVENT_FLUSH_INTERVAL + value: "{{ .Values.env.flushInterval }}" + - name: BUCKETEER_METRICS_EVENT_PULLER_NUM_GOROUTINES + value: "{{ .Values.env.pullerNumGoroutines }}" + - name: BUCKETEER_METRICS_EVENT_PULLER_MAX_OUTSTANDING_MESSAGES + value: "{{ .Values.env.pullerMaxOutstandingMessages }}" + - name: BUCKETEER_METRICS_EVENT_PULLER_MAX_OUTSTANDING_BYTES + value: "{{ .Values.env.pullerMaxOutstandingBytes }}" + - name: BUCKETEER_METRICS_EVENT_PORT + value: "{{ .Values.env.port }}" + - name: BUCKETEER_METRICS_EVENT_METRICS_PORT + value: "{{ .Values.env.metricsPort }}" + - name: BUCKETEER_METRICS_EVENT_LOG_LEVEL + value: "{{ .Values.env.logLevel }}" + - name: BUCKETEER_METRICS_EVENT_CERT + value: /usr/local/certs/service/tls.crt + - name: BUCKETEER_METRICS_EVENT_KEY + value: /usr/local/certs/service/tls.key + volumeMounts: + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + ports: + - 
name: service + containerPort: {{ .Values.env.port }} + - name: metrics + containerPort: {{ .Values.env.metricsPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: service + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: service + scheme: HTTPS + resources: +{{ toYaml .Values.resources | indent 12 }} + - name: envoy + image: "{{ .Values.envoy.image.repository }}:{{ .Values.envoy.image.tag }}" + imagePullPolicy: {{ .Values.envoy.image.pullPolicy }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-c" + - "while [ $(netstat -plunt | grep tcp | grep -v envoy | wc -l) -ne 0 ]; do sleep 1; done;" + command: ["envoy"] + args: + - "-c" + - "/usr/local/conf/config.yaml" + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + - name: envoy-config + mountPath: /usr/local/conf/ + readOnly: true + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + ports: + - name: envoy + containerPort: {{ .Values.envoy.port }} + - name: admin + containerPort: {{ .Values.envoy.adminPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + resources: +{{ toYaml .Values.envoy.resources | indent 12 }} + strategy: + type: RollingUpdate diff --git a/manifests/bucketeer/charts/metrics-event-persister/templates/envoy-configmap.yaml b/manifests/bucketeer/charts/metrics-event-persister/templates/envoy-configmap.yaml new file 
mode 100644 index 000000000..ff85bd4a1 --- /dev/null +++ b/manifests/bucketeer/charts/metrics-event-persister/templates/envoy-configmap.yaml @@ -0,0 +1,127 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "metrics-event-persister.fullname" . }}-envoy-config + namespace: {{ .Values.namespace }} + labels: + app: {{ template "metrics-event-persister.name" . }} + chart: {{ template "metrics-event-persister.chart" . }} + release: {{ template "metrics-event-persister.fullname" . }} + heritage: {{ .Release.Service }} +data: + config.yaml: |- + admin: + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 + static_resources: + clusters: + - name: metrics-event-persister + type: strict_dns + lb_policy: round_robin + connect_timeout: 5s + dns_lookup_family: V4_ONLY + load_assignment: + cluster_name: metrics-event-persister + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 9090 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + listeners: + - name: ingress + address: + socket_address: + 
address: 0.0.0.0 + port_value: 9000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + metrics-event-persister: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: ingress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: / + route: + cluster: metrics-event-persister + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: ingress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true diff --git a/manifests/bucketeer/charts/metrics-event-persister/templates/hpa.yaml b/manifests/bucketeer/charts/metrics-event-persister/templates/hpa.yaml new file mode 100644 index 000000000..f5c8f049a --- /dev/null +++ b/manifests/bucketeer/charts/metrics-event-persister/templates/hpa.yaml @@ -0,0 +1,19 @@ +{{ if .Values.hpa.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template 
"metrics-event-persister.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "metrics-event-persister.fullname" . }} + minReplicas: {{ .Values.hpa.minReplicas }} + maxReplicas: {{ .Values.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpa.metrics.cpu.targetAverageUtilization }} +{{ end }} diff --git a/manifests/bucketeer/charts/metrics-event-persister/templates/service-cert-secret.yaml b/manifests/bucketeer/charts/metrics-event-persister/templates/service-cert-secret.yaml new file mode 100644 index 000000000..410cfc5da --- /dev/null +++ b/manifests/bucketeer/charts/metrics-event-persister/templates/service-cert-secret.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.tls.service.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "metrics-event-persister.fullname" . }}-service-cert + namespace: {{ .Values.namespace }} + labels: + app: {{ template "metrics-event-persister.name" . }} + chart: {{ template "metrics-event-persister.chart" . }} + release: {{ template "metrics-event-persister.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + tls.crt: {{ required "Service TLS certificate is required" .Values.tls.service.cert | b64enc | quote }} + tls.key: {{ required "Service TLS key is required" .Values.tls.service.key | b64enc | quote }} +{{- end }} diff --git a/manifests/bucketeer/charts/metrics-event-persister/templates/service.yaml b/manifests/bucketeer/charts/metrics-event-persister/templates/service.yaml new file mode 100644 index 000000000..ff92c0b0f --- /dev/null +++ b/manifests/bucketeer/charts/metrics-event-persister/templates/service.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "metrics-event-persister.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "metrics-event-persister.name" . 
}} + chart: {{ template "metrics-event-persister.chart" . }} + release: {{ template "metrics-event-persister.fullname" . }} + heritage: {{ .Release.Service }} + envoy: "true" + metrics: "true" +spec: + type: {{ .Values.service.type }} + clusterIP: {{ .Values.service.clusterIP }} + ports: + - name: service + port: {{ .Values.service.externalPort }} + targetPort: envoy + protocol: TCP + - name: metrics + port: {{ .Values.env.metricsPort }} + protocol: TCP + - name: admin + port: {{ .Values.envoy.adminPort }} + protocol: TCP + selector: + app: {{ template "metrics-event-persister.name" . }} + release: {{ template "metrics-event-persister.fullname" . }} diff --git a/manifests/bucketeer/charts/metrics-event-persister/values.yaml b/manifests/bucketeer/charts/metrics-event-persister/values.yaml new file mode 100644 index 000000000..7710feb7e --- /dev/null +++ b/manifests/bucketeer/charts/metrics-event-persister/values.yaml @@ -0,0 +1,61 @@ +image: + repository: ghcr.io/bucketeer-io/bucketeer-metrics-event + pullPolicy: IfNotPresent + +fullnameOverride: "metrics-event-persister" + +namespace: + +env: + project: + topic: + subscription: + maxMps: "1500" + numWorkers: 2 + flushSize: 100 + flushInterval: 2s + pullerNumGoroutines: 5 + pullerMaxOutstandingMessages: "1000" + pullerMaxOutstandingBytes: "1000000000" + logLevel: info + port: 9090 + metricsPort: 9002 + +affinity: {} + +nodeSelector: {} + +hpa: + enabled: + minReplicas: + maxReplicas: + metrics: + cpu: + targetAverageUtilization: + +envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + port: 9000 + adminPort: 8001 + resources: {} + +tls: + service: + secret: + cert: + key: + +service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + +health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + +resources: {} diff --git a/manifests/bucketeer/charts/migration-mysql/Chart.yaml b/manifests/bucketeer/charts/migration-mysql/Chart.yaml new
file mode 100644 index 000000000..66bd4bb91 --- /dev/null +++ b/manifests/bucketeer/charts/migration-mysql/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for bucketeer-migration-mysql +name: migration-mysql +version: 1.0.0 diff --git a/manifests/bucketeer/charts/migration-mysql/templates/NOTES.txt b/manifests/bucketeer/charts/migration-mysql/templates/NOTES.txt new file mode 100644 index 000000000..315089bca --- /dev/null +++ b/manifests/bucketeer/charts/migration-mysql/templates/NOTES.txt @@ -0,0 +1,15 @@ +1. Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "migration-mysql.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "migration-mysql.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "migration-mysql.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "migration-mysql.name" . }},release={{ template "migration-mysql.fullname" . 
}}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/manifests/bucketeer/charts/migration-mysql/templates/_helpers.tpl b/manifests/bucketeer/charts/migration-mysql/templates/_helpers.tpl new file mode 100644 index 000000000..a6f2121bb --- /dev/null +++ b/manifests/bucketeer/charts/migration-mysql/templates/_helpers.tpl @@ -0,0 +1,48 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "migration-mysql.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "migration-mysql.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "migration-mysql.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "service-cert-secret" -}} +{{- if .Values.tls.service.secret }} +{{- printf "%s" .Values.tls.service.secret -}} +{{- else -}} +{{ template "migration-mysql.fullname" . }}-service-cert +{{- end -}} +{{- end -}} + +{{- define "oauth-key-secret" -}} +{{- if .Values.oauth.key.secret }} +{{- printf "%s" .Values.oauth.key.secret -}} +{{- else -}} +{{ template "migration-mysql.fullname" . 
}}-oauth-key +{{- end -}} +{{- end -}} diff --git a/manifests/bucketeer/charts/migration-mysql/templates/deployment.yaml b/manifests/bucketeer/charts/migration-mysql/templates/deployment.yaml new file mode 100644 index 000000000..c0f65704a --- /dev/null +++ b/manifests/bucketeer/charts/migration-mysql/templates/deployment.yaml @@ -0,0 +1,162 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "migration-mysql.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "migration-mysql.name" . }} + chart: {{ template "migration-mysql.chart" . }} + release: {{ template "migration-mysql.fullname" . }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app: {{ template "migration-mysql.name" . }} + release: {{ template "migration-mysql.fullname" . }} + template: + metadata: + labels: + app: {{ template "migration-mysql.name" . }} + release: {{ template "migration-mysql.fullname" . }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/envoy-configmap.yaml") . | sha256sum }} + spec: + {{- with .Values.global.image.imagePullSecrets }} + imagePullSecrets: {{- toYaml . | nindent 8 }} + {{- end }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + volumes: + - name: envoy-config + configMap: + name: {{ template "migration-mysql.fullname" . }}-envoy-config + - name: service-cert-secret + secret: + secretName: {{ template "service-cert-secret" . }} + - name: oauth-key-secret + secret: + secretName: {{ template "oauth-key-secret" . 
}} + - name: github-access-token-secret + secret: + secretName: github-token + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: ["mysql-server"] + env: + - name: BUCKETEER_MIGRATION_PORT + value: "{{ .Values.env.port }}" + - name: BUCKETEER_MIGRATION_METRICS_PORT + value: "{{ .Values.env.metricsPort }}" + - name: BUCKETEER_MIGRATION_LOG_LEVEL + value: "{{ .Values.env.logLevel }}" + - name: BUCKETEER_MIGRATION_GITHUB_USER + value: "{{ .Values.env.githubUser }}" + - name: BUCKETEER_MIGRATION_GITHUB_ACCESS_TOKEN_PATH + value: /usr/local/github-access-token/bucketeer-bot-access-token + - name: BUCKETEER_MIGRATION_GITHUB_MIGRATION_SOURCE_PATH + value: "{{ .Values.env.githubMigrationSourcePath }}" + - name: BUCKETEER_MIGRATION_MYSQL_USER + value: "{{ .Values.env.mysqlUser }}" + - name: BUCKETEER_MIGRATION_MYSQL_PASS + value: "{{ .Values.env.mysqlPass }}" + - name: BUCKETEER_MIGRATION_MYSQL_HOST + value: "{{ .Values.env.mysqlHost }}" + - name: BUCKETEER_MIGRATION_MYSQL_PORT + value: "{{ .Values.env.mysqlPort }}" + - name: BUCKETEER_MIGRATION_MYSQL_DB_NAME + value: "{{ .Values.env.mysqlDbName }}" + - name: BUCKETEER_MIGRATION_OAUTH_CLIENT_ID + value: "{{ .Values.oauth.clientId }}" + - name: BUCKETEER_MIGRATION_OAUTH_ISSUER + value: "{{ .Values.oauth.issuer }}" + - name: BUCKETEER_MIGRATION_OAUTH_KEY + value: /usr/local/oauth-key/public.pem + - name: BUCKETEER_MIGRATION_CERT + value: /usr/local/certs/service/tls.crt + - name: BUCKETEER_MIGRATION_KEY + value: /usr/local/certs/service/tls.key + volumeMounts: + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + - name: oauth-key-secret + mountPath: /usr/local/oauth-key + readOnly: true + - name: github-access-token-secret + mountPath: /usr/local/github-access-token + readOnly: true + ports: + - name: service + containerPort: {{ .Values.env.port }} + - name: metrics + 
containerPort: {{ .Values.env.metricsPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: service + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: service + scheme: HTTPS + resources: +{{ toYaml .Values.resources | indent 12 }} + - name: envoy + image: "{{ .Values.envoy.image.repository }}:{{ .Values.envoy.image.tag }}" + imagePullPolicy: {{ .Values.envoy.image.pullPolicy }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-c" + - "while [ $(netstat -plunt | grep tcp | grep -v envoy | wc -l) -ne 0 ]; do sleep 1; done;" + command: ["envoy"] + args: + - "-c" + - "/usr/local/conf/config.yaml" + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + - name: envoy-config + mountPath: /usr/local/conf/ + readOnly: true + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + ports: + - name: envoy + containerPort: {{ .Values.envoy.port }} + - name: admin + containerPort: {{ .Values.envoy.adminPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + resources: +{{ toYaml .Values.envoy.resources | indent 12 }} + strategy: + type: RollingUpdate diff --git a/manifests/bucketeer/charts/migration-mysql/templates/envoy-configmap.yaml b/manifests/bucketeer/charts/migration-mysql/templates/envoy-configmap.yaml new file mode 100644 index 000000000..c90204dd5 --- /dev/null +++ 
b/manifests/bucketeer/charts/migration-mysql/templates/envoy-configmap.yaml @@ -0,0 +1,127 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "migration-mysql.fullname" . }}-envoy-config + namespace: {{ .Values.namespace }} + labels: + app: {{ template "migration-mysql.name" . }} + chart: {{ template "migration-mysql.chart" . }} + release: {{ template "migration-mysql.fullname" . }} + heritage: {{ .Release.Service }} +data: + config.yaml: |- + admin: + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 + static_resources: + clusters: + - name: migration-mysql + type: strict_dns + lb_policy: round_robin + connect_timeout: 5s + dns_lookup_family: V4_ONLY + load_assignment: + cluster_name: migration-mysql + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 9090 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + listeners: + - name: ingress + address: + socket_address: + address: 0.0.0.0 + port_value: 9000 + filter_chains: + - filters: + - name: 
envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + migration-mysql: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: ingress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: / + route: + cluster: migration-mysql + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 600s + stat_prefix: ingress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true diff --git a/manifests/bucketeer/charts/migration-mysql/templates/hpa.yaml b/manifests/bucketeer/charts/migration-mysql/templates/hpa.yaml new file mode 100644 index 000000000..a2c1cb71c --- /dev/null +++ b/manifests/bucketeer/charts/migration-mysql/templates/hpa.yaml @@ -0,0 +1,19 @@ +{{ if .Values.hpa.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "migration-mysql.fullname" . 
}} + namespace: {{ .Values.namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "migration-mysql.fullname" . }} + minReplicas: {{ .Values.hpa.minReplicas }} + maxReplicas: {{ .Values.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpa.metrics.cpu.targetAverageUtilization }} +{{ end }} diff --git a/manifests/bucketeer/charts/migration-mysql/templates/oauth-key-secret.yaml b/manifests/bucketeer/charts/migration-mysql/templates/oauth-key-secret.yaml new file mode 100644 index 000000000..e7220b1c5 --- /dev/null +++ b/manifests/bucketeer/charts/migration-mysql/templates/oauth-key-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.oauth.key.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "migration-mysql.fullname" . }}-oauth-key + namespace: {{ .Values.namespace }} + labels: + app: {{ template "migration-mysql.name" . }} + chart: {{ template "migration-mysql.chart" . }} + release: {{ template "migration-mysql.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + public.pem: {{ required "OAuth key is required" .Values.oauth.key.public | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/migration-mysql/templates/service-cert-secret.yaml b/manifests/bucketeer/charts/migration-mysql/templates/service-cert-secret.yaml new file mode 100644 index 000000000..0828af6e4 --- /dev/null +++ b/manifests/bucketeer/charts/migration-mysql/templates/service-cert-secret.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.tls.service.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "migration-mysql.fullname" . }}-service-cert + namespace: {{ .Values.namespace }} + labels: + app: {{ template "migration-mysql.name" . }} + chart: {{ template "migration-mysql.chart" . }} + release: {{ template "migration-mysql.fullname" . 
}} + heritage: {{ .Release.Service }} +type: Opaque +data: + tls.crt: {{ required "Service TLS certificate is required" .Values.tls.service.cert | b64enc | quote }} + tls.key: {{ required "Service TLS key is required" .Values.tls.service.key | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/migration-mysql/templates/service.yaml b/manifests/bucketeer/charts/migration-mysql/templates/service.yaml new file mode 100644 index 000000000..943634101 --- /dev/null +++ b/manifests/bucketeer/charts/migration-mysql/templates/service.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "migration-mysql.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "migration-mysql.name" . }} + chart: {{ template "migration-mysql.chart" . }} + release: {{ template "migration-mysql.fullname" . }} + heritage: {{ .Release.Service }} + envoy: "true" + metrics: "true" +spec: + type: {{ .Values.service.type }} + clusterIP: {{ .Values.service.clusterIP }} + ports: + - name: service + port: {{ .Values.service.externalPort }} + targetPort: envoy + protocol: TCP + - name: metrics + port: {{ .Values.env.metricsPort }} + protocol: TCP + - name: admin + port: {{ .Values.envoy.adminPort }} + protocol: TCP + selector: + app: {{ template "migration-mysql.name" . }} + release: {{ template "migration-mysql.fullname" . 
}} + diff --git a/manifests/bucketeer/charts/migration-mysql/values.yaml b/manifests/bucketeer/charts/migration-mysql/values.yaml new file mode 100644 index 000000000..5c138d7ba --- /dev/null +++ b/manifests/bucketeer/charts/migration-mysql/values.yaml @@ -0,0 +1,67 @@ +replicaCount: 1 + +image: + repository: ghcr.io/bucketeer-io/bucketeer-migration + pullPolicy: IfNotPresent + +fullnameOverride: "migration-mysql" + +namespace: + +env: + logLevel: info + port: 9090 + metricsPort: 9002 + githubUser: + githubMigrationSourcePath: + mysqlUser: + mysqlPass: + mysqlHost: + mysqlPort: 3306 + mysqlDbName: + +affinity: {} + +nodeSelector: {} + +hpa: + enabled: false + minReplicas: + maxReplicas: + metrics: + cpu: + targetAverageUtilization: + +tls: + service: + secret: + cert: + key: + +oauth: + key: + secret: + public: + clientId: + issuer: + +envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + port: 9000 + adminPort: 8001 + resources: {} + +service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + +health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + +resources: {} diff --git a/manifests/bucketeer/charts/notification-sender/Chart.yaml b/manifests/bucketeer/charts/notification-sender/Chart.yaml new file mode 100644 index 000000000..e160b133d --- /dev/null +++ b/manifests/bucketeer/charts/notification-sender/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for bucketeer-notification-sender +name: notification-sender +version: 1.0.0 diff --git a/manifests/bucketeer/charts/notification-sender/templates/NOTES.txt b/manifests/bucketeer/charts/notification-sender/templates/NOTES.txt new file mode 100644 index 000000000..f105cf68c --- /dev/null +++ b/manifests/bucketeer/charts/notification-sender/templates/NOTES.txt @@ -0,0 +1,15 @@ +1. 
Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "notification-sender.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of it by running 'kubectl get svc -w {{ template "notification-sender.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "notification-sender.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "notification-sender.name" . }},release={{ template "notification-sender.fullname" . }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/manifests/bucketeer/charts/notification-sender/templates/_helpers.tpl b/manifests/bucketeer/charts/notification-sender/templates/_helpers.tpl new file mode 100644 index 000000000..fd37c0d9e --- /dev/null +++ b/manifests/bucketeer/charts/notification-sender/templates/_helpers.tpl @@ -0,0 +1,48 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "notification-sender.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+If release name contains chart name it will be used as a full name. +*/}} +{{- define "notification-sender.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "notification-sender.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "service-cert-secret" -}} +{{- if .Values.tls.service.secret }} +{{- printf "%s" .Values.tls.service.secret -}} +{{- else -}} +{{ template "notification-sender.fullname" . }}-service-cert +{{- end -}} +{{- end -}} + +{{- define "service-token-secret" -}} +{{- if .Values.serviceToken.secret }} +{{- printf "%s" .Values.serviceToken.secret -}} +{{- else -}} +{{ template "notification-sender.fullname" . }}-service-token +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/manifests/bucketeer/charts/notification-sender/templates/deployment.yaml b/manifests/bucketeer/charts/notification-sender/templates/deployment.yaml new file mode 100644 index 000000000..33d99ba94 --- /dev/null +++ b/manifests/bucketeer/charts/notification-sender/templates/deployment.yaml @@ -0,0 +1,168 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "notification-sender.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "notification-sender.name" . }} + chart: {{ template "notification-sender.chart" . }} + release: {{ template "notification-sender.fullname" . 
}} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app: {{ template "notification-sender.name" . }} + release: {{ template "notification-sender.fullname" . }} + template: + metadata: + labels: + app: {{ template "notification-sender.name" . }} + release: {{ template "notification-sender.fullname" . }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/envoy-configmap.yaml") . | sha256sum }} + spec: + {{- with .Values.global.image.imagePullSecrets }} + imagePullSecrets: {{- toYaml . | nindent 8 }} + {{- end }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + volumes: + - name: envoy-config + configMap: + name: {{ template "notification-sender.fullname" . }}-envoy-config + - name: service-cert-secret + secret: + secretName: {{ template "service-cert-secret" . }} + - name: service-token-secret + secret: + secretName: {{ template "service-token-secret" . 
}} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: ["sender"] + env: + - name: BUCKETEER_NOTIFICATION_PROJECT + value: "{{ .Values.env.project }}" + - name: BUCKETEER_NOTIFICATION_DOMAIN_TOPIC + value: "{{ .Values.env.domainTopic }}" + - name: BUCKETEER_NOTIFICATION_DOMAIN_SUBSCRIPTION + value: "{{ .Values.env.domainSubscription }}" + - name: BUCKETEER_NOTIFICATION_NOTIFICATION_SERVICE + value: "{{ .Values.env.notificationService }}" + - name: BUCKETEER_NOTIFICATION_ENVIRONMENT_SERVICE + value: "{{ .Values.env.environmentService }}" + - name: BUCKETEER_NOTIFICATION_EXPERIMENT_SERVICE + value: "{{ .Values.env.experimentService }}" + - name: BUCKETEER_NOTIFICATION_EVENT_COUNTER_SERVICE + value: "{{ .Values.env.eventCounterService }}" + - name: BUCKETEER_NOTIFICATION_FEATURE_SERVICE + value: "{{ .Values.env.featureService }}" + - name: BUCKETEER_NOTIFICATION_SCHEDULE_FEATURE_STALE_WATCHER + value: "{{ .Values.env.scheduleFeatureStaleWatcher }}" + - name: BUCKETEER_NOTIFICATION_SCHEDULE_EXPERIMENT_RUNNING_WATCHER + value: "{{ .Values.env.scheduleExperimentRunningWatcher }}" + - name: BUCKETEER_NOTIFICATION_SCHEDULE_MAU_COUNT_WATCHER + value: "{{ .Values.env.scheduleMauCountWatcher }}" + - name: BUCKETEER_NOTIFICATION_WEB_URL + value: "{{ .Values.env.webURL }}" + - name: BUCKETEER_NOTIFICATION_MAX_MPS + value: "{{ .Values.env.maxMps }}" + - name: BUCKETEER_NOTIFICATION_NUM_WORKERS + value: "{{ .Values.env.numWorkers }}" + - name: BUCKETEER_NOTIFICATION_PULLER_NUM_GOROUTINES + value: "{{ .Values.env.pullerNumGoroutines }}" + - name: BUCKETEER_NOTIFICATION_PULLER_MAX_OUTSTANDING_MESSAGES + value: "{{ .Values.env.pullerMaxOutstandingMessages }}" + - name: BUCKETEER_NOTIFICATION_PULLER_MAX_OUTSTANDING_BYTES + value: "{{ .Values.env.pullerMaxOutstandingBytes }}" + - name: BUCKETEER_NOTIFICATION_LOG_LEVEL + value: "{{ .Values.env.logLevel }}" + - name: 
BUCKETEER_NOTIFICATION_PORT + value: "{{ .Values.env.port }}" + - name: BUCKETEER_NOTIFICATION_METRICS_PORT + value: "{{ .Values.env.metricsPort }}" + - name: BUCKETEER_NOTIFICATION_SERVICE_TOKEN + value: /usr/local/service-token/token + - name: BUCKETEER_NOTIFICATION_CERT + value: /usr/local/certs/service/tls.crt + - name: BUCKETEER_NOTIFICATION_KEY + value: /usr/local/certs/service/tls.key + volumeMounts: + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + - name: service-token-secret + mountPath: /usr/local/service-token + readOnly: true + ports: + - name: service + containerPort: {{ .Values.env.port }} + - name: metrics + containerPort: {{ .Values.env.metricsPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + httpGet: + path: /health + port: service + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: service + scheme: HTTPS + resources: +{{ toYaml .Values.resources | indent 12 }} + - name: envoy + image: "{{ .Values.envoy.image.repository }}:{{ .Values.envoy.image.tag }}" + imagePullPolicy: {{ .Values.envoy.image.pullPolicy }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-c" + - "while [ $(netstat -plunt | grep tcp | grep -v envoy | wc -l) -ne 0 ]; do sleep 1; done;" + command: ["envoy"] + args: + - "-c" + - "/usr/local/conf/config.yaml" + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + - name: envoy-config + mountPath: /usr/local/conf/ + readOnly: true + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + ports: + - name: envoy + containerPort: {{ .Values.envoy.port }} + - name: admin + containerPort: {{ .Values.envoy.adminPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + 
httpGet: + path: /health + port: envoy + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + resources: +{{ toYaml .Values.envoy.resources | indent 12 }} + strategy: + type: RollingUpdate diff --git a/manifests/bucketeer/charts/notification-sender/templates/envoy-configmap.yaml b/manifests/bucketeer/charts/notification-sender/templates/envoy-configmap.yaml new file mode 100644 index 000000000..fe6253e77 --- /dev/null +++ b/manifests/bucketeer/charts/notification-sender/templates/envoy-configmap.yaml @@ -0,0 +1,453 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "notification-sender.fullname" . }}-envoy-config + namespace: {{ .Values.namespace }} + labels: + app: {{ template "notification-sender.name" . }} + chart: {{ template "notification-sender.chart" . }} + release: {{ template "notification-sender.fullname" . }} + heritage: {{ .Release.Service }} +data: + config.yaml: |- + admin: + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 + static_resources: + clusters: + + - name: notification-sender + type: strict_dns + lb_policy: round_robin + connect_timeout: 5s + dns_lookup_family: V4_ONLY + load_assignment: + cluster_name: notification-sender + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 9090 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + 
envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + + - name: notification + connect_timeout: 5s + type: strict_dns + dns_lookup_family: V4_ONLY + lb_policy: round_robin + load_assignment: + cluster_name: notification + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: notification.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + + - name: environment + type: strict_dns + lb_policy: round_robin + connect_timeout: 5s + dns_lookup_family: V4_ONLY + load_assignment: + cluster_name: environment + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: environment.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': 
type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + + - name: experiment + type: strict_dns + lb_policy: round_robin + connect_timeout: 5s + dns_lookup_family: V4_ONLY + load_assignment: + cluster_name: experiment + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: experiment.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + + - name: event-counter-server + type: strict_dns + lb_policy: round_robin + connect_timeout: 5s + dns_lookup_family: V4_ONLY + load_assignment: + cluster_name: 
event-counter-server + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: event-counter.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + + - name: feature + type: strict_dns + lb_policy: round_robin + connect_timeout: 5s + dns_lookup_family: V4_ONLY + load_assignment: + cluster_name: feature + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: feature.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + 
no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + + listeners: + - name: ingress + address: + socket_address: + address: 0.0.0.0 + port_value: 9000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + notification-sender: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: ingress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: / + route: + cluster: notification-sender + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: ingress_http + stream_idle_timeout: 3600s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true + + - name: egress + address: + socket_address: + address: 127.0.0.1 + port_value: 9001 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': 
type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + environment: + value: 25 + event-counter-server: + value: 25 + experiment: + value: 25 + feature: + value: 25 + notification: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: egress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.notification.NotificationService + route: + cluster: notification + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.environment.EnvironmentService + route: + cluster: environment + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.experiment.ExperimentService + route: + cluster: experiment + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.eventcounter.EventCounterService + route: + cluster: event-counter-server + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 3600s + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.feature.FeatureService + route: + cluster: feature + retry_policy: + num_retries: 3 + retry_on: 5xx 
+ timeout: 15s + stat_prefix: egress_http + stream_idle_timeout: 3600s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true diff --git a/manifests/bucketeer/charts/notification-sender/templates/service-cert-secret.yaml b/manifests/bucketeer/charts/notification-sender/templates/service-cert-secret.yaml new file mode 100644 index 000000000..95499b0de --- /dev/null +++ b/manifests/bucketeer/charts/notification-sender/templates/service-cert-secret.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.tls.service.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "notification-sender.fullname" . }}-service-cert + namespace: {{ .Values.namespace }} + labels: + app: {{ template "notification-sender.name" . }} + chart: {{ template "notification-sender.chart" . }} + release: {{ template "notification-sender.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + tls.crt: {{ required "Service TLS certificate is required" .Values.tls.service.cert | b64enc | quote }} + tls.key: {{ required "Service TLS key is required" .Values.tls.service.key | b64enc | quote }} +{{- end }} diff --git a/manifests/bucketeer/charts/notification-sender/templates/service-token-secret.yaml b/manifests/bucketeer/charts/notification-sender/templates/service-token-secret.yaml new file mode 100644 index 000000000..417c01890 --- /dev/null +++ b/manifests/bucketeer/charts/notification-sender/templates/service-token-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.serviceToken.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "notification-sender.fullname" . 
}}-service-token + namespace: {{ .Values.namespace }} + labels: + app: {{ template "notification-sender.name" . }} + chart: {{ template "notification-sender.chart" . }} + release: {{ template "notification-sender.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + token: {{ required "Service token is required" .Values.serviceToken.token | b64enc | quote }} +{{- end }} diff --git a/manifests/bucketeer/charts/notification-sender/templates/service.yaml b/manifests/bucketeer/charts/notification-sender/templates/service.yaml new file mode 100644 index 000000000..1fccc3217 --- /dev/null +++ b/manifests/bucketeer/charts/notification-sender/templates/service.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "notification-sender.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "notification-sender.name" . }} + chart: {{ template "notification-sender.chart" . }} + release: {{ template "notification-sender.fullname" . }} + heritage: {{ .Release.Service }} + envoy: "true" + metrics: "true" +spec: + type: {{ .Values.service.type }} + clusterIP: {{ .Values.service.clusterIP }} + ports: + - name: service + port: {{ .Values.service.externalPort }} + targetPort: envoy + protocol: TCP + - name: metrics + port: {{ .Values.env.metricsPort }} + protocol: TCP + - name: admin + port: {{ .Values.envoy.adminPort }} + protocol: TCP + selector: + app: {{ template "notification-sender.name" . }} + release: {{ template "notification-sender.fullname" . 
}} diff --git a/manifests/bucketeer/charts/notification-sender/values.yaml b/manifests/bucketeer/charts/notification-sender/values.yaml new file mode 100644 index 000000000..1a34a349f --- /dev/null +++ b/manifests/bucketeer/charts/notification-sender/values.yaml @@ -0,0 +1,67 @@ +image: + repository: ghcr.io/bucketeer-io/bucketeer-notification + pullPolicy: IfNotPresent + +fullnameOverride: "notification-sender" + +namespace: + +env: + project: + domainTopic: + domainSubscription: + notificationService: localhost:9001 + environmentService: localhost:9001 + experimentService: localhost:9001 + eventCounterService: localhost:9001 + featureService: localhost:9001 + scheduleFeatureStaleWatcher: + scheduleExperimentRunningWatcher: + scheduleMauCountWatcher: + webURL: + maxMps: "1000" + numWorkers: 1 + pullerNumGoroutines: 5 + pullerMaxOutstandingMessages: "1000" + pullerMaxOutstandingBytes: "1000000000" + logLevel: info + port: 9090 + metricsPort: 9002 + +affinity: {} + +nodeSelector: {} + +replicaCount: 1 + +envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + +tls: + service: + secret: + cert: + key: + +serviceToken: + secret: + token: + +service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + +health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + +resources: {} diff --git a/manifests/bucketeer/charts/notification/Chart.yaml b/manifests/bucketeer/charts/notification/Chart.yaml new file mode 100644 index 000000000..2f074b3cd --- /dev/null +++ b/manifests/bucketeer/charts/notification/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for bucketeer-notification +name: notification +version: 1.0.0 diff --git a/manifests/bucketeer/charts/notification/templates/NOTES.txt b/manifests/bucketeer/charts/notification/templates/NOTES.txt new file mode 100644 index 000000000..25e838ad3 --- /dev/null +++ 
b/manifests/bucketeer/charts/notification/templates/NOTES.txt @@ -0,0 +1,15 @@ +1. Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "notification.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch its status by running 'kubectl get svc -w {{ template "notification.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "notification.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "notification.name" . }},release={{ template "notification.fullname" . }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/manifests/bucketeer/charts/notification/templates/_helpers.tpl b/manifests/bucketeer/charts/notification/templates/_helpers.tpl new file mode 100644 index 000000000..5e0adca16 --- /dev/null +++ b/manifests/bucketeer/charts/notification/templates/_helpers.tpl @@ -0,0 +1,56 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "notification.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "notification.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "notification.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "service-cert-secret" -}} +{{- if .Values.tls.service.secret }} +{{- printf "%s" .Values.tls.service.secret -}} +{{- else -}} +{{ template "notification.fullname" . }}-service-cert +{{- end -}} +{{- end -}} + +{{- define "oauth-key-secret" -}} +{{- if .Values.oauth.key.secret }} +{{- printf "%s" .Values.oauth.key.secret -}} +{{- else -}} +{{ template "notification.fullname" . }}-oauth-key +{{- end -}} +{{- end -}} + +{{- define "service-token-secret" -}} +{{- if .Values.serviceToken.secret }} +{{- printf "%s" .Values.serviceToken.secret -}} +{{- else -}} +{{ template "notification.fullname" . }}-service-token +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/manifests/bucketeer/charts/notification/templates/deployment.yaml b/manifests/bucketeer/charts/notification/templates/deployment.yaml new file mode 100644 index 000000000..55dd98a24 --- /dev/null +++ b/manifests/bucketeer/charts/notification/templates/deployment.yaml @@ -0,0 +1,163 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "notification.fullname" . 
}} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "notification.name" . }} + chart: {{ template "notification.chart" . }} + release: {{ template "notification.fullname" . }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "notification.name" . }} + release: {{ template "notification.fullname" . }} + template: + metadata: + labels: + app: {{ template "notification.name" . }} + release: {{ template "notification.fullname" . }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/envoy-configmap.yaml") . | sha256sum }} + spec: + {{- with .Values.global.image.imagePullSecrets }} + imagePullSecrets: {{- toYaml . | nindent 8 }} + {{- end }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + volumes: + - name: envoy-config + configMap: + name: {{ template "notification.fullname" . }}-envoy-config + - name: service-cert-secret + secret: + secretName: {{ template "service-cert-secret" . }} + - name: oauth-key-secret + secret: + secretName: {{ template "oauth-key-secret" . }} + - name: service-token-secret + secret: + secretName: {{ template "service-token-secret" . 
}} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: ["server"] + env: + - name: BUCKETEER_NOTIFICATION_PROJECT + value: "{{ .Values.env.project }}" + - name: BUCKETEER_NOTIFICATION_MYSQL_USER + value: "{{ .Values.env.mysqlUser }}" + - name: BUCKETEER_NOTIFICATION_MYSQL_PASS + value: "{{ .Values.env.mysqlPass }}" + - name: BUCKETEER_NOTIFICATION_MYSQL_HOST + value: "{{ .Values.env.mysqlHost }}" + - name: BUCKETEER_NOTIFICATION_MYSQL_PORT + value: "{{ .Values.env.mysqlPort }}" + - name: BUCKETEER_NOTIFICATION_MYSQL_DB_NAME + value: "{{ .Values.env.mysqlDbName }}" + - name: BUCKETEER_NOTIFICATION_DOMAIN_EVENT_TOPIC + value: "{{ .Values.env.domainEventTopic }}" + - name: BUCKETEER_NOTIFICATION_ACCOUNT_SERVICE + value: "{{ .Values.env.accountService }}" + - name: BUCKETEER_NOTIFICATION_PORT + value: "{{ .Values.env.port }}" + - name: BUCKETEER_NOTIFICATION_METRICS_PORT + value: "{{ .Values.env.metricsPort }}" + - name: BUCKETEER_NOTIFICATION_LOG_LEVEL + value: "{{ .Values.env.logLevel }}" + - name: BUCKETEER_NOTIFICATION_OAUTH_CLIENT_ID + value: "{{ .Values.oauth.clientId }}" + - name: BUCKETEER_NOTIFICATION_OAUTH_ISSUER + value: "{{ .Values.oauth.issuer }}" + - name: BUCKETEER_NOTIFICATION_OAUTH_KEY + value: /usr/local/oauth-key/public.pem + - name: BUCKETEER_NOTIFICATION_CERT + value: /usr/local/certs/service/tls.crt + - name: BUCKETEER_NOTIFICATION_KEY + value: /usr/local/certs/service/tls.key + - name: BUCKETEER_NOTIFICATION_SERVICE_TOKEN + value: /usr/local/service-token/token + volumeMounts: + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + - name: oauth-key-secret + mountPath: /usr/local/oauth-key + readOnly: true + - name: service-token-secret + mountPath: /usr/local/service-token + readOnly: true + ports: + - name: service + containerPort: {{ .Values.env.port }} + - name: metrics + containerPort: {{ 
.Values.env.metricsPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: service + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: service + scheme: HTTPS + resources: +{{ toYaml .Values.resources | indent 12 }} + - name: envoy + image: "{{ .Values.envoy.image.repository }}:{{ .Values.envoy.image.tag }}" + imagePullPolicy: {{ .Values.envoy.image.pullPolicy }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-c" + - "while [ $(netstat -plunt | grep tcp | grep -v envoy | wc -l) -ne 0 ]; do sleep 1; done;" + command: ["envoy"] + args: + - "-c" + - "/usr/local/conf/config.yaml" + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + - name: envoy-config + mountPath: /usr/local/conf/ + readOnly: true + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + ports: + - name: envoy + containerPort: {{ .Values.envoy.port }} + - name: admin + containerPort: {{ .Values.envoy.adminPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + resources: +{{ toYaml .Values.envoy.resources | indent 12 }} + strategy: + type: RollingUpdate diff --git a/manifests/bucketeer/charts/notification/templates/envoy-configmap.yaml b/manifests/bucketeer/charts/notification/templates/envoy-configmap.yaml new file mode 100644 index 000000000..4eba3d817 --- /dev/null +++ 
b/manifests/bucketeer/charts/notification/templates/envoy-configmap.yaml @@ -0,0 +1,232 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "notification.fullname" . }}-envoy-config + namespace: {{ .Values.namespace }} + labels: + app: {{ template "notification.name" . }} + chart: {{ template "notification.chart" . }} + release: {{ template "notification.fullname" . }} + heritage: {{ .Release.Service }} +data: + config.yaml: |- + admin: + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 + static_resources: + clusters: + - name: notification + type: strict_dns + lb_policy: round_robin + connect_timeout: 5s + dns_lookup_family: V4_ONLY + load_assignment: + cluster_name: notification + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 9090 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + + - name: account + type: strict_dns + lb_policy: round_robin + connect_timeout: 5s + dns_lookup_family: V4_ONLY + load_assignment: + cluster_name: account + endpoints: + - lb_endpoints: + - 
endpoint: + address: + socket_address: + address: account.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + + listeners: + - name: ingress + address: + socket_address: + address: 0.0.0.0 + port_value: 9000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + notification: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: ingress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: / + route: + cluster: notification + 
retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: ingress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true + + - name: egress + address: + socket_address: + address: 127.0.0.1 + port_value: 9001 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + account: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: egress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.account.AccountService + route: + cluster: account + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: egress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: 
/usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true diff --git a/manifests/bucketeer/charts/notification/templates/hpa.yaml b/manifests/bucketeer/charts/notification/templates/hpa.yaml new file mode 100644 index 000000000..d0a9b02c2 --- /dev/null +++ b/manifests/bucketeer/charts/notification/templates/hpa.yaml @@ -0,0 +1,19 @@ +{{ if .Values.hpa.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "notification.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "notification.fullname" . }} + minReplicas: {{ .Values.hpa.minReplicas }} + maxReplicas: {{ .Values.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpa.metrics.cpu.targetAverageUtilization }} +{{ end }} diff --git a/manifests/bucketeer/charts/notification/templates/oauth-key-secret.yaml b/manifests/bucketeer/charts/notification/templates/oauth-key-secret.yaml new file mode 100644 index 000000000..dff92e07b --- /dev/null +++ b/manifests/bucketeer/charts/notification/templates/oauth-key-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.oauth.key.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "notification.fullname" . }}-oauth-key + namespace: {{ .Values.namespace }} + labels: + app: {{ template "notification.name" . }} + chart: {{ template "notification.chart" . }} + release: {{ template "notification.fullname" . 
}} + heritage: {{ .Release.Service }} +type: Opaque +data: + public.pem: {{ required "OAuth key is required" .Values.oauth.key.public | b64enc | quote }} +{{- end }} diff --git a/manifests/bucketeer/charts/notification/templates/pdb.yaml b/manifests/bucketeer/charts/notification/templates/pdb.yaml new file mode 100644 index 000000000..e252938bf --- /dev/null +++ b/manifests/bucketeer/charts/notification/templates/pdb.yaml @@ -0,0 +1,12 @@ +{{ if .Values.pdb.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "notification.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + selector: + matchLabels: + app: {{ template "notification.name" . }} +{{ end }} diff --git a/manifests/bucketeer/charts/notification/templates/service-cert-secret.yaml b/manifests/bucketeer/charts/notification/templates/service-cert-secret.yaml new file mode 100644 index 000000000..f194a0630 --- /dev/null +++ b/manifests/bucketeer/charts/notification/templates/service-cert-secret.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.tls.service.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "notification.fullname" . }}-service-cert + namespace: {{ .Values.namespace }} + labels: + app: {{ template "notification.name" . }} + chart: {{ template "notification.chart" . }} + release: {{ template "notification.fullname" . 
}} + heritage: {{ .Release.Service }} +type: Opaque +data: + tls.crt: {{ required "Service TLS certificate is required" .Values.tls.service.cert | b64enc | quote }} + tls.key: {{ required "Service TLS key is required" .Values.tls.service.key | b64enc | quote }} +{{- end }} diff --git a/manifests/bucketeer/charts/notification/templates/service-token-secret.yaml b/manifests/bucketeer/charts/notification/templates/service-token-secret.yaml new file mode 100644 index 000000000..9c583c1e2 --- /dev/null +++ b/manifests/bucketeer/charts/notification/templates/service-token-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.serviceToken.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "notification.fullname" . }}-service-token + namespace: {{ .Values.namespace }} + labels: + app: {{ template "notification.name" . }} + chart: {{ template "notification.chart" . }} + release: {{ template "notification.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + token: {{ required "Service token is required" .Values.serviceToken.token | b64enc | quote }} +{{- end }} diff --git a/manifests/bucketeer/charts/notification/templates/service.yaml b/manifests/bucketeer/charts/notification/templates/service.yaml new file mode 100644 index 000000000..a27d71006 --- /dev/null +++ b/manifests/bucketeer/charts/notification/templates/service.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "notification.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "notification.name" . }} + chart: {{ template "notification.chart" . }} + release: {{ template "notification.fullname" . 
}} + heritage: {{ .Release.Service }} + envoy: "true" + metrics: "true" +spec: + type: {{ .Values.service.type }} + clusterIP: {{ .Values.service.clusterIP }} + ports: + - name: service + port: {{ .Values.service.externalPort }} + targetPort: envoy + protocol: TCP + - name: metrics + port: {{ .Values.env.metricsPort }} + protocol: TCP + - name: admin + port: {{ .Values.envoy.adminPort }} + protocol: TCP + selector: + app: {{ template "notification.name" . }} + release: {{ template "notification.fullname" . }} diff --git a/manifests/bucketeer/charts/notification/values.yaml b/manifests/bucketeer/charts/notification/values.yaml new file mode 100644 index 000000000..1b2814e30 --- /dev/null +++ b/manifests/bucketeer/charts/notification/values.yaml @@ -0,0 +1,75 @@ +image: + repository: ghcr.io/bucketeer-io/bucketeer-notification + pullPolicy: IfNotPresent + +fullnameOverride: "notification" + +namespace: + +env: + project: + mysqlUser: + mysqlPass: + mysqlHost: + mysqlPort: 3306 + mysqlDbName: + logLevel: info + port: 9090 + metricsPort: 9002 + domainEventTopic: + accountService: localhost:9001 + +affinity: {} + +nodeSelector: {} + +pdb: + enabled: + maxUnavailable: 50% + +hpa: + enabled: true + minReplicas: + maxReplicas: + metrics: + cpu: + targetAverageUtilization: 75 + +tls: + service: + secret: + cert: + key: + +oauth: + key: + secret: + public: + clientId: + issuer: + +serviceToken: + secret: + token: + +envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + +service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + +health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + +resources: {} diff --git a/manifests/bucketeer/charts/ops-event-batch/Chart.yaml b/manifests/bucketeer/charts/ops-event-batch/Chart.yaml new file mode 100644 index 000000000..84a708605 --- /dev/null +++ b/manifests/bucketeer/charts/ops-event-batch/Chart.yaml @@ 
-0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for bucketeer-ops-event-batch +name: ops-event-batch +version: 1.0.0 diff --git a/manifests/bucketeer/charts/ops-event-batch/templates/NOTES.txt b/manifests/bucketeer/charts/ops-event-batch/templates/NOTES.txt new file mode 100644 index 000000000..6cefb0762 --- /dev/null +++ b/manifests/bucketeer/charts/ops-event-batch/templates/NOTES.txt @@ -0,0 +1,15 @@ +1. Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "ops-event-batch.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch its status by running 'kubectl get svc -w {{ template "ops-event-batch.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "ops-event-batch.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "ops-event-batch.name" . }},release={{ template "ops-event-batch.fullname" . 
}}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/manifests/bucketeer/charts/ops-event-batch/templates/_helpers.tpl b/manifests/bucketeer/charts/ops-event-batch/templates/_helpers.tpl new file mode 100644 index 000000000..478912de8 --- /dev/null +++ b/manifests/bucketeer/charts/ops-event-batch/templates/_helpers.tpl @@ -0,0 +1,48 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "ops-event-batch.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "ops-event-batch.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "ops-event-batch.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "service-cert-secret" -}} +{{- if .Values.tls.service.secret }} +{{- printf "%s" .Values.tls.service.secret -}} +{{- else -}} +{{ template "ops-event-batch.fullname" . }}-service-cert +{{- end -}} +{{- end -}} + +{{- define "service-token-secret" -}} +{{- if .Values.serviceToken.secret }} +{{- printf "%s" .Values.serviceToken.secret -}} +{{- else -}} +{{ template "ops-event-batch.fullname" . 
}}-service-token +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/manifests/bucketeer/charts/ops-event-batch/templates/deployment.yaml b/manifests/bucketeer/charts/ops-event-batch/templates/deployment.yaml new file mode 100644 index 000000000..dd0c737e2 --- /dev/null +++ b/manifests/bucketeer/charts/ops-event-batch/templates/deployment.yaml @@ -0,0 +1,160 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "ops-event-batch.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "ops-event-batch.name" . }} + chart: {{ template "ops-event-batch.chart" . }} + release: {{ template "ops-event-batch.fullname" . }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app: {{ template "ops-event-batch.name" . }} + release: {{ template "ops-event-batch.fullname" . }} + template: + metadata: + labels: + app: {{ template "ops-event-batch.name" . }} + release: {{ template "ops-event-batch.fullname" . }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/envoy-configmap.yaml") . | sha256sum }} + spec: + {{- with .Values.global.image.imagePullSecrets }} + imagePullSecrets: {{- toYaml . | nindent 8 }} + {{- end }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + volumes: + - name: envoy-config + configMap: + name: {{ template "ops-event-batch.fullname" . }}-envoy-config + - name: service-cert-secret + secret: + secretName: {{ template "service-cert-secret" . }} + - name: service-token-secret + secret: + secretName: {{ template "service-token-secret" . 
}} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: ["batch"] + env: + - name: BUCKETEER_OPS_EVENT_PROJECT + value: "{{ .Values.env.project }}" + - name: BUCKETEER_OPS_EVENT_MYSQL_USER + value: "{{ .Values.env.mysqlUser }}" + - name: BUCKETEER_OPS_EVENT_MYSQL_PASS + value: "{{ .Values.env.mysqlPass }}" + - name: BUCKETEER_OPS_EVENT_MYSQL_HOST + value: "{{ .Values.env.mysqlHost }}" + - name: BUCKETEER_OPS_EVENT_MYSQL_PORT + value: "{{ .Values.env.mysqlPort }}" + - name: BUCKETEER_OPS_EVENT_MYSQL_DB_NAME + value: "{{ .Values.env.mysqlDbName }}" + - name: BUCKETEER_OPS_EVENT_AUTO_OPS_SERVICE + value: "{{ .Values.env.autoOpsService }}" + - name: BUCKETEER_OPS_EVENT_ENVIRONMENT_SERVICE + value: "{{ .Values.env.environmentService }}" + - name: BUCKETEER_OPS_EVENT_EVENT_COUNTER_SERVICE + value: "{{ .Values.env.eventCounterService }}" + - name: BUCKETEER_OPS_EVENT_FEATURE_SERVICE + value: "{{ .Values.env.featureService }}" + - name: BUCKETEER_OPS_EVENT_SCHEDULE_COUNT_WATCHER + value: "{{ .Values.env.scheduleCountWatcher }}" + - name: BUCKETEER_OPS_EVENT_SCHEDULE_DATETIME_WATCHER + value: "{{ .Values.env.scheduleDatetimeWatcher }}" + - name: BUCKETEER_OPS_EVENT_REFRESH_INTERVAL + value: "{{ .Values.env.refreshInterval }}" + - name: BUCKETEER_OPS_EVENT_LOG_LEVEL + value: "{{ .Values.env.logLevel }}" + - name: BUCKETEER_OPS_EVENT_PORT + value: "{{ .Values.env.port }}" + - name: BUCKETEER_OPS_EVENT_METRICS_PORT + value: "{{ .Values.env.metricsPort }}" + - name: BUCKETEER_OPS_EVENT_SERVICE_TOKEN + value: /usr/local/service-token/token + - name: BUCKETEER_OPS_EVENT_CERT + value: /usr/local/certs/service/tls.crt + - name: BUCKETEER_OPS_EVENT_KEY + value: /usr/local/certs/service/tls.key + volumeMounts: + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + - name: service-token-secret + mountPath: /usr/local/service-token + 
readOnly: true + ports: + - name: service + containerPort: {{ .Values.env.port }} + - name: metrics + containerPort: {{ .Values.env.metricsPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + httpGet: + path: /health + port: service + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: service + scheme: HTTPS + resources: +{{ toYaml .Values.resources | indent 12 }} + - name: envoy + image: "{{ .Values.envoy.image.repository }}:{{ .Values.envoy.image.tag }}" + imagePullPolicy: {{ .Values.envoy.image.pullPolicy }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-c" + - "while [ $(netstat -plunt | grep tcp | grep -v envoy | wc -l) -ne 0 ]; do sleep 1; done;" + command: ["envoy"] + args: + - "-c" + - "/usr/local/conf/config.yaml" + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + - name: envoy-config + mountPath: /usr/local/conf/ + readOnly: true + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + ports: + - name: envoy + containerPort: {{ .Values.envoy.port }} + - name: admin + containerPort: {{ .Values.envoy.adminPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + resources: +{{ toYaml .Values.envoy.resources | indent 12 }} + strategy: + type: RollingUpdate diff --git a/manifests/bucketeer/charts/ops-event-batch/templates/envoy-configmap.yaml b/manifests/bucketeer/charts/ops-event-batch/templates/envoy-configmap.yaml new file mode 100644 index 000000000..b93bed2ea --- /dev/null +++ 
b/manifests/bucketeer/charts/ops-event-batch/templates/envoy-configmap.yaml @@ -0,0 +1,396 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "ops-event-batch.fullname" . }}-envoy-config + namespace: {{ .Values.namespace }} + labels: + app: {{ template "ops-event-batch.name" . }} + chart: {{ template "ops-event-batch.chart" . }} + release: {{ template "ops-event-batch.fullname" . }} + heritage: {{ .Release.Service }} +data: + config.yaml: |- + admin: + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 + static_resources: + clusters: + - name: ops-event-batch + type: strict_dns + lb_policy: round_robin + connect_timeout: 5s + dns_lookup_family: V4_ONLY + load_assignment: + cluster_name: ops-event-batch + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 9090 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + + - name: environment + type: strict_dns + lb_policy: round_robin + connect_timeout: 5s + dns_lookup_family: V4_ONLY + load_assignment: + cluster_name: environment + 
endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: environment.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + + - name: auto-ops + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + lb_policy: round_robin + load_assignment: + cluster_name: auto-ops + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: auto-ops.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + 
timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + + - name: event-counter-server + connect_timeout: 5s + dns_lookup_family: V4_ONLY + lb_policy: round_robin + load_assignment: + cluster_name: event-counter-server + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: event-counter.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + type: strict_dns + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + + - name: feature + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + lb_policy: round_robin + load_assignment: + cluster_name: feature + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: feature.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': 
type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + + listeners: + - name: ingress + address: + socket_address: + address: 0.0.0.0 + port_value: 9000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + ops-event-batch: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: ingress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: / + route: + cluster: ops-event-batch + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: ingress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true + - name: egress + address: + socket_address: + address: 127.0.0.1 + 
port_value: 9001 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + auto-ops: + value: 25 + environment: + value: 25 + event-counter-server: + value: 25 + feature: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: egress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.environment.EnvironmentService + route: + cluster: environment + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.autoops.AutoOpsService + route: + cluster: auto-ops + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.eventcounter.EventCounterService + route: + cluster: event-counter-server + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.feature.FeatureService + route: + cluster: feature + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: egress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + 
'@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true diff --git a/manifests/bucketeer/charts/ops-event-batch/templates/service-cert-secret.yaml b/manifests/bucketeer/charts/ops-event-batch/templates/service-cert-secret.yaml new file mode 100644 index 000000000..fdd9d0d35 --- /dev/null +++ b/manifests/bucketeer/charts/ops-event-batch/templates/service-cert-secret.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.tls.service.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "ops-event-batch.fullname" . }}-service-cert + namespace: {{ .Values.namespace }} + labels: + app: {{ template "ops-event-batch.name" . }} + chart: {{ template "ops-event-batch.chart" . }} + release: {{ template "ops-event-batch.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + tls.crt: {{ required "Service TLS certificate is required" .Values.tls.service.cert | b64enc | quote }} + tls.key: {{ required "Service TLS key is required" .Values.tls.service.key | b64enc | quote }} +{{- end }} diff --git a/manifests/bucketeer/charts/ops-event-batch/templates/service-token-secret.yaml b/manifests/bucketeer/charts/ops-event-batch/templates/service-token-secret.yaml new file mode 100644 index 000000000..7d73ba046 --- /dev/null +++ b/manifests/bucketeer/charts/ops-event-batch/templates/service-token-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.serviceToken.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "ops-event-batch.fullname" . }}-service-token + namespace: {{ .Values.namespace }} + labels: + app: {{ template "ops-event-batch.name" . }} + chart: {{ template "ops-event-batch.chart" . }} + release: {{ template "ops-event-batch.fullname" . 
}} + heritage: {{ .Release.Service }} +type: Opaque +data: + token: {{ required "Service token is required" .Values.serviceToken.token | b64enc | quote }} +{{- end }} diff --git a/manifests/bucketeer/charts/ops-event-batch/templates/service.yaml b/manifests/bucketeer/charts/ops-event-batch/templates/service.yaml new file mode 100644 index 000000000..602451b71 --- /dev/null +++ b/manifests/bucketeer/charts/ops-event-batch/templates/service.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "ops-event-batch.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "ops-event-batch.name" . }} + chart: {{ template "ops-event-batch.chart" . }} + release: {{ template "ops-event-batch.fullname" . }} + heritage: {{ .Release.Service }} + envoy: "true" + metrics: "true" +spec: + type: {{ .Values.service.type }} + clusterIP: {{ .Values.service.clusterIP }} + ports: + - name: service + port: {{ .Values.service.externalPort }} + targetPort: envoy + protocol: TCP + - name: metrics + port: {{ .Values.env.metricsPort }} + protocol: TCP + - name: admin + port: {{ .Values.envoy.adminPort }} + protocol: TCP + selector: + app: {{ template "ops-event-batch.name" . }} + release: {{ template "ops-event-batch.fullname" . 
}} diff --git a/manifests/bucketeer/charts/ops-event-batch/values.yaml b/manifests/bucketeer/charts/ops-event-batch/values.yaml new file mode 100644 index 000000000..07ae510d9 --- /dev/null +++ b/manifests/bucketeer/charts/ops-event-batch/values.yaml @@ -0,0 +1,61 @@ +image: + repository: ghcr.io/bucketeer-io/bucketeer-ops-event + pullPolicy: IfNotPresent + +fullnameOverride: "ops-event-batch" + +namespace: + +env: + project: + mysqlUser: + mysqlPass: + mysqlHost: + mysqlPort: 3306 + mysqlDbName: + autoOpsService: localhost:9001 + environmentService: localhost:9001 + eventCounterService: localhost:9001 + featureService: localhost:9001 + refreshInterval: 10m + logLevel: info + port: 9090 + metricsPort: 9002 + scheduleCountWatcher: "0,10,20,30,40,50 * * * * *" + scheduleDatetimeWatcher: "0,10,20,30,40,50 * * * * *" + +affinity: {} + +nodeSelector: {} + +replicaCount: 1 + +envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + port: 9000 + adminPort: 8001 + resources: {} + +tls: + service: + secret: + cert: + key: + +serviceToken: + secret: + token: + +service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + +health: + initialDelaySeconds: 10 + periodSeconds: 10 + +resources: {} diff --git a/manifests/bucketeer/charts/push-sender/Chart.yaml b/manifests/bucketeer/charts/push-sender/Chart.yaml new file mode 100644 index 000000000..afed6a0e3 --- /dev/null +++ b/manifests/bucketeer/charts/push-sender/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for bucketeer-push-sender +name: push-sender +version: 1.0.0 diff --git a/manifests/bucketeer/charts/push-sender/templates/NOTES.txt b/manifests/bucketeer/charts/push-sender/templates/NOTES.txt new file mode 100644 index 000000000..940a932ea --- /dev/null +++ b/manifests/bucketeer/charts/push-sender/templates/NOTES.txt @@ -0,0 +1,15 @@ +1. 
Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "push-sender.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of it by running 'kubectl get svc -w {{ template "push-sender.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "push-sender.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "push-sender.name" . }},release={{ template "push-sender.fullname" . }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/manifests/bucketeer/charts/push-sender/templates/_helpers.tpl b/manifests/bucketeer/charts/push-sender/templates/_helpers.tpl new file mode 100644 index 000000000..4dfe2575c --- /dev/null +++ b/manifests/bucketeer/charts/push-sender/templates/_helpers.tpl @@ -0,0 +1,48 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "push-sender.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "push-sender.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "push-sender.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "service-cert-secret" -}} +{{- if .Values.tls.service.secret }} +{{- printf "%s" .Values.tls.service.secret -}} +{{- else -}} +{{ template "push-sender.fullname" . }}-service-cert +{{- end -}} +{{- end -}} + +{{- define "service-token-secret" -}} +{{- if .Values.serviceToken.secret }} +{{- printf "%s" .Values.serviceToken.secret -}} +{{- else -}} +{{ template "push-sender.fullname" . }}-service-token +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/manifests/bucketeer/charts/push-sender/templates/deployment.yaml b/manifests/bucketeer/charts/push-sender/templates/deployment.yaml new file mode 100644 index 000000000..805b642b6 --- /dev/null +++ b/manifests/bucketeer/charts/push-sender/templates/deployment.yaml @@ -0,0 +1,162 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "push-sender.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "push-sender.name" . }} + chart: {{ template "push-sender.chart" . }} + release: {{ template "push-sender.fullname" . }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app: {{ template "push-sender.name" . }} + release: {{ template "push-sender.fullname" . }} + template: + metadata: + labels: + app: {{ template "push-sender.name" . 
}} + release: {{ template "push-sender.fullname" . }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/envoy-configmap.yaml") . | sha256sum }} + spec: + {{- with .Values.global.image.imagePullSecrets }} + imagePullSecrets: {{- toYaml . | nindent 8 }} + {{- end }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + volumes: + - name: envoy-config + configMap: + name: {{ template "push-sender.fullname" . }}-envoy-config + - name: service-cert-secret + secret: + secretName: {{ template "service-cert-secret" . }} + - name: service-token-secret + secret: + secretName: {{ template "service-token-secret" . }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: ["sender"] + env: + - name: BUCKETEER_PUSH_PROJECT + value: "{{ .Values.env.project }}" + - name: BUCKETEER_PUSH_DOMAIN_TOPIC + value: "{{ .Values.env.domainTopic }}" + - name: BUCKETEER_PUSH_DOMAIN_SUBSCRIPTION + value: "{{ .Values.env.domainSubscription }}" + - name: BUCKETEER_PUSH_PUSH_SERVICE + value: "{{ .Values.env.pushService }}" + - name: BUCKETEER_PUSH_FEATURE_SERVICE + value: "{{ .Values.env.featureService }}" + - name: BUCKETEER_PUSH_MAX_MPS + value: "{{ .Values.env.maxMps }}" + - name: BUCKETEER_PUSH_NUM_WORKERS + value: "{{ .Values.env.numWorkers }}" + - name: BUCKETEER_PUSH_PULLER_NUM_GOROUTINES + value: "{{ .Values.env.pullerNumGoroutines }}" + - name: BUCKETEER_PUSH_PULLER_MAX_OUTSTANDING_MESSAGES + value: "{{ .Values.env.pullerMaxOutstandingMessages }}" + - name: BUCKETEER_PUSH_PULLER_MAX_OUTSTANDING_BYTES + value: "{{ .Values.env.pullerMaxOutstandingBytes }}" + - name: BUCKETEER_PUSH_REDIS_SERVER_NAME + value: "{{ .Values.env.redis.serverName }}" + - name: BUCKETEER_PUSH_REDIS_ADDR + value: "{{ .Values.env.redis.addr }}" + - name: BUCKETEER_PUSH_REDIS_POOL_MAX_IDLE + value: "{{ 
.Values.env.redis.poolMaxIdle }}" + - name: BUCKETEER_PUSH_REDIS_POOL_MAX_ACTIVE + value: "{{ .Values.env.redis.poolMaxActive }}" + - name: BUCKETEER_PUSH_LOG_LEVEL + value: "{{ .Values.env.logLevel }}" + - name: BUCKETEER_PUSH_PORT + value: "{{ .Values.env.port }}" + - name: BUCKETEER_PUSH_METRICS_PORT + value: "{{ .Values.env.metricsPort }}" + - name: BUCKETEER_PUSH_SERVICE_TOKEN + value: /usr/local/service-token/token + - name: BUCKETEER_PUSH_CERT + value: /usr/local/certs/service/tls.crt + - name: BUCKETEER_PUSH_KEY + value: /usr/local/certs/service/tls.key + volumeMounts: + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + - name: service-token-secret + mountPath: /usr/local/service-token + readOnly: true + ports: + - name: service + containerPort: {{ .Values.env.port }} + - name: metrics + containerPort: {{ .Values.env.metricsPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + httpGet: + path: /health + port: service + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: service + scheme: HTTPS + resources: +{{ toYaml .Values.resources | indent 12 }} + - name: envoy + image: "{{ .Values.envoy.image.repository }}:{{ .Values.envoy.image.tag }}" + imagePullPolicy: {{ .Values.envoy.image.pullPolicy }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-c" + - "while [ $(netstat -plunt | grep tcp | grep -v envoy | wc -l) -ne 0 ]; do sleep 1; done;" + command: ["envoy"] + args: + - "-c" + - "/usr/local/conf/config.yaml" + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + - name: envoy-config + mountPath: /usr/local/conf/ + readOnly: true + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + ports: + - name: envoy + containerPort: {{ .Values.envoy.port }} + - name: admin + 
containerPort: {{ .Values.envoy.adminPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + resources: +{{ toYaml .Values.envoy.resources | indent 12 }} + strategy: + type: RollingUpdate diff --git a/manifests/bucketeer/charts/push-sender/templates/envoy-configmap.yaml b/manifests/bucketeer/charts/push-sender/templates/envoy-configmap.yaml new file mode 100644 index 000000000..b4e301c3c --- /dev/null +++ b/manifests/bucketeer/charts/push-sender/templates/envoy-configmap.yaml @@ -0,0 +1,286 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "push-sender.fullname" . }}-envoy-config + namespace: {{ .Values.namespace }} + labels: + app: {{ template "push-sender.name" . }} + chart: {{ template "push-sender.chart" . }} + release: {{ template "push-sender.fullname" . 
}} + heritage: {{ .Release.Service }} +data: + config.yaml: |- + admin: + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 + static_resources: + clusters: + - name: push-sender + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + lb_policy: round_robin + load_assignment: + cluster_name: push-sender + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 9090 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + + - name: feature + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + lb_policy: round_robin + load_assignment: + cluster_name: feature + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: feature.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: 
/usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + + - name: push + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + lb_policy: round_robin + load_assignment: + cluster_name: push + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: push.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + listeners: + - name: ingress + address: + socket_address: + address: 0.0.0.0 + port_value: 9000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + 
typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + push-sender: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: ingress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: / + route: + cluster: push-sender + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: ingress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true + + - name: egress + address: + socket_address: + address: 127.0.0.1 + port_value: 9001 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + feature: + value: 25 + push: + value: 25 + headers: + - name: :path + string_match: + 
exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: egress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.feature.FeatureService + route: + cluster: feature + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.push.PushService + route: + cluster: push + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: egress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true diff --git a/manifests/bucketeer/charts/push-sender/templates/service-cert-secret.yaml b/manifests/bucketeer/charts/push-sender/templates/service-cert-secret.yaml new file mode 100644 index 000000000..66a5c50a0 --- /dev/null +++ b/manifests/bucketeer/charts/push-sender/templates/service-cert-secret.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.tls.service.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "push-sender.fullname" . }}-service-cert + namespace: {{ .Values.namespace }} + labels: + app: {{ template "push-sender.name" . }} + chart: {{ template "push-sender.chart" . }} + release: {{ template "push-sender.fullname" . 
}} + heritage: {{ .Release.Service }} +type: Opaque +data: + tls.crt: {{ required "Service TLS certificate is required" .Values.tls.service.cert | b64enc | quote }} + tls.key: {{ required "Service TLS key is required" .Values.tls.service.key | b64enc | quote }} +{{- end }} diff --git a/manifests/bucketeer/charts/push-sender/templates/service-token-secret.yaml b/manifests/bucketeer/charts/push-sender/templates/service-token-secret.yaml new file mode 100644 index 000000000..0e8ce26b1 --- /dev/null +++ b/manifests/bucketeer/charts/push-sender/templates/service-token-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.serviceToken.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "push-sender.fullname" . }}-service-token + namespace: {{ .Values.namespace }} + labels: + app: {{ template "push-sender.name" . }} + chart: {{ template "push-sender.chart" . }} + release: {{ template "push-sender.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + token: {{ required "Service token is required" .Values.serviceToken.token | b64enc | quote }} +{{- end }} diff --git a/manifests/bucketeer/charts/push-sender/templates/service.yaml b/manifests/bucketeer/charts/push-sender/templates/service.yaml new file mode 100644 index 000000000..c9ad610a1 --- /dev/null +++ b/manifests/bucketeer/charts/push-sender/templates/service.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "push-sender.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "push-sender.name" . }} + chart: {{ template "push-sender.chart" . }} + release: {{ template "push-sender.fullname" . 
}} + heritage: {{ .Release.Service }} + envoy: "true" + metrics: "true" +spec: + type: {{ .Values.service.type }} + clusterIP: {{ .Values.service.clusterIP }} + ports: + - name: service + port: {{ .Values.service.externalPort }} + targetPort: envoy + protocol: TCP + - name: metrics + port: {{ .Values.env.metricsPort }} + protocol: TCP + - name: admin + port: {{ .Values.envoy.adminPort }} + protocol: TCP + selector: + app: {{ template "push-sender.name" . }} + release: {{ template "push-sender.fullname" . }} diff --git a/manifests/bucketeer/charts/push-sender/values.yaml b/manifests/bucketeer/charts/push-sender/values.yaml new file mode 100644 index 000000000..87a8c97e4 --- /dev/null +++ b/manifests/bucketeer/charts/push-sender/values.yaml @@ -0,0 +1,65 @@ +image: + repository: ghcr.io/bucketeer-io/bucketeer-push + pullPolicy: IfNotPresent + +fullnameOverride: "push-sender" + +namespace: + +env: + project: + domainTopic: + domainSubscription: + pushService: localhost:9001 + featureService: localhost:9001 + maxMps: "1000" + numWorkers: 2 + pullerNumGoroutines: 5 + pullerMaxOutstandingMessages: "1000" + pullerMaxOutstandingBytes: "1000000000" + redis: + serverName: non-persistent-redis + poolMaxIdle: 5 + poolMaxActive: 20 + addr: + logLevel: info + port: 9090 + metricsPort: 9002 + +affinity: {} + +nodeSelector: {} + +replicaCount: 1 + +envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + +tls: + service: + secret: + cert: + key: + +serviceToken: + secret: + token: + +service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + +health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + +resources: {} diff --git a/manifests/bucketeer/charts/push/Chart.yaml b/manifests/bucketeer/charts/push/Chart.yaml new file mode 100644 index 000000000..3b1bc5dff --- /dev/null +++ b/manifests/bucketeer/charts/push/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 
+appVersion: "1.0" +description: A Helm chart for bucketeer-push +name: push +version: 1.0.0 diff --git a/manifests/bucketeer/charts/push/templates/NOTES.txt b/manifests/bucketeer/charts/push/templates/NOTES.txt new file mode 100644 index 000000000..a6c86e161 --- /dev/null +++ b/manifests/bucketeer/charts/push/templates/NOTES.txt @@ -0,0 +1,15 @@ +1. Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "push.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "push.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "push.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "push.name" . }},release={{ template "push.fullname" . }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/manifests/bucketeer/charts/push/templates/_helpers.tpl b/manifests/bucketeer/charts/push/templates/_helpers.tpl new file mode 100644 index 000000000..700815f7d --- /dev/null +++ b/manifests/bucketeer/charts/push/templates/_helpers.tpl @@ -0,0 +1,56 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "push.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "push.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "push.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "service-cert-secret" -}} +{{- if .Values.tls.service.secret }} +{{- printf "%s" .Values.tls.service.secret -}} +{{- else -}} +{{ template "push.fullname" . }}-service-cert +{{- end -}} +{{- end -}} + +{{- define "oauth-key-secret" -}} +{{- if .Values.oauth.key.secret }} +{{- printf "%s" .Values.oauth.key.secret -}} +{{- else -}} +{{ template "push.fullname" . }}-oauth-key +{{- end -}} +{{- end -}} + +{{- define "service-token-secret" -}} +{{- if .Values.serviceToken.secret }} +{{- printf "%s" .Values.serviceToken.secret -}} +{{- else -}} +{{ template "push.fullname" . 
}}-service-token +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/manifests/bucketeer/charts/push/templates/deployment.yaml b/manifests/bucketeer/charts/push/templates/deployment.yaml new file mode 100644 index 000000000..d32c14eaa --- /dev/null +++ b/manifests/bucketeer/charts/push/templates/deployment.yaml @@ -0,0 +1,167 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "push.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "push.name" . }} + chart: {{ template "push.chart" . }} + release: {{ template "push.fullname" . }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "push.name" . }} + release: {{ template "push.fullname" . }} + template: + metadata: + labels: + app: {{ template "push.name" . }} + release: {{ template "push.fullname" . }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/envoy-configmap.yaml") . | sha256sum }} + spec: + {{- with .Values.global.image.imagePullSecrets }} + imagePullSecrets: {{- toYaml . | nindent 8 }} + {{- end }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + volumes: + - name: envoy-config + configMap: + name: {{ template "push.fullname" . }}-envoy-config + - name: service-cert-secret + secret: + secretName: {{ template "service-cert-secret" . }} + - name: oauth-key-secret + secret: + secretName: {{ template "oauth-key-secret" . }} + - name: service-token-secret + secret: + secretName: {{ template "service-token-secret" . 
}} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: ["server"] + env: + - name: BUCKETEER_PUSH_PROJECT + value: "{{ .Values.env.project }}" + - name: BUCKETEER_PUSH_MYSQL_USER + value: "{{ .Values.env.mysqlUser }}" + - name: BUCKETEER_PUSH_MYSQL_PASS + value: "{{ .Values.env.mysqlPass }}" + - name: BUCKETEER_PUSH_MYSQL_HOST + value: "{{ .Values.env.mysqlHost }}" + - name: BUCKETEER_PUSH_MYSQL_PORT + value: "{{ .Values.env.mysqlPort }}" + - name: BUCKETEER_PUSH_MYSQL_DB_NAME + value: "{{ .Values.env.mysqlDbName }}" + - name: BUCKETEER_PUSH_DOMAIN_EVENT_TOPIC + value: "{{ .Values.env.domainEventTopic }}" + - name: BUCKETEER_PUSH_ACCOUNT_SERVICE + value: "{{ .Values.env.accountService }}" + - name: BUCKETEER_PUSH_FEATURE_SERVICE + value: "{{ .Values.env.featureService }}" + - name: BUCKETEER_PUSH_EXPERIMENT_SERVICE + value: "{{ .Values.env.experimentService }}" + - name: BUCKETEER_PUSH_PORT + value: "{{ .Values.env.port }}" + - name: BUCKETEER_PUSH_METRICS_PORT + value: "{{ .Values.env.metricsPort }}" + - name: BUCKETEER_PUSH_LOG_LEVEL + value: "{{ .Values.env.logLevel }}" + - name: BUCKETEER_PUSH_OAUTH_CLIENT_ID + value: "{{ .Values.oauth.clientId }}" + - name: BUCKETEER_PUSH_OAUTH_ISSUER + value: "{{ .Values.oauth.issuer }}" + - name: BUCKETEER_PUSH_OAUTH_KEY + value: /usr/local/oauth-key/public.pem + - name: BUCKETEER_PUSH_CERT + value: /usr/local/certs/service/tls.crt + - name: BUCKETEER_PUSH_KEY + value: /usr/local/certs/service/tls.key + - name: BUCKETEER_PUSH_SERVICE_TOKEN + value: /usr/local/service-token/token + volumeMounts: + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + - name: oauth-key-secret + mountPath: /usr/local/oauth-key + readOnly: true + - name: service-token-secret + mountPath: /usr/local/service-token + readOnly: true + ports: + - name: service + containerPort: {{ .Values.env.port }} + 
- name: metrics + containerPort: {{ .Values.env.metricsPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: service + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: service + scheme: HTTPS + resources: +{{ toYaml .Values.resources | indent 12 }} + - name: envoy + image: "{{ .Values.envoy.image.repository }}:{{ .Values.envoy.image.tag }}" + imagePullPolicy: {{ .Values.envoy.image.pullPolicy }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-c" + - "while [ $(netstat -plunt | grep tcp | grep -v envoy | wc -l) -ne 0 ]; do sleep 1; done;" + command: ["envoy"] + args: + - "-c" + - "/usr/local/conf/config.yaml" + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + - name: envoy-config + mountPath: /usr/local/conf/ + readOnly: true + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + ports: + - name: envoy + containerPort: {{ .Values.envoy.port }} + - name: admin + containerPort: {{ .Values.envoy.adminPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + resources: +{{ toYaml .Values.envoy.resources | indent 12 }} + strategy: + type: RollingUpdate diff --git a/manifests/bucketeer/charts/push/templates/envoy-configmap.yaml b/manifests/bucketeer/charts/push/templates/envoy-configmap.yaml new file mode 100644 index 000000000..f88506f9a --- /dev/null +++ 
b/manifests/bucketeer/charts/push/templates/envoy-configmap.yaml @@ -0,0 +1,341 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "push.fullname" . }}-envoy-config + namespace: {{ .Values.namespace }} + labels: + app: {{ template "push.name" . }} + chart: {{ template "push.chart" . }} + release: {{ template "push.fullname" . }} + heritage: {{ .Release.Service }} +data: + config.yaml: |- + admin: + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 + static_resources: + clusters: + - name: push + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + lb_policy: round_robin + load_assignment: + cluster_name: push + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 9090 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + + - name: account + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + lb_policy: round_robin + load_assignment: + cluster_name: account + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 
account.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + + - name: experiment + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + lb_policy: round_robin + load_assignment: + cluster_name: experiment + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: experiment.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + + - name: 
feature + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + lb_policy: round_robin + load_assignment: + cluster_name: feature + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: feature.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + listeners: + - name: ingress + address: + socket_address: + address: 0.0.0.0 + port_value: 9000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + push: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + 
name: ingress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: / + route: + cluster: push + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: ingress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true + + - name: egress + address: + socket_address: + address: 127.0.0.1 + port_value: 9001 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + account: + value: 25 + experiment: + value: 25 + feature: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: egress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.account.AccountService + route: + cluster: account + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: 
/bucketeer.experiment.ExperimentService + route: + cluster: experiment + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.feature.FeatureService + route: + cluster: feature + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: egress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true diff --git a/manifests/bucketeer/charts/push/templates/hpa.yaml b/manifests/bucketeer/charts/push/templates/hpa.yaml new file mode 100644 index 000000000..ed34ad329 --- /dev/null +++ b/manifests/bucketeer/charts/push/templates/hpa.yaml @@ -0,0 +1,19 @@ +{{ if .Values.hpa.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "push.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "push.fullname" . 
}} + minReplicas: {{ .Values.hpa.minReplicas }} + maxReplicas: {{ .Values.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpa.metrics.cpu.targetAverageUtilization }} +{{ end }} diff --git a/manifests/bucketeer/charts/push/templates/oauth-key-secret.yaml b/manifests/bucketeer/charts/push/templates/oauth-key-secret.yaml new file mode 100644 index 000000000..3d5c20481 --- /dev/null +++ b/manifests/bucketeer/charts/push/templates/oauth-key-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.oauth.key.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "push.fullname" . }}-oauth-key + namespace: {{ .Values.namespace }} + labels: + app: {{ template "push.name" . }} + chart: {{ template "push.chart" . }} + release: {{ template "push.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + public.pem: {{ required "OAuth key is required" .Values.oauth.key.public | b64enc | quote }} +{{- end }} diff --git a/manifests/bucketeer/charts/push/templates/pdb.yaml b/manifests/bucketeer/charts/push/templates/pdb.yaml new file mode 100644 index 000000000..3f9f09e9a --- /dev/null +++ b/manifests/bucketeer/charts/push/templates/pdb.yaml @@ -0,0 +1,12 @@ +{{ if .Values.pdb.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "push.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + selector: + matchLabels: + app: {{ template "push.name" . }} +{{ end }} diff --git a/manifests/bucketeer/charts/push/templates/service-cert-secret.yaml b/manifests/bucketeer/charts/push/templates/service-cert-secret.yaml new file mode 100644 index 000000000..6a684ef28 --- /dev/null +++ b/manifests/bucketeer/charts/push/templates/service-cert-secret.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.tls.service.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "push.fullname" . 
}}-service-cert + namespace: {{ .Values.namespace }} + labels: + app: {{ template "push.name" . }} + chart: {{ template "push.chart" . }} + release: {{ template "push.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + tls.crt: {{ required "Service TLS certificate is required" .Values.tls.service.cert | b64enc | quote }} + tls.key: {{ required "Service TLS key is required" .Values.tls.service.key | b64enc | quote }} +{{- end }} diff --git a/manifests/bucketeer/charts/push/templates/service-token-secret.yaml b/manifests/bucketeer/charts/push/templates/service-token-secret.yaml new file mode 100644 index 000000000..db41c5a6d --- /dev/null +++ b/manifests/bucketeer/charts/push/templates/service-token-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.serviceToken.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "push.fullname" . }}-service-token + namespace: {{ .Values.namespace }} + labels: + app: {{ template "push.name" . }} + chart: {{ template "push.chart" . }} + release: {{ template "push.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + token: {{ required "Service token is required" .Values.serviceToken.token | b64enc | quote }} +{{- end }} diff --git a/manifests/bucketeer/charts/push/templates/service.yaml b/manifests/bucketeer/charts/push/templates/service.yaml new file mode 100644 index 000000000..c809e4e3b --- /dev/null +++ b/manifests/bucketeer/charts/push/templates/service.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "push.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "push.name" . }} + chart: {{ template "push.chart" . }} + release: {{ template "push.fullname" . 
}} + heritage: {{ .Release.Service }} + envoy: "true" + metrics: "true" +spec: + type: {{ .Values.service.type }} + clusterIP: {{ .Values.service.clusterIP }} + ports: + - name: service + port: {{ .Values.service.externalPort }} + targetPort: envoy + protocol: TCP + - name: metrics + port: {{ .Values.env.metricsPort }} + protocol: TCP + - name: admin + port: {{ .Values.envoy.adminPort }} + protocol: TCP + selector: + app: {{ template "push.name" . }} + release: {{ template "push.fullname" . }} diff --git a/manifests/bucketeer/charts/push/values.yaml b/manifests/bucketeer/charts/push/values.yaml new file mode 100644 index 000000000..9c43e9a7e --- /dev/null +++ b/manifests/bucketeer/charts/push/values.yaml @@ -0,0 +1,77 @@ +image: + repository: ghcr.io/bucketeer-io/bucketeer-push + pullPolicy: IfNotPresent + +fullnameOverride: "push" + +namespace: + +env: + project: + mysqlUser: + mysqlPass: + mysqlHost: + mysqlPort: 3306 + mysqlDbName: + logLevel: info + port: 9090 + metricsPort: 9002 + domainEventTopic: + accountService: localhost:9001 + featureService: localhost:9001 + experimentService: localhost:9001 + +affinity: {} + +nodeSelector: {} + +pdb: + enabled: + maxUnavailable: 50% + +hpa: + enabled: true + minReplicas: + maxReplicas: + metrics: + cpu: + targetAverageUtilization: 75 + +tls: + service: + secret: + cert: + key: + +oauth: + key: + secret: + public: + clientId: + issuer: + +serviceToken: + secret: + token: + +envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + +service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + +health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + +resources: {} diff --git a/manifests/bucketeer/charts/user-persister/Chart.yaml b/manifests/bucketeer/charts/user-persister/Chart.yaml new file mode 100644 index 000000000..0e603811f --- /dev/null +++ 
b/manifests/bucketeer/charts/user-persister/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for bucketeer-user-persister +name: user-persister +version: 1.0.0 diff --git a/manifests/bucketeer/charts/user-persister/templates/NOTES.txt b/manifests/bucketeer/charts/user-persister/templates/NOTES.txt new file mode 100644 index 000000000..895cec59d --- /dev/null +++ b/manifests/bucketeer/charts/user-persister/templates/NOTES.txt @@ -0,0 +1,15 @@ +1. Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "user-persister.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "user-persister.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "user-persister.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "user-persister.name" . }},release={{ template "user-persister.fullname" . 
}}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/manifests/bucketeer/charts/user-persister/templates/_helpers.tpl b/manifests/bucketeer/charts/user-persister/templates/_helpers.tpl new file mode 100644 index 000000000..61afb6da1 --- /dev/null +++ b/manifests/bucketeer/charts/user-persister/templates/_helpers.tpl @@ -0,0 +1,48 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "user-persister.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "user-persister.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "user-persister.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "service-cert-secret" -}} +{{- if .Values.tls.service.secret }} +{{- printf "%s" .Values.tls.service.secret -}} +{{- else -}} +{{ template "user-persister.fullname" . }}-service-cert +{{- end -}} +{{- end -}} + +{{- define "service-token-secret" -}} +{{- if .Values.serviceToken.secret }} +{{- printf "%s" .Values.serviceToken.secret -}} +{{- else -}} +{{ template "user-persister.fullname" . 
}}-service-token +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/manifests/bucketeer/charts/user-persister/templates/deployment.yaml b/manifests/bucketeer/charts/user-persister/templates/deployment.yaml new file mode 100644 index 000000000..612526e5e --- /dev/null +++ b/manifests/bucketeer/charts/user-persister/templates/deployment.yaml @@ -0,0 +1,167 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "user-persister.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "user-persister.name" . }} + chart: {{ template "user-persister.chart" . }} + release: {{ template "user-persister.fullname" . }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "user-persister.name" . }} + release: {{ template "user-persister.fullname" . }} + template: + metadata: + labels: + app: {{ template "user-persister.name" . }} + release: {{ template "user-persister.fullname" . }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/envoy-configmap.yaml") . | sha256sum }} + spec: + {{- with .Values.global.image.imagePullSecrets }} + imagePullSecrets: {{- toYaml . | nindent 8 }} + {{- end }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + volumes: + - name: envoy-config + configMap: + name: {{ template "user-persister.fullname" . }}-envoy-config + - name: service-cert-secret + secret: + secretName: {{ template "service-cert-secret" . }} + - name: service-token-secret + secret: + secretName: {{ template "service-token-secret" . 
}} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: ["persister"] + env: + - name: BUCKETEER_USER_PROJECT + value: "{{ .Values.env.project }}" + - name: BUCKETEER_USER_MYSQL_USER + value: "{{ .Values.env.mysqlUser }}" + - name: BUCKETEER_USER_MYSQL_PASS + value: "{{ .Values.env.mysqlPass }}" + - name: BUCKETEER_USER_MYSQL_HOST + value: "{{ .Values.env.mysqlHost }}" + - name: BUCKETEER_USER_MYSQL_PORT + value: "{{ .Values.env.mysqlPort }}" + - name: BUCKETEER_USER_MYSQL_DB_NAME + value: "{{ .Values.env.mysqlDbName }}" + - name: BUCKETEER_USER_TOPIC + value: "{{ .Values.env.topic }}" + - name: BUCKETEER_USER_SUBSCRIPTION + value: "{{ .Values.env.subscription }}" + - name: BUCKETEER_USER_MAX_MPS + value: "{{ .Values.env.maxMps }}" + - name: BUCKETEER_USER_NUM_WORKERS + value: "{{ .Values.env.numWorkers }}" + - name: BUCKETEER_USER_FLUSH_SIZE + value: "{{ .Values.env.flushSize }}" + - name: BUCKETEER_USER_FLUSH_INTERVAL + value: "{{ .Values.env.flushInterval }}" + - name: BUCKETEER_USER_PULLER_NUM_GOROUTINES + value: "{{ .Values.env.pullerNumGoroutines }}" + - name: BUCKETEER_USER_PULLER_MAX_OUTSTANDING_MESSAGES + value: "{{ .Values.env.pullerMaxOutstandingMessages }}" + - name: BUCKETEER_USER_PULLER_MAX_OUTSTANDING_BYTES + value: "{{ .Values.env.pullerMaxOutstandingBytes }}" + - name: BUCKETEER_USER_FEATURE_SERVICE + value: "{{ .Values.env.featureService }}" + - name: BUCKETEER_USER_PORT + value: "{{ .Values.env.port }}" + - name: BUCKETEER_USER_METRICS_PORT + value: "{{ .Values.env.metricsPort }}" + - name: BUCKETEER_USER_LOG_LEVEL + value: "{{ .Values.env.logLevel }}" + - name: BUCKETEER_USER_SERVICE_TOKEN + value: /usr/local/service-token/token + - name: BUCKETEER_USER_CERT + value: /usr/local/certs/service/tls.crt + - name: BUCKETEER_USER_KEY + value: /usr/local/certs/service/tls.key + volumeMounts: + - name: service-cert-secret + mountPath: 
/usr/local/certs/service + readOnly: true + - name: service-token-secret + mountPath: /usr/local/service-token + readOnly: true + ports: + - name: service + containerPort: {{ .Values.env.port }} + - name: metrics + containerPort: {{ .Values.env.metricsPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: service + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: service + scheme: HTTPS + resources: +{{ toYaml .Values.resources | indent 12 }} + - name: envoy + image: "{{ .Values.envoy.image.repository }}:{{ .Values.envoy.image.tag }}" + imagePullPolicy: {{ .Values.envoy.image.pullPolicy }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-c" + - "while [ $(netstat -plunt | grep tcp | grep -v envoy | wc -l) -ne 0 ]; do sleep 1; done;" + command: ["envoy"] + args: + - "-c" + - "/usr/local/conf/config.yaml" + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + - name: envoy-config + mountPath: /usr/local/conf/ + readOnly: true + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + ports: + - name: envoy + containerPort: {{ .Values.envoy.port }} + - name: admin + containerPort: {{ .Values.envoy.adminPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + resources: +{{ toYaml .Values.envoy.resources | indent 12 }} + strategy: + type: RollingUpdate diff --git 
a/manifests/bucketeer/charts/user-persister/templates/envoy-configmap.yaml b/manifests/bucketeer/charts/user-persister/templates/envoy-configmap.yaml new file mode 100644 index 000000000..2ad852538 --- /dev/null +++ b/manifests/bucketeer/charts/user-persister/templates/envoy-configmap.yaml @@ -0,0 +1,231 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "user-persister.fullname" . }}-envoy-config + namespace: {{ .Values.namespace }} + labels: + app: {{ template "user-persister.name" . }} + chart: {{ template "user-persister.chart" . }} + release: {{ template "user-persister.fullname" . }} + heritage: {{ .Release.Service }} +data: + config.yaml: |- + admin: + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 + static_resources: + clusters: + - name: user-persister + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + lb_policy: round_robin + load_assignment: + cluster_name: user-persister + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 9090 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + 
ignore_health_on_host_removal: true + + - name: feature + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + lb_policy: round_robin + load_assignment: + cluster_name: feature + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: feature.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + listeners: + - name: ingress + address: + socket_address: + address: 0.0.0.0 + port_value: 9000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + user-persister: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router 
+ route_config: + virtual_hosts: + - domains: + - '*' + name: ingress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: / + route: + cluster: user-persister + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: ingress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true + + - name: egress + address: + socket_address: + address: 127.0.0.1 + port_value: 9001 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + feature: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: egress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.feature.FeatureService + route: + cluster: feature + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: egress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + 
typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true diff --git a/manifests/bucketeer/charts/user-persister/templates/hpa.yaml b/manifests/bucketeer/charts/user-persister/templates/hpa.yaml new file mode 100644 index 000000000..7e8df5544 --- /dev/null +++ b/manifests/bucketeer/charts/user-persister/templates/hpa.yaml @@ -0,0 +1,19 @@ +{{ if .Values.hpa.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "user-persister.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "user-persister.fullname" . }} + minReplicas: {{ .Values.hpa.minReplicas }} + maxReplicas: {{ .Values.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpa.metrics.cpu.targetAverageUtilization }} +{{ end }} diff --git a/manifests/bucketeer/charts/user-persister/templates/service-cert-secret.yaml b/manifests/bucketeer/charts/user-persister/templates/service-cert-secret.yaml new file mode 100644 index 000000000..4e240f388 --- /dev/null +++ b/manifests/bucketeer/charts/user-persister/templates/service-cert-secret.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.tls.service.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "user-persister.fullname" . }}-service-cert + namespace: {{ .Values.namespace }} + labels: + app: {{ template "user-persister.name" . }} + chart: {{ template "user-persister.chart" . }} + release: {{ template "user-persister.fullname" . 
}} + heritage: {{ .Release.Service }} +type: Opaque +data: + tls.crt: {{ required "Service TLS certificate is required" .Values.tls.service.cert | b64enc | quote }} + tls.key: {{ required "Service TLS key is required" .Values.tls.service.key | b64enc | quote }} +{{- end }} diff --git a/manifests/bucketeer/charts/user-persister/templates/service.yaml b/manifests/bucketeer/charts/user-persister/templates/service.yaml new file mode 100644 index 000000000..9b42ef40e --- /dev/null +++ b/manifests/bucketeer/charts/user-persister/templates/service.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "user-persister.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "user-persister.name" . }} + chart: {{ template "user-persister.chart" . }} + release: {{ template "user-persister.fullname" . }} + heritage: {{ .Release.Service }} + envoy: "true" + metrics: "true" +spec: + type: {{ .Values.service.type }} + clusterIP: {{ .Values.service.clusterIP }} + ports: + - name: service + port: {{ .Values.service.externalPort }} + targetPort: envoy + protocol: TCP + - name: metrics + port: {{ .Values.env.metricsPort }} + protocol: TCP + - name: admin + port: {{ .Values.envoy.adminPort }} + protocol: TCP + selector: + app: {{ template "user-persister.name" . }} + release: {{ template "user-persister.fullname" . 
}}
diff --git a/manifests/bucketeer/charts/user-persister/values.yaml b/manifests/bucketeer/charts/user-persister/values.yaml
new file mode 100644
index 000000000..1c57cdce6
--- /dev/null
+++ b/manifests/bucketeer/charts/user-persister/values.yaml
@@ -0,0 +1,72 @@
+image:
+  repository: ghcr.io/bucketeer-io/bucketeer-user
+  pullPolicy: IfNotPresent
+
+fullnameOverride: "user-persister"
+
+namespace:
+
+env:
+  project:
+  mysqlUser:
+  mysqlPass:
+  mysqlHost:
+  mysqlPort: 3306
+  mysqlDbName:
+  topic:
+  subscription:
+  maxMps: "1000"
+  numWorkers: 2
+  flushSize: 100
+  flushInterval: 2s
+  pullerNumGoroutines: 5
+  pullerMaxOutstandingMessages: "1000"
+  pullerMaxOutstandingBytes: "1000000000"
+  logLevel: info
+  port: 9090
+  metricsPort: 9002
+  featureService: localhost:9001
+
+affinity: {}
+
+nodeSelector: {}
+
+hpa:
+  enabled:
+  minReplicas:
+  maxReplicas:
+  metrics:
+    cpu:
+      targetAverageUtilization:
+
+envoy:
+  image:
+    repository: envoyproxy/envoy-alpine
+    tag: v1.21.1
+    pullPolicy: IfNotPresent
+  # NOTE: flat keys (no "config:" wrapper) — deployment.yaml and service.yaml
+  port: 9000
+  adminPort: 8001
+  resources: {}
+
+tls:
+  service:
+    secret:
+    cert:
+    key:
+
+serviceToken:
+  secret:
+  token:
+
+service:
+  type: ClusterIP
+  clusterIP: None
+  externalPort: 9000
+
+health:
+  initialDelaySeconds: 10
+  periodSeconds: 10
+  failureThreshold: 10
+
+resources: {}
diff --git a/manifests/bucketeer/charts/user/Chart.yaml b/manifests/bucketeer/charts/user/Chart.yaml
new file mode 100644
index 000000000..fdd658aef
--- /dev/null
+++ b/manifests/bucketeer/charts/user/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+appVersion: "1.0"
+description: A Helm chart for bucketeer-user
+name: user
+version: 1.0.0
diff --git a/manifests/bucketeer/charts/user/templates/NOTES.txt b/manifests/bucketeer/charts/user/templates/NOTES.txt
new file mode 100644
index 000000000..18db511a3
--- /dev/null
+++ b/manifests/bucketeer/charts/user/templates/NOTES.txt
@@ -0,0 +1,15 @@
+1. 
Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "user.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "user.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "user.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "user.name" . }},release={{ template "user.fullname" . }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/manifests/bucketeer/charts/user/templates/_helpers.tpl b/manifests/bucketeer/charts/user/templates/_helpers.tpl new file mode 100644 index 000000000..751fdf276 --- /dev/null +++ b/manifests/bucketeer/charts/user/templates/_helpers.tpl @@ -0,0 +1,56 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "user.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "user.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "user.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "service-cert-secret" -}} +{{- if .Values.tls.service.secret }} +{{- printf "%s" .Values.tls.service.secret -}} +{{- else -}} +{{ template "user.fullname" . }}-service-cert +{{- end -}} +{{- end -}} + +{{- define "oauth-key-secret" -}} +{{- if .Values.oauth.key.secret }} +{{- printf "%s" .Values.oauth.key.secret -}} +{{- else -}} +{{ template "user.fullname" . }}-oauth-key +{{- end -}} +{{- end -}} + +{{- define "service-token-secret" -}} +{{- if .Values.serviceToken.secret }} +{{- printf "%s" .Values.serviceToken.secret -}} +{{- else -}} +{{ template "user.fullname" . }}-service-token +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/manifests/bucketeer/charts/user/templates/deployment.yaml b/manifests/bucketeer/charts/user/templates/deployment.yaml new file mode 100644 index 000000000..afd513d66 --- /dev/null +++ b/manifests/bucketeer/charts/user/templates/deployment.yaml @@ -0,0 +1,163 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "user.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "user.name" . }} + chart: {{ template "user.chart" . }} + release: {{ template "user.fullname" . }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "user.name" . }} + release: {{ template "user.fullname" . 
}} + template: + metadata: + labels: + app: {{ template "user.name" . }} + release: {{ template "user.fullname" . }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/envoy-configmap.yaml") . | sha256sum }} + spec: + {{- with .Values.global.image.imagePullSecrets }} + imagePullSecrets: {{- toYaml . | nindent 8 }} + {{- end }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + volumes: + - name: envoy-config + configMap: + name: {{ template "user.fullname" . }}-envoy-config + - name: service-cert-secret + secret: + secretName: {{ template "service-cert-secret" . }} + - name: oauth-key-secret + secret: + secretName: {{ template "oauth-key-secret" . }} + - name: service-token-secret + secret: + secretName: {{ template "service-token-secret" . }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: ["server"] + env: + - name: BUCKETEER_USER_PROJECT + value: "{{ .Values.env.project }}" + - name: BUCKETEER_USER_DATABASE # TODO: It should be removed after the migration is done + value: "{{ .Values.env.database }}" + - name: BUCKETEER_USER_MYSQL_USER + value: "{{ .Values.env.mysqlUser }}" + - name: BUCKETEER_USER_MYSQL_PASS + value: "{{ .Values.env.mysqlPass }}" + - name: BUCKETEER_USER_MYSQL_HOST + value: "{{ .Values.env.mysqlHost }}" + - name: BUCKETEER_USER_MYSQL_PORT + value: "{{ .Values.env.mysqlPort }}" + - name: BUCKETEER_USER_MYSQL_DB_NAME + value: "{{ .Values.env.mysqlDbName }}" + - name: BUCKETEER_USER_ACCOUNT_SERVICE + value: "{{ .Values.env.accountService }}" + - name: BUCKETEER_USER_PORT + value: "{{ .Values.env.port }}" + - name: BUCKETEER_USER_METRICS_PORT + value: "{{ .Values.env.metricsPort }}" + - name: BUCKETEER_USER_LOG_LEVEL + value: "{{ .Values.env.logLevel }}" + - name: BUCKETEER_USER_OAUTH_CLIENT_ID + value: "{{ .Values.oauth.clientId }}" + - 
name: BUCKETEER_USER_OAUTH_ISSUER + value: "{{ .Values.oauth.issuer }}" + - name: BUCKETEER_USER_OAUTH_KEY + value: /usr/local/oauth-key/public.pem + - name: BUCKETEER_USER_CERT + value: /usr/local/certs/service/tls.crt + - name: BUCKETEER_USER_KEY + value: /usr/local/certs/service/tls.key + - name: BUCKETEER_USER_SERVICE_TOKEN + value: /usr/local/service-token/token + volumeMounts: + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + - name: oauth-key-secret + mountPath: /usr/local/oauth-key + readOnly: true + - name: service-token-secret + mountPath: /usr/local/service-token + readOnly: true + ports: + - name: service + containerPort: {{ .Values.env.port }} + - name: metrics + containerPort: {{ .Values.env.metricsPort }} + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: service + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: service + scheme: HTTPS + resources: +{{ toYaml .Values.resources | indent 12 }} + - name: envoy + image: "{{ .Values.envoy.image.repository }}:{{ .Values.envoy.image.tag }}" + imagePullPolicy: {{ .Values.envoy.image.pullPolicy }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-c" + - "while [ $(netstat -plunt | grep tcp | grep -v envoy | wc -l) -ne 0 ]; do sleep 1; done;" + command: ["envoy"] + args: + - "-c" + - "/usr/local/conf/config.yaml" + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + - name: envoy-config + mountPath: /usr/local/conf/ + readOnly: true + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + ports: + - name: envoy + containerPort: {{ .Values.envoy.port }} + - name: admin + containerPort: {{ .Values.envoy.adminPort }} + livenessProbe: + 
initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /health + port: envoy + scheme: HTTPS + resources: +{{ toYaml .Values.envoy.resources | indent 12 }} + strategy: + type: RollingUpdate diff --git a/manifests/bucketeer/charts/user/templates/envoy-configmap.yaml b/manifests/bucketeer/charts/user/templates/envoy-configmap.yaml new file mode 100644 index 000000000..d9375698d --- /dev/null +++ b/manifests/bucketeer/charts/user/templates/envoy-configmap.yaml @@ -0,0 +1,231 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "user.fullname" . }}-envoy-config + namespace: {{ .Values.namespace }} + labels: + app: {{ template "user.name" . }} + chart: {{ template "user.chart" . }} + release: {{ template "user.fullname" . 
}} + heritage: {{ .Release.Service }} +data: + config.yaml: |- + admin: + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 + static_resources: + clusters: + - name: user + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + lb_policy: round_robin + load_assignment: + cluster_name: user + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: localhost + port_value: 9090 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + + - name: account + type: strict_dns + connect_timeout: 5s + dns_lookup_family: V4_ONLY + lb_policy: round_robin + load_assignment: + cluster_name: account + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: account.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: 
/usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - grpc_health_check: {} + healthy_threshold: 1 + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + timeout: 1s + unhealthy_threshold: 2 + ignore_health_on_host_removal: true + listeners: + - name: ingress + address: + socket_address: + address: 0.0.0.0 + port_value: 9000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + user: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: ingress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: / + route: + cluster: user + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: ingress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: 
/usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true + + - name: egress + address: + socket_address: + address: 127.0.0.1 + port_value: 9001 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + access_log: + name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + http_filters: + - name: envoy.filters.http.health_check + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck + cluster_min_healthy_percentages: + account: + value: 25 + headers: + - name: :path + string_match: + exact: /health + pass_through_mode: false + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - domains: + - '*' + name: egress_services + routes: + - match: + headers: + - name: content-type + string_match: + exact: application/grpc + prefix: /bucketeer.account.AccountService + route: + cluster: account + retry_policy: + num_retries: 3 + retry_on: 5xx + timeout: 15s + stat_prefix: egress_http + stream_idle_timeout: 300s + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + common_tls_context: + alpn_protocols: + - h2 + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + require_client_certificate: true diff --git a/manifests/bucketeer/charts/user/templates/hpa.yaml b/manifests/bucketeer/charts/user/templates/hpa.yaml new file mode 100644 index 000000000..734eb177b --- /dev/null +++ b/manifests/bucketeer/charts/user/templates/hpa.yaml @@ -0,0 +1,19 @@ +{{ if 
.Values.hpa.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "user.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "user.fullname" . }} + minReplicas: {{ .Values.hpa.minReplicas }} + maxReplicas: {{ .Values.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpa.metrics.cpu.targetAverageUtilization }} +{{ end }} diff --git a/manifests/bucketeer/charts/user/templates/oauth-key-secret.yaml b/manifests/bucketeer/charts/user/templates/oauth-key-secret.yaml new file mode 100644 index 000000000..4303212c2 --- /dev/null +++ b/manifests/bucketeer/charts/user/templates/oauth-key-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.oauth.key.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "user.fullname" . }}-oauth-key + namespace: {{ .Values.namespace }} + labels: + app: {{ template "user.name" . }} + chart: {{ template "user.chart" . }} + release: {{ template "user.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + public.pem: {{ required "OAuth key is required" .Values.oauth.key.public | b64enc | quote }} +{{- end }} diff --git a/manifests/bucketeer/charts/user/templates/pdb.yaml b/manifests/bucketeer/charts/user/templates/pdb.yaml new file mode 100644 index 000000000..5db7ec525 --- /dev/null +++ b/manifests/bucketeer/charts/user/templates/pdb.yaml @@ -0,0 +1,12 @@ +{{ if .Values.pdb.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "user.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + selector: + matchLabels: + app: {{ template "user.name" . 
}} +{{ end }} diff --git a/manifests/bucketeer/charts/user/templates/service-cert-secret.yaml b/manifests/bucketeer/charts/user/templates/service-cert-secret.yaml new file mode 100644 index 000000000..bebd38f15 --- /dev/null +++ b/manifests/bucketeer/charts/user/templates/service-cert-secret.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.tls.service.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "user.fullname" . }}-service-cert + namespace: {{ .Values.namespace }} + labels: + app: {{ template "user.name" . }} + chart: {{ template "user.chart" . }} + release: {{ template "user.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + tls.crt: {{ required "Service TLS certificate is required" .Values.tls.service.cert | b64enc | quote }} + tls.key: {{ required "Service TLS key is required" .Values.tls.service.key | b64enc | quote }} +{{- end }} diff --git a/manifests/bucketeer/charts/user/templates/service-token-secret.yaml b/manifests/bucketeer/charts/user/templates/service-token-secret.yaml new file mode 100644 index 000000000..5354f6821 --- /dev/null +++ b/manifests/bucketeer/charts/user/templates/service-token-secret.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.serviceToken.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "user.fullname" . }}-service-token + namespace: {{ .Values.namespace }} + labels: + app: {{ template "user.name" . }} + chart: {{ template "user.chart" . }} + release: {{ template "user.fullname" . 
}} + heritage: {{ .Release.Service }} +type: Opaque +data: + token: {{ required "Service token is required" .Values.serviceToken.token | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/user/templates/service.yaml b/manifests/bucketeer/charts/user/templates/service.yaml new file mode 100644 index 000000000..542af150d --- /dev/null +++ b/manifests/bucketeer/charts/user/templates/service.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "user.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "user.name" . }} + chart: {{ template "user.chart" . }} + release: {{ template "user.fullname" . }} + heritage: {{ .Release.Service }} + envoy: "true" + metrics: "true" +spec: + type: {{ .Values.service.type }} + clusterIP: {{ .Values.service.clusterIP }} + ports: + - name: service + port: {{ .Values.service.externalPort }} + targetPort: envoy + protocol: TCP + - name: metrics + port: {{ .Values.env.metricsPort }} + protocol: TCP + - name: admin + port: {{ .Values.envoy.adminPort }} + protocol: TCP + selector: + app: {{ template "user.name" . }} + release: {{ template "user.fullname" . 
}} + diff --git a/manifests/bucketeer/charts/user/values.yaml b/manifests/bucketeer/charts/user/values.yaml new file mode 100644 index 000000000..e3882fdd7 --- /dev/null +++ b/manifests/bucketeer/charts/user/values.yaml @@ -0,0 +1,75 @@ +image: + repository: ghcr.io/bucketeer-io/bucketeer-user + pullPolicy: IfNotPresent + +fullnameOverride: "user" + +namespace: + +env: + project: + database: + mysqlUser: + mysqlPass: + mysqlHost: + mysqlPort: 3306 + mysqlDbName: + accountService: localhost:9001 + logLevel: info + port: 9090 + metricsPort: 9002 + +affinity: {} + +nodeSelector: {} + +pdb: + enabled: + maxUnavailable: 50% + +hpa: + enabled: + minReplicas: + maxReplicas: + metrics: + cpu: + targetAverageUtilization: + +tls: + service: + secret: + cert: + key: + +oauth: + key: + secret: + public: + clientId: + issuer: + +serviceToken: + secret: + token: + +envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + +service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + +health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + +resources: {} diff --git a/manifests/bucketeer/charts/web-gateway/.helmignore b/manifests/bucketeer/charts/web-gateway/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/manifests/bucketeer/charts/web-gateway/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/manifests/bucketeer/charts/web-gateway/Chart.yaml b/manifests/bucketeer/charts/web-gateway/Chart.yaml new file mode 100644 index 000000000..910b81ea9 --- /dev/null +++ b/manifests/bucketeer/charts/web-gateway/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: 1.0 +description: A Helm chart for web-gateway +name: web-gateway +version: 1.0.0 diff --git a/manifests/bucketeer/charts/web-gateway/templates/NOTES.txt b/manifests/bucketeer/charts/web-gateway/templates/NOTES.txt new file mode 100644 index 000000000..1f1706fce --- /dev/null +++ b/manifests/bucketeer/charts/web-gateway/templates/NOTES.txt @@ -0,0 +1,14 @@ +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "web-gateway.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of it by running 'kubectl get svc -w {{ template "web-gateway.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "web-gateway.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "web-gateway.name" . }},release={{ template "web-gateway.fullname" . 
}}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/web-gateway/templates/_helpers.tpl b/manifests/bucketeer/charts/web-gateway/templates/_helpers.tpl new file mode 100644 index 000000000..06920fb7f --- /dev/null +++ b/manifests/bucketeer/charts/web-gateway/templates/_helpers.tpl @@ -0,0 +1,48 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "web-gateway.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "web-gateway.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "web-gateway.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "service-cert-secret" -}} +{{- if .Values.tls.service.secret }} +{{- printf "%s" .Values.tls.service.secret -}} +{{- else -}} +{{ template "web-gateway.fullname" . }}-service-cert +{{- end -}} +{{- end -}} + +{{- define "bucketeer-jp-cert-secret" -}} +{{- if .Values.tls.bucketeerJP.secret }} +{{- printf "%s" .Values.tls.bucketeerJP.secret -}} +{{- else -}} +{{ template "web-gateway.fullname" . 
}}-bucketeer-jp-cert +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/manifests/bucketeer/charts/web-gateway/templates/backend-config.yaml b/manifests/bucketeer/charts/web-gateway/templates/backend-config.yaml new file mode 100644 index 000000000..aaa4bb12a --- /dev/null +++ b/manifests/bucketeer/charts/web-gateway/templates/backend-config.yaml @@ -0,0 +1,9 @@ +apiVersion: cloud.google.com/v1 +kind: BackendConfig +metadata: + name: {{ template "web-gateway.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + timeoutSec: 40 + connectionDraining: + drainingTimeoutSec: 60 diff --git a/manifests/bucketeer/charts/web-gateway/templates/bucketeer-jp-cert-secret.yaml b/manifests/bucketeer/charts/web-gateway/templates/bucketeer-jp-cert-secret.yaml new file mode 100644 index 000000000..83044275e --- /dev/null +++ b/manifests/bucketeer/charts/web-gateway/templates/bucketeer-jp-cert-secret.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.tls.bucketeerJP.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "web-gateway.fullname" . }}-bucketeer-jp-cert + namespace: {{ .Values.namespace }} + labels: + app: {{ template "web-gateway.name" . }} + chart: {{ template "web-gateway.chart" . }} + release: {{ template "web-gateway.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + tls.crt: {{ required "BucketeerJP TLS certificate is required" .Values.tls.bucketeerJP.cert | b64enc | quote }} + tls.key: {{ required "BucketeerJP TLS key is required" .Values.tls.bucketeerJP.key | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/web-gateway/templates/deployment.yaml b/manifests/bucketeer/charts/web-gateway/templates/deployment.yaml new file mode 100644 index 000000000..91b4c18c4 --- /dev/null +++ b/manifests/bucketeer/charts/web-gateway/templates/deployment.yaml @@ -0,0 +1,171 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "web-gateway.fullname" . 
}} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "web-gateway.name" . }} + chart: {{ template "web-gateway.chart" . }} + release: {{ template "web-gateway.fullname" . }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "web-gateway.name" . }} + release: {{ template "web-gateway.fullname" . }} + template: + metadata: + labels: + app: {{ template "web-gateway.name" . }} + release: {{ template "web-gateway.fullname" . }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/envoy-configmap.yaml") . | sha256sum }} + spec: + volumes: + - name: envoy-config + configMap: + name: {{ template "web-gateway.fullname" . }}-envoy-config + - name: envoy-account-descriptor-secret + secret: + secretName: {{ template "web-gateway.fullname" . }}-account + - name: envoy-auditlog-descriptor-secret + secret: + secretName: {{ template "web-gateway.fullname" . }}-auditlog + - name: envoy-auth-descriptor-secret + secret: + secretName: {{ template "web-gateway.fullname" . }}-auth + - name: envoy-autoops-descriptor-secret + secret: + secretName: {{ template "web-gateway.fullname" . }}-autoops + - name: envoy-environment-descriptor-secret + secret: + secretName: {{ template "web-gateway.fullname" . }}-environment + - name: envoy-eventcounter-descriptor-secret + secret: + secretName: {{ template "web-gateway.fullname" . }}-eventcounter + - name: envoy-experiment-descriptor-secret + secret: + secretName: {{ template "web-gateway.fullname" . }}-experiment + - name: envoy-feature-descriptor-secret + secret: + secretName: {{ template "web-gateway.fullname" . }}-feature + - name: envoy-notification-descriptor-secret + secret: + secretName: {{ template "web-gateway.fullname" . }}-notification + - name: envoy-push-descriptor-secret + secret: + secretName: {{ template "web-gateway.fullname" . }}-push + - name: envoy-user-descriptor-secret + secret: + secretName: {{ template "web-gateway.fullname" . 
}}-user + - name: envoy-migration-descriptor-secret + secret: + secretName: {{ template "web-gateway.fullname" . }}-migration + - name: service-cert-secret + secret: + secretName: {{ template "service-cert-secret" . }} + - name: bucketeer-jp-cert-secret + secret: + secretName: {{ template "bucketeer-jp-cert-secret" . }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.envoy.image.repository }}:{{ .Values.envoy.image.tag }}" + imagePullPolicy: {{ .Values.envoy.image.pullPolicy }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-c" + - "while [ $(netstat -plunt | grep tcp | grep -v envoy | wc -l) -ne 0 ]; do sleep 1; done;" + - "sleep 10;" + command: ["envoy"] + args: + - "-c" + - "/usr/local/conf/config.yaml" + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + - name: envoy-config + mountPath: /usr/local/conf + readOnly: true + - name: envoy-account-descriptor-secret + mountPath: /usr/local/account-secret + readOnly: true + - name: envoy-auditlog-descriptor-secret + mountPath: /usr/local/auditlog-secret + readOnly: true + - name: envoy-auth-descriptor-secret + mountPath: /usr/local/auth-secret + readOnly: true + - name: envoy-autoops-descriptor-secret + mountPath: /usr/local/autoops-secret + readOnly: true + - name: envoy-environment-descriptor-secret + mountPath: /usr/local/environment-secret + readOnly: true + - name: envoy-eventcounter-descriptor-secret + mountPath: /usr/local/eventcounter-secret + readOnly: true + - name: envoy-experiment-descriptor-secret + mountPath: /usr/local/experiment-secret + readOnly: true + - name: envoy-feature-descriptor-secret + mountPath: /usr/local/feature-secret + readOnly: true + - name: envoy-notification-descriptor-secret + mountPath: /usr/local/notification-secret + readOnly: true + - name: envoy-push-descriptor-secret + mountPath: /usr/local/push-secret + readOnly: true + - name: envoy-user-descriptor-secret + mountPath: /usr/local/user-secret + readOnly: 
true + - name: envoy-migration-descriptor-secret + mountPath: /usr/local/migration-secret + readOnly: true + - name: service-cert-secret + mountPath: /usr/local/certs/service + readOnly: true + - name: bucketeer-jp-cert-secret + mountPath: /usr/local/certs/bucketeer-jp + readOnly: true + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + - name: admin + containerPort: {{ .Values.envoy.adminPort }} + protocol: TCP + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /server_info + port: admin + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /server_info + port: admin + resources: +{{ toYaml .Values.resources | indent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + strategy: + type: RollingUpdate diff --git a/manifests/bucketeer/charts/web-gateway/templates/envoy-configmap.yaml b/manifests/bucketeer/charts/web-gateway/templates/envoy-configmap.yaml new file mode 100644 index 000000000..1e97b06f1 --- /dev/null +++ b/manifests/bucketeer/charts/web-gateway/templates/envoy-configmap.yaml @@ -0,0 +1,876 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "web-gateway.fullname" . }}-envoy-config + namespace: {{ .Values.namespace }} + labels: + app: {{ template "web-gateway.name" . }} + chart: {{ template "web-gateway.chart" . }} + release: {{ template "web-gateway.fullname" . 
}} + heritage: {{ .Release.Service }} +data: + config.yaml: |- + admin: + access_log: + - name: envoy.access_loggers.file + typed_config: + '@type': type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: "/dev/stdout" + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 + static_resources: + clusters: + - name: feature + dns_lookup_family: V4_ONLY + connect_timeout: 5s + ignore_health_on_host_removal: true + type: strict_dns + lb_policy: round_robin + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + load_assignment: + cluster_name: feature + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: feature.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: ["h2"] + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + health_checks: + - http_health_check: + path: /health + timeout: 1s + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + healthy_threshold: 1 + unhealthy_threshold: 2 + + - name: experiment + dns_lookup_family: V4_ONLY + connect_timeout: 5s + ignore_health_on_host_removal: true + type: strict_dns + lb_policy: round_robin + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + load_assignment: + cluster_name: experiment + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: experiment.{{ 
.Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: ["h2"] + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + health_checks: + - http_health_check: + path: /health + timeout: 1s + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + healthy_threshold: 1 + unhealthy_threshold: 2 + + - name: event-counter + dns_lookup_family: V4_ONLY + connect_timeout: 5s + ignore_health_on_host_removal: true + type: strict_dns + lb_policy: round_robin + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + load_assignment: + cluster_name: event-counter + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: event-counter.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: ["h2"] + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + health_checks: + - http_health_check: + path: /health + timeout: 1s + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + healthy_threshold: 1 + unhealthy_threshold: 2 + + - name: auditlog + dns_lookup_family: V4_ONLY + connect_timeout: 5s + ignore_health_on_host_removal: true + type: strict_dns + lb_policy: round_robin + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': 
type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + load_assignment: + cluster_name: auditlog + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: auditlog.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: ["h2"] + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + health_checks: + - http_health_check: + path: /health + timeout: 1s + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + healthy_threshold: 1 + unhealthy_threshold: 2 + + - name: account + dns_lookup_family: V4_ONLY + connect_timeout: 5s + ignore_health_on_host_removal: true + type: strict_dns + lb_policy: round_robin + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + load_assignment: + cluster_name: account + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: account.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: ["h2"] + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + health_checks: + - http_health_check: + path: /health + timeout: 1s + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + healthy_threshold: 1 + unhealthy_threshold: 2 + + - 
name: auth + dns_lookup_family: V4_ONLY + connect_timeout: 5s + ignore_health_on_host_removal: true + type: strict_dns + lb_policy: round_robin + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + load_assignment: + cluster_name: auth + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: auth.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: ["h2"] + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + health_checks: + - http_health_check: + path: /health + timeout: 1s + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + healthy_threshold: 1 + unhealthy_threshold: 2 + + - name: user + dns_lookup_family: V4_ONLY + connect_timeout: 5s + ignore_health_on_host_removal: true + type: strict_dns + lb_policy: round_robin + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + load_assignment: + cluster_name: user + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: user.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: ["h2"] + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + 
filename: /usr/local/certs/service/tls.key + health_checks: + - http_health_check: + path: /health + timeout: 1s + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + healthy_threshold: 1 + unhealthy_threshold: 2 + + - name: environment + dns_lookup_family: V4_ONLY + connect_timeout: 5s + ignore_health_on_host_removal: true + type: strict_dns + lb_policy: round_robin + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + load_assignment: + cluster_name: environment + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: environment.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: ["h2"] + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + health_checks: + - http_health_check: + path: /health + timeout: 1s + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + healthy_threshold: 1 + unhealthy_threshold: 2 + + - name: auto-ops + dns_lookup_family: V4_ONLY + connect_timeout: 5s + ignore_health_on_host_removal: true + type: strict_dns + lb_policy: round_robin + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + load_assignment: + cluster_name: auto-ops + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: auto-ops.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: 
envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: ["h2"] + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + health_checks: + - http_health_check: + path: /health + timeout: 1s + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + healthy_threshold: 1 + unhealthy_threshold: 2 + + - name: push + dns_lookup_family: V4_ONLY + connect_timeout: 5s + ignore_health_on_host_removal: true + type: strict_dns + lb_policy: round_robin + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + load_assignment: + cluster_name: push + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: push.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: ["h2"] + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + health_checks: + - http_health_check: + path: /health + timeout: 1s + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + healthy_threshold: 1 + unhealthy_threshold: 2 + + - name: notification + dns_lookup_family: V4_ONLY + connect_timeout: 5s + ignore_health_on_host_removal: true + type: strict_dns + lb_policy: round_robin + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + 
http2_protocol_options: {} + load_assignment: + cluster_name: notification + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: notification.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: ["h2"] + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + health_checks: + - http_health_check: + path: /health + timeout: 1s + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + healthy_threshold: 1 + unhealthy_threshold: 2 + + - name: migration-mysql + dns_lookup_family: V4_ONLY + connect_timeout: 5s + ignore_health_on_host_removal: true + type: strict_dns + lb_policy: round_robin + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + load_assignment: + cluster_name: migration-mysql + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: migration-mysql.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: ["h2"] + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + health_checks: + - http_health_check: + path: /health + timeout: 1s + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + healthy_threshold: 1 + unhealthy_threshold: 2 + + - name: dex + dns_lookup_family: V4_ONLY + connect_timeout: 5s + 
ignore_health_on_host_removal: true + type: strict_dns + lb_policy: round_robin + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + load_assignment: + cluster_name: dex + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: dex.{{ .Values.namespace }}.svc.cluster.local + port_value: 9000 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: ["h2"] + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + health_checks: + - http_health_check: + path: /health + timeout: 1s + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + healthy_threshold: 1 + unhealthy_threshold: 2 + + - name: web + dns_lookup_family: V4_ONLY + connect_timeout: 5s + ignore_health_on_host_removal: true + type: strict_dns + lb_policy: round_robin + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + '@type': type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + load_assignment: + cluster_name: web + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: web.{{ .Values.namespace }}.svc.cluster.local + port_value: 443 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + alpn_protocols: ["h2"] + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/service/tls.crt + private_key: + filename: /usr/local/certs/service/tls.key + health_checks: + - 
http_health_check: + path: /health + timeout: 1s + interval: 10s + interval_jitter: 1s + no_traffic_interval: 2s + healthy_threshold: 1 + unhealthy_threshold: 2 + listeners: + - name: ingress + address: + socket_address: + address: 0.0.0.0 + port_value: 443 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + access_log: + name: envoy.access_loggers.file + typed_config: + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + codec_type: auto + common_http_protocol_options: + # set longer timeout than lb session timeout (600s) + idle_timeout: 620s + stream_idle_timeout: 3600s + request_timeout: 3600s + http_filters: + - name: envoy.filters.http.cors + - name: envoy.filters.http.grpc_web + - name: envoy.filters.http.grpc_json_transcoder + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.grpc_json_transcoder.v3.GrpcJsonTranscoder + auto_mapping: true + proto_descriptor: /usr/local/account-secret/account_proto_descriptor.pb + services: + - bucketeer.account.AccountService + print_options: + always_print_primitive_fields: true + - name: envoy.filters.http.grpc_json_transcoder + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.grpc_json_transcoder.v3.GrpcJsonTranscoder + auto_mapping: true + proto_descriptor: /usr/local/auditlog-secret/auditlog_proto_descriptor.pb + services: + - bucketeer.auditlog.AuditLogService + print_options: + always_print_primitive_fields: true + - name: envoy.filters.http.grpc_json_transcoder + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.grpc_json_transcoder.v3.GrpcJsonTranscoder + auto_mapping: true + proto_descriptor: /usr/local/auth-secret/auth_proto_descriptor.pb + services: + - bucketeer.auth.AuthService + print_options: 
+ always_print_primitive_fields: true + - name: envoy.filters.http.grpc_json_transcoder + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.grpc_json_transcoder.v3.GrpcJsonTranscoder + auto_mapping: true + proto_descriptor: /usr/local/autoops-secret/autoops_proto_descriptor.pb + services: + - bucketeer.autoops.AutoOpsService + print_options: + always_print_primitive_fields: true + - name: envoy.filters.http.grpc_json_transcoder + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.grpc_json_transcoder.v3.GrpcJsonTranscoder + auto_mapping: true + proto_descriptor: /usr/local/environment-secret/environment_proto_descriptor.pb + services: + - bucketeer.environment.EnvironmentService + print_options: + always_print_primitive_fields: true + - name: envoy.filters.http.grpc_json_transcoder + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.grpc_json_transcoder.v3.GrpcJsonTranscoder + auto_mapping: true + proto_descriptor: /usr/local/eventcounter-secret/eventcounter_proto_descriptor.pb + services: + - bucketeer.eventcounter.EventCounterService + print_options: + always_print_primitive_fields: true + - name: envoy.filters.http.grpc_json_transcoder + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.grpc_json_transcoder.v3.GrpcJsonTranscoder + auto_mapping: true + proto_descriptor: /usr/local/experiment-secret/experiment_proto_descriptor.pb + services: + - bucketeer.experiment.ExperimentService + print_options: + always_print_primitive_fields: true + - name: envoy.filters.http.grpc_json_transcoder + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.grpc_json_transcoder.v3.GrpcJsonTranscoder + auto_mapping: true + proto_descriptor: /usr/local/feature-secret/feature_proto_descriptor.pb + services: + - bucketeer.feature.FeatureService + print_options: + always_print_primitive_fields: true + - name: envoy.filters.http.grpc_json_transcoder + typed_config: + 
"@type": type.googleapis.com/envoy.extensions.filters.http.grpc_json_transcoder.v3.GrpcJsonTranscoder + auto_mapping: true + proto_descriptor: /usr/local/notification-secret/notification_proto_descriptor.pb + services: + - bucketeer.notification.NotificationService + print_options: + always_print_primitive_fields: true + - name: envoy.filters.http.grpc_json_transcoder + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.grpc_json_transcoder.v3.GrpcJsonTranscoder + auto_mapping: true + proto_descriptor: /usr/local/push-secret/push_proto_descriptor.pb + services: + - bucketeer.push.PushService + print_options: + always_print_primitive_fields: true + - name: envoy.filters.http.grpc_json_transcoder + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.grpc_json_transcoder.v3.GrpcJsonTranscoder + auto_mapping: true + proto_descriptor: /usr/local/user-secret/user_proto_descriptor.pb + services: + - bucketeer.user.UserService + print_options: + always_print_primitive_fields: true + - name: envoy.filters.http.grpc_json_transcoder + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.grpc_json_transcoder.v3.GrpcJsonTranscoder + auto_mapping: true + proto_descriptor: /usr/local/migration-secret/migration_proto_descriptor.pb + services: + - bucketeer.migration.MigrationMySQLService + print_options: + always_print_primitive_fields: true + - name: envoy.filters.http.router + route_config: + virtual_hosts: + - name: web-gateway + domains: + - "*" + cors: + allow_origin_string_match: + - prefix: "*" + allow_headers: "content-type, x-grpc-web, authorization" + allow_methods: "POST" + allow_credentials: true + routes: + - match: + prefix: /bucketeer.account.AccountService + route: + cluster: account + timeout: 15s + retry_policy: + retry_on: 5xx + num_retries: 3 + - match: + prefix: /bucketeer.auditlog.AuditLogService + route: + cluster: auditlog + timeout: 15s + retry_policy: + retry_on: 5xx + num_retries: 3 + - 
match: + prefix: /bucketeer.auth.AuthService + route: + cluster: auth + timeout: 15s + retry_policy: + retry_on: 5xx + num_retries: 3 + - match: + prefix: /bucketeer.autoops.AutoOpsService + route: + cluster: auto-ops + timeout: 15s + retry_policy: + retry_on: 5xx + num_retries: 3 + - match: + prefix: /hook + route: + cluster: auto-ops + timeout: 15s + retry_policy: + retry_on: 5xx + num_retries: 3 + - match: + prefix: /bucketeer.environment.EnvironmentService + route: + cluster: environment + timeout: 60s + retry_policy: + retry_on: 5xx + num_retries: 3 + - match: + prefix: /bucketeer.eventcounter.EventCounterService + route: + cluster: event-counter + timeout: 3600s + retry_policy: + retry_on: 5xx + num_retries: 3 + - match: + prefix: /bucketeer.experiment.ExperimentService + route: + cluster: experiment + timeout: 15s + retry_policy: + retry_on: 5xx + num_retries: 3 + - match: + prefix: /bucketeer.feature.FeatureService + route: + cluster: feature + timeout: 60s + retry_policy: + retry_on: 5xx + num_retries: 3 + - match: + prefix: /bucketeer.notification.NotificationService + route: + cluster: notification + timeout: 15s + retry_policy: + retry_on: 5xx + num_retries: 3 + - match: + prefix: /bucketeer.push.PushService + route: + cluster: push + timeout: 15s + retry_policy: + retry_on: 5xx + num_retries: 3 + - match: + prefix: /bucketeer.user.UserService + route: + cluster: user + timeout: 15s + retry_policy: + retry_on: 5xx + num_retries: 3 + - match: + prefix: /bucketeer.migration.MigrationMySQLService + route: + cluster: migration-mysql + timeout: 600s + retry_policy: + retry_on: 5xx + num_retries: 3 + - match: + prefix: /dex + route: + cluster: dex + timeout: 15s + retry_policy: + retry_on: 5xx + num_retries: 3 + - match: + prefix: / + route: + cluster: web + timeout: 15s + retry_policy: + retry_on: 5xx + num_retries: 3 + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": 
type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + require_client_certificate: true + common_tls_context: + alpn_protocols: ["h2"] + tls_certificates: + - certificate_chain: + filename: /usr/local/certs/bucketeer-jp/tls.crt + private_key: + filename: /usr/local/certs/bucketeer-jp/tls.key diff --git a/manifests/bucketeer/charts/web-gateway/templates/hpa.yaml b/manifests/bucketeer/charts/web-gateway/templates/hpa.yaml new file mode 100644 index 000000000..43bd078bc --- /dev/null +++ b/manifests/bucketeer/charts/web-gateway/templates/hpa.yaml @@ -0,0 +1,19 @@ +{{ if .Values.hpa.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "web-gateway.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "web-gateway.fullname" . }} + minReplicas: {{ .Values.hpa.minReplicas }} + maxReplicas: {{ .Values.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpa.metrics.cpu.targetAverageUtilization }} +{{ end }} diff --git a/manifests/bucketeer/charts/web-gateway/templates/pdb.yaml b/manifests/bucketeer/charts/web-gateway/templates/pdb.yaml new file mode 100644 index 000000000..f08823637 --- /dev/null +++ b/manifests/bucketeer/charts/web-gateway/templates/pdb.yaml @@ -0,0 +1,12 @@ +{{ if .Values.pdb.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "web-gateway.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + selector: + matchLabels: + app: {{ template "web-gateway.name" . 
}} +{{ end }} diff --git a/manifests/bucketeer/charts/web-gateway/templates/secret.yaml b/manifests/bucketeer/charts/web-gateway/templates/secret.yaml new file mode 100644 index 000000000..bfa9c68cd --- /dev/null +++ b/manifests/bucketeer/charts/web-gateway/templates/secret.yaml @@ -0,0 +1,156 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "web-gateway.fullname" . }}-account + namespace: {{ .Values.namespace }} + labels: + app: {{ template "web-gateway.name" . }} + chart: {{ template "web-gateway.chart" . }} + release: {{ template "web-gateway.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + account_proto_descriptor.pb: {{ required "Envoy account descriptor is required" .Values.envoy.accountDescriptor | quote }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "web-gateway.fullname" . }}-auditlog + labels: + app: {{ template "web-gateway.name" . }} + chart: {{ template "web-gateway.chart" . }} + release: {{ template "web-gateway.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + auditlog_proto_descriptor.pb: {{ required "Envoy auditlog descriptor is required" .Values.envoy.auditlogDescriptor | quote }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "web-gateway.fullname" . }}-auth + labels: + app: {{ template "web-gateway.name" . }} + chart: {{ template "web-gateway.chart" . }} + release: {{ template "web-gateway.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + auth_proto_descriptor.pb: {{ required "Envoy auditlog descriptor is required" .Values.envoy.authDescriptor | quote }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "web-gateway.fullname" . }}-autoops + labels: + app: {{ template "web-gateway.name" . }} + chart: {{ template "web-gateway.chart" . }} + release: {{ template "web-gateway.fullname" . 
}} + heritage: {{ .Release.Service }} +type: Opaque +data: + autoops_proto_descriptor.pb: {{ required "Envoy auditlog descriptor is required" .Values.envoy.autoopsDescriptor | quote }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "web-gateway.fullname" . }}-environment + labels: + app: {{ template "web-gateway.name" . }} + chart: {{ template "web-gateway.chart" . }} + release: {{ template "web-gateway.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + environment_proto_descriptor.pb: {{ required "Envoy auditlog descriptor is required" .Values.envoy.environmentDescriptor | quote }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "web-gateway.fullname" . }}-eventcounter + labels: + app: {{ template "web-gateway.name" . }} + chart: {{ template "web-gateway.chart" . }} + release: {{ template "web-gateway.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + eventcounter_proto_descriptor.pb: {{ required "Envoy auditlog descriptor is required" .Values.envoy.eventcounterDescriptor | quote }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "web-gateway.fullname" . }}-experiment + labels: + app: {{ template "web-gateway.name" . }} + chart: {{ template "web-gateway.chart" . }} + release: {{ template "web-gateway.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + experiment_proto_descriptor.pb: {{ required "Envoy auditlog descriptor is required" .Values.envoy.experimentDescriptor | quote }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "web-gateway.fullname" . }}-feature + labels: + app: {{ template "web-gateway.name" . }} + chart: {{ template "web-gateway.chart" . }} + release: {{ template "web-gateway.fullname" . 
}} + heritage: {{ .Release.Service }} +type: Opaque +data: + feature_proto_descriptor.pb: {{ required "Envoy auditlog descriptor is required" .Values.envoy.featureDescriptor | quote }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "web-gateway.fullname" . }}-notification + labels: + app: {{ template "web-gateway.name" . }} + chart: {{ template "web-gateway.chart" . }} + release: {{ template "web-gateway.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + notification_proto_descriptor.pb: {{ required "Envoy auditlog descriptor is required" .Values.envoy.notificationDescriptor | quote }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "web-gateway.fullname" . }}-push + labels: + app: {{ template "web-gateway.name" . }} + chart: {{ template "web-gateway.chart" . }} + release: {{ template "web-gateway.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + push_proto_descriptor.pb: {{ required "Envoy auditlog descriptor is required" .Values.envoy.pushDescriptor | quote }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "web-gateway.fullname" . }}-user + labels: + app: {{ template "web-gateway.name" . }} + chart: {{ template "web-gateway.chart" . }} + release: {{ template "web-gateway.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + user_proto_descriptor.pb: {{ required "Envoy auditlog descriptor is required" .Values.envoy.userDescriptor | quote }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "web-gateway.fullname" . }}-migration + labels: + app: {{ template "web-gateway.name" . }} + chart: {{ template "web-gateway.chart" . }} + release: {{ template "web-gateway.fullname" . 
}} + heritage: {{ .Release.Service }} +type: Opaque +data: + migration_proto_descriptor.pb: {{ required "Envoy migration descriptor is required" .Values.envoy.migrationDescriptor | quote }} diff --git a/manifests/bucketeer/charts/web-gateway/templates/service-cert-secret.yaml b/manifests/bucketeer/charts/web-gateway/templates/service-cert-secret.yaml new file mode 100644 index 000000000..394ca5ef3 --- /dev/null +++ b/manifests/bucketeer/charts/web-gateway/templates/service-cert-secret.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.tls.service.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "web-gateway.fullname" . }}-service-cert + namespace: {{ .Values.namespace }} + labels: + app: {{ template "web-gateway.name" . }} + chart: {{ template "web-gateway.chart" . }} + release: {{ template "web-gateway.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + tls.crt: {{ required "Service TLS certificate is required" .Values.tls.service.cert | b64enc | quote }} + tls.key: {{ required "Service TLS key is required" .Values.tls.service.key | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/manifests/bucketeer/charts/web-gateway/templates/service.yaml b/manifests/bucketeer/charts/web-gateway/templates/service.yaml new file mode 100644 index 000000000..0489f015b --- /dev/null +++ b/manifests/bucketeer/charts/web-gateway/templates/service.yaml @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "web-gateway.fullname" . }} + namespace: {{ .Values.namespace }} + annotations: + cloud.google.com/backend-config: '{"default": "{{ template "web-gateway.fullname" . }}"}' + cloud.google.com/neg: '{"ingress": true}' + labels: + app: {{ template "web-gateway.name" . }} + chart: {{ template "web-gateway.chart" . }} + release: {{ template "web-gateway.fullname" . 
}} + heritage: {{ .Release.Service }} + envoy: "true" +spec: + type: {{ .Values.service.type }} + {{- if eq .Values.service.type "LoadBalancer" }} + {{- if .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- end }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + - port: {{ .Values.envoy.adminPort }} + targetPort: admin + protocol: TCP + name: admin + selector: + app: {{ template "web-gateway.name" . }} + release: {{ template "web-gateway.fullname" . }} diff --git a/manifests/bucketeer/charts/web-gateway/values.yaml b/manifests/bucketeer/charts/web-gateway/values.yaml new file mode 100644 index 000000000..37fd4c3a2 --- /dev/null +++ b/manifests/bucketeer/charts/web-gateway/values.yaml @@ -0,0 +1,64 @@ +fullnameOverride: "web-gateway" + +namespace: + +tls: + bucketeerJP: + secret: + cert: + key: + service: + secret: + cert: + key: + +envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + serviceCluster: bucketeer + adminPort: 8001 + accountDescriptor: 
"CoYECh5nb29nbGUvcHJvdG9idWYvd3JhcHBlcnMucHJvdG8SD2dvb2dsZS5wcm90b2J1ZiIjCgtEb3VibGVWYWx1ZRIUCgV2YWx1ZRgBIAEoAVIFdmFsdWUiIgoKRmxvYXRWYWx1ZRIUCgV2YWx1ZRgBIAEoAlIFdmFsdWUiIgoKSW50NjRWYWx1ZRIUCgV2YWx1ZRgBIAEoA1IFdmFsdWUiIwoLVUludDY0VmFsdWUSFAoFdmFsdWUYASABKARSBXZhbHVlIiIKCkludDMyVmFsdWUSFAoFdmFsdWUYASABKAVSBXZhbHVlIiMKC1VJbnQzMlZhbHVlEhQKBXZhbHVlGAEgASgNUgV2YWx1ZSIhCglCb29sVmFsdWUSFAoFdmFsdWUYASABKAhSBXZhbHVlIiMKC1N0cmluZ1ZhbHVlEhQKBXZhbHVlGAEgASgJUgV2YWx1ZSIiCgpCeXRlc1ZhbHVlEhQKBXZhbHVlGAEgASgMUgV2YWx1ZUKDAQoTY29tLmdvb2dsZS5wcm90b2J1ZkINV3JhcHBlcnNQcm90b1ABWjFnb29nbGUuZ29sYW5nLm9yZy9wcm90b2J1Zi90eXBlcy9rbm93bi93cmFwcGVyc3Bi+AEBogIDR1BCqgIeR29vZ2xlLlByb3RvYnVmLldlbGxLbm93blR5cGVzYgZwcm90bzMK4wIKI3Byb3RvL2Vudmlyb25tZW50L2Vudmlyb25tZW50LnByb3RvEhVidWNrZXRlZXIuZW52aXJvbm1lbnQi7AEKC0Vudmlyb25tZW50Eg4KAmlkGAEgASgJUgJpZBIcCgluYW1lc3BhY2UYAiABKAlSCW5hbWVzcGFjZRIWCgRuYW1lGAMgASgJQgIYAVIEbmFtZRIgCgtkZXNjcmlwdGlvbhgEIAEoCVILZGVzY3JpcHRpb24SGAoHZGVsZXRlZBgFIAEoCFIHZGVsZXRlZBIdCgpjcmVhdGVkX2F0GAYgASgDUgljcmVhdGVkQXQSHQoKdXBkYXRlZF9hdBgHIAEoA1IJdXBkYXRlZEF0Eh0KCnByb2plY3RfaWQYCCABKAlSCXByb2plY3RJZEIuWixnaXRodWIuY29tL2NhLWRwL2J1Y2tldGVlci9wcm90by9lbnZpcm9ubWVudGIGcHJvdG8zCpEFChtwcm90by9hY2NvdW50L2FjY291bnQucHJvdG8SEWJ1Y2tldGVlci5hY2NvdW50GiNwcm90by9lbnZpcm9ubWVudC9lbnZpcm9ubWVudC5wcm90byKnAgoHQWNjb3VudBIOCgJpZBgBIAEoCVICaWQSFAoFZW1haWwYAiABKAlSBWVtYWlsEhIKBG5hbWUYAyABKAlSBG5hbWUSMwoEcm9sZRgEIAEoDjIfLmJ1Y2tldGVlci5hY2NvdW50LkFjY291bnQuUm9sZVIEcm9sZRIaCghkaXNhYmxlZBgFIAEoCFIIZGlzYWJsZWQSHQoKY3JlYXRlZF9hdBgGIAEoA1IJY3JlYXRlZEF0Eh0KCnVwZGF0ZWRfYXQYByABKANSCXVwZGF0ZWRBdBIYCgdkZWxldGVkGAggASgIUgdkZWxldGVkIjkKBFJvbGUSCgoGVklFV0VSEAASCgoGRURJVE9SEAESCQoFT1dORVIQAhIOCgpVTkFTU0lHTkVEEGMi2wEKD0Vudmlyb25tZW50Um9sZRJECgtlbnZpcm9ubWVudBgBIAEoCzIiLmJ1Y2tldGVlci5lbnZpcm9ubWVudC5FbnZpcm9ubWVudFILZW52aXJvbm1lbnQSMwoEcm9sZRgCIAEoDjIfLmJ1Y2tldGVlci5hY2NvdW50LkFjY291bnQuUm9sZVIEcm9sZRIjCg10cmlhbF9wcm9qZWN0GAMgASgIUgx0cmlhbFByb2plY3QSKAoQdHJpYWxfc3RhcnRlZF9hdBgEIAEoA1IOdHJpYWxTdGFydGVkQXRCKlooZ2l0aHViLmN
vbS9jYS1kcC9idWNrZXRlZXIvcHJvdG8vYWNjb3VudGIGcHJvdG8zCvEDChtwcm90by9hY2NvdW50L2FwaV9rZXkucHJvdG8SEWJ1Y2tldGVlci5hY2NvdW50ItgBCgZBUElLZXkSDgoCaWQYASABKAlSAmlkEhIKBG5hbWUYAiABKAlSBG5hbWUSMgoEcm9sZRgDIAEoDjIeLmJ1Y2tldGVlci5hY2NvdW50LkFQSUtleS5Sb2xlUgRyb2xlEhoKCGRpc2FibGVkGAQgASgIUghkaXNhYmxlZBIdCgpjcmVhdGVkX2F0GAUgASgDUgljcmVhdGVkQXQSHQoKdXBkYXRlZF9hdBgGIAEoA1IJdXBkYXRlZEF0IhwKBFJvbGUSBwoDU0RLEAASCwoHU0VSVklDRRABIq8BChFFbnZpcm9ubWVudEFQSUtleRIzChVlbnZpcm9ubWVudF9uYW1lc3BhY2UYASABKAlSFGVudmlyb25tZW50TmFtZXNwYWNlEjIKB2FwaV9rZXkYAiABKAsyGS5idWNrZXRlZXIuYWNjb3VudC5BUElLZXlSBmFwaUtleRIxChRlbnZpcm9ubWVudF9kaXNhYmxlZBgDIAEoCFITZW52aXJvbm1lbnREaXNhYmxlZEIqWihnaXRodWIuY29tL2NhLWRwL2J1Y2tldGVlci9wcm90by9hY2NvdW50YgZwcm90bzMK3wUKG3Byb3RvL2FjY291bnQvY29tbWFuZC5wcm90bxIRYnVja2V0ZWVyLmFjY291bnQaG3Byb3RvL2FjY291bnQvYWNjb3VudC5wcm90bxobcHJvdG8vYWNjb3VudC9hcGlfa2V5LnByb3RvIjEKGUNyZWF0ZUFkbWluQWNjb3VudENvbW1hbmQSFAoFZW1haWwYASABKAlSBWVtYWlsIhsKGUVuYWJsZUFkbWluQWNjb3VudENvbW1hbmQiHAoaRGlzYWJsZUFkbWluQWNjb3VudENvbW1hbmQiFwoVQ29udmVydEFjY291bnRDb21tYW5kIhYKFERlbGV0ZUFjY291bnRDb21tYW5kImEKFENyZWF0ZUFjY291bnRDb21tYW5kEhQKBWVtYWlsGAEgASgJUgVlbWFpbBIzCgRyb2xlGAIgASgOMh8uYnVja2V0ZWVyLmFjY291bnQuQWNjb3VudC5Sb2xlUgRyb2xlIk8KGENoYW5nZUFjY291bnRSb2xlQ29tbWFuZBIzCgRyb2xlGAEgASgOMh8uYnVja2V0ZWVyLmFjY291bnQuQWNjb3VudC5Sb2xlUgRyb2xlIhYKFEVuYWJsZUFjY291bnRDb21tYW5kIhcKFURpc2FibGVBY2NvdW50Q29tbWFuZCJdChNDcmVhdGVBUElLZXlDb21tYW5kEhIKBG5hbWUYASABKAlSBG5hbWUSMgoEcm9sZRgCIAEoDjIeLmJ1Y2tldGVlci5hY2NvdW50LkFQSUtleS5Sb2xlUgRyb2xlIi0KF0NoYW5nZUFQSUtleU5hbWVDb21tYW5kEhIKBG5hbWUYASABKAlSBG5hbWUiFQoTRW5hYmxlQVBJS2V5Q29tbWFuZCIWChREaXNhYmxlQVBJS2V5Q29tbWFuZEIqWihnaXRodWIuY29tL2NhLWRwL2J1Y2tldGVlci9wcm90by9hY2NvdW50YgZwcm90bzMKyDoKG3Byb3RvL2FjY291bnQvc2VydmljZS5wcm90bxIRYnVja2V0ZWVyLmFjY291bnQaHmdvb2dsZS9wcm90b2J1Zi93cmFwcGVycy5wcm90bxobcHJvdG8vYWNjb3VudC9hY2NvdW50LnByb3RvGhtwcm90by9hY2NvdW50L2FwaV9rZXkucHJvdG8aG3Byb3RvL2FjY291bnQvY29tbWFuZC5wcm90byIOCgxHZXRNZVJlcXVlc3QiKwoTR2V0TWVCeUVtYWlsUmVxdWVzdBIUCgVlbWFpbBgBIAE
oCVIFZW1haWwizQIKDUdldE1lUmVzcG9uc2USOAoHYWNjb3VudBgBIAEoCzIaLmJ1Y2tldGVlci5hY2NvdW50LkFjY291bnRCAhgBUgdhY2NvdW50EhQKBWVtYWlsGAIgASgJUgVlbWFpbBIZCghpc19hZG1pbhgDIAEoCFIHaXNBZG1pbhJCCgphZG1pbl9yb2xlGAQgASgOMh8uYnVja2V0ZWVyLmFjY291bnQuQWNjb3VudC5Sb2xlQgIYAVIJYWRtaW5Sb2xlEh4KCGRpc2FibGVkGAUgASgIQgIYAVIIZGlzYWJsZWQSTwoRZW52aXJvbm1lbnRfcm9sZXMYBiADKAsyIi5idWNrZXRlZXIuYWNjb3VudC5FbnZpcm9ubWVudFJvbGVSEGVudmlyb25tZW50Um9sZXMSHAoHZGVsZXRlZBgHIAEoCEICGAFSB2RlbGV0ZWQiYwoZQ3JlYXRlQWRtaW5BY2NvdW50UmVxdWVzdBJGCgdjb21tYW5kGAEgASgLMiwuYnVja2V0ZWVyLmFjY291bnQuQ3JlYXRlQWRtaW5BY2NvdW50Q29tbWFuZFIHY29tbWFuZCIcChpDcmVhdGVBZG1pbkFjY291bnRSZXNwb25zZSJzChlFbmFibGVBZG1pbkFjY291bnRSZXF1ZXN0Eg4KAmlkGAEgASgJUgJpZBJGCgdjb21tYW5kGAIgASgLMiwuYnVja2V0ZWVyLmFjY291bnQuRW5hYmxlQWRtaW5BY2NvdW50Q29tbWFuZFIHY29tbWFuZCIcChpFbmFibGVBZG1pbkFjY291bnRSZXNwb25zZSJ1ChpEaXNhYmxlQWRtaW5BY2NvdW50UmVxdWVzdBIOCgJpZBgBIAEoCVICaWQSRwoHY29tbWFuZBgCIAEoCzItLmJ1Y2tldGVlci5hY2NvdW50LkRpc2FibGVBZG1pbkFjY291bnRDb21tYW5kUgdjb21tYW5kIh0KG0Rpc2FibGVBZG1pbkFjY291bnRSZXNwb25zZSIuChZHZXRBZG1pbkFjY291bnRSZXF1ZXN0EhQKBWVtYWlsGAEgASgJUgVlbWFpbCJPChdHZXRBZG1pbkFjY291bnRSZXNwb25zZRI0CgdhY2NvdW50GAEgASgLMhouYnVja2V0ZWVyLmFjY291bnQuQWNjb3VudFIHYWNjb3VudCLLAwoYTGlzdEFkbWluQWNjb3VudHNSZXF1ZXN0EhsKCXBhZ2Vfc2l6ZRgBIAEoA1IIcGFnZVNpemUSFgoGY3Vyc29yGAIgASgJUgZjdXJzb3ISTgoIb3JkZXJfYnkYAyABKA4yMy5idWNrZXRlZXIuYWNjb3VudC5MaXN0QWRtaW5BY2NvdW50c1JlcXVlc3QuT3JkZXJCeVIHb3JkZXJCeRJjCg9vcmRlcl9kaXJlY3Rpb24YBCABKA4yOi5idWNrZXRlZXIuYWNjb3VudC5MaXN0QWRtaW5BY2NvdW50c1JlcXVlc3QuT3JkZXJEaXJlY3Rpb25SDm9yZGVyRGlyZWN0aW9uEiUKDnNlYXJjaF9rZXl3b3JkGAUgASgJUg1zZWFyY2hLZXl3b3JkEjYKCGRpc2FibGVkGAYgASgLMhouZ29vZ2xlLnByb3RvYnVmLkJvb2xWYWx1ZVIIZGlzYWJsZWQiQQoHT3JkZXJCeRILCgdERUZBVUxUEAASCQoFRU1BSUwQARIOCgpDUkVBVEVEX0FUEAISDgoKVVBEQVRFRF9BVBADIiMKDk9yZGVyRGlyZWN0aW9uEgcKA0FTQxAAEggKBERFU0MQASKMAQoZTGlzdEFkbWluQWNjb3VudHNSZXNwb25zZRI2CghhY2NvdW50cxgBIAMoCzIaLmJ1Y2tldGVlci5hY2NvdW50LkFjY291bnRSCGFjY291bnRzEhYKBmN1cnNvchgCIAEoCVIGY3Vyc29yEh8KC3RvdGFsX2NvdW50GAMgASgDUgp0b3R
hbENvdW50ImsKFUNvbnZlcnRBY2NvdW50UmVxdWVzdBIOCgJpZBgBIAEoCVICaWQSQgoHY29tbWFuZBgCIAEoCzIoLmJ1Y2tldGVlci5hY2NvdW50LkNvbnZlcnRBY2NvdW50Q29tbWFuZFIHY29tbWFuZCIYChZDb252ZXJ0QWNjb3VudFJlc3BvbnNlIo4BChRDcmVhdGVBY2NvdW50UmVxdWVzdBJBCgdjb21tYW5kGAEgASgLMicuYnVja2V0ZWVyLmFjY291bnQuQ3JlYXRlQWNjb3VudENvbW1hbmRSB2NvbW1hbmQSMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAIgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZSIXChVDcmVhdGVBY2NvdW50UmVzcG9uc2UingEKFEVuYWJsZUFjY291bnRSZXF1ZXN0Eg4KAmlkGAEgASgJUgJpZBJBCgdjb21tYW5kGAIgASgLMicuYnVja2V0ZWVyLmFjY291bnQuRW5hYmxlQWNjb3VudENvbW1hbmRSB2NvbW1hbmQSMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAMgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZSIXChVFbmFibGVBY2NvdW50UmVzcG9uc2UioAEKFURpc2FibGVBY2NvdW50UmVxdWVzdBIOCgJpZBgBIAEoCVICaWQSQgoHY29tbWFuZBgCIAEoCzIoLmJ1Y2tldGVlci5hY2NvdW50LkRpc2FibGVBY2NvdW50Q29tbWFuZFIHY29tbWFuZBIzChVlbnZpcm9ubWVudF9uYW1lc3BhY2UYAyABKAlSFGVudmlyb25tZW50TmFtZXNwYWNlIhgKFkRpc2FibGVBY2NvdW50UmVzcG9uc2UipgEKGENoYW5nZUFjY291bnRSb2xlUmVxdWVzdBIOCgJpZBgBIAEoCVICaWQSRQoHY29tbWFuZBgCIAEoCzIrLmJ1Y2tldGVlci5hY2NvdW50LkNoYW5nZUFjY291bnRSb2xlQ29tbWFuZFIHY29tbWFuZBIzChVlbnZpcm9ubWVudF9uYW1lc3BhY2UYAyABKAlSFGVudmlyb25tZW50TmFtZXNwYWNlIhsKGUNoYW5nZUFjY291bnRSb2xlUmVzcG9uc2UiXgoRR2V0QWNjb3VudFJlcXVlc3QSFAoFZW1haWwYASABKAlSBWVtYWlsEjMKFWVudmlyb25tZW50X25hbWVzcGFjZRgCIAEoCVIUZW52aXJvbm1lbnROYW1lc3BhY2UiSgoSR2V0QWNjb3VudFJlc3BvbnNlEjQKB2FjY291bnQYASABKAsyGi5idWNrZXRlZXIuYWNjb3VudC5BY2NvdW50UgdhY2NvdW50IqIEChNMaXN0QWNjb3VudHNSZXF1ZXN0EhsKCXBhZ2Vfc2l6ZRgBIAEoA1IIcGFnZVNpemUSFgoGY3Vyc29yGAIgASgJUgZjdXJzb3ISMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAMgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZRJJCghvcmRlcl9ieRgEIAEoDjIuLmJ1Y2tldGVlci5hY2NvdW50Lkxpc3RBY2NvdW50c1JlcXVlc3QuT3JkZXJCeVIHb3JkZXJCeRJeCg9vcmRlcl9kaXJlY3Rpb24YBSABKA4yNS5idWNrZXRlZXIuYWNjb3VudC5MaXN0QWNjb3VudHNSZXF1ZXN0Lk9yZGVyRGlyZWN0aW9uUg5vcmRlckRpcmVjdGlvbhIlCg5zZWFyY2hfa2V5d29yZBgGIAEoCVINc2VhcmNoS2V5d29yZBI2CghkaXNhYmxlZBgHIAEoCzIaLmdvb2dsZS5wcm90b2J1Zi5Cb29sVmFsdWVSCGRpc2FibGVkEi8KBHJvbGUYCCABKAsyGy5nb29nbGUucHJvdG9idWYuSW50MzJWYWx1ZVIEcm9
sZSJBCgdPcmRlckJ5EgsKB0RFRkFVTFQQABIJCgVFTUFJTBABEg4KCkNSRUFURURfQVQQAhIOCgpVUERBVEVEX0FUEAMiIwoOT3JkZXJEaXJlY3Rpb24SBwoDQVNDEAASCAoEREVTQxABIocBChRMaXN0QWNjb3VudHNSZXNwb25zZRI2CghhY2NvdW50cxgBIAMoCzIaLmJ1Y2tldGVlci5hY2NvdW50LkFjY291bnRSCGFjY291bnRzEhYKBmN1cnNvchgCIAEoCVIGY3Vyc29yEh8KC3RvdGFsX2NvdW50GAMgASgDUgp0b3RhbENvdW50IowBChNDcmVhdGVBUElLZXlSZXF1ZXN0EkAKB2NvbW1hbmQYASABKAsyJi5idWNrZXRlZXIuYWNjb3VudC5DcmVhdGVBUElLZXlDb21tYW5kUgdjb21tYW5kEjMKFWVudmlyb25tZW50X25hbWVzcGFjZRgCIAEoCVIUZW52aXJvbm1lbnROYW1lc3BhY2UiSgoUQ3JlYXRlQVBJS2V5UmVzcG9uc2USMgoHYXBpX2tleRgBIAEoCzIZLmJ1Y2tldGVlci5hY2NvdW50LkFQSUtleVIGYXBpS2V5IqQBChdDaGFuZ2VBUElLZXlOYW1lUmVxdWVzdBIOCgJpZBgBIAEoCVICaWQSRAoHY29tbWFuZBgCIAEoCzIqLmJ1Y2tldGVlci5hY2NvdW50LkNoYW5nZUFQSUtleU5hbWVDb21tYW5kUgdjb21tYW5kEjMKFWVudmlyb25tZW50X25hbWVzcGFjZRgDIAEoCVIUZW52aXJvbm1lbnROYW1lc3BhY2UiGgoYQ2hhbmdlQVBJS2V5TmFtZVJlc3BvbnNlIpwBChNFbmFibGVBUElLZXlSZXF1ZXN0Eg4KAmlkGAEgASgJUgJpZBJACgdjb21tYW5kGAIgASgLMiYuYnVja2V0ZWVyLmFjY291bnQuRW5hYmxlQVBJS2V5Q29tbWFuZFIHY29tbWFuZBIzChVlbnZpcm9ubWVudF9uYW1lc3BhY2UYAyABKAlSFGVudmlyb25tZW50TmFtZXNwYWNlIhYKFEVuYWJsZUFQSUtleVJlc3BvbnNlIp4BChREaXNhYmxlQVBJS2V5UmVxdWVzdBIOCgJpZBgBIAEoCVICaWQSQQoHY29tbWFuZBgCIAEoCzInLmJ1Y2tldGVlci5hY2NvdW50LkRpc2FibGVBUElLZXlDb21tYW5kUgdjb21tYW5kEjMKFWVudmlyb25tZW50X25hbWVzcGFjZRgDIAEoCVIUZW52aXJvbm1lbnROYW1lc3BhY2UiFwoVRGlzYWJsZUFQSUtleVJlc3BvbnNlIlcKEEdldEFQSUtleVJlcXVlc3QSDgoCaWQYASABKAlSAmlkEjMKFWVudmlyb25tZW50X25hbWVzcGFjZRgCIAEoCVIUZW52aXJvbm1lbnROYW1lc3BhY2UiRwoRR2V0QVBJS2V5UmVzcG9uc2USMgoHYXBpX2tleRgBIAEoCzIZLmJ1Y2tldGVlci5hY2NvdW50LkFQSUtleVIGYXBpS2V5Iu0DChJMaXN0QVBJS2V5c1JlcXVlc3QSGwoJcGFnZV9zaXplGAEgASgDUghwYWdlU2l6ZRIWCgZjdXJzb3IYAiABKAlSBmN1cnNvchIzChVlbnZpcm9ubWVudF9uYW1lc3BhY2UYAyABKAlSFGVudmlyb25tZW50TmFtZXNwYWNlEkgKCG9yZGVyX2J5GAQgASgOMi0uYnVja2V0ZWVyLmFjY291bnQuTGlzdEFQSUtleXNSZXF1ZXN0Lk9yZGVyQnlSB29yZGVyQnkSXQoPb3JkZXJfZGlyZWN0aW9uGAUgASgOMjQuYnVja2V0ZWVyLmFjY291bnQuTGlzdEFQSUtleXNSZXF1ZXN0Lk9yZGVyRGlyZWN0aW9uUg5vcmRlckRpcmVjdGlvbhIlCg5zZWFyY2hfa2V
5d29yZBgGIAEoCVINc2VhcmNoS2V5d29yZBI2CghkaXNhYmxlZBgHIAEoCzIaLmdvb2dsZS5wcm90b2J1Zi5Cb29sVmFsdWVSCGRpc2FibGVkIkAKB09yZGVyQnkSCwoHREVGQVVMVBAAEggKBE5BTUUQARIOCgpDUkVBVEVEX0FUEAISDgoKVVBEQVRFRF9BVBADIiMKDk9yZGVyRGlyZWN0aW9uEgcKA0FTQxAAEggKBERFU0MQASKEAQoTTGlzdEFQSUtleXNSZXNwb25zZRI0CghhcGlfa2V5cxgBIAMoCzIZLmJ1Y2tldGVlci5hY2NvdW50LkFQSUtleVIHYXBpS2V5cxIWCgZjdXJzb3IYAiABKAlSBmN1cnNvchIfCgt0b3RhbF9jb3VudBgDIAEoA1IKdG90YWxDb3VudCI8CipHZXRBUElLZXlCeVNlYXJjaGluZ0FsbEVudmlyb25tZW50c1JlcXVlc3QSDgoCaWQYASABKAlSAmlkIoMBCitHZXRBUElLZXlCeVNlYXJjaGluZ0FsbEVudmlyb25tZW50c1Jlc3BvbnNlElQKE2Vudmlyb25tZW50X2FwaV9rZXkYASABKAsyJC5idWNrZXRlZXIuYWNjb3VudC5FbnZpcm9ubWVudEFQSUtleVIRZW52aXJvbm1lbnRBcGlLZXkynhEKDkFjY291bnRTZXJ2aWNlEkoKBUdldE1lEh8uYnVja2V0ZWVyLmFjY291bnQuR2V0TWVSZXF1ZXN0GiAuYnVja2V0ZWVyLmFjY291bnQuR2V0TWVSZXNwb25zZRJYCgxHZXRNZUJ5RW1haWwSJi5idWNrZXRlZXIuYWNjb3VudC5HZXRNZUJ5RW1haWxSZXF1ZXN0GiAuYnVja2V0ZWVyLmFjY291bnQuR2V0TWVSZXNwb25zZRJxChJDcmVhdGVBZG1pbkFjY291bnQSLC5idWNrZXRlZXIuYWNjb3VudC5DcmVhdGVBZG1pbkFjY291bnRSZXF1ZXN0Gi0uYnVja2V0ZWVyLmFjY291bnQuQ3JlYXRlQWRtaW5BY2NvdW50UmVzcG9uc2UScQoSRW5hYmxlQWRtaW5BY2NvdW50EiwuYnVja2V0ZWVyLmFjY291bnQuRW5hYmxlQWRtaW5BY2NvdW50UmVxdWVzdBotLmJ1Y2tldGVlci5hY2NvdW50LkVuYWJsZUFkbWluQWNjb3VudFJlc3BvbnNlEnQKE0Rpc2FibGVBZG1pbkFjY291bnQSLS5idWNrZXRlZXIuYWNjb3VudC5EaXNhYmxlQWRtaW5BY2NvdW50UmVxdWVzdBouLmJ1Y2tldGVlci5hY2NvdW50LkRpc2FibGVBZG1pbkFjY291bnRSZXNwb25zZRJoCg9HZXRBZG1pbkFjY291bnQSKS5idWNrZXRlZXIuYWNjb3VudC5HZXRBZG1pbkFjY291bnRSZXF1ZXN0GiouYnVja2V0ZWVyLmFjY291bnQuR2V0QWRtaW5BY2NvdW50UmVzcG9uc2USbgoRTGlzdEFkbWluQWNjb3VudHMSKy5idWNrZXRlZXIuYWNjb3VudC5MaXN0QWRtaW5BY2NvdW50c1JlcXVlc3QaLC5idWNrZXRlZXIuYWNjb3VudC5MaXN0QWRtaW5BY2NvdW50c1Jlc3BvbnNlEmUKDkNvbnZlcnRBY2NvdW50EiguYnVja2V0ZWVyLmFjY291bnQuQ29udmVydEFjY291bnRSZXF1ZXN0GikuYnVja2V0ZWVyLmFjY291bnQuQ29udmVydEFjY291bnRSZXNwb25zZRJiCg1DcmVhdGVBY2NvdW50EicuYnVja2V0ZWVyLmFjY291bnQuQ3JlYXRlQWNjb3VudFJlcXVlc3QaKC5idWNrZXRlZXIuYWNjb3VudC5DcmVhdGVBY2NvdW50UmVzcG9uc2USYgoNRW5hYmxlQWNjb3VudBInLmJ1Y2tldGVlci5
hY2NvdW50LkVuYWJsZUFjY291bnRSZXF1ZXN0GiguYnVja2V0ZWVyLmFjY291bnQuRW5hYmxlQWNjb3VudFJlc3BvbnNlEmUKDkRpc2FibGVBY2NvdW50EiguYnVja2V0ZWVyLmFjY291bnQuRGlzYWJsZUFjY291bnRSZXF1ZXN0GikuYnVja2V0ZWVyLmFjY291bnQuRGlzYWJsZUFjY291bnRSZXNwb25zZRJuChFDaGFuZ2VBY2NvdW50Um9sZRIrLmJ1Y2tldGVlci5hY2NvdW50LkNoYW5nZUFjY291bnRSb2xlUmVxdWVzdBosLmJ1Y2tldGVlci5hY2NvdW50LkNoYW5nZUFjY291bnRSb2xlUmVzcG9uc2USWQoKR2V0QWNjb3VudBIkLmJ1Y2tldGVlci5hY2NvdW50LkdldEFjY291bnRSZXF1ZXN0GiUuYnVja2V0ZWVyLmFjY291bnQuR2V0QWNjb3VudFJlc3BvbnNlEl8KDExpc3RBY2NvdW50cxImLmJ1Y2tldGVlci5hY2NvdW50Lkxpc3RBY2NvdW50c1JlcXVlc3QaJy5idWNrZXRlZXIuYWNjb3VudC5MaXN0QWNjb3VudHNSZXNwb25zZRJfCgxDcmVhdGVBUElLZXkSJi5idWNrZXRlZXIuYWNjb3VudC5DcmVhdGVBUElLZXlSZXF1ZXN0GicuYnVja2V0ZWVyLmFjY291bnQuQ3JlYXRlQVBJS2V5UmVzcG9uc2USawoQQ2hhbmdlQVBJS2V5TmFtZRIqLmJ1Y2tldGVlci5hY2NvdW50LkNoYW5nZUFQSUtleU5hbWVSZXF1ZXN0GisuYnVja2V0ZWVyLmFjY291bnQuQ2hhbmdlQVBJS2V5TmFtZVJlc3BvbnNlEl8KDEVuYWJsZUFQSUtleRImLmJ1Y2tldGVlci5hY2NvdW50LkVuYWJsZUFQSUtleVJlcXVlc3QaJy5idWNrZXRlZXIuYWNjb3VudC5FbmFibGVBUElLZXlSZXNwb25zZRJiCg1EaXNhYmxlQVBJS2V5EicuYnVja2V0ZWVyLmFjY291bnQuRGlzYWJsZUFQSUtleVJlcXVlc3QaKC5idWNrZXRlZXIuYWNjb3VudC5EaXNhYmxlQVBJS2V5UmVzcG9uc2USVgoJR2V0QVBJS2V5EiMuYnVja2V0ZWVyLmFjY291bnQuR2V0QVBJS2V5UmVxdWVzdBokLmJ1Y2tldGVlci5hY2NvdW50LkdldEFQSUtleVJlc3BvbnNlElwKC0xpc3RBUElLZXlzEiUuYnVja2V0ZWVyLmFjY291bnQuTGlzdEFQSUtleXNSZXF1ZXN0GiYuYnVja2V0ZWVyLmFjY291bnQuTGlzdEFQSUtleXNSZXNwb25zZRKkAQojR2V0QVBJS2V5QnlTZWFyY2hpbmdBbGxFbnZpcm9ubWVudHMSPS5idWNrZXRlZXIuYWNjb3VudC5HZXRBUElLZXlCeVNlYXJjaGluZ0FsbEVudmlyb25tZW50c1JlcXVlc3QaPi5idWNrZXRlZXIuYWNjb3VudC5HZXRBUElLZXlCeVNlYXJjaGluZ0FsbEVudmlyb25tZW50c1Jlc3BvbnNlQipaKGdpdGh1Yi5jb20vY2EtZHAvYnVja2V0ZWVyL3Byb3RvL2FjY291bnRiBnByb3RvMw==" + auditlogDescriptor: 
"CoYECh5nb29nbGUvcHJvdG9idWYvd3JhcHBlcnMucHJvdG8SD2dvb2dsZS5wcm90b2J1ZiIjCgtEb3VibGVWYWx1ZRIUCgV2YWx1ZRgBIAEoAVIFdmFsdWUiIgoKRmxvYXRWYWx1ZRIUCgV2YWx1ZRgBIAEoAlIFdmFsdWUiIgoKSW50NjRWYWx1ZRIUCgV2YWx1ZRgBIAEoA1IFdmFsdWUiIwoLVUludDY0VmFsdWUSFAoFdmFsdWUYASABKARSBXZhbHVlIiIKCkludDMyVmFsdWUSFAoFdmFsdWUYASABKAVSBXZhbHVlIiMKC1VJbnQzMlZhbHVlEhQKBXZhbHVlGAEgASgNUgV2YWx1ZSIhCglCb29sVmFsdWUSFAoFdmFsdWUYASABKAhSBXZhbHVlIiMKC1N0cmluZ1ZhbHVlEhQKBXZhbHVlGAEgASgJUgV2YWx1ZSIiCgpCeXRlc1ZhbHVlEhQKBXZhbHVlGAEgASgMUgV2YWx1ZUKDAQoTY29tLmdvb2dsZS5wcm90b2J1ZkINV3JhcHBlcnNQcm90b1ABWjFnb29nbGUuZ29sYW5nLm9yZy9wcm90b2J1Zi90eXBlcy9rbm93bi93cmFwcGVyc3Bi+AEBogIDR1BCqgIeR29vZ2xlLlByb3RvYnVmLldlbGxLbm93blR5cGVzYgZwcm90bzMK5AEKGWdvb2dsZS9wcm90b2J1Zi9hbnkucHJvdG8SD2dvb2dsZS5wcm90b2J1ZiI2CgNBbnkSGQoIdHlwZV91cmwYASABKAlSB3R5cGVVcmwSFAoFdmFsdWUYAiABKAxSBXZhbHVlQnYKE2NvbS5nb29nbGUucHJvdG9idWZCCEFueVByb3RvUAFaLGdvb2dsZS5nb2xhbmcub3JnL3Byb3RvYnVmL3R5cGVzL2tub3duL2FueXBiogIDR1BCqgIeR29vZ2xlLlByb3RvYnVmLldlbGxLbm93blR5cGVzYgZwcm90bzMKmQMKGnByb3RvL2ZlYXR1cmUvY2xhdXNlLnByb3RvEhFidWNrZXRlZXIuZmVhdHVyZSKzAgoGQ2xhdXNlEg4KAmlkGAEgASgJUgJpZBIcCglhdHRyaWJ1dGUYAiABKAlSCWF0dHJpYnV0ZRI+CghvcGVyYXRvchgDIAEoDjIiLmJ1Y2tldGVlci5mZWF0dXJlLkNsYXVzZS5PcGVyYXRvclIIb3BlcmF0b3ISFgoGdmFsdWVzGAQgAygJUgZ2YWx1ZXMiogEKCE9wZXJhdG9yEgoKBkVRVUFMUxAAEgYKAklOEAESDQoJRU5EU19XSVRIEAISDwoLU1RBUlRTX1dJVEgQAxILCgdTRUdNRU5UEAQSCwoHR1JFQVRFUhAFEhQKEEdSRUFURVJfT1JfRVFVQUwQBhIICgRMRVNTEAcSEQoNTEVTU19PUl9FUVVBTBAIEgoKBkJFRk9SRRAJEgkKBUFGVEVSEApCKlooZ2l0aHViLmNvbS9jYS1kcC9idWNrZXRlZXIvcHJvdG8vZmVhdHVyZWIGcHJvdG8zCrQEChxwcm90by9mZWF0dXJlL3N0cmF0ZWd5LnByb3RvEhFidWNrZXRlZXIuZmVhdHVyZSItCg1GaXhlZFN0cmF0ZWd5EhwKCXZhcmlhdGlvbhgBIAEoCVIJdmFyaWF0aW9uIqIBCg9Sb2xsb3V0U3RyYXRlZ3kSTAoKdmFyaWF0aW9ucxgBIAMoCzIsLmJ1Y2tldGVlci5mZWF0dXJlLlJvbGxvdXRTdHJhdGVneS5WYXJpYXRpb25SCnZhcmlhdGlvbnMaQQoJVmFyaWF0aW9uEhwKCXZhcmlhdGlvbhgBIAEoCVIJdmFyaWF0aW9uEhYKBndlaWdodBgCIAEoBVIGd2VpZ2h0IvgBCghTdHJhdGVneRI0CgR0eXBlGAEgASgOMiAuYnVja2V0ZWVyLmZlYXR1cmUuU3RyYXRlZ3kuVHlwZVIEdHlwZRJHCg5
maXhlZF9zdHJhdGVneRgCIAEoCzIgLmJ1Y2tldGVlci5mZWF0dXJlLkZpeGVkU3RyYXRlZ3lSDWZpeGVkU3RyYXRlZ3kSTQoQcm9sbG91dF9zdHJhdGVneRgDIAEoCzIiLmJ1Y2tldGVlci5mZWF0dXJlLlJvbGxvdXRTdHJhdGVneVIPcm9sbG91dFN0cmF0ZWd5Ih4KBFR5cGUSCQoFRklYRUQQABILCgdST0xMT1VUEAFCKlooZ2l0aHViLmNvbS9jYS1kcC9idWNrZXRlZXIvcHJvdG8vZmVhdHVyZWIGcHJvdG8zCqICChhwcm90by9mZWF0dXJlL3J1bGUucHJvdG8SEWJ1Y2tldGVlci5mZWF0dXJlGhpwcm90by9mZWF0dXJlL2NsYXVzZS5wcm90bxoccHJvdG8vZmVhdHVyZS9zdHJhdGVneS5wcm90byKEAQoEUnVsZRIOCgJpZBgBIAEoCVICaWQSNwoIc3RyYXRlZ3kYAiABKAsyGy5idWNrZXRlZXIuZmVhdHVyZS5TdHJhdGVneVIIc3RyYXRlZ3kSMwoHY2xhdXNlcxgDIAMoCzIZLmJ1Y2tldGVlci5mZWF0dXJlLkNsYXVzZVIHY2xhdXNlc0IqWihnaXRodWIuY29tL2NhLWRwL2J1Y2tldGVlci9wcm90by9mZWF0dXJlYgZwcm90bzMKoQEKGnByb3RvL2ZlYXR1cmUvdGFyZ2V0LnByb3RvEhFidWNrZXRlZXIuZmVhdHVyZSI8CgZUYXJnZXQSHAoJdmFyaWF0aW9uGAEgASgJUgl2YXJpYXRpb24SFAoFdXNlcnMYAiADKAlSBXVzZXJzQipaKGdpdGh1Yi5jb20vY2EtZHAvYnVja2V0ZWVyL3Byb3RvL2ZlYXR1cmViBnByb3RvMwrPAQodcHJvdG8vZmVhdHVyZS92YXJpYXRpb24ucHJvdG8SEWJ1Y2tldGVlci5mZWF0dXJlImcKCVZhcmlhdGlvbhIOCgJpZBgBIAEoCVICaWQSFAoFdmFsdWUYAiABKAlSBXZhbHVlEhIKBG5hbWUYAyABKAlSBG5hbWUSIAoLZGVzY3JpcHRpb24YBCABKAlSC2Rlc2NyaXB0aW9uQipaKGdpdGh1Yi5jb20vY2EtZHAvYnVja2V0ZWVyL3Byb3RvL2ZlYXR1cmViBnByb3RvMwrtAgoqcHJvdG8vZmVhdHVyZS9mZWF0dXJlX2xhc3RfdXNlZF9pbmZvLnByb3RvEhFidWNrZXRlZXIuZmVhdHVyZSL3AQoTRmVhdHVyZUxhc3RVc2VkSW5mbxIdCgpmZWF0dXJlX2lkGAEgASgJUglmZWF0dXJlSWQSGAoHdmVyc2lvbhgCIAEoBVIHdmVyc2lvbhIgCgxsYXN0X3VzZWRfYXQYAyABKANSCmxhc3RVc2VkQXQSHQoKY3JlYXRlZF9hdBgEIAEoA1IJY3JlYXRlZEF0EjIKFWNsaWVudF9vbGRlc3RfdmVyc2lvbhgFIAEoCVITY2xpZW50T2xkZXN0VmVyc2lvbhIyChVjbGllbnRfbGF0ZXN0X3ZlcnNpb24YBiABKAlSE2NsaWVudExhdGVzdFZlcnNpb25CKlooZ2l0aHViLmNvbS9jYS1kcC9idWNrZXRlZXIvcHJvdG8vZmVhdHVyZWIGcHJvdG8zCrsBCiBwcm90by9mZWF0dXJlL3ByZXJlcXVpc2l0ZS5wcm90bxIRYnVja2V0ZWVyLmZlYXR1cmUiUAoMUHJlcmVxdWlzaXRlEh0KCmZlYXR1cmVfaWQYASABKAlSCWZlYXR1cmVJZBIhCgx2YXJpYXRpb25faWQYAiABKAlSC3ZhcmlhdGlvbklkQipaKGdpdGh1Yi5jb20vY2EtZHAvYnVja2V0ZWVyL3Byb3RvL2ZlYXR1cmViBnByb3RvMwqTCwobcHJvdG8vZmVhdHVyZS9mZWF0dXJlLnByb3RvEhFidWNrZXRlZXI
uZmVhdHVyZRoYcHJvdG8vZmVhdHVyZS9ydWxlLnByb3RvGhpwcm90by9mZWF0dXJlL3RhcmdldC5wcm90bxodcHJvdG8vZmVhdHVyZS92YXJpYXRpb24ucHJvdG8aHHByb3RvL2ZlYXR1cmUvc3RyYXRlZ3kucHJvdG8aKnByb3RvL2ZlYXR1cmUvZmVhdHVyZV9sYXN0X3VzZWRfaW5mby5wcm90bxogcHJvdG8vZmVhdHVyZS9wcmVyZXF1aXNpdGUucHJvdG8i0gcKB0ZlYXR1cmUSDgoCaWQYASABKAlSAmlkEhIKBG5hbWUYAiABKAlSBG5hbWUSIAoLZGVzY3JpcHRpb24YAyABKAlSC2Rlc2NyaXB0aW9uEhgKB2VuYWJsZWQYBCABKAhSB2VuYWJsZWQSGAoHZGVsZXRlZBgFIAEoCFIHZGVsZXRlZBI5ChZldmFsdWF0aW9uX3VuZGVsYXlhYmxlGAYgASgIQgIYAVIVZXZhbHVhdGlvblVuZGVsYXlhYmxlEhAKA3R0bBgHIAEoBVIDdHRsEhgKB3ZlcnNpb24YCCABKAVSB3ZlcnNpb24SHQoKY3JlYXRlZF9hdBgJIAEoA1IJY3JlYXRlZEF0Eh0KCnVwZGF0ZWRfYXQYCiABKANSCXVwZGF0ZWRBdBI8Cgp2YXJpYXRpb25zGAsgAygLMhwuYnVja2V0ZWVyLmZlYXR1cmUuVmFyaWF0aW9uUgp2YXJpYXRpb25zEjMKB3RhcmdldHMYDCADKAsyGS5idWNrZXRlZXIuZmVhdHVyZS5UYXJnZXRSB3RhcmdldHMSLQoFcnVsZXMYDSADKAsyFy5idWNrZXRlZXIuZmVhdHVyZS5SdWxlUgVydWxlcxJGChBkZWZhdWx0X3N0cmF0ZWd5GA4gASgLMhsuYnVja2V0ZWVyLmZlYXR1cmUuU3RyYXRlZ3lSD2RlZmF1bHRTdHJhdGVneRIjCg1vZmZfdmFyaWF0aW9uGA8gASgJUgxvZmZWYXJpYXRpb24SEgoEdGFncxgQIAMoCVIEdGFncxJMCg5sYXN0X3VzZWRfaW5mbxgRIAEoCzImLmJ1Y2tldGVlci5mZWF0dXJlLkZlYXR1cmVMYXN0VXNlZEluZm9SDGxhc3RVc2VkSW5mbxIeCgptYWludGFpbmVyGBIgASgJUgptYWludGFpbmVyEk8KDnZhcmlhdGlvbl90eXBlGBMgASgOMiguYnVja2V0ZWVyLmZlYXR1cmUuRmVhdHVyZS5WYXJpYXRpb25UeXBlUg12YXJpYXRpb25UeXBlEhoKCGFyY2hpdmVkGBQgASgIUghhcmNoaXZlZBJFCg1wcmVyZXF1aXNpdGVzGBUgAygLMh8uYnVja2V0ZWVyLmZlYXR1cmUuUHJlcmVxdWlzaXRlUg1wcmVyZXF1aXNpdGVzEiMKDXNhbXBsaW5nX3NlZWQYFiABKAlSDHNhbXBsaW5nU2VlZCI+Cg1WYXJpYXRpb25UeXBlEgoKBlNUUklORxAAEgsKB0JPT0xFQU4QARIKCgZOVU1CRVIQAhIICgRKU09OEAMiQgoIRmVhdHVyZXMSNgoIZmVhdHVyZXMYASADKAsyGi5idWNrZXRlZXIuZmVhdHVyZS5GZWF0dXJlUghmZWF0dXJlcyJTCgNUYWcSDgoCaWQYASABKAlSAmlkEh0KCmNyZWF0ZWRfYXQYAiABKANSCWNyZWF0ZWRBdBIdCgp1cGRhdGVkX2F0GAMgASgDUgl1cGRhdGVkQXRCKlooZ2l0aHViLmNvbS9jYS1kcC9idWNrZXRlZXIvcHJvdG8vZmVhdHVyZWIGcHJvdG8zCrkHChtwcm90by9mZWF0dXJlL3NlZ21lbnQucHJvdG8SEWJ1Y2tldGVlci5mZWF0dXJlGhhwcm90by9mZWF0dXJlL3J1bGUucHJvdG8i/AMKB1NlZ21lbnQSDgoCaWQYASABKAlSAmlkEhIKBG5hbWU
YAiABKAlSBG5hbWUSIAoLZGVzY3JpcHRpb24YAyABKAlSC2Rlc2NyaXB0aW9uEi0KBXJ1bGVzGAQgAygLMhcuYnVja2V0ZWVyLmZlYXR1cmUuUnVsZVIFcnVsZXMSHQoKY3JlYXRlZF9hdBgFIAEoA1IJY3JlYXRlZEF0Eh0KCnVwZGF0ZWRfYXQYBiABKANSCXVwZGF0ZWRBdBIcCgd2ZXJzaW9uGAcgASgDQgIYAVIHdmVyc2lvbhIYCgdkZWxldGVkGAggASgIUgdkZWxldGVkEi4KE2luY2x1ZGVkX3VzZXJfY291bnQYCSABKANSEWluY2x1ZGVkVXNlckNvdW50EjIKE2V4Y2x1ZGVkX3VzZXJfY291bnQYCiABKANCAhgBUhFleGNsdWRlZFVzZXJDb3VudBI5CgZzdGF0dXMYCyABKA4yIS5idWNrZXRlZXIuZmVhdHVyZS5TZWdtZW50LlN0YXR1c1IGc3RhdHVzEicKEGlzX2luX3VzZV9zdGF0dXMYDCABKAhSDWlzSW5Vc2VTdGF0dXMiPgoGU3RhdHVzEgsKB0lOSVRJQUwQABINCglVUExPQURJTkcQARIMCghTVUNFRURFRBACEgoKBkZBSUxFRBADItQBCgtTZWdtZW50VXNlchIOCgJpZBgBIAEoCVICaWQSHQoKc2VnbWVudF9pZBgCIAEoCVIJc2VnbWVudElkEhcKB3VzZXJfaWQYAyABKAlSBnVzZXJJZBI6CgVzdGF0ZRgEIAEoDjIkLmJ1Y2tldGVlci5mZWF0dXJlLlNlZ21lbnRVc2VyLlN0YXRlUgVzdGF0ZRIYCgdkZWxldGVkGAUgASgIUgdkZWxldGVkIicKBVN0YXRlEgwKCElOQ0xVREVEEAASEAoIRVhDTFVERUQQARoCCAEiYwoMU2VnbWVudFVzZXJzEh0KCnNlZ21lbnRfaWQYASABKAlSCXNlZ21lbnRJZBI0CgV1c2VycxgCIAMoCzIeLmJ1Y2tldGVlci5mZWF0dXJlLlNlZ21lbnRVc2VyUgV1c2Vyc0IqWihnaXRodWIuY29tL2NhLWRwL2J1Y2tldGVlci9wcm90by9mZWF0dXJlYgZwcm90bzMK4wIKI3Byb3RvL2Vudmlyb25tZW50L2Vudmlyb25tZW50LnByb3RvEhVidWNrZXRlZXIuZW52aXJvbm1lbnQi7AEKC0Vudmlyb25tZW50Eg4KAmlkGAEgASgJUgJpZBIcCgluYW1lc3BhY2UYAiABKAlSCW5hbWVzcGFjZRIWCgRuYW1lGAMgASgJQgIYAVIEbmFtZRIgCgtkZXNjcmlwdGlvbhgEIAEoCVILZGVzY3JpcHRpb24SGAoHZGVsZXRlZBgFIAEoCFIHZGVsZXRlZBIdCgpjcmVhdGVkX2F0GAYgASgDUgljcmVhdGVkQXQSHQoKdXBkYXRlZF9hdBgHIAEoA1IJdXBkYXRlZEF0Eh0KCnByb2plY3RfaWQYCCABKAlSCXByb2plY3RJZEIuWixnaXRodWIuY29tL2NhLWRwL2J1Y2tldGVlci9wcm90by9lbnZpcm9ubWVudGIGcHJvdG8zCpEFChtwcm90by9hY2NvdW50L2FjY291bnQucHJvdG8SEWJ1Y2tldGVlci5hY2NvdW50GiNwcm90by9lbnZpcm9ubWVudC9lbnZpcm9ubWVudC5wcm90byKnAgoHQWNjb3VudBIOCgJpZBgBIAEoCVICaWQSFAoFZW1haWwYAiABKAlSBWVtYWlsEhIKBG5hbWUYAyABKAlSBG5hbWUSMwoEcm9sZRgEIAEoDjIfLmJ1Y2tldGVlci5hY2NvdW50LkFjY291bnQuUm9sZVIEcm9sZRIaCghkaXNhYmxlZBgFIAEoCFIIZGlzYWJsZWQSHQoKY3JlYXRlZF9hdBgGIAEoA1IJY3JlYXRlZEF0Eh0KCnVwZGF0ZWRfYXQYByABKANSCXVwZGF0ZWR
BdBIYCgdkZWxldGVkGAggASgIUgdkZWxldGVkIjkKBFJvbGUSCgoGVklFV0VSEAASCgoGRURJVE9SEAESCQoFT1dORVIQAhIOCgpVTkFTU0lHTkVEEGMi2wEKD0Vudmlyb25tZW50Um9sZRJECgtlbnZpcm9ubWVudBgBIAEoCzIiLmJ1Y2tldGVlci5lbnZpcm9ubWVudC5FbnZpcm9ubWVudFILZW52aXJvbm1lbnQSMwoEcm9sZRgCIAEoDjIfLmJ1Y2tldGVlci5hY2NvdW50LkFjY291bnQuUm9sZVIEcm9sZRIjCg10cmlhbF9wcm9qZWN0GAMgASgIUgx0cmlhbFByb2plY3QSKAoQdHJpYWxfc3RhcnRlZF9hdBgEIAEoA1IOdHJpYWxTdGFydGVkQXRCKlooZ2l0aHViLmNvbS9jYS1kcC9idWNrZXRlZXIvcHJvdG8vYWNjb3VudGIGcHJvdG8zCvEDChtwcm90by9hY2NvdW50L2FwaV9rZXkucHJvdG8SEWJ1Y2tldGVlci5hY2NvdW50ItgBCgZBUElLZXkSDgoCaWQYASABKAlSAmlkEhIKBG5hbWUYAiABKAlSBG5hbWUSMgoEcm9sZRgDIAEoDjIeLmJ1Y2tldGVlci5hY2NvdW50LkFQSUtleS5Sb2xlUgRyb2xlEhoKCGRpc2FibGVkGAQgASgIUghkaXNhYmxlZBIdCgpjcmVhdGVkX2F0GAUgASgDUgljcmVhdGVkQXQSHQoKdXBkYXRlZF9hdBgGIAEoA1IJdXBkYXRlZEF0IhwKBFJvbGUSBwoDU0RLEAASCwoHU0VSVklDRRABIq8BChFFbnZpcm9ubWVudEFQSUtleRIzChVlbnZpcm9ubWVudF9uYW1lc3BhY2UYASABKAlSFGVudmlyb25tZW50TmFtZXNwYWNlEjIKB2FwaV9rZXkYAiABKAsyGS5idWNrZXRlZXIuYWNjb3VudC5BUElLZXlSBmFwaUtleRIxChRlbnZpcm9ubWVudF9kaXNhYmxlZBgDIAEoCFITZW52aXJvbm1lbnREaXNhYmxlZEIqWihnaXRodWIuY29tL2NhLWRwL2J1Y2tldGVlci9wcm90by9hY2NvdW50YgZwcm90bzMKjAcKGnByb3RvL2F1dG9vcHMvY2xhdXNlLnByb3RvEhFidWNrZXRlZXIuYXV0b29wcxoZZ29vZ2xlL3Byb3RvYnVmL2FueS5wcm90byJGCgZDbGF1c2USDgoCaWQYASABKAlSAmlkEiwKBmNsYXVzZRgCIAEoCzIULmdvb2dsZS5wcm90b2J1Zi5BbnlSBmNsYXVzZSKfAgoST3BzRXZlbnRSYXRlQ2xhdXNlEiEKDHZhcmlhdGlvbl9pZBgCIAEoCVILdmFyaWF0aW9uSWQSFwoHZ29hbF9pZBgDIAEoCVIGZ29hbElkEhsKCW1pbl9jb3VudBgEIAEoA1IIbWluQ291bnQSKQoQdGhyZWFkc2hvbGRfcmF0ZRgFIAEoAVIPdGhyZWFkc2hvbGRSYXRlEkoKCG9wZXJhdG9yGAYgASgOMi4uYnVja2V0ZWVyLmF1dG9vcHMuT3BzRXZlbnRSYXRlQ2xhdXNlLk9wZXJhdG9yUghvcGVyYXRvciIzCghPcGVyYXRvchIUChBHUkVBVEVSX09SX0VRVUFMEAASEQoNTEVTU19PUl9FUVVBTBABSgQIARACIiQKDkRhdGV0aW1lQ2xhdXNlEhIKBHRpbWUYASABKANSBHRpbWUi+wIKDVdlYmhvb2tDbGF1c2USHQoKd2ViaG9va19pZBgBIAEoCVIJd2ViaG9va0lkEkoKCmNvbmRpdGlvbnMYAiADKAsyKi5idWNrZXRlZXIuYXV0b29wcy5XZWJob29rQ2xhdXNlLkNvbmRpdGlvblIKY29uZGl0aW9ucxr+AQoJQ29uZGl0aW9uEhYKBmZpbHRlchgBIAEoCVIGZmlsdGV
yEhQKBXZhbHVlGAIgASgJUgV2YWx1ZRJPCghvcGVyYXRvchgDIAEoDjIzLmJ1Y2tldGVlci5hdXRvb3BzLldlYmhvb2tDbGF1c2UuQ29uZGl0aW9uLk9wZXJhdG9yUghvcGVyYXRvciJyCghPcGVyYXRvchIJCgVFUVVBTBAAEg0KCU5PVF9FUVVBTBABEg0KCU1PUkVfVEhBThACEhYKEk1PUkVfVEhBTl9PUl9FUVVBTBADEg0KCUxFU1NfVEhBThAEEhYKEkxFU1NfVEhBTl9PUl9FUVVBTBAFQipaKGdpdGh1Yi5jb20vY2EtZHAvYnVja2V0ZWVyL3Byb3RvL2F1dG9vcHNiBnByb3RvMwrgAwohcHJvdG8vYXV0b29wcy9hdXRvX29wc19ydWxlLnByb3RvEhFidWNrZXRlZXIuYXV0b29wcxoacHJvdG8vYXV0b29wcy9jbGF1c2UucHJvdG8iowIKC0F1dG9PcHNSdWxlEg4KAmlkGAEgASgJUgJpZBIdCgpmZWF0dXJlX2lkGAIgASgJUglmZWF0dXJlSWQSNQoIb3BzX3R5cGUYAyABKA4yGi5idWNrZXRlZXIuYXV0b29wcy5PcHNUeXBlUgdvcHNUeXBlEjMKB2NsYXVzZXMYBCADKAsyGS5idWNrZXRlZXIuYXV0b29wcy5DbGF1c2VSB2NsYXVzZXMSIQoMdHJpZ2dlcmVkX2F0GAYgASgDUgt0cmlnZ2VyZWRBdBIdCgpjcmVhdGVkX2F0GAcgASgDUgljcmVhdGVkQXQSHQoKdXBkYXRlZF9hdBgIIAEoA1IJdXBkYXRlZEF0EhgKB2RlbGV0ZWQYCSABKAhSB2RlbGV0ZWQqMgoHT3BzVHlwZRISCg5FTkFCTEVfRkVBVFVSRRAAEhMKD0RJU0FCTEVfRkVBVFVSRRABQipaKGdpdGh1Yi5jb20vY2EtZHAvYnVja2V0ZWVyL3Byb3RvL2F1dG9vcHNiBnByb3RvMwr6AgoicHJvdG8vbm90aWZpY2F0aW9uL3JlY2lwaWVudC5wcm90bxIWYnVja2V0ZWVyLm5vdGlmaWNhdGlvbiLIAQoJUmVjaXBpZW50EjoKBHR5cGUYASABKA4yJi5idWNrZXRlZXIubm90aWZpY2F0aW9uLlJlY2lwaWVudC5UeXBlUgR0eXBlEmUKF3NsYWNrX2NoYW5uZWxfcmVjaXBpZW50GAIgASgLMi0uYnVja2V0ZWVyLm5vdGlmaWNhdGlvbi5TbGFja0NoYW5uZWxSZWNpcGllbnRSFXNsYWNrQ2hhbm5lbFJlY2lwaWVudCIYCgRUeXBlEhAKDFNsYWNrQ2hhbm5lbBAAIjgKFVNsYWNrQ2hhbm5lbFJlY2lwaWVudBIfCgt3ZWJob29rX3VybBgBIAEoCVIKd2ViaG9va1VybEIvWi1naXRodWIuY29tL2NhLWRwL2J1Y2tldGVlci9wcm90by9ub3RpZmljYXRpb25iBnByb3RvMwqSBwolcHJvdG8vbm90aWZpY2F0aW9uL3N1YnNjcmlwdGlvbi5wcm90bxIWYnVja2V0ZWVyLm5vdGlmaWNhdGlvbhoicHJvdG8vbm90aWZpY2F0aW9uL3JlY2lwaWVudC5wcm90byLzBQoMU3Vic2NyaXB0aW9uEg4KAmlkGAEgASgJUgJpZBIdCgpjcmVhdGVkX2F0GAIgASgDUgljcmVhdGVkQXQSHQoKdXBkYXRlZF9hdBgDIAEoA1IJdXBkYXRlZEF0EhoKCGRpc2FibGVkGAQgASgIUghkaXNhYmxlZBJSCgxzb3VyY2VfdHlwZXMYBSADKA4yLy5idWNrZXRlZXIubm90aWZpY2F0aW9uLlN1YnNjcmlwdGlvbi5Tb3VyY2VUeXBlUgtzb3VyY2VUeXBlcxI/CglyZWNpcGllbnQYBiABKAsyIS5idWNrZXRlZXIubm90aWZpY2F0aW9uLlJlY2lwaWVudFI
JcmVjaXBpZW50EhIKBG5hbWUYByABKAlSBG5hbWUizwMKClNvdXJjZVR5cGUSGAoURE9NQUlOX0VWRU5UX0ZFQVRVUkUQABIVChFET01BSU5fRVZFTlRfR09BTBABEhsKF0RPTUFJTl9FVkVOVF9FWFBFUklNRU5UEAISGAoURE9NQUlOX0VWRU5UX0FDQ09VTlQQAxIXChNET01BSU5fRVZFTlRfQVBJS0VZEAQSGAoURE9NQUlOX0VWRU5UX1NFR01FTlQQBRIcChhET01BSU5fRVZFTlRfRU5WSVJPTk1FTlQQBhIeChpET01BSU5fRVZFTlRfQURNSU5fQUNDT1VOVBAHEh0KGURPTUFJTl9FVkVOVF9BVVRPT1BTX1JVTEUQCBIVChFET01BSU5fRVZFTlRfUFVTSBAJEh0KGURPTUFJTl9FVkVOVF9TVUJTQ1JJUFRJT04QChIjCh9ET01BSU5fRVZFTlRfQURNSU5fU1VCU0NSSVBUSU9OEAsSGAoURE9NQUlOX0VWRU5UX1BST0pFQ1QQDBIYChRET01BSU5fRVZFTlRfV0VCSE9PSxANEhEKDUZFQVRVUkVfU1RBTEUQZBIXChJFWFBFUklNRU5UX1JVTk5JTkcQyAESDgoJTUFVX0NPVU5UEKwCQi9aLWdpdGh1Yi5jb20vY2EtZHAvYnVja2V0ZWVyL3Byb3RvL25vdGlmaWNhdGlvbmIGcHJvdG8zCriNAQoecHJvdG8vZXZlbnQvZG9tYWluL2V2ZW50LnByb3RvEhZidWNrZXRlZXIuZXZlbnQuZG9tYWluGhlnb29nbGUvcHJvdG9idWYvYW55LnByb3RvGh5nb29nbGUvcHJvdG9idWYvd3JhcHBlcnMucHJvdG8aGnByb3RvL2ZlYXR1cmUvY2xhdXNlLnByb3RvGhtwcm90by9mZWF0dXJlL2ZlYXR1cmUucHJvdG8aGHByb3RvL2ZlYXR1cmUvcnVsZS5wcm90bxodcHJvdG8vZmVhdHVyZS92YXJpYXRpb24ucHJvdG8aHHByb3RvL2ZlYXR1cmUvc3RyYXRlZ3kucHJvdG8aG3Byb3RvL2ZlYXR1cmUvc2VnbWVudC5wcm90bxoacHJvdG8vZmVhdHVyZS90YXJnZXQucHJvdG8aG3Byb3RvL2FjY291bnQvYWNjb3VudC5wcm90bxobcHJvdG8vYWNjb3VudC9hcGlfa2V5LnByb3RvGiFwcm90by9hdXRvb3BzL2F1dG9fb3BzX3J1bGUucHJvdG8aGnByb3RvL2F1dG9vcHMvY2xhdXNlLnByb3RvGiVwcm90by9ub3RpZmljYXRpb24vc3Vic2NyaXB0aW9uLnByb3RvGiJwcm90by9ub3RpZmljYXRpb24vcmVjaXBpZW50LnByb3RvGiBwcm90by9mZWF0dXJlL3ByZXJlcXVpc2l0ZS5wcm90byK3IQoFRXZlbnQSDgoCaWQYASABKAlSAmlkEhwKCXRpbWVzdGFtcBgCIAEoA1IJdGltZXN0YW1wEkkKC2VudGl0eV90eXBlGAMgASgOMiguYnVja2V0ZWVyLmV2ZW50LmRvbWFpbi5FdmVudC5FbnRpdHlUeXBlUgplbnRpdHlUeXBlEhsKCWVudGl0eV9pZBgEIAEoCVIIZW50aXR5SWQSNgoEdHlwZRgFIAEoDjIiLmJ1Y2tldGVlci5ldmVudC5kb21haW4uRXZlbnQuVHlwZVIEdHlwZRI2CgZlZGl0b3IYBiABKAsyHi5idWNrZXRlZXIuZXZlbnQuZG9tYWluLkVkaXRvclIGZWRpdG9yEigKBGRhdGEYByABKAsyFC5nb29nbGUucHJvdG9idWYuQW55UgRkYXRhEjMKFWVudmlyb25tZW50X25hbWVzcGFjZRgIIAEoCVIUZW52aXJvbm1lbnROYW1lc3BhY2USJAoOaXNfYWRtaW5fZXZlbnQYCSABKAhSDGlzQWR
taW5FdmVudBI5CgdvcHRpb25zGAogASgLMh8uYnVja2V0ZWVyLmV2ZW50LmRvbWFpbi5PcHRpb25zUgdvcHRpb25zIt0BCgpFbnRpdHlUeXBlEgsKB0ZFQVRVUkUQABIICgRHT0FMEAESDgoKRVhQRVJJTUVOVBACEgsKB0FDQ09VTlQQAxIKCgZBUElLRVkQBBILCgdTRUdNRU5UEAUSDwoLRU5WSVJPTk1FTlQQBhIRCg1BRE1JTl9BQ0NPVU5UEAcSEAoMQVVUT09QU19SVUxFEAgSCAoEUFVTSBAJEhAKDFNVQlNDUklQVElPThAKEhYKEkFETUlOX1NVQlNDUklQVElPThALEgsKB1BST0pFQ1QQDBILCgdXRUJIT09LEA0ihxwKBFR5cGUSCwoHVU5LTk9XThAAEhMKD0ZFQVRVUkVfQ1JFQVRFRBABEhMKD0ZFQVRVUkVfUkVOQU1FRBACEhMKD0ZFQVRVUkVfRU5BQkxFRBADEhQKEEZFQVRVUkVfRElTQUJMRUQQBBITCg9GRUFUVVJFX0RFTEVURUQQBRIkCiBGRUFUVVJFX0VWQUxVQVRJT05fREVMQVlBQkxFX1NFVBAGEiYKIkZFQVRVUkVfRVZBTFVBVElPTl9VTkRFTEFZQUJMRV9TRVQQBxIfChtGRUFUVVJFX0RFU0NSSVBUSU9OX0NIQU5HRUQQCBIbChdGRUFUVVJFX1ZBUklBVElPTl9BRERFRBAJEh0KGUZFQVRVUkVfVkFSSUFUSU9OX1JFTU9WRUQQChIhCh1GRUFUVVJFX09GRl9WQVJJQVRJT05fQ0hBTkdFRBALEhsKF1ZBUklBVElPTl9WQUxVRV9DSEFOR0VEEAwSGgoWVkFSSUFUSU9OX05BTUVfQ0hBTkdFRBANEiEKHVZBUklBVElPTl9ERVNDUklQVElPTl9DSEFOR0VEEA4SGAoUVkFSSUFUSU9OX1VTRVJfQURERUQQDxIaChZWQVJJQVRJT05fVVNFUl9SRU1PVkVEEBASFgoSRkVBVFVSRV9SVUxFX0FEREVEEBESIQodRkVBVFVSRV9SVUxFX1NUUkFURUdZX0NIQU5HRUQQEhIYChRGRUFUVVJFX1JVTEVfREVMRVRFRBATEhUKEVJVTEVfQ0xBVVNFX0FEREVEEBQSFwoTUlVMRV9DTEFVU0VfREVMRVRFRBAVEh8KG1JVTEVfRklYRURfU1RSQVRFR1lfQ0hBTkdFRBAWEiEKHVJVTEVfUk9MTE9VVF9TVFJBVEVHWV9DSEFOR0VEEBcSHAoYQ0xBVVNFX0FUVFJJQlVURV9DSEFOR0VEEBgSGwoXQ0xBVVNFX09QRVJBVE9SX0NIQU5HRUQQGRIWChJDTEFVU0VfVkFMVUVfQURERUQQGhIYChRDTEFVU0VfVkFMVUVfUkVNT1ZFRBAbEiQKIEZFQVRVUkVfREVGQVVMVF9TVFJBVEVHWV9DSEFOR0VEEBwSFQoRRkVBVFVSRV9UQUdfQURERUQQHRIXChNGRUFUVVJFX1RBR19SRU1PVkVEEB4SHwobRkVBVFVSRV9WRVJTSU9OX0lOQ1JFTUVOVEVEEB8SFAoQRkVBVFVSRV9BUkNISVZFRBAgEhIKDkZFQVRVUkVfQ0xPTkVEECESFgoSRkVBVFVSRV9VTkFSQ0hJVkVEECMSFwoTU0FNUExJTkdfU0VFRF9SRVNFVBAiEhYKElBSRVJFUVVJU0lURV9BRERFRBAkEhgKFFBSRVJFUVVJU0lURV9SRU1PVkVEECUSIgoeUFJFUkVRVUlTSVRFX1ZBUklBVElPTl9DSEFOR0VEECYSEAoMR09BTF9DUkVBVEVEEGQSEAoMR09BTF9SRU5BTUVEEGUSHAoYR09BTF9ERVNDUklQVElPTl9DSEFOR0VEEGYSEAoMR09BTF9ERUxFVEVEEGcSEQoNR09BTF9BUkNISVZFRBBoEhcKEkVYUEVSSU1FTlRfQ1JFQVR
FRBDIARIXChJFWFBFUklNRU5UX1NUT1BQRUQQyQESIAobRVhQRVJJTUVOVF9TVEFSVF9BVF9DSEFOR0VEEMoBEh8KGkVYUEVSSU1FTlRfU1RPUF9BVF9DSEFOR0VEEMsBEhcKEkVYUEVSSU1FTlRfREVMRVRFRBDMARIeChlFWFBFUklNRU5UX1BFUklPRF9DSEFOR0VEEM0BEhwKF0VYUEVSSU1FTlRfTkFNRV9DSEFOR0VEEM4BEiMKHkVYUEVSSU1FTlRfREVTQ1JJUFRJT05fQ0hBTkdFRBDPARIXChJFWFBFUklNRU5UX1NUQVJURUQQ0AESGAoTRVhQRVJJTUVOVF9GSU5JU0hFRBDRARIYChNFWFBFUklNRU5UX0FSQ0hJVkVEENIBEhQKD0FDQ09VTlRfQ1JFQVRFRBCsAhIZChRBQ0NPVU5UX1JPTEVfQ0hBTkdFRBCtAhIUCg9BQ0NPVU5UX0VOQUJMRUQQrgISFQoQQUNDT1VOVF9ESVNBQkxFRBCvAhIUCg9BQ0NPVU5UX0RFTEVURUQQsAISEwoOQVBJS0VZX0NSRUFURUQQkAMSGAoTQVBJS0VZX05BTUVfQ0hBTkdFRBCRAxITCg5BUElLRVlfRU5BQkxFRBCSAxIUCg9BUElLRVlfRElTQUJMRUQQkwMSFAoPU0VHTUVOVF9DUkVBVEVEEPQDEhQKD1NFR01FTlRfREVMRVRFRBD1AxIZChRTRUdNRU5UX05BTUVfQ0hBTkdFRBD2AxIgChtTRUdNRU5UX0RFU0NSSVBUSU9OX0NIQU5HRUQQ9wMSFwoSU0VHTUVOVF9SVUxFX0FEREVEEPgDEhkKFFNFR01FTlRfUlVMRV9ERUxFVEVEEPkDEh4KGVNFR01FTlRfUlVMRV9DTEFVU0VfQURERUQQ+gMSIAobU0VHTUVOVF9SVUxFX0NMQVVTRV9ERUxFVEVEEPsDEiUKIFNFR01FTlRfQ0xBVVNFX0FUVFJJQlVURV9DSEFOR0VEEPwDEiQKH1NFR01FTlRfQ0xBVVNFX09QRVJBVE9SX0NIQU5HRUQQ/QMSHwoaU0VHTUVOVF9DTEFVU0VfVkFMVUVfQURERUQQ/gMSIQocU0VHTUVOVF9DTEFVU0VfVkFMVUVfUkVNT1ZFRBD/AxIXChJTRUdNRU5UX1VTRVJfQURERUQQgAQSGQoUU0VHTUVOVF9VU0VSX0RFTEVURUQQgQQSHgoZU0VHTUVOVF9CVUxLX1VQTE9BRF9VU0VSUxCCBBItCihTRUdNRU5UX0JVTEtfVVBMT0FEX1VTRVJTX1NUQVRVU19DSEFOR0VEEIMEEhgKE0VOVklST05NRU5UX0NSRUFURUQQ2AQSGAoTRU5WSVJPTk1FTlRfUkVOQU1FRBDZBBIkCh9FTlZJUk9OTUVOVF9ERVNDUklQVElPTl9DSEFOR0VEENoEEhgKE0VOVklST05NRU5UX0RFTEVURUQQ2wQSGgoVQURNSU5fQUNDT1VOVF9DUkVBVEVEELwFEhoKFUFETUlOX0FDQ09VTlRfRU5BQkxFRBC+BRIbChZBRE1JTl9BQ0NPVU5UX0RJU0FCTEVEEL8FEhkKFEFVVE9PUFNfUlVMRV9DUkVBVEVEEKAGEhkKFEFVVE9PUFNfUlVMRV9ERUxFVEVEEKEGEiIKHUFVVE9PUFNfUlVMRV9PUFNfVFlQRV9DSEFOR0VEEKIGEiAKG0FVVE9PUFNfUlVMRV9DTEFVU0VfREVMRVRFRBCjBhImCiFBVVRPT1BTX1JVTEVfVFJJR0dFUkVEX0FUX0NIQU5HRUQQpAYSIAobT1BTX0VWRU5UX1JBVEVfQ0xBVVNFX0FEREVEEKUGEiIKHU9QU19FVkVOVF9SQVRFX0NMQVVTRV9DSEFOR0VEEKYGEhoKFURBVEVUSU1FX0NMQVVTRV9BRERFRBCnBhIcChdEQVRFVElNRV9DTEFVU0VfQ0hBTkdFRBCoBhIRCgx
QVVNIX0NSRUFURUQQhAcSEQoMUFVTSF9ERUxFVEVEEIUHEhQKD1BVU0hfVEFHU19BRERFRBCGBxIWChFQVVNIX1RBR1NfREVMRVRFRBCHBxIRCgxQVVNIX1JFTkFNRUQQiAcSGQoUU1VCU0NSSVBUSU9OX0NSRUFURUQQ6AcSGQoUU1VCU0NSSVBUSU9OX0RFTEVURUQQ6QcSGQoUU1VCU0NSSVBUSU9OX0VOQUJMRUQQ6gcSGgoVU1VCU0NSSVBUSU9OX0RJU0FCTEVEEOsHEiMKHlNVQlNDUklQVElPTl9TT1VSQ0VfVFlQRV9BRERFRBDsBxIlCiBTVUJTQ1JJUFRJT05fU09VUkNFX1RZUEVfREVMRVRFRBDtBxIZChRTVUJTQ1JJUFRJT05fUkVOQU1FRBDuBxIfChpBRE1JTl9TVUJTQ1JJUFRJT05fQ1JFQVRFRBDMCBIfChpBRE1JTl9TVUJTQ1JJUFRJT05fREVMRVRFRBDNCBIfChpBRE1JTl9TVUJTQ1JJUFRJT05fRU5BQkxFRBDOCBIgChtBRE1JTl9TVUJTQ1JJUFRJT05fRElTQUJMRUQQzwgSKQokQURNSU5fU1VCU0NSSVBUSU9OX1NPVVJDRV9UWVBFX0FEREVEENAIEisKJkFETUlOX1NVQlNDUklQVElPTl9TT1VSQ0VfVFlQRV9ERUxFVEVEENEIEh8KGkFETUlOX1NVQlNDUklQVElPTl9SRU5BTUVEENIIEhQKD1BST0pFQ1RfQ1JFQVRFRBCwCRIgChtQUk9KRUNUX0RFU0NSSVBUSU9OX0NIQU5HRUQQsQkSFAoPUFJPSkVDVF9FTkFCTEVEELIJEhUKEFBST0pFQ1RfRElTQUJMRUQQswkSGgoVUFJPSkVDVF9UUklBTF9DUkVBVEVEELQJEhwKF1BST0pFQ1RfVFJJQUxfQ09OVkVSVEVEELUJEhQKD1dFQkhPT0tfQ1JFQVRFRBCUChIUCg9XRUJIT09LX0RFTEVURUQQlQoSGQoUV0VCSE9PS19OQU1FX0NIQU5HRUQQlgoSIAobV0VCSE9PS19ERVNDUklQVElPTl9DSEFOR0VEEJcKEhkKFFdFQkhPT0tfQ0xBVVNFX0FEREVEEJgKEhsKFldFQkhPT0tfQ0xBVVNFX0NIQU5HRUQQmQoibgoGRWRpdG9yEhQKBWVtYWlsGAEgASgJUgVlbWFpbBIzCgRyb2xlGAIgASgOMh8uYnVja2V0ZWVyLmFjY291bnQuQWNjb3VudC5Sb2xlUgRyb2xlEhkKCGlzX2FkbWluGAMgASgIUgdpc0FkbWluIkQKB09wdGlvbnMSGAoHY29tbWVudBgBIAEoCVIHY29tbWVudBIfCgtuZXdfdmVyc2lvbhgCIAEoBVIKbmV3VmVyc2lvbiK0AwoTRmVhdHVyZUNyZWF0ZWRFdmVudBIOCgJpZBgBIAEoCVICaWQSEgoEbmFtZRgCIAEoCVIEbmFtZRIgCgtkZXNjcmlwdGlvbhgDIAEoCVILZGVzY3JpcHRpb24SEgoEdXNlchgEIAEoCVIEdXNlchI8Cgp2YXJpYXRpb25zGAUgAygLMhwuYnVja2V0ZWVyLmZlYXR1cmUuVmFyaWF0aW9uUgp2YXJpYXRpb25zElgKGmRlZmF1bHRfb25fdmFyaWF0aW9uX2luZGV4GAYgASgLMhsuZ29vZ2xlLnByb3RvYnVmLkludDMyVmFsdWVSF2RlZmF1bHRPblZhcmlhdGlvbkluZGV4EloKG2RlZmF1bHRfb2ZmX3ZhcmlhdGlvbl9pbmRleBgHIAEoCzIbLmdvb2dsZS5wcm90b2J1Zi5JbnQzMlZhbHVlUhhkZWZhdWx0T2ZmVmFyaWF0aW9uSW5kZXgSTwoOdmFyaWF0aW9uX3R5cGUYCCABKA4yKC5idWNrZXRlZXIuZmVhdHVyZS5GZWF0dXJlLlZhcmlhdGlvblR5cGVSDXZhcmlhdGl
vblR5cGUiJQoTRmVhdHVyZUVuYWJsZWRFdmVudBIOCgJpZBgBIAEoCVICaWQiJgoURmVhdHVyZURpc2FibGVkRXZlbnQSDgoCaWQYASABKAlSAmlkIiYKFEZlYXR1cmVBcmNoaXZlZEV2ZW50Eg4KAmlkGAEgASgJUgJpZCIoChZGZWF0dXJlVW5hcmNoaXZlZEV2ZW50Eg4KAmlkGAEgASgJUgJpZCIlChNGZWF0dXJlRGVsZXRlZEV2ZW50Eg4KAmlkGAEgASgJUgJpZCItChtFdmFsdWF0aW9uRGVsYXlhYmxlU2V0RXZlbnQSDgoCaWQYASABKAlSAmlkIi8KHUV2YWx1YXRpb25VbmRlbGF5YWJsZVNldEV2ZW50Eg4KAmlkGAEgASgJUgJpZCI5ChNGZWF0dXJlUmVuYW1lZEV2ZW50Eg4KAmlkGAEgASgJUgJpZBISCgRuYW1lGAIgASgJUgRuYW1lIlIKHkZlYXR1cmVEZXNjcmlwdGlvbkNoYW5nZWRFdmVudBIOCgJpZBgBIAEoCVICaWQSIAoLZGVzY3JpcHRpb24YAiABKAlSC2Rlc2NyaXB0aW9uIlYKH0ZlYXR1cmVPZmZWYXJpYXRpb25DaGFuZ2VkRXZlbnQSDgoCaWQYASABKAlSAmlkEiMKDW9mZl92YXJpYXRpb24YAiABKAlSDG9mZlZhcmlhdGlvbiJoChpGZWF0dXJlVmFyaWF0aW9uQWRkZWRFdmVudBIOCgJpZBgBIAEoCVICaWQSOgoJdmFyaWF0aW9uGAIgASgLMhwuYnVja2V0ZWVyLmZlYXR1cmUuVmFyaWF0aW9uUgl2YXJpYXRpb24iUQocRmVhdHVyZVZhcmlhdGlvblJlbW92ZWRFdmVudBIOCgJpZBgBIAEoCVICaWQSIQoMdmFyaWF0aW9uX2lkGAIgASgJUgt2YXJpYXRpb25JZCJhChpWYXJpYXRpb25WYWx1ZUNoYW5nZWRFdmVudBIdCgpmZWF0dXJlX2lkGAEgASgJUglmZWF0dXJlSWQSDgoCaWQYAiABKAlSAmlkEhQKBXZhbHVlGAMgASgJUgV2YWx1ZSJeChlWYXJpYXRpb25OYW1lQ2hhbmdlZEV2ZW50Eh0KCmZlYXR1cmVfaWQYASABKAlSCWZlYXR1cmVJZBIOCgJpZBgCIAEoCVICaWQSEgoEbmFtZRgDIAEoCVIEbmFtZSJzCiBWYXJpYXRpb25EZXNjcmlwdGlvbkNoYW5nZWRFdmVudBIdCgpmZWF0dXJlX2lkGAEgASgJUglmZWF0dXJlSWQSDgoCaWQYAiABKAlSAmlkEiAKC2Rlc2NyaXB0aW9uGAMgASgJUgtkZXNjcmlwdGlvbiJcChdWYXJpYXRpb25Vc2VyQWRkZWRFdmVudBIdCgpmZWF0dXJlX2lkGAEgASgJUglmZWF0dXJlSWQSDgoCaWQYAiABKAlSAmlkEhIKBHVzZXIYAyABKAlSBHVzZXIiXgoZVmFyaWF0aW9uVXNlclJlbW92ZWRFdmVudBIdCgpmZWF0dXJlX2lkGAEgASgJUglmZWF0dXJlSWQSDgoCaWQYAiABKAlSAmlkEhIKBHVzZXIYAyABKAlSBHVzZXIiVAoVRmVhdHVyZVJ1bGVBZGRlZEV2ZW50Eg4KAmlkGAEgASgJUgJpZBIrCgRydWxlGAIgASgLMhcuYnVja2V0ZWVyLmZlYXR1cmUuUnVsZVIEcnVsZSKRAQoeRmVhdHVyZUNoYW5nZVJ1bGVTdHJhdGVneUV2ZW50Eh0KCmZlYXR1cmVfaWQYASABKAlSCWZlYXR1cmVJZBIXCgdydWxlX2lkGAIgASgJUgZydWxlSWQSNwoIc3RyYXRlZ3kYAyABKAsyGy5idWNrZXRlZXIuZmVhdHVyZS5TdHJhdGVneVIIc3RyYXRlZ3kiQgoXRmVhdHVyZVJ1bGVEZWxldGVkRXZlbnQSDgoCaWQYASABKAlSAml
kEhcKB3J1bGVfaWQYAiABKAlSBnJ1bGVJZCKYAQogRmVhdHVyZUZpeGVkU3RyYXRlZ3lDaGFuZ2VkRXZlbnQSHQoKZmVhdHVyZV9pZBgBIAEoCVIJZmVhdHVyZUlkEhcKB3J1bGVfaWQYAiABKAlSBnJ1bGVJZBI8CghzdHJhdGVneRgDIAEoCzIgLmJ1Y2tldGVlci5mZWF0dXJlLkZpeGVkU3RyYXRlZ3lSCHN0cmF0ZWd5IpwBCiJGZWF0dXJlUm9sbG91dFN0cmF0ZWd5Q2hhbmdlZEV2ZW50Eh0KCmZlYXR1cmVfaWQYASABKAlSCWZlYXR1cmVJZBIXCgdydWxlX2lkGAIgASgJUgZydWxlSWQSPgoIc3RyYXRlZ3kYAyABKAsyIi5idWNrZXRlZXIuZmVhdHVyZS5Sb2xsb3V0U3RyYXRlZ3lSCHN0cmF0ZWd5IoEBChRSdWxlQ2xhdXNlQWRkZWRFdmVudBIdCgpmZWF0dXJlX2lkGAEgASgJUglmZWF0dXJlSWQSFwoHcnVsZV9pZBgCIAEoCVIGcnVsZUlkEjEKBmNsYXVzZRgDIAEoCzIZLmJ1Y2tldGVlci5mZWF0dXJlLkNsYXVzZVIGY2xhdXNlImAKFlJ1bGVDbGF1c2VEZWxldGVkRXZlbnQSHQoKZmVhdHVyZV9pZBgBIAEoCVIJZmVhdHVyZUlkEhcKB3J1bGVfaWQYAiABKAlSBnJ1bGVJZBIOCgJpZBgDIAEoCVICaWQigwEKG0NsYXVzZUF0dHJpYnV0ZUNoYW5nZWRFdmVudBIdCgpmZWF0dXJlX2lkGAEgASgJUglmZWF0dXJlSWQSFwoHcnVsZV9pZBgCIAEoCVIGcnVsZUlkEg4KAmlkGAMgASgJUgJpZBIcCglhdHRyaWJ1dGUYBCABKAlSCWF0dHJpYnV0ZSKkAQoaQ2xhdXNlT3BlcmF0b3JDaGFuZ2VkRXZlbnQSHQoKZmVhdHVyZV9pZBgBIAEoCVIJZmVhdHVyZUlkEhcKB3J1bGVfaWQYAiABKAlSBnJ1bGVJZBIOCgJpZBgDIAEoCVICaWQSPgoIb3BlcmF0b3IYBCABKA4yIi5idWNrZXRlZXIuZmVhdHVyZS5DbGF1c2UuT3BlcmF0b3JSCG9wZXJhdG9yInUKFUNsYXVzZVZhbHVlQWRkZWRFdmVudBIdCgpmZWF0dXJlX2lkGAEgASgJUglmZWF0dXJlSWQSFwoHcnVsZV9pZBgCIAEoCVIGcnVsZUlkEg4KAmlkGAMgASgJUgJpZBIUCgV2YWx1ZRgEIAEoCVIFdmFsdWUidwoXQ2xhdXNlVmFsdWVSZW1vdmVkRXZlbnQSHQoKZmVhdHVyZV9pZBgBIAEoCVIJZmVhdHVyZUlkEhcKB3J1bGVfaWQYAiABKAlSBnJ1bGVJZBIOCgJpZBgDIAEoCVICaWQSFAoFdmFsdWUYBCABKAlSBXZhbHVlIm0KIkZlYXR1cmVEZWZhdWx0U3RyYXRlZ3lDaGFuZ2VkRXZlbnQSDgoCaWQYASABKAlSAmlkEjcKCHN0cmF0ZWd5GAIgASgLMhsuYnVja2V0ZWVyLmZlYXR1cmUuU3RyYXRlZ3lSCHN0cmF0ZWd5IjgKFEZlYXR1cmVUYWdBZGRlZEV2ZW50Eg4KAmlkGAEgASgJUgJpZBIQCgN0YWcYAiABKAlSA3RhZyI6ChZGZWF0dXJlVGFnUmVtb3ZlZEV2ZW50Eg4KAmlkGAEgASgJUgJpZBIQCgN0YWcYAiABKAlSA3RhZyJKCh5GZWF0dXJlVmVyc2lvbkluY3JlbWVudGVkRXZlbnQSDgoCaWQYASABKAlSAmlkEhgKB3ZlcnNpb24YAiABKAVSB3ZlcnNpb24i7gMKEkZlYXR1cmVDbG9uZWRFdmVudBIOCgJpZBgBIAEoCVICaWQSEgoEbmFtZRgCIAEoCVIEbmFtZRIgCgtkZXNjcmlwdGlvbhgDIAEoCVILZGV
zY3JpcHRpb24SPAoKdmFyaWF0aW9ucxgEIAMoCzIcLmJ1Y2tldGVlci5mZWF0dXJlLlZhcmlhdGlvblIKdmFyaWF0aW9ucxIzCgd0YXJnZXRzGAUgAygLMhkuYnVja2V0ZWVyLmZlYXR1cmUuVGFyZ2V0Ugd0YXJnZXRzEi0KBXJ1bGVzGAYgAygLMhcuYnVja2V0ZWVyLmZlYXR1cmUuUnVsZVIFcnVsZXMSRgoQZGVmYXVsdF9zdHJhdGVneRgHIAEoCzIbLmJ1Y2tldGVlci5mZWF0dXJlLlN0cmF0ZWd5Ug9kZWZhdWx0U3RyYXRlZ3kSIwoNb2ZmX3ZhcmlhdGlvbhgIIAEoCVIMb2ZmVmFyaWF0aW9uEhIKBHRhZ3MYCSADKAlSBHRhZ3MSHgoKbWFpbnRhaW5lchgKIAEoCVIKbWFpbnRhaW5lchJPCg52YXJpYXRpb25fdHlwZRgLIAEoDjIoLmJ1Y2tldGVlci5mZWF0dXJlLkZlYXR1cmUuVmFyaWF0aW9uVHlwZVINdmFyaWF0aW9uVHlwZSJECh1GZWF0dXJlU2FtcGxpbmdTZWVkUmVzZXRFdmVudBIjCg1zYW1wbGluZ19zZWVkGAEgASgJUgxzYW1wbGluZ1NlZWQisAEKEEdvYWxDcmVhdGVkRXZlbnQSDgoCaWQYASABKAlSAmlkEhIKBG5hbWUYAiABKAlSBG5hbWUSIAoLZGVzY3JpcHRpb24YAyABKAlSC2Rlc2NyaXB0aW9uEhgKB2RlbGV0ZWQYBCABKAhSB2RlbGV0ZWQSHQoKY3JlYXRlZF9hdBgFIAEoA1IJY3JlYXRlZEF0Eh0KCnVwZGF0ZWRfYXQYBiABKANSCXVwZGF0ZWRBdCI2ChBHb2FsUmVuYW1lZEV2ZW50Eg4KAmlkGAEgASgJUgJpZBISCgRuYW1lGAIgASgJUgRuYW1lIk8KG0dvYWxEZXNjcmlwdGlvbkNoYW5nZWRFdmVudBIOCgJpZBgBIAEoCVICaWQSIAoLZGVzY3JpcHRpb24YAiABKAlSC2Rlc2NyaXB0aW9uIiMKEUdvYWxBcmNoaXZlZEV2ZW50Eg4KAmlkGAEgASgJUgJpZCIiChBHb2FsRGVsZXRlZEV2ZW50Eg4KAmlkGAEgASgJUgJpZCLzAwoWRXhwZXJpbWVudENyZWF0ZWRFdmVudBIOCgJpZBgBIAEoCVICaWQSHQoKZmVhdHVyZV9pZBgCIAEoCVIJZmVhdHVyZUlkEicKD2ZlYXR1cmVfdmVyc2lvbhgDIAEoBVIOZmVhdHVyZVZlcnNpb24SPAoKdmFyaWF0aW9ucxgEIAMoCzIcLmJ1Y2tldGVlci5mZWF0dXJlLlZhcmlhdGlvblIKdmFyaWF0aW9ucxIbCgdnb2FsX2lkGAUgASgJQgIYAVIGZ29hbElkEhkKCHN0YXJ0X2F0GAYgASgDUgdzdGFydEF0EhcKB3N0b3BfYXQYByABKANSBnN0b3BBdBIYCgdzdG9wcGVkGAggASgIUgdzdG9wcGVkEh0KCnN0b3BwZWRfYXQYCSABKANSCXN0b3BwZWRBdBIdCgpjcmVhdGVkX2F0GAogASgDUgljcmVhdGVkQXQSHQoKdXBkYXRlZF9hdBgLIAEoA1IJdXBkYXRlZEF0EhkKCGdvYWxfaWRzGAwgAygJUgdnb2FsSWRzEhIKBG5hbWUYDSABKAlSBG5hbWUSIAoLZGVzY3JpcHRpb24YDiABKAlSC2Rlc2NyaXB0aW9uEioKEWJhc2VfdmFyaWF0aW9uX2lkGA8gASgJUg9iYXNlVmFyaWF0aW9uSWQiRwoWRXhwZXJpbWVudFN0b3BwZWRFdmVudBIOCgJpZBgBIAEoCVICaWQSHQoKc3RvcHBlZF9hdBgCIAEoA1IJc3RvcHBlZEF0IikKF0V4cGVyaW1lbnRBcmNoaXZlZEV2ZW50Eg4KAmlkGAEgASgJUgJpZCIoChZFeHBlcml
tZW50RGVsZXRlZEV2ZW50Eg4KAmlkGAEgASgJUgJpZCJKCh1FeHBlcmltZW50U3RhcnRBdENoYW5nZWRFdmVudBIOCgJpZBgBIAEoCVICaWQSGQoIc3RhcnRfYXQYAiABKANSB3N0YXJ0QXQiRwocRXhwZXJpbWVudFN0b3BBdENoYW5nZWRFdmVudBIOCgJpZBgBIAEoCVICaWQSFwoHc3RvcF9hdBgCIAEoA1IGc3RvcEF0ImIKHEV4cGVyaW1lbnRQZXJpb2RDaGFuZ2VkRXZlbnQSDgoCaWQYASABKAlSAmlkEhkKCHN0YXJ0X2F0GAIgASgDUgdzdGFydEF0EhcKB3N0b3BfYXQYAyABKANSBnN0b3BBdCJAChpFeHBlcmltZW50TmFtZUNoYW5nZWRFdmVudBIOCgJpZBgBIAEoCVICaWQSEgoEbmFtZRgCIAEoCVIEbmFtZSJVCiFFeHBlcmltZW50RGVzY3JpcHRpb25DaGFuZ2VkRXZlbnQSDgoCaWQYASABKAlSAmlkEiAKC2Rlc2NyaXB0aW9uGAIgASgJUgtkZXNjcmlwdGlvbiIYChZFeHBlcmltZW50U3RhcnRlZEV2ZW50IhkKF0V4cGVyaW1lbnRGaW5pc2hlZEV2ZW50It4BChNBY2NvdW50Q3JlYXRlZEV2ZW50Eg4KAmlkGAEgASgJUgJpZBIUCgVlbWFpbBgCIAEoCVIFZW1haWwSEgoEbmFtZRgDIAEoCVIEbmFtZRIzCgRyb2xlGAQgASgOMh8uYnVja2V0ZWVyLmFjY291bnQuQWNjb3VudC5Sb2xlUgRyb2xlEhoKCGRpc2FibGVkGAUgASgIUghkaXNhYmxlZBIdCgpjcmVhdGVkX2F0GAYgASgDUgljcmVhdGVkQXQSHQoKdXBkYXRlZF9hdBgHIAEoA1IJdXBkYXRlZEF0Il4KF0FjY291bnRSb2xlQ2hhbmdlZEV2ZW50Eg4KAmlkGAEgASgJUgJpZBIzCgRyb2xlGAIgASgOMh8uYnVja2V0ZWVyLmFjY291bnQuQWNjb3VudC5Sb2xlUgRyb2xlIiUKE0FjY291bnRFbmFibGVkRXZlbnQSDgoCaWQYASABKAlSAmlkIiYKFEFjY291bnREaXNhYmxlZEV2ZW50Eg4KAmlkGAEgASgJUgJpZCIlChNBY2NvdW50RGVsZXRlZEV2ZW50Eg4KAmlkGAEgASgJUgJpZCLGAQoSQVBJS2V5Q3JlYXRlZEV2ZW50Eg4KAmlkGAEgASgJUgJpZBISCgRuYW1lGAIgASgJUgRuYW1lEjIKBHJvbGUYAyABKA4yHi5idWNrZXRlZXIuYWNjb3VudC5BUElLZXkuUm9sZVIEcm9sZRIaCghkaXNhYmxlZBgEIAEoCFIIZGlzYWJsZWQSHQoKY3JlYXRlZF9hdBgFIAEoA1IJY3JlYXRlZEF0Eh0KCnVwZGF0ZWRfYXQYBiABKANSCXVwZGF0ZWRBdCI8ChZBUElLZXlOYW1lQ2hhbmdlZEV2ZW50Eg4KAmlkGAEgASgJUgJpZBISCgRuYW1lGAIgASgJUgRuYW1lIiQKEkFQSUtleUVuYWJsZWRFdmVudBIOCgJpZBgBIAEoCVICaWQiJQoTQVBJS2V5RGlzYWJsZWRFdmVudBIOCgJpZBgBIAEoCVICaWQiWwoTU2VnbWVudENyZWF0ZWRFdmVudBIOCgJpZBgBIAEoCVICaWQSEgoEbmFtZRgCIAEoCVIEbmFtZRIgCgtkZXNjcmlwdGlvbhgDIAEoCVILZGVzY3JpcHRpb24iJQoTU2VnbWVudERlbGV0ZWRFdmVudBIOCgJpZBgBIAEoCVICaWQiPQoXU2VnbWVudE5hbWVDaGFuZ2VkRXZlbnQSDgoCaWQYASABKAlSAmlkEhIKBG5hbWUYAiABKAlSBG5hbWUiUgoeU2VnbWVudERlc2NyaXB0aW9uQ2hhbmdlZEV2ZW50Eg4KAml
kGAEgASgJUgJpZBIgCgtkZXNjcmlwdGlvbhgCIAEoCVILZGVzY3JpcHRpb24iVAoVU2VnbWVudFJ1bGVBZGRlZEV2ZW50Eg4KAmlkGAEgASgJUgJpZBIrCgRydWxlGAIgASgLMhcuYnVja2V0ZWVyLmZlYXR1cmUuUnVsZVIEcnVsZSJCChdTZWdtZW50UnVsZURlbGV0ZWRFdmVudBIOCgJpZBgBIAEoCVICaWQSFwoHcnVsZV9pZBgCIAEoCVIGcnVsZUlkIogBChtTZWdtZW50UnVsZUNsYXVzZUFkZGVkRXZlbnQSHQoKc2VnbWVudF9pZBgBIAEoCVIJc2VnbWVudElkEhcKB3J1bGVfaWQYAiABKAlSBnJ1bGVJZBIxCgZjbGF1c2UYAyABKAsyGS5idWNrZXRlZXIuZmVhdHVyZS5DbGF1c2VSBmNsYXVzZSJ0Ch1TZWdtZW50UnVsZUNsYXVzZURlbGV0ZWRFdmVudBIdCgpzZWdtZW50X2lkGAEgASgJUglzZWdtZW50SWQSFwoHcnVsZV9pZBgCIAEoCVIGcnVsZUlkEhsKCWNsYXVzZV9pZBgDIAEoCVIIY2xhdXNlSWQilwEKIlNlZ21lbnRDbGF1c2VBdHRyaWJ1dGVDaGFuZ2VkRXZlbnQSHQoKc2VnbWVudF9pZBgBIAEoCVIJc2VnbWVudElkEhcKB3J1bGVfaWQYAiABKAlSBnJ1bGVJZBIbCgljbGF1c2VfaWQYAyABKAlSCGNsYXVzZUlkEhwKCWF0dHJpYnV0ZRgEIAEoCVIJYXR0cmlidXRlIrgBCiFTZWdtZW50Q2xhdXNlT3BlcmF0b3JDaGFuZ2VkRXZlbnQSHQoKc2VnbWVudF9pZBgBIAEoCVIJc2VnbWVudElkEhcKB3J1bGVfaWQYAiABKAlSBnJ1bGVJZBIbCgljbGF1c2VfaWQYAyABKAlSCGNsYXVzZUlkEj4KCG9wZXJhdG9yGAQgASgOMiIuYnVja2V0ZWVyLmZlYXR1cmUuQ2xhdXNlLk9wZXJhdG9yUghvcGVyYXRvciKJAQocU2VnbWVudENsYXVzZVZhbHVlQWRkZWRFdmVudBIdCgpzZWdtZW50X2lkGAEgASgJUglzZWdtZW50SWQSFwoHcnVsZV9pZBgCIAEoCVIGcnVsZUlkEhsKCWNsYXVzZV9pZBgDIAEoCVIIY2xhdXNlSWQSFAoFdmFsdWUYBCABKAlSBXZhbHVlIosBCh5TZWdtZW50Q2xhdXNlVmFsdWVSZW1vdmVkRXZlbnQSHQoKc2VnbWVudF9pZBgBIAEoCVIJc2VnbWVudElkEhcKB3J1bGVfaWQYAiABKAlSBnJ1bGVJZBIbCgljbGF1c2VfaWQYAyABKAlSCGNsYXVzZUlkEhQKBXZhbHVlGAQgASgJUgV2YWx1ZSKNAQoVU2VnbWVudFVzZXJBZGRlZEV2ZW50Eh0KCnNlZ21lbnRfaWQYASABKAlSCXNlZ21lbnRJZBIZCgh1c2VyX2lkcxgCIAMoCVIHdXNlcklkcxI6CgVzdGF0ZRgDIAEoDjIkLmJ1Y2tldGVlci5mZWF0dXJlLlNlZ21lbnRVc2VyLlN0YXRlUgVzdGF0ZSKPAQoXU2VnbWVudFVzZXJEZWxldGVkRXZlbnQSHQoKc2VnbWVudF9pZBgBIAEoCVIJc2VnbWVudElkEhkKCHVzZXJfaWRzGAIgAygJUgd1c2VySWRzEjoKBXN0YXRlGAMgASgOMiQuYnVja2V0ZWVyLmZlYXR1cmUuU2VnbWVudFVzZXIuU3RhdGVSBXN0YXRlIrMBChtTZWdtZW50QnVsa1VwbG9hZFVzZXJzRXZlbnQSHQoKc2VnbWVudF9pZBgBIAEoCVIJc2VnbWVudElkEjkKBnN0YXR1cxgCIAEoDjIhLmJ1Y2tldGVlci5mZWF0dXJlLlNlZ21lbnQuU3RhdHVzUgZzdGF0dXMSOgoFc3RhdGU
YAyABKA4yJC5idWNrZXRlZXIuZmVhdHVyZS5TZWdtZW50VXNlci5TdGF0ZVIFc3RhdGUi1gEKKFNlZ21lbnRCdWxrVXBsb2FkVXNlcnNTdGF0dXNDaGFuZ2VkRXZlbnQSHQoKc2VnbWVudF9pZBgBIAEoCVIJc2VnbWVudElkEjkKBnN0YXR1cxgCIAEoDjIhLmJ1Y2tldGVlci5mZWF0dXJlLlNlZ21lbnQuU3RhdHVzUgZzdGF0dXMSOgoFc3RhdGUYAyABKA4yJC5idWNrZXRlZXIuZmVhdHVyZS5TZWdtZW50VXNlci5TdGF0ZVIFc3RhdGUSFAoFY291bnQYBCABKANSBWNvdW50IvQBChdFbnZpcm9ubWVudENyZWF0ZWRFdmVudBIOCgJpZBgBIAEoCVICaWQSHAoJbmFtZXNwYWNlGAIgASgJUgluYW1lc3BhY2USEgoEbmFtZRgDIAEoCVIEbmFtZRIgCgtkZXNjcmlwdGlvbhgEIAEoCVILZGVzY3JpcHRpb24SGAoHZGVsZXRlZBgFIAEoCFIHZGVsZXRlZBIdCgpjcmVhdGVkX2F0GAYgASgDUgljcmVhdGVkQXQSHQoKdXBkYXRlZF9hdBgHIAEoA1IJdXBkYXRlZEF0Eh0KCnByb2plY3RfaWQYCCABKAlSCXByb2plY3RJZCI9ChdFbnZpcm9ubWVudFJlbmFtZWRFdmVudBIOCgJpZBgBIAEoCVICaWQSEgoEbmFtZRgCIAEoCVIEbmFtZSJWCiJFbnZpcm9ubWVudERlc2NyaXB0aW9uQ2hhbmdlZEV2ZW50Eg4KAmlkGAEgASgJUgJpZBIgCgtkZXNjcmlwdGlvbhgCIAEoCVILZGVzY3JpcHRpb24iRwoXRW52aXJvbm1lbnREZWxldGVkRXZlbnQSDgoCaWQYASABKAlSAmlkEhwKCW5hbWVzcGFjZRgCIAEoCVIJbmFtZXNwYWNlIuMBChhBZG1pbkFjY291bnRDcmVhdGVkRXZlbnQSDgoCaWQYASABKAlSAmlkEhQKBWVtYWlsGAIgASgJUgVlbWFpbBISCgRuYW1lGAMgASgJUgRuYW1lEjMKBHJvbGUYBCABKA4yHy5idWNrZXRlZXIuYWNjb3VudC5BY2NvdW50LlJvbGVSBHJvbGUSGgoIZGlzYWJsZWQYBSABKAhSCGRpc2FibGVkEh0KCmNyZWF0ZWRfYXQYBiABKANSCWNyZWF0ZWRBdBIdCgp1cGRhdGVkX2F0GAcgASgDUgl1cGRhdGVkQXQiKgoYQWRtaW5BY2NvdW50RW5hYmxlZEV2ZW50Eg4KAmlkGAEgASgJUgJpZCIrChlBZG1pbkFjY291bnREaXNhYmxlZEV2ZW50Eg4KAmlkGAEgASgJUgJpZCIqChhBZG1pbkFjY291bnREZWxldGVkRXZlbnQSDgoCaWQYASABKAlSAmlkIoUCChdBdXRvT3BzUnVsZUNyZWF0ZWRFdmVudBIdCgpmZWF0dXJlX2lkGAEgASgJUglmZWF0dXJlSWQSNQoIb3BzX3R5cGUYAiABKA4yGi5idWNrZXRlZXIuYXV0b29wcy5PcHNUeXBlUgdvcHNUeXBlEjMKB2NsYXVzZXMYAyADKAsyGS5idWNrZXRlZXIuYXV0b29wcy5DbGF1c2VSB2NsYXVzZXMSIQoMdHJpZ2dlcmVkX2F0GAQgASgDUgt0cmlnZ2VyZWRBdBIdCgpjcmVhdGVkX2F0GAUgASgDUgljcmVhdGVkQXQSHQoKdXBkYXRlZF9hdBgGIAEoA1IJdXBkYXRlZEF0IhkKF0F1dG9PcHNSdWxlRGVsZXRlZEV2ZW50IlcKHkF1dG9PcHNSdWxlT3BzVHlwZUNoYW5nZWRFdmVudBI1CghvcHNfdHlwZRgBIAEoDjIaLmJ1Y2tldGVlci5hdXRvb3BzLk9wc1R5cGVSB29wc1R5cGUiJAoiQXV0b09wc1J1bGVUcml
nZ2VyZWRBdENoYW5nZWRFdmVudCKVAQocT3BzRXZlbnRSYXRlQ2xhdXNlQWRkZWRFdmVudBIbCgljbGF1c2VfaWQYASABKAlSCGNsYXVzZUlkElgKFW9wc19ldmVudF9yYXRlX2NsYXVzZRgCIAEoCzIlLmJ1Y2tldGVlci5hdXRvb3BzLk9wc0V2ZW50UmF0ZUNsYXVzZVISb3BzRXZlbnRSYXRlQ2xhdXNlIpcBCh5PcHNFdmVudFJhdGVDbGF1c2VDaGFuZ2VkRXZlbnQSGwoJY2xhdXNlX2lkGAEgASgJUghjbGF1c2VJZBJYChVvcHNfZXZlbnRfcmF0ZV9jbGF1c2UYAiABKAsyJS5idWNrZXRlZXIuYXV0b29wcy5PcHNFdmVudFJhdGVDbGF1c2VSEm9wc0V2ZW50UmF0ZUNsYXVzZSI8Ch1BdXRvT3BzUnVsZUNsYXVzZURlbGV0ZWRFdmVudBIbCgljbGF1c2VfaWQYASABKAlSCGNsYXVzZUlkIoMBChhEYXRldGltZUNsYXVzZUFkZGVkRXZlbnQSGwoJY2xhdXNlX2lkGAEgASgJUghjbGF1c2VJZBJKCg9kYXRldGltZV9jbGF1c2UYAiABKAsyIS5idWNrZXRlZXIuYXV0b29wcy5EYXRldGltZUNsYXVzZVIOZGF0ZXRpbWVDbGF1c2UihQEKGkRhdGV0aW1lQ2xhdXNlQ2hhbmdlZEV2ZW50EhsKCWNsYXVzZV9pZBgBIAEoCVIIY2xhdXNlSWQSSgoPZGF0ZXRpbWVfY2xhdXNlGAIgASgLMiEuYnVja2V0ZWVyLmF1dG9vcHMuRGF0ZXRpbWVDbGF1c2VSDmRhdGV0aW1lQ2xhdXNlIloKEFB1c2hDcmVhdGVkRXZlbnQSHgoLZmNtX2FwaV9rZXkYAiABKAlSCWZjbUFwaUtleRISCgR0YWdzGAMgAygJUgR0YWdzEhIKBG5hbWUYBCABKAlSBG5hbWUiEgoQUHVzaERlbGV0ZWRFdmVudCIoChJQdXNoVGFnc0FkZGVkRXZlbnQSEgoEdGFncxgCIAMoCVIEdGFncyIqChRQdXNoVGFnc0RlbGV0ZWRFdmVudBISCgR0YWdzGAIgAygJUgR0YWdzIiYKEFB1c2hSZW5hbWVkRXZlbnQSEgoEbmFtZRgCIAEoCVIEbmFtZSLDAQoYU3Vic2NyaXB0aW9uQ3JlYXRlZEV2ZW50ElIKDHNvdXJjZV90eXBlcxgBIAMoDjIvLmJ1Y2tldGVlci5ub3RpZmljYXRpb24uU3Vic2NyaXB0aW9uLlNvdXJjZVR5cGVSC3NvdXJjZVR5cGVzEj8KCXJlY2lwaWVudBgCIAEoCzIhLmJ1Y2tldGVlci5ub3RpZmljYXRpb24uUmVjaXBpZW50UglyZWNpcGllbnQSEgoEbmFtZRgDIAEoCVIEbmFtZSIaChhTdWJzY3JpcHRpb25EZWxldGVkRXZlbnQiGgoYU3Vic2NyaXB0aW9uRW5hYmxlZEV2ZW50IhsKGVN1YnNjcmlwdGlvbkRpc2FibGVkRXZlbnQidwohU3Vic2NyaXB0aW9uU291cmNlVHlwZXNBZGRlZEV2ZW50ElIKDHNvdXJjZV90eXBlcxgBIAMoDjIvLmJ1Y2tldGVlci5ub3RpZmljYXRpb24uU3Vic2NyaXB0aW9uLlNvdXJjZVR5cGVSC3NvdXJjZVR5cGVzInkKI1N1YnNjcmlwdGlvblNvdXJjZVR5cGVzRGVsZXRlZEV2ZW50ElIKDHNvdXJjZV90eXBlcxgBIAMoDjIvLmJ1Y2tldGVlci5ub3RpZmljYXRpb24uU3Vic2NyaXB0aW9uLlNvdXJjZVR5cGVSC3NvdXJjZVR5cGVzIi4KGFN1YnNjcmlwdGlvblJlbmFtZWRFdmVudBISCgRuYW1lGAEgASgJUgRuYW1lIsgBCh1BZG1pblN1YnNjcmlwdGlvbkNyZWF0ZWR
FdmVudBJSCgxzb3VyY2VfdHlwZXMYASADKA4yLy5idWNrZXRlZXIubm90aWZpY2F0aW9uLlN1YnNjcmlwdGlvbi5Tb3VyY2VUeXBlUgtzb3VyY2VUeXBlcxI/CglyZWNpcGllbnQYAiABKAsyIS5idWNrZXRlZXIubm90aWZpY2F0aW9uLlJlY2lwaWVudFIJcmVjaXBpZW50EhIKBG5hbWUYAyABKAlSBG5hbWUiHwodQWRtaW5TdWJzY3JpcHRpb25EZWxldGVkRXZlbnQiHwodQWRtaW5TdWJzY3JpcHRpb25FbmFibGVkRXZlbnQiIAoeQWRtaW5TdWJzY3JpcHRpb25EaXNhYmxlZEV2ZW50InwKJkFkbWluU3Vic2NyaXB0aW9uU291cmNlVHlwZXNBZGRlZEV2ZW50ElIKDHNvdXJjZV90eXBlcxgBIAMoDjIvLmJ1Y2tldGVlci5ub3RpZmljYXRpb24uU3Vic2NyaXB0aW9uLlNvdXJjZVR5cGVSC3NvdXJjZVR5cGVzIn4KKEFkbWluU3Vic2NyaXB0aW9uU291cmNlVHlwZXNEZWxldGVkRXZlbnQSUgoMc291cmNlX3R5cGVzGAEgAygOMi8uYnVja2V0ZWVyLm5vdGlmaWNhdGlvbi5TdWJzY3JpcHRpb24uU291cmNlVHlwZVILc291cmNlVHlwZXMiMwodQWRtaW5TdWJzY3JpcHRpb25SZW5hbWVkRXZlbnQSEgoEbmFtZRgBIAEoCVIEbmFtZSLcAQoTUHJvamVjdENyZWF0ZWRFdmVudBIOCgJpZBgBIAEoCVICaWQSIAoLZGVzY3JpcHRpb24YAiABKAlSC2Rlc2NyaXB0aW9uEhoKCGRpc2FibGVkGAMgASgIUghkaXNhYmxlZBIUCgV0cmlhbBgEIAEoCFIFdHJpYWwSIwoNY3JlYXRvcl9lbWFpbBgFIAEoCVIMY3JlYXRvckVtYWlsEh0KCmNyZWF0ZWRfYXQYBiABKANSCWNyZWF0ZWRBdBIdCgp1cGRhdGVkX2F0GAcgASgDUgl1cGRhdGVkQXQiUgoeUHJvamVjdERlc2NyaXB0aW9uQ2hhbmdlZEV2ZW50Eg4KAmlkGAEgASgJUgJpZBIgCgtkZXNjcmlwdGlvbhgCIAEoCVILZGVzY3JpcHRpb24iJQoTUHJvamVjdEVuYWJsZWRFdmVudBIOCgJpZBgBIAEoCVICaWQiJgoUUHJvamVjdERpc2FibGVkRXZlbnQSDgoCaWQYASABKAlSAmlkIuEBChhQcm9qZWN0VHJpYWxDcmVhdGVkRXZlbnQSDgoCaWQYASABKAlSAmlkEiAKC2Rlc2NyaXB0aW9uGAIgASgJUgtkZXNjcmlwdGlvbhIaCghkaXNhYmxlZBgDIAEoCFIIZGlzYWJsZWQSFAoFdHJpYWwYBCABKAhSBXRyaWFsEiMKDWNyZWF0b3JfZW1haWwYBSABKAlSDGNyZWF0b3JFbWFpbBIdCgpjcmVhdGVkX2F0GAYgASgDUgljcmVhdGVkQXQSHQoKdXBkYXRlZF9hdBgHIAEoA1IJdXBkYXRlZEF0IiwKGlByb2plY3RUcmlhbENvbnZlcnRlZEV2ZW50Eg4KAmlkGAEgASgJUgJpZCJdChZQcmVyZXF1aXNpdGVBZGRlZEV2ZW50EkMKDHByZXJlcXVpc2l0ZRgBIAEoCzIfLmJ1Y2tldGVlci5mZWF0dXJlLlByZXJlcXVpc2l0ZVIMcHJlcmVxdWlzaXRlImgKIVByZXJlcXVpc2l0ZVZhcmlhdGlvbkNoYW5nZWRFdmVudBJDCgxwcmVyZXF1aXNpdGUYASABKAsyHy5idWNrZXRlZXIuZmVhdHVyZS5QcmVyZXF1aXNpdGVSDHByZXJlcXVpc2l0ZSI5ChhQcmVyZXF1aXNpdGVSZW1vdmVkRXZlbnQSHQoKZmVhdHVyZV9pZBgBIAEoCVIJZmVhdHVyZUl
kIpkBChNXZWJob29rQ3JlYXRlZEV2ZW50Eg4KAmlkGAEgASgJUgJpZBISCgRuYW1lGAIgASgJUgRuYW1lEiAKC2Rlc2NyaXB0aW9uGAMgASgJUgtkZXNjcmlwdGlvbhIdCgpjcmVhdGVkX2F0GAQgASgDUgljcmVhdGVkQXQSHQoKdXBkYXRlZF9hdBgFIAEoA1IJdXBkYXRlZEF0IiUKE1dlYmhvb2tEZWxldGVkRXZlbnQSDgoCaWQYASABKAlSAmlkIj0KF1dlYmhvb2tOYW1lQ2hhbmdlZEV2ZW50Eg4KAmlkGAEgASgJUgJpZBISCgRuYW1lGAIgASgJUgRuYW1lIlIKHldlYmhvb2tEZXNjcmlwdGlvbkNoYW5nZWRFdmVudBIOCgJpZBgBIAEoCVICaWQSIAoLZGVzY3JpcHRpb24YAiABKAlSC2Rlc2NyaXB0aW9uIn8KF1dlYmhvb2tDbGF1c2VBZGRlZEV2ZW50EhsKCWNsYXVzZV9pZBgBIAEoCVIIY2xhdXNlSWQSRwoOd2ViaG9va19jbGF1c2UYAiABKAsyIC5idWNrZXRlZXIuYXV0b29wcy5XZWJob29rQ2xhdXNlUg13ZWJob29rQ2xhdXNlIoEBChlXZWJob29rQ2xhdXNlQ2hhbmdlZEV2ZW50EhsKCWNsYXVzZV9pZBgBIAEoCVIIY2xhdXNlSWQSRwoOd2ViaG9va19jbGF1c2UYAiABKAsyIC5idWNrZXRlZXIuYXV0b29wcy5XZWJob29rQ2xhdXNlUg13ZWJob29rQ2xhdXNlQi9aLWdpdGh1Yi5jb20vY2EtZHAvYnVja2V0ZWVyL3Byb3RvL2V2ZW50L2RvbWFpbmIGcHJvdG8zCsMBCipwcm90by9ldmVudC9kb21haW4vbG9jYWxpemVkX21lc3NhZ2UucHJvdG8SFmJ1Y2tldGVlci5ldmVudC5kb21haW4iRAoQTG9jYWxpemVkTWVzc2FnZRIWCgZsb2NhbGUYASABKAlSBmxvY2FsZRIYCgdtZXNzYWdlGAIgASgJUgdtZXNzYWdlQi9aLWdpdGh1Yi5jb20vY2EtZHAvYnVja2V0ZWVyL3Byb3RvL2V2ZW50L2RvbWFpbmIGcHJvdG8zCqAFCh1wcm90by9hdWRpdGxvZy9hdWRpdGxvZy5wcm90bxISYnVja2V0ZWVyLmF1ZGl0bG9nGhlnb29nbGUvcHJvdG9idWYvYW55LnByb3RvGh5wcm90by9ldmVudC9kb21haW4vZXZlbnQucHJvdG8aKnByb3RvL2V2ZW50L2RvbWFpbi9sb2NhbGl6ZWRfbWVzc2FnZS5wcm90byLOAwoIQXVkaXRMb2cSDgoCaWQYASABKAlSAmlkEhwKCXRpbWVzdGFtcBgCIAEoA1IJdGltZXN0YW1wEkkKC2VudGl0eV90eXBlGAMgASgOMiguYnVja2V0ZWVyLmV2ZW50LmRvbWFpbi5FdmVudC5FbnRpdHlUeXBlUgplbnRpdHlUeXBlEhsKCWVudGl0eV9pZBgEIAEoCVIIZW50aXR5SWQSNgoEdHlwZRgFIAEoDjIiLmJ1Y2tldGVlci5ldmVudC5kb21haW4uRXZlbnQuVHlwZVIEdHlwZRIqCgVldmVudBgGIAEoCzIULmdvb2dsZS5wcm90b2J1Zi5BbnlSBWV2ZW50EjYKBmVkaXRvchgHIAEoCzIeLmJ1Y2tldGVlci5ldmVudC5kb21haW4uRWRpdG9yUgZlZGl0b3ISOQoHb3B0aW9ucxgIIAEoCzIfLmJ1Y2tldGVlci5ldmVudC5kb21haW4uT3B0aW9uc1IHb3B0aW9ucxJVChFsb2NhbGl6ZWRfbWVzc2FnZRgJIAEoCzIoLmJ1Y2tldGVlci5ldmVudC5kb21haW4uTG9jYWxpemVkTWVzc2FnZVIQbG9jYWxpemVkTWVzc2FnZUIrWilnaXRodWIuY29tL2NhLWR
wL2J1Y2tldGVlci9wcm90by9hdWRpdGxvZ2IGcHJvdG8zCqkTChxwcm90by9hdWRpdGxvZy9zZXJ2aWNlLnByb3RvEhJidWNrZXRlZXIuYXVkaXRsb2caHmdvb2dsZS9wcm90b2J1Zi93cmFwcGVycy5wcm90bxodcHJvdG8vYXVkaXRsb2cvYXVkaXRsb2cucHJvdG8ihAQKFExpc3RBdWRpdExvZ3NSZXF1ZXN0EhsKCXBhZ2Vfc2l6ZRgBIAEoA1IIcGFnZVNpemUSFgoGY3Vyc29yGAIgASgJUgZjdXJzb3ISMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAMgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZRJLCghvcmRlcl9ieRgEIAEoDjIwLmJ1Y2tldGVlci5hdWRpdGxvZy5MaXN0QXVkaXRMb2dzUmVxdWVzdC5PcmRlckJ5UgdvcmRlckJ5EmAKD29yZGVyX2RpcmVjdGlvbhgFIAEoDjI3LmJ1Y2tldGVlci5hdWRpdGxvZy5MaXN0QXVkaXRMb2dzUmVxdWVzdC5PcmRlckRpcmVjdGlvblIOb3JkZXJEaXJlY3Rpb24SJQoOc2VhcmNoX2tleXdvcmQYBiABKAlSDXNlYXJjaEtleXdvcmQSEgoEZnJvbRgHIAEoA1IEZnJvbRIOCgJ0bxgIIAEoA1ICdG8SPAoLZW50aXR5X3R5cGUYCSABKAsyGy5nb29nbGUucHJvdG9idWYuSW50MzJWYWx1ZVIKZW50aXR5VHlwZSIlCgdPcmRlckJ5EgsKB0RFRkFVTFQQABINCglUSU1FU1RBTVAQASIjCg5PcmRlckRpcmVjdGlvbhIICgRERVNDEAASBwoDQVNDEAEijQEKFUxpc3RBdWRpdExvZ3NSZXNwb25zZRI7CgphdWRpdF9sb2dzGAEgAygLMhwuYnVja2V0ZWVyLmF1ZGl0bG9nLkF1ZGl0TG9nUglhdWRpdExvZ3MSFgoGY3Vyc29yGAIgASgJUgZjdXJzb3ISHwoLdG90YWxfY291bnQYAyABKANSCnRvdGFsQ291bnQi3gMKGUxpc3RBZG1pbkF1ZGl0TG9nc1JlcXVlc3QSGwoJcGFnZV9zaXplGAEgASgDUghwYWdlU2l6ZRIWCgZjdXJzb3IYAiABKAlSBmN1cnNvchJQCghvcmRlcl9ieRgDIAEoDjI1LmJ1Y2tldGVlci5hdWRpdGxvZy5MaXN0QWRtaW5BdWRpdExvZ3NSZXF1ZXN0Lk9yZGVyQnlSB29yZGVyQnkSZQoPb3JkZXJfZGlyZWN0aW9uGAQgASgOMjwuYnVja2V0ZWVyLmF1ZGl0bG9nLkxpc3RBZG1pbkF1ZGl0TG9nc1JlcXVlc3QuT3JkZXJEaXJlY3Rpb25SDm9yZGVyRGlyZWN0aW9uEiUKDnNlYXJjaF9rZXl3b3JkGAUgASgJUg1zZWFyY2hLZXl3b3JkEhIKBGZyb20YBiABKANSBGZyb20SDgoCdG8YByABKANSAnRvEjwKC2VudGl0eV90eXBlGAggASgLMhsuZ29vZ2xlLnByb3RvYnVmLkludDMyVmFsdWVSCmVudGl0eVR5cGUiJQoHT3JkZXJCeRILCgdERUZBVUxUEAASDQoJVElNRVNUQU1QEAEiIwoOT3JkZXJEaXJlY3Rpb24SCAoEREVTQxAAEgcKA0FTQxABIpIBChpMaXN0QWRtaW5BdWRpdExvZ3NSZXNwb25zZRI7CgphdWRpdF9sb2dzGAEgAygLMhwuYnVja2V0ZWVyLmF1ZGl0bG9nLkF1ZGl0TG9nUglhdWRpdExvZ3MSFgoGY3Vyc29yGAIgASgJUgZjdXJzb3ISHwoLdG90YWxfY291bnQYAyABKANSCnRvdGFsQ291bnQi9AMKGUxpc3RGZWF0dXJlSGlzdG9yeVJlcXVlc3QSHQoKZmVhdHVyZV9pZBgBIAEoCVIJZmVhdHV
yZUlkEhsKCXBhZ2Vfc2l6ZRgCIAEoA1IIcGFnZVNpemUSFgoGY3Vyc29yGAMgASgJUgZjdXJzb3ISMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAQgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZRJQCghvcmRlcl9ieRgFIAEoDjI1LmJ1Y2tldGVlci5hdWRpdGxvZy5MaXN0RmVhdHVyZUhpc3RvcnlSZXF1ZXN0Lk9yZGVyQnlSB29yZGVyQnkSZQoPb3JkZXJfZGlyZWN0aW9uGAYgASgOMjwuYnVja2V0ZWVyLmF1ZGl0bG9nLkxpc3RGZWF0dXJlSGlzdG9yeVJlcXVlc3QuT3JkZXJEaXJlY3Rpb25SDm9yZGVyRGlyZWN0aW9uEiUKDnNlYXJjaF9rZXl3b3JkGAcgASgJUg1zZWFyY2hLZXl3b3JkEhIKBGZyb20YCCABKANSBGZyb20SDgoCdG8YCSABKANSAnRvIiUKB09yZGVyQnkSCwoHREVGQVVMVBAAEg0KCVRJTUVTVEFNUBABIiMKDk9yZGVyRGlyZWN0aW9uEggKBERFU0MQABIHCgNBU0MQASKSAQoaTGlzdEZlYXR1cmVIaXN0b3J5UmVzcG9uc2USOwoKYXVkaXRfbG9ncxgBIAMoCzIcLmJ1Y2tldGVlci5hdWRpdGxvZy5BdWRpdExvZ1IJYXVkaXRMb2dzEhYKBmN1cnNvchgCIAEoCVIGY3Vyc29yEh8KC3RvdGFsX2NvdW50GAMgASgDUgp0b3RhbENvdW50MucCCg9BdWRpdExvZ1NlcnZpY2USZgoNTGlzdEF1ZGl0TG9ncxIoLmJ1Y2tldGVlci5hdWRpdGxvZy5MaXN0QXVkaXRMb2dzUmVxdWVzdBopLmJ1Y2tldGVlci5hdWRpdGxvZy5MaXN0QXVkaXRMb2dzUmVzcG9uc2UiABJ1ChJMaXN0QWRtaW5BdWRpdExvZ3MSLS5idWNrZXRlZXIuYXVkaXRsb2cuTGlzdEFkbWluQXVkaXRMb2dzUmVxdWVzdBouLmJ1Y2tldGVlci5hdWRpdGxvZy5MaXN0QWRtaW5BdWRpdExvZ3NSZXNwb25zZSIAEnUKEkxpc3RGZWF0dXJlSGlzdG9yeRItLmJ1Y2tldGVlci5hdWRpdGxvZy5MaXN0RmVhdHVyZUhpc3RvcnlSZXF1ZXN0Gi4uYnVja2V0ZWVyLmF1ZGl0bG9nLkxpc3RGZWF0dXJlSGlzdG9yeVJlc3BvbnNlIgBCK1opZ2l0aHViLmNvbS9jYS1kcC9idWNrZXRlZXIvcHJvdG8vYXVkaXRsb2diBnByb3RvMw==" + authDescriptor: 
"CsECChZwcm90by9hdXRoL3Rva2VuLnByb3RvEg5idWNrZXRlZXIuYXV0aCKhAQoFVG9rZW4SIQoMYWNjZXNzX3Rva2VuGAEgASgJUgthY2Nlc3NUb2tlbhIdCgp0b2tlbl90eXBlGAIgASgJUgl0b2tlblR5cGUSIwoNcmVmcmVzaF90b2tlbhgDIAEoCVIMcmVmcmVzaFRva2VuEhYKBmV4cGlyeRgEIAEoA1IGZXhwaXJ5EhkKCGlkX3Rva2VuGAUgASgJUgdpZFRva2VuIkIKDklEVG9rZW5TdWJqZWN0EhcKB3VzZXJfaWQYASABKAlSBnVzZXJJZBIXCgdjb25uX2lkGAIgASgJUgZjb25uSWRCJ1olZ2l0aHViLmNvbS9jYS1kcC9idWNrZXRlZXIvcHJvdG8vYXV0aGIGcHJvdG8zCtQGChhwcm90by9hdXRoL3NlcnZpY2UucHJvdG8SDmJ1Y2tldGVlci5hdXRoGhZwcm90by9hdXRoL3Rva2VuLnByb3RvIlAKFUdldEF1dGhDb2RlVVJMUmVxdWVzdBIUCgVzdGF0ZRgBIAEoCVIFc3RhdGUSIQoMcmVkaXJlY3RfdXJsGAIgASgJUgtyZWRpcmVjdFVybCIqChZHZXRBdXRoQ29kZVVSTFJlc3BvbnNlEhAKA3VybBgBIAEoCVIDdXJsIk0KFEV4Y2hhbmdlVG9rZW5SZXF1ZXN0EhIKBGNvZGUYASABKAlSBGNvZGUSIQoMcmVkaXJlY3RfdXJsGAIgASgJUgtyZWRpcmVjdFVybCJEChVFeGNoYW5nZVRva2VuUmVzcG9uc2USKwoFdG9rZW4YASABKAsyFS5idWNrZXRlZXIuYXV0aC5Ub2tlblIFdG9rZW4iXQoTUmVmcmVzaFRva2VuUmVxdWVzdBIjCg1yZWZyZXNoX3Rva2VuGAEgASgJUgxyZWZyZXNoVG9rZW4SIQoMcmVkaXJlY3RfdXJsGAIgASgJUgtyZWRpcmVjdFVybCJDChRSZWZyZXNoVG9rZW5SZXNwb25zZRIrCgV0b2tlbhgBIAEoCzIVLmJ1Y2tldGVlci5hdXRoLlRva2VuUgV0b2tlbjKnAgoLQXV0aFNlcnZpY2USXwoOR2V0QXV0aENvZGVVUkwSJS5idWNrZXRlZXIuYXV0aC5HZXRBdXRoQ29kZVVSTFJlcXVlc3QaJi5idWNrZXRlZXIuYXV0aC5HZXRBdXRoQ29kZVVSTFJlc3BvbnNlElwKDUV4Y2hhbmdlVG9rZW4SJC5idWNrZXRlZXIuYXV0aC5FeGNoYW5nZVRva2VuUmVxdWVzdBolLmJ1Y2tldGVlci5hdXRoLkV4Y2hhbmdlVG9rZW5SZXNwb25zZRJZCgxSZWZyZXNoVG9rZW4SIy5idWNrZXRlZXIuYXV0aC5SZWZyZXNoVG9rZW5SZXF1ZXN0GiQuYnVja2V0ZWVyLmF1dGguUmVmcmVzaFRva2VuUmVzcG9uc2VCJ1olZ2l0aHViLmNvbS9jYS1kcC9idWNrZXRlZXIvcHJvdG8vYXV0aGIGcHJvdG8z" + autoopsDescriptor: 
"CuQBChlnb29nbGUvcHJvdG9idWYvYW55LnByb3RvEg9nb29nbGUucHJvdG9idWYiNgoDQW55EhkKCHR5cGVfdXJsGAEgASgJUgd0eXBlVXJsEhQKBXZhbHVlGAIgASgMUgV2YWx1ZUJ2ChNjb20uZ29vZ2xlLnByb3RvYnVmQghBbnlQcm90b1ABWixnb29nbGUuZ29sYW5nLm9yZy9wcm90b2J1Zi90eXBlcy9rbm93bi9hbnlwYqICA0dQQqoCHkdvb2dsZS5Qcm90b2J1Zi5XZWxsS25vd25UeXBlc2IGcHJvdG8zCowHChpwcm90by9hdXRvb3BzL2NsYXVzZS5wcm90bxIRYnVja2V0ZWVyLmF1dG9vcHMaGWdvb2dsZS9wcm90b2J1Zi9hbnkucHJvdG8iRgoGQ2xhdXNlEg4KAmlkGAEgASgJUgJpZBIsCgZjbGF1c2UYAiABKAsyFC5nb29nbGUucHJvdG9idWYuQW55UgZjbGF1c2UinwIKEk9wc0V2ZW50UmF0ZUNsYXVzZRIhCgx2YXJpYXRpb25faWQYAiABKAlSC3ZhcmlhdGlvbklkEhcKB2dvYWxfaWQYAyABKAlSBmdvYWxJZBIbCgltaW5fY291bnQYBCABKANSCG1pbkNvdW50EikKEHRocmVhZHNob2xkX3JhdGUYBSABKAFSD3RocmVhZHNob2xkUmF0ZRJKCghvcGVyYXRvchgGIAEoDjIuLmJ1Y2tldGVlci5hdXRvb3BzLk9wc0V2ZW50UmF0ZUNsYXVzZS5PcGVyYXRvclIIb3BlcmF0b3IiMwoIT3BlcmF0b3ISFAoQR1JFQVRFUl9PUl9FUVVBTBAAEhEKDUxFU1NfT1JfRVFVQUwQAUoECAEQAiIkCg5EYXRldGltZUNsYXVzZRISCgR0aW1lGAEgASgDUgR0aW1lIvsCCg1XZWJob29rQ2xhdXNlEh0KCndlYmhvb2tfaWQYASABKAlSCXdlYmhvb2tJZBJKCgpjb25kaXRpb25zGAIgAygLMiouYnVja2V0ZWVyLmF1dG9vcHMuV2ViaG9va0NsYXVzZS5Db25kaXRpb25SCmNvbmRpdGlvbnMa/gEKCUNvbmRpdGlvbhIWCgZmaWx0ZXIYASABKAlSBmZpbHRlchIUCgV2YWx1ZRgCIAEoCVIFdmFsdWUSTwoIb3BlcmF0b3IYAyABKA4yMy5idWNrZXRlZXIuYXV0b29wcy5XZWJob29rQ2xhdXNlLkNvbmRpdGlvbi5PcGVyYXRvclIIb3BlcmF0b3IicgoIT3BlcmF0b3ISCQoFRVFVQUwQABINCglOT1RfRVFVQUwQARINCglNT1JFX1RIQU4QAhIWChJNT1JFX1RIQU5fT1JfRVFVQUwQAxINCglMRVNTX1RIQU4QBBIWChJMRVNTX1RIQU5fT1JfRVFVQUwQBUIqWihnaXRodWIuY29tL2NhLWRwL2J1Y2tldGVlci9wcm90by9hdXRvb3BzYgZwcm90bzMK4AMKIXByb3RvL2F1dG9vcHMvYXV0b19vcHNfcnVsZS5wcm90bxIRYnVja2V0ZWVyLmF1dG9vcHMaGnByb3RvL2F1dG9vcHMvY2xhdXNlLnByb3RvIqMCCgtBdXRvT3BzUnVsZRIOCgJpZBgBIAEoCVICaWQSHQoKZmVhdHVyZV9pZBgCIAEoCVIJZmVhdHVyZUlkEjUKCG9wc190eXBlGAMgASgOMhouYnVja2V0ZWVyLmF1dG9vcHMuT3BzVHlwZVIHb3BzVHlwZRIzCgdjbGF1c2VzGAQgAygLMhkuYnVja2V0ZWVyLmF1dG9vcHMuQ2xhdXNlUgdjbGF1c2VzEiEKDHRyaWdnZXJlZF9hdBgGIAEoA1ILdHJpZ2dlcmVkQXQSHQoKY3JlYXRlZF9hdBgHIAEoA1IJY3JlYXRlZEF0Eh0KCnVwZGF0ZWRfYXQYCCABKANSCXVwZGF0ZWRBdBIYCgd
kZWxldGVkGAkgASgIUgdkZWxldGVkKjIKB09wc1R5cGUSEgoORU5BQkxFX0ZFQVRVUkUQABITCg9ESVNBQkxFX0ZFQVRVUkUQAUIqWihnaXRodWIuY29tL2NhLWRwL2J1Y2tldGVlci9wcm90by9hdXRvb3BzYgZwcm90bzMK8AwKG3Byb3RvL2F1dG9vcHMvY29tbWFuZC5wcm90bxIRYnVja2V0ZWVyLmF1dG9vcHMaIXByb3RvL2F1dG9vcHMvYXV0b19vcHNfcnVsZS5wcm90bxoacHJvdG8vYXV0b29wcy9jbGF1c2UucHJvdG8i5QIKGENyZWF0ZUF1dG9PcHNSdWxlQ29tbWFuZBIdCgpmZWF0dXJlX2lkGAEgASgJUglmZWF0dXJlSWQSNQoIb3BzX3R5cGUYAiABKA4yGi5idWNrZXRlZXIuYXV0b29wcy5PcHNUeXBlUgdvcHNUeXBlEloKFm9wc19ldmVudF9yYXRlX2NsYXVzZXMYAyADKAsyJS5idWNrZXRlZXIuYXV0b29wcy5PcHNFdmVudFJhdGVDbGF1c2VSE29wc0V2ZW50UmF0ZUNsYXVzZXMSTAoQZGF0ZXRpbWVfY2xhdXNlcxgEIAMoCzIhLmJ1Y2tldGVlci5hdXRvb3BzLkRhdGV0aW1lQ2xhdXNlUg9kYXRldGltZUNsYXVzZXMSSQoPd2ViaG9va19jbGF1c2VzGAUgAygLMiAuYnVja2V0ZWVyLmF1dG9vcHMuV2ViaG9va0NsYXVzZVIOd2ViaG9va0NsYXVzZXMiWAofQ2hhbmdlQXV0b09wc1J1bGVPcHNUeXBlQ29tbWFuZBI1CghvcHNfdHlwZRgBIAEoDjIaLmJ1Y2tldGVlci5hdXRvb3BzLk9wc1R5cGVSB29wc1R5cGUiGgoYRGVsZXRlQXV0b09wc1J1bGVDb21tYW5kIiUKI0NoYW5nZUF1dG9PcHNSdWxlVHJpZ2dlcmVkQXRDb21tYW5kIngKHEFkZE9wc0V2ZW50UmF0ZUNsYXVzZUNvbW1hbmQSWAoVb3BzX2V2ZW50X3JhdGVfY2xhdXNlGAEgASgLMiUuYnVja2V0ZWVyLmF1dG9vcHMuT3BzRXZlbnRSYXRlQ2xhdXNlUhJvcHNFdmVudFJhdGVDbGF1c2UiiwEKH0NoYW5nZU9wc0V2ZW50UmF0ZUNsYXVzZUNvbW1hbmQSDgoCaWQYASABKAlSAmlkElgKFW9wc19ldmVudF9yYXRlX2NsYXVzZRgCIAEoCzIlLmJ1Y2tldGVlci5hdXRvb3BzLk9wc0V2ZW50UmF0ZUNsYXVzZVISb3BzRXZlbnRSYXRlQ2xhdXNlIiUKE0RlbGV0ZUNsYXVzZUNvbW1hbmQSDgoCaWQYASABKAlSAmlkImYKGEFkZERhdGV0aW1lQ2xhdXNlQ29tbWFuZBJKCg9kYXRldGltZV9jbGF1c2UYASABKAsyIS5idWNrZXRlZXIuYXV0b29wcy5EYXRldGltZUNsYXVzZVIOZGF0ZXRpbWVDbGF1c2UieQobQ2hhbmdlRGF0ZXRpbWVDbGF1c2VDb21tYW5kEg4KAmlkGAEgASgJUgJpZBJKCg9kYXRldGltZV9jbGF1c2UYAiABKAsyIS5idWNrZXRlZXIuYXV0b29wcy5EYXRldGltZUNsYXVzZVIOZGF0ZXRpbWVDbGF1c2UiTAoUQ3JlYXRlV2ViaG9va0NvbW1hbmQSEgoEbmFtZRgBIAEoCVIEbmFtZRIgCgtkZXNjcmlwdGlvbhgCIAEoCVILZGVzY3JpcHRpb24iLgoYQ2hhbmdlV2ViaG9va05hbWVDb21tYW5kEhIKBG5hbWUYASABKAlSBG5hbWUiQwofQ2hhbmdlV2ViaG9va0Rlc2NyaXB0aW9uQ29tbWFuZBIgCgtkZXNjcmlwdGlvbhgBIAEoCVILZGVzY3JpcHRpb24iFgoURGVsZXRlV2ViaG9va0N
vbW1hbmQiYgoXQWRkV2ViaG9va0NsYXVzZUNvbW1hbmQSRwoOd2ViaG9va19jbGF1c2UYASABKAsyIC5idWNrZXRlZXIuYXV0b29wcy5XZWJob29rQ2xhdXNlUg13ZWJob29rQ2xhdXNlInUKGkNoYW5nZVdlYmhvb2tDbGF1c2VDb21tYW5kEg4KAmlkGAEgASgJUgJpZBJHCg53ZWJob29rX2NsYXVzZRgCIAEoCzIgLmJ1Y2tldGVlci5hdXRvb3BzLldlYmhvb2tDbGF1c2VSDXdlYmhvb2tDbGF1c2VCKlooZ2l0aHViLmNvbS9jYS1kcC9idWNrZXRlZXIvcHJvdG8vYXV0b29wc2IGcHJvdG8zCtoCCh1wcm90by9hdXRvb3BzL29wc19jb3VudC5wcm90bxIRYnVja2V0ZWVyLmF1dG9vcHMi8QEKCE9wc0NvdW50Eg4KAmlkGAEgASgJUgJpZBInChBhdXRvX29wc19ydWxlX2lkGAIgASgJUg1hdXRvT3BzUnVsZUlkEhsKCWNsYXVzZV9pZBgDIAEoCVIIY2xhdXNlSWQSHQoKdXBkYXRlZF9hdBgEIAEoA1IJdXBkYXRlZEF0EiYKD29wc19ldmVudF9jb3VudBgFIAEoA1INb3BzRXZlbnRDb3VudBIpChBldmFsdWF0aW9uX2NvdW50GAYgASgDUg9ldmFsdWF0aW9uQ291bnQSHQoKZmVhdHVyZV9pZBgHIAEoCVIJZmVhdHVyZUlkQipaKGdpdGh1Yi5jb20vY2EtZHAvYnVja2V0ZWVyL3Byb3RvL2F1dG9vcHNiBnByb3RvMwr0AQobcHJvdG8vYXV0b29wcy93ZWJob29rLnByb3RvEhFidWNrZXRlZXIuYXV0b29wcyKNAQoHV2ViaG9vaxIOCgJpZBgBIAEoCVICaWQSEgoEbmFtZRgCIAEoCVIEbmFtZRIgCgtkZXNjcmlwdGlvbhgDIAEoCVILZGVzY3JpcHRpb24SHQoKY3JlYXRlZF9hdBgEIAEoA1IJY3JlYXRlZEF0Eh0KCnVwZGF0ZWRfYXQYBSABKANSCXVwZGF0ZWRBdEIqWihnaXRodWIuY29tL2NhLWRwL2J1Y2tldGVlci9wcm90by9hdXRvb3BzYgZwcm90bzMK3SsKG3Byb3RvL2F1dG9vcHMvc2VydmljZS5wcm90bxIRYnVja2V0ZWVyLmF1dG9vcHMaIXByb3RvL2F1dG9vcHMvYXV0b19vcHNfcnVsZS5wcm90bxobcHJvdG8vYXV0b29wcy9jb21tYW5kLnByb3RvGh1wcm90by9hdXRvb3BzL29wc19jb3VudC5wcm90bxobcHJvdG8vYXV0b29wcy93ZWJob29rLnByb3RvIlwKFUdldEF1dG9PcHNSdWxlUmVxdWVzdBIzChVlbnZpcm9ubWVudF9uYW1lc3BhY2UYASABKAlSFGVudmlyb25tZW50TmFtZXNwYWNlEg4KAmlkGAIgASgJUgJpZCJcChZHZXRBdXRvT3BzUnVsZVJlc3BvbnNlEkIKDWF1dG9fb3BzX3J1bGUYASABKAsyHi5idWNrZXRlZXIuYXV0b29wcy5BdXRvT3BzUnVsZVILYXV0b09wc1J1bGUilgEKGENyZWF0ZUF1dG9PcHNSdWxlUmVxdWVzdBIzChVlbnZpcm9ubWVudF9uYW1lc3BhY2UYASABKAlSFGVudmlyb25tZW50TmFtZXNwYWNlEkUKB2NvbW1hbmQYAiABKAsyKy5idWNrZXRlZXIuYXV0b29wcy5DcmVhdGVBdXRvT3BzUnVsZUNvbW1hbmRSB2NvbW1hbmQiGwoZQ3JlYXRlQXV0b09wc1J1bGVSZXNwb25zZSKkAQoXTGlzdEF1dG9PcHNSdWxlc1JlcXVlc3QSMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAEgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZRI
bCglwYWdlX3NpemUYAiABKANSCHBhZ2VTaXplEhYKBmN1cnNvchgDIAEoCVIGY3Vyc29yEh8KC2ZlYXR1cmVfaWRzGAQgAygJUgpmZWF0dXJlSWRzIngKGExpc3RBdXRvT3BzUnVsZXNSZXNwb25zZRJECg5hdXRvX29wc19ydWxlcxgBIAMoCzIeLmJ1Y2tldGVlci5hdXRvb3BzLkF1dG9PcHNSdWxlUgxhdXRvT3BzUnVsZXMSFgoGY3Vyc29yGAIgASgJUgZjdXJzb3IipgEKGERlbGV0ZUF1dG9PcHNSdWxlUmVxdWVzdBIzChVlbnZpcm9ubWVudF9uYW1lc3BhY2UYASABKAlSFGVudmlyb25tZW50TmFtZXNwYWNlEg4KAmlkGAIgASgJUgJpZBJFCgdjb21tYW5kGAMgASgLMisuYnVja2V0ZWVyLmF1dG9vcHMuRGVsZXRlQXV0b09wc1J1bGVDb21tYW5kUgdjb21tYW5kIhsKGURlbGV0ZUF1dG9PcHNSdWxlUmVzcG9uc2UiiAgKGFVwZGF0ZUF1dG9PcHNSdWxlUmVxdWVzdBIzChVlbnZpcm9ubWVudF9uYW1lc3BhY2UYASABKAlSFGVudmlyb25tZW50TmFtZXNwYWNlEg4KAmlkGAIgASgJUgJpZBKCAQolY2hhbmdlX2F1dG9fb3BzX3J1bGVfb3BzX3R5cGVfY29tbWFuZBgDIAEoCzIyLmJ1Y2tldGVlci5hdXRvb3BzLkNoYW5nZUF1dG9PcHNSdWxlT3BzVHlwZUNvbW1hbmRSH2NoYW5nZUF1dG9PcHNSdWxlT3BzVHlwZUNvbW1hbmQSegoiYWRkX29wc19ldmVudF9yYXRlX2NsYXVzZV9jb21tYW5kcxgEIAMoCzIvLmJ1Y2tldGVlci5hdXRvb3BzLkFkZE9wc0V2ZW50UmF0ZUNsYXVzZUNvbW1hbmRSHWFkZE9wc0V2ZW50UmF0ZUNsYXVzZUNvbW1hbmRzEoMBCiVjaGFuZ2Vfb3BzX2V2ZW50X3JhdGVfY2xhdXNlX2NvbW1hbmRzGAUgAygLMjIuYnVja2V0ZWVyLmF1dG9vcHMuQ2hhbmdlT3BzRXZlbnRSYXRlQ2xhdXNlQ29tbWFuZFIgY2hhbmdlT3BzRXZlbnRSYXRlQ2xhdXNlQ29tbWFuZHMSXAoWZGVsZXRlX2NsYXVzZV9jb21tYW5kcxgGIAMoCzImLmJ1Y2tldGVlci5hdXRvb3BzLkRlbGV0ZUNsYXVzZUNvbW1hbmRSFGRlbGV0ZUNsYXVzZUNvbW1hbmRzEmwKHGFkZF9kYXRldGltZV9jbGF1c2VfY29tbWFuZHMYByADKAsyKy5idWNrZXRlZXIuYXV0b29wcy5BZGREYXRldGltZUNsYXVzZUNvbW1hbmRSGWFkZERhdGV0aW1lQ2xhdXNlQ29tbWFuZHMSdQofY2hhbmdlX2RhdGV0aW1lX2NsYXVzZV9jb21tYW5kcxgIIAMoCzIuLmJ1Y2tldGVlci5hdXRvb3BzLkNoYW5nZURhdGV0aW1lQ2xhdXNlQ29tbWFuZFIcY2hhbmdlRGF0ZXRpbWVDbGF1c2VDb21tYW5kcxJpChthZGRfd2ViaG9va19jbGF1c2VfY29tbWFuZHMYCSADKAsyKi5idWNrZXRlZXIuYXV0b29wcy5BZGRXZWJob29rQ2xhdXNlQ29tbWFuZFIYYWRkV2ViaG9va0NsYXVzZUNvbW1hbmRzEnIKHmNoYW5nZV93ZWJob29rX2NsYXVzZV9jb21tYW5kcxgKIAMoCzItLmJ1Y2tldGVlci5hdXRvb3BzLkNoYW5nZVdlYmhvb2tDbGF1c2VDb21tYW5kUhtjaGFuZ2VXZWJob29rQ2xhdXNlQ29tbWFuZHMiGwoZVXBkYXRlQXV0b09wc1J1bGVSZXNwb25zZSLtAQoVRXhlY3V0ZUF1dG9PcHNSZXF1ZXN0EjMKFWV
udmlyb25tZW50X25hbWVzcGFjZRgBIAEoCVIUZW52aXJvbm1lbnROYW1lc3BhY2USDgoCaWQYAiABKAlSAmlkEo4BCiljaGFuZ2VfYXV0b19vcHNfcnVsZV90cmlnZ2VyZWRfYXRfY29tbWFuZBgDIAEoCzI2LmJ1Y2tldGVlci5hdXRvb3BzLkNoYW5nZUF1dG9PcHNSdWxlVHJpZ2dlcmVkQXRDb21tYW5kUiNjaGFuZ2VBdXRvT3BzUnVsZVRyaWdnZXJlZEF0Q29tbWFuZCJFChZFeGVjdXRlQXV0b09wc1Jlc3BvbnNlEisKEWFscmVhZHlfdHJpZ2dlcmVkGAEgASgIUhBhbHJlYWR5VHJpZ2dlcmVkIswBChRMaXN0T3BzQ291bnRzUmVxdWVzdBIzChVlbnZpcm9ubWVudF9uYW1lc3BhY2UYASABKAlSFGVudmlyb25tZW50TmFtZXNwYWNlEhsKCXBhZ2Vfc2l6ZRgCIAEoA1IIcGFnZVNpemUSFgoGY3Vyc29yGAMgASgJUgZjdXJzb3ISKQoRYXV0b19vcHNfcnVsZV9pZHMYBCADKAlSDmF1dG9PcHNSdWxlSWRzEh8KC2ZlYXR1cmVfaWRzGAUgAygJUgpmZWF0dXJlSWRzImsKFUxpc3RPcHNDb3VudHNSZXNwb25zZRIWCgZjdXJzb3IYASABKAlSBmN1cnNvchI6CgpvcHNfY291bnRzGAIgAygLMhsuYnVja2V0ZWVyLmF1dG9vcHMuT3BzQ291bnRSCW9wc0NvdW50cyKOAQoUQ3JlYXRlV2ViaG9va1JlcXVlc3QSMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAEgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZRJBCgdjb21tYW5kGAIgASgLMicuYnVja2V0ZWVyLmF1dG9vcHMuQ3JlYXRlV2ViaG9va0NvbW1hbmRSB2NvbW1hbmQiXwoVQ3JlYXRlV2ViaG9va1Jlc3BvbnNlEjQKB3dlYmhvb2sYASABKAsyGi5idWNrZXRlZXIuYXV0b29wcy5XZWJob29rUgd3ZWJob29rEhAKA3VybBgCIAEoCVIDdXJsIlgKEUdldFdlYmhvb2tSZXF1ZXN0Eg4KAmlkGAEgASgJUgJpZBIzChVlbnZpcm9ubWVudF9uYW1lc3BhY2UYAiABKAlSFGVudmlyb25tZW50TmFtZXNwYWNlIlwKEkdldFdlYmhvb2tSZXNwb25zZRI0Cgd3ZWJob29rGAEgASgLMhouYnVja2V0ZWVyLmF1dG9vcHMuV2ViaG9va1IHd2ViaG9vaxIQCgN1cmwYAiABKAlSA3VybCLCAgoUVXBkYXRlV2ViaG9va1JlcXVlc3QSDgoCaWQYASABKAlSAmlkEjMKFWVudmlyb25tZW50X25hbWVzcGFjZRgCIAEoCVIUZW52aXJvbm1lbnROYW1lc3BhY2USZwoYY2hhbmdlV2ViaG9va05hbWVDb21tYW5kGAMgASgLMisuYnVja2V0ZWVyLmF1dG9vcHMuQ2hhbmdlV2ViaG9va05hbWVDb21tYW5kUhhjaGFuZ2VXZWJob29rTmFtZUNvbW1hbmQSfAofY2hhbmdlV2ViaG9va0Rlc2NyaXB0aW9uQ29tbWFuZBgEIAEoCzIyLmJ1Y2tldGVlci5hdXRvb3BzLkNoYW5nZVdlYmhvb2tEZXNjcmlwdGlvbkNvbW1hbmRSH2NoYW5nZVdlYmhvb2tEZXNjcmlwdGlvbkNvbW1hbmQiFwoVVXBkYXRlV2ViaG9va1Jlc3BvbnNlIp4BChREZWxldGVXZWJob29rUmVxdWVzdBIOCgJpZBgBIAEoCVICaWQSMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAIgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZRJBCgdjb21tYW5kGAMgASgLMicuYnVja2V0ZWVyLmF1dG9
vcHMuRGVsZXRlV2ViaG9va0NvbW1hbmRSB2NvbW1hbmQiFwoVRGVsZXRlV2ViaG9va1Jlc3BvbnNlIrgDChNMaXN0V2ViaG9va3NSZXF1ZXN0EjMKFWVudmlyb25tZW50X25hbWVzcGFjZRgBIAEoCVIUZW52aXJvbm1lbnROYW1lc3BhY2USGwoJcGFnZV9zaXplGAIgASgDUghwYWdlU2l6ZRIWCgZjdXJzb3IYAyABKAlSBmN1cnNvchJJCghvcmRlcl9ieRgEIAEoDjIuLmJ1Y2tldGVlci5hdXRvb3BzLkxpc3RXZWJob29rc1JlcXVlc3QuT3JkZXJCeVIHb3JkZXJCeRJeCg9vcmRlcl9kaXJlY3Rpb24YBSABKA4yNS5idWNrZXRlZXIuYXV0b29wcy5MaXN0V2ViaG9va3NSZXF1ZXN0Lk9yZGVyRGlyZWN0aW9uUg5vcmRlckRpcmVjdGlvbhIlCg5zZWFyY2hfa2V5d29yZBgGIAEoCVINc2VhcmNoS2V5d29yZCJACgdPcmRlckJ5EgsKB0RFRkFVTFQQABIICgROQU1FEAESDgoKQ1JFQVRFRF9BVBACEg4KClVQREFURURfQVQQAyIjCg5PcmRlckRpcmVjdGlvbhIHCgNBU0MQABIICgRERVNDEAEihwEKFExpc3RXZWJob29rc1Jlc3BvbnNlEjYKCHdlYmhvb2tzGAEgAygLMhouYnVja2V0ZWVyLmF1dG9vcHMuV2ViaG9va1IId2ViaG9va3MSFgoGY3Vyc29yGAIgASgJUgZjdXJzb3ISHwoLdG90YWxfY291bnQYAyABKANSCnRvdGFsQ291bnQy/wkKDkF1dG9PcHNTZXJ2aWNlEmcKDkdldEF1dG9PcHNSdWxlEiguYnVja2V0ZWVyLmF1dG9vcHMuR2V0QXV0b09wc1J1bGVSZXF1ZXN0GikuYnVja2V0ZWVyLmF1dG9vcHMuR2V0QXV0b09wc1J1bGVSZXNwb25zZSIAEm0KEExpc3RBdXRvT3BzUnVsZXMSKi5idWNrZXRlZXIuYXV0b29wcy5MaXN0QXV0b09wc1J1bGVzUmVxdWVzdBorLmJ1Y2tldGVlci5hdXRvb3BzLkxpc3RBdXRvT3BzUnVsZXNSZXNwb25zZSIAEnAKEUNyZWF0ZUF1dG9PcHNSdWxlEisuYnVja2V0ZWVyLmF1dG9vcHMuQ3JlYXRlQXV0b09wc1J1bGVSZXF1ZXN0GiwuYnVja2V0ZWVyLmF1dG9vcHMuQ3JlYXRlQXV0b09wc1J1bGVSZXNwb25zZSIAEnAKEURlbGV0ZUF1dG9PcHNSdWxlEisuYnVja2V0ZWVyLmF1dG9vcHMuRGVsZXRlQXV0b09wc1J1bGVSZXF1ZXN0GiwuYnVja2V0ZWVyLmF1dG9vcHMuRGVsZXRlQXV0b09wc1J1bGVSZXNwb25zZSIAEnAKEVVwZGF0ZUF1dG9PcHNSdWxlEisuYnVja2V0ZWVyLmF1dG9vcHMuVXBkYXRlQXV0b09wc1J1bGVSZXF1ZXN0GiwuYnVja2V0ZWVyLmF1dG9vcHMuVXBkYXRlQXV0b09wc1J1bGVSZXNwb25zZSIAEmcKDkV4ZWN1dGVBdXRvT3BzEiguYnVja2V0ZWVyLmF1dG9vcHMuRXhlY3V0ZUF1dG9PcHNSZXF1ZXN0GikuYnVja2V0ZWVyLmF1dG9vcHMuRXhlY3V0ZUF1dG9PcHNSZXNwb25zZSIAEmQKDUxpc3RPcHNDb3VudHMSJy5idWNrZXRlZXIuYXV0b29wcy5MaXN0T3BzQ291bnRzUmVxdWVzdBooLmJ1Y2tldGVlci5hdXRvb3BzLkxpc3RPcHNDb3VudHNSZXNwb25zZSIAEmQKDUNyZWF0ZVdlYmhvb2sSJy5idWNrZXRlZXIuYXV0b29wcy5DcmVhdGVXZWJob29rUmVxdWVzdBooLmJ1Y2tldGVlci5hdXRvb3B
zLkNyZWF0ZVdlYmhvb2tSZXNwb25zZSIAElsKCkdldFdlYmhvb2sSJC5idWNrZXRlZXIuYXV0b29wcy5HZXRXZWJob29rUmVxdWVzdBolLmJ1Y2tldGVlci5hdXRvb3BzLkdldFdlYmhvb2tSZXNwb25zZSIAEmQKDVVwZGF0ZVdlYmhvb2sSJy5idWNrZXRlZXIuYXV0b29wcy5VcGRhdGVXZWJob29rUmVxdWVzdBooLmJ1Y2tldGVlci5hdXRvb3BzLlVwZGF0ZVdlYmhvb2tSZXNwb25zZSIAEmQKDURlbGV0ZVdlYmhvb2sSJy5idWNrZXRlZXIuYXV0b29wcy5EZWxldGVXZWJob29rUmVxdWVzdBooLmJ1Y2tldGVlci5hdXRvb3BzLkRlbGV0ZVdlYmhvb2tSZXNwb25zZSIAEmEKDExpc3RXZWJob29rcxImLmJ1Y2tldGVlci5hdXRvb3BzLkxpc3RXZWJob29rc1JlcXVlc3QaJy5idWNrZXRlZXIuYXV0b29wcy5MaXN0V2ViaG9va3NSZXNwb25zZSIAQipaKGdpdGh1Yi5jb20vY2EtZHAvYnVja2V0ZWVyL3Byb3RvL2F1dG9vcHNiBnByb3RvMw==" + environmentDescriptor: "CoYECh5nb29nbGUvcHJvdG9idWYvd3JhcHBlcnMucHJvdG8SD2dvb2dsZS5wcm90b2J1ZiIjCgtEb3VibGVWYWx1ZRIUCgV2YWx1ZRgBIAEoAVIFdmFsdWUiIgoKRmxvYXRWYWx1ZRIUCgV2YWx1ZRgBIAEoAlIFdmFsdWUiIgoKSW50NjRWYWx1ZRIUCgV2YWx1ZRgBIAEoA1IFdmFsdWUiIwoLVUludDY0VmFsdWUSFAoFdmFsdWUYASABKARSBXZhbHVlIiIKCkludDMyVmFsdWUSFAoFdmFsdWUYASABKAVSBXZhbHVlIiMKC1VJbnQzMlZhbHVlEhQKBXZhbHVlGAEgASgNUgV2YWx1ZSIhCglCb29sVmFsdWUSFAoFdmFsdWUYASABKAhSBXZhbHVlIiMKC1N0cmluZ1ZhbHVlEhQKBXZhbHVlGAEgASgJUgV2YWx1ZSIiCgpCeXRlc1ZhbHVlEhQKBXZhbHVlGAEgASgMUgV2YWx1ZUKDAQoTY29tLmdvb2dsZS5wcm90b2J1ZkINV3JhcHBlcnNQcm90b1ABWjFnb29nbGUuZ29sYW5nLm9yZy9wcm90b2J1Zi90eXBlcy9rbm93bi93cmFwcGVyc3Bi+AEBogIDR1BCqgIeR29vZ2xlLlByb3RvYnVmLldlbGxLbm93blR5cGVzYgZwcm90bzMK4wIKI3Byb3RvL2Vudmlyb25tZW50L2Vudmlyb25tZW50LnByb3RvEhVidWNrZXRlZXIuZW52aXJvbm1lbnQi7AEKC0Vudmlyb25tZW50Eg4KAmlkGAEgASgJUgJpZBIcCgluYW1lc3BhY2UYAiABKAlSCW5hbWVzcGFjZRIWCgRuYW1lGAMgASgJQgIYAVIEbmFtZRIgCgtkZXNjcmlwdGlvbhgEIAEoCVILZGVzY3JpcHRpb24SGAoHZGVsZXRlZBgFIAEoCFIHZGVsZXRlZBIdCgpjcmVhdGVkX2F0GAYgASgDUgljcmVhdGVkQXQSHQoKdXBkYXRlZF9hdBgHIAEoA1IJdXBkYXRlZEF0Eh0KCnByb2plY3RfaWQYCCABKAlSCXByb2plY3RJZEIuWixnaXRodWIuY29tL2NhLWRwL2J1Y2tldGVlci9wcm90by9lbnZpcm9ubWVudGIGcHJvdG8zCsMCCh9wcm90by9lbnZpcm9ubWVudC9wcm9qZWN0LnByb3RvEhVidWNrZXRlZXIuZW52aXJvbm1lbnQi0AEKB1Byb2plY3QSDgoCaWQYASABKAlSAmlkEiAKC2Rlc2NyaXB0aW9uGAIgASgJUgtkZXNjcmlwdGlvbhIaCgh
kaXNhYmxlZBgDIAEoCFIIZGlzYWJsZWQSFAoFdHJpYWwYBCABKAhSBXRyaWFsEiMKDWNyZWF0b3JfZW1haWwYBSABKAlSDGNyZWF0b3JFbWFpbBIdCgpjcmVhdGVkX2F0GAYgASgDUgljcmVhdGVkQXQSHQoKdXBkYXRlZF9hdBgHIAEoA1IJdXBkYXRlZEF0Qi5aLGdpdGh1Yi5jb20vY2EtZHAvYnVja2V0ZWVyL3Byb3RvL2Vudmlyb25tZW50YgZwcm90bzMKzgUKH3Byb3RvL2Vudmlyb25tZW50L2NvbW1hbmQucHJvdG8SFWJ1Y2tldGVlci5lbnZpcm9ubWVudCKlAQoYQ3JlYXRlRW52aXJvbm1lbnRDb21tYW5kEiAKCW5hbWVzcGFjZRgBIAEoCUICGAFSCW5hbWVzcGFjZRIWCgRuYW1lGAIgASgJQgIYAVIEbmFtZRIgCgtkZXNjcmlwdGlvbhgDIAEoCVILZGVzY3JpcHRpb24SDgoCaWQYBCABKAlSAmlkEh0KCnByb2plY3RfaWQYBSABKAlSCXByb2plY3RJZCIuChhSZW5hbWVFbnZpcm9ubWVudENvbW1hbmQSEgoEbmFtZRgBIAEoCVIEbmFtZSJHCiNDaGFuZ2VEZXNjcmlwdGlvbkVudmlyb25tZW50Q29tbWFuZBIgCgtkZXNjcmlwdGlvbhgBIAEoCVILZGVzY3JpcHRpb24iGgoYRGVsZXRlRW52aXJvbm1lbnRDb21tYW5kIkgKFENyZWF0ZVByb2plY3RDb21tYW5kEg4KAmlkGAEgASgJUgJpZBIgCgtkZXNjcmlwdGlvbhgCIAEoCVILZGVzY3JpcHRpb24iQQoZQ3JlYXRlVHJpYWxQcm9qZWN0Q29tbWFuZBIOCgJpZBgBIAEoCVICaWQSFAoFZW1haWwYAiABKAlSBWVtYWlsIkMKH0NoYW5nZURlc2NyaXB0aW9uUHJvamVjdENvbW1hbmQSIAoLZGVzY3JpcHRpb24YASABKAlSC2Rlc2NyaXB0aW9uIhYKFEVuYWJsZVByb2plY3RDb21tYW5kIhcKFURpc2FibGVQcm9qZWN0Q29tbWFuZCIcChpDb252ZXJ0VHJpYWxQcm9qZWN0Q29tbWFuZEIuWixnaXRodWIuY29tL2NhLWRwL2J1Y2tldGVlci9wcm90by9lbnZpcm9ubWVudGIGcHJvdG8zCu0mCh9wcm90by9lbnZpcm9ubWVudC9zZXJ2aWNlLnByb3RvEhVidWNrZXRlZXIuZW52aXJvbm1lbnQaHmdvb2dsZS9wcm90b2J1Zi93cmFwcGVycy5wcm90bxojcHJvdG8vZW52aXJvbm1lbnQvZW52aXJvbm1lbnQucHJvdG8aH3Byb3RvL2Vudmlyb25tZW50L3Byb2plY3QucHJvdG8aH3Byb3RvL2Vudmlyb25tZW50L2NvbW1hbmQucHJvdG8iJwoVR2V0RW52aXJvbm1lbnRSZXF1ZXN0Eg4KAmlkGAEgASgJUgJpZCJeChZHZXRFbnZpcm9ubWVudFJlc3BvbnNlEkQKC2Vudmlyb25tZW50GAEgASgLMiIuYnVja2V0ZWVyLmVudmlyb25tZW50LkVudmlyb25tZW50UgtlbnZpcm9ubWVudCJACiBHZXRFbnZpcm9ubWVudEJ5TmFtZXNwYWNlUmVxdWVzdBIcCgluYW1lc3BhY2UYASABKAlSCW5hbWVzcGFjZSJpCiFHZXRFbnZpcm9ubWVudEJ5TmFtZXNwYWNlUmVzcG9uc2USRAoLZW52aXJvbm1lbnQYASABKAsyIi5idWNrZXRlZXIuZW52aXJvbm1lbnQuRW52aXJvbm1lbnRSC2Vudmlyb25tZW50IrQDChdMaXN0RW52aXJvbm1lbnRzUmVxdWVzdBIbCglwYWdlX3NpemUYASABKANSCHBhZ2VTaXplEhYKBmN1cnNvchgCIAEoCVI
GY3Vyc29yEh0KCnByb2plY3RfaWQYAyABKAlSCXByb2plY3RJZBJRCghvcmRlcl9ieRgEIAEoDjI2LmJ1Y2tldGVlci5lbnZpcm9ubWVudC5MaXN0RW52aXJvbm1lbnRzUmVxdWVzdC5PcmRlckJ5UgdvcmRlckJ5EmYKD29yZGVyX2RpcmVjdGlvbhgFIAEoDjI9LmJ1Y2tldGVlci5lbnZpcm9ubWVudC5MaXN0RW52aXJvbm1lbnRzUmVxdWVzdC5PcmRlckRpcmVjdGlvblIOb3JkZXJEaXJlY3Rpb24SJQoOc2VhcmNoX2tleXdvcmQYBiABKAlSDXNlYXJjaEtleXdvcmQiPgoHT3JkZXJCeRILCgdERUZBVUxUEAASBgoCSUQQARIOCgpDUkVBVEVEX0FUEAISDgoKVVBEQVRFRF9BVBADIiMKDk9yZGVyRGlyZWN0aW9uEgcKA0FTQxAAEggKBERFU0MQASKbAQoYTGlzdEVudmlyb25tZW50c1Jlc3BvbnNlEkYKDGVudmlyb25tZW50cxgBIAMoCzIiLmJ1Y2tldGVlci5lbnZpcm9ubWVudC5FbnZpcm9ubWVudFIMZW52aXJvbm1lbnRzEhYKBmN1cnNvchgCIAEoCVIGY3Vyc29yEh8KC3RvdGFsX2NvdW50GAMgASgDUgp0b3RhbENvdW50ImUKGENyZWF0ZUVudmlyb25tZW50UmVxdWVzdBJJCgdjb21tYW5kGAEgASgLMi8uYnVja2V0ZWVyLmVudmlyb25tZW50LkNyZWF0ZUVudmlyb25tZW50Q29tbWFuZFIHY29tbWFuZCIbChlDcmVhdGVFbnZpcm9ubWVudFJlc3BvbnNlIoACChhVcGRhdGVFbnZpcm9ubWVudFJlcXVlc3QSDgoCaWQYASABKAlSAmlkEloKDnJlbmFtZV9jb21tYW5kGAIgASgLMi8uYnVja2V0ZWVyLmVudmlyb25tZW50LlJlbmFtZUVudmlyb25tZW50Q29tbWFuZEICGAFSDXJlbmFtZUNvbW1hbmQSeAoaY2hhbmdlX2Rlc2NyaXB0aW9uX2NvbW1hbmQYAyABKAsyOi5idWNrZXRlZXIuZW52aXJvbm1lbnQuQ2hhbmdlRGVzY3JpcHRpb25FbnZpcm9ubWVudENvbW1hbmRSGGNoYW5nZURlc2NyaXB0aW9uQ29tbWFuZCIbChlVcGRhdGVFbnZpcm9ubWVudFJlc3BvbnNlInUKGERlbGV0ZUVudmlyb25tZW50UmVxdWVzdBIOCgJpZBgBIAEoCVICaWQSSQoHY29tbWFuZBgCIAEoCzIvLmJ1Y2tldGVlci5lbnZpcm9ubWVudC5EZWxldGVFbnZpcm9ubWVudENvbW1hbmRSB2NvbW1hbmQiGwoZRGVsZXRlRW52aXJvbm1lbnRSZXNwb25zZSIjChFHZXRQcm9qZWN0UmVxdWVzdBIOCgJpZBgBIAEoCVICaWQiTgoSR2V0UHJvamVjdFJlc3BvbnNlEjgKB3Byb2plY3QYASABKAsyHi5idWNrZXRlZXIuZW52aXJvbm1lbnQuUHJvamVjdFIHcHJvamVjdCLBAwoTTGlzdFByb2plY3RzUmVxdWVzdBIbCglwYWdlX3NpemUYASABKANSCHBhZ2VTaXplEhYKBmN1cnNvchgCIAEoCVIGY3Vyc29yEk0KCG9yZGVyX2J5GAMgASgOMjIuYnVja2V0ZWVyLmVudmlyb25tZW50Lkxpc3RQcm9qZWN0c1JlcXVlc3QuT3JkZXJCeVIHb3JkZXJCeRJiCg9vcmRlcl9kaXJlY3Rpb24YBCABKA4yOS5idWNrZXRlZXIuZW52aXJvbm1lbnQuTGlzdFByb2plY3RzUmVxdWVzdC5PcmRlckRpcmVjdGlvblIOb3JkZXJEaXJlY3Rpb24SJQoOc2VhcmNoX2tleXdvcmQYBSABKAlSDXNlYXJjaEtleXd
vcmQSNgoIZGlzYWJsZWQYBiABKAsyGi5nb29nbGUucHJvdG9idWYuQm9vbFZhbHVlUghkaXNhYmxlZCI+CgdPcmRlckJ5EgsKB0RFRkFVTFQQABIGCgJJRBABEg4KCkNSRUFURURfQVQQAhIOCgpVUERBVEVEX0FUEAMiIwoOT3JkZXJEaXJlY3Rpb24SBwoDQVNDEAASCAoEREVTQxABIosBChRMaXN0UHJvamVjdHNSZXNwb25zZRI6Cghwcm9qZWN0cxgBIAMoCzIeLmJ1Y2tldGVlci5lbnZpcm9ubWVudC5Qcm9qZWN0Ughwcm9qZWN0cxIWCgZjdXJzb3IYAiABKAlSBmN1cnNvchIfCgt0b3RhbF9jb3VudBgDIAEoA1IKdG90YWxDb3VudCJdChRDcmVhdGVQcm9qZWN0UmVxdWVzdBJFCgdjb21tYW5kGAEgASgLMisuYnVja2V0ZWVyLmVudmlyb25tZW50LkNyZWF0ZVByb2plY3RDb21tYW5kUgdjb21tYW5kIhcKFUNyZWF0ZVByb2plY3RSZXNwb25zZSJnChlDcmVhdGVUcmlhbFByb2plY3RSZXF1ZXN0EkoKB2NvbW1hbmQYASABKAsyMC5idWNrZXRlZXIuZW52aXJvbm1lbnQuQ3JlYXRlVHJpYWxQcm9qZWN0Q29tbWFuZFIHY29tbWFuZCIcChpDcmVhdGVUcmlhbFByb2plY3RSZXNwb25zZSKcAQoUVXBkYXRlUHJvamVjdFJlcXVlc3QSDgoCaWQYASABKAlSAmlkEnQKGmNoYW5nZV9kZXNjcmlwdGlvbl9jb21tYW5kGAIgASgLMjYuYnVja2V0ZWVyLmVudmlyb25tZW50LkNoYW5nZURlc2NyaXB0aW9uUHJvamVjdENvbW1hbmRSGGNoYW5nZURlc2NyaXB0aW9uQ29tbWFuZCIXChVVcGRhdGVQcm9qZWN0UmVzcG9uc2UibQoURW5hYmxlUHJvamVjdFJlcXVlc3QSDgoCaWQYASABKAlSAmlkEkUKB2NvbW1hbmQYAiABKAsyKy5idWNrZXRlZXIuZW52aXJvbm1lbnQuRW5hYmxlUHJvamVjdENvbW1hbmRSB2NvbW1hbmQiFwoVRW5hYmxlUHJvamVjdFJlc3BvbnNlIm8KFURpc2FibGVQcm9qZWN0UmVxdWVzdBIOCgJpZBgBIAEoCVICaWQSRgoHY29tbWFuZBgCIAEoCzIsLmJ1Y2tldGVlci5lbnZpcm9ubWVudC5EaXNhYmxlUHJvamVjdENvbW1hbmRSB2NvbW1hbmQiGAoWRGlzYWJsZVByb2plY3RSZXNwb25zZSJ5ChpDb252ZXJ0VHJpYWxQcm9qZWN0UmVxdWVzdBIOCgJpZBgBIAEoCVICaWQSSwoHY29tbWFuZBgCIAEoCzIxLmJ1Y2tldGVlci5lbnZpcm9ubWVudC5Db252ZXJ0VHJpYWxQcm9qZWN0Q29tbWFuZFIHY29tbWFuZCIdChtDb252ZXJ0VHJpYWxQcm9qZWN0UmVzcG9uc2UyhQ0KEkVudmlyb25tZW50U2VydmljZRJvCg5HZXRFbnZpcm9ubWVudBIsLmJ1Y2tldGVlci5lbnZpcm9ubWVudC5HZXRFbnZpcm9ubWVudFJlcXVlc3QaLS5idWNrZXRlZXIuZW52aXJvbm1lbnQuR2V0RW52aXJvbm1lbnRSZXNwb25zZSIAEpABChlHZXRFbnZpcm9ubWVudEJ5TmFtZXNwYWNlEjcuYnVja2V0ZWVyLmVudmlyb25tZW50LkdldEVudmlyb25tZW50QnlOYW1lc3BhY2VSZXF1ZXN0GjguYnVja2V0ZWVyLmVudmlyb25tZW50LkdldEVudmlyb25tZW50QnlOYW1lc3BhY2VSZXNwb25zZSIAEnUKEExpc3RFbnZpcm9ubWVudHMSLi5idWNrZXRlZXIuZW52aXJvbm1lbnQuTGl
zdEVudmlyb25tZW50c1JlcXVlc3QaLy5idWNrZXRlZXIuZW52aXJvbm1lbnQuTGlzdEVudmlyb25tZW50c1Jlc3BvbnNlIgASeAoRQ3JlYXRlRW52aXJvbm1lbnQSLy5idWNrZXRlZXIuZW52aXJvbm1lbnQuQ3JlYXRlRW52aXJvbm1lbnRSZXF1ZXN0GjAuYnVja2V0ZWVyLmVudmlyb25tZW50LkNyZWF0ZUVudmlyb25tZW50UmVzcG9uc2UiABJ4ChFVcGRhdGVFbnZpcm9ubWVudBIvLmJ1Y2tldGVlci5lbnZpcm9ubWVudC5VcGRhdGVFbnZpcm9ubWVudFJlcXVlc3QaMC5idWNrZXRlZXIuZW52aXJvbm1lbnQuVXBkYXRlRW52aXJvbm1lbnRSZXNwb25zZSIAEngKEURlbGV0ZUVudmlyb25tZW50Ei8uYnVja2V0ZWVyLmVudmlyb25tZW50LkRlbGV0ZUVudmlyb25tZW50UmVxdWVzdBowLmJ1Y2tldGVlci5lbnZpcm9ubWVudC5EZWxldGVFbnZpcm9ubWVudFJlc3BvbnNlIgASYwoKR2V0UHJvamVjdBIoLmJ1Y2tldGVlci5lbnZpcm9ubWVudC5HZXRQcm9qZWN0UmVxdWVzdBopLmJ1Y2tldGVlci5lbnZpcm9ubWVudC5HZXRQcm9qZWN0UmVzcG9uc2UiABJpCgxMaXN0UHJvamVjdHMSKi5idWNrZXRlZXIuZW52aXJvbm1lbnQuTGlzdFByb2plY3RzUmVxdWVzdBorLmJ1Y2tldGVlci5lbnZpcm9ubWVudC5MaXN0UHJvamVjdHNSZXNwb25zZSIAEmwKDUNyZWF0ZVByb2plY3QSKy5idWNrZXRlZXIuZW52aXJvbm1lbnQuQ3JlYXRlUHJvamVjdFJlcXVlc3QaLC5idWNrZXRlZXIuZW52aXJvbm1lbnQuQ3JlYXRlUHJvamVjdFJlc3BvbnNlIgASewoSQ3JlYXRlVHJpYWxQcm9qZWN0EjAuYnVja2V0ZWVyLmVudmlyb25tZW50LkNyZWF0ZVRyaWFsUHJvamVjdFJlcXVlc3QaMS5idWNrZXRlZXIuZW52aXJvbm1lbnQuQ3JlYXRlVHJpYWxQcm9qZWN0UmVzcG9uc2UiABJsCg1VcGRhdGVQcm9qZWN0EisuYnVja2V0ZWVyLmVudmlyb25tZW50LlVwZGF0ZVByb2plY3RSZXF1ZXN0GiwuYnVja2V0ZWVyLmVudmlyb25tZW50LlVwZGF0ZVByb2plY3RSZXNwb25zZSIAEmwKDUVuYWJsZVByb2plY3QSKy5idWNrZXRlZXIuZW52aXJvbm1lbnQuRW5hYmxlUHJvamVjdFJlcXVlc3QaLC5idWNrZXRlZXIuZW52aXJvbm1lbnQuRW5hYmxlUHJvamVjdFJlc3BvbnNlIgASbwoORGlzYWJsZVByb2plY3QSLC5idWNrZXRlZXIuZW52aXJvbm1lbnQuRGlzYWJsZVByb2plY3RSZXF1ZXN0Gi0uYnVja2V0ZWVyLmVudmlyb25tZW50LkRpc2FibGVQcm9qZWN0UmVzcG9uc2UiABJ+ChNDb252ZXJ0VHJpYWxQcm9qZWN0EjEuYnVja2V0ZWVyLmVudmlyb25tZW50LkNvbnZlcnRUcmlhbFByb2plY3RSZXF1ZXN0GjIuYnVja2V0ZWVyLmVudmlyb25tZW50LkNvbnZlcnRUcmlhbFByb2plY3RSZXNwb25zZSIAQi5aLGdpdGh1Yi5jb20vY2EtZHAvYnVja2V0ZWVyL3Byb3RvL2Vudmlyb25tZW50YgZwcm90bzM=" + eventcounterDescriptor: 
"CoYECh5nb29nbGUvcHJvdG9idWYvd3JhcHBlcnMucHJvdG8SD2dvb2dsZS5wcm90b2J1ZiIjCgtEb3VibGVWYWx1ZRIUCgV2YWx1ZRgBIAEoAVIFdmFsdWUiIgoKRmxvYXRWYWx1ZRIUCgV2YWx1ZRgBIAEoAlIFdmFsdWUiIgoKSW50NjRWYWx1ZRIUCgV2YWx1ZRgBIAEoA1IFdmFsdWUiIwoLVUludDY0VmFsdWUSFAoFdmFsdWUYASABKARSBXZhbHVlIiIKCkludDMyVmFsdWUSFAoFdmFsdWUYASABKAVSBXZhbHVlIiMKC1VJbnQzMlZhbHVlEhQKBXZhbHVlGAEgASgNUgV2YWx1ZSIhCglCb29sVmFsdWUSFAoFdmFsdWUYASABKAhSBXZhbHVlIiMKC1N0cmluZ1ZhbHVlEhQKBXZhbHVlGAEgASgJUgV2YWx1ZSIiCgpCeXRlc1ZhbHVlEhQKBXZhbHVlGAEgASgMUgV2YWx1ZUKDAQoTY29tLmdvb2dsZS5wcm90b2J1ZkINV3JhcHBlcnNQcm90b1ABWjFnb29nbGUuZ29sYW5nLm9yZy9wcm90b2J1Zi90eXBlcy9rbm93bi93cmFwcGVyc3Bi+AEBogIDR1BCqgIeR29vZ2xlLlByb3RvYnVmLldlbGxLbm93blR5cGVzYgZwcm90bzMKygMKKHByb3RvL2V2ZW50Y291bnRlci92YXJpYXRpb25fY291bnQucHJvdG8SFmJ1Y2tldGVlci5ldmVudGNvdW50ZXIizAIKDlZhcmlhdGlvbkNvdW50EiEKDHZhcmlhdGlvbl9pZBgBIAEoCVILdmFyaWF0aW9uSWQSHQoKdXNlcl9jb3VudBgCIAEoA1IJdXNlckNvdW50Eh8KC2V2ZW50X2NvdW50GAMgASgDUgpldmVudENvdW50EhsKCXZhbHVlX3N1bRgEIAEoAVIIdmFsdWVTdW0SHQoKY3JlYXRlZF9hdBgFIAEoA1IJY3JlYXRlZEF0EicKD3ZhcmlhdGlvbl92YWx1ZRgGIAEoCVIOdmFyaWF0aW9uVmFsdWUSNAoXdmFsdWVfc3VtX3Blcl91c2VyX21lYW4YByABKAFSE3ZhbHVlU3VtUGVyVXNlck1lYW4SPAobdmFsdWVfc3VtX3Blcl91c2VyX3ZhcmlhbmNlGAggASgBUhd2YWx1ZVN1bVBlclVzZXJWYXJpYW5jZUIvWi1naXRodWIuY29tL2NhLWRwL2J1Y2tldGVlci9wcm90by9ldmVudGNvdW50ZXJiBnByb3RvMwrNAwopcHJvdG8vZXZlbnRjb3VudGVyL2V2YWx1YXRpb25fY291bnQucHJvdG8SFmJ1Y2tldGVlci5ldmVudGNvdW50ZXIaKHByb3RvL2V2ZW50Y291bnRlci92YXJpYXRpb25fY291bnQucHJvdG8ipAIKD0V2YWx1YXRpb25Db3VudBIOCgJpZBgBIAEoCVICaWQSHQoKZmVhdHVyZV9pZBgCIAEoCVIJZmVhdHVyZUlkEicKD2ZlYXR1cmVfdmVyc2lvbhgDIAEoBVIOZmVhdHVyZVZlcnNpb24STwoPcmVhbHRpbWVfY291bnRzGAQgAygLMiYuYnVja2V0ZWVyLmV2ZW50Y291bnRlci5WYXJpYXRpb25Db3VudFIOcmVhbHRpbWVDb3VudHMSSQoMYmF0Y2hfY291bnRzGAUgAygLMiYuYnVja2V0ZWVyLmV2ZW50Y291bnRlci5WYXJpYXRpb25Db3VudFILYmF0Y2hDb3VudHMSHQoKdXBkYXRlZF9hdBgGIAEoA1IJdXBkYXRlZEF0Qi9aLWdpdGh1Yi5jb20vY2EtZHAvYnVja2V0ZWVyL3Byb3RvL2V2ZW50Y291bnRlcmIGcHJvdG8zCv8FCilwcm90by9ldmVudGNvdW50ZXIvZXhwZXJpbWVudF9jb3VudC5wcm90bxIWYnVja2V
0ZWVyLmV2ZW50Y291bnRlchoocHJvdG8vZXZlbnRjb3VudGVyL3ZhcmlhdGlvbl9jb3VudC5wcm90byKOAwoPRXhwZXJpbWVudENvdW50Eg4KAmlkGAEgASgJUgJpZBIdCgpmZWF0dXJlX2lkGAIgASgJUglmZWF0dXJlSWQSJwoPZmVhdHVyZV92ZXJzaW9uGAMgASgFUg5mZWF0dXJlVmVyc2lvbhIbCgdnb2FsX2lkGAQgASgJQgIYAVIGZ29hbElkElMKD3JlYWx0aW1lX2NvdW50cxgFIAMoCzImLmJ1Y2tldGVlci5ldmVudGNvdW50ZXIuVmFyaWF0aW9uQ291bnRCAhgBUg5yZWFsdGltZUNvdW50cxJNCgxiYXRjaF9jb3VudHMYBiADKAsyJi5idWNrZXRlZXIuZXZlbnRjb3VudGVyLlZhcmlhdGlvbkNvdW50QgIYAVILYmF0Y2hDb3VudHMSHQoKdXBkYXRlZF9hdBgHIAEoA1IJdXBkYXRlZEF0EkMKC2dvYWxfY291bnRzGAggAygLMiIuYnVja2V0ZWVyLmV2ZW50Y291bnRlci5Hb2FsQ291bnRzUgpnb2FsQ291bnRzIsUBCgpHb2FsQ291bnRzEhcKB2dvYWxfaWQYASABKAlSBmdvYWxJZBJPCg9yZWFsdGltZV9jb3VudHMYAiADKAsyJi5idWNrZXRlZXIuZXZlbnRjb3VudGVyLlZhcmlhdGlvbkNvdW50Ug5yZWFsdGltZUNvdW50cxJNCgxiYXRjaF9jb3VudHMYAyADKAsyJi5idWNrZXRlZXIuZXZlbnRjb3VudGVyLlZhcmlhdGlvbkNvdW50QgIYAVILYmF0Y2hDb3VudHNCL1otZ2l0aHViLmNvbS9jYS1kcC9idWNrZXRlZXIvcHJvdG8vZXZlbnRjb3VudGVyYgZwcm90bzMKqgEKInByb3RvL2V2ZW50Y291bnRlci9oaXN0b2dyYW0ucHJvdG8SFmJ1Y2tldGVlci5ldmVudGNvdW50ZXIiMwoJSGlzdG9ncmFtEhIKBGhpc3QYASADKANSBGhpc3QSEgoEYmlucxgCIAMoAVIEYmluc0IvWi1naXRodWIuY29tL2NhLWRwL2J1Y2tldGVlci9wcm90by9ldmVudGNvdW50ZXJiBnByb3RvMwqZAwotcHJvdG8vZXZlbnRjb3VudGVyL2Rpc3RyaWJ1dGlvbl9zdW1tYXJ5LnByb3RvEhZidWNrZXRlZXIuZXZlbnRjb3VudGVyGiJwcm90by9ldmVudGNvdW50ZXIvaGlzdG9ncmFtLnByb3RvIvIBChNEaXN0cmlidXRpb25TdW1tYXJ5EhIKBG1lYW4YASABKAFSBG1lYW4SDgoCc2QYAiABKAFSAnNkEhIKBHJoYXQYAyABKAFSBHJoYXQSPwoJaGlzdG9ncmFtGAQgASgLMiEuYnVja2V0ZWVyLmV2ZW50Y291bnRlci5IaXN0b2dyYW1SCWhpc3RvZ3JhbRIWCgZtZWRpYW4YBSABKAFSBm1lZGlhbhIkCg1wZXJjZW50aWxlMDI1GAYgASgBUg1wZXJjZW50aWxlMDI1EiQKDXBlcmNlbnRpbGU5NzUYByABKAFSDXBlcmNlbnRpbGU5NzVCL1otZ2l0aHViLmNvbS9jYS1kcC9idWNrZXRlZXIvcHJvdG8vZXZlbnRjb3VudGVyYgZwcm90bzMKugIKI3Byb3RvL2V2ZW50Y291bnRlci90aW1lc2VyaWVzLnByb3RvEhZidWNrZXRlZXIuZXZlbnRjb3VudGVyInwKE1ZhcmlhdGlvblRpbWVzZXJpZXMSIQoMdmFyaWF0aW9uX2lkGAEgASgJUgt2YXJpYXRpb25JZBJCCgp0aW1lc2VyaWVzGAIgASgLMiIuYnVja2V0ZWVyLmV2ZW50Y291bnRlci5UaW1lc2VyaWVzUgp0aW1lc2VyaWVzIkQKClRpbWV
zZXJpZXMSHgoKdGltZXN0YW1wcxgBIAMoA1IKdGltZXN0YW1wcxIWCgZ2YWx1ZXMYAiADKAFSBnZhbHVlc0IvWi1naXRodWIuY29tL2NhLWRwL2J1Y2tldGVlci9wcm90by9ldmVudGNvdW50ZXJiBnByb3RvMwqpEwopcHJvdG8vZXZlbnRjb3VudGVyL3ZhcmlhdGlvbl9yZXN1bHQucHJvdG8SFmJ1Y2tldGVlci5ldmVudGNvdW50ZXIaKHByb3RvL2V2ZW50Y291bnRlci92YXJpYXRpb25fY291bnQucHJvdG8aLXByb3RvL2V2ZW50Y291bnRlci9kaXN0cmlidXRpb25fc3VtbWFyeS5wcm90bxojcHJvdG8vZXZlbnRjb3VudGVyL3RpbWVzZXJpZXMucHJvdG8irBEKD1ZhcmlhdGlvblJlc3VsdBIhCgx2YXJpYXRpb25faWQYASABKAlSC3ZhcmlhdGlvbklkElEKEGV4cGVyaW1lbnRfY291bnQYAiABKAsyJi5idWNrZXRlZXIuZXZlbnRjb3VudGVyLlZhcmlhdGlvbkNvdW50Ug9leHBlcmltZW50Q291bnQSUQoQZXZhbHVhdGlvbl9jb3VudBgDIAEoCzImLmJ1Y2tldGVlci5ldmVudGNvdW50ZXIuVmFyaWF0aW9uQ291bnRSD2V2YWx1YXRpb25Db3VudBJPCg1jdnJfcHJvYl9iZXN0GAQgASgLMisuYnVja2V0ZWVyLmV2ZW50Y291bnRlci5EaXN0cmlidXRpb25TdW1tYXJ5UgtjdnJQcm9iQmVzdBJgChZjdnJfcHJvYl9iZWF0X2Jhc2VsaW5lGAUgASgLMisuYnVja2V0ZWVyLmV2ZW50Y291bnRlci5EaXN0cmlidXRpb25TdW1tYXJ5UhNjdnJQcm9iQmVhdEJhc2VsaW5lEkYKCGN2cl9wcm9iGAYgASgLMisuYnVja2V0ZWVyLmV2ZW50Y291bnRlci5EaXN0cmlidXRpb25TdW1tYXJ5UgdjdnJQcm9iEmsKIGV2YWx1YXRpb25fdXNlcl9jb3VudF90aW1lc2VyaWVzGAcgASgLMiIuYnVja2V0ZWVyLmV2ZW50Y291bnRlci5UaW1lc2VyaWVzUh1ldmFsdWF0aW9uVXNlckNvdW50VGltZXNlcmllcxJtCiFldmFsdWF0aW9uX2V2ZW50X2NvdW50X3RpbWVzZXJpZXMYCCABKAsyIi5idWNrZXRlZXIuZXZlbnRjb3VudGVyLlRpbWVzZXJpZXNSHmV2YWx1YXRpb25FdmVudENvdW50VGltZXNlcmllcxJfChpnb2FsX3VzZXJfY291bnRfdGltZXNlcmllcxgJIAEoCzIiLmJ1Y2tldGVlci5ldmVudGNvdW50ZXIuVGltZXNlcmllc1IXZ29hbFVzZXJDb3VudFRpbWVzZXJpZXMSYQobZ29hbF9ldmVudF9jb3VudF90aW1lc2VyaWVzGAogASgLMiIuYnVja2V0ZWVyLmV2ZW50Y291bnRlci5UaW1lc2VyaWVzUhhnb2FsRXZlbnRDb3VudFRpbWVzZXJpZXMSXQoZZ29hbF92YWx1ZV9zdW1fdGltZXNlcmllcxgLIAEoCzIiLmJ1Y2tldGVlci5ldmVudGNvdW50ZXIuVGltZXNlcmllc1IWZ29hbFZhbHVlU3VtVGltZXNlcmllcxJWChVjdnJfbWVkaWFuX3RpbWVzZXJpZXMYDCABKAsyIi5idWNrZXRlZXIuZXZlbnRjb3VudGVyLlRpbWVzZXJpZXNSE2N2ck1lZGlhblRpbWVzZXJpZXMSZAocY3ZyX3BlcmNlbnRpbGUwMjVfdGltZXNlcmllcxgNIAEoCzIiLmJ1Y2tldGVlci5ldmVudGNvdW50ZXIuVGltZXNlcmllc1IaY3ZyUGVyY2VudGlsZTAyNVRpbWVzZXJpZXMSZAocY3ZyX3BlcmN
lbnRpbGU5NzVfdGltZXNlcmllcxgOIAEoCzIiLmJ1Y2tldGVlci5ldmVudGNvdW50ZXIuVGltZXNlcmllc1IaY3ZyUGVyY2VudGlsZTk3NVRpbWVzZXJpZXMSSQoOY3ZyX3RpbWVzZXJpZXMYDyABKAsyIi5idWNrZXRlZXIuZXZlbnRjb3VudGVyLlRpbWVzZXJpZXNSDWN2clRpbWVzZXJpZXMSbQoiZ29hbF92YWx1ZV9zdW1fcGVyX3VzZXJfdGltZXNlcmllcxgQIAEoCzIiLmJ1Y2tldGVlci5ldmVudGNvdW50ZXIuVGltZXNlcmllc1IdZ29hbFZhbHVlU3VtUGVyVXNlclRpbWVzZXJpZXMSagocZ29hbF92YWx1ZV9zdW1fcGVyX3VzZXJfcHJvYhgRIAEoCzIrLmJ1Y2tldGVlci5ldmVudGNvdW50ZXIuRGlzdHJpYnV0aW9uU3VtbWFyeVIXZ29hbFZhbHVlU3VtUGVyVXNlclByb2IScwohZ29hbF92YWx1ZV9zdW1fcGVyX3VzZXJfcHJvYl9iZXN0GBIgASgLMisuYnVja2V0ZWVyLmV2ZW50Y291bnRlci5EaXN0cmlidXRpb25TdW1tYXJ5Uhtnb2FsVmFsdWVTdW1QZXJVc2VyUHJvYkJlc3QShAEKKmdvYWxfdmFsdWVfc3VtX3Blcl91c2VyX3Byb2JfYmVhdF9iYXNlbGluZRgTIAEoCzIrLmJ1Y2tldGVlci5ldmVudGNvdW50ZXIuRGlzdHJpYnV0aW9uU3VtbWFyeVIjZ29hbFZhbHVlU3VtUGVyVXNlclByb2JCZWF0QmFzZWxpbmUSegopZ29hbF92YWx1ZV9zdW1fcGVyX3VzZXJfbWVkaWFuX3RpbWVzZXJpZXMYFCABKAsyIi5idWNrZXRlZXIuZXZlbnRjb3VudGVyLlRpbWVzZXJpZXNSI2dvYWxWYWx1ZVN1bVBlclVzZXJNZWRpYW5UaW1lc2VyaWVzEogBCjBnb2FsX3ZhbHVlX3N1bV9wZXJfdXNlcl9wZXJjZW50aWxlMDI1X3RpbWVzZXJpZXMYFSABKAsyIi5idWNrZXRlZXIuZXZlbnRjb3VudGVyLlRpbWVzZXJpZXNSKmdvYWxWYWx1ZVN1bVBlclVzZXJQZXJjZW50aWxlMDI1VGltZXNlcmllcxKIAQowZ29hbF92YWx1ZV9zdW1fcGVyX3VzZXJfcGVyY2VudGlsZTk3NV90aW1lc2VyaWVzGBYgASgLMiIuYnVja2V0ZWVyLmV2ZW50Y291bnRlci5UaW1lc2VyaWVzUipnb2FsVmFsdWVTdW1QZXJVc2VyUGVyY2VudGlsZTk3NVRpbWVzZXJpZXNCL1otZ2l0aHViLmNvbS9jYS1kcC9idWNrZXRlZXIvcHJvdG8vZXZlbnRjb3VudGVyYgZwcm90bzMKnwIKJHByb3RvL2V2ZW50Y291bnRlci9nb2FsX3Jlc3VsdC5wcm90bxIWYnVja2V0ZWVyLmV2ZW50Y291bnRlchopcHJvdG8vZXZlbnRjb3VudGVyL3ZhcmlhdGlvbl9yZXN1bHQucHJvdG8iewoKR29hbFJlc3VsdBIXCgdnb2FsX2lkGAEgASgJUgZnb2FsSWQSVAoRdmFyaWF0aW9uX3Jlc3VsdHMYAiADKAsyJy5idWNrZXRlZXIuZXZlbnRjb3VudGVyLlZhcmlhdGlvblJlc3VsdFIQdmFyaWF0aW9uUmVzdWx0c0IvWi1naXRodWIuY29tL2NhLWRwL2J1Y2tldGVlci9wcm90by9ldmVudGNvdW50ZXJiBnByb3RvMwrTAgoqcHJvdG8vZXZlbnRjb3VudGVyL2V4cGVyaW1lbnRfcmVzdWx0LnByb3RvEhZidWNrZXRlZXIuZXZlbnRjb3VudGVyGiRwcm90by9ldmVudGNvdW50ZXIvZ29hbF9yZXN1bHQucHJvdG8irQEKEEV
4cGVyaW1lbnRSZXN1bHQSDgoCaWQYASABKAlSAmlkEiMKDWV4cGVyaW1lbnRfaWQYAiABKAlSDGV4cGVyaW1lbnRJZBIdCgp1cGRhdGVkX2F0GAMgASgDUgl1cGRhdGVkQXQSRQoMZ29hbF9yZXN1bHRzGAQgAygLMiIuYnVja2V0ZWVyLmV2ZW50Y291bnRlci5Hb2FsUmVzdWx0Ugtnb2FsUmVzdWx0c0IvWi1naXRodWIuY29tL2NhLWRwL2J1Y2tldGVlci9wcm90by9ldmVudGNvdW50ZXJiBnByb3RvMwqEAgofcHJvdG8vZXZlbnRjb3VudGVyL2ZpbHRlci5wcm90bxIWYnVja2V0ZWVyLmV2ZW50Y291bnRlciKPAQoGRmlsdGVyEhAKA2tleRgBIAEoCVIDa2V5EkMKCG9wZXJhdG9yGAIgASgOMicuYnVja2V0ZWVyLmV2ZW50Y291bnRlci5GaWx0ZXIuT3BlcmF0b3JSCG9wZXJhdG9yEhYKBnZhbHVlcxgDIAMoCVIGdmFsdWVzIhYKCE9wZXJhdG9yEgoKBkVRVUFMUxAAQi9aLWdpdGh1Yi5jb20vY2EtZHAvYnVja2V0ZWVyL3Byb3RvL2V2ZW50Y291bnRlcmIGcHJvdG8zCsQCCh5wcm90by9ldmVudGNvdW50ZXIvdGFibGUucHJvdG8SFmJ1Y2tldGVlci5ldmVudGNvdW50ZXIiOQoDUm93EjIKBWNlbGxzGAEgAygLMhwuYnVja2V0ZWVyLmV2ZW50Y291bnRlci5DZWxsUgVjZWxscyKVAQoEQ2VsbBI1CgR0eXBlGAEgASgOMiEuYnVja2V0ZWVyLmV2ZW50Y291bnRlci5DZWxsLlR5cGVSBHR5cGUSFAoFdmFsdWUYAiABKAlSBXZhbHVlEiAKC3ZhbHVlRG91YmxlGAQgASgBUgt2YWx1ZURvdWJsZSIeCgRUeXBlEgoKBlNUUklORxAAEgoKBkRPVUJMRRACQi9aLWdpdGh1Yi5jb20vY2EtZHAvYnVja2V0ZWVyL3Byb3RvL2V2ZW50Y291bnRlcmIGcHJvdG8zCtsgCiBwcm90by9ldmVudGNvdW50ZXIvc2VydmljZS5wcm90bxIWYnVja2V0ZWVyLmV2ZW50Y291bnRlchoeZ29vZ2xlL3Byb3RvYnVmL3dyYXBwZXJzLnByb3RvGilwcm90by9ldmVudGNvdW50ZXIvZXZhbHVhdGlvbl9jb3VudC5wcm90bxopcHJvdG8vZXZlbnRjb3VudGVyL2V4cGVyaW1lbnRfY291bnQucHJvdG8aKnByb3RvL2V2ZW50Y291bnRlci9leHBlcmltZW50X3Jlc3VsdC5wcm90bxofcHJvdG8vZXZlbnRjb3VudGVyL2ZpbHRlci5wcm90bxoecHJvdG8vZXZlbnRjb3VudGVyL3RhYmxlLnByb3RvGiNwcm90by9ldmVudGNvdW50ZXIvdGltZXNlcmllcy5wcm90byLxAQobR2V0RXZhbHVhdGlvbkNvdW50VjJSZXF1ZXN0EjMKFWVudmlyb25tZW50X25hbWVzcGFjZRgBIAEoCVIUZW52aXJvbm1lbnROYW1lc3BhY2USGQoIc3RhcnRfYXQYAiABKANSB3N0YXJ0QXQSFQoGZW5kX2F0GAMgASgDUgVlbmRBdBIdCgpmZWF0dXJlX2lkGAQgASgJUglmZWF0dXJlSWQSJwoPZmVhdHVyZV92ZXJzaW9uGAUgASgFUg5mZWF0dXJlVmVyc2lvbhIjCg12YXJpYXRpb25faWRzGAYgAygJUgx2YXJpYXRpb25JZHMiXQocR2V0RXZhbHVhdGlvbkNvdW50VjJSZXNwb25zZRI9CgVjb3VudBgBIAEoCzInLmJ1Y2tldGVlci5ldmVudGNvdW50ZXIuRXZhbHVhdGlvbkNvdW50UgVjb3VudCJ5CiNHZXRFdmFsdWF0aW9uVGl
tZXNlcmllc0NvdW50UmVxdWVzdBIzChVlbnZpcm9ubWVudF9uYW1lc3BhY2UYASABKAlSFGVudmlyb25tZW50TmFtZXNwYWNlEh0KCmZlYXR1cmVfaWQYAiABKAlSCWZlYXR1cmVJZCLEAQokR2V0RXZhbHVhdGlvblRpbWVzZXJpZXNDb3VudFJlc3BvbnNlEkwKC3VzZXJfY291bnRzGAEgAygLMisuYnVja2V0ZWVyLmV2ZW50Y291bnRlci5WYXJpYXRpb25UaW1lc2VyaWVzUgp1c2VyQ291bnRzEk4KDGV2ZW50X2NvdW50cxgCIAMoCzIrLmJ1Y2tldGVlci5ldmVudGNvdW50ZXIuVmFyaWF0aW9uVGltZXNlcmllc1ILZXZlbnRDb3VudHMidgoaR2V0RXhwZXJpbWVudFJlc3VsdFJlcXVlc3QSMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAEgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZRIjCg1leHBlcmltZW50X2lkGAIgASgJUgxleHBlcmltZW50SWQidAobR2V0RXhwZXJpbWVudFJlc3VsdFJlc3BvbnNlElUKEWV4cGVyaW1lbnRfcmVzdWx0GAEgASgLMiguYnVja2V0ZWVyLmV2ZW50Y291bnRlci5FeHBlcmltZW50UmVzdWx0UhBleHBlcmltZW50UmVzdWx0IrgBChxMaXN0RXhwZXJpbWVudFJlc3VsdHNSZXF1ZXN0Eh0KCmZlYXR1cmVfaWQYASABKAlSCWZlYXR1cmVJZBJECg9mZWF0dXJlX3ZlcnNpb24YAiABKAsyGy5nb29nbGUucHJvdG9idWYuSW50MzJWYWx1ZVIOZmVhdHVyZVZlcnNpb24SMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAMgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZSLjAQodTGlzdEV4cGVyaW1lbnRSZXN1bHRzUmVzcG9uc2USXAoHcmVzdWx0cxgBIAMoCzJCLmJ1Y2tldGVlci5ldmVudGNvdW50ZXIuTGlzdEV4cGVyaW1lbnRSZXN1bHRzUmVzcG9uc2UuUmVzdWx0c0VudHJ5UgdyZXN1bHRzGmQKDFJlc3VsdHNFbnRyeRIQCgNrZXkYASABKAlSA2tleRI+CgV2YWx1ZRgCIAEoCzIoLmJ1Y2tldGVlci5ldmVudGNvdW50ZXIuRXhwZXJpbWVudFJlc3VsdFIFdmFsdWU6AjgBIk4KF0xpc3RVc2VyRGF0YUtleXNSZXF1ZXN0EjMKFWVudmlyb25tZW50X25hbWVzcGFjZRgBIAEoCVIUZW52aXJvbm1lbnROYW1lc3BhY2UiLgoYTGlzdFVzZXJEYXRhS2V5c1Jlc3BvbnNlEhIKBGtleXMYASADKAlSBGtleXMiYgoZTGlzdFVzZXJEYXRhVmFsdWVzUmVxdWVzdBIzChVlbnZpcm9ubWVudF9uYW1lc3BhY2UYASABKAlSFGVudmlyb25tZW50TmFtZXNwYWNlEhAKA2tleRgCIAEoCVIDa2V5IjQKGkxpc3RVc2VyRGF0YVZhbHVlc1Jlc3BvbnNlEhYKBnZhbHVlcxgBIAMoCVIGdmFsdWVzIssCChNHZXRHb2FsQ291bnRSZXF1ZXN0EjMKFWVudmlyb25tZW50X25hbWVzcGFjZRgBIAEoCVIUZW52aXJvbm1lbnROYW1lc3BhY2USHQoKZmVhdHVyZV9pZBgCIAEoCVIJZmVhdHVyZUlkEicKD2ZlYXR1cmVfdmVyc2lvbhgDIAEoBVIOZmVhdHVyZVZlcnNpb24SFwoHZ29hbF9pZBgEIAEoCVIGZ29hbElkEhkKCHN0YXJ0X2F0GAUgASgDUgdzdGFydEF0EhUKBmVuZF9hdBgGIAEoA1IFZW5kQXQSOAoHZmlsdGVycxgHIAMoCzIeLmJ1Y2tldGVlci5ldmVudGN
vdW50ZXIuRmlsdGVyUgdmaWx0ZXJzEhoKCHNlZ21lbnRzGAggAygJUghzZWdtZW50cxIWCgZyZWFzb24YCSABKAlSBnJlYXNvbiJ+ChRHZXRHb2FsQ291bnRSZXNwb25zZRI1CgdoZWFkZXJzGAEgASgLMhsuYnVja2V0ZWVyLmV2ZW50Y291bnRlci5Sb3dSB2hlYWRlcnMSLwoEcm93cxgCIAMoCzIbLmJ1Y2tldGVlci5ldmVudGNvdW50ZXIuUm93UgRyb3dzIoQCChVHZXRHb2FsQ291bnRWMlJlcXVlc3QSMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAEgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZRIZCghzdGFydF9hdBgCIAEoA1IHc3RhcnRBdBIVCgZlbmRfYXQYAyABKANSBWVuZEF0EhcKB2dvYWxfaWQYBCABKAlSBmdvYWxJZBIdCgpmZWF0dXJlX2lkGAUgASgJUglmZWF0dXJlSWQSJwoPZmVhdHVyZV92ZXJzaW9uGAYgASgFUg5mZWF0dXJlVmVyc2lvbhIjCg12YXJpYXRpb25faWRzGAcgAygJUgx2YXJpYXRpb25JZHMiXQoWR2V0R29hbENvdW50VjJSZXNwb25zZRJDCgtnb2FsX2NvdW50cxgBIAEoCzIiLmJ1Y2tldGVlci5ldmVudGNvdW50ZXIuR29hbENvdW50c1IKZ29hbENvdW50cyJ+ChVHZXRVc2VyQ291bnRWMlJlcXVlc3QSMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAEgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZRIZCghzdGFydF9hdBgCIAEoA1IHc3RhcnRBdBIVCgZlbmRfYXQYAyABKANSBWVuZEF0IlgKFkdldFVzZXJDb3VudFYyUmVzcG9uc2USHwoLZXZlbnRfY291bnQYASABKANSCmV2ZW50Q291bnQSHQoKdXNlcl9jb3VudBgCIAEoA1IJdXNlckNvdW50Ik4KF0xpc3RVc2VyTWV0YWRhdGFSZXF1ZXN0EjMKFWVudmlyb25tZW50X25hbWVzcGFjZRgBIAEoCVIUZW52aXJvbm1lbnROYW1lc3BhY2UiLgoYTGlzdFVzZXJNZXRhZGF0YVJlc3BvbnNlEhIKBGRhdGEYASADKAlSBGRhdGEykQgKE0V2ZW50Q291bnRlclNlcnZpY2USgwEKFEdldEV2YWx1YXRpb25Db3VudFYyEjMuYnVja2V0ZWVyLmV2ZW50Y291bnRlci5HZXRFdmFsdWF0aW9uQ291bnRWMlJlcXVlc3QaNC5idWNrZXRlZXIuZXZlbnRjb3VudGVyLkdldEV2YWx1YXRpb25Db3VudFYyUmVzcG9uc2UiABKbAQocR2V0RXZhbHVhdGlvblRpbWVzZXJpZXNDb3VudBI7LmJ1Y2tldGVlci5ldmVudGNvdW50ZXIuR2V0RXZhbHVhdGlvblRpbWVzZXJpZXNDb3VudFJlcXVlc3QaPC5idWNrZXRlZXIuZXZlbnRjb3VudGVyLkdldEV2YWx1YXRpb25UaW1lc2VyaWVzQ291bnRSZXNwb25zZSIAEoABChNHZXRFeHBlcmltZW50UmVzdWx0EjIuYnVja2V0ZWVyLmV2ZW50Y291bnRlci5HZXRFeHBlcmltZW50UmVzdWx0UmVxdWVzdBozLmJ1Y2tldGVlci5ldmVudGNvdW50ZXIuR2V0RXhwZXJpbWVudFJlc3VsdFJlc3BvbnNlIgAShgEKFUxpc3RFeHBlcmltZW50UmVzdWx0cxI0LmJ1Y2tldGVlci5ldmVudGNvdW50ZXIuTGlzdEV4cGVyaW1lbnRSZXN1bHRzUmVxdWVzdBo1LmJ1Y2tldGVlci5ldmVudGNvdW50ZXIuTGlzdEV4cGVyaW1lbnRSZXN1bHRzUmVzcG9uc2UiABJrCgxHZXRHb2F
sQ291bnQSKy5idWNrZXRlZXIuZXZlbnRjb3VudGVyLkdldEdvYWxDb3VudFJlcXVlc3QaLC5idWNrZXRlZXIuZXZlbnRjb3VudGVyLkdldEdvYWxDb3VudFJlc3BvbnNlIgAScQoOR2V0R29hbENvdW50VjISLS5idWNrZXRlZXIuZXZlbnRjb3VudGVyLkdldEdvYWxDb3VudFYyUmVxdWVzdBouLmJ1Y2tldGVlci5ldmVudGNvdW50ZXIuR2V0R29hbENvdW50VjJSZXNwb25zZSIAEnEKDkdldFVzZXJDb3VudFYyEi0uYnVja2V0ZWVyLmV2ZW50Y291bnRlci5HZXRVc2VyQ291bnRWMlJlcXVlc3QaLi5idWNrZXRlZXIuZXZlbnRjb3VudGVyLkdldFVzZXJDb3VudFYyUmVzcG9uc2UiABJ3ChBMaXN0VXNlck1ldGFkYXRhEi8uYnVja2V0ZWVyLmV2ZW50Y291bnRlci5MaXN0VXNlck1ldGFkYXRhUmVxdWVzdBowLmJ1Y2tldGVlci5ldmVudGNvdW50ZXIuTGlzdFVzZXJNZXRhZGF0YVJlc3BvbnNlIgBCL1otZ2l0aHViLmNvbS9jYS1kcC9idWNrZXRlZXIvcHJvdG8vZXZlbnRjb3VudGVyYgZwcm90bzM=" + experimentDescriptor: "CoYECh5nb29nbGUvcHJvdG9idWYvd3JhcHBlcnMucHJvdG8SD2dvb2dsZS5wcm90b2J1ZiIjCgtEb3VibGVWYWx1ZRIUCgV2YWx1ZRgBIAEoAVIFdmFsdWUiIgoKRmxvYXRWYWx1ZRIUCgV2YWx1ZRgBIAEoAlIFdmFsdWUiIgoKSW50NjRWYWx1ZRIUCgV2YWx1ZRgBIAEoA1IFdmFsdWUiIwoLVUludDY0VmFsdWUSFAoFdmFsdWUYASABKARSBXZhbHVlIiIKCkludDMyVmFsdWUSFAoFdmFsdWUYASABKAVSBXZhbHVlIiMKC1VJbnQzMlZhbHVlEhQKBXZhbHVlGAEgASgNUgV2YWx1ZSIhCglCb29sVmFsdWUSFAoFdmFsdWUYASABKAhSBXZhbHVlIiMKC1N0cmluZ1ZhbHVlEhQKBXZhbHVlGAEgASgJUgV2YWx1ZSIiCgpCeXRlc1ZhbHVlEhQKBXZhbHVlGAEgASgMUgV2YWx1ZUKDAQoTY29tLmdvb2dsZS5wcm90b2J1ZkINV3JhcHBlcnNQcm90b1ABWjFnb29nbGUuZ29sYW5nLm9yZy9wcm90b2J1Zi90eXBlcy9rbm93bi93cmFwcGVyc3Bi+AEBogIDR1BCqgIeR29vZ2xlLlByb3RvYnVmLldlbGxLbm93blR5cGVzYgZwcm90bzMKpQcKHnByb3RvL2V4cGVyaW1lbnQvY29tbWFuZC5wcm90bxIUYnVja2V0ZWVyLmV4cGVyaW1lbnQiWQoRQ3JlYXRlR29hbENvbW1hbmQSDgoCaWQYASABKAlSAmlkEhIKBG5hbWUYAiABKAlSBG5hbWUSIAoLZGVzY3JpcHRpb24YAyABKAlSC2Rlc2NyaXB0aW9uIicKEVJlbmFtZUdvYWxDb21tYW5kEhIKBG5hbWUYASABKAlSBG5hbWUiQAocQ2hhbmdlRGVzY3JpcHRpb25Hb2FsQ29tbWFuZBIgCgtkZXNjcmlwdGlvbhgBIAEoCVILZGVzY3JpcHRpb24iFAoSQXJjaGl2ZUdvYWxDb21tYW5kIhMKEURlbGV0ZUdvYWxDb21tYW5kIu8BChdDcmVhdGVFeHBlcmltZW50Q29tbWFuZBIdCgpmZWF0dXJlX2lkGAEgASgJUglmZWF0dXJlSWQSGQoIc3RhcnRfYXQYAyABKANSB3N0YXJ0QXQSFwoHc3RvcF9hdBgEIAEoA1IGc3RvcEF0EhkKCGdvYWxfaWRzGAUgAygJUgdnb2FsSWRzEhIKBG5hbWUYBiABKAlS
BG5hbWUSIAoLZGVzY3JpcHRpb24YByABKAlSC2Rlc2NyaXB0aW9uEioKEWJhc2VfdmFyaWF0aW9uX2lkGAggASgJUg9iYXNlVmFyaWF0aW9uSWRKBAgCEAMiUwodQ2hhbmdlRXhwZXJpbWVudFBlcmlvZENvbW1hbmQSGQoIc3RhcnRfYXQYASABKANSB3N0YXJ0QXQSFwoHc3RvcF9hdBgCIAEoA1IGc3RvcEF0IjEKG0NoYW5nZUV4cGVyaW1lbnROYW1lQ29tbWFuZBISCgRuYW1lGAEgASgJUgRuYW1lIkYKIkNoYW5nZUV4cGVyaW1lbnREZXNjcmlwdGlvbkNvbW1hbmQSIAoLZGVzY3JpcHRpb24YASABKAlSC2Rlc2NyaXB0aW9uIhcKFVN0b3BFeHBlcmltZW50Q29tbWFuZCIaChhBcmNoaXZlRXhwZXJpbWVudENvbW1hbmQiGQoXRGVsZXRlRXhwZXJpbWVudENvbW1hbmQiGAoWU3RhcnRFeHBlcmltZW50Q29tbWFuZCIZChdGaW5pc2hFeHBlcmltZW50Q29tbWFuZEItWitnaXRodWIuY29tL2NhLWRwL2J1Y2tldGVlci9wcm90by9leHBlcmltZW50YgZwcm90bzMK1gIKG3Byb3RvL2V4cGVyaW1lbnQvZ29hbC5wcm90bxIUYnVja2V0ZWVyLmV4cGVyaW1lbnQi6QEKBEdvYWwSDgoCaWQYASABKAlSAmlkEhIKBG5hbWUYAiABKAlSBG5hbWUSIAoLZGVzY3JpcHRpb24YAyABKAlSC2Rlc2NyaXB0aW9uEhgKB2RlbGV0ZWQYBCABKAhSB2RlbGV0ZWQSHQoKY3JlYXRlZF9hdBgFIAEoA1IJY3JlYXRlZEF0Eh0KCnVwZGF0ZWRfYXQYBiABKANSCXVwZGF0ZWRBdBInChBpc19pbl91c2Vfc3RhdHVzGAcgASgIUg1pc0luVXNlU3RhdHVzEhoKCGFyY2hpdmVkGAggASgIUghhcmNoaXZlZEItWitnaXRodWIuY29tL2NhLWRwL2J1Y2tldGVlci9wcm90by9leHBlcmltZW50YgZwcm90bzMKzwEKHXByb3RvL2ZlYXR1cmUvdmFyaWF0aW9uLnByb3RvEhFidWNrZXRlZXIuZmVhdHVyZSJnCglWYXJpYXRpb24SDgoCaWQYASABKAlSAmlkEhQKBXZhbHVlGAIgASgJUgV2YWx1ZRISCgRuYW1lGAMgASgJUgRuYW1lEiAKC2Rlc2NyaXB0aW9uGAQgASgJUgtkZXNjcmlwdGlvbkIqWihnaXRodWIuY29tL2NhLWRwL2J1Y2tldGVlci9wcm90by9mZWF0dXJlYgZwcm90bzMKtQcKIXByb3RvL2V4cGVyaW1lbnQvZXhwZXJpbWVudC5wcm90bxIUYnVja2V0ZWVyLmV4cGVyaW1lbnQaHXByb3RvL2ZlYXR1cmUvdmFyaWF0aW9uLnByb3RvItAFCgpFeHBlcmltZW50Eg4KAmlkGAEgASgJUgJpZBIbCgdnb2FsX2lkGAIgASgJQgIYAVIGZ29hbElkEh0KCmZlYXR1cmVfaWQYAyABKAlSCWZlYXR1cmVJZBInCg9mZWF0dXJlX3ZlcnNpb24YBCABKAVSDmZlYXR1cmVWZXJzaW9uEjwKCnZhcmlhdGlvbnMYBSADKAsyHC5idWNrZXRlZXIuZmVhdHVyZS5WYXJpYXRpb25SCnZhcmlhdGlvbnMSGQoIc3RhcnRfYXQYBiABKANSB3N0YXJ0QXQSFwoHc3RvcF9hdBgHIAEoA1IGc3RvcEF0EhwKB3N0b3BwZWQYCCABKAhCAhgBUgdzdG9wcGVkEiEKCnN0b3BwZWRfYXQYCSABKANCAjABUglzdG9wcGVkQXQSHQoKY3JlYXRlZF9hdBgKIAEoA1IJY3JlYXRlZEF0Eh0KCnVwZGF0ZWRfYXQYCyABKANSCXVwZGF0
ZWRBdBIYCgdkZWxldGVkGAwgASgIUgdkZWxldGVkEhkKCGdvYWxfaWRzGA0gAygJUgdnb2FsSWRzEhIKBG5hbWUYDiABKAlSBG5hbWUSIAoLZGVzY3JpcHRpb24YDyABKAlSC2Rlc2NyaXB0aW9uEioKEWJhc2VfdmFyaWF0aW9uX2lkGBAgASgJUg9iYXNlVmFyaWF0aW9uSWQSPwoGc3RhdHVzGBIgASgOMicuYnVja2V0ZWVyLmV4cGVyaW1lbnQuRXhwZXJpbWVudC5TdGF0dXNSBnN0YXR1cxIeCgptYWludGFpbmVyGBMgASgJUgptYWludGFpbmVyEhoKCGFyY2hpdmVkGBQgASgIUghhcmNoaXZlZCJCCgZTdGF0dXMSCwoHV0FJVElORxAAEgsKB1JVTk5JTkcQARILCgdTVE9QUEVEEAISEQoNRk9SQ0VfU1RPUFBFRBADSgQIERASIlEKC0V4cGVyaW1lbnRzEkIKC2V4cGVyaW1lbnRzGAEgAygLMiAuYnVja2V0ZWVyLmV4cGVyaW1lbnQuRXhwZXJpbWVudFILZXhwZXJpbWVudHNCLVorZ2l0aHViLmNvbS9jYS1kcC9idWNrZXRlZXIvcHJvdG8vZXhwZXJpbWVudGIGcHJvdG8zCpAyCh5wcm90by9leHBlcmltZW50L3NlcnZpY2UucHJvdG8SFGJ1Y2tldGVlci5leHBlcmltZW50Gh5nb29nbGUvcHJvdG9idWYvd3JhcHBlcnMucHJvdG8aHnByb3RvL2V4cGVyaW1lbnQvY29tbWFuZC5wcm90bxobcHJvdG8vZXhwZXJpbWVudC9nb2FsLnByb3RvGiFwcm90by9leHBlcmltZW50L2V4cGVyaW1lbnQucHJvdG8iVQoOR2V0R29hbFJlcXVlc3QSDgoCaWQYASABKAlSAmlkEjMKFWVudmlyb25tZW50X25hbWVzcGFjZRgCIAEoCVIUZW52aXJvbm1lbnROYW1lc3BhY2UiQQoPR2V0R29hbFJlc3BvbnNlEi4KBGdvYWwYASABKAsyGi5idWNrZXRlZXIuZXhwZXJpbWVudC5Hb2FsUgRnb2FsIrIEChBMaXN0R29hbHNSZXF1ZXN0EhsKCXBhZ2Vfc2l6ZRgBIAEoA1IIcGFnZVNpemUSFgoGY3Vyc29yGAIgASgJUgZjdXJzb3ISMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAMgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZRJJCghvcmRlcl9ieRgEIAEoDjIuLmJ1Y2tldGVlci5leHBlcmltZW50Lkxpc3RHb2Fsc1JlcXVlc3QuT3JkZXJCeVIHb3JkZXJCeRJeCg9vcmRlcl9kaXJlY3Rpb24YBSABKA4yNS5idWNrZXRlZXIuZXhwZXJpbWVudC5MaXN0R29hbHNSZXF1ZXN0Lk9yZGVyRGlyZWN0aW9uUg5vcmRlckRpcmVjdGlvbhIlCg5zZWFyY2hfa2V5d29yZBgGIAEoCVINc2VhcmNoS2V5d29yZBJDChBpc19pbl91c2Vfc3RhdHVzGAcgASgLMhouZ29vZ2xlLnByb3RvYnVmLkJvb2xWYWx1ZVINaXNJblVzZVN0YXR1cxI2CghhcmNoaXZlZBgIIAEoCzIaLmdvb2dsZS5wcm90b2J1Zi5Cb29sVmFsdWVSCGFyY2hpdmVkIkAKB09yZGVyQnkSCwoHREVGQVVMVBAAEggKBE5BTUUQARIOCgpDUkVBVEVEX0FUEAISDgoKVVBEQVRFRF9BVBADIiMKDk9yZGVyRGlyZWN0aW9uEgcKA0FTQxAAEggKBERFU0MQASJ+ChFMaXN0R29hbHNSZXNwb25zZRIwCgVnb2FscxgBIAMoCzIaLmJ1Y2tldGVlci5leHBlcmltZW50LkdvYWxSBWdvYWxzEhYKBmN1cnNvchgCIAEoCVIGY3Vyc29yEh8KC3RvdGFs
X2NvdW50GAMgASgDUgp0b3RhbENvdW50IosBChFDcmVhdGVHb2FsUmVxdWVzdBJBCgdjb21tYW5kGAEgASgLMicuYnVja2V0ZWVyLmV4cGVyaW1lbnQuQ3JlYXRlR29hbENvbW1hbmRSB2NvbW1hbmQSMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAIgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZSIUChJDcmVhdGVHb2FsUmVzcG9uc2UinQEKEkFyY2hpdmVHb2FsUmVxdWVzdBIOCgJpZBgBIAEoCVICaWQSQgoHY29tbWFuZBgCIAEoCzIoLmJ1Y2tldGVlci5leHBlcmltZW50LkFyY2hpdmVHb2FsQ29tbWFuZFIHY29tbWFuZBIzChVlbnZpcm9ubWVudF9uYW1lc3BhY2UYAyABKAlSFGVudmlyb25tZW50TmFtZXNwYWNlIhUKE0FyY2hpdmVHb2FsUmVzcG9uc2UimwEKEURlbGV0ZUdvYWxSZXF1ZXN0Eg4KAmlkGAEgASgJUgJpZBJBCgdjb21tYW5kGAIgASgLMicuYnVja2V0ZWVyLmV4cGVyaW1lbnQuRGVsZXRlR29hbENvbW1hbmRSB2NvbW1hbmQSMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAMgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZSIUChJEZWxldGVHb2FsUmVzcG9uc2UimgIKEVVwZGF0ZUdvYWxSZXF1ZXN0Eg4KAmlkGAEgASgJUgJpZBJOCg5yZW5hbWVfY29tbWFuZBgCIAEoCzInLmJ1Y2tldGVlci5leHBlcmltZW50LlJlbmFtZUdvYWxDb21tYW5kUg1yZW5hbWVDb21tYW5kEnAKGmNoYW5nZV9kZXNjcmlwdGlvbl9jb21tYW5kGAMgASgLMjIuYnVja2V0ZWVyLmV4cGVyaW1lbnQuQ2hhbmdlRGVzY3JpcHRpb25Hb2FsQ29tbWFuZFIYY2hhbmdlRGVzY3JpcHRpb25Db21tYW5kEjMKFWVudmlyb25tZW50X25hbWVzcGFjZRgEIAEoCVIUZW52aXJvbm1lbnROYW1lc3BhY2UiFAoSVXBkYXRlR29hbFJlc3BvbnNlIlsKFEdldEV4cGVyaW1lbnRSZXF1ZXN0Eg4KAmlkGAEgASgJUgJpZBIzChVlbnZpcm9ubWVudF9uYW1lc3BhY2UYAiABKAlSFGVudmlyb25tZW50TmFtZXNwYWNlIlkKFUdldEV4cGVyaW1lbnRSZXNwb25zZRJACgpleHBlcmltZW50GAEgASgLMiAuYnVja2V0ZWVyLmV4cGVyaW1lbnQuRXhwZXJpbWVudFIKZXhwZXJpbWVudCKiBgoWTGlzdEV4cGVyaW1lbnRzUmVxdWVzdBIdCgpmZWF0dXJlX2lkGAEgASgJUglmZWF0dXJlSWQSRAoPZmVhdHVyZV92ZXJzaW9uGAIgASgLMhsuZ29vZ2xlLnByb3RvYnVmLkludDMyVmFsdWVSDmZlYXR1cmVWZXJzaW9uEhIKBGZyb20YAyABKANSBGZyb20SDgoCdG8YBCABKANSAnRvEhsKCXBhZ2Vfc2l6ZRgFIAEoA1IIcGFnZVNpemUSFgoGY3Vyc29yGAYgASgJUgZjdXJzb3ISMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAcgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZRIzCgZzdGF0dXMYCCABKAsyGy5nb29nbGUucHJvdG9idWYuSW50MzJWYWx1ZVIGc3RhdHVzEh4KCm1haW50YWluZXIYCSABKAlSCm1haW50YWluZXISTwoIb3JkZXJfYnkYCiABKA4yNC5idWNrZXRlZXIuZXhwZXJpbWVudC5MaXN0RXhwZXJpbWVudHNSZXF1ZXN0Lk9yZGVyQnlSB29yZGVyQnkSZAoPb3JkZXJfZGlyZWN0aW9uGAsg
ASgOMjsuYnVja2V0ZWVyLmV4cGVyaW1lbnQuTGlzdEV4cGVyaW1lbnRzUmVxdWVzdC5PcmRlckRpcmVjdGlvblIOb3JkZXJEaXJlY3Rpb24SJQoOc2VhcmNoX2tleXdvcmQYDCABKAlSDXNlYXJjaEtleXdvcmQSNgoIYXJjaGl2ZWQYDSABKAsyGi5nb29nbGUucHJvdG9idWYuQm9vbFZhbHVlUghhcmNoaXZlZBJDCghzdGF0dXNlcxgOIAMoDjInLmJ1Y2tldGVlci5leHBlcmltZW50LkV4cGVyaW1lbnQuU3RhdHVzUghzdGF0dXNlcyJACgdPcmRlckJ5EgsKB0RFRkFVTFQQABIICgROQU1FEAESDgoKQ1JFQVRFRF9BVBACEg4KClVQREFURURfQVQQAyIjCg5PcmRlckRpcmVjdGlvbhIHCgNBU0MQABIICgRERVNDEAEilgEKF0xpc3RFeHBlcmltZW50c1Jlc3BvbnNlEkIKC2V4cGVyaW1lbnRzGAEgAygLMiAuYnVja2V0ZWVyLmV4cGVyaW1lbnQuRXhwZXJpbWVudFILZXhwZXJpbWVudHMSFgoGY3Vyc29yGAIgASgJUgZjdXJzb3ISHwoLdG90YWxfY291bnQYAyABKANSCnRvdGFsQ291bnQilwEKF0NyZWF0ZUV4cGVyaW1lbnRSZXF1ZXN0EkcKB2NvbW1hbmQYASABKAsyLS5idWNrZXRlZXIuZXhwZXJpbWVudC5DcmVhdGVFeHBlcmltZW50Q29tbWFuZFIHY29tbWFuZBIzChVlbnZpcm9ubWVudF9uYW1lc3BhY2UYAiABKAlSFGVudmlyb25tZW50TmFtZXNwYWNlIlwKGENyZWF0ZUV4cGVyaW1lbnRSZXNwb25zZRJACgpleHBlcmltZW50GAEgASgLMiAuYnVja2V0ZWVyLmV4cGVyaW1lbnQuRXhwZXJpbWVudFIKZXhwZXJpbWVudCLDAwoXVXBkYXRlRXhwZXJpbWVudFJlcXVlc3QSDgoCaWQYASABKAlSAmlkEjMKFWVudmlyb25tZW50X25hbWVzcGFjZRgEIAEoCVIUZW52aXJvbm1lbnROYW1lc3BhY2USfAogY2hhbmdlX2V4cGVyaW1lbnRfcGVyaW9kX2NvbW1hbmQYBSABKAsyMy5idWNrZXRlZXIuZXhwZXJpbWVudC5DaGFuZ2VFeHBlcmltZW50UGVyaW9kQ29tbWFuZFIdY2hhbmdlRXhwZXJpbWVudFBlcmlvZENvbW1hbmQSYQoTY2hhbmdlX25hbWVfY29tbWFuZBgGIAEoCzIxLmJ1Y2tldGVlci5leHBlcmltZW50LkNoYW5nZUV4cGVyaW1lbnROYW1lQ29tbWFuZFIRY2hhbmdlTmFtZUNvbW1hbmQSdgoaY2hhbmdlX2Rlc2NyaXB0aW9uX2NvbW1hbmQYByABKAsyOC5idWNrZXRlZXIuZXhwZXJpbWVudC5DaGFuZ2VFeHBlcmltZW50RGVzY3JpcHRpb25Db21tYW5kUhhjaGFuZ2VEZXNjcmlwdGlvbkNvbW1hbmRKBAgCEANKBAgDEAQiGgoYVXBkYXRlRXhwZXJpbWVudFJlc3BvbnNlIqUBChZTdGFydEV4cGVyaW1lbnRSZXF1ZXN0EjMKFWVudmlyb25tZW50X25hbWVzcGFjZRgBIAEoCVIUZW52aXJvbm1lbnROYW1lc3BhY2USDgoCaWQYAiABKAlSAmlkEkYKB2NvbW1hbmQYAyABKAsyLC5idWNrZXRlZXIuZXhwZXJpbWVudC5TdGFydEV4cGVyaW1lbnRDb21tYW5kUgdjb21tYW5kIhkKF1N0YXJ0RXhwZXJpbWVudFJlc3BvbnNlIqcBChdGaW5pc2hFeHBlcmltZW50UmVxdWVzdBIzChVlbnZpcm9ubWVudF9uYW1lc3BhY2UYASABKAlSFGVudmlyb25tZW50TmFt
ZXNwYWNlEg4KAmlkGAIgASgJUgJpZBJHCgdjb21tYW5kGAMgASgLMi0uYnVja2V0ZWVyLmV4cGVyaW1lbnQuRmluaXNoRXhwZXJpbWVudENvbW1hbmRSB2NvbW1hbmQiGgoYRmluaXNoRXhwZXJpbWVudFJlc3BvbnNlIqMBChVTdG9wRXhwZXJpbWVudFJlcXVlc3QSDgoCaWQYASABKAlSAmlkEkUKB2NvbW1hbmQYAiABKAsyKy5idWNrZXRlZXIuZXhwZXJpbWVudC5TdG9wRXhwZXJpbWVudENvbW1hbmRSB2NvbW1hbmQSMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAMgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZSIYChZTdG9wRXhwZXJpbWVudFJlc3BvbnNlIqkBChhBcmNoaXZlRXhwZXJpbWVudFJlcXVlc3QSDgoCaWQYASABKAlSAmlkEkgKB2NvbW1hbmQYAiABKAsyLi5idWNrZXRlZXIuZXhwZXJpbWVudC5BcmNoaXZlRXhwZXJpbWVudENvbW1hbmRSB2NvbW1hbmQSMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAMgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZSIbChlBcmNoaXZlRXhwZXJpbWVudFJlc3BvbnNlIqcBChdEZWxldGVFeHBlcmltZW50UmVxdWVzdBIOCgJpZBgBIAEoCVICaWQSRwoHY29tbWFuZBgCIAEoCzItLmJ1Y2tldGVlci5leHBlcmltZW50LkRlbGV0ZUV4cGVyaW1lbnRDb21tYW5kUgdjb21tYW5kEjMKFWVudmlyb25tZW50X25hbWVzcGFjZRgDIAEoCVIUZW52aXJvbm1lbnROYW1lc3BhY2UiGgoYRGVsZXRlRXhwZXJpbWVudFJlc3BvbnNlMucMChFFeHBlcmltZW50U2VydmljZRJYCgdHZXRHb2FsEiQuYnVja2V0ZWVyLmV4cGVyaW1lbnQuR2V0R29hbFJlcXVlc3QaJS5idWNrZXRlZXIuZXhwZXJpbWVudC5HZXRHb2FsUmVzcG9uc2UiABJeCglMaXN0R29hbHMSJi5idWNrZXRlZXIuZXhwZXJpbWVudC5MaXN0R29hbHNSZXF1ZXN0GicuYnVja2V0ZWVyLmV4cGVyaW1lbnQuTGlzdEdvYWxzUmVzcG9uc2UiABJhCgpDcmVhdGVHb2FsEicuYnVja2V0ZWVyLmV4cGVyaW1lbnQuQ3JlYXRlR29hbFJlcXVlc3QaKC5idWNrZXRlZXIuZXhwZXJpbWVudC5DcmVhdGVHb2FsUmVzcG9uc2UiABJhCgpVcGRhdGVHb2FsEicuYnVja2V0ZWVyLmV4cGVyaW1lbnQuVXBkYXRlR29hbFJlcXVlc3QaKC5idWNrZXRlZXIuZXhwZXJpbWVudC5VcGRhdGVHb2FsUmVzcG9uc2UiABJkCgtBcmNoaXZlR29hbBIoLmJ1Y2tldGVlci5leHBlcmltZW50LkFyY2hpdmVHb2FsUmVxdWVzdBopLmJ1Y2tldGVlci5leHBlcmltZW50LkFyY2hpdmVHb2FsUmVzcG9uc2UiABJhCgpEZWxldGVHb2FsEicuYnVja2V0ZWVyLmV4cGVyaW1lbnQuRGVsZXRlR29hbFJlcXVlc3QaKC5idWNrZXRlZXIuZXhwZXJpbWVudC5EZWxldGVHb2FsUmVzcG9uc2UiABJqCg1HZXRFeHBlcmltZW50EiouYnVja2V0ZWVyLmV4cGVyaW1lbnQuR2V0RXhwZXJpbWVudFJlcXVlc3QaKy5idWNrZXRlZXIuZXhwZXJpbWVudC5HZXRFeHBlcmltZW50UmVzcG9uc2UiABJwCg9MaXN0RXhwZXJpbWVudHMSLC5idWNrZXRlZXIuZXhwZXJpbWVudC5MaXN0RXhwZXJpbWVudHNSZXF1ZXN0Gi0uYnVj
a2V0ZWVyLmV4cGVyaW1lbnQuTGlzdEV4cGVyaW1lbnRzUmVzcG9uc2UiABJzChBDcmVhdGVFeHBlcmltZW50Ei0uYnVja2V0ZWVyLmV4cGVyaW1lbnQuQ3JlYXRlRXhwZXJpbWVudFJlcXVlc3QaLi5idWNrZXRlZXIuZXhwZXJpbWVudC5DcmVhdGVFeHBlcmltZW50UmVzcG9uc2UiABJzChBVcGRhdGVFeHBlcmltZW50Ei0uYnVja2V0ZWVyLmV4cGVyaW1lbnQuVXBkYXRlRXhwZXJpbWVudFJlcXVlc3QaLi5idWNrZXRlZXIuZXhwZXJpbWVudC5VcGRhdGVFeHBlcmltZW50UmVzcG9uc2UiABJwCg9TdGFydEV4cGVyaW1lbnQSLC5idWNrZXRlZXIuZXhwZXJpbWVudC5TdGFydEV4cGVyaW1lbnRSZXF1ZXN0Gi0uYnVja2V0ZWVyLmV4cGVyaW1lbnQuU3RhcnRFeHBlcmltZW50UmVzcG9uc2UiABJzChBGaW5pc2hFeHBlcmltZW50Ei0uYnVja2V0ZWVyLmV4cGVyaW1lbnQuRmluaXNoRXhwZXJpbWVudFJlcXVlc3QaLi5idWNrZXRlZXIuZXhwZXJpbWVudC5GaW5pc2hFeHBlcmltZW50UmVzcG9uc2UiABJtCg5TdG9wRXhwZXJpbWVudBIrLmJ1Y2tldGVlci5leHBlcmltZW50LlN0b3BFeHBlcmltZW50UmVxdWVzdBosLmJ1Y2tldGVlci5leHBlcmltZW50LlN0b3BFeHBlcmltZW50UmVzcG9uc2UiABJ2ChFBcmNoaXZlRXhwZXJpbWVudBIuLmJ1Y2tldGVlci5leHBlcmltZW50LkFyY2hpdmVFeHBlcmltZW50UmVxdWVzdBovLmJ1Y2tldGVlci5leHBlcmltZW50LkFyY2hpdmVFeHBlcmltZW50UmVzcG9uc2UiABJzChBEZWxldGVFeHBlcmltZW50Ei0uYnVja2V0ZWVyLmV4cGVyaW1lbnQuRGVsZXRlRXhwZXJpbWVudFJlcXVlc3QaLi5idWNrZXRlZXIuZXhwZXJpbWVudC5EZWxldGVFeHBlcmltZW50UmVzcG9uc2UiAEItWitnaXRodWIuY29tL2NhLWRwL2J1Y2tldGVlci9wcm90by9leHBlcmltZW50YgZwcm90bzM=" + featureDescriptor: 
"CoYECh5nb29nbGUvcHJvdG9idWYvd3JhcHBlcnMucHJvdG8SD2dvb2dsZS5wcm90b2J1ZiIjCgtEb3VibGVWYWx1ZRIUCgV2YWx1ZRgBIAEoAVIFdmFsdWUiIgoKRmxvYXRWYWx1ZRIUCgV2YWx1ZRgBIAEoAlIFdmFsdWUiIgoKSW50NjRWYWx1ZRIUCgV2YWx1ZRgBIAEoA1IFdmFsdWUiIwoLVUludDY0VmFsdWUSFAoFdmFsdWUYASABKARSBXZhbHVlIiIKCkludDMyVmFsdWUSFAoFdmFsdWUYASABKAVSBXZhbHVlIiMKC1VJbnQzMlZhbHVlEhQKBXZhbHVlGAEgASgNUgV2YWx1ZSIhCglCb29sVmFsdWUSFAoFdmFsdWUYASABKAhSBXZhbHVlIiMKC1N0cmluZ1ZhbHVlEhQKBXZhbHVlGAEgASgJUgV2YWx1ZSIiCgpCeXRlc1ZhbHVlEhQKBXZhbHVlGAEgASgMUgV2YWx1ZUKDAQoTY29tLmdvb2dsZS5wcm90b2J1ZkINV3JhcHBlcnNQcm90b1ABWjFnb29nbGUuZ29sYW5nLm9yZy9wcm90b2J1Zi90eXBlcy9rbm93bi93cmFwcGVyc3Bi+AEBogIDR1BCqgIeR29vZ2xlLlByb3RvYnVmLldlbGxLbm93blR5cGVzYgZwcm90bzMK5AEKGWdvb2dsZS9wcm90b2J1Zi9hbnkucHJvdG8SD2dvb2dsZS5wcm90b2J1ZiI2CgNBbnkSGQoIdHlwZV91cmwYASABKAlSB3R5cGVVcmwSFAoFdmFsdWUYAiABKAxSBXZhbHVlQnYKE2NvbS5nb29nbGUucHJvdG9idWZCCEFueVByb3RvUAFaLGdvb2dsZS5nb2xhbmcub3JnL3Byb3RvYnVmL3R5cGVzL2tub3duL2FueXBiogIDR1BCqgIeR29vZ2xlLlByb3RvYnVmLldlbGxLbm93blR5cGVzYgZwcm90bzMKmQMKGnByb3RvL2ZlYXR1cmUvY2xhdXNlLnByb3RvEhFidWNrZXRlZXIuZmVhdHVyZSKzAgoGQ2xhdXNlEg4KAmlkGAEgASgJUgJpZBIcCglhdHRyaWJ1dGUYAiABKAlSCWF0dHJpYnV0ZRI+CghvcGVyYXRvchgDIAEoDjIiLmJ1Y2tldGVlci5mZWF0dXJlLkNsYXVzZS5PcGVyYXRvclIIb3BlcmF0b3ISFgoGdmFsdWVzGAQgAygJUgZ2YWx1ZXMiogEKCE9wZXJhdG9yEgoKBkVRVUFMUxAAEgYKAklOEAESDQoJRU5EU19XSVRIEAISDwoLU1RBUlRTX1dJVEgQAxILCgdTRUdNRU5UEAQSCwoHR1JFQVRFUhAFEhQKEEdSRUFURVJfT1JfRVFVQUwQBhIICgRMRVNTEAcSEQoNTEVTU19PUl9FUVVBTBAIEgoKBkJFRk9SRRAJEgkKBUFGVEVSEApCKlooZ2l0aHViLmNvbS9jYS1kcC9idWNrZXRlZXIvcHJvdG8vZmVhdHVyZWIGcHJvdG8zCrQEChxwcm90by9mZWF0dXJlL3N0cmF0ZWd5LnByb3RvEhFidWNrZXRlZXIuZmVhdHVyZSItCg1GaXhlZFN0cmF0ZWd5EhwKCXZhcmlhdGlvbhgBIAEoCVIJdmFyaWF0aW9uIqIBCg9Sb2xsb3V0U3RyYXRlZ3kSTAoKdmFyaWF0aW9ucxgBIAMoCzIsLmJ1Y2tldGVlci5mZWF0dXJlLlJvbGxvdXRTdHJhdGVneS5WYXJpYXRpb25SCnZhcmlhdGlvbnMaQQoJVmFyaWF0aW9uEhwKCXZhcmlhdGlvbhgBIAEoCVIJdmFyaWF0aW9uEhYKBndlaWdodBgCIAEoBVIGd2VpZ2h0IvgBCghTdHJhdGVneRI0CgR0eXBlGAEgASgOMiAuYnVja2V0ZWVyLmZlYXR1cmUuU3RyYXRlZ3kuVHlwZVIEdHlwZRJHCg5
maXhlZF9zdHJhdGVneRgCIAEoCzIgLmJ1Y2tldGVlci5mZWF0dXJlLkZpeGVkU3RyYXRlZ3lSDWZpeGVkU3RyYXRlZ3kSTQoQcm9sbG91dF9zdHJhdGVneRgDIAEoCzIiLmJ1Y2tldGVlci5mZWF0dXJlLlJvbGxvdXRTdHJhdGVneVIPcm9sbG91dFN0cmF0ZWd5Ih4KBFR5cGUSCQoFRklYRUQQABILCgdST0xMT1VUEAFCKlooZ2l0aHViLmNvbS9jYS1kcC9idWNrZXRlZXIvcHJvdG8vZmVhdHVyZWIGcHJvdG8zCqICChhwcm90by9mZWF0dXJlL3J1bGUucHJvdG8SEWJ1Y2tldGVlci5mZWF0dXJlGhpwcm90by9mZWF0dXJlL2NsYXVzZS5wcm90bxoccHJvdG8vZmVhdHVyZS9zdHJhdGVneS5wcm90byKEAQoEUnVsZRIOCgJpZBgBIAEoCVICaWQSNwoIc3RyYXRlZ3kYAiABKAsyGy5idWNrZXRlZXIuZmVhdHVyZS5TdHJhdGVneVIIc3RyYXRlZ3kSMwoHY2xhdXNlcxgDIAMoCzIZLmJ1Y2tldGVlci5mZWF0dXJlLkNsYXVzZVIHY2xhdXNlc0IqWihnaXRodWIuY29tL2NhLWRwL2J1Y2tldGVlci9wcm90by9mZWF0dXJlYgZwcm90bzMKoQEKGnByb3RvL2ZlYXR1cmUvdGFyZ2V0LnByb3RvEhFidWNrZXRlZXIuZmVhdHVyZSI8CgZUYXJnZXQSHAoJdmFyaWF0aW9uGAEgASgJUgl2YXJpYXRpb24SFAoFdXNlcnMYAiADKAlSBXVzZXJzQipaKGdpdGh1Yi5jb20vY2EtZHAvYnVja2V0ZWVyL3Byb3RvL2ZlYXR1cmViBnByb3RvMwrPAQodcHJvdG8vZmVhdHVyZS92YXJpYXRpb24ucHJvdG8SEWJ1Y2tldGVlci5mZWF0dXJlImcKCVZhcmlhdGlvbhIOCgJpZBgBIAEoCVICaWQSFAoFdmFsdWUYAiABKAlSBXZhbHVlEhIKBG5hbWUYAyABKAlSBG5hbWUSIAoLZGVzY3JpcHRpb24YBCABKAlSC2Rlc2NyaXB0aW9uQipaKGdpdGh1Yi5jb20vY2EtZHAvYnVja2V0ZWVyL3Byb3RvL2ZlYXR1cmViBnByb3RvMwrtAgoqcHJvdG8vZmVhdHVyZS9mZWF0dXJlX2xhc3RfdXNlZF9pbmZvLnByb3RvEhFidWNrZXRlZXIuZmVhdHVyZSL3AQoTRmVhdHVyZUxhc3RVc2VkSW5mbxIdCgpmZWF0dXJlX2lkGAEgASgJUglmZWF0dXJlSWQSGAoHdmVyc2lvbhgCIAEoBVIHdmVyc2lvbhIgCgxsYXN0X3VzZWRfYXQYAyABKANSCmxhc3RVc2VkQXQSHQoKY3JlYXRlZF9hdBgEIAEoA1IJY3JlYXRlZEF0EjIKFWNsaWVudF9vbGRlc3RfdmVyc2lvbhgFIAEoCVITY2xpZW50T2xkZXN0VmVyc2lvbhIyChVjbGllbnRfbGF0ZXN0X3ZlcnNpb24YBiABKAlSE2NsaWVudExhdGVzdFZlcnNpb25CKlooZ2l0aHViLmNvbS9jYS1kcC9idWNrZXRlZXIvcHJvdG8vZmVhdHVyZWIGcHJvdG8zCrsBCiBwcm90by9mZWF0dXJlL3ByZXJlcXVpc2l0ZS5wcm90bxIRYnVja2V0ZWVyLmZlYXR1cmUiUAoMUHJlcmVxdWlzaXRlEh0KCmZlYXR1cmVfaWQYASABKAlSCWZlYXR1cmVJZBIhCgx2YXJpYXRpb25faWQYAiABKAlSC3ZhcmlhdGlvbklkQipaKGdpdGh1Yi5jb20vY2EtZHAvYnVja2V0ZWVyL3Byb3RvL2ZlYXR1cmViBnByb3RvMwqTCwobcHJvdG8vZmVhdHVyZS9mZWF0dXJlLnByb3RvEhFidWNrZXRlZXI
uZmVhdHVyZRoYcHJvdG8vZmVhdHVyZS9ydWxlLnByb3RvGhpwcm90by9mZWF0dXJlL3RhcmdldC5wcm90bxodcHJvdG8vZmVhdHVyZS92YXJpYXRpb24ucHJvdG8aHHByb3RvL2ZlYXR1cmUvc3RyYXRlZ3kucHJvdG8aKnByb3RvL2ZlYXR1cmUvZmVhdHVyZV9sYXN0X3VzZWRfaW5mby5wcm90bxogcHJvdG8vZmVhdHVyZS9wcmVyZXF1aXNpdGUucHJvdG8i0gcKB0ZlYXR1cmUSDgoCaWQYASABKAlSAmlkEhIKBG5hbWUYAiABKAlSBG5hbWUSIAoLZGVzY3JpcHRpb24YAyABKAlSC2Rlc2NyaXB0aW9uEhgKB2VuYWJsZWQYBCABKAhSB2VuYWJsZWQSGAoHZGVsZXRlZBgFIAEoCFIHZGVsZXRlZBI5ChZldmFsdWF0aW9uX3VuZGVsYXlhYmxlGAYgASgIQgIYAVIVZXZhbHVhdGlvblVuZGVsYXlhYmxlEhAKA3R0bBgHIAEoBVIDdHRsEhgKB3ZlcnNpb24YCCABKAVSB3ZlcnNpb24SHQoKY3JlYXRlZF9hdBgJIAEoA1IJY3JlYXRlZEF0Eh0KCnVwZGF0ZWRfYXQYCiABKANSCXVwZGF0ZWRBdBI8Cgp2YXJpYXRpb25zGAsgAygLMhwuYnVja2V0ZWVyLmZlYXR1cmUuVmFyaWF0aW9uUgp2YXJpYXRpb25zEjMKB3RhcmdldHMYDCADKAsyGS5idWNrZXRlZXIuZmVhdHVyZS5UYXJnZXRSB3RhcmdldHMSLQoFcnVsZXMYDSADKAsyFy5idWNrZXRlZXIuZmVhdHVyZS5SdWxlUgVydWxlcxJGChBkZWZhdWx0X3N0cmF0ZWd5GA4gASgLMhsuYnVja2V0ZWVyLmZlYXR1cmUuU3RyYXRlZ3lSD2RlZmF1bHRTdHJhdGVneRIjCg1vZmZfdmFyaWF0aW9uGA8gASgJUgxvZmZWYXJpYXRpb24SEgoEdGFncxgQIAMoCVIEdGFncxJMCg5sYXN0X3VzZWRfaW5mbxgRIAEoCzImLmJ1Y2tldGVlci5mZWF0dXJlLkZlYXR1cmVMYXN0VXNlZEluZm9SDGxhc3RVc2VkSW5mbxIeCgptYWludGFpbmVyGBIgASgJUgptYWludGFpbmVyEk8KDnZhcmlhdGlvbl90eXBlGBMgASgOMiguYnVja2V0ZWVyLmZlYXR1cmUuRmVhdHVyZS5WYXJpYXRpb25UeXBlUg12YXJpYXRpb25UeXBlEhoKCGFyY2hpdmVkGBQgASgIUghhcmNoaXZlZBJFCg1wcmVyZXF1aXNpdGVzGBUgAygLMh8uYnVja2V0ZWVyLmZlYXR1cmUuUHJlcmVxdWlzaXRlUg1wcmVyZXF1aXNpdGVzEiMKDXNhbXBsaW5nX3NlZWQYFiABKAlSDHNhbXBsaW5nU2VlZCI+Cg1WYXJpYXRpb25UeXBlEgoKBlNUUklORxAAEgsKB0JPT0xFQU4QARIKCgZOVU1CRVIQAhIICgRKU09OEAMiQgoIRmVhdHVyZXMSNgoIZmVhdHVyZXMYASADKAsyGi5idWNrZXRlZXIuZmVhdHVyZS5GZWF0dXJlUghmZWF0dXJlcyJTCgNUYWcSDgoCaWQYASABKAlSAmlkEh0KCmNyZWF0ZWRfYXQYAiABKANSCWNyZWF0ZWRBdBIdCgp1cGRhdGVkX2F0GAMgASgDUgl1cGRhdGVkQXRCKlooZ2l0aHViLmNvbS9jYS1kcC9idWNrZXRlZXIvcHJvdG8vZmVhdHVyZWIGcHJvdG8zCrkHChtwcm90by9mZWF0dXJlL3NlZ21lbnQucHJvdG8SEWJ1Y2tldGVlci5mZWF0dXJlGhhwcm90by9mZWF0dXJlL3J1bGUucHJvdG8i/AMKB1NlZ21lbnQSDgoCaWQYASABKAlSAmlkEhIKBG5hbWU
YAiABKAlSBG5hbWUSIAoLZGVzY3JpcHRpb24YAyABKAlSC2Rlc2NyaXB0aW9uEi0KBXJ1bGVzGAQgAygLMhcuYnVja2V0ZWVyLmZlYXR1cmUuUnVsZVIFcnVsZXMSHQoKY3JlYXRlZF9hdBgFIAEoA1IJY3JlYXRlZEF0Eh0KCnVwZGF0ZWRfYXQYBiABKANSCXVwZGF0ZWRBdBIcCgd2ZXJzaW9uGAcgASgDQgIYAVIHdmVyc2lvbhIYCgdkZWxldGVkGAggASgIUgdkZWxldGVkEi4KE2luY2x1ZGVkX3VzZXJfY291bnQYCSABKANSEWluY2x1ZGVkVXNlckNvdW50EjIKE2V4Y2x1ZGVkX3VzZXJfY291bnQYCiABKANCAhgBUhFleGNsdWRlZFVzZXJDb3VudBI5CgZzdGF0dXMYCyABKA4yIS5idWNrZXRlZXIuZmVhdHVyZS5TZWdtZW50LlN0YXR1c1IGc3RhdHVzEicKEGlzX2luX3VzZV9zdGF0dXMYDCABKAhSDWlzSW5Vc2VTdGF0dXMiPgoGU3RhdHVzEgsKB0lOSVRJQUwQABINCglVUExPQURJTkcQARIMCghTVUNFRURFRBACEgoKBkZBSUxFRBADItQBCgtTZWdtZW50VXNlchIOCgJpZBgBIAEoCVICaWQSHQoKc2VnbWVudF9pZBgCIAEoCVIJc2VnbWVudElkEhcKB3VzZXJfaWQYAyABKAlSBnVzZXJJZBI6CgVzdGF0ZRgEIAEoDjIkLmJ1Y2tldGVlci5mZWF0dXJlLlNlZ21lbnRVc2VyLlN0YXRlUgVzdGF0ZRIYCgdkZWxldGVkGAUgASgIUgdkZWxldGVkIicKBVN0YXRlEgwKCElOQ0xVREVEEAASEAoIRVhDTFVERUQQARoCCAEiYwoMU2VnbWVudFVzZXJzEh0KCnNlZ21lbnRfaWQYASABKAlSCXNlZ21lbnRJZBI0CgV1c2VycxgCIAMoCzIeLmJ1Y2tldGVlci5mZWF0dXJlLlNlZ21lbnRVc2VyUgV1c2Vyc0IqWihnaXRodWIuY29tL2NhLWRwL2J1Y2tldGVlci9wcm90by9mZWF0dXJlYgZwcm90bzMKuR8KG3Byb3RvL2ZlYXR1cmUvY29tbWFuZC5wcm90bxIRYnVja2V0ZWVyLmZlYXR1cmUaGWdvb2dsZS9wcm90b2J1Zi9hbnkucHJvdG8aHmdvb2dsZS9wcm90b2J1Zi93cmFwcGVycy5wcm90bxoacHJvdG8vZmVhdHVyZS9jbGF1c2UucHJvdG8aG3Byb3RvL2ZlYXR1cmUvZmVhdHVyZS5wcm90bxoYcHJvdG8vZmVhdHVyZS9ydWxlLnByb3RvGh1wcm90by9mZWF0dXJlL3ZhcmlhdGlvbi5wcm90bxoccHJvdG8vZmVhdHVyZS9zdHJhdGVneS5wcm90bxobcHJvdG8vZmVhdHVyZS9zZWdtZW50LnByb3RvGiBwcm90by9mZWF0dXJlL3ByZXJlcXVpc2l0ZS5wcm90byI5CgdDb21tYW5kEi4KB2NvbW1hbmQYASABKAsyFC5nb29nbGUucHJvdG9idWYuQW55Ugdjb21tYW5kIrUDChRDcmVhdGVGZWF0dXJlQ29tbWFuZBIOCgJpZBgBIAEoCVICaWQSEgoEbmFtZRgCIAEoCVIEbmFtZRIgCgtkZXNjcmlwdGlvbhgDIAEoCVILZGVzY3JpcHRpb24SPAoKdmFyaWF0aW9ucxgEIAMoCzIcLmJ1Y2tldGVlci5mZWF0dXJlLlZhcmlhdGlvblIKdmFyaWF0aW9ucxISCgR0YWdzGAUgAygJUgR0YWdzElgKGmRlZmF1bHRfb25fdmFyaWF0aW9uX2luZGV4GAYgASgLMhsuZ29vZ2xlLnByb3RvYnVmLkludDMyVmFsdWVSF2RlZmF1bHRPblZhcmlhdGlvbkluZGV4EloKG2RlZmF1bHRfb2Z
mX3ZhcmlhdGlvbl9pbmRleBgHIAEoCzIbLmdvb2dsZS5wcm90b2J1Zi5JbnQzMlZhbHVlUhhkZWZhdWx0T2ZmVmFyaWF0aW9uSW5kZXgSTwoOdmFyaWF0aW9uX3R5cGUYCCABKA4yKC5idWNrZXRlZXIuZmVhdHVyZS5GZWF0dXJlLlZhcmlhdGlvblR5cGVSDXZhcmlhdGlvblR5cGUiFwoVQXJjaGl2ZUZlYXR1cmVDb21tYW5kIhkKF1VuYXJjaGl2ZUZlYXR1cmVDb21tYW5kIhYKFERlbGV0ZUZlYXR1cmVDb21tYW5kIioKFFJlbmFtZUZlYXR1cmVDb21tYW5kEhIKBG5hbWUYASABKAlSBG5hbWUiPAoYQ2hhbmdlRGVzY3JpcHRpb25Db21tYW5kEiAKC2Rlc2NyaXB0aW9uGAEgASgJUgtkZXNjcmlwdGlvbiK4AQopQ2hhbmdlQnVsa1VwbG9hZFNlZ21lbnRVc2Vyc1N0YXR1c0NvbW1hbmQSOQoGc3RhdHVzGAEgASgOMiEuYnVja2V0ZWVyLmZlYXR1cmUuU2VnbWVudC5TdGF0dXNSBnN0YXR1cxI6CgVzdGF0ZRgCIAEoDjIkLmJ1Y2tldGVlci5mZWF0dXJlLlNlZ21lbnRVc2VyLlN0YXRlUgVzdGF0ZRIUCgVjb3VudBgDIAEoA1IFY291bnQiIQoNQWRkVGFnQ29tbWFuZBIQCgN0YWcYASABKAlSA3RhZyIkChBSZW1vdmVUYWdDb21tYW5kEhAKA3RhZxgBIAEoCVIDdGFnIhYKFEVuYWJsZUZlYXR1cmVDb21tYW5kIhcKFURpc2FibGVGZWF0dXJlQ29tbWFuZCJhChNBZGRWYXJpYXRpb25Db21tYW5kEhQKBXZhbHVlGAEgASgJUgV2YWx1ZRISCgRuYW1lGAIgASgJUgRuYW1lEiAKC2Rlc2NyaXB0aW9uGAMgASgJUgtkZXNjcmlwdGlvbiIoChZSZW1vdmVWYXJpYXRpb25Db21tYW5kEg4KAmlkGAEgASgJUgJpZCJDChtDaGFuZ2VWYXJpYXRpb25WYWx1ZUNvbW1hbmQSDgoCaWQYASABKAlSAmlkEhQKBXZhbHVlGAIgASgJUgV2YWx1ZSJAChpDaGFuZ2VWYXJpYXRpb25OYW1lQ29tbWFuZBIOCgJpZBgBIAEoCVICaWQSEgoEbmFtZRgCIAEoCVIEbmFtZSJVCiFDaGFuZ2VWYXJpYXRpb25EZXNjcmlwdGlvbkNvbW1hbmQSDgoCaWQYASABKAlSAmlkEiAKC2Rlc2NyaXB0aW9uGAIgASgJUgtkZXNjcmlwdGlvbiIrChlDaGFuZ2VPZmZWYXJpYXRpb25Db21tYW5kEg4KAmlkGAEgASgJUgJpZCI/ChlBZGRVc2VyVG9WYXJpYXRpb25Db21tYW5kEg4KAmlkGAEgASgJUgJpZBISCgR1c2VyGAIgASgJUgR1c2VyIkQKHlJlbW92ZVVzZXJGcm9tVmFyaWF0aW9uQ29tbWFuZBIOCgJpZBgBIAEoCVICaWQSEgoEdXNlchgCIAEoCVIEdXNlciJXChxDaGFuZ2VEZWZhdWx0U3RyYXRlZ3lDb21tYW5kEjcKCHN0cmF0ZWd5GAEgASgLMhsuYnVja2V0ZWVyLmZlYXR1cmUuU3RyYXRlZ3lSCHN0cmF0ZWd5Ij0KDkFkZFJ1bGVDb21tYW5kEisKBHJ1bGUYASABKAsyFy5idWNrZXRlZXIuZmVhdHVyZS5SdWxlUgRydWxlIn0KGUNoYW5nZVJ1bGVTdHJhdGVneUNvbW1hbmQSDgoCaWQYASABKAlSAmlkEhcKB3J1bGVfaWQYAiABKAlSBnJ1bGVJZBI3CghzdHJhdGVneRgDIAEoCzIbLmJ1Y2tldGVlci5mZWF0dXJlLlN0cmF0ZWd5UghzdHJhdGVneSIjChFEZWxldGVSdWxlQ29tbWFuZBIOCgJpZBg
BIAEoCVICaWQiXgoQQWRkQ2xhdXNlQ29tbWFuZBIXCgdydWxlX2lkGAEgASgJUgZydWxlSWQSMQoGY2xhdXNlGAIgASgLMhkuYnVja2V0ZWVyLmZlYXR1cmUuQ2xhdXNlUgZjbGF1c2UiPgoTRGVsZXRlQ2xhdXNlQ29tbWFuZBIOCgJpZBgBIAEoCVICaWQSFwoHcnVsZV9pZBgCIAEoCVIGcnVsZUlkImUKHENoYW5nZUNsYXVzZUF0dHJpYnV0ZUNvbW1hbmQSDgoCaWQYASABKAlSAmlkEhcKB3J1bGVfaWQYAiABKAlSBnJ1bGVJZBIcCglhdHRyaWJ1dGUYAyABKAlSCWF0dHJpYnV0ZSKGAQobQ2hhbmdlQ2xhdXNlT3BlcmF0b3JDb21tYW5kEg4KAmlkGAEgASgJUgJpZBIXCgdydWxlX2lkGAIgASgJUgZydWxlSWQSPgoIb3BlcmF0b3IYAyABKA4yIi5idWNrZXRlZXIuZmVhdHVyZS5DbGF1c2UuT3BlcmF0b3JSCG9wZXJhdG9yIlYKFUFkZENsYXVzZVZhbHVlQ29tbWFuZBIOCgJpZBgBIAEoCVICaWQSFwoHcnVsZV9pZBgCIAEoCVIGcnVsZUlkEhQKBXZhbHVlGAMgASgJUgV2YWx1ZSJZChhSZW1vdmVDbGF1c2VWYWx1ZUNvbW1hbmQSDgoCaWQYASABKAlSAmlkEhcKB3J1bGVfaWQYAiABKAlSBnJ1bGVJZBIUCgV2YWx1ZRgDIAEoCVIFdmFsdWUigwEKGkNoYW5nZUZpeGVkU3RyYXRlZ3lDb21tYW5kEg4KAmlkGAEgASgJUgJpZBIXCgdydWxlX2lkGAIgASgJUgZydWxlSWQSPAoIc3RyYXRlZ3kYAyABKAsyIC5idWNrZXRlZXIuZmVhdHVyZS5GaXhlZFN0cmF0ZWd5UghzdHJhdGVneSKHAQocQ2hhbmdlUm9sbG91dFN0cmF0ZWd5Q29tbWFuZBIOCgJpZBgBIAEoCVICaWQSFwoHcnVsZV9pZBgCIAEoCVIGcnVsZUlkEj4KCHN0cmF0ZWd5GAMgASgLMiIuYnVja2V0ZWVyLmZlYXR1cmUuUm9sbG91dFN0cmF0ZWd5UghzdHJhdGVneSJMChRDcmVhdGVTZWdtZW50Q29tbWFuZBISCgRuYW1lGAEgASgJUgRuYW1lEiAKC2Rlc2NyaXB0aW9uGAIgASgJUgtkZXNjcmlwdGlvbiIWChREZWxldGVTZWdtZW50Q29tbWFuZCIuChhDaGFuZ2VTZWdtZW50TmFtZUNvbW1hbmQSEgoEbmFtZRgBIAEoCVIEbmFtZSJDCh9DaGFuZ2VTZWdtZW50RGVzY3JpcHRpb25Db21tYW5kEiAKC2Rlc2NyaXB0aW9uGAEgASgJUgtkZXNjcmlwdGlvbiJuChVBZGRTZWdtZW50VXNlckNvbW1hbmQSGQoIdXNlcl9pZHMYASADKAlSB3VzZXJJZHMSOgoFc3RhdGUYAiABKA4yJC5idWNrZXRlZXIuZmVhdHVyZS5TZWdtZW50VXNlci5TdGF0ZVIFc3RhdGUicQoYRGVsZXRlU2VnbWVudFVzZXJDb21tYW5kEhkKCHVzZXJfaWRzGAEgAygJUgd1c2VySWRzEjoKBXN0YXRlGAIgASgOMiQuYnVja2V0ZWVyLmZlYXR1cmUuU2VnbWVudFVzZXIuU3RhdGVSBXN0YXRlIm8KHUJ1bGtVcGxvYWRTZWdtZW50VXNlcnNDb21tYW5kEhIKBGRhdGEYASABKAxSBGRhdGESOgoFc3RhdGUYAiABKA4yJC5idWNrZXRlZXIuZmVhdHVyZS5TZWdtZW50VXNlci5TdGF0ZVIFc3RhdGUiIAoeSW5jcmVtZW50RmVhdHVyZVZlcnNpb25Db21tYW5kIkoKE0Nsb25lRmVhdHVyZUNvbW1hbmQSMwoVZW52aXJvbm1lbnRfbmFtZXNwYWN
lGAEgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZSIaChhSZXNldFNhbXBsaW5nU2VlZENvbW1hbmQiXQoWQWRkUHJlcmVxdWlzaXRlQ29tbWFuZBJDCgxwcmVyZXF1aXNpdGUYASABKAsyHy5idWNrZXRlZXIuZmVhdHVyZS5QcmVyZXF1aXNpdGVSDHByZXJlcXVpc2l0ZSI6ChlSZW1vdmVQcmVyZXF1aXNpdGVDb21tYW5kEh0KCmZlYXR1cmVfaWQYASABKAlSCWZlYXR1cmVJZCJpCiJDaGFuZ2VQcmVyZXF1aXNpdGVWYXJpYXRpb25Db21tYW5kEkMKDHByZXJlcXVpc2l0ZRgBIAEoCzIfLmJ1Y2tldGVlci5mZWF0dXJlLlByZXJlcXVpc2l0ZVIMcHJlcmVxdWlzaXRlQipaKGdpdGh1Yi5jb20vY2EtZHAvYnVja2V0ZWVyL3Byb3RvL2ZlYXR1cmViBnByb3RvMwqXAgoacHJvdG8vZmVhdHVyZS9yZWFzb24ucHJvdG8SEWJ1Y2tldGVlci5mZWF0dXJlIrEBCgZSZWFzb24SMgoEdHlwZRgBIAEoDjIeLmJ1Y2tldGVlci5mZWF0dXJlLlJlYXNvbi5UeXBlUgR0eXBlEhcKB3J1bGVfaWQYAiABKAlSBnJ1bGVJZCJaCgRUeXBlEgoKBlRBUkdFVBAAEggKBFJVTEUQARILCgdERUZBVUxUEAMSCgoGQ0xJRU5UEAQSEQoNT0ZGX1ZBUklBVElPThAFEhAKDFBSRVJFUVVJU0lURRAGQipaKGdpdGh1Yi5jb20vY2EtZHAvYnVja2V0ZWVyL3Byb3RvL2ZlYXR1cmViBnByb3RvMwqRBQoecHJvdG8vZmVhdHVyZS9ldmFsdWF0aW9uLnByb3RvEhFidWNrZXRlZXIuZmVhdHVyZRodcHJvdG8vZmVhdHVyZS92YXJpYXRpb24ucHJvdG8aGnByb3RvL2ZlYXR1cmUvcmVhc29uLnByb3RvIrwCCgpFdmFsdWF0aW9uEg4KAmlkGAEgASgJUgJpZBIdCgpmZWF0dXJlX2lkGAIgASgJUglmZWF0dXJlSWQSJwoPZmVhdHVyZV92ZXJzaW9uGAMgASgFUg5mZWF0dXJlVmVyc2lvbhIXCgd1c2VyX2lkGAQgASgJUgZ1c2VySWQSIQoMdmFyaWF0aW9uX2lkGAUgASgJUgt2YXJpYXRpb25JZBI+Cgl2YXJpYXRpb24YBiABKAsyHC5idWNrZXRlZXIuZmVhdHVyZS5WYXJpYXRpb25CAhgBUgl2YXJpYXRpb24SMQoGcmVhc29uGAcgASgLMhkuYnVja2V0ZWVyLmZlYXR1cmUuUmVhc29uUgZyZWFzb24SJwoPdmFyaWF0aW9uX3ZhbHVlGAggASgJUg52YXJpYXRpb25WYWx1ZSKtAQoPVXNlckV2YWx1YXRpb25zEg4KAmlkGAEgASgJUgJpZBI/CgtldmFsdWF0aW9ucxgCIAMoCzIdLmJ1Y2tldGVlci5mZWF0dXJlLkV2YWx1YXRpb25SC2V2YWx1YXRpb25zEh0KCmNyZWF0ZWRfYXQYAyABKANSCWNyZWF0ZWRBdCIqCgVTdGF0ZRIKCgZRVUVVRUQQABILCgdQQVJUSUFMEAESCAoERlVMTBACQipaKGdpdGh1Yi5jb20vY2EtZHAvYnVja2V0ZWVyL3Byb3RvL2ZlYXR1cmViBnByb3RvMwq5BAoVcHJvdG8vdXNlci91c2VyLnByb3RvEg5idWNrZXRlZXIudXNlciLeAwoEVXNlchIOCgJpZBgBIAEoCVICaWQSMgoEZGF0YRgCIAMoCzIeLmJ1Y2tldGVlci51c2VyLlVzZXIuRGF0YUVudHJ5UgRkYXRhEkUKC3RhZ2dlZF9kYXRhGAMgAygLMiQuYnVja2V0ZWVyLnVzZXIuVXNlci5UYWdnZWREYXRhRW50cnlSCnRhZ2dlZER
hdGESGwoJbGFzdF9zZWVuGAQgASgDUghsYXN0U2VlbhIdCgpjcmVhdGVkX2F0GAUgASgDUgljcmVhdGVkQXQafAoERGF0YRI6CgV2YWx1ZRgBIAMoCzIkLmJ1Y2tldGVlci51c2VyLlVzZXIuRGF0YS5WYWx1ZUVudHJ5UgV2YWx1ZRo4CgpWYWx1ZUVudHJ5EhAKA2tleRgBIAEoCVIDa2V5EhQKBXZhbHVlGAIgASgJUgV2YWx1ZToCOAEaNwoJRGF0YUVudHJ5EhAKA2tleRgBIAEoCVIDa2V5EhQKBXZhbHVlGAIgASgJUgV2YWx1ZToCOAEaWAoPVGFnZ2VkRGF0YUVudHJ5EhAKA2tleRgBIAEoCVIDa2V5Ei8KBXZhbHVlGAIgASgLMhkuYnVja2V0ZWVyLnVzZXIuVXNlci5EYXRhUgV2YWx1ZToCOAFCJ1olZ2l0aHViLmNvbS9jYS1kcC9idWNrZXRlZXIvcHJvdG8vdXNlcmIGcHJvdG8zCqxUChtwcm90by9mZWF0dXJlL3NlcnZpY2UucHJvdG8SEWJ1Y2tldGVlci5mZWF0dXJlGh5nb29nbGUvcHJvdG9idWYvd3JhcHBlcnMucHJvdG8aG3Byb3RvL2ZlYXR1cmUvY29tbWFuZC5wcm90bxobcHJvdG8vZmVhdHVyZS9mZWF0dXJlLnByb3RvGh5wcm90by9mZWF0dXJlL2V2YWx1YXRpb24ucHJvdG8aFXByb3RvL3VzZXIvdXNlci5wcm90bxobcHJvdG8vZmVhdHVyZS9zZWdtZW50LnByb3RvIlgKEUdldEZlYXR1cmVSZXF1ZXN0Eg4KAmlkGAEgASgJUgJpZBIzChVlbnZpcm9ubWVudF9uYW1lc3BhY2UYAiABKAlSFGVudmlyb25tZW50TmFtZXNwYWNlIkoKEkdldEZlYXR1cmVSZXNwb25zZRI0CgdmZWF0dXJlGAEgASgLMhouYnVja2V0ZWVyLmZlYXR1cmUuRmVhdHVyZVIHZmVhdHVyZSJbChJHZXRGZWF0dXJlc1JlcXVlc3QSMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAEgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZRIQCgNpZHMYAiADKAlSA2lkcyJNChNHZXRGZWF0dXJlc1Jlc3BvbnNlEjYKCGZlYXR1cmVzGAEgAygLMhouYnVja2V0ZWVyLmZlYXR1cmUuRmVhdHVyZVIIZmVhdHVyZXMitAUKE0xpc3RGZWF0dXJlc1JlcXVlc3QSGwoJcGFnZV9zaXplGAEgASgDUghwYWdlU2l6ZRIWCgZjdXJzb3IYAiABKAlSBmN1cnNvchISCgR0YWdzGAMgAygJUgR0YWdzEkkKCG9yZGVyX2J5GAQgASgOMi4uYnVja2V0ZWVyLmZlYXR1cmUuTGlzdEZlYXR1cmVzUmVxdWVzdC5PcmRlckJ5UgdvcmRlckJ5El4KD29yZGVyX2RpcmVjdGlvbhgFIAEoDjI1LmJ1Y2tldGVlci5mZWF0dXJlLkxpc3RGZWF0dXJlc1JlcXVlc3QuT3JkZXJEaXJlY3Rpb25SDm9yZGVyRGlyZWN0aW9uEjMKFWVudmlyb25tZW50X25hbWVzcGFjZRgGIAEoCVIUZW52aXJvbm1lbnROYW1lc3BhY2USHgoKbWFpbnRhaW5lchgHIAEoCVIKbWFpbnRhaW5lchI0CgdlbmFibGVkGAggASgLMhouZ29vZ2xlLnByb3RvYnVmLkJvb2xWYWx1ZVIHZW5hYmxlZBJBCg5oYXNfZXhwZXJpbWVudBgJIAEoCzIaLmdvb2dsZS5wcm90b2J1Zi5Cb29sVmFsdWVSDWhhc0V4cGVyaW1lbnQSJQoOc2VhcmNoX2tleXdvcmQYCiABKAlSDXNlYXJjaEtleXdvcmQSNgoIYXJjaGl2ZWQYCyABKAsyGi5nb29nbGUucHJvdG9idWYuQm9
vbFZhbHVlUghhcmNoaXZlZCJXCgdPcmRlckJ5EgsKB0RFRkFVTFQQABIICgROQU1FEAESDgoKQ1JFQVRFRF9BVBACEg4KClVQREFURURfQVQQAxIICgRUQUdTEAQSCwoHRU5BQkxFRBAFIiMKDk9yZGVyRGlyZWN0aW9uEgcKA0FTQxAAEggKBERFU0MQASKHAQoUTGlzdEZlYXR1cmVzUmVzcG9uc2USNgoIZmVhdHVyZXMYASADKAsyGi5idWNrZXRlZXIuZmVhdHVyZS5GZWF0dXJlUghmZWF0dXJlcxIWCgZjdXJzb3IYAiABKAlSBmN1cnNvchIfCgt0b3RhbF9jb3VudBgDIAEoA1IKdG90YWxDb3VudCKaAQoaTGlzdEVuYWJsZWRGZWF0dXJlc1JlcXVlc3QSGwoJcGFnZV9zaXplGAEgASgDUghwYWdlU2l6ZRIWCgZjdXJzb3IYAiABKAlSBmN1cnNvchISCgR0YWdzGAMgAygJUgR0YWdzEjMKFWVudmlyb25tZW50X25hbWVzcGFjZRgEIAEoCVIUZW52aXJvbm1lbnROYW1lc3BhY2UibQobTGlzdEVuYWJsZWRGZWF0dXJlc1Jlc3BvbnNlEjYKCGZlYXR1cmVzGAEgAygLMhouYnVja2V0ZWVyLmZlYXR1cmUuRmVhdHVyZVIIZmVhdHVyZXMSFgoGY3Vyc29yGAIgASgJUgZjdXJzb3IijgEKFENyZWF0ZUZlYXR1cmVSZXF1ZXN0EkEKB2NvbW1hbmQYASABKAsyJy5idWNrZXRlZXIuZmVhdHVyZS5DcmVhdGVGZWF0dXJlQ29tbWFuZFIHY29tbWFuZBIzChVlbnZpcm9ubWVudF9uYW1lc3BhY2UYAiABKAlSFGVudmlyb25tZW50TmFtZXNwYWNlIhcKFUNyZWF0ZUZlYXR1cmVSZXNwb25zZSK4AQoURW5hYmxlRmVhdHVyZVJlcXVlc3QSDgoCaWQYASABKAlSAmlkEkEKB2NvbW1hbmQYAiABKAsyJy5idWNrZXRlZXIuZmVhdHVyZS5FbmFibGVGZWF0dXJlQ29tbWFuZFIHY29tbWFuZBIzChVlbnZpcm9ubWVudF9uYW1lc3BhY2UYAyABKAlSFGVudmlyb25tZW50TmFtZXNwYWNlEhgKB2NvbW1lbnQYBCABKAlSB2NvbW1lbnQiFwoVRW5hYmxlRmVhdHVyZVJlc3BvbnNlIroBChVEaXNhYmxlRmVhdHVyZVJlcXVlc3QSDgoCaWQYASABKAlSAmlkEkIKB2NvbW1hbmQYAiABKAsyKC5idWNrZXRlZXIuZmVhdHVyZS5EaXNhYmxlRmVhdHVyZUNvbW1hbmRSB2NvbW1hbmQSMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAMgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZRIYCgdjb21tZW50GAQgASgJUgdjb21tZW50IhgKFkRpc2FibGVGZWF0dXJlUmVzcG9uc2UiugEKFUFyY2hpdmVGZWF0dXJlUmVxdWVzdBIOCgJpZBgBIAEoCVICaWQSQgoHY29tbWFuZBgCIAEoCzIoLmJ1Y2tldGVlci5mZWF0dXJlLkFyY2hpdmVGZWF0dXJlQ29tbWFuZFIHY29tbWFuZBIzChVlbnZpcm9ubWVudF9uYW1lc3BhY2UYAyABKAlSFGVudmlyb25tZW50TmFtZXNwYWNlEhgKB2NvbW1lbnQYBCABKAlSB2NvbW1lbnQiGAoWQXJjaGl2ZUZlYXR1cmVSZXNwb25zZSK+AQoXVW5hcmNoaXZlRmVhdHVyZVJlcXVlc3QSDgoCaWQYASABKAlSAmlkEkQKB2NvbW1hbmQYAiABKAsyKi5idWNrZXRlZXIuZmVhdHVyZS5VbmFyY2hpdmVGZWF0dXJlQ29tbWFuZFIHY29tbWFuZBIzChVlbnZpcm9ubWVudF9uYW1lc3BhY2U
YAyABKAlSFGVudmlyb25tZW50TmFtZXNwYWNlEhgKB2NvbW1lbnQYBCABKAlSB2NvbW1lbnQiGgoYVW5hcmNoaXZlRmVhdHVyZVJlc3BvbnNlIrgBChREZWxldGVGZWF0dXJlUmVxdWVzdBIOCgJpZBgBIAEoCVICaWQSQQoHY29tbWFuZBgCIAEoCzInLmJ1Y2tldGVlci5mZWF0dXJlLkRlbGV0ZUZlYXR1cmVDb21tYW5kUgdjb21tYW5kEjMKFWVudmlyb25tZW50X25hbWVzcGFjZRgDIAEoCVIUZW52aXJvbm1lbnROYW1lc3BhY2USGAoHY29tbWVudBgEIAEoCVIHY29tbWVudCIXChVEZWxldGVGZWF0dXJlUmVzcG9uc2Ui5wMKG1VwZGF0ZUZlYXR1cmVEZXRhaWxzUmVxdWVzdBIOCgJpZBgBIAEoCVICaWQSXQoWcmVuYW1lX2ZlYXR1cmVfY29tbWFuZBgCIAEoCzInLmJ1Y2tldGVlci5mZWF0dXJlLlJlbmFtZUZlYXR1cmVDb21tYW5kUhRyZW5hbWVGZWF0dXJlQ29tbWFuZBJpChpjaGFuZ2VfZGVzY3JpcHRpb25fY29tbWFuZBgDIAEoCzIrLmJ1Y2tldGVlci5mZWF0dXJlLkNoYW5nZURlc2NyaXB0aW9uQ29tbWFuZFIYY2hhbmdlRGVzY3JpcHRpb25Db21tYW5kEkoKEGFkZF90YWdfY29tbWFuZHMYBCADKAsyIC5idWNrZXRlZXIuZmVhdHVyZS5BZGRUYWdDb21tYW5kUg5hZGRUYWdDb21tYW5kcxJTChNyZW1vdmVfdGFnX2NvbW1hbmRzGAUgAygLMiMuYnVja2V0ZWVyLmZlYXR1cmUuUmVtb3ZlVGFnQ29tbWFuZFIRcmVtb3ZlVGFnQ29tbWFuZHMSMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAYgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZRIYCgdjb21tZW50GAcgASgJUgdjb21tZW50Ih4KHFVwZGF0ZUZlYXR1cmVEZXRhaWxzUmVzcG9uc2UitwEKHlVwZGF0ZUZlYXR1cmVWYXJpYXRpb25zUmVxdWVzdBIOCgJpZBgBIAEoCVICaWQSNgoIY29tbWFuZHMYAiADKAsyGi5idWNrZXRlZXIuZmVhdHVyZS5Db21tYW5kUghjb21tYW5kcxIzChVlbnZpcm9ubWVudF9uYW1lc3BhY2UYAyABKAlSFGVudmlyb25tZW50TmFtZXNwYWNlEhgKB2NvbW1lbnQYBCABKAlSB2NvbW1lbnQiIQofVXBkYXRlRmVhdHVyZVZhcmlhdGlvbnNSZXNwb25zZSK2AQodVXBkYXRlRmVhdHVyZVRhcmdldGluZ1JlcXVlc3QSDgoCaWQYASABKAlSAmlkEjYKCGNvbW1hbmRzGAIgAygLMhouYnVja2V0ZWVyLmZlYXR1cmUuQ29tbWFuZFIIY29tbWFuZHMSMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAMgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZRIYCgdjb21tZW50GAQgASgJUgdjb21tZW50IiAKHlVwZGF0ZUZlYXR1cmVUYXJnZXRpbmdSZXNwb25zZSKcAQoTQ2xvbmVGZWF0dXJlUmVxdWVzdBIOCgJpZBgBIAEoCVICaWQSQAoHY29tbWFuZBgCIAEoCzImLmJ1Y2tldGVlci5mZWF0dXJlLkNsb25lRmVhdHVyZUNvbW1hbmRSB2NvbW1hbmQSMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAMgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZSIWChRDbG9uZUZlYXR1cmVSZXNwb25zZSKOAQoUQ3JlYXRlU2VnbWVudFJlcXVlc3QSQQoHY29tbWFuZBgBIAEoCzInLmJ1Y2tldGVlci5mZWF0dXJ
lLkNyZWF0ZVNlZ21lbnRDb21tYW5kUgdjb21tYW5kEjMKFWVudmlyb25tZW50X25hbWVzcGFjZRgCIAEoCVIUZW52aXJvbm1lbnROYW1lc3BhY2UiTQoVQ3JlYXRlU2VnbWVudFJlc3BvbnNlEjQKB3NlZ21lbnQYASABKAsyGi5idWNrZXRlZXIuZmVhdHVyZS5TZWdtZW50UgdzZWdtZW50IlgKEUdldFNlZ21lbnRSZXF1ZXN0Eg4KAmlkGAEgASgJUgJpZBIzChVlbnZpcm9ubWVudF9uYW1lc3BhY2UYAiABKAlSFGVudmlyb25tZW50TmFtZXNwYWNlIkoKEkdldFNlZ21lbnRSZXNwb25zZRI0CgdzZWdtZW50GAEgASgLMhouYnVja2V0ZWVyLmZlYXR1cmUuU2VnbWVudFIHc2VnbWVudCKyBAoTTGlzdFNlZ21lbnRzUmVxdWVzdBIbCglwYWdlX3NpemUYASABKANSCHBhZ2VTaXplEhYKBmN1cnNvchgCIAEoCVIGY3Vyc29yEjMKFWVudmlyb25tZW50X25hbWVzcGFjZRgDIAEoCVIUZW52aXJvbm1lbnROYW1lc3BhY2USSQoIb3JkZXJfYnkYBCABKA4yLi5idWNrZXRlZXIuZmVhdHVyZS5MaXN0U2VnbWVudHNSZXF1ZXN0Lk9yZGVyQnlSB29yZGVyQnkSXgoPb3JkZXJfZGlyZWN0aW9uGAUgASgOMjUuYnVja2V0ZWVyLmZlYXR1cmUuTGlzdFNlZ21lbnRzUmVxdWVzdC5PcmRlckRpcmVjdGlvblIOb3JkZXJEaXJlY3Rpb24SJQoOc2VhcmNoX2tleXdvcmQYBiABKAlSDXNlYXJjaEtleXdvcmQSMwoGc3RhdHVzGAcgASgLMhsuZ29vZ2xlLnByb3RvYnVmLkludDMyVmFsdWVSBnN0YXR1cxJDChBpc19pbl91c2Vfc3RhdHVzGAggASgLMhouZ29vZ2xlLnByb3RvYnVmLkJvb2xWYWx1ZVINaXNJblVzZVN0YXR1cyJACgdPcmRlckJ5EgsKB0RFRkFVTFQQABIICgROQU1FEAESDgoKQ1JFQVRFRF9BVBACEg4KClVQREFURURfQVQQAyIjCg5PcmRlckRpcmVjdGlvbhIHCgNBU0MQABIICgRERVNDEAEihwEKFExpc3RTZWdtZW50c1Jlc3BvbnNlEjYKCHNlZ21lbnRzGAEgAygLMhouYnVja2V0ZWVyLmZlYXR1cmUuU2VnbWVudFIIc2VnbWVudHMSFgoGY3Vyc29yGAIgASgJUgZjdXJzb3ISHwoLdG90YWxfY291bnQYAyABKANSCnRvdGFsQ291bnQingEKFERlbGV0ZVNlZ21lbnRSZXF1ZXN0Eg4KAmlkGAEgASgJUgJpZBJBCgdjb21tYW5kGAIgASgLMicuYnVja2V0ZWVyLmZlYXR1cmUuRGVsZXRlU2VnbWVudENvbW1hbmRSB2NvbW1hbmQSMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAMgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZSIXChVEZWxldGVTZWdtZW50UmVzcG9uc2UikwEKFFVwZGF0ZVNlZ21lbnRSZXF1ZXN0Eg4KAmlkGAEgASgJUgJpZBI2Cghjb21tYW5kcxgCIAMoCzIaLmJ1Y2tldGVlci5mZWF0dXJlLkNvbW1hbmRSCGNvbW1hbmRzEjMKFWVudmlyb25tZW50X25hbWVzcGFjZRgDIAEoCVIUZW52aXJvbm1lbnROYW1lc3BhY2UiFwoVVXBkYXRlU2VnbWVudFJlc3BvbnNlIqABChVBZGRTZWdtZW50VXNlclJlcXVlc3QSDgoCaWQYASABKAlSAmlkEkIKB2NvbW1hbmQYAiABKAsyKC5idWNrZXRlZXIuZmVhdHVyZS5BZGRTZWdtZW50VXNlckNvbW1hbmRSB2NvbW1
hbmQSMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAMgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZSIYChZBZGRTZWdtZW50VXNlclJlc3BvbnNlIqYBChhEZWxldGVTZWdtZW50VXNlclJlcXVlc3QSDgoCaWQYASABKAlSAmlkEkUKB2NvbW1hbmQYAiABKAsyKy5idWNrZXRlZXIuZmVhdHVyZS5EZWxldGVTZWdtZW50VXNlckNvbW1hbmRSB2NvbW1hbmQSMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAMgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZSIbChlEZWxldGVTZWdtZW50VXNlclJlc3BvbnNlIsABChVHZXRTZWdtZW50VXNlclJlcXVlc3QSHQoKc2VnbWVudF9pZBgBIAEoCVIJc2VnbWVudElkEhcKB3VzZXJfaWQYAiABKAlSBnVzZXJJZBI6CgVzdGF0ZRgDIAEoDjIkLmJ1Y2tldGVlci5mZWF0dXJlLlNlZ21lbnRVc2VyLlN0YXRlUgVzdGF0ZRIzChVlbnZpcm9ubWVudF9uYW1lc3BhY2UYBCABKAlSFGVudmlyb25tZW50TmFtZXNwYWNlIkwKFkdldFNlZ21lbnRVc2VyUmVzcG9uc2USMgoEdXNlchgBIAEoCzIeLmJ1Y2tldGVlci5mZWF0dXJlLlNlZ21lbnRVc2VyUgR1c2VyIu4BChdMaXN0U2VnbWVudFVzZXJzUmVxdWVzdBIbCglwYWdlX3NpemUYASABKANSCHBhZ2VTaXplEhYKBmN1cnNvchgCIAEoCVIGY3Vyc29yEh0KCnNlZ21lbnRfaWQYAyABKAlSCXNlZ21lbnRJZBIxCgVzdGF0ZRgEIAEoCzIbLmdvb2dsZS5wcm90b2J1Zi5JbnQzMlZhbHVlUgVzdGF0ZRIXCgd1c2VyX2lkGAUgASgJUgZ1c2VySWQSMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAYgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZSJoChhMaXN0U2VnbWVudFVzZXJzUmVzcG9uc2USNAoFdXNlcnMYASADKAsyHi5idWNrZXRlZXIuZmVhdHVyZS5TZWdtZW50VXNlclIFdXNlcnMSFgoGY3Vyc29yGAIgASgJUgZjdXJzb3IivwEKHUJ1bGtVcGxvYWRTZWdtZW50VXNlcnNSZXF1ZXN0EjMKFWVudmlyb25tZW50X25hbWVzcGFjZRgBIAEoCVIUZW52aXJvbm1lbnROYW1lc3BhY2USHQoKc2VnbWVudF9pZBgCIAEoCVIJc2VnbWVudElkEkoKB2NvbW1hbmQYAyABKAsyMC5idWNrZXRlZXIuZmVhdHVyZS5CdWxrVXBsb2FkU2VnbWVudFVzZXJzQ29tbWFuZFIHY29tbWFuZCIgCh5CdWxrVXBsb2FkU2VnbWVudFVzZXJzUmVzcG9uc2UisQEKH0J1bGtEb3dubG9hZFNlZ21lbnRVc2Vyc1JlcXVlc3QSMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAEgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZRIdCgpzZWdtZW50X2lkGAIgASgJUglzZWdtZW50SWQSOgoFc3RhdGUYAyABKA4yJC5idWNrZXRlZXIuZmVhdHVyZS5TZWdtZW50VXNlci5TdGF0ZVIFc3RhdGUiNgogQnVsa0Rvd25sb2FkU2VnbWVudFVzZXJzUmVzcG9uc2USEgoEZGF0YRgBIAEoDFIEZGF0YSKKAQoXRXZhbHVhdGVGZWF0dXJlc1JlcXVlc3QSKAoEdXNlchgBIAEoCzIULmJ1Y2tldGVlci51c2VyLlVzZXJSBHVzZXISMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAIgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZRIQCgN0YWcYAyABKAl
SA3RhZyJpChhFdmFsdWF0ZUZlYXR1cmVzUmVzcG9uc2USTQoQdXNlcl9ldmFsdWF0aW9ucxgBIAEoCzIiLmJ1Y2tldGVlci5mZWF0dXJlLlVzZXJFdmFsdWF0aW9uc1IPdXNlckV2YWx1YXRpb25zInsKGUdldFVzZXJFdmFsdWF0aW9uc1JlcXVlc3QSMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAEgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZRIQCgN0YWcYAiABKAlSA3RhZxIXCgd1c2VyX2lkGAMgASgJUgZ1c2VySWQiXQoaR2V0VXNlckV2YWx1YXRpb25zUmVzcG9uc2USPwoLZXZhbHVhdGlvbnMYASADKAsyHS5idWNrZXRlZXIuZmVhdHVyZS5FdmFsdWF0aW9uUgtldmFsdWF0aW9ucyKjAQobVXBzZXJ0VXNlckV2YWx1YXRpb25SZXF1ZXN0EjMKFWVudmlyb25tZW50X25hbWVzcGFjZRgBIAEoCVIUZW52aXJvbm1lbnROYW1lc3BhY2USEAoDdGFnGAIgASgJUgN0YWcSPQoKZXZhbHVhdGlvbhgDIAEoCzIdLmJ1Y2tldGVlci5mZWF0dXJlLkV2YWx1YXRpb25SCmV2YWx1YXRpb24iHgocVXBzZXJ0VXNlckV2YWx1YXRpb25SZXNwb25zZTKbGAoORmVhdHVyZVNlcnZpY2USWwoKR2V0RmVhdHVyZRIkLmJ1Y2tldGVlci5mZWF0dXJlLkdldEZlYXR1cmVSZXF1ZXN0GiUuYnVja2V0ZWVyLmZlYXR1cmUuR2V0RmVhdHVyZVJlc3BvbnNlIgASXgoLR2V0RmVhdHVyZXMSJS5idWNrZXRlZXIuZmVhdHVyZS5HZXRGZWF0dXJlc1JlcXVlc3QaJi5idWNrZXRlZXIuZmVhdHVyZS5HZXRGZWF0dXJlc1Jlc3BvbnNlIgASYQoMTGlzdEZlYXR1cmVzEiYuYnVja2V0ZWVyLmZlYXR1cmUuTGlzdEZlYXR1cmVzUmVxdWVzdBonLmJ1Y2tldGVlci5mZWF0dXJlLkxpc3RGZWF0dXJlc1Jlc3BvbnNlIgASdgoTTGlzdEVuYWJsZWRGZWF0dXJlcxItLmJ1Y2tldGVlci5mZWF0dXJlLkxpc3RFbmFibGVkRmVhdHVyZXNSZXF1ZXN0Gi4uYnVja2V0ZWVyLmZlYXR1cmUuTGlzdEVuYWJsZWRGZWF0dXJlc1Jlc3BvbnNlIgASZAoNQ3JlYXRlRmVhdHVyZRInLmJ1Y2tldGVlci5mZWF0dXJlLkNyZWF0ZUZlYXR1cmVSZXF1ZXN0GiguYnVja2V0ZWVyLmZlYXR1cmUuQ3JlYXRlRmVhdHVyZVJlc3BvbnNlIgASZwoNRW5hYmxlRmVhdHVyZRInLmJ1Y2tldGVlci5mZWF0dXJlLkVuYWJsZUZlYXR1cmVSZXF1ZXN0GiguYnVja2V0ZWVyLmZlYXR1cmUuRW5hYmxlRmVhdHVyZVJlc3BvbnNlIgOIAgESagoORGlzYWJsZUZlYXR1cmUSKC5idWNrZXRlZXIuZmVhdHVyZS5EaXNhYmxlRmVhdHVyZVJlcXVlc3QaKS5idWNrZXRlZXIuZmVhdHVyZS5EaXNhYmxlRmVhdHVyZVJlc3BvbnNlIgOIAgESZwoOQXJjaGl2ZUZlYXR1cmUSKC5idWNrZXRlZXIuZmVhdHVyZS5BcmNoaXZlRmVhdHVyZVJlcXVlc3QaKS5idWNrZXRlZXIuZmVhdHVyZS5BcmNoaXZlRmVhdHVyZVJlc3BvbnNlIgASbQoQVW5hcmNoaXZlRmVhdHVyZRIqLmJ1Y2tldGVlci5mZWF0dXJlLlVuYXJjaGl2ZUZlYXR1cmVSZXF1ZXN0GisuYnVja2V0ZWVyLmZlYXR1cmUuVW5hcmNoaXZlRmVhdHVyZVJlc3BvbnNlIgASZAoNRGVsZXRlRmV
hdHVyZRInLmJ1Y2tldGVlci5mZWF0dXJlLkRlbGV0ZUZlYXR1cmVSZXF1ZXN0GiguYnVja2V0ZWVyLmZlYXR1cmUuRGVsZXRlRmVhdHVyZVJlc3BvbnNlIgASeQoUVXBkYXRlRmVhdHVyZURldGFpbHMSLi5idWNrZXRlZXIuZmVhdHVyZS5VcGRhdGVGZWF0dXJlRGV0YWlsc1JlcXVlc3QaLy5idWNrZXRlZXIuZmVhdHVyZS5VcGRhdGVGZWF0dXJlRGV0YWlsc1Jlc3BvbnNlIgASggEKF1VwZGF0ZUZlYXR1cmVWYXJpYXRpb25zEjEuYnVja2V0ZWVyLmZlYXR1cmUuVXBkYXRlRmVhdHVyZVZhcmlhdGlvbnNSZXF1ZXN0GjIuYnVja2V0ZWVyLmZlYXR1cmUuVXBkYXRlRmVhdHVyZVZhcmlhdGlvbnNSZXNwb25zZSIAEn8KFlVwZGF0ZUZlYXR1cmVUYXJnZXRpbmcSMC5idWNrZXRlZXIuZmVhdHVyZS5VcGRhdGVGZWF0dXJlVGFyZ2V0aW5nUmVxdWVzdBoxLmJ1Y2tldGVlci5mZWF0dXJlLlVwZGF0ZUZlYXR1cmVUYXJnZXRpbmdSZXNwb25zZSIAEmEKDENsb25lRmVhdHVyZRImLmJ1Y2tldGVlci5mZWF0dXJlLkNsb25lRmVhdHVyZVJlcXVlc3QaJy5idWNrZXRlZXIuZmVhdHVyZS5DbG9uZUZlYXR1cmVSZXNwb25zZSIAEmQKDUNyZWF0ZVNlZ21lbnQSJy5idWNrZXRlZXIuZmVhdHVyZS5DcmVhdGVTZWdtZW50UmVxdWVzdBooLmJ1Y2tldGVlci5mZWF0dXJlLkNyZWF0ZVNlZ21lbnRSZXNwb25zZSIAElsKCkdldFNlZ21lbnQSJC5idWNrZXRlZXIuZmVhdHVyZS5HZXRTZWdtZW50UmVxdWVzdBolLmJ1Y2tldGVlci5mZWF0dXJlLkdldFNlZ21lbnRSZXNwb25zZSIAEmEKDExpc3RTZWdtZW50cxImLmJ1Y2tldGVlci5mZWF0dXJlLkxpc3RTZWdtZW50c1JlcXVlc3QaJy5idWNrZXRlZXIuZmVhdHVyZS5MaXN0U2VnbWVudHNSZXNwb25zZSIAEmQKDURlbGV0ZVNlZ21lbnQSJy5idWNrZXRlZXIuZmVhdHVyZS5EZWxldGVTZWdtZW50UmVxdWVzdBooLmJ1Y2tldGVlci5mZWF0dXJlLkRlbGV0ZVNlZ21lbnRSZXNwb25zZSIAEmQKDVVwZGF0ZVNlZ21lbnQSJy5idWNrZXRlZXIuZmVhdHVyZS5VcGRhdGVTZWdtZW50UmVxdWVzdBooLmJ1Y2tldGVlci5mZWF0dXJlLlVwZGF0ZVNlZ21lbnRSZXNwb25zZSIAEmoKDkFkZFNlZ21lbnRVc2VyEiguYnVja2V0ZWVyLmZlYXR1cmUuQWRkU2VnbWVudFVzZXJSZXF1ZXN0GikuYnVja2V0ZWVyLmZlYXR1cmUuQWRkU2VnbWVudFVzZXJSZXNwb25zZSIDiAIBEnMKEURlbGV0ZVNlZ21lbnRVc2VyEisuYnVja2V0ZWVyLmZlYXR1cmUuRGVsZXRlU2VnbWVudFVzZXJSZXF1ZXN0GiwuYnVja2V0ZWVyLmZlYXR1cmUuRGVsZXRlU2VnbWVudFVzZXJSZXNwb25zZSIDiAIBEmoKDkdldFNlZ21lbnRVc2VyEiguYnVja2V0ZWVyLmZlYXR1cmUuR2V0U2VnbWVudFVzZXJSZXF1ZXN0GikuYnVja2V0ZWVyLmZlYXR1cmUuR2V0U2VnbWVudFVzZXJSZXNwb25zZSIDiAIBEm0KEExpc3RTZWdtZW50VXNlcnMSKi5idWNrZXRlZXIuZmVhdHVyZS5MaXN0U2VnbWVudFVzZXJzUmVxdWVzdBorLmJ1Y2tldGVlci5mZWF0dXJlLkxpc3RTZWdtZW5
0VXNlcnNSZXNwb25zZSIAEn8KFkJ1bGtVcGxvYWRTZWdtZW50VXNlcnMSMC5idWNrZXRlZXIuZmVhdHVyZS5CdWxrVXBsb2FkU2VnbWVudFVzZXJzUmVxdWVzdBoxLmJ1Y2tldGVlci5mZWF0dXJlLkJ1bGtVcGxvYWRTZWdtZW50VXNlcnNSZXNwb25zZSIAEoUBChhCdWxrRG93bmxvYWRTZWdtZW50VXNlcnMSMi5idWNrZXRlZXIuZmVhdHVyZS5CdWxrRG93bmxvYWRTZWdtZW50VXNlcnNSZXF1ZXN0GjMuYnVja2V0ZWVyLmZlYXR1cmUuQnVsa0Rvd25sb2FkU2VnbWVudFVzZXJzUmVzcG9uc2UiABJtChBFdmFsdWF0ZUZlYXR1cmVzEiouYnVja2V0ZWVyLmZlYXR1cmUuRXZhbHVhdGVGZWF0dXJlc1JlcXVlc3QaKy5idWNrZXRlZXIuZmVhdHVyZS5FdmFsdWF0ZUZlYXR1cmVzUmVzcG9uc2UiABJzChJHZXRVc2VyRXZhbHVhdGlvbnMSLC5idWNrZXRlZXIuZmVhdHVyZS5HZXRVc2VyRXZhbHVhdGlvbnNSZXF1ZXN0Gi0uYnVja2V0ZWVyLmZlYXR1cmUuR2V0VXNlckV2YWx1YXRpb25zUmVzcG9uc2UiABJ5ChRVcHNlcnRVc2VyRXZhbHVhdGlvbhIuLmJ1Y2tldGVlci5mZWF0dXJlLlVwc2VydFVzZXJFdmFsdWF0aW9uUmVxdWVzdBovLmJ1Y2tldGVlci5mZWF0dXJlLlVwc2VydFVzZXJFdmFsdWF0aW9uUmVzcG9uc2UiAEIqWihnaXRodWIuY29tL2NhLWRwL2J1Y2tldGVlci9wcm90by9mZWF0dXJlYgZwcm90bzM=" + migrationDescriptor: "CqUECiNwcm90by9taWdyYXRpb24vbXlzcWxfc2VydmljZS5wcm90bxITYnVja2V0ZWVyLm1pZ3JhdGlvbiIfCh1NaWdyYXRlQWxsTWFzdGVyU2NoZW1hUmVxdWVzdCIgCh5NaWdyYXRlQWxsTWFzdGVyU2NoZW1hUmVzcG9uc2UiMQobUm9sbGJhY2tNYXN0ZXJTY2hlbWFSZXF1ZXN0EhIKBHN0ZXAYASABKANSBHN0ZXAiHgocUm9sbGJhY2tNYXN0ZXJTY2hlbWFSZXNwb25zZTKcAgoVTWlncmF0aW9uTXlTUUxTZXJ2aWNlEoMBChZNaWdyYXRlQWxsTWFzdGVyU2NoZW1hEjIuYnVja2V0ZWVyLm1pZ3JhdGlvbi5NaWdyYXRlQWxsTWFzdGVyU2NoZW1hUmVxdWVzdBozLmJ1Y2tldGVlci5taWdyYXRpb24uTWlncmF0ZUFsbE1hc3RlclNjaGVtYVJlc3BvbnNlIgASfQoUUm9sbGJhY2tNYXN0ZXJTY2hlbWESMC5idWNrZXRlZXIubWlncmF0aW9uLlJvbGxiYWNrTWFzdGVyU2NoZW1hUmVxdWVzdBoxLmJ1Y2tldGVlci5taWdyYXRpb24uUm9sbGJhY2tNYXN0ZXJTY2hlbWFSZXNwb25zZSIAQixaKmdpdGh1Yi5jb20vY2EtZHAvYnVja2V0ZWVyL3Byb3RvL21pZ3JhdGlvbmIGcHJvdG8z" + notificationDescriptor: 
"CoYECh5nb29nbGUvcHJvdG9idWYvd3JhcHBlcnMucHJvdG8SD2dvb2dsZS5wcm90b2J1ZiIjCgtEb3VibGVWYWx1ZRIUCgV2YWx1ZRgBIAEoAVIFdmFsdWUiIgoKRmxvYXRWYWx1ZRIUCgV2YWx1ZRgBIAEoAlIFdmFsdWUiIgoKSW50NjRWYWx1ZRIUCgV2YWx1ZRgBIAEoA1IFdmFsdWUiIwoLVUludDY0VmFsdWUSFAoFdmFsdWUYASABKARSBXZhbHVlIiIKCkludDMyVmFsdWUSFAoFdmFsdWUYASABKAVSBXZhbHVlIiMKC1VJbnQzMlZhbHVlEhQKBXZhbHVlGAEgASgNUgV2YWx1ZSIhCglCb29sVmFsdWUSFAoFdmFsdWUYASABKAhSBXZhbHVlIiMKC1N0cmluZ1ZhbHVlEhQKBXZhbHVlGAEgASgJUgV2YWx1ZSIiCgpCeXRlc1ZhbHVlEhQKBXZhbHVlGAEgASgMUgV2YWx1ZUKDAQoTY29tLmdvb2dsZS5wcm90b2J1ZkINV3JhcHBlcnNQcm90b1ABWjFnb29nbGUuZ29sYW5nLm9yZy9wcm90b2J1Zi90eXBlcy9rbm93bi93cmFwcGVyc3Bi+AEBogIDR1BCqgIeR29vZ2xlLlByb3RvYnVmLldlbGxLbm93blR5cGVzYgZwcm90bzMK+gIKInByb3RvL25vdGlmaWNhdGlvbi9yZWNpcGllbnQucHJvdG8SFmJ1Y2tldGVlci5ub3RpZmljYXRpb24iyAEKCVJlY2lwaWVudBI6CgR0eXBlGAEgASgOMiYuYnVja2V0ZWVyLm5vdGlmaWNhdGlvbi5SZWNpcGllbnQuVHlwZVIEdHlwZRJlChdzbGFja19jaGFubmVsX3JlY2lwaWVudBgCIAEoCzItLmJ1Y2tldGVlci5ub3RpZmljYXRpb24uU2xhY2tDaGFubmVsUmVjaXBpZW50UhVzbGFja0NoYW5uZWxSZWNpcGllbnQiGAoEVHlwZRIQCgxTbGFja0NoYW5uZWwQACI4ChVTbGFja0NoYW5uZWxSZWNpcGllbnQSHwoLd2ViaG9va191cmwYASABKAlSCndlYmhvb2tVcmxCL1otZ2l0aHViLmNvbS9jYS1kcC9idWNrZXRlZXIvcHJvdG8vbm90aWZpY2F0aW9uYgZwcm90bzMKkgcKJXByb3RvL25vdGlmaWNhdGlvbi9zdWJzY3JpcHRpb24ucHJvdG8SFmJ1Y2tldGVlci5ub3RpZmljYXRpb24aInByb3RvL25vdGlmaWNhdGlvbi9yZWNpcGllbnQucHJvdG8i8wUKDFN1YnNjcmlwdGlvbhIOCgJpZBgBIAEoCVICaWQSHQoKY3JlYXRlZF9hdBgCIAEoA1IJY3JlYXRlZEF0Eh0KCnVwZGF0ZWRfYXQYAyABKANSCXVwZGF0ZWRBdBIaCghkaXNhYmxlZBgEIAEoCFIIZGlzYWJsZWQSUgoMc291cmNlX3R5cGVzGAUgAygOMi8uYnVja2V0ZWVyLm5vdGlmaWNhdGlvbi5TdWJzY3JpcHRpb24uU291cmNlVHlwZVILc291cmNlVHlwZXMSPwoJcmVjaXBpZW50GAYgASgLMiEuYnVja2V0ZWVyLm5vdGlmaWNhdGlvbi5SZWNpcGllbnRSCXJlY2lwaWVudBISCgRuYW1lGAcgASgJUgRuYW1lIs8DCgpTb3VyY2VUeXBlEhgKFERPTUFJTl9FVkVOVF9GRUFUVVJFEAASFQoRRE9NQUlOX0VWRU5UX0dPQUwQARIbChdET01BSU5fRVZFTlRfRVhQRVJJTUVOVBACEhgKFERPTUFJTl9FVkVOVF9BQ0NPVU5UEAMSFwoTRE9NQUlOX0VWRU5UX0FQSUtFWRAEEhgKFERPTUFJTl9FVkVOVF9TRUdNRU5UEAUSHAoYRE9NQUlOX0VWRU5UX0VOVklST05NRU5UEAYSHgo
aRE9NQUlOX0VWRU5UX0FETUlOX0FDQ09VTlQQBxIdChlET01BSU5fRVZFTlRfQVVUT09QU19SVUxFEAgSFQoRRE9NQUlOX0VWRU5UX1BVU0gQCRIdChlET01BSU5fRVZFTlRfU1VCU0NSSVBUSU9OEAoSIwofRE9NQUlOX0VWRU5UX0FETUlOX1NVQlNDUklQVElPThALEhgKFERPTUFJTl9FVkVOVF9QUk9KRUNUEAwSGAoURE9NQUlOX0VWRU5UX1dFQkhPT0sQDRIRCg1GRUFUVVJFX1NUQUxFEGQSFwoSRVhQRVJJTUVOVF9SVU5OSU5HEMgBEg4KCU1BVV9DT1VOVBCsAkIvWi1naXRodWIuY29tL2NhLWRwL2J1Y2tldGVlci9wcm90by9ub3RpZmljYXRpb25iBnByb3RvMwrTCgogcHJvdG8vbm90aWZpY2F0aW9uL2NvbW1hbmQucHJvdG8SFmJ1Y2tldGVlci5ub3RpZmljYXRpb24aJXByb3RvL25vdGlmaWNhdGlvbi9zdWJzY3JpcHRpb24ucHJvdG8aInByb3RvL25vdGlmaWNhdGlvbi9yZWNpcGllbnQucHJvdG8iyQEKHkNyZWF0ZUFkbWluU3Vic2NyaXB0aW9uQ29tbWFuZBJSCgxzb3VyY2VfdHlwZXMYASADKA4yLy5idWNrZXRlZXIubm90aWZpY2F0aW9uLlN1YnNjcmlwdGlvbi5Tb3VyY2VUeXBlUgtzb3VyY2VUeXBlcxI/CglyZWNpcGllbnQYAiABKAsyIS5idWNrZXRlZXIubm90aWZpY2F0aW9uLlJlY2lwaWVudFIJcmVjaXBpZW50EhIKBG5hbWUYAyABKAlSBG5hbWUifAomQWRkQWRtaW5TdWJzY3JpcHRpb25Tb3VyY2VUeXBlc0NvbW1hbmQSUgoMc291cmNlX3R5cGVzGAEgAygOMi8uYnVja2V0ZWVyLm5vdGlmaWNhdGlvbi5TdWJzY3JpcHRpb24uU291cmNlVHlwZVILc291cmNlVHlwZXMifwopRGVsZXRlQWRtaW5TdWJzY3JpcHRpb25Tb3VyY2VUeXBlc0NvbW1hbmQSUgoMc291cmNlX3R5cGVzGAEgAygOMi8uYnVja2V0ZWVyLm5vdGlmaWNhdGlvbi5TdWJzY3JpcHRpb24uU291cmNlVHlwZVILc291cmNlVHlwZXMiIAoeRW5hYmxlQWRtaW5TdWJzY3JpcHRpb25Db21tYW5kIiEKH0Rpc2FibGVBZG1pblN1YnNjcmlwdGlvbkNvbW1hbmQiIAoeRGVsZXRlQWRtaW5TdWJzY3JpcHRpb25Db21tYW5kIjQKHlJlbmFtZUFkbWluU3Vic2NyaXB0aW9uQ29tbWFuZBISCgRuYW1lGAEgASgJUgRuYW1lIsQBChlDcmVhdGVTdWJzY3JpcHRpb25Db21tYW5kElIKDHNvdXJjZV90eXBlcxgBIAMoDjIvLmJ1Y2tldGVlci5ub3RpZmljYXRpb24uU3Vic2NyaXB0aW9uLlNvdXJjZVR5cGVSC3NvdXJjZVR5cGVzEj8KCXJlY2lwaWVudBgCIAEoCzIhLmJ1Y2tldGVlci5ub3RpZmljYXRpb24uUmVjaXBpZW50UglyZWNpcGllbnQSEgoEbmFtZRgDIAEoCVIEbmFtZSJrChVBZGRTb3VyY2VUeXBlc0NvbW1hbmQSUgoMc291cmNlX3R5cGVzGAEgAygOMi8uYnVja2V0ZWVyLm5vdGlmaWNhdGlvbi5TdWJzY3JpcHRpb24uU291cmNlVHlwZVILc291cmNlVHlwZXMibgoYRGVsZXRlU291cmNlVHlwZXNDb21tYW5kElIKDHNvdXJjZV90eXBlcxgBIAMoDjIvLmJ1Y2tldGVlci5ub3RpZmljYXRpb24uU3Vic2NyaXB0aW9uLlNvdXJjZVR5cGVSC3NvdXJjZVR5cGVzIhsKGUVuYWJ
sZVN1YnNjcmlwdGlvbkNvbW1hbmQiHAoaRGlzYWJsZVN1YnNjcmlwdGlvbkNvbW1hbmQiGwoZRGVsZXRlU3Vic2NyaXB0aW9uQ29tbWFuZCIvChlSZW5hbWVTdWJzY3JpcHRpb25Db21tYW5kEhIKBG5hbWUYASABKAlSBG5hbWVCL1otZ2l0aHViLmNvbS9jYS1kcC9idWNrZXRlZXIvcHJvdG8vbm90aWZpY2F0aW9uYgZwcm90bzMKuTkKIHByb3RvL25vdGlmaWNhdGlvbi9zZXJ2aWNlLnByb3RvEhZidWNrZXRlZXIubm90aWZpY2F0aW9uGh5nb29nbGUvcHJvdG9idWYvd3JhcHBlcnMucHJvdG8aJXByb3RvL25vdGlmaWNhdGlvbi9zdWJzY3JpcHRpb24ucHJvdG8aIHByb3RvL25vdGlmaWNhdGlvbi9jb21tYW5kLnByb3RvIi0KG0dldEFkbWluU3Vic2NyaXB0aW9uUmVxdWVzdBIOCgJpZBgBIAEoCVICaWQiaAocR2V0QWRtaW5TdWJzY3JpcHRpb25SZXNwb25zZRJICgxzdWJzY3JpcHRpb24YASABKAsyJC5idWNrZXRlZXIubm90aWZpY2F0aW9uLlN1YnNjcmlwdGlvblIMc3Vic2NyaXB0aW9uIrcECh1MaXN0QWRtaW5TdWJzY3JpcHRpb25zUmVxdWVzdBIbCglwYWdlX3NpemUYASABKANSCHBhZ2VTaXplEhYKBmN1cnNvchgCIAEoCVIGY3Vyc29yElIKDHNvdXJjZV90eXBlcxgDIAMoDjIvLmJ1Y2tldGVlci5ub3RpZmljYXRpb24uU3Vic2NyaXB0aW9uLlNvdXJjZVR5cGVSC3NvdXJjZVR5cGVzElgKCG9yZGVyX2J5GAQgASgOMj0uYnVja2V0ZWVyLm5vdGlmaWNhdGlvbi5MaXN0QWRtaW5TdWJzY3JpcHRpb25zUmVxdWVzdC5PcmRlckJ5UgdvcmRlckJ5Em0KD29yZGVyX2RpcmVjdGlvbhgFIAEoDjJELmJ1Y2tldGVlci5ub3RpZmljYXRpb24uTGlzdEFkbWluU3Vic2NyaXB0aW9uc1JlcXVlc3QuT3JkZXJEaXJlY3Rpb25SDm9yZGVyRGlyZWN0aW9uEiUKDnNlYXJjaF9rZXl3b3JkGAYgASgJUg1zZWFyY2hLZXl3b3JkEjYKCGRpc2FibGVkGAcgASgLMhouZ29vZ2xlLnByb3RvYnVmLkJvb2xWYWx1ZVIIZGlzYWJsZWQiQAoHT3JkZXJCeRILCgdERUZBVUxUEAASCAoETkFNRRABEg4KCkNSRUFURURfQVQQAhIOCgpVUERBVEVEX0FUEAMiIwoOT3JkZXJEaXJlY3Rpb24SBwoDQVNDEAASCAoEREVTQxABIqUBCh5MaXN0QWRtaW5TdWJzY3JpcHRpb25zUmVzcG9uc2USSgoNc3Vic2NyaXB0aW9ucxgBIAMoCzIkLmJ1Y2tldGVlci5ub3RpZmljYXRpb24uU3Vic2NyaXB0aW9uUg1zdWJzY3JpcHRpb25zEhYKBmN1cnNvchgCIAEoCVIGY3Vyc29yEh8KC3RvdGFsX2NvdW50GAMgASgDUgp0b3RhbENvdW50Iq8BCiRMaXN0RW5hYmxlZEFkbWluU3Vic2NyaXB0aW9uc1JlcXVlc3QSGwoJcGFnZV9zaXplGAEgASgDUghwYWdlU2l6ZRIWCgZjdXJzb3IYAiABKAlSBmN1cnNvchJSCgxzb3VyY2VfdHlwZXMYAyADKA4yLy5idWNrZXRlZXIubm90aWZpY2F0aW9uLlN1YnNjcmlwdGlvbi5Tb3VyY2VUeXBlUgtzb3VyY2VUeXBlcyKLAQolTGlzdEVuYWJsZWRBZG1pblN1YnNjcmlwdGlvbnNSZXNwb25zZRJKCg1zdWJzY3JpcHRpb25zGAEgAygLMiQuYnVja2V0ZWVyLm5
vdGlmaWNhdGlvbi5TdWJzY3JpcHRpb25SDXN1YnNjcmlwdGlvbnMSFgoGY3Vyc29yGAIgASgJUgZjdXJzb3IicgoeQ3JlYXRlQWRtaW5TdWJzY3JpcHRpb25SZXF1ZXN0ElAKB2NvbW1hbmQYASABKAsyNi5idWNrZXRlZXIubm90aWZpY2F0aW9uLkNyZWF0ZUFkbWluU3Vic2NyaXB0aW9uQ29tbWFuZFIHY29tbWFuZCIhCh9DcmVhdGVBZG1pblN1YnNjcmlwdGlvblJlc3BvbnNlIoIBCh5EZWxldGVBZG1pblN1YnNjcmlwdGlvblJlcXVlc3QSDgoCaWQYASABKAlSAmlkElAKB2NvbW1hbmQYAiABKAsyNi5idWNrZXRlZXIubm90aWZpY2F0aW9uLkRlbGV0ZUFkbWluU3Vic2NyaXB0aW9uQ29tbWFuZFIHY29tbWFuZCIhCh9EZWxldGVBZG1pblN1YnNjcmlwdGlvblJlc3BvbnNlIoIBCh5FbmFibGVBZG1pblN1YnNjcmlwdGlvblJlcXVlc3QSDgoCaWQYASABKAlSAmlkElAKB2NvbW1hbmQYAiABKAsyNi5idWNrZXRlZXIubm90aWZpY2F0aW9uLkVuYWJsZUFkbWluU3Vic2NyaXB0aW9uQ29tbWFuZFIHY29tbWFuZCIhCh9FbmFibGVBZG1pblN1YnNjcmlwdGlvblJlc3BvbnNlIoQBCh9EaXNhYmxlQWRtaW5TdWJzY3JpcHRpb25SZXF1ZXN0Eg4KAmlkGAEgASgJUgJpZBJRCgdjb21tYW5kGAIgASgLMjcuYnVja2V0ZWVyLm5vdGlmaWNhdGlvbi5EaXNhYmxlQWRtaW5TdWJzY3JpcHRpb25Db21tYW5kUgdjb21tYW5kIiIKIERpc2FibGVBZG1pblN1YnNjcmlwdGlvblJlc3BvbnNlIqQDCh5VcGRhdGVBZG1pblN1YnNjcmlwdGlvblJlcXVlc3QSDgoCaWQYASABKAlSAmlkEncKGGFkZF9zb3VyY2VfdHlwZXNfY29tbWFuZBgCIAEoCzI+LmJ1Y2tldGVlci5ub3RpZmljYXRpb24uQWRkQWRtaW5TdWJzY3JpcHRpb25Tb3VyY2VUeXBlc0NvbW1hbmRSFWFkZFNvdXJjZVR5cGVzQ29tbWFuZBKAAQobZGVsZXRlX3NvdXJjZV90eXBlc19jb21tYW5kGAMgASgLMkEuYnVja2V0ZWVyLm5vdGlmaWNhdGlvbi5EZWxldGVBZG1pblN1YnNjcmlwdGlvblNvdXJjZVR5cGVzQ29tbWFuZFIYZGVsZXRlU291cmNlVHlwZXNDb21tYW5kEnYKG3JlbmFtZV9zdWJzY3JpcHRpb25fY29tbWFuZBgEIAEoCzI2LmJ1Y2tldGVlci5ub3RpZmljYXRpb24uUmVuYW1lQWRtaW5TdWJzY3JpcHRpb25Db21tYW5kUhlyZW5hbWVTdWJzY3JpcHRpb25Db21tYW5kIiEKH1VwZGF0ZUFkbWluU3Vic2NyaXB0aW9uUmVzcG9uc2UiXQoWR2V0U3Vic2NyaXB0aW9uUmVxdWVzdBIzChVlbnZpcm9ubWVudF9uYW1lc3BhY2UYASABKAlSFGVudmlyb25tZW50TmFtZXNwYWNlEg4KAmlkGAIgASgJUgJpZCJjChdHZXRTdWJzY3JpcHRpb25SZXNwb25zZRJICgxzdWJzY3JpcHRpb24YASABKAsyJC5idWNrZXRlZXIubm90aWZpY2F0aW9uLlN1YnNjcmlwdGlvblIMc3Vic2NyaXB0aW9uIt0EChhMaXN0U3Vic2NyaXB0aW9uc1JlcXVlc3QSMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAEgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZRIbCglwYWdlX3NpemUYAiABKANSCHBhZ2VTaXplEhYKBmN1cnNvchgDIAE
oCVIGY3Vyc29yElIKDHNvdXJjZV90eXBlcxgEIAMoDjIvLmJ1Y2tldGVlci5ub3RpZmljYXRpb24uU3Vic2NyaXB0aW9uLlNvdXJjZVR5cGVSC3NvdXJjZVR5cGVzElMKCG9yZGVyX2J5GAUgASgOMjguYnVja2V0ZWVyLm5vdGlmaWNhdGlvbi5MaXN0U3Vic2NyaXB0aW9uc1JlcXVlc3QuT3JkZXJCeVIHb3JkZXJCeRJoCg9vcmRlcl9kaXJlY3Rpb24YBiABKA4yPy5idWNrZXRlZXIubm90aWZpY2F0aW9uLkxpc3RTdWJzY3JpcHRpb25zUmVxdWVzdC5PcmRlckRpcmVjdGlvblIOb3JkZXJEaXJlY3Rpb24SJQoOc2VhcmNoX2tleXdvcmQYByABKAlSDXNlYXJjaEtleXdvcmQSNgoIZGlzYWJsZWQYCCABKAsyGi5nb29nbGUucHJvdG9idWYuQm9vbFZhbHVlUghkaXNhYmxlZCJACgdPcmRlckJ5EgsKB0RFRkFVTFQQABIICgROQU1FEAESDgoKQ1JFQVRFRF9BVBACEg4KClVQREFURURfQVQQAyIjCg5PcmRlckRpcmVjdGlvbhIHCgNBU0MQABIICgRERVNDEAEioAEKGUxpc3RTdWJzY3JpcHRpb25zUmVzcG9uc2USSgoNc3Vic2NyaXB0aW9ucxgBIAMoCzIkLmJ1Y2tldGVlci5ub3RpZmljYXRpb24uU3Vic2NyaXB0aW9uUg1zdWJzY3JpcHRpb25zEhYKBmN1cnNvchgCIAEoCVIGY3Vyc29yEh8KC3RvdGFsX2NvdW50GAMgASgDUgp0b3RhbENvdW50It8BCh9MaXN0RW5hYmxlZFN1YnNjcmlwdGlvbnNSZXF1ZXN0EjMKFWVudmlyb25tZW50X25hbWVzcGFjZRgBIAEoCVIUZW52aXJvbm1lbnROYW1lc3BhY2USGwoJcGFnZV9zaXplGAIgASgDUghwYWdlU2l6ZRIWCgZjdXJzb3IYAyABKAlSBmN1cnNvchJSCgxzb3VyY2VfdHlwZXMYBCADKA4yLy5idWNrZXRlZXIubm90aWZpY2F0aW9uLlN1YnNjcmlwdGlvbi5Tb3VyY2VUeXBlUgtzb3VyY2VUeXBlcyKGAQogTGlzdEVuYWJsZWRTdWJzY3JpcHRpb25zUmVzcG9uc2USSgoNc3Vic2NyaXB0aW9ucxgBIAMoCzIkLmJ1Y2tldGVlci5ub3RpZmljYXRpb24uU3Vic2NyaXB0aW9uUg1zdWJzY3JpcHRpb25zEhYKBmN1cnNvchgCIAEoCVIGY3Vyc29yIp0BChlDcmVhdGVTdWJzY3JpcHRpb25SZXF1ZXN0EjMKFWVudmlyb25tZW50X25hbWVzcGFjZRgBIAEoCVIUZW52aXJvbm1lbnROYW1lc3BhY2USSwoHY29tbWFuZBgCIAEoCzIxLmJ1Y2tldGVlci5ub3RpZmljYXRpb24uQ3JlYXRlU3Vic2NyaXB0aW9uQ29tbWFuZFIHY29tbWFuZCIcChpDcmVhdGVTdWJzY3JpcHRpb25SZXNwb25zZSKtAQoZRGVsZXRlU3Vic2NyaXB0aW9uUmVxdWVzdBIzChVlbnZpcm9ubWVudF9uYW1lc3BhY2UYASABKAlSFGVudmlyb25tZW50TmFtZXNwYWNlEg4KAmlkGAIgASgJUgJpZBJLCgdjb21tYW5kGAMgASgLMjEuYnVja2V0ZWVyLm5vdGlmaWNhdGlvbi5EZWxldGVTdWJzY3JpcHRpb25Db21tYW5kUgdjb21tYW5kIhwKGkRlbGV0ZVN1YnNjcmlwdGlvblJlc3BvbnNlIq0BChlFbmFibGVTdWJzY3JpcHRpb25SZXF1ZXN0EjMKFWVudmlyb25tZW50X25hbWVzcGFjZRgBIAEoCVIUZW52aXJvbm1lbnROYW1lc3BhY2USDgoCaWQYAiABKAl
SAmlkEksKB2NvbW1hbmQYAyABKAsyMS5idWNrZXRlZXIubm90aWZpY2F0aW9uLkVuYWJsZVN1YnNjcmlwdGlvbkNvbW1hbmRSB2NvbW1hbmQiHAoaRW5hYmxlU3Vic2NyaXB0aW9uUmVzcG9uc2UirwEKGkRpc2FibGVTdWJzY3JpcHRpb25SZXF1ZXN0EjMKFWVudmlyb25tZW50X25hbWVzcGFjZRgBIAEoCVIUZW52aXJvbm1lbnROYW1lc3BhY2USDgoCaWQYAiABKAlSAmlkEkwKB2NvbW1hbmQYAyABKAsyMi5idWNrZXRlZXIubm90aWZpY2F0aW9uLkRpc2FibGVTdWJzY3JpcHRpb25Db21tYW5kUgdjb21tYW5kIh0KG0Rpc2FibGVTdWJzY3JpcHRpb25SZXNwb25zZSKsAwoZVXBkYXRlU3Vic2NyaXB0aW9uUmVxdWVzdBIzChVlbnZpcm9ubWVudF9uYW1lc3BhY2UYASABKAlSFGVudmlyb25tZW50TmFtZXNwYWNlEg4KAmlkGAIgASgJUgJpZBJmChhhZGRfc291cmNlX3R5cGVzX2NvbW1hbmQYAyABKAsyLS5idWNrZXRlZXIubm90aWZpY2F0aW9uLkFkZFNvdXJjZVR5cGVzQ29tbWFuZFIVYWRkU291cmNlVHlwZXNDb21tYW5kEm8KG2RlbGV0ZV9zb3VyY2VfdHlwZXNfY29tbWFuZBgEIAEoCzIwLmJ1Y2tldGVlci5ub3RpZmljYXRpb24uRGVsZXRlU291cmNlVHlwZXNDb21tYW5kUhhkZWxldGVTb3VyY2VUeXBlc0NvbW1hbmQScQobcmVuYW1lX3N1YnNjcmlwdGlvbl9jb21tYW5kGAUgASgLMjEuYnVja2V0ZWVyLm5vdGlmaWNhdGlvbi5SZW5hbWVTdWJzY3JpcHRpb25Db21tYW5kUhlyZW5hbWVTdWJzY3JpcHRpb25Db21tYW5kIhwKGlVwZGF0ZVN1YnNjcmlwdGlvblJlc3BvbnNlMpkRChNOb3RpZmljYXRpb25TZXJ2aWNlEoMBChRHZXRBZG1pblN1YnNjcmlwdGlvbhIzLmJ1Y2tldGVlci5ub3RpZmljYXRpb24uR2V0QWRtaW5TdWJzY3JpcHRpb25SZXF1ZXN0GjQuYnVja2V0ZWVyLm5vdGlmaWNhdGlvbi5HZXRBZG1pblN1YnNjcmlwdGlvblJlc3BvbnNlIgASiQEKFkxpc3RBZG1pblN1YnNjcmlwdGlvbnMSNS5idWNrZXRlZXIubm90aWZpY2F0aW9uLkxpc3RBZG1pblN1YnNjcmlwdGlvbnNSZXF1ZXN0GjYuYnVja2V0ZWVyLm5vdGlmaWNhdGlvbi5MaXN0QWRtaW5TdWJzY3JpcHRpb25zUmVzcG9uc2UiABKeAQodTGlzdEVuYWJsZWRBZG1pblN1YnNjcmlwdGlvbnMSPC5idWNrZXRlZXIubm90aWZpY2F0aW9uLkxpc3RFbmFibGVkQWRtaW5TdWJzY3JpcHRpb25zUmVxdWVzdBo9LmJ1Y2tldGVlci5ub3RpZmljYXRpb24uTGlzdEVuYWJsZWRBZG1pblN1YnNjcmlwdGlvbnNSZXNwb25zZSIAEowBChdDcmVhdGVBZG1pblN1YnNjcmlwdGlvbhI2LmJ1Y2tldGVlci5ub3RpZmljYXRpb24uQ3JlYXRlQWRtaW5TdWJzY3JpcHRpb25SZXF1ZXN0GjcuYnVja2V0ZWVyLm5vdGlmaWNhdGlvbi5DcmVhdGVBZG1pblN1YnNjcmlwdGlvblJlc3BvbnNlIgASjAEKF0RlbGV0ZUFkbWluU3Vic2NyaXB0aW9uEjYuYnVja2V0ZWVyLm5vdGlmaWNhdGlvbi5EZWxldGVBZG1pblN1YnNjcmlwdGlvblJlcXVlc3QaNy5idWNrZXRlZXIubm90aWZpY2F0aW9uLkRlbGV0ZUF
kbWluU3Vic2NyaXB0aW9uUmVzcG9uc2UiABKMAQoXRW5hYmxlQWRtaW5TdWJzY3JpcHRpb24SNi5idWNrZXRlZXIubm90aWZpY2F0aW9uLkVuYWJsZUFkbWluU3Vic2NyaXB0aW9uUmVxdWVzdBo3LmJ1Y2tldGVlci5ub3RpZmljYXRpb24uRW5hYmxlQWRtaW5TdWJzY3JpcHRpb25SZXNwb25zZSIAEo8BChhEaXNhYmxlQWRtaW5TdWJzY3JpcHRpb24SNy5idWNrZXRlZXIubm90aWZpY2F0aW9uLkRpc2FibGVBZG1pblN1YnNjcmlwdGlvblJlcXVlc3QaOC5idWNrZXRlZXIubm90aWZpY2F0aW9uLkRpc2FibGVBZG1pblN1YnNjcmlwdGlvblJlc3BvbnNlIgASjAEKF1VwZGF0ZUFkbWluU3Vic2NyaXB0aW9uEjYuYnVja2V0ZWVyLm5vdGlmaWNhdGlvbi5VcGRhdGVBZG1pblN1YnNjcmlwdGlvblJlcXVlc3QaNy5idWNrZXRlZXIubm90aWZpY2F0aW9uLlVwZGF0ZUFkbWluU3Vic2NyaXB0aW9uUmVzcG9uc2UiABJ0Cg9HZXRTdWJzY3JpcHRpb24SLi5idWNrZXRlZXIubm90aWZpY2F0aW9uLkdldFN1YnNjcmlwdGlvblJlcXVlc3QaLy5idWNrZXRlZXIubm90aWZpY2F0aW9uLkdldFN1YnNjcmlwdGlvblJlc3BvbnNlIgASegoRTGlzdFN1YnNjcmlwdGlvbnMSMC5idWNrZXRlZXIubm90aWZpY2F0aW9uLkxpc3RTdWJzY3JpcHRpb25zUmVxdWVzdBoxLmJ1Y2tldGVlci5ub3RpZmljYXRpb24uTGlzdFN1YnNjcmlwdGlvbnNSZXNwb25zZSIAEo8BChhMaXN0RW5hYmxlZFN1YnNjcmlwdGlvbnMSNy5idWNrZXRlZXIubm90aWZpY2F0aW9uLkxpc3RFbmFibGVkU3Vic2NyaXB0aW9uc1JlcXVlc3QaOC5idWNrZXRlZXIubm90aWZpY2F0aW9uLkxpc3RFbmFibGVkU3Vic2NyaXB0aW9uc1Jlc3BvbnNlIgASfQoSQ3JlYXRlU3Vic2NyaXB0aW9uEjEuYnVja2V0ZWVyLm5vdGlmaWNhdGlvbi5DcmVhdGVTdWJzY3JpcHRpb25SZXF1ZXN0GjIuYnVja2V0ZWVyLm5vdGlmaWNhdGlvbi5DcmVhdGVTdWJzY3JpcHRpb25SZXNwb25zZSIAEn0KEkRlbGV0ZVN1YnNjcmlwdGlvbhIxLmJ1Y2tldGVlci5ub3RpZmljYXRpb24uRGVsZXRlU3Vic2NyaXB0aW9uUmVxdWVzdBoyLmJ1Y2tldGVlci5ub3RpZmljYXRpb24uRGVsZXRlU3Vic2NyaXB0aW9uUmVzcG9uc2UiABJ9ChJFbmFibGVTdWJzY3JpcHRpb24SMS5idWNrZXRlZXIubm90aWZpY2F0aW9uLkVuYWJsZVN1YnNjcmlwdGlvblJlcXVlc3QaMi5idWNrZXRlZXIubm90aWZpY2F0aW9uLkVuYWJsZVN1YnNjcmlwdGlvblJlc3BvbnNlIgASgAEKE0Rpc2FibGVTdWJzY3JpcHRpb24SMi5idWNrZXRlZXIubm90aWZpY2F0aW9uLkRpc2FibGVTdWJzY3JpcHRpb25SZXF1ZXN0GjMuYnVja2V0ZWVyLm5vdGlmaWNhdGlvbi5EaXNhYmxlU3Vic2NyaXB0aW9uUmVzcG9uc2UiABJ9ChJVcGRhdGVTdWJzY3JpcHRpb24SMS5idWNrZXRlZXIubm90aWZpY2F0aW9uLlVwZGF0ZVN1YnNjcmlwdGlvblJlcXVlc3QaMi5idWNrZXRlZXIubm90aWZpY2F0aW9uLlVwZGF0ZVN1YnNjcmlwdGlvblJlc3BvbnNlIgBCL1otZ2l0aHViLmNvbS9
jYS1kcC9idWNrZXRlZXIvcHJvdG8vbm90aWZpY2F0aW9uYgZwcm90bzM=" + pushDescriptor: "CpECChVwcm90by9wdXNoL3B1c2gucHJvdG8SDmJ1Y2tldGVlci5wdXNoIrYBCgRQdXNoEg4KAmlkGAEgASgJUgJpZBIeCgtmY21fYXBpX2tleRgCIAEoCVIJZmNtQXBpS2V5EhIKBHRhZ3MYAyADKAlSBHRhZ3MSGAoHZGVsZXRlZBgEIAEoCFIHZGVsZXRlZBISCgRuYW1lGAUgASgJUgRuYW1lEh0KCmNyZWF0ZWRfYXQYBiABKANSCWNyZWF0ZWRBdBIdCgp1cGRhdGVkX2F0GAcgASgDUgl1cGRhdGVkQXRCJ1olZ2l0aHViLmNvbS9jYS1kcC9idWNrZXRlZXIvcHJvdG8vcHVzaGIGcHJvdG8zCs0CChhwcm90by9wdXNoL2NvbW1hbmQucHJvdG8SDmJ1Y2tldGVlci5wdXNoIlsKEUNyZWF0ZVB1c2hDb21tYW5kEh4KC2ZjbV9hcGlfa2V5GAEgASgJUglmY21BcGlLZXkSEgoEdGFncxgCIAMoCVIEdGFncxISCgRuYW1lGAMgASgJUgRuYW1lIigKEkFkZFB1c2hUYWdzQ29tbWFuZBISCgR0YWdzGAEgAygJUgR0YWdzIisKFURlbGV0ZVB1c2hUYWdzQ29tbWFuZBISCgR0YWdzGAEgAygJUgR0YWdzIhMKEURlbGV0ZVB1c2hDb21tYW5kIicKEVJlbmFtZVB1c2hDb21tYW5kEhIKBG5hbWUYASABKAlSBG5hbWVCJ1olZ2l0aHViLmNvbS9jYS1kcC9idWNrZXRlZXIvcHJvdG8vcHVzaGIGcHJvdG8zCusNChhwcm90by9wdXNoL3NlcnZpY2UucHJvdG8SDmJ1Y2tldGVlci5wdXNoGhVwcm90by9wdXNoL3B1c2gucHJvdG8aGHByb3RvL3B1c2gvY29tbWFuZC5wcm90byKFAQoRQ3JlYXRlUHVzaFJlcXVlc3QSMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAEgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZRI7Cgdjb21tYW5kGAIgASgLMiEuYnVja2V0ZWVyLnB1c2guQ3JlYXRlUHVzaENvbW1hbmRSB2NvbW1hbmQiFAoSQ3JlYXRlUHVzaFJlc3BvbnNlIqwDChFMaXN0UHVzaGVzUmVxdWVzdBIzChVlbnZpcm9ubWVudF9uYW1lc3BhY2UYASABKAlSFGVudmlyb25tZW50TmFtZXNwYWNlEhsKCXBhZ2Vfc2l6ZRgCIAEoA1IIcGFnZVNpemUSFgoGY3Vyc29yGAMgASgJUgZjdXJzb3ISRAoIb3JkZXJfYnkYBCABKA4yKS5idWNrZXRlZXIucHVzaC5MaXN0UHVzaGVzUmVxdWVzdC5PcmRlckJ5UgdvcmRlckJ5ElkKD29yZGVyX2RpcmVjdGlvbhgFIAEoDjIwLmJ1Y2tldGVlci5wdXNoLkxpc3RQdXNoZXNSZXF1ZXN0Lk9yZGVyRGlyZWN0aW9uUg5vcmRlckRpcmVjdGlvbhIlCg5zZWFyY2hfa2V5d29yZBgGIAEoCVINc2VhcmNoS2V5d29yZCJACgdPcmRlckJ5EgsKB0RFRkFVTFQQABIICgROQU1FEAESDgoKQ1JFQVRFRF9BVBACEg4KClVQREFURURfQVQQAyIjCg5PcmRlckRpcmVjdGlvbhIHCgNBU0MQABIICgRERVNDEAEiewoSTGlzdFB1c2hlc1Jlc3BvbnNlEiwKBnB1c2hlcxgBIAMoCzIULmJ1Y2tldGVlci5wdXNoLlB1c2hSBnB1c2hlcxIWCgZjdXJzb3IYAiABKAlSBmN1cnNvchIfCgt0b3RhbF9jb3VudBgDIAEoA1IKdG90YWxDb3VudCKVAQoRRGVsZXRlUHVzaFJlcXVlc3
QSMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAEgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZRIOCgJpZBgCIAEoCVICaWQSOwoHY29tbWFuZBgDIAEoCzIhLmJ1Y2tldGVlci5wdXNoLkRlbGV0ZVB1c2hDb21tYW5kUgdjb21tYW5kIhQKEkRlbGV0ZVB1c2hSZXNwb25zZSLiAgoRVXBkYXRlUHVzaFJlcXVlc3QSMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAEgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZRIOCgJpZBgCIAEoCVICaWQSVQoVYWRkX3B1c2hfdGFnc19jb21tYW5kGAMgASgLMiIuYnVja2V0ZWVyLnB1c2guQWRkUHVzaFRhZ3NDb21tYW5kUhJhZGRQdXNoVGFnc0NvbW1hbmQSXgoYZGVsZXRlX3B1c2hfdGFnc19jb21tYW5kGAQgASgLMiUuYnVja2V0ZWVyLnB1c2guRGVsZXRlUHVzaFRhZ3NDb21tYW5kUhVkZWxldGVQdXNoVGFnc0NvbW1hbmQSUQoTcmVuYW1lX3B1c2hfY29tbWFuZBgFIAEoCzIhLmJ1Y2tldGVlci5wdXNoLlJlbmFtZVB1c2hDb21tYW5kUhFyZW5hbWVQdXNoQ29tbWFuZCIUChJVcGRhdGVQdXNoUmVzcG9uc2Uy6QIKC1B1c2hTZXJ2aWNlElUKCkxpc3RQdXNoZXMSIS5idWNrZXRlZXIucHVzaC5MaXN0UHVzaGVzUmVxdWVzdBoiLmJ1Y2tldGVlci5wdXNoLkxpc3RQdXNoZXNSZXNwb25zZSIAElUKCkNyZWF0ZVB1c2gSIS5idWNrZXRlZXIucHVzaC5DcmVhdGVQdXNoUmVxdWVzdBoiLmJ1Y2tldGVlci5wdXNoLkNyZWF0ZVB1c2hSZXNwb25zZSIAElUKCkRlbGV0ZVB1c2gSIS5idWNrZXRlZXIucHVzaC5EZWxldGVQdXNoUmVxdWVzdBoiLmJ1Y2tldGVlci5wdXNoLkRlbGV0ZVB1c2hSZXNwb25zZSIAElUKClVwZGF0ZVB1c2gSIS5idWNrZXRlZXIucHVzaC5VcGRhdGVQdXNoUmVxdWVzdBoiLmJ1Y2tldGVlci5wdXNoLlVwZGF0ZVB1c2hSZXNwb25zZSIAQidaJWdpdGh1Yi5jb20vY2EtZHAvYnVja2V0ZWVyL3Byb3RvL3B1c2hiBnByb3RvMw==" + userDescriptor: 
"CrkEChVwcm90by91c2VyL3VzZXIucHJvdG8SDmJ1Y2tldGVlci51c2VyIt4DCgRVc2VyEg4KAmlkGAEgASgJUgJpZBIyCgRkYXRhGAIgAygLMh4uYnVja2V0ZWVyLnVzZXIuVXNlci5EYXRhRW50cnlSBGRhdGESRQoLdGFnZ2VkX2RhdGEYAyADKAsyJC5idWNrZXRlZXIudXNlci5Vc2VyLlRhZ2dlZERhdGFFbnRyeVIKdGFnZ2VkRGF0YRIbCglsYXN0X3NlZW4YBCABKANSCGxhc3RTZWVuEh0KCmNyZWF0ZWRfYXQYBSABKANSCWNyZWF0ZWRBdBp8CgREYXRhEjoKBXZhbHVlGAEgAygLMiQuYnVja2V0ZWVyLnVzZXIuVXNlci5EYXRhLlZhbHVlRW50cnlSBXZhbHVlGjgKClZhbHVlRW50cnkSEAoDa2V5GAEgASgJUgNrZXkSFAoFdmFsdWUYAiABKAlSBXZhbHVlOgI4ARo3CglEYXRhRW50cnkSEAoDa2V5GAEgASgJUgNrZXkSFAoFdmFsdWUYAiABKAlSBXZhbHVlOgI4ARpYCg9UYWdnZWREYXRhRW50cnkSEAoDa2V5GAEgASgJUgNrZXkSLwoFdmFsdWUYAiABKAsyGS5idWNrZXRlZXIudXNlci5Vc2VyLkRhdGFSBXZhbHVlOgI4AUInWiVnaXRodWIuY29tL2NhLWRwL2J1Y2tldGVlci9wcm90by91c2VyYgZwcm90bzMK3wcKGHByb3RvL3VzZXIvc2VydmljZS5wcm90bxIOYnVja2V0ZWVyLnVzZXIaFXByb3RvL3VzZXIvdXNlci5wcm90byJeCg5HZXRVc2VyUmVxdWVzdBIXCgd1c2VyX2lkGAEgASgJUgZ1c2VySWQSMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAIgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZSI7Cg9HZXRVc2VyUmVzcG9uc2USKAoEdXNlchgBIAEoCzIULmJ1Y2tldGVlci51c2VyLlVzZXJSBHVzZXIiwgMKEExpc3RVc2Vyc1JlcXVlc3QSMwoVZW52aXJvbm1lbnRfbmFtZXNwYWNlGAEgASgJUhRlbnZpcm9ubWVudE5hbWVzcGFjZRIbCglwYWdlX3NpemUYAiABKANSCHBhZ2VTaXplEhYKBmN1cnNvchgDIAEoCVIGY3Vyc29yEkMKCG9yZGVyX2J5GAQgASgOMiguYnVja2V0ZWVyLnVzZXIuTGlzdFVzZXJzUmVxdWVzdC5PcmRlckJ5UgdvcmRlckJ5ElgKD29yZGVyX2RpcmVjdGlvbhgFIAEoDjIvLmJ1Y2tldGVlci51c2VyLkxpc3RVc2Vyc1JlcXVlc3QuT3JkZXJEaXJlY3Rpb25SDm9yZGVyRGlyZWN0aW9uEiUKDnNlYXJjaF9rZXl3b3JkGAYgASgJUg1zZWFyY2hLZXl3b3JkEhIKBGZyb20YByABKANSBGZyb20SDgoCdG8YCCABKANSAnRvIjUKB09yZGVyQnkSCwoHREVGQVVMVBAAEg4KCkNSRUFURURfQVQQARINCglMQVNUX1NFRU4QAiIjCg5PcmRlckRpcmVjdGlvbhIHCgNBU0MQABIICgRERVNDEAEiVwoRTGlzdFVzZXJzUmVzcG9uc2USKgoFdXNlcnMYASADKAsyFC5idWNrZXRlZXIudXNlci5Vc2VyUgV1c2VycxIWCgZjdXJzb3IYAiABKAlSBmN1cnNvcjKvAQoLVXNlclNlcnZpY2USTAoHR2V0VXNlchIeLmJ1Y2tldGVlci51c2VyLkdldFVzZXJSZXF1ZXN0Gh8uYnVja2V0ZWVyLnVzZXIuR2V0VXNlclJlc3BvbnNlIgASUgoJTGlzdFVzZXJzEiAuYnVja2V0ZWVyLnVzZXIuTGlzdFVzZXJzUmVxdWVzdBohLmJ1Y2tldGVlci51c2VyLkx
pc3RVc2Vyc1Jlc3BvbnNlIgBCJ1olZ2l0aHViLmNvbS9jYS1kcC9idWNrZXRlZXIvcHJvdG8vdXNlcmIGcHJvdG8z" + config: + +service: + type: LoadBalancer + loadBalancerIP: + port: 443 + +health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + +resources: {} + +affinity: {} + +nodeSelector: {} + +pdb: + enabled: + maxUnavailable: 50% + +hpa: + enabled: + minReplicas: + maxReplicas: + metrics: + cpu: + targetAverageUtilization: + +tolerations: [] diff --git a/manifests/bucketeer/charts/web/.helmignore b/manifests/bucketeer/charts/web/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/manifests/bucketeer/charts/web/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/manifests/bucketeer/charts/web/Chart.yaml b/manifests/bucketeer/charts/web/Chart.yaml new file mode 100644 index 000000000..bcf374b68 --- /dev/null +++ b/manifests/bucketeer/charts/web/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for bucketeer-web +name: web +version: 1.0.0 diff --git a/manifests/bucketeer/charts/web/templates/NOTES.txt b/manifests/bucketeer/charts/web/templates/NOTES.txt new file mode 100644 index 000000000..d34c3e721 --- /dev/null +++ b/manifests/bucketeer/charts/web/templates/NOTES.txt @@ -0,0 +1,15 @@ +1. Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "web.fullname" . 
}}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "web.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "web.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "web.name" . }},release={{ template "web.fullname" . }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/manifests/bucketeer/charts/web/templates/_helpers.tpl b/manifests/bucketeer/charts/web/templates/_helpers.tpl new file mode 100644 index 000000000..4da631b5b --- /dev/null +++ b/manifests/bucketeer/charts/web/templates/_helpers.tpl @@ -0,0 +1,40 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "web.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "web.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "web.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "cert-secret" -}} +{{- if .Values.tls.secret }} +{{- printf "%s" .Values.tls.secret -}} +{{- else -}} +{{ template "web.fullname" . }}-cert +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/manifests/bucketeer/charts/web/templates/cert-secret.yaml b/manifests/bucketeer/charts/web/templates/cert-secret.yaml new file mode 100644 index 000000000..9a27621fc --- /dev/null +++ b/manifests/bucketeer/charts/web/templates/cert-secret.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.tls.secret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "web.fullname" . }}-cert + namespace: {{ .Values.namespace }} + labels: + app: {{ template "web.name" . }} + chart: {{ template "web.chart" . }} + release: {{ template "web.fullname" . }} + heritage: {{ .Release.Service }} +type: Opaque +data: + server.crt: {{ required "TLS certificate is required" .Values.tls.cert | b64enc | quote }} + server.key: {{ required "TLS key is required" .Values.tls.key | b64enc | quote }} +{{- end}} diff --git a/manifests/bucketeer/charts/web/templates/configmap.yaml b/manifests/bucketeer/charts/web/templates/configmap.yaml new file mode 100644 index 000000000..bd912badf --- /dev/null +++ b/manifests/bucketeer/charts/web/templates/configmap.yaml @@ -0,0 +1,58 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "web.fullname" . 
}} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "web.name" . }} + chart: {{ template "web.chart" . }} + release: {{ template "web.fullname" . }} + heritage: {{ .Release.Service }} +data: + nginx.conf: |- + user nginx; + worker_processes 1; + error_log /dev/stdout debug; + pid /var/run/nginx.pid; + events { + worker_connections 10000; + multi_accept on; + } + http { + include mime.types; + default_type application/octet-stream; + access_log /dev/stdout; + sendfile on; + gzip on; + gzip_types text/css text/javascript application/javascript image/svg+xml; + gzip_min_length 1000; + gzip_proxied any; + keepalive_timeout 65; + proxy_read_timeout 5; + server_tokens off; + server { + listen 80; + return 301 https://$host$request_uri; + } + server { + listen 443 ssl http2; + root /var/www; + charset utf-8; + ssl_certificate /usr/local/certs/tls.crt; + ssl_certificate_key /usr/local/certs/tls.key; + ssl_session_cache shared:SSL:1m; + ssl_session_timeout 5m; + ssl_session_tickets off; + ssl_protocols TLSv1.2; + ssl_prefer_server_ciphers on; + ssl_ciphers 'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256'; + add_header Strict-Transport-Security max-age=15768000; + add_header Cache-Control no-store; + location = /alive { + return 200; + } + location / { + try_files $uri /index.html; + } + } + } diff --git a/manifests/bucketeer/charts/web/templates/deployment.yaml b/manifests/bucketeer/charts/web/templates/deployment.yaml new file mode 100644 index 000000000..0f540accb --- /dev/null +++ b/manifests/bucketeer/charts/web/templates/deployment.yaml @@ -0,0 +1,85 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "web.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "web.name" . 
}} + chart: {{ template "web.chart" . }} + release: {{ template "web.fullname" . }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "web.name" . }} + release: {{ template "web.fullname" . }} + template: + metadata: + labels: + app: {{ template "web.name" . }} + release: {{ template "web.fullname" . }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + spec: + {{- with .Values.global.image.imagePullSecrets }} + imagePullSecrets: {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: config + configMap: + name: {{ template "web.fullname" . }} + - name: certs + secret: + secretName: {{ template "cert-secret" . }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + lifecycle: + preStop: + exec: + command: + - "/bin/sh" + - "-c" + - "sleep 5 && /usr/sbin/nginx -s quit" + volumeMounts: + - name: config + mountPath: /var/nginx + readOnly: true + - name: certs + mountPath: /usr/local/certs + readOnly: true + ports: + - name: https + containerPort: 443 + protocol: TCP + livenessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + periodSeconds: {{ .Values.health.periodSeconds }} + failureThreshold: {{ .Values.health.failureThreshold }} + httpGet: + path: /alive + port: https + scheme: HTTPS + readinessProbe: + initialDelaySeconds: {{ .Values.health.initialDelaySeconds }} + httpGet: + path: /alive + port: https + scheme: HTTPS + resources: +{{ toYaml .Values.resources | indent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . 
| indent 8 }} + {{- end }} + strategy: + type: RollingUpdate diff --git a/manifests/bucketeer/charts/web/templates/hpa.yaml b/manifests/bucketeer/charts/web/templates/hpa.yaml new file mode 100644 index 000000000..fcbac8cb1 --- /dev/null +++ b/manifests/bucketeer/charts/web/templates/hpa.yaml @@ -0,0 +1,19 @@ +{{ if .Values.hpa.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "web.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "web.fullname" . }} + minReplicas: {{ .Values.hpa.minReplicas }} + maxReplicas: {{ .Values.hpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.hpa.metrics.cpu.targetAverageUtilization }} +{{ end }} diff --git a/manifests/bucketeer/charts/web/templates/pdb.yaml b/manifests/bucketeer/charts/web/templates/pdb.yaml new file mode 100644 index 000000000..39b6b99e9 --- /dev/null +++ b/manifests/bucketeer/charts/web/templates/pdb.yaml @@ -0,0 +1,12 @@ +{{ if .Values.pdb.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "web.fullname" . }} + namespace: {{ .Values.namespace }} +spec: + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + selector: + matchLabels: + app: {{ template "web.name" . }} +{{ end }} diff --git a/manifests/bucketeer/charts/web/templates/service.yaml b/manifests/bucketeer/charts/web/templates/service.yaml new file mode 100644 index 000000000..c0316c5cd --- /dev/null +++ b/manifests/bucketeer/charts/web/templates/service.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "web.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ template "web.name" . }} + chart: {{ template "web.chart" . }} + release: {{ template "web.fullname" . 
}} + heritage: {{ .Release.Service }} +spec: + type: {{ .Values.service.type }} + {{- if .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + ports: + - port: {{ .Values.service.port }} + targetPort: https + protocol: TCP + name: https + selector: + app: {{ template "web.name" . }} + release: {{ template "web.fullname" . }} diff --git a/manifests/bucketeer/charts/web/values.yaml b/manifests/bucketeer/charts/web/values.yaml new file mode 100644 index 000000000..0de3966d7 --- /dev/null +++ b/manifests/bucketeer/charts/web/values.yaml @@ -0,0 +1,58 @@ +image: + repository: ghcr.io/bucketeer-io/bucketeer-web-v2 + pullPolicy: Always + +fullnameOverride: "web" + +namespace: + +nginx: + config: + +tls: + secret: + cert: + key: + +service: + type: ClusterIP + clusterIP: None + port: 443 + +health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + +resources: + {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +affinity: {} + +nodeSelector: {} + +pdb: + enabled: + maxUnavailable: 50% + +hpa: + enabled: + minReplicas: + maxReplicas: + metrics: + cpu: + targetAverageUtilization: + memory: + targetAverageUtilization: + +tolerations: [] diff --git a/manifests/bucketeer/values.yaml b/manifests/bucketeer/values.yaml new file mode 100644 index 000000000..a01c139bb --- /dev/null +++ b/manifests/bucketeer/values.yaml @@ -0,0 +1,2437 @@ +global: + image: + tag: + imagePullSecrets: + project: + tls: + cert: + key: + druid: + enabled: true + kafka: + enabled: true + +account: + image: + repository: ghcr.io/bucketeer-io/bucketeer-account + pullPolicy: IfNotPresent + fullnameOverride: "account" + namespace: default + env: + project: + mysqlUser: + mysqlPass: + mysqlHost: + mysqlPort: 3306 + mysqlDbName: + topic: bucketeer-domain-events + environmentService: localhost:9001 + logLevel: info + port: 9090 + metricsPort: 9002 + affinity: {} + nodeSelector: {} + pdb: + enabled: + maxUnavailable: 50% + hpa: + enabled: false + namespace: + minReplicas: + maxReplicas: + metrics: {} + tls: + service: + secret: + cert: + key: + serviceToken: + secret: + token: + oauth: + key: + secret: + public: + clientId: + issuer: + envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + resources: {} + +account-apikey-cacher: + image: + repository: ghcr.io/bucketeer-io/bucketeer-account + pullPolicy: IfNotPresent + fullnameOverride: "account-apikey-cacher" + namespace: default + env: + project: + accountService: localhost:9001 + environmentService: localhost:9001 + maxMps: "1000" + numWorkers: 2 + flushSize: 100 + flushInterval: 2s + pullerNumGoroutines: 5 + 
pullerMaxOutstandingMessages: "1000" + pullerMaxOutstandingBytes: "1000000000" + redis: + serverName: bucketeer-redis + addr: + logLevel: info + port: 9090 + metricsPort: 9002 + topic: bucketeer-domain-events + subscription: bucketeer-domain-events-account-apikey-cacher + affinity: {} + nodeSelector: {} + hpa: + enabled: false + namespace: + minReplicas: + maxReplicas: + metrics: {} + tls: + service: + secret: + cert: + key: + serviceToken: + secret: + token: + envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + resources: {} + +api-gateway: + image: + repository: ghcr.io/bucketeer-io/bucketeer-gateway + pullPolicy: IfNotPresent + fullnameOverride: "api-gateway" + namespace: default + env: + project: + bigtableInstance: bucketeer-cbt + goalTopic: bucketeer-goal-events + goalBatchTopic: bucketeer-goal-batch-events + evaluationTopic: bucketeer-evaluation-events + userTopic: bucketeer-user-events + metricsTopic: bucketeer-metrics-events + publishNumGoroutines: 200 + publishTimeout: 1m + redis: + serverName: bucketeer-redis + addr: bucketeer-redis.bucketeer.private:6379 + poolMaxIdle: 50 + poolMaxActive: 200 + oldestEventTimestamp: "168h" + furthestEventTimestamp: + logLevel: info + port: 9090 + metricsPort: 9002 + featureService: localhost:9001 + accountService: localhost:9001 + traceSamplingProbability: 0.0001 + affinity: {} + nodeSelector: {} + pdb: + enabled: + maxUnavailable: 20% + hpa: + enabled: false + namespace: + minReplicas: + maxReplicas: + metrics: {} + tls: + bucketeerJP: + secrets: + service: + secret: + cert: + key: + serviceToken: + secret: + token: + envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + 
service: + externalPort: 9000 + ingress: + host: + staticIPName: + health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + resources: {} + +auditlog: + image: + repository: ghcr.io/bucketeer-io/bucketeer-auditlog + pullPolicy: IfNotPresent + fullnameOverride: "auditlog" + namespace: default + env: + project: + mysqlUser: + mysqlPass: + mysqlHost: + mysqlPort: 3306 + mysqlDbName: + accountService: localhost:9001 + logLevel: info + port: 9090 + metricsPort: 9002 + affinity: {} + nodeSelector: {} + pdb: + enabled: + maxUnavailable: 50% + hpa: + enabled: false + namespace: + minReplicas: + maxReplicas: + metrics: {} + tls: + service: + secret: + cert: + key: + oauth: + key: + secret: + public: + clientId: + issuer: + serviceToken: + secret: + token: + envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + resources: {} + +auditlog-persister: + image: + repository: ghcr.io/bucketeer-io/bucketeer-auditlog + pullPolicy: IfNotPresent + fullnameOverride: "auditlog-persister" + namespace: default + env: + project: + mysqlUser: + mysqlPass: + mysqlHost: + mysqlPort: 3306 + mysqlDbName: + topic: bucketeer-domain-events + subscription: bucketeer-domain-events-auditlog-persister + maxMps: "1000" + numWorkers: 2 + flushSize: 100 + flushInterval: 2s + pullerNumGoroutines: 5 + pullerMaxOutstandingMessages: "1000" + pullerMaxOutstandingBytes: "1000000000" + logLevel: info + port: 9090 + metricsPort: 9002 + affinity: {} + nodeSelector: {} + hpa: + enabled: false + namespace: + minReplicas: + maxReplicas: + metrics: {} + envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + tls: + service: + secret: + cert: + key: + 
service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + resources: {} + +auth: + image: + repository: ghcr.io/bucketeer-io/bucketeer-auth + pullPolicy: IfNotPresent + fullnameOverride: "auth" + namespace: default + env: + accountService: localhost:9001 + emailFilter: "^[a-zA-Z0-9_.+-]+@([a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]*\\.)+[a-zA-Z]{2,}$" + logLevel: info + port: 9090 + metricsPort: 9002 + affinity: {} + nodeSelector: {} + pdb: + enabled: + maxUnavailable: 50% + hpa: + enabled: false + namespace: + minReplicas: + maxReplicas: + metrics: {} + tls: + service: + secret: + cert: + key: + issuer: + secret: + cert: + oauth: + key: + secret: + private: + clientId: + clientSecret: + redirectUrls: + issuer: + serviceToken: + secret: + token: + + webhook: + kmsResourceName: + envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + resources: {} + +auto-ops: + image: + repository: ghcr.io/bucketeer-io/bucketeer-auto-ops + pullPolicy: IfNotPresent + fullnameOverride: "auto-ops" + namespace: default + env: + project: + mysqlUser: + mysqlPass: + mysqlHost: + mysqlPort: 3306 + mysqlDbName: + logLevel: info + port: 9090 + metricsPort: 9002 + domainEventTopic: bucketeer-domain-events + accountService: localhost:9001 + featureService: localhost:9001 + experimentService: localhost:9001 + authService: localhost:9001 + webhook: + baseURL: + kmsResourceName: + affinity: {} + nodeSelector: {} + pdb: + enabled: + maxUnavailable: 50% + hpa: + enabled: false + minReplicas: + maxReplicas: + metrics: {} + tls: + service: + secret: + cert: + key: + oauth: + key: + secret: + public: + clientId: + issuer: + serviceToken: + secret: + token: + envoy: + image: + 
repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + resources: {} + +calculator: + replicaCount: 1 + image: + repository: ghcr.io/bucketeer-io/bucketeer-calculator + pullPolicy: IfNotPresent + fullnameOverride: "calculator" + namespace: default + env: + project: + mysqlUser: + mysqlPass: + mysqlHost: + mysqlPort: 3306 + mysqlDbName: + environmentService: localhost:9001 + experimentService: localhost:9001 + eventCounterService: localhost:9001 + logLevel: info + port: 9090 + metricsPort: 9002 + traceSamplingProbability: 0.001 + affinity: {} + nodeSelector: + hpa: + enabled: false + namespace: + minReplicas: + maxReplicas: + metrics: {} + envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + tls: + service: + secret: + cert: + key: + serviceToken: + secret: + token: + service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + health: + periodSeconds: 10 + failureThreshold: 10 + # It is necessary to wait for the model compilation to be done. + # The duration is up to resources. + # cf. 
CPU: 500m, MEM: 3Gi -> 280sec + initialDelaySeconds: + resources: {} + +dex: + replicaCount: 1 + image: + repository: dexidp/dex + tag: v2.27.0 + pullPolicy: IfNotPresent + fullnameOverride: "dex" + namespace: default + affinity: {} + nodeSelector: {} + pdb: + enabled: + maxUnavailable: 50% + service: + name: dex + type: ClusterIP + clusterIP: None + externalPort: 9000 + internalPort: 9000 + dexPort: 5556 + tls: + cert: + key: + envoy: + adminPort: 8001 + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + resources: + limits: + cpu: 50m + memory: 64Mi + requests: + cpu: 50m + memory: 64Mi + health: + initialDelaySeconds: 10 + resources: {} + config: + issuer: + client: + id: bucketeer + name: Bucketeer + secret: + redirectURIs: + google: + issuer: + clientID: + clientSecret: + redirectURIs: + +druid: + druid-cluster: + namespace: default + spec: + imagePullSecrets: + image: ghcr.io/bucketeer-io/druid:0.5.0 + nodeSelector: {} + jvmOptions: |- + -server + -XX:MaxDirectMemorySize=10240g + -Duser.timezone=UTC + -Dfile.encoding=UTF-8 + -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager + -Dorg.jboss.logging.provider=slf4j + -Dnet.spy.log.LoggerImpl=net.spy.memcached.compat.log.SLF4JLogger + -Dlog4j.shutdownCallbackRegistry=org.apache.druid.common.config.Log4jShutdown + -Dlog4j.shutdownHookEnabled=true + -XX:+UseG1GC + -XX:MaxGCPauseMillis=200 + -XX:+ExitOnOutOfMemoryError + log4jConfig: |- + + + + + + + + + + + + + + commonRuntimeProperties: | + druid.indexing.doubleStorage=double + # Extensions + druid.extensions.loadList=["druid-basic-security","druid-datasketches","druid-distinctcount","druid-google-extensions","druid-kafka-indexing-service","druid-stats","mysql-metadata-storage"] + # Service discovery + druid.router.defaultBrokerServiceName=druid/broker + druid.selectors.indexing.serviceName=druid/overlord + druid.selectors.coordinator.serviceName=druid/coordinator + druid.sql.enable=true + # 
Authenticator + druid.auth.authenticatorChain=["BasicMetadataAuthenticator"] + druid.auth.authenticator.BasicMetadataAuthenticator.type=basic + druid.auth.authenticator.BasicMetadataAuthenticator.initialAdminPassword=password + druid.auth.authenticator.BasicMetadataAuthenticator.initialInternalClientPassword=password + druid.auth.authenticator.BasicMetadataAuthenticator.credentialsValidator.type=metadata + druid.auth.authenticator.BasicMetadataAuthenticator.skipOnFailure=false + druid.auth.authenticator.BasicMetadataAuthenticator.authorizerName=BasicMetadataAuthorizer + # Escalator + druid.escalator.type=basic + druid.escalator.internalClientUsername=username + druid.escalator.internalClientPassword=password + druid.escalator.authorizerName=BasicMetadataAuthorizer + # Authorizer + druid.auth.authorizers=["BasicMetadataAuthorizer"] + druid.auth.authorizer.BasicMetadataAuthorizer.type=basic + # Monitoring + druid.monitoring.monitors=[] + druid.emitter.http.recipientBaseUrl=http://druid-exporter.monitoring.svc.cluster.local:8080/druid + druid.emitter=http + deepStorage: + spec: + properties: |- + druid.storage.type=google + druid.google.bucket=example-druid-deep-storage + type: default + metadataStore: + spec: + properties: |- + druid.metadata.storage.type=mysql + druid.metadata.storage.connector.connectURI=jdbc:mysql://druid-mysql.example.com/druid + druid.metadata.storage.connector.user=user + druid.metadata.storage.connector.password=password + druid.metadata.storage.connector.createTables=true + type: default + zookeeper: + spec: + properties: |- + druid.zk.service.host=druid-zookeeper-client.druid.svc.cluster.local + druid.zk.paths.base=/druid + type: default + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /var/secrets/google/token + volumeMounts: + - name: google-cloud-key + mountPath: /var/secrets/google + volumes: + - name: google-cloud-key + secret: + secretName: druid-gcp-sa-key + nodes: + brokers: + runtimeProperties: | + druid.service=druid/broker 
+ # HTTP server threads + druid.broker.http.numConnections=5 + druid.server.http.numThreads=10 + # Processing threads and buffers + druid.processing.buffer.sizeBytes=1000 + druid.processing.numMergeBuffers=1 + druid.processing.numThreads=1 + # Monitoring + druid.monitoring.monitors=["org.apache.druid.server.metrics.QueryCountStatsMonitor"] + extraJvmOptions: |- + -Xmx1G + -Xms1G + resources: {} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + nodeSpecUniqueStr: druid-cluster-brokers + podDisruptionBudgetSpec: + maxUnavailable: 1 + hpAutoscaler: + minReplicas: 1 + maxReplicas: 1 + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 50 + coordinators: + runtimeProperties: | + druid.service=druid/coordinator + # HTTP server threads + druid.coordinator.startDelay=PT30S + druid.coordinator.period=PT30S + # Configure this coordinator to also run as Overlord + druid.coordinator.asOverlord.enable=false + # druid.coordinator.asOverlord.overlordService=druid/overlord + druid.indexer.queue.startDelay=PT30S + druid.indexer.runner.type=local + extraJvmOptions: |- + -Xmx1G + -Xms1G + resources: {} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + nodeSpecUniqueStr: druid-cluster-coordinators + podDisruptionBudgetSpec: + maxUnavailable: 1 + hpAutoscaler: + minReplicas: 2 + maxReplicas: 2 + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 60 + overlords: + nodeConfigMountPath: /opt/druid/conf/druid/cluster/master/overlord + runtimeProperties: |- + druid.service=druid/overlord + druid.indexer.queue.startDelay=PT30S + druid.indexer.runner.type=remote + druid.indexer.storage.type=metadata + # Monitoring + druid.monitoring.monitors=["org.apache.druid.server.metrics.TaskCountStatsMonitor"] + extraJvmOptions: |- + 
-Xmx4G + -Xms4G + resources: {} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + nodeSpecUniqueStr: druid-cluster-overlords + podDisruptionBudgetSpec: + maxUnavailable: 1 + hpAutoscaler: + maxReplicas: 2 + minReplicas: 2 + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 60 + - type: Resource + resource: + name: memory + targetAverageUtilization: 60 + historicals: + runtimeProperties: | + druid.service=druid/historical + druid.server.http.numThreads=5 + # Memory tuning and resource limits for groupBy v2 + druid.processing.buffer.sizeBytes=1000 + druid.query.groupBy.maxOnDiskStorage=100000 + druid.processing.numMergeBuffers=1 + druid.processing.numThreads=1 + # Segment storage + druid.segmentCache.locations=[{\"path\":\"/druid/data/segments\",\"maxSize\":10737418240}] + druid.server.maxSize=10737418240 + # Monitoring + druid.monitoring.monitors=["org.apache.druid.server.metrics.HistoricalMetricsMonitor"] + extraJvmOptions: |- + -Xmx1G + -Xms1G + volumeClaimTemplates: {} + volumeMounts: + - mountPath: /druid/data + name: data-volume + resources: {} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + nodeSpecUniqueStr: druid-cluster-historicals + podDisruptionBudgetSpec: + maxUnavailable: 1 + hpAutoscaler: + maxReplicas: 1 + minReplicas: 1 + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 60 + - type: Resource + resource: + name: memory + targetAverageUtilization: 60 + middlemanagers: + runtimeProperties: | + druid.service=druid/middleManager + druid.worker.capacity=3 + druid.server.http.numThreads=10 + druid.processing.buffer.sizebytes=536870912 + # Resources for peons + druid.indexer.runner.javaOpts=-server -Xms1G -Xmx1G -XX:MaxDirectMemorySize=10g -Duser.timezone=UTC -Dfile.encoding=UTF-8 
-Djava.io.tmpdir=/druid/data/tmp -XX:+UnlockDiagnosticVMOptions -XX:+PrintSafepointStatistics -XX:PrintSafepointStatisticsCount=1 -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCApplicationStoppedTime -XX:+PrintGCApplicationConcurrentTime -XX:+ExitOnOutOfMemoryError -XX:+HeapDumpOnOutOfMemoryError -XX:+UseG1GC + druid.indexer.task.baseTaskDir=/druid/data/baseTaskDir + # Peon properties + druid.indexer.fork.property.druid.processing.numThreads=1 + druid.indexer.fork.property.druid.processing.numMergeBuffers=2 + druid.indexer.fork.property.druid.processing.buffer.sizeBytes=536870912 + extraJvmOptions: |- + -Xmx4G + -Xms4G + volumeClaimTemplates: + - metadata: + name: data-volume + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi + storageClassName: standard + resources: {} + podDisruptionBudgetSpec: + maxUnavailable: 1 + hpAutoscaler: + maxReplicas: 1 + minReplicas: 1 + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 60 + - type: Resource + resource: + name: memory + targetAverageUtilization: 60 + routers: + nodeConfigMountPath: /opt/druid/conf/druid/cluster/query/router + runtimeProperties: | + druid.service=druid/router + druid.plaintextPort=8888 + # HTTP proxy + druid.router.http.numConnections=50 + druid.router.http.readTimeout=PT5M + druid.router.http.numMaxThreads=100 + druid.server.http.numThreads=100 + # Service discovery + druid.router.defaultBrokerServiceName=druid/broker + druid.router.coordinatorServiceName=druid/coordinator + # Management proxy to coordinator / overlord: required for unified web console. 
+ druid.router.managementProxy.enabled=true + extraJvmOptions: |- + -Xmx512m + -Xms512m + resources: {} + affinity: {} + + druid-operator: + fullnameOverride: druid-operator + namespace: default + image: + repository: druidio/druid-operator + tag: 0.0.4 + nodeSelector: {} + resources: {} + + zookeeper-operator: + global: + imagePullSecrets: + namespace: default + fullnameOverride: "druid-zookeeper-operator" + annotations: + "helm.sh/resource-policy": keep + image: + repository: ghcr.io/bucketeer-io/pravega/zookeeper-operator + tag: 0.2.9-13 + watchNamespace: "druid" + resources: {} + nodeSelector: {} + + zookeeper: + namespace: default + fullnameOverride: "druid-zookeeper" + replicas: 3 + domainName: + labels: {} + ports: [] + kubernetesClusterDomain: "cluster.local" + probes: + readiness: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 3 + successThreshold: 1 + timeoutSeconds: 10 + liveness: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 3 + timeoutSeconds: 10 + pod: + nodeSelector: {} + affinity: {} + resources: {} + env: + - name: SERVER_JVMFLAGS + value: "-Djute.maxbuffer=10485760" + annotations: + "helm.sh/resource-policy": keep + terminationGracePeriodSeconds: 30 + serviceAccountName: zookeeper + config: {} + storageType: persistence + persistence: + storageClassName: standard + reclaimPolicy: Retain + volumeSize: 5Gi + ephemeral: + emptydirvolumesource: + medium: "" + sizeLimit: 20Gi + hooks: + image: + repository: lachlanevenson/k8s-kubectl + tag: v1.16.10 + backoffLimit: 10 + containers: [] + volumes: [] + +environment: + image: + repository: ghcr.io/bucketeer-io/bucketeer-environment + pullPolicy: IfNotPresent + fullnameOverride: "environment" + namespace: default + env: + project: + mysqlUser: + mysqlPass: + mysqlHost: + mysqlPort: 3306 + mysqlDbName: + defaultDataset: bucketeer + location: us-central1 + domainEventTopic: bucketeer-domain-events + accountService: localhost:9001 + logLevel: info + port: 9090 + 
metricsPort: 9002 + affinity: {} + nodeSelector: {} + pdb: + enabled: + maxUnavailable: 50% + hpa: + enabled: false + namespace: + minReplicas: + maxReplicas: + metrics: {} + tls: + service: + secret: + cert: + key: + oauth: + key: + secret: + public: + clientId: + issuer: + serviceToken: + secret: + token: + envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + resources: {} + +event-counter: + image: + repository: ghcr.io/bucketeer-io/bucketeer-event-counter + pullPolicy: IfNotPresent + fullnameOverride: "event-counter" + namespace: default + env: + project: + experimentService: localhost:9001 + featureService: localhost:9001 + accountService: localhost:9001 + druidUrl: druid-cluster-brokers.default.svc.cluster.local:8088 + druidDatasourcePrefix: bucketeer + druidUsername: + druidPassword: + mysqlUser: + mysqlPass: + mysqlHost: + mysqlPort: 3306 + mysqlDbName: + port: 9090 + metricsPort: 9002 + affinity: {} + nodeSelector: {} + pdb: + enabled: + maxUnavailable: 50% + hpa: + enabled: false + namespace: + minReplicas: + maxReplicas: + metrics: {} + tls: + service: + secret: + cert: + key: + serviceToken: + secret: + token: + oauth: + key: + secret: + public: + clientId: + issuer: + envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + resources: {} + +event-persister-evaluation-events-kafka: + image: + repository: ghcr.io/bucketeer-io/bucketeer-event-persister + pullPolicy: IfNotPresent + nameOverride: "event-persister" + fullnameOverride: 
"event-persister-evaluation-events-kafka" + namespace: default + env: + project: + featureService: localhost:9001 + bigtableInstance: bucketeer + location: us-central1 + topic: bucketeer-evaluation-events + subscription: bucketeer-evaluation-events-event-persister-kafka + writer: kafka + kafkaUrl: kafka-kafka-bootstrap.default.svc.cluster.local:9092 + kafkaTopicPrefix: bucketeer + kafkaTopicDataType: evaluation-events + kafkaUsername: + kafkaPassword: + logLevel: info + port: 9090 + metricsPort: 9002 + maxMps: "1000" + numWorkers: 5 + numWriters: 2 + flushSize: 100 + flushInterval: 2s + redis: + serverName: bucketeer-redis + addr: bucketeer-redis.bucketeer.private:6379 + pullerNumGoroutines: 5 + pullerMaxOutstandingMessages: "1000" + pullerMaxOutstandingBytes: "1000000000" + alloyDBRegion: + alloyDBClusterId: + alloyDBInstanceId: + alloyDBUser: + alloyDBPass: + alloyDBName: + affinity: {} + nodeSelector: {} + hpa: + enabled: false + namespace: + minReplicas: + maxReplicas: + metrics: {} + envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + tls: + service: + secret: + cert: + key: + serviceToken: + secret: + token: + health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + resources: {} + service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + +event-persister-goal-events-kafka: + image: + repository: ghcr.io/bucketeer-io/bucketeer-event-persister + pullPolicy: IfNotPresent + nameOverride: "event-persister" + fullnameOverride: "event-persister-goal-events-kafka" + namespace: default + env: + project: + featureService: localhost:9001 + bigtableInstance: bucketeer + location: us-central1 + topic: bucketeer-goal-events + subscription: bucketeer-goal-events-event-persister-kafka + writer: kafka + kafkaUrl: kafka-kafka-bootstrap.default.svc.cluster.local:9092 + kafkaTopicPrefix: bucketeer + kafkaTopicDataType: goal-events + 
kafkaUsername: + kafkaPassword: + logLevel: info + port: 9090 + metricsPort: 9002 + maxMps: "1000" + numWorkers: 5 + numWriters: 2 + flushSize: 100 + flushInterval: 2s + redis: + serverName: bucketeer-redis + addr: bucketeer-redis.bucketeer.private:6379 + pullerNumGoroutines: 5 + pullerMaxOutstandingMessages: "1000" + pullerMaxOutstandingBytes: "1000000000" + alloyDBRegion: + alloyDBClusterId: + alloyDBInstanceId: + alloyDBUser: + alloyDBPass: + alloyDBName: + affinity: {} + nodeSelector: {} + hpa: + enabled: false + namespace: + minReplicas: + maxReplicas: + metrics: {} + envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + tls: + service: + secret: + cert: + key: + serviceToken: + secret: + token: + health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + resources: {} + service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + +event-persister-user-events-kafka: + image: + repository: ghcr.io/bucketeer-io/bucketeer-event-persister + pullPolicy: IfNotPresent + nameOverride: "event-persister" + fullnameOverride: "event-persister-user-events-kafka" + namespace: default + env: + project: + featureService: localhost:9001 + bigtableInstance: bucketeer-cbt + location: us-central1-a + table: user-events + subscription: bucketeer-user-events-event-persister-kafka + writer: kafka + kafkaUrl: kafka-kafka-bootstrap.default.svc.cluster.local:9092 + kafkaTopicPrefix: bucketeer + kafkaTopicDataType: user-events + kafkaUsername: + kafkaPassword: + logLevel: info + port: 9090 + metricsPort: 9002 + maxMps: "1000" + numWorkers: 5 + numWriters: 2 + flushSize: 100 + flushInterval: 2s + redis: + serverName: bucketeer-redis + addr: bucketeer-redis.bucketeer.private:6379 + pullerNumGoroutines: 5 + pullerMaxOutstandingMessages: "1000" + pullerMaxOutstandingBytes: "1000000000" + alloyDBRegion: + alloyDBClusterId: + alloyDBInstanceId: + alloyDBUser: + 
alloyDBPass: + alloyDBName: + affinity: {} + nodeSelector: {} + hpa: + enabled: false + namespace: + minReplicas: + maxReplicas: + metrics: {} + envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + tls: + service: + secret: + cert: + key: + serviceToken: + secret: + token: + health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + resources: {} + service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + +experiment: + image: + repository: ghcr.io/bucketeer-io/bucketeer-experiment + pullPolicy: IfNotPresent + fullnameOverride: "experiment" + namespace: default + env: + project: + mysqlUser: + mysqlPass: + mysqlHost: + mysqlPort: 3306 + mysqlDbName: + logLevel: info + port: 9090 + metricsPort: 9002 + topic: bucketeer-domain-events + featureService: localhost:9001 + accountService: localhost:9001 + affinity: {} + nodeSelector: {} + pdb: + enabled: + maxUnavailable: 50% + hpa: + enabled: false + namespace: + minReplicas: + maxReplicas: + metrics: {} + tls: + service: + secret: + cert: + key: + oauth: + key: + secret: + public: + clientId: + issuer: + serviceToken: + secret: + token: + envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + resources: {} + +feature: + image: + repository: ghcr.io/bucketeer-io/bucketeer-feature + pullPolicy: IfNotPresent + fullnameOverride: "feature" + namespace: default + env: + project: + database: bucketeer + mysqlUser: + mysqlPass: + mysqlHost: + mysqlPort: 3306 + mysqlDbName: + bigtableInstance: bucketeer-cbt + accountService: localhost:9001 + experimentService: localhost:9001 + redis: + serverName: bucketeer-redis + poolMaxIdle: 50 + poolMaxActive: 200 + 
addr: + logLevel: info + port: 9090 + metricsPort: 9002 + bulkSegmentUsersReceivedEventTopic: bucketeer-bulk-segment-users-received-events + domainEventTopic: bucketeer-domain-events + affinity: {} + nodeSelector: {} + pdb: + enabled: + maxUnavailable: 50% + hpa: + enabled: false + namespace: + minReplicas: + maxReplicas: + metrics: {} + tls: + service: + secret: + cert: + key: + serviceToken: + secret: + token: + oauth: + key: + secret: + public: + clientId: + issuer: + envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + resources: {} + +feature-recorder: + image: + repository: ghcr.io/bucketeer-io/bucketeer-feature + pullPolicy: IfNotPresent + fullnameOverride: "feature-recorder" + namespace: default + env: + project: + featureService: localhost:9001 + database: bucketeer + mysqlUser: + mysqlPass: + mysqlHost: + mysqlPort: 3306 + mysqlDbName: + topic: bucketeer-evaluation-events + subscription: bucketeer-evaluation-events-event-feature-recorder + maxMps: "1000" + numWorkers: "2" + pullerNumGoroutines: "5" + pullerMaxOutstandingMessages: "1000" + pullerMaxOutstandingBytes: "1000000000" + flushInterval: 1m + logLevel: info + port: 9090 + metricsPort: 9002 + affinity: {} + nodeSelector: {} + vpa: + enabled: false + namespace: + updateMode: + resourcePolicy: {} + envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + tls: + service: + secret: + cert: + key: + serviceToken: + secret: + token: + service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + resources: {} + +feature-segment-persister: + image: + repository: 
ghcr.io/bucketeer-io/bucketeer-feature + pullPolicy: IfNotPresent + fullnameOverride: "feature-segment-persister" + namespace: default + env: + project: + mysqlUser: + mysqlPass: + mysqlHost: + mysqlPort: 3306 + mysqlDbName: + bulkSegmentUsersReceivedEventTopic: bucketeer-bulk-segment-users-received-events + bulkSegmentUsersReceivedEventSubscription: bucketeer-bulk-segment-users-received-events-feature-segment-persister + domainEventTopic: bucketeer-domain-events + maxMps: "100" + numWorkers: 2 + flushSize: 2 + flushInterval: 10s + pullerNumGoroutines: 5 + pullerMaxOutstandingMessages: "1000" + pullerMaxOutstandingBytes: "1000000000" + redis: + serverName: non-persistent-redis + poolMaxIdle: 50 + poolMaxActive: 200 + addr: + logLevel: info + port: 9090 + metricsPort: 9002 + affinity: {} + nodeSelector: {} + hpa: + enabled: false + namespace: + minReplicas: + maxReplicas: + metrics: {} + tls: + service: + secret: + cert: + key: + envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + resources: {} + +feature-tag-cacher: + image: + repository: ghcr.io/bucketeer-io/bucketeer-feature + pullPolicy: IfNotPresent + fullnameOverride: "feature-tag-cacher" + namespace: default + env: + project: + featureService: localhost:9001 + maxMps: "1000" + numWorkers: 2 + flushSize: 100 + flushInterval: 2s + pullerNumGoroutines: 5 + pullerMaxOutstandingMessages: "1000" + pullerMaxOutstandingBytes: "1000000000" + redis: + serverName: bucketeer-redis + addr: bucketeer-redis.bucketeer.private:6379 + logLevel: info + port: 9090 + metricsPort: 9002 + topic: bucketeer-domain-events + subscription: bucketeer-domain-events-tag-cacher + affinity: {} + nodeSelector: {} + hpa: + enabled: false + namespace: + minReplicas: + maxReplicas: + metrics: {} + 
tls: + service: + secret: + cert: + key: + serviceToken: + secret: + token: + envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + resources: {} + +goal-batch-transformer: + image: + repository: ghcr.io/bucketeer-io/bucketeer-goal-batch + pullPolicy: IfNotPresent + fullnameOverride: "goal-batch-transformer" + namespace: default + env: + port: 9090 + metricsPort: 9002 + project: + featureService: localhost:9001 + userService: localhost:9001 + goalBatchTopic: bucketeer-goal-batch-events + goalBatchSubscription: bucketeer-goal-batch-events-goal-batch-transformer + goalTopic: bucketeer-goal-events + maxMps: 100 + numWorkers: 10 + pullerNumGoroutines: "5" + pullerMaxOutstandingMessages: "1000" + pullerMaxOutstandingBytes: "1000000000" + logLevel: info + affinity: {} + nodeSelector: {} + hpa: + enabled: false + namespace: + minReplicas: + maxReplicas: + metrics: {} + envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + tls: + service: + secret: + cert: + key: + serviceToken: + secret: + token: + service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + resources: {} + +kafka: + kafka-cluster: + namespace: default + metadata: + name: kafka + spec: + kafka: + version: 2.8.0 + replicas: 1 + resources: {} + jvmOptions: {} + config: + auto.create.topics.enable: "false" + offsets.topic.replication.factor: 1 + transaction.state.log.replication.factor: 1 + transaction.state.log.min.isr: 1 + log.retention.hours: 12 + storage: {} + rack: + # This will be deprecated from 0.17.0. Instead, Use topology.kubernetes.io/zone. 
+ topologyKey: failure-domain.beta.kubernetes.io/zone + affinity: {} + metrics: + # Inspired by config from Kafka 2.0.0 example rules: + # https://github.com/prometheus/jmx_exporter/blob/master/example_configs/kafka-2_0_0.yml + lowercaseOutputName: true + rules: + # Special cases and very specific rules + - pattern: kafka.server<>Value + name: kafka_server_$1_$2 + type: GAUGE + labels: + clientId: "$3" + topic: "$4" + partition: "$5" + - pattern: kafka.server<>Value + name: kafka_server_$1_$2 + type: GAUGE + labels: + clientId: "$3" + broker: "$4:$5" + # Some percent metrics use MeanRate attribute + # Ex) kafka.server<>MeanRate + - pattern: kafka.(\w+)<>MeanRate + name: kafka_$1_$2_$3_percent + type: GAUGE + # Generic gauges for percents + - pattern: kafka.(\w+)<>Value + name: kafka_$1_$2_$3_percent + type: GAUGE + - pattern: kafka.(\w+)<>Value + name: kafka_$1_$2_$3_percent + type: GAUGE + labels: + "$4": "$5" + # Generic per-second counters with 0-2 key/value pairs + - pattern: kafka.(\w+)<>Count + name: kafka_$1_$2_$3_total + type: COUNTER + labels: + "$4": "$5" + "$6": "$7" + - pattern: kafka.(\w+)<>Count + name: kafka_$1_$2_$3_total + type: COUNTER + labels: + "$4": "$5" + - pattern: kafka.(\w+)<>Count + name: kafka_$1_$2_$3_total + type: COUNTER + # Generic gauges with 0-2 key/value pairs + - pattern: kafka.(\w+)<>Value + name: kafka_$1_$2_$3 + type: GAUGE + labels: + "$4": "$5" + "$6": "$7" + - pattern: kafka.(\w+)<>Value + name: kafka_$1_$2_$3 + type: GAUGE + labels: + "$4": "$5" + - pattern: kafka.(\w+)<>Value + name: kafka_$1_$2_$3 + type: GAUGE + # Emulate Prometheus 'Summary' metrics for the exported 'Histogram's. + # Note that these are missing the '_sum' metric! 
+ - pattern: kafka.(\w+)<>Count + name: kafka_$1_$2_$3_count + type: COUNTER + labels: + "$4": "$5" + "$6": "$7" + - pattern: kafka.(\w+)<>(\d+)thPercentile + name: kafka_$1_$2_$3 + type: GAUGE + labels: + "$4": "$5" + "$6": "$7" + quantile: "0.$8" + - pattern: kafka.(\w+)<>Count + name: kafka_$1_$2_$3_count + type: COUNTER + labels: + "$4": "$5" + - pattern: kafka.(\w+)<>(\d+)thPercentile + name: kafka_$1_$2_$3 + type: GAUGE + labels: + "$4": "$5" + quantile: "0.$6" + - pattern: kafka.(\w+)<>Count + name: kafka_$1_$2_$3_count + type: COUNTER + - pattern: kafka.(\w+)<>(\d+)thPercentile + name: kafka_$1_$2_$3 + type: GAUGE + labels: + quantile: "0.$4" + zookeeper: + replicas: + resources: {} + affinity: {} + jvmOptions: {} + storage: {} + metrics: + # Inspired by Zookeeper rules + # https://github.com/prometheus/jmx_exporter/blob/master/example_configs/zookeeper.yaml + lowercaseOutputName: true + rules: + # replicated Zookeeper + - pattern: "org.apache.ZooKeeperService<>(\\w+)" + name: "zookeeper_$2" + type: GAUGE + - pattern: "org.apache.ZooKeeperService<>(\\w+)" + name: "zookeeper_$3" + type: GAUGE + labels: + replicaId: "$2" + - pattern: "org.apache.ZooKeeperService<>(Packets\\w+)" + name: "zookeeper_$4" + type: COUNTER + labels: + replicaId: "$2" + memberType: "$3" + - pattern: "org.apache.ZooKeeperService<>(\\w+)" + name: "zookeeper_$4" + type: GAUGE + labels: + replicaId: "$2" + memberType: "$3" + - pattern: "org.apache.ZooKeeperService<>(\\w+)" + name: "zookeeper_$4_$5" + type: GAUGE + labels: + replicaId: "$2" + memberType: "$3" + # standalone Zookeeper + - pattern: "org.apache.ZooKeeperService<>(\\w+)" + type: GAUGE + name: "zookeeper_$2" + - pattern: "org.apache.ZooKeeperService<>(\\w+)" + type: GAUGE + name: "zookeeper_$2" + entityOperator: + affinity: {} + userOperator: + resources: {} + kafkaExporter: + affinity: {} + resources: {} + users: {} + topics: {} + + strimzi-kafka-operator: + namespace: default + watchNamespaces: ["default"] + nodeSelector: {} 
+ resources: {} + +metrics-event-persister: + image: + repository: ghcr.io/bucketeer-io/bucketeer-metrics-event + pullPolicy: IfNotPresent + fullnameOverride: "metrics-event-persister" + namespace: default + env: + project: + topic: bucketeer-metrics-events + subscription: bucketeer-metrics-events-metrics-event-persister + maxMps: "1500" + numWorkers: 2 + flushSize: 100 + flushInterval: 2s + pullerNumGoroutines: 5 + pullerMaxOutstandingMessages: "1000" + pullerMaxOutstandingBytes: "1000000000" + logLevel: info + port: 9090 + metricsPort: 9002 + affinity: {} + nodeSelector: {} + hpa: + enabled: false + namespace: + minReplicas: + maxReplicas: + metrics: {} + envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + tls: + service: + secret: + cert: + key: + service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + resources: {} + +migration-mysql: + replicaCount: 1 + image: + repository: ghcr.io/bucketeer-io/bucketeer-migration + pullPolicy: IfNotPresent + fullnameOverride: "migration-mysql" + namespace: default + env: + logLevel: info + port: 9090 + metricsPort: 9002 + githubUser: + githubMigrationSourcePath: + mysqlUser: + mysqlPass: + mysqlHost: + mysqlPort: 3306 + mysqlDbName: + affinity: {} + nodeSelector: {} + hpa: + enabled: false + namespace: + minReplicas: + maxReplicas: + metrics: {} + tls: + service: + secret: + cert: + key: + oauth: + key: + secret: + public: + clientId: + issuer: + envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + resources: {} + +notification: + image: + repository: 
ghcr.io/bucketeer-io/bucketeer-notification + pullPolicy: IfNotPresent + fullnameOverride: "notification" + namespace: default + env: + project: + mysqlUser: + mysqlPass: + mysqlHost: + mysqlPort: 3306 + mysqlDbName: + logLevel: info + port: 9090 + metricsPort: 9002 + domainEventTopic: + accountService: localhost:9001 + affinity: {} + nodeSelector: {} + pdb: + enabled: + maxUnavailable: 50% + hpa: + enabled: false + minReplicas: + maxReplicas: + metrics: {} + tls: + service: + secret: + cert: + key: + oauth: + key: + secret: + public: + clientId: + issuer: + serviceToken: + secret: + token: + envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + resources: {} + +notification-sender: + image: + repository: ghcr.io/bucketeer-io/bucketeer-notification + pullPolicy: IfNotPresent + fullnameOverride: "notification-sender" + namespace: default + env: + project: + domainTopic: bucketeer-domain-events + domainSubscription: bucketeer-domain-events-notification-sender + notificationService: localhost:9001 + environmentService: localhost:9001 + experimentService: localhost:9001 + eventCounterService: localhost:9001 + featureService: localhost:9001 + scheduleFeatureStaleWatcher: "0 0 1 * * MON" + scheduleExperimentRunningWatcher: "0 0 1 * * *" + scheduleMauCountWatcher: "0 0 1 1 * *" + webURL: + maxMps: "1000" + numWorkers: 1 + pullerNumGoroutines: 5 + pullerMaxOutstandingMessages: "1000" + pullerMaxOutstandingBytes: "1000000000" + logLevel: info + port: 9090 + metricsPort: 9002 + affinity: {} + nodeSelector: {} + replicaCount: 1 + envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + tls: + service: + secret: + cert: + key: + 
serviceToken: + secret: + token: + service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + resources: {} + +ops-event-batch: + image: + repository: ghcr.io/bucketeer-io/bucketeer-ops-event + pullPolicy: IfNotPresent + fullnameOverride: "ops-event-batch" + namespace: default + env: + project: + mysqlUser: + mysqlPass: + mysqlHost: + mysqlPort: 3306 + mysqlDbName: + autoOpsService: localhost:9001 + environmentService: localhost:9001 + eventCounterService: localhost:9001 + featureService: localhost:9001 + refreshInterval: 10m + logLevel: info + port: 9090 + metricsPort: 9002 + scheduleCountWatcher: "0,10,20,30,40,50 * * * * *" + scheduleDatetimeWatcher: "0,10,20,30,40,50 * * * * *" + affinity: {} + nodeSelector: {} + replicaCount: 1 + envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + tls: + service: + secret: + cert: + key: + serviceToken: + secret: + token: + service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + health: + initialDelaySeconds: 10 + periodSeconds: 10 + resources: {} + +push: + image: + repository: ghcr.io/bucketeer-io/bucketeer-push + pullPolicy: IfNotPresent + fullnameOverride: "push" + namespace: default + env: + project: + mysqlUser: + mysqlPass: + mysqlHost: + mysqlPort: 3306 + mysqlDbName: + logLevel: info + port: 9090 + metricsPort: 9002 + domainEventTopic: + accountService: localhost:9001 + featureService: localhost:9001 + experimentService: localhost:9001 + affinity: {} + nodeSelector: {} + pdb: + enabled: + maxUnavailable: 50% + hpa: + enabled: false + minReplicas: + maxReplicas: + metrics: {} + tls: + service: + secret: + cert: + key: + oauth: + key: + secret: + public: + clientId: + issuer: + serviceToken: + secret: + token: + envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + 
port: 9000 + adminPort: 8001 + resources: {} + service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + resources: {} + +push-sender: + image: + repository: ghcr.io/bucketeer-io/bucketeer-push + pullPolicy: IfNotPresent + fullnameOverride: "push-sender" + namespace: default + env: + project: + domainTopic: bucketeer-domain-events + domainSubscription: bucketeer-domain-events-push-sender + pushService: localhost:9001 + featureService: localhost:9001 + maxMps: "1000" + numWorkers: 2 + pullerNumGoroutines: 5 + pullerMaxOutstandingMessages: "1000" + pullerMaxOutstandingBytes: "1000000000" + redis: + serverName: bucketeer-redis + addr: bucketeer-redis.bucketeer.private:6379 + poolMaxIdle: 5 + poolMaxActive: 20 + logLevel: info + port: 9090 + metricsPort: 9002 + affinity: {} + nodeSelector: {} + replicaCount: 1 + envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + tls: + service: + secret: + cert: + key: + serviceToken: + secret: + token: + service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + resources: {} + +user: + image: + repository: ghcr.io/bucketeer-io/bucketeer-user + pullPolicy: IfNotPresent + fullnameOverride: "user" + namespace: + env: + project: + database: bucketeer + mysqlUser: + mysqlPass: + mysqlHost: + mysqlPort: 3306 + mysqlDbName: + accountService: localhost:9001 + logLevel: info + port: 9090 + metricsPort: 9002 + affinity: {} + nodeSelector: {} + pdb: + enabled: + maxUnavailable: 50% + hpa: + enabled: false + minReplicas: + maxReplicas: + metrics: {} + tls: + service: + secret: + cert: + key: + oauth: + key: + secret: + public: + clientId: + issuer: + serviceToken: + secret: + token: + envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: 
IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + resources: {} + +user-persister: + image: + repository: ghcr.io/bucketeer-io/bucketeer-user + pullPolicy: IfNotPresent + fullnameOverride: "user-persister" + namespace: default + env: + project: + mysqlUser: + mysqlPass: + mysqlHost: + mysqlPort: 3306 + mysqlDbName: + topic: bucketeer-user-events + subscription: bucketeer-user-events-user-persister + maxMps: "1000" + numWorkers: 2 + flushSize: 100 + flushInterval: 2s + pullerNumGoroutines: 5 + pullerMaxOutstandingMessages: "1000" + pullerMaxOutstandingBytes: "1000000000" + logLevel: info + port: 9090 + metricsPort: 9002 + featureService: localhost:9001 + affinity: {} + nodeSelector: {} + hpa: + enabled: false + namespace: + minReplicas: + maxReplicas: + metrics: {} + envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + config: + port: 9000 + adminPort: 8001 + resources: {} + tls: + service: + secret: + cert: + key: + serviceToken: + secret: + token: + service: + type: ClusterIP + clusterIP: None + externalPort: 9000 + health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + resources: {} + +web: + image: + repository: ghcr.io/bucketeer-io/bucketeer-web-v2 + pullPolicy: Always + fullnameOverride: "web" + namespace: default + nginx: + config: + tls: + secret: + cert: + key: + service: + type: ClusterIP + clusterIP: None + port: 443 + health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + resources: {} + affinity: {} + nodeSelector: {} + pdb: + enabled: + maxUnavailable: 50% + hpa: + enabled: false + namespace: + minReplicas: + maxReplicas: + metrics: {} + tolerations: [] + +web-gateway: + fullnameOverride: "web-gateway" + namespace: default + tls: + bucketeerJP: + secret: + cert: + key: + service: + secret: + 
cert: + key: + envoy: + image: + repository: envoyproxy/envoy-alpine + tag: v1.21.1 + pullPolicy: IfNotPresent + serviceCluster: bucketeer + adminPort: 8001 + config: + service: + type: LoadBalancer + loadBalancerIP: + port: 443 + health: + initialDelaySeconds: 10 + periodSeconds: 10 + failureThreshold: 10 + resources: {} + affinity: {} + nodeSelector: {} + pdb: + enabled: + maxUnavailable: 50% + hpa: + enabled: false + namespace: + minReplicas: + maxReplicas: + metrics: {} + tolerations: [] diff --git a/pkg/account/api/BUILD.bazel b/pkg/account/api/BUILD.bazel new file mode 100644 index 000000000..4a04c277a --- /dev/null +++ b/pkg/account/api/BUILD.bazel @@ -0,0 +1,69 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "account.go", + "admin_account.go", + "api.go", + "api_key.go", + "error.go", + "validation.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/account/api", + visibility = ["//visibility:public"], + deps = [ + "//pkg/account/command:go_default_library", + "//pkg/account/domain:go_default_library", + "//pkg/account/storage/v2:go_default_library", + "//pkg/environment/client:go_default_library", + "//pkg/locale:go_default_library", + "//pkg/log:go_default_library", + "//pkg/pubsub/publisher:go_default_library", + "//pkg/role:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/rpc/status:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//proto/account:go_default_library", + "//proto/environment:go_default_library", + "//proto/event/domain:go_default_library", + "@go_googleapis//google/rpc:errdetails_go_proto", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "account_test.go", + "admin_account_test.go", + 
"api_key_test.go", + "api_test.go", + "validation_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//pkg/account/storage/v2:go_default_library", + "//pkg/environment/client/mock:go_default_library", + "//pkg/locale:go_default_library", + "//pkg/log:go_default_library", + "//pkg/pubsub/publisher/mock:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/storage:go_default_library", + "//pkg/storage/mock:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//pkg/storage/v2/mysql/mock:go_default_library", + "//pkg/token:go_default_library", + "//proto/account:go_default_library", + "//proto/auth:go_default_library", + "//proto/environment:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/account/api/account.go b/pkg/account/api/account.go new file mode 100644 index 000000000..649eb35fd --- /dev/null +++ b/pkg/account/api/account.go @@ -0,0 +1,367 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "context" + "strconv" + + "go.uber.org/zap" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/bucketeer-io/bucketeer/pkg/account/command" + "github.com/bucketeer-io/bucketeer/pkg/account/domain" + v2as "github.com/bucketeer-io/bucketeer/pkg/account/storage/v2" + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +func (s *AccountService) CreateAccount( + ctx context.Context, + req *accountproto.CreateAccountRequest, +) (*accountproto.CreateAccountResponse, error) { + editor, err := s.checkRole(ctx, accountproto.Account_OWNER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := validateCreateAccountRequest(req); err != nil { + s.logger.Error( + "Failed to create account", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, err + } + account, err := domain.NewAccount(req.Command.Email, req.Command.Role) + if err != nil { + s.logger.Error( + "Failed to create a new account", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + // check if an Admin Account that has the same email already exists + _, err = s.getAdminAccount(ctx, account.Id) + if status.Code(err) != codes.NotFound { + if err == nil { + return nil, localizedError(statusAlreadyExists, locale.JaJP) + } + return nil, localizedError(statusInternal, locale.JaJP) + } + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), 
+ )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + accountStorage := v2as.NewAccountStorage(tx) + handler := command.NewAccountCommandHandler(editor, account, s.publisher, req.EnvironmentNamespace) + if err := handler.Handle(ctx, req.Command); err != nil { + return err + } + return accountStorage.CreateAccount(ctx, account, req.EnvironmentNamespace) + }) + if err != nil { + if err == v2as.ErrAccountAlreadyExists { + return nil, localizedError(statusAlreadyExists, locale.JaJP) + } + s.logger.Error( + "Failed to create account", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &accountproto.CreateAccountResponse{}, nil +} + +func (s *AccountService) ChangeAccountRole( + ctx context.Context, + req *accountproto.ChangeAccountRoleRequest, +) (*accountproto.ChangeAccountRoleResponse, error) { + editor, err := s.checkRole(ctx, accountproto.Account_OWNER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := validateChangeAccountRoleRequest(req); err != nil { + s.logger.Error( + "Failed to change account role", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, err + } + if err := s.updateAccountMySQL(ctx, editor, req.Command, req.Id, req.EnvironmentNamespace); err != nil { + if err == v2as.ErrAccountNotFound || err == v2as.ErrAccountUnexpectedAffectedRows { + return nil, localizedError(statusNotFound, locale.JaJP) + } + s.logger.Error( + "Failed to change account role", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return 
&accountproto.ChangeAccountRoleResponse{}, nil +} + +func (s *AccountService) EnableAccount( + ctx context.Context, + req *accountproto.EnableAccountRequest, +) (*accountproto.EnableAccountResponse, error) { + editor, err := s.checkRole(ctx, accountproto.Account_OWNER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := validateEnableAccountRequest(req); err != nil { + s.logger.Error( + "Failed to enable account", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, err + } + if err := s.updateAccountMySQL(ctx, editor, req.Command, req.Id, req.EnvironmentNamespace); err != nil { + if err == v2as.ErrAccountNotFound || err == v2as.ErrAccountUnexpectedAffectedRows { + return nil, localizedError(statusNotFound, locale.JaJP) + } + s.logger.Error( + "Failed to enable account", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &accountproto.EnableAccountResponse{}, nil +} + +func (s *AccountService) DisableAccount( + ctx context.Context, + req *accountproto.DisableAccountRequest, +) (*accountproto.DisableAccountResponse, error) { + editor, err := s.checkRole(ctx, accountproto.Account_OWNER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := validateDisableAccountRequest(req); err != nil { + s.logger.Error( + "Failed to disable account", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, err + } + if err := s.updateAccountMySQL(ctx, editor, req.Command, req.Id, req.EnvironmentNamespace); err != nil { + if err == v2as.ErrAccountNotFound || err == v2as.ErrAccountUnexpectedAffectedRows { + return nil, localizedError(statusNotFound, locale.JaJP) + } + 
s.logger.Error( + "Failed to disable account", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &accountproto.DisableAccountResponse{}, nil +} + +func (s *AccountService) updateAccountMySQL( + ctx context.Context, + editor *eventproto.Editor, + cmd command.Command, + id, environmentNamespace string, +) error { + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return err + } + return s.mysqlClient.RunInTransaction(ctx, tx, func() error { + accountStorage := v2as.NewAccountStorage(tx) + account, err := accountStorage.GetAccount(ctx, id, environmentNamespace) + if err != nil { + return err + } + handler := command.NewAccountCommandHandler(editor, account, s.publisher, environmentNamespace) + if err := handler.Handle(ctx, cmd); err != nil { + return err + } + return accountStorage.UpdateAccount(ctx, account, environmentNamespace) + }) +} + +func (s *AccountService) GetAccount( + ctx context.Context, + req *accountproto.GetAccountRequest, +) (*accountproto.GetAccountResponse, error) { + _, err := s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := validateGetAccountRequest(req); err != nil { + s.logger.Error( + "Failed to get account", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, err + } + account, err := s.getAccount(ctx, req.Email, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + return &accountproto.GetAccountResponse{Account: account.Account}, nil +} + +func (s *AccountService) getAccount(ctx context.Context, email, environmentNamespace string) (*domain.Account, error) { 
+ accountStorage := v2as.NewAccountStorage(s.mysqlClient) + account, err := accountStorage.GetAccount(ctx, email, environmentNamespace) + if err != nil { + if err == v2as.ErrAccountNotFound { + return nil, localizedError(statusNotFound, locale.JaJP) + } + s.logger.Error( + "Failed to get account", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + zap.String("email", email), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return account, nil +} + +func (s *AccountService) ListAccounts( + ctx context.Context, + req *accountproto.ListAccountsRequest, +) (*accountproto.ListAccountsResponse, error) { + _, err := s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + whereParts := []mysql.WherePart{ + mysql.NewFilter("deleted", "=", false), + mysql.NewFilter("environment_namespace", "=", req.EnvironmentNamespace), + } + if req.Disabled != nil { + whereParts = append(whereParts, mysql.NewFilter("disabled", "=", req.Disabled.Value)) + } + if req.Role != nil { + whereParts = append(whereParts, mysql.NewFilter("role", "=", req.Role.Value)) + } + if req.SearchKeyword != "" { + whereParts = append(whereParts, mysql.NewSearchQuery([]string{"email"}, req.SearchKeyword)) + } + orders, err := s.newAccountListOrders(req.OrderBy, req.OrderDirection) + if err != nil { + s.logger.Error( + "Invalid argument", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, err + } + limit := int(req.PageSize) + cursor := req.Cursor + if cursor == "" { + cursor = "0" + } + offset, err := strconv.Atoi(cursor) + if err != nil { + return nil, localizedError(statusInvalidCursor, locale.JaJP) + } + accountStorage := v2as.NewAccountStorage(s.mysqlClient) + accounts, nextCursor, totalCount, err := accountStorage.ListAccounts( + ctx, + whereParts, + orders, + limit, + offset, + ) + if err != nil { + 
s.logger.Error( + "Failed to list accounts", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &accountproto.ListAccountsResponse{ + Accounts: accounts, + Cursor: strconv.Itoa(nextCursor), + TotalCount: totalCount, + }, nil +} + +func (s *AccountService) newAccountListOrders( + orderBy accountproto.ListAccountsRequest_OrderBy, + orderDirection accountproto.ListAccountsRequest_OrderDirection, +) ([]*mysql.Order, error) { + var column string + switch orderBy { + case accountproto.ListAccountsRequest_DEFAULT, + accountproto.ListAccountsRequest_EMAIL: + column = "email" + case accountproto.ListAccountsRequest_CREATED_AT: + column = "created_at" + case accountproto.ListAccountsRequest_UPDATED_AT: + column = "updated_at" + default: + return nil, localizedError(statusInvalidOrderBy, locale.JaJP) + } + direction := mysql.OrderDirectionAsc + if orderDirection == accountproto.ListAccountsRequest_DESC { + direction = mysql.OrderDirectionDesc + } + return []*mysql.Order{mysql.NewOrder(column, direction)}, nil +} diff --git a/pkg/account/api/account_test.go b/pkg/account/api/account_test.go new file mode 100644 index 000000000..d51ddbc0a --- /dev/null +++ b/pkg/account/api/account_test.go @@ -0,0 +1,548 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "errors" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + v2as "github.com/bucketeer-io/bucketeer/pkg/account/storage/v2" + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + mysqlmock "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" +) + +func TestCreateAccountMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*AccountService) + ctxRole accountproto.Account_Role + req *accountproto.CreateAccountRequest + expectedErr error + }{ + "errNoCommand": { + ctxRole: accountproto.Account_OWNER, + req: &accountproto.CreateAccountRequest{ + Command: nil, + EnvironmentNamespace: "ns0", + }, + expectedErr: localizedError(statusNoCommand, locale.JaJP), + }, + "errInvalidIsEmpty": { + ctxRole: accountproto.Account_OWNER, + req: &accountproto.CreateAccountRequest{ + Command: &accountproto.CreateAccountCommand{Email: ""}, + EnvironmentNamespace: "ns0", + }, + expectedErr: localizedError(statusEmailIsEmpty, locale.JaJP), + }, + "errInvalidEmail": { + ctxRole: accountproto.Account_OWNER, + req: &accountproto.CreateAccountRequest{ + Command: &accountproto.CreateAccountCommand{Email: "bucketeer@"}, + EnvironmentNamespace: "ns0", + }, + expectedErr: localizedError(statusInvalidEmail, locale.JaJP), + }, + "errAlreadyExists_AdminAccount": { + setup: func(s *AccountService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + ctxRole: accountproto.Account_OWNER, + req: &accountproto.CreateAccountRequest{ + Command: &accountproto.CreateAccountCommand{Email: "bucketeer_admin@example.com"}, + 
EnvironmentNamespace: "ns0", + }, + expectedErr: localizedError(statusAlreadyExists, locale.JaJP), + }, + "errAlreadyExists_EnvironmentAccount": { + setup: func(s *AccountService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2as.ErrAccountAlreadyExists) + }, + ctxRole: accountproto.Account_OWNER, + req: &accountproto.CreateAccountRequest{ + Command: &accountproto.CreateAccountCommand{Email: "bucketeer_environment@example.com"}, + EnvironmentNamespace: "ns0", + }, + expectedErr: localizedError(statusAlreadyExists, locale.JaJP), + }, + "errInternal": { + setup: func(s *AccountService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(errors.New("error")) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + ctxRole: accountproto.Account_OWNER, + req: &accountproto.CreateAccountRequest{ + Command: &accountproto.CreateAccountCommand{ + Email: "bucketeer@example.com", + Role: accountproto.Account_OWNER, + }, + EnvironmentNamespace: "ns0", + }, + expectedErr: localizedError(statusInternal, locale.JaJP), + }, + "success": { + setup: func(s *AccountService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), 
gomock.Any(), + ).Return(nil) + }, + ctxRole: accountproto.Account_OWNER, + req: &accountproto.CreateAccountRequest{ + Command: &accountproto.CreateAccountCommand{ + Email: "bucketeer@example.com", + Role: accountproto.Account_OWNER, + }, + EnvironmentNamespace: "ns0", + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithDefaultToken(t, p.ctxRole) + service := createAccountService(t, mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.CreateAccount(ctx, p.req) + assert.Equal(t, p.expectedErr, err, msg) + }) + } +} + +func TestChangeAccountRoleMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*AccountService) + ctxRole accountproto.Account_Role + req *accountproto.ChangeAccountRoleRequest + expectedErr error + }{ + "errMissingAccountID": { + ctxRole: accountproto.Account_OWNER, + req: &accountproto.ChangeAccountRoleRequest{ + Id: "", + EnvironmentNamespace: "ns0", + }, + expectedErr: localizedError(statusMissingAccountID, locale.JaJP), + }, + "errNoCommand": { + ctxRole: accountproto.Account_OWNER, + req: &accountproto.ChangeAccountRoleRequest{ + Id: "id", + Command: nil, + EnvironmentNamespace: "ns0", + }, + expectedErr: localizedError(statusNoCommand, locale.JaJP), + }, + "errNotFound": { + setup: func(s *AccountService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2as.ErrAccountNotFound) + }, + ctxRole: accountproto.Account_OWNER, + req: &accountproto.ChangeAccountRoleRequest{ + Id: "id", + Command: &accountproto.ChangeAccountRoleCommand{ + Role: accountproto.Account_VIEWER, + }, + EnvironmentNamespace: "ns0", + }, + expectedErr: localizedError(statusNotFound, locale.JaJP), + }, + 
"errInternal": { + setup: func(s *AccountService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(errors.New("error")) + }, + ctxRole: accountproto.Account_OWNER, + req: &accountproto.ChangeAccountRoleRequest{ + Id: "bucketeer@example.com", + Command: &accountproto.ChangeAccountRoleCommand{ + Role: accountproto.Account_VIEWER, + }, + EnvironmentNamespace: "ns0", + }, + expectedErr: localizedError(statusInternal, locale.JaJP), + }, + "success": { + setup: func(s *AccountService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + ctxRole: accountproto.Account_OWNER, + req: &accountproto.ChangeAccountRoleRequest{ + Id: "bucketeer@example.com", + Command: &accountproto.ChangeAccountRoleCommand{ + Role: accountproto.Account_VIEWER, + }, + EnvironmentNamespace: "ns0", + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithDefaultToken(t, p.ctxRole) + service := createAccountService(t, mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.ChangeAccountRole(ctx, p.req) + assert.Equal(t, p.expectedErr, err, msg) + }) + } +} + +func TestEnableAccountMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*AccountService) + ctxRole accountproto.Account_Role + req *accountproto.EnableAccountRequest + expectedErr error + }{ + "errMissingAccountID": { + ctxRole: accountproto.Account_OWNER, + req: &accountproto.EnableAccountRequest{ + Id: "", + EnvironmentNamespace: "ns0", + }, + expectedErr: localizedError(statusMissingAccountID, 
locale.JaJP), + }, + "errNoCommand": { + ctxRole: accountproto.Account_OWNER, + req: &accountproto.EnableAccountRequest{ + Id: "id", + Command: nil, + EnvironmentNamespace: "ns0", + }, + expectedErr: localizedError(statusNoCommand, locale.JaJP), + }, + "errNotFound": { + setup: func(s *AccountService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2as.ErrAccountNotFound) + }, + ctxRole: accountproto.Account_OWNER, + req: &accountproto.EnableAccountRequest{ + Id: "id", + Command: &accountproto.EnableAccountCommand{}, + EnvironmentNamespace: "ns0", + }, + expectedErr: localizedError(statusNotFound, locale.JaJP), + }, + "errInternal": { + setup: func(s *AccountService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(errors.New("error")) + }, + ctxRole: accountproto.Account_OWNER, + req: &accountproto.EnableAccountRequest{ + Id: "bucketeer@example.com", + Command: &accountproto.EnableAccountCommand{}, + EnvironmentNamespace: "ns0", + }, + expectedErr: localizedError(statusInternal, locale.JaJP), + }, + "success": { + setup: func(s *AccountService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + ctxRole: accountproto.Account_OWNER, + req: &accountproto.EnableAccountRequest{ + Id: "bucketeer@example.com", + Command: &accountproto.EnableAccountCommand{}, + EnvironmentNamespace: "ns0", + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithDefaultToken(t, p.ctxRole) + service := createAccountService(t, 
mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.EnableAccount(ctx, p.req) + assert.Equal(t, p.expectedErr, err, msg) + }) + } +} + +func TestDisableAccountMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*AccountService) + ctxRole accountproto.Account_Role + req *accountproto.DisableAccountRequest + expectedErr error + }{ + "errMissingAccountID": { + ctxRole: accountproto.Account_OWNER, + req: &accountproto.DisableAccountRequest{ + Id: "", + EnvironmentNamespace: "ns0", + }, + expectedErr: localizedError(statusMissingAccountID, locale.JaJP), + }, + "errNoCommand": { + ctxRole: accountproto.Account_OWNER, + req: &accountproto.DisableAccountRequest{ + Id: "id", + Command: nil, + EnvironmentNamespace: "ns0", + }, + expectedErr: localizedError(statusNoCommand, locale.JaJP), + }, + "errNotFound": { + setup: func(s *AccountService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2as.ErrAccountNotFound) + }, + ctxRole: accountproto.Account_OWNER, + req: &accountproto.DisableAccountRequest{ + Id: "id", + Command: &accountproto.DisableAccountCommand{}, + EnvironmentNamespace: "ns0", + }, + expectedErr: localizedError(statusNotFound, locale.JaJP), + }, + "errInternal": { + setup: func(s *AccountService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(errors.New("error")) + }, + ctxRole: accountproto.Account_OWNER, + req: &accountproto.DisableAccountRequest{ + Id: "bucketeer@example.com", + Command: &accountproto.DisableAccountCommand{}, + EnvironmentNamespace: "ns0", + }, + expectedErr: 
localizedError(statusInternal, locale.JaJP), + }, + "success": { + setup: func(s *AccountService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + ctxRole: accountproto.Account_OWNER, + req: &accountproto.DisableAccountRequest{ + Id: "bucketeer@example.com", + Command: &accountproto.DisableAccountCommand{}, + EnvironmentNamespace: "ns0", + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithDefaultToken(t, p.ctxRole) + service := createAccountService(t, mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.DisableAccount(ctx, p.req) + assert.Equal(t, p.expectedErr, err, msg) + }) + } +} + +func TestGetAccountMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*AccountService) + req *accountproto.GetAccountRequest + expectedErr error + }{ + "errMissingAccountID": { + req: &accountproto.GetAccountRequest{ + Email: "", + EnvironmentNamespace: "ns0", + }, + expectedErr: localizedError(statusEmailIsEmpty, locale.JaJP), + }, + "errInvalidEmail": { + req: &accountproto.GetAccountRequest{ + Email: "bucketeer@", + EnvironmentNamespace: "ns0", + }, + expectedErr: localizedError(statusInvalidEmail, locale.JaJP), + }, + "errNotFound": { + setup: func(s *AccountService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + req: &accountproto.GetAccountRequest{ + Email: "service@example.com", + EnvironmentNamespace: "ns0", + }, + expectedErr: localizedError(statusNotFound, locale.JaJP), + }, + "success": { + setup: 
func(s *AccountService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + req: &accountproto.GetAccountRequest{ + Email: "bucketeer@example.com", + EnvironmentNamespace: "ns0", + }, + expectedErr: nil, + }, + } + ctx := createContextWithDefaultToken(t, accountproto.Account_OWNER) + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + service := createAccountService(t, mockController, nil) + if p.setup != nil { + p.setup(service) + } + res, err := service.GetAccount(ctx, p.req) + assert.Equal(t, p.expectedErr, err) + if err == nil { + assert.NotNil(t, res) + } + }) + } +} +func TestListAccountsMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*AccountService) + input *accountproto.ListAccountsRequest + expected *accountproto.ListAccountsResponse + expectedErr error + }{ + "errInvalidCursor": { + setup: nil, + input: &accountproto.ListAccountsRequest{EnvironmentNamespace: "ns0", Cursor: "XXX"}, + expected: nil, + expectedErr: localizedError(statusInvalidCursor, locale.JaJP), + }, + "errInternal": { + setup: func(s *AccountService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("test")) + }, + input: &accountproto.ListAccountsRequest{EnvironmentNamespace: "ns0"}, + expected: nil, + expectedErr: localizedError(statusInternal, locale.JaJP), + }, + "success": { + setup: func(s *AccountService) { + rows := mysqlmock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + row := 
mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + input: &accountproto.ListAccountsRequest{PageSize: 2, Cursor: "", EnvironmentNamespace: "ns0"}, + expected: &accountproto.ListAccountsResponse{Accounts: []*accountproto.Account{}, Cursor: "0"}, + expectedErr: nil, + }, + } + ctx := createContextWithDefaultToken(t, accountproto.Account_OWNER) + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + service := createAccountService(t, mockController, nil) + if p.setup != nil { + p.setup(service) + } + actual, err := service.ListAccounts(ctx, p.input) + assert.Equal(t, p.expectedErr, err, msg) + assert.Equal(t, p.expected, actual, msg) + }) + } +} diff --git a/pkg/account/api/admin_account.go b/pkg/account/api/admin_account.go new file mode 100644 index 000000000..e534d15ca --- /dev/null +++ b/pkg/account/api/admin_account.go @@ -0,0 +1,572 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "context" + "strconv" + + "go.uber.org/zap" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/bucketeer-io/bucketeer/pkg/account/command" + "github.com/bucketeer-io/bucketeer/pkg/account/domain" + v2as "github.com/bucketeer-io/bucketeer/pkg/account/storage/v2" + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + environmentproto "github.com/bucketeer-io/bucketeer/proto/environment" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +func (s *AccountService) GetMe( + ctx context.Context, + req *accountproto.GetMeRequest, +) (*accountproto.GetMeResponse, error) { + t, ok := rpc.GetIDToken(ctx) + if !ok { + return nil, localizedError(statusUnauthenticated, locale.JaJP) + } + if !verifyEmailFormat(t.Email) { + s.logger.Error( + "Email inside IDToken has an invalid format", + log.FieldsFromImcomingContext(ctx).AddFields(zap.String("email", t.Email))..., + ) + return nil, localizedError(statusInvalidEmail, locale.JaJP) + } + return s.getMe(ctx, t.Email) +} + +func (s *AccountService) GetMeByEmail( + ctx context.Context, + req *accountproto.GetMeByEmailRequest, +) (*accountproto.GetMeResponse, error) { + _, err := s.checkAdminRole(ctx) + if err != nil { + return nil, err + } + if !verifyEmailFormat(req.Email) { + s.logger.Error( + "Email inside request has an invalid format", + log.FieldsFromImcomingContext(ctx).AddFields(zap.String("email", req.Email))..., + ) + return nil, localizedError(statusInvalidEmail, locale.JaJP) + } + return s.getMe(ctx, req.Email) +} + +func (s *AccountService) getMe(ctx context.Context, email string) (*accountproto.GetMeResponse, error) { + projects, err := s.listProjects(ctx) + if err != nil { + s.logger.Error( + "Failed to get project list", + 
log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + if len(projects) == 0 { + s.logger.Error( + "Could not find any projects", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + environments, err := s.listEnvironments(ctx) + if err != nil { + s.logger.Error( + "Failed to get environment list", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + if len(environments) == 0 { + s.logger.Error( + "Could not find any environments", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + // admin account response + adminAccount, err := s.getAdminAccount(ctx, email) + if err != nil && status.Code(err) != codes.NotFound { + return nil, err + } + if adminAccount != nil && !adminAccount.Disabled && !adminAccount.Deleted { + environmentRoles, err := s.makeAdminEnvironmentRoles(projects, environments, accountproto.Account_OWNER) + if err != nil { + return nil, err + } + return &accountproto.GetMeResponse{ + Account: adminAccount.Account, + Email: adminAccount.Email, + IsAdmin: true, + AdminRole: accountproto.Account_OWNER, + Disabled: false, + EnvironmentRoles: environmentRoles, + Deleted: false, + }, nil + } + // environment acccount response + environmentRoles, account, err := s.makeEnvironmentRoles(ctx, email, projects, environments) + if err != nil { + return nil, err + } + return &accountproto.GetMeResponse{ + Account: account, + Email: email, + IsAdmin: false, + AdminRole: accountproto.Account_UNASSIGNED, + Disabled: false, + EnvironmentRoles: environmentRoles, + Deleted: false, + }, nil +} + +func (s *AccountService) makeAdminEnvironmentRoles( + projects []*environmentproto.Project, + environments []*environmentproto.Environment, + adminRole 
accountproto.Account_Role, +) ([]*accountproto.EnvironmentRole, error) { + projectSet := s.makeProjectSet(projects) + environmentRoles := make([]*accountproto.EnvironmentRole, 0) + for _, e := range environments { + p, ok := projectSet[e.ProjectId] + if !ok || p.Disabled { + continue + } + er := &accountproto.EnvironmentRole{Environment: e, Role: adminRole} + if p.Trial { + er.TrialProject = true + er.TrialStartedAt = p.CreatedAt + } + environmentRoles = append(environmentRoles, er) + } + if len(environmentRoles) == 0 { + return nil, localizedError(statusInternal, locale.JaJP) + } + return environmentRoles, nil +} + +// FIXME: remove *accountproto.Account response after WebUI supports environment feature and removes the dependency +func (s *AccountService) makeEnvironmentRoles( + ctx context.Context, + email string, + projects []*environmentproto.Project, + environments []*environmentproto.Environment, +) ([]*accountproto.EnvironmentRole, *accountproto.Account, error) { + projectSet := s.makeProjectSet(projects) + var lastAccount *accountproto.Account + environmentRoles := make([]*accountproto.EnvironmentRole, 0, len(environments)) + for _, e := range environments { + p, ok := projectSet[e.ProjectId] + if !ok || p.Disabled { + continue + } + account, err := s.getAccount(ctx, email, e.Namespace) + if err != nil && status.Code(err) != codes.NotFound { + return nil, nil, err + } + if account == nil || account.Disabled || account.Deleted { + continue + } + lastAccount = account.Account + er := &accountproto.EnvironmentRole{Environment: e, Role: account.Role} + if p.Trial { + er.TrialProject = true + er.TrialStartedAt = p.CreatedAt + } + environmentRoles = append(environmentRoles, er) + } + if len(environmentRoles) == 0 { + return nil, nil, localizedError(statusNotFound, locale.JaJP) + } + return environmentRoles, lastAccount, nil +} + +func (s *AccountService) CreateAdminAccount( + ctx context.Context, + req *accountproto.CreateAdminAccountRequest, +) 
(*accountproto.CreateAdminAccountResponse, error) { + editor, err := s.checkAdminRole(ctx) + if err != nil { + return nil, err + } + if err := validateCreateAdminAccountRequest(req); err != nil { + s.logger.Error( + "Failed to create admin account", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, err + } + account, err := domain.NewAccount(req.Command.Email, accountproto.Account_OWNER) + if err != nil { + s.logger.Error( + "Failed to create a new admin account", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + environments, err := s.listEnvironments(ctx) + if err != nil { + s.logger.Error( + "Failed to get environment list", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + // check if an Account that has the same email already exists in any environment + accountStorage := v2as.NewAccountStorage(s.mysqlClient) + for _, env := range environments { + _, err := accountStorage.GetAccount(ctx, account.Id, env.Namespace) + if err == nil { + return nil, localizedError(statusAlreadyExists, locale.JaJP) + } + if err != v2as.ErrAccountNotFound { + return nil, err + } + } + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + adminAccountStorage := v2as.NewAdminAccountStorage(tx) + handler := command.NewAdminAccountCommandHandler(editor, account, s.publisher) + if err := handler.Handle(ctx, req.Command); err != nil { + return err + } + return adminAccountStorage.CreateAdminAccount(ctx, account) + }) + if err != nil { + if err == v2as.ErrAdminAccountAlreadyExists { + return nil, localizedError(statusAlreadyExists, 
locale.JaJP) + } + s.logger.Error( + "Failed to create admin account", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &accountproto.CreateAdminAccountResponse{}, nil +} + +func (s *AccountService) EnableAdminAccount( + ctx context.Context, + req *accountproto.EnableAdminAccountRequest, +) (*accountproto.EnableAdminAccountResponse, error) { + editor, err := s.checkAdminRole(ctx) + if err != nil { + return nil, err + } + if err := validateEnableAdminAccountRequest(req); err != nil { + s.logger.Error( + "Failed to enable admin account", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, err + } + if err := s.updateAdminAccountMySQL(ctx, editor, req.Id, req.Command); err != nil { + if err == v2as.ErrAdminAccountNotFound || err == v2as.ErrAdminAccountUnexpectedAffectedRows { + return nil, localizedError(statusNotFound, locale.JaJP) + } + s.logger.Error( + "Failed to enable admin account", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &accountproto.EnableAdminAccountResponse{}, nil +} + +func (s *AccountService) DisableAdminAccount( + ctx context.Context, + req *accountproto.DisableAdminAccountRequest, +) (*accountproto.DisableAdminAccountResponse, error) { + editor, err := s.checkAdminRole(ctx) + if err != nil { + return nil, err + } + if err := validateDisableAdminAccountRequest(req); err != nil { + s.logger.Error( + "Failed to disable admin account", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, err + } + if err := s.updateAdminAccountMySQL(ctx, editor, req.Id, req.Command); err != nil { + if err == v2as.ErrAdminAccountNotFound || err == v2as.ErrAdminAccountUnexpectedAffectedRows { + return nil, localizedError(statusNotFound, locale.JaJP) + } + s.logger.Error( + "Failed to disable admin account", + 
log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &accountproto.DisableAdminAccountResponse{}, nil +} + +func (s *AccountService) updateAdminAccountMySQL( + ctx context.Context, + editor *eventproto.Editor, + id string, + cmd command.Command, +) error { + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return err + } + return s.mysqlClient.RunInTransaction(ctx, tx, func() error { + adminAccountStorage := v2as.NewAdminAccountStorage(tx) + account, err := adminAccountStorage.GetAdminAccount(ctx, id) + if err != nil { + return err + } + handler := command.NewAdminAccountCommandHandler(editor, account, s.publisher) + if err := handler.Handle(ctx, cmd); err != nil { + return err + } + return adminAccountStorage.UpdateAdminAccount(ctx, account) + }) +} + +func (s *AccountService) ConvertAccount( + ctx context.Context, + req *accountproto.ConvertAccountRequest, +) (*accountproto.ConvertAccountResponse, error) { + editor, err := s.checkAdminRole(ctx) + if err != nil { + return nil, err + } + if err := validateConvertAccountRequest(req); err != nil { + s.logger.Error( + "Failed to get account", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, err + } + account, err := domain.NewAccount(req.Id, accountproto.Account_OWNER) + if err != nil { + s.logger.Error( + "Failed to create a new admin account", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + environments, err := s.listEnvironments(ctx) + if err != nil { + s.logger.Error( + "Failed to get environment list", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + deleteAccountCommand := 
&accountproto.DeleteAccountCommand{} + createAdminAccountCommand := &accountproto.CreateAdminAccountCommand{Email: req.Id} + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + accountStorage := v2as.NewAccountStorage(tx) + var existedAccountCount int + for _, env := range environments { + existedAccount, err := accountStorage.GetAccount(ctx, account.Id, env.Namespace) + if err != nil { + if err == v2as.ErrAccountNotFound { + continue + } + return err + } + existedAccountCount++ + handler := command.NewAccountCommandHandler( + editor, + existedAccount, + s.publisher, + env.Namespace, + ) + if err := handler.Handle(ctx, deleteAccountCommand); err != nil { + return err + } + if err := accountStorage.UpdateAccount(ctx, existedAccount, env.Namespace); err != nil { + return err + } + } + if existedAccountCount == 0 { + return v2as.ErrAccountNotFound + } + adminAccountStorage := v2as.NewAdminAccountStorage(tx) + handler := command.NewAdminAccountCommandHandler(editor, account, s.publisher) + if err := handler.Handle(ctx, createAdminAccountCommand); err != nil { + return err + } + return adminAccountStorage.CreateAdminAccount(ctx, account) + }) + if err != nil { + if err == v2as.ErrAccountNotFound { + return nil, localizedError(statusNotFound, locale.JaJP) + } + if err == v2as.ErrAdminAccountAlreadyExists { + return nil, localizedError(statusAlreadyExists, locale.JaJP) + } + s.logger.Error( + "Failed to convert account", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, err + } + return &accountproto.ConvertAccountResponse{}, nil +} + +func (s *AccountService) GetAdminAccount( + ctx context.Context, + req *accountproto.GetAdminAccountRequest, +) 
(*accountproto.GetAdminAccountResponse, error) { + _, err := s.checkAdminRole(ctx) + if err != nil { + return nil, err + } + if err := validateGetAdminAccountRequest(req); err != nil { + s.logger.Error( + "Failed to get admin account", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, err + } + account, err := s.getAdminAccount(ctx, req.Email) + if err != nil { + return nil, err + } + return &accountproto.GetAdminAccountResponse{Account: account.Account}, nil +} + +func (s *AccountService) getAdminAccount(ctx context.Context, email string) (*domain.Account, error) { + adminAccountStorage := v2as.NewAdminAccountStorage(s.mysqlClient) + account, err := adminAccountStorage.GetAdminAccount(ctx, email) + if err != nil { + if err == v2as.ErrAdminAccountNotFound { + return nil, localizedError(statusNotFound, locale.JaJP) + } + s.logger.Error( + "Failed to get admin account", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("email", email), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return account, nil +} + +func (s *AccountService) ListAdminAccounts( + ctx context.Context, + req *accountproto.ListAdminAccountsRequest, +) (*accountproto.ListAdminAccountsResponse, error) { + _, err := s.checkAdminRole(ctx) + if err != nil { + return nil, err + } + whereParts := []mysql.WherePart{mysql.NewFilter("deleted", "=", false)} + if req.Disabled != nil { + whereParts = append(whereParts, mysql.NewFilter("disabled", "=", req.Disabled.Value)) + } + if req.SearchKeyword != "" { + whereParts = append(whereParts, mysql.NewSearchQuery([]string{"email"}, req.SearchKeyword)) + } + orders, err := s.newAdminAccountListOrders(req.OrderBy, req.OrderDirection) + if err != nil { + s.logger.Error( + "Invalid argument", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, err + } + limit := int(req.PageSize) + cursor := req.Cursor + if cursor == "" { + cursor = "0" + } + 
offset, err := strconv.Atoi(cursor) + if err != nil { + return nil, localizedError(statusInvalidCursor, locale.JaJP) + } + adminAccountStorage := v2as.NewAdminAccountStorage(s.mysqlClient) + accounts, nextCursor, totalCount, err := adminAccountStorage.ListAdminAccounts( + ctx, + whereParts, + orders, + limit, + offset, + ) + if err != nil { + s.logger.Error( + "Failed to list admin accounts", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &accountproto.ListAdminAccountsResponse{ + Accounts: accounts, + Cursor: strconv.Itoa(nextCursor), + TotalCount: totalCount, + }, nil +} + +func (s *AccountService) newAdminAccountListOrders( + orderBy accountproto.ListAdminAccountsRequest_OrderBy, + orderDirection accountproto.ListAdminAccountsRequest_OrderDirection, +) ([]*mysql.Order, error) { + var column string + switch orderBy { + case accountproto.ListAdminAccountsRequest_DEFAULT, + accountproto.ListAdminAccountsRequest_EMAIL: + column = "email" + case accountproto.ListAdminAccountsRequest_CREATED_AT: + column = "created_at" + case accountproto.ListAdminAccountsRequest_UPDATED_AT: + column = "updated_at" + default: + return nil, localizedError(statusInvalidOrderBy, locale.JaJP) + } + direction := mysql.OrderDirectionAsc + if orderDirection == accountproto.ListAdminAccountsRequest_DESC { + direction = mysql.OrderDirectionDesc + } + return []*mysql.Order{mysql.NewOrder(column, direction)}, nil +} diff --git a/pkg/account/api/admin_account_test.go b/pkg/account/api/admin_account_test.go new file mode 100644 index 000000000..267e49054 --- /dev/null +++ b/pkg/account/api/admin_account_test.go @@ -0,0 +1,679 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "context" + "errors" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + v2as "github.com/bucketeer-io/bucketeer/pkg/account/storage/v2" + ecmock "github.com/bucketeer-io/bucketeer/pkg/environment/client/mock" + "github.com/bucketeer-io/bucketeer/pkg/locale" + storagemock "github.com/bucketeer-io/bucketeer/pkg/storage/mock" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + mysqlmock "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + environmentproto "github.com/bucketeer-io/bucketeer/proto/environment" +) + +func TestGetMeMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + ctx context.Context + setup func(*AccountService) + input *accountproto.GetMeRequest + expected string + expectedIsAdmin bool + expectedErr error + }{ + "errUnauthenticated": { + ctx: context.Background(), + setup: nil, + input: &accountproto.GetMeRequest{}, + expected: "", + expectedErr: localizedError(statusUnauthenticated, locale.JaJP), + }, + "errInvalidEmail": { + ctx: createContextWithInvalidEmailToken(t, accountproto.Account_OWNER), + setup: nil, + input: &accountproto.GetMeRequest{}, + expected: "", + expectedErr: localizedError(statusInvalidEmail, locale.JaJP), + }, + "errInternal": { + ctx: createContextWithDefaultToken(t, accountproto.Account_OWNER), + setup: func(s *AccountService) { + 
s.environmentClient.(*ecmock.MockClient).EXPECT().ListProjects( + gomock.Any(), + gomock.Any(), + ).Return( + nil, + localizedError(statusInternal, locale.JaJP), + ) + }, + input: &accountproto.GetMeRequest{}, + expected: "", + expectedErr: localizedError(statusInternal, locale.JaJP), + }, + "errInternal_no_projects": { + ctx: createContextWithDefaultToken(t, accountproto.Account_OWNER), + setup: func(s *AccountService) { + s.environmentClient.(*ecmock.MockClient).EXPECT().ListProjects( + gomock.Any(), + gomock.Any(), + ).Return( + &environmentproto.ListProjectsResponse{}, + nil, + ) + }, + input: &accountproto.GetMeRequest{}, + expected: "", + expectedErr: localizedError(statusInternal, locale.JaJP), + }, + "errInternal_no_environments": { + ctx: createContextWithDefaultToken(t, accountproto.Account_OWNER), + setup: func(s *AccountService) { + s.environmentClient.(*ecmock.MockClient).EXPECT().ListProjects( + gomock.Any(), + gomock.Any(), + ).Return( + &environmentproto.ListProjectsResponse{ + Projects: getProjects(t), + Cursor: "", + }, + nil, + ) + s.environmentClient.(*ecmock.MockClient).EXPECT().ListEnvironments( + gomock.Any(), + gomock.Any(), + ).Return( + &environmentproto.ListEnvironmentsResponse{}, + nil, + ) + }, + input: &accountproto.GetMeRequest{}, + expected: "", + expectedErr: localizedError(statusInternal, locale.JaJP), + }, + "errNotFound": { + ctx: createContextWithDefaultToken(t, accountproto.Account_EDITOR), + setup: func(s *AccountService) { + s.environmentClient.(*ecmock.MockClient).EXPECT().ListProjects( + gomock.Any(), + gomock.Any(), + ).Return( + &environmentproto.ListProjectsResponse{ + Projects: getProjects(t), + Cursor: "", + }, + nil, + ) + s.environmentClient.(*ecmock.MockClient).EXPECT().ListEnvironments( + gomock.Any(), + gomock.Any(), + ).Return( + &environmentproto.ListEnvironmentsResponse{ + Environments: getEnvironments(t), + Cursor: "", + }, + nil, + ) + row := mysqlmock.NewMockRow(mockController) + 
row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows).Times(3) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row).Times(3) + }, + input: &accountproto.GetMeRequest{}, + expected: "", + expectedErr: localizedError(statusNotFound, locale.JaJP), + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + service := createAccountService(t, mockController, nil) + if p.setup != nil { + p.setup(service) + } + actual, err := service.GetMe(p.ctx, p.input) + assert.Equal(t, p.expectedErr, err, msg) + if actual != nil { + assert.Equal(t, p.expected, actual.Email, msg) + assert.Equal(t, p.expectedIsAdmin, actual.IsAdmin, msg) + } + }) + } +} + +func TestCreateAdminAccountMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*AccountService) + ctxRole accountproto.Account_Role + req *accountproto.CreateAdminAccountRequest + expectedErr error + }{ + "errNoCommand": { + ctxRole: accountproto.Account_OWNER, + req: &accountproto.CreateAdminAccountRequest{ + Command: nil, + }, + expectedErr: localizedError(statusNoCommand, locale.JaJP), + }, + "errEmailIsEmpty": { + ctxRole: accountproto.Account_OWNER, + req: &accountproto.CreateAdminAccountRequest{ + Command: &accountproto.CreateAdminAccountCommand{Email: ""}, + }, + expectedErr: localizedError(statusEmailIsEmpty, locale.JaJP), + }, + "errInvalidEmail": { + ctxRole: accountproto.Account_OWNER, + req: &accountproto.CreateAdminAccountRequest{ + Command: &accountproto.CreateAdminAccountCommand{Email: "bucketeer@"}, + }, + expectedErr: localizedError(statusInvalidEmail, locale.JaJP), + }, + "errInternal": { + setup: func(s *AccountService) { + s.environmentClient.(*ecmock.MockClient).EXPECT().ListEnvironments( + gomock.Any(), gomock.Any(), + ).Return(nil, localizedError(statusInternal, locale.JaJP)) + }, + ctxRole: 
accountproto.Account_OWNER, + req: &accountproto.CreateAdminAccountRequest{ + Command: &accountproto.CreateAdminAccountCommand{ + Email: "bucketeer@example.com", + }, + }, + expectedErr: localizedError(statusInternal, locale.JaJP), + }, + "errAlreadyExists_EnvironmentAccount": { + setup: func(s *AccountService) { + s.environmentClient.(*ecmock.MockClient).EXPECT().ListEnvironments( + gomock.Any(), gomock.Any(), + ).Return(&environmentproto.ListEnvironmentsResponse{ + Environments: getEnvironments(t), + Cursor: "", + }, nil) + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + ctxRole: accountproto.Account_OWNER, + req: &accountproto.CreateAdminAccountRequest{ + Command: &accountproto.CreateAdminAccountCommand{ + Email: "bucketeer_environment@example.com", + }, + }, + expectedErr: localizedError(statusAlreadyExists, locale.JaJP), + }, + "errAlreadyExists_AdminAccount": { + setup: func(s *AccountService) { + s.environmentClient.(*ecmock.MockClient).EXPECT().ListEnvironments( + gomock.Any(), gomock.Any(), + ).Return(&environmentproto.ListEnvironmentsResponse{ + Environments: getEnvironments(t), + Cursor: "", + }, nil) + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows).Times(2) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row).Times(2) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2as.ErrAdminAccountAlreadyExists) + }, + ctxRole: accountproto.Account_OWNER, + req: &accountproto.CreateAdminAccountRequest{ + Command: &accountproto.CreateAdminAccountCommand{ + Email: "bucketeer_admin@example.com", + }, + }, + 
expectedErr: localizedError(statusAlreadyExists, locale.JaJP), + }, + "success": { + setup: func(s *AccountService) { + s.environmentClient.(*ecmock.MockClient).EXPECT().ListEnvironments( + gomock.Any(), gomock.Any(), + ).Return(&environmentproto.ListEnvironmentsResponse{ + Environments: getEnvironments(t), + Cursor: "", + }, nil) + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows).Times(2) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row).Times(2) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + ctxRole: accountproto.Account_OWNER, + req: &accountproto.CreateAdminAccountRequest{ + Command: &accountproto.CreateAdminAccountCommand{ + Email: "bucketeer@example.com", + }, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithDefaultToken(t, p.ctxRole) + service := createAccountService(t, mockController, storagemock.NewMockClient(mockController)) + if p.setup != nil { + p.setup(service) + } + _, err := service.CreateAdminAccount(ctx, p.req) + assert.Equal(t, p.expectedErr, err, msg) + }) + } +} + +func TestEnableAdminAccountMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*AccountService) + ctxRole accountproto.Account_Role + req *accountproto.EnableAdminAccountRequest + expectedErr error + }{ + "errMissingAccountID": { + ctxRole: accountproto.Account_OWNER, + req: &accountproto.EnableAdminAccountRequest{ + Id: "", + }, + expectedErr: localizedError(statusMissingAccountID, locale.JaJP), + }, + "errNoCommand": { + ctxRole: accountproto.Account_OWNER, + req: 
&accountproto.EnableAdminAccountRequest{ + Id: "id", + Command: nil, + }, + expectedErr: localizedError(statusNoCommand, locale.JaJP), + }, + "errNotFound": { + setup: func(s *AccountService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2as.ErrAdminAccountNotFound) + }, + ctxRole: accountproto.Account_OWNER, + req: &accountproto.EnableAdminAccountRequest{ + Id: "id", + Command: &accountproto.EnableAdminAccountCommand{}, + }, + expectedErr: localizedError(statusNotFound, locale.JaJP), + }, + "errInternal": { + setup: func(s *AccountService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(errors.New("error")) + }, + ctxRole: accountproto.Account_OWNER, + req: &accountproto.EnableAdminAccountRequest{ + Id: "bucketeer@example.com", + Command: &accountproto.EnableAdminAccountCommand{}, + }, + expectedErr: localizedError(statusInternal, locale.JaJP), + }, + "success": { + setup: func(s *AccountService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + ctxRole: accountproto.Account_OWNER, + req: &accountproto.EnableAdminAccountRequest{ + Id: "bucketeer@example.com", + Command: &accountproto.EnableAdminAccountCommand{}, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithDefaultToken(t, p.ctxRole) + service := createAccountService(t, mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.EnableAdminAccount(ctx, p.req) + assert.Equal(t, p.expectedErr, err, msg) + }) + } +} 
+ +func TestDisableAdminAccountMySQL(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + t.Parallel() + + patterns := map[string]struct { + setup func(*AccountService) + ctxRole accountproto.Account_Role + req *accountproto.DisableAdminAccountRequest + expectedErr error + }{ + "errMissingAccountID": { + ctxRole: accountproto.Account_OWNER, + req: &accountproto.DisableAdminAccountRequest{ + Id: "", + }, + expectedErr: localizedError(statusMissingAccountID, locale.JaJP), + }, + "errNoCommand": { + ctxRole: accountproto.Account_OWNER, + req: &accountproto.DisableAdminAccountRequest{ + Id: "id", + Command: nil, + }, + expectedErr: localizedError(statusNoCommand, locale.JaJP), + }, + "errNotFound": { + setup: func(s *AccountService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2as.ErrAdminAccountNotFound) + }, + ctxRole: accountproto.Account_OWNER, + req: &accountproto.DisableAdminAccountRequest{ + Id: "id", + Command: &accountproto.DisableAdminAccountCommand{}, + }, + expectedErr: localizedError(statusNotFound, locale.JaJP), + }, + "errInternal": { + setup: func(s *AccountService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(errors.New("error")) + }, + ctxRole: accountproto.Account_OWNER, + req: &accountproto.DisableAdminAccountRequest{ + Id: "bucketeer@example.com", + Command: &accountproto.DisableAdminAccountCommand{}, + }, + expectedErr: localizedError(statusInternal, locale.JaJP), + }, + "success": { + setup: func(s *AccountService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + 
gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + ctxRole: accountproto.Account_OWNER, + req: &accountproto.DisableAdminAccountRequest{ + Id: "bucketeer@example.com", + Command: &accountproto.DisableAdminAccountCommand{}, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithDefaultToken(t, p.ctxRole) + service := createAccountService(t, mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.DisableAdminAccount(ctx, p.req) + assert.Equal(t, p.expectedErr, err, msg) + }) + } +} + +func TestConvertAccountMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*AccountService) + ctxRole accountproto.Account_Role + req *accountproto.ConvertAccountRequest + expectedErr error + }{ + "errMissingAccountID": { + ctxRole: accountproto.Account_OWNER, + req: &accountproto.ConvertAccountRequest{ + Id: "", + Command: &accountproto.ConvertAccountCommand{}, + }, + expectedErr: localizedError(statusMissingAccountID, locale.JaJP), + }, + "errNotFound": { + setup: func(s *AccountService) { + s.environmentClient.(*ecmock.MockClient).EXPECT().ListEnvironments( + gomock.Any(), + gomock.Any(), + ).Return(&environmentproto.ListEnvironmentsResponse{ + Environments: getEnvironments(t), + Cursor: "", + }, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2as.ErrAccountNotFound) + }, + ctxRole: accountproto.Account_OWNER, + req: &accountproto.ConvertAccountRequest{ + Id: "b@aa.jp", + Command: &accountproto.ConvertAccountCommand{}, + }, + expectedErr: localizedError(statusNotFound, locale.JaJP), + }, + "success": { + setup: func(s *AccountService) { + 
s.environmentClient.(*ecmock.MockClient).EXPECT().ListEnvironments( + gomock.Any(), + gomock.Any(), + ).Return(&environmentproto.ListEnvironmentsResponse{ + Environments: getEnvironments(t), + Cursor: "", + }, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + ctxRole: accountproto.Account_OWNER, + req: &accountproto.ConvertAccountRequest{ + Id: "bucketeer@example.com", + Command: &accountproto.ConvertAccountCommand{}, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithDefaultToken(t, p.ctxRole) + service := createAccountService(t, mockController, storagemock.NewMockClient(mockController)) + if p.setup != nil { + p.setup(service) + } + _, err := service.ConvertAccount(ctx, p.req) + assert.Equal(t, p.expectedErr, err, msg) + }) + } +} + +func TestGetAdminAccountMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*AccountService) + req *accountproto.GetAdminAccountRequest + expectedErr error + }{ + "errMissingAccountID": { + req: &accountproto.GetAdminAccountRequest{ + Email: "", + }, + expectedErr: localizedError(statusEmailIsEmpty, locale.JaJP), + }, + "errInvalidEmail": { + req: &accountproto.GetAdminAccountRequest{ + Email: "bucketeer@", + }, + expectedErr: localizedError(statusInvalidEmail, locale.JaJP), + }, + "errNotFound": { + setup: func(s *AccountService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + req: &accountproto.GetAdminAccountRequest{ + Email: "service@example.com", + }, + expectedErr: 
localizedError(statusNotFound, locale.JaJP), + }, + "success": { + setup: func(s *AccountService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + req: &accountproto.GetAdminAccountRequest{ + Email: "bucketeer@example.com", + }, + expectedErr: nil, + }, + } + ctx := createContextWithDefaultToken(t, accountproto.Account_OWNER) + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + service := createAccountService(t, mockController, nil) + if p.setup != nil { + p.setup(service) + } + res, err := service.GetAdminAccount(ctx, p.req) + assert.Equal(t, p.expectedErr, err) + if res != nil { + assert.NotNil(t, res) + } + }) + } +} + +func TestListAdminAccountsMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*AccountService) + input *accountproto.ListAdminAccountsRequest + expected *accountproto.ListAdminAccountsResponse + expectedErr error + }{ + "errInvalidCursor": { + setup: nil, + input: &accountproto.ListAdminAccountsRequest{Cursor: "xxx"}, + expected: nil, + expectedErr: localizedError(statusInvalidCursor, locale.JaJP), + }, + "errInternal": { + setup: func(s *AccountService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("test")) + }, + input: &accountproto.ListAdminAccountsRequest{}, + expected: nil, + expectedErr: localizedError(statusInternal, locale.JaJP), + }, + "success": { + setup: func(s *AccountService) { + rows := mysqlmock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + 
).Return(rows, nil) + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + input: &accountproto.ListAdminAccountsRequest{PageSize: 2, Cursor: ""}, + expected: &accountproto.ListAdminAccountsResponse{Accounts: []*accountproto.Account{}, Cursor: "0", TotalCount: 0}, + expectedErr: nil, + }, + } + ctx := createContextWithDefaultToken(t, accountproto.Account_OWNER) + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + service := createAccountService(t, mockController, nil) + if p.setup != nil { + p.setup(service) + } + actual, err := service.ListAdminAccounts(ctx, p.input) + assert.Equal(t, p.expectedErr, err, msg) + assert.Equal(t, p.expected, actual, msg) + }) + } +} + +func getProjects(t *testing.T) []*environmentproto.Project { + t.Helper() + return []*environmentproto.Project{ + {Id: "pj0"}, + } +} + +func getEnvironments(t *testing.T) []*environmentproto.Environment { + t.Helper() + return []*environmentproto.Environment{ + {Id: "ns0", Namespace: "ns0", ProjectId: "pj0"}, + {Id: "ns1", Namespace: "ns1", ProjectId: "pj0"}, + } +} diff --git a/pkg/account/api/api.go b/pkg/account/api/api.go new file mode 100644 index 000000000..62bb2cee0 --- /dev/null +++ b/pkg/account/api/api.go @@ -0,0 +1,206 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "context" + + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + environmentclient "github.com/bucketeer-io/bucketeer/pkg/environment/client" + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + "github.com/bucketeer-io/bucketeer/pkg/role" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + proto "github.com/bucketeer-io/bucketeer/proto/account" + environmentproto "github.com/bucketeer-io/bucketeer/proto/environment" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +const ( + listRequestPageSize = 500 +) + +type options struct { + logger *zap.Logger +} + +var defaultOptions = options{ + logger: zap.NewNop(), +} + +type Option func(*options) + +func WithLogger(logger *zap.Logger) Option { + return func(opts *options) { + opts.logger = logger + } +} + +type AccountService struct { + environmentClient environmentclient.Client + mysqlClient mysql.Client + publisher publisher.Publisher + opts *options + logger *zap.Logger +} + +func NewAccountService( + e environmentclient.Client, + mysqlClient mysql.Client, + publisher publisher.Publisher, + opts ...Option, +) *AccountService { + options := defaultOptions + for _, opt := range opts { + opt(&options) + } + return &AccountService{ + environmentClient: e, + mysqlClient: mysqlClient, + publisher: publisher, + opts: &options, + logger: options.logger.Named("api"), + } +} + +func (s *AccountService) Register(server *grpc.Server) { + proto.RegisterAccountServiceServer(server, s) +} + +func (s *AccountService) makeProjectSet(projects []*environmentproto.Project) map[string]*environmentproto.Project { + projectSet := make(map[string]*environmentproto.Project) + for _, p := range projects { + 
projectSet[p.Id] = p + } + return projectSet +} + +func (s *AccountService) listProjects(ctx context.Context) ([]*environmentproto.Project, error) { + projects := []*environmentproto.Project{} + cursor := "" + for { + resp, err := s.environmentClient.ListProjects(ctx, &environmentproto.ListProjectsRequest{ + PageSize: listRequestPageSize, + Cursor: cursor, + }) + if err != nil { + return nil, err + } + projects = append(projects, resp.Projects...) + projectSize := len(resp.Projects) + if projectSize == 0 || projectSize < listRequestPageSize { + return projects, nil + } + cursor = resp.Cursor + } +} + +func (s *AccountService) listEnvironments(ctx context.Context) ([]*environmentproto.Environment, error) { + environments := []*environmentproto.Environment{} + cursor := "" + for { + resp, err := s.environmentClient.ListEnvironments(ctx, &environmentproto.ListEnvironmentsRequest{ + PageSize: listRequestPageSize, + Cursor: cursor, + }) + if err != nil { + return nil, err + } + environments = append(environments, resp.Environments...) 
+ environmentSize := len(resp.Environments) + if environmentSize == 0 || environmentSize < listRequestPageSize { + return environments, nil + } + cursor = resp.Cursor + } +} + +func (s *AccountService) checkAdminRole(ctx context.Context) (*eventproto.Editor, error) { + editor, err := role.CheckAdminRole(ctx) + if err != nil { + switch status.Code(err) { + case codes.Unauthenticated: + s.logger.Info( + "Unauthenticated", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusUnauthenticated, locale.JaJP) + case codes.PermissionDenied: + s.logger.Info( + "Permission denied", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusPermissionDenied, locale.JaJP) + default: + s.logger.Error( + "Failed to check role", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + } + return editor, nil +} + +func (s *AccountService) checkRole( + ctx context.Context, + requiredRole proto.Account_Role, + environmentNamespace string, +) (*eventproto.Editor, error) { + editor, err := role.CheckRole(ctx, requiredRole, func(email string) (*proto.GetAccountResponse, error) { + account, err := s.getAccount(ctx, email, environmentNamespace) + if err != nil { + return nil, err + } + return &proto.GetAccountResponse{Account: account.Account}, nil + }) + if err != nil { + switch status.Code(err) { + case codes.Unauthenticated: + s.logger.Info( + "Unauthenticated", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, localizedError(statusUnauthenticated, locale.JaJP) + case codes.PermissionDenied: + s.logger.Info( + "Permission denied", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, 
localizedError(statusPermissionDenied, locale.JaJP) + default: + s.logger.Error( + "Failed to check role", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + } + return editor, nil +} diff --git a/pkg/account/api/api_key.go b/pkg/account/api/api_key.go new file mode 100644 index 000000000..680432ad9 --- /dev/null +++ b/pkg/account/api/api_key.go @@ -0,0 +1,403 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "context" + "strconv" + + "go.uber.org/zap" + + "github.com/bucketeer-io/bucketeer/pkg/account/command" + "github.com/bucketeer-io/bucketeer/pkg/account/domain" + v2as "github.com/bucketeer-io/bucketeer/pkg/account/storage/v2" + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + proto "github.com/bucketeer-io/bucketeer/proto/account" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +func (s *AccountService) CreateAPIKey( + ctx context.Context, + req *proto.CreateAPIKeyRequest, +) (*proto.CreateAPIKeyResponse, error) { + editor, err := s.checkRole(ctx, proto.Account_OWNER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := validateCreateAPIKeyRequest(req); err != nil { + return nil, err + } + key, err := domain.NewAPIKey(req.Command.Name, req.Command.Role) + if err != nil { + s.logger.Error( + "Failed to create a new api key", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + apiKeyStorage := v2as.NewAPIKeyStorage(tx) + handler := command.NewAPIKeyCommandHandler(editor, key, s.publisher, req.EnvironmentNamespace) + if err := handler.Handle(ctx, req.Command); err != nil { + return err + } + return apiKeyStorage.CreateAPIKey(ctx, key, req.EnvironmentNamespace) + }) + if err != nil { + if err == v2as.ErrAPIKeyAlreadyExists { + return nil, localizedError(statusAlreadyExists, locale.JaJP) + } + s.logger.Error( + "Failed to create 
api key", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &proto.CreateAPIKeyResponse{ + ApiKey: key.APIKey, + }, nil +} + +func (s *AccountService) ChangeAPIKeyName( + ctx context.Context, + req *proto.ChangeAPIKeyNameRequest, +) (*proto.ChangeAPIKeyNameResponse, error) { + editor, err := s.checkRole(ctx, proto.Account_OWNER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := validateChangeAPIKeyNameRequest(req); err != nil { + s.logger.Error( + "Failed to change api key name", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, err + } + if err := s.updateAPIKeyMySQL(ctx, editor, req.Id, req.EnvironmentNamespace, req.Command); err != nil { + if err == v2as.ErrAPIKeyNotFound || err == v2as.ErrAPIKeyUnexpectedAffectedRows { + return nil, localizedError(statusNotFound, locale.JaJP) + } + s.logger.Error( + "Failed to change api key name", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + zap.String("id", req.Id), + zap.String("name", req.Command.Name), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &proto.ChangeAPIKeyNameResponse{}, nil +} + +func (s *AccountService) EnableAPIKey( + ctx context.Context, + req *proto.EnableAPIKeyRequest, +) (*proto.EnableAPIKeyResponse, error) { + editor, err := s.checkRole(ctx, proto.Account_OWNER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := validateEnableAPIKeyRequest(req); err != nil { + s.logger.Error( + "Failed to enable api key", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, err + 
} + if err := s.updateAPIKeyMySQL(ctx, editor, req.Id, req.EnvironmentNamespace, req.Command); err != nil { + if err == v2as.ErrAPIKeyNotFound || err == v2as.ErrAPIKeyUnexpectedAffectedRows { + return nil, localizedError(statusNotFound, locale.JaJP) + } + s.logger.Error( + "Failed to enable api key", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + zap.String("id", req.Id), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &proto.EnableAPIKeyResponse{}, nil +} + +func (s *AccountService) DisableAPIKey( + ctx context.Context, + req *proto.DisableAPIKeyRequest, +) (*proto.DisableAPIKeyResponse, error) { + editor, err := s.checkRole(ctx, proto.Account_OWNER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := validateDisableAPIKeyRequest(req); err != nil { + s.logger.Error( + "Failed to disable api key", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, err + } + if err := s.updateAPIKeyMySQL(ctx, editor, req.Id, req.EnvironmentNamespace, req.Command); err != nil { + if err == v2as.ErrAPIKeyNotFound || err == v2as.ErrAPIKeyUnexpectedAffectedRows { + return nil, localizedError(statusNotFound, locale.JaJP) + } + s.logger.Error( + "Failed to disable api key", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + zap.String("id", req.Id), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &proto.DisableAPIKeyResponse{}, nil +} + +func (s *AccountService) updateAPIKeyMySQL( + ctx context.Context, + editor *eventproto.Editor, + id, environmentNamespace string, + cmd command.Command, +) error { + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + 
log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return err + } + return s.mysqlClient.RunInTransaction(ctx, tx, func() error { + apiKeyStorage := v2as.NewAPIKeyStorage(tx) + apiKey, err := apiKeyStorage.GetAPIKey(ctx, id, environmentNamespace) + if err != nil { + return err + } + handler := command.NewAPIKeyCommandHandler(editor, apiKey, s.publisher, environmentNamespace) + if err := handler.Handle(ctx, cmd); err != nil { + return err + } + return apiKeyStorage.UpdateAPIKey(ctx, apiKey, environmentNamespace) + }) +} + +func (s *AccountService) GetAPIKey(ctx context.Context, req *proto.GetAPIKeyRequest) (*proto.GetAPIKeyResponse, error) { + _, err := s.checkRole(ctx, proto.Account_VIEWER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if req.Id == "" { + return nil, localizedError(statusMissingAPIKeyID, locale.JaJP) + } + apiKeyStorage := v2as.NewAPIKeyStorage(s.mysqlClient) + apiKey, err := apiKeyStorage.GetAPIKey(ctx, req.Id, req.EnvironmentNamespace) + if err != nil { + if err == v2as.ErrAPIKeyNotFound { + return nil, localizedError(statusNotFound, locale.JaJP) + } + s.logger.Error( + "Failed to get api key", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + zap.String("id", req.Id), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &proto.GetAPIKeyResponse{ApiKey: apiKey.APIKey}, nil +} + +func (s *AccountService) ListAPIKeys( + ctx context.Context, + req *proto.ListAPIKeysRequest, +) (*proto.ListAPIKeysResponse, error) { + _, err := s.checkRole(ctx, proto.Account_VIEWER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + whereParts := []mysql.WherePart{ + mysql.NewFilter("environment_namespace", "=", req.EnvironmentNamespace), + } + if req.Disabled != nil { + whereParts = append(whereParts, mysql.NewFilter("disabled", "=", req.Disabled.Value)) + } + if req.SearchKeyword != "" { + 
whereParts = append(whereParts, mysql.NewSearchQuery([]string{"name"}, req.SearchKeyword)) + } + orders, err := s.newAPIKeyListOrders(req.OrderBy, req.OrderDirection) + if err != nil { + s.logger.Error( + "Invalid argument", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, err + } + limit := int(req.PageSize) + cursor := req.Cursor + if cursor == "" { + cursor = "0" + } + offset, err := strconv.Atoi(cursor) + if err != nil { + return nil, localizedError(statusInvalidCursor, locale.JaJP) + } + apiKeyStorage := v2as.NewAPIKeyStorage(s.mysqlClient) + apiKeys, nextCursor, totalCount, err := apiKeyStorage.ListAPIKeys( + ctx, + whereParts, + orders, + limit, + offset, + ) + if err != nil { + s.logger.Error( + "Failed to list api keys", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &proto.ListAPIKeysResponse{ + ApiKeys: apiKeys, + Cursor: strconv.Itoa(nextCursor), + TotalCount: totalCount, + }, nil +} + +func (s *AccountService) newAPIKeyListOrders( + orderBy proto.ListAPIKeysRequest_OrderBy, + orderDirection proto.ListAPIKeysRequest_OrderDirection, +) ([]*mysql.Order, error) { + var column string + switch orderBy { + case proto.ListAPIKeysRequest_DEFAULT, + proto.ListAPIKeysRequest_NAME: + column = "name" + case proto.ListAPIKeysRequest_CREATED_AT: + column = "created_at" + case proto.ListAPIKeysRequest_UPDATED_AT: + column = "updated_at" + default: + return nil, localizedError(statusInvalidOrderBy, locale.JaJP) + } + direction := mysql.OrderDirectionAsc + if orderDirection == proto.ListAPIKeysRequest_DESC { + direction = mysql.OrderDirectionDesc + } + return []*mysql.Order{mysql.NewOrder(column, direction)}, nil +} + +func (s *AccountService) GetAPIKeyBySearchingAllEnvironments( + ctx context.Context, + req *proto.GetAPIKeyBySearchingAllEnvironmentsRequest, +) 
(*proto.GetAPIKeyBySearchingAllEnvironmentsResponse, error) { + _, err := s.checkAdminRole(ctx) + if err != nil { + return nil, err + } + if req.Id == "" { + return nil, localizedError(statusMissingAPIKeyID, locale.JaJP) + } + projects, err := s.listProjects(ctx) + if err != nil { + s.logger.Error( + "Failed to get project list", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + if len(projects) == 0 { + s.logger.Error( + "Could not find any projects", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + environments, err := s.listEnvironments(ctx) + if err != nil { + s.logger.Error( + "Failed to get environment list", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + if len(environments) == 0 { + s.logger.Error( + "Could not find any environments", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + projectSet := s.makeProjectSet(projects) + apiKeyStorage := v2as.NewAPIKeyStorage(s.mysqlClient) + for _, e := range environments { + if p, ok := projectSet[e.ProjectId]; !ok || p.Disabled { + continue + } + apiKey, err := apiKeyStorage.GetAPIKey(ctx, req.Id, e.Namespace) + if err != nil { + if err == v2as.ErrAPIKeyNotFound { + continue + } + s.logger.Error( + "Failed to get api key", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", e.Namespace), + zap.String("id", req.Id), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &proto.GetAPIKeyBySearchingAllEnvironmentsResponse{ + EnvironmentApiKey: &proto.EnvironmentAPIKey{EnvironmentNamespace: e.Namespace, ApiKey: apiKey.APIKey}, + }, nil + } + return nil, localizedError(statusNotFound, locale.JaJP) +} diff 
--git a/pkg/account/api/api_key_test.go b/pkg/account/api/api_key_test.go new file mode 100644 index 000000000..d19ed768b --- /dev/null +++ b/pkg/account/api/api_key_test.go @@ -0,0 +1,465 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "errors" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + v2as "github.com/bucketeer-io/bucketeer/pkg/account/storage/v2" + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + mysqlmock "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" +) + +func TestCreateAPIKeyMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*AccountService) + ctxRole accountproto.Account_Role + req *accountproto.CreateAPIKeyRequest + expectedErr error + }{ + "errNoCommand": { + ctxRole: accountproto.Account_OWNER, + req: &accountproto.CreateAPIKeyRequest{ + Command: nil, + }, + expectedErr: localizedError(statusNoCommand, locale.JaJP), + }, + "errMissingAPIKeyName": { + ctxRole: accountproto.Account_OWNER, + req: &accountproto.CreateAPIKeyRequest{ + Command: &accountproto.CreateAPIKeyCommand{Name: ""}, + }, + expectedErr: localizedError(statusMissingAPIKeyName, locale.JaJP), + }, + "errInternal": { + 
setup: func(s *AccountService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(errors.New("error")) + }, + ctxRole: accountproto.Account_OWNER, + req: &accountproto.CreateAPIKeyRequest{ + Command: &accountproto.CreateAPIKeyCommand{ + Name: "name", + Role: accountproto.APIKey_SDK, + }, + }, + expectedErr: localizedError(statusInternal, locale.JaJP), + }, + "success": { + setup: func(s *AccountService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + ctxRole: accountproto.Account_OWNER, + req: &accountproto.CreateAPIKeyRequest{ + Command: &accountproto.CreateAPIKeyCommand{ + Name: "name", + Role: accountproto.APIKey_SDK, + }, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithDefaultToken(t, p.ctxRole) + service := createAccountService(t, mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.CreateAPIKey(ctx, p.req) + assert.Equal(t, p.expectedErr, err, msg) + }) + } +} + +func TestChangeAPIKeyNameMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*AccountService) + ctxRole accountproto.Account_Role + req *accountproto.ChangeAPIKeyNameRequest + expectedErr error + }{ + "errMissingAPIKeyID": { + ctxRole: accountproto.Account_OWNER, + req: &accountproto.ChangeAPIKeyNameRequest{ + Id: "", + }, + expectedErr: localizedError(statusMissingAPIKeyID, locale.JaJP), + }, + "errNoCommand": { + ctxRole: accountproto.Account_OWNER, + req: &accountproto.ChangeAPIKeyNameRequest{ + Id: "id", + Command: nil, + }, + expectedErr: 
localizedError(statusNoCommand, locale.JaJP), + }, + "errNotFound": { + setup: func(s *AccountService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2as.ErrAPIKeyNotFound) + }, + ctxRole: accountproto.Account_OWNER, + req: &accountproto.ChangeAPIKeyNameRequest{ + Id: "id", + Command: &accountproto.ChangeAPIKeyNameCommand{ + Name: "", + }, + }, + expectedErr: localizedError(statusNotFound, locale.JaJP), + }, + "errInternal": { + setup: func(s *AccountService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(errors.New("error")) + }, + ctxRole: accountproto.Account_OWNER, + req: &accountproto.ChangeAPIKeyNameRequest{ + Id: "id", + Command: &accountproto.ChangeAPIKeyNameCommand{ + Name: "new name", + }, + }, + expectedErr: localizedError(statusInternal, locale.JaJP), + }, + "success": { + setup: func(s *AccountService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + ctxRole: accountproto.Account_OWNER, + req: &accountproto.ChangeAPIKeyNameRequest{ + Id: "id", + Command: &accountproto.ChangeAPIKeyNameCommand{ + Name: "new name", + }, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithDefaultToken(t, p.ctxRole) + service := createAccountService(t, mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.ChangeAPIKeyName(ctx, p.req) + assert.Equal(t, p.expectedErr, err, msg) + }) + } +} + +func TestEnableAPIKeyMySQL(t *testing.T) { + t.Parallel() + mockController := 
gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*AccountService) + ctxRole accountproto.Account_Role + req *accountproto.EnableAPIKeyRequest + expectedErr error + }{ + "errMissingAPIKeyID": { + ctxRole: accountproto.Account_OWNER, + req: &accountproto.EnableAPIKeyRequest{ + Id: "", + }, + expectedErr: localizedError(statusMissingAPIKeyID, locale.JaJP), + }, + "errNoCommand": { + ctxRole: accountproto.Account_OWNER, + req: &accountproto.EnableAPIKeyRequest{ + Id: "id", + Command: nil, + }, + expectedErr: localizedError(statusNoCommand, locale.JaJP), + }, + "errNotFound": { + setup: func(s *AccountService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2as.ErrAPIKeyNotFound) + }, + ctxRole: accountproto.Account_OWNER, + req: &accountproto.EnableAPIKeyRequest{ + Id: "id", + Command: &accountproto.EnableAPIKeyCommand{}, + }, + expectedErr: localizedError(statusNotFound, locale.JaJP), + }, + "errInternal": { + setup: func(s *AccountService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(errors.New("error")) + }, + ctxRole: accountproto.Account_OWNER, + req: &accountproto.EnableAPIKeyRequest{ + Id: "id", + Command: &accountproto.EnableAPIKeyCommand{}, + }, + expectedErr: localizedError(statusInternal, locale.JaJP), + }, + "success": { + setup: func(s *AccountService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + ctxRole: accountproto.Account_OWNER, + req: &accountproto.EnableAPIKeyRequest{ + Id: "id", + Command: 
&accountproto.EnableAPIKeyCommand{}, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithDefaultToken(t, p.ctxRole) + service := createAccountService(t, mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.EnableAPIKey(ctx, p.req) + assert.Equal(t, p.expectedErr, err, msg) + }) + } +} + +func TestDisableAPIKeyMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*AccountService) + ctxRole accountproto.Account_Role + req *accountproto.DisableAPIKeyRequest + expectedErr error + }{ + "errMissingAPIKeyID": { + ctxRole: accountproto.Account_OWNER, + req: &accountproto.DisableAPIKeyRequest{ + Id: "", + }, + expectedErr: localizedError(statusMissingAPIKeyID, locale.JaJP), + }, + "errNoCommand": { + ctxRole: accountproto.Account_OWNER, + req: &accountproto.DisableAPIKeyRequest{ + Id: "id", + Command: nil, + }, + expectedErr: localizedError(statusNoCommand, locale.JaJP), + }, + "errNotFound": { + setup: func(s *AccountService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2as.ErrAPIKeyNotFound) + }, + ctxRole: accountproto.Account_OWNER, + req: &accountproto.DisableAPIKeyRequest{ + Id: "id", + Command: &accountproto.DisableAPIKeyCommand{}, + }, + expectedErr: localizedError(statusNotFound, locale.JaJP), + }, + "errInternal": { + setup: func(s *AccountService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(errors.New("error")) + }, + ctxRole: accountproto.Account_OWNER, + req: &accountproto.DisableAPIKeyRequest{ + Id: "id", + Command: 
&accountproto.DisableAPIKeyCommand{}, + }, + expectedErr: localizedError(statusInternal, locale.JaJP), + }, + "success": { + setup: func(s *AccountService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + ctxRole: accountproto.Account_OWNER, + req: &accountproto.DisableAPIKeyRequest{ + Id: "id", + Command: &accountproto.DisableAPIKeyCommand{}, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithDefaultToken(t, p.ctxRole) + service := createAccountService(t, mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.DisableAPIKey(ctx, p.req) + assert.Equal(t, p.expectedErr, err, msg) + }) + } +} + +func TestGetAPIKeyMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*AccountService) + req *accountproto.GetAPIKeyRequest + expected error + }{ + "errMissingAPIKeyID": { + req: &accountproto.GetAPIKeyRequest{Id: ""}, + expected: localizedError(statusMissingAPIKeyID, locale.JaJP), + }, + "errNotFound": { + setup: func(s *AccountService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + req: &accountproto.GetAPIKeyRequest{Id: "id"}, + expected: localizedError(statusNotFound, locale.JaJP), + }, + "success": { + setup: func(s *AccountService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + req: &accountproto.GetAPIKeyRequest{Id: 
"id"}, + expected: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithDefaultToken(t, accountproto.Account_OWNER) + service := createAccountService(t, mockController, nil) + if p.setup != nil { + p.setup(service) + } + res, err := service.GetAPIKey(ctx, p.req) + assert.Equal(t, p.expected, err) + if err == nil { + assert.NotNil(t, res) + } + }) + } +} + +func TestListAPIKeysMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*AccountService) + input *accountproto.ListAPIKeysRequest + expected *accountproto.ListAPIKeysResponse + expectedErr error + }{ + "errInvalidCursor": { + input: &accountproto.ListAPIKeysRequest{Cursor: "XXX"}, + expected: nil, + expectedErr: localizedError(statusInvalidCursor, locale.JaJP), + }, + "errInternal": { + setup: func(s *AccountService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("test")) + }, + input: &accountproto.ListAPIKeysRequest{}, + expected: nil, + expectedErr: localizedError(statusInternal, locale.JaJP), + }, + "success": { + setup: func(s *AccountService) { + rows := mysqlmock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + input: &accountproto.ListAPIKeysRequest{PageSize: 2, Cursor: ""}, + expected: &accountproto.ListAPIKeysResponse{ApiKeys: []*accountproto.APIKey{}, Cursor: "0"}, + expectedErr: nil, + }, + } + for msg, p := range 
patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithDefaultToken(t, accountproto.Account_OWNER) + service := createAccountService(t, mockController, nil) + if p.setup != nil { + p.setup(service) + } + actual, err := service.ListAPIKeys(ctx, p.input) + assert.Equal(t, p.expectedErr, err, msg) + assert.Equal(t, p.expected, actual, msg) + }) + } +} diff --git a/pkg/account/api/api_test.go b/pkg/account/api/api_test.go new file mode 100644 index 000000000..ebf271232 --- /dev/null +++ b/pkg/account/api/api_test.go @@ -0,0 +1,127 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "context" + "encoding/base64" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/golang/protobuf/proto" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + ecmock "github.com/bucketeer-io/bucketeer/pkg/environment/client/mock" + "github.com/bucketeer-io/bucketeer/pkg/log" + publishermock "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher/mock" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/storage" + mysqlmock "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + "github.com/bucketeer-io/bucketeer/pkg/token" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + authproto "github.com/bucketeer-io/bucketeer/proto/auth" +) + +func TestWithLogger(t *testing.T) { + t.Parallel() + logger, err := log.NewLogger() + require.NoError(t, err) + f := WithLogger(logger) + opt := &options{} + f(opt) + assert.Equal(t, logger, opt.logger) +} + +func TestNewAccountService(t *testing.T) { + t.Parallel() + g := NewAccountService(nil, nil, nil) + assert.IsType(t, &AccountService{}, g) +} + +func createAccountService(t *testing.T, mockController *gomock.Controller, db storage.Client) *AccountService { + t.Helper() + logger := zap.NewNop() + return &AccountService{ + environmentClient: ecmock.NewMockClient(mockController), + mysqlClient: mysqlmock.NewMockClient(mockController), + publisher: publishermock.NewMockPublisher(mockController), + logger: logger.Named("api"), + } +} + +func createContextWithDefaultToken(t *testing.T, role accountproto.Account_Role) context.Context { + t.Helper() + return createContextWithEmailToken(t, "bucketeer@example.com", role) +} + +func createContextWithEmailToken(t *testing.T, email string, role accountproto.Account_Role) context.Context { + t.Helper() + sub := &authproto.IDTokenSubject{ + UserId: email, + ConnId: "test-connector-id", + } + data, err := proto.Marshal(sub) + 
require.NoError(t, err) + token := &token.IDToken{ + Issuer: "issuer", + Subject: base64.RawURLEncoding.EncodeToString([]byte(data)), + Audience: "audience", + Expiry: time.Now().AddDate(100, 0, 0), + IssuedAt: time.Now(), + Email: email, + AdminRole: role, + } + ctx := context.TODO() + return context.WithValue(ctx, rpc.Key, token) +} + +func createContextWithInvalidSubjectToken(t *testing.T, role accountproto.Account_Role) context.Context { + t.Helper() + token := &token.IDToken{ + Issuer: "issuer", + Subject: base64.RawURLEncoding.EncodeToString([]byte("bucketeer@example.com")), + Audience: "audience", + Expiry: time.Now().AddDate(100, 0, 0), + IssuedAt: time.Now(), + Email: "bucketeer@example.com", + AdminRole: role, + } + ctx := context.TODO() + return context.WithValue(ctx, rpc.Key, token) +} + +func createContextWithInvalidEmailToken(t *testing.T, role accountproto.Account_Role) context.Context { + t.Helper() + sub := &authproto.IDTokenSubject{ + UserId: "bucketeer@example.com", + ConnId: "test-connector-id", + } + data, err := proto.Marshal(sub) + require.NoError(t, err) + token := &token.IDToken{ + Issuer: "issuer", + Subject: base64.RawURLEncoding.EncodeToString([]byte(data)), + Audience: "audience", + Expiry: time.Now().AddDate(100, 0, 0), + IssuedAt: time.Now(), + Email: "bucketeer@", + AdminRole: role, + } + ctx := context.TODO() + return context.WithValue(ctx, rpc.Key, token) +} diff --git a/pkg/account/api/error.go b/pkg/account/api/error.go new file mode 100644 index 000000000..f7f4cbf41 --- /dev/null +++ b/pkg/account/api/error.go @@ -0,0 +1,166 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc/codes" + gstatus "google.golang.org/grpc/status" + + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/rpc/status" +) + +var ( + statusInternal = gstatus.New(codes.Internal, "account: internal") + statusInvalidCursor = gstatus.New(codes.InvalidArgument, "account: cursor is invalid") + statusNoCommand = gstatus.New(codes.InvalidArgument, "account: command must not be empty") + statusMissingAccountID = gstatus.New(codes.InvalidArgument, "account: account id must be specified") + statusEmailIsEmpty = gstatus.New(codes.InvalidArgument, "account: email is empty") + statusInvalidEmail = gstatus.New(codes.InvalidArgument, "account: invalid email format") + statusMissingAPIKeyID = gstatus.New(codes.InvalidArgument, "account: apikey id must be specified") + statusMissingAPIKeyName = gstatus.New(codes.InvalidArgument, "account: apikey name must be not empty") + statusInvalidOrderBy = gstatus.New(codes.InvalidArgument, "account: order_by is invalid") + statusNotFound = gstatus.New(codes.NotFound, "account: not found") + statusAlreadyExists = gstatus.New(codes.AlreadyExists, "account: already exists") + statusUnauthenticated = gstatus.New(codes.Unauthenticated, "account: unauthenticated") + statusPermissionDenied = gstatus.New(codes.PermissionDenied, "account: permission denied") + + errInternalJaJP = status.MustWithDetails( + statusInternal, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: 
"内部エラーが発生しました", + }, + ) + errInvalidCursorJaJP = status.MustWithDetails( + statusInvalidCursor, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不正なcursorです", + }, + ) + errNoCommandJaJP = status.MustWithDetails( + statusNoCommand, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "commandは必須です", + }, + ) + errMissingAccountIDJaJP = status.MustWithDetails( + statusMissingAccountID, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "account idは必須です", + }, + ) + errEmailIsEmptyJaJP = status.MustWithDetails( + statusEmailIsEmpty, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "emailは必須です", + }, + ) + errInvalidEmailJaJP = status.MustWithDetails( + statusInvalidEmail, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不正なemailです", + }, + ) + errMissingAPIKeyIDJaJP = status.MustWithDetails( + statusMissingAPIKeyID, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "api keyのidは必須です", + }, + ) + errMissingAPIKeyNameJaJP = status.MustWithDetails( + statusMissingAPIKeyName, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "api keyのnameは必須です", + }, + ) + errInvalidOrderByJaJP = status.MustWithDetails( + statusInvalidOrderBy, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不正なソート順の指定です", + }, + ) + errNotFoundJaJP = status.MustWithDetails( + statusNotFound, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "データが存在しません", + }, + ) + errAlreadyExistsJaJP = status.MustWithDetails( + statusAlreadyExists, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "同じidのデータがすでに存在します", + }, + ) + errUnauthenticatedJaJP = status.MustWithDetails( + statusUnauthenticated, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "認証されていません", + }, + ) + errPermissionDeniedJaJP = status.MustWithDetails( + statusPermissionDenied, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "権限がありません", + }, + ) +) + 
+func localizedError(s *gstatus.Status, loc string) error { + // handle loc if multi-lang is necessary + switch s { + case statusInternal: + return errInternalJaJP + case statusInvalidCursor: + return errInvalidCursorJaJP + case statusNoCommand: + return errNoCommandJaJP + case statusMissingAccountID: + return errMissingAccountIDJaJP + case statusEmailIsEmpty: + return errEmailIsEmptyJaJP + case statusInvalidEmail: + return errInvalidEmailJaJP + case statusMissingAPIKeyID: + return errMissingAPIKeyIDJaJP + case statusMissingAPIKeyName: + return errMissingAPIKeyNameJaJP + case statusInvalidOrderBy: + return errInvalidOrderByJaJP + case statusNotFound: + return errNotFoundJaJP + case statusAlreadyExists: + return errAlreadyExistsJaJP + case statusUnauthenticated: + return errUnauthenticatedJaJP + case statusPermissionDenied: + return errPermissionDeniedJaJP + default: + return errInternalJaJP + } +} diff --git a/pkg/account/api/validation.go b/pkg/account/api/validation.go new file mode 100644 index 000000000..aadfd457b --- /dev/null +++ b/pkg/account/api/validation.go @@ -0,0 +1,175 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "regexp" + + "github.com/bucketeer-io/bucketeer/pkg/locale" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" +) + +// nolint:lll +var emailRegex = regexp.MustCompile("^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$") + +func validateGetAdminAccountRequest(req *accountproto.GetAdminAccountRequest) error { + if req.Email == "" { + return localizedError(statusEmailIsEmpty, locale.JaJP) + } + if !verifyEmailFormat(req.Email) { + return localizedError(statusInvalidEmail, locale.JaJP) + } + return nil +} + +func validateCreateAdminAccountRequest(req *accountproto.CreateAdminAccountRequest) error { + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + if req.Command.Email == "" { + return localizedError(statusEmailIsEmpty, locale.JaJP) + } + if !verifyEmailFormat(req.Command.Email) { + return localizedError(statusInvalidEmail, locale.JaJP) + } + return nil +} + +func validateEnableAdminAccountRequest(req *accountproto.EnableAdminAccountRequest) error { + if req.Id == "" { + return localizedError(statusMissingAccountID, locale.JaJP) + } + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + return nil +} + +func validateDisableAdminAccountRequest(req *accountproto.DisableAdminAccountRequest) error { + if req.Id == "" { + return localizedError(statusMissingAccountID, locale.JaJP) + } + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + return nil +} + +func validateGetAccountRequest(req *accountproto.GetAccountRequest) error { + if req.Email == "" { + return localizedError(statusEmailIsEmpty, locale.JaJP) + } + if !verifyEmailFormat(req.Email) { + return localizedError(statusInvalidEmail, locale.JaJP) + } + return nil +} + +func validateCreateAccountRequest(req *accountproto.CreateAccountRequest) error { + if req.Command == nil { + return 
localizedError(statusNoCommand, locale.JaJP) + } + if req.Command.Email == "" { + return localizedError(statusEmailIsEmpty, locale.JaJP) + } + if !verifyEmailFormat(req.Command.Email) { + return localizedError(statusInvalidEmail, locale.JaJP) + } + return nil +} + +func validateChangeAccountRoleRequest(req *accountproto.ChangeAccountRoleRequest) error { + if req.Id == "" { + return localizedError(statusMissingAccountID, locale.JaJP) + } + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + return nil +} + +func validateConvertAccountRequest(req *accountproto.ConvertAccountRequest) error { + if req.Id == "" { + return localizedError(statusMissingAccountID, locale.JaJP) + } + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + return nil +} + +func validateEnableAccountRequest(req *accountproto.EnableAccountRequest) error { + if req.Id == "" { + return localizedError(statusMissingAccountID, locale.JaJP) + } + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + return nil +} + +func validateDisableAccountRequest(req *accountproto.DisableAccountRequest) error { + if req.Id == "" { + return localizedError(statusMissingAccountID, locale.JaJP) + } + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + return nil +} + +func verifyEmailFormat(email string) bool { + return emailRegex.MatchString(email) +} + +func validateCreateAPIKeyRequest(req *accountproto.CreateAPIKeyRequest) error { + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + if req.Command.Name == "" { + return localizedError(statusMissingAPIKeyName, locale.JaJP) + } + return nil +} + +func validateChangeAPIKeyNameRequest(req *accountproto.ChangeAPIKeyNameRequest) error { + if req.Id == "" { + return localizedError(statusMissingAPIKeyID, locale.JaJP) + } + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + return nil +} + 
+func validateEnableAPIKeyRequest(req *accountproto.EnableAPIKeyRequest) error { + if req.Id == "" { + return localizedError(statusMissingAPIKeyID, locale.JaJP) + } + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + return nil +} + +func validateDisableAPIKeyRequest(req *accountproto.DisableAPIKeyRequest) error { + if req.Id == "" { + return localizedError(statusMissingAPIKeyID, locale.JaJP) + } + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + return nil +} diff --git a/pkg/account/api/validation_test.go b/pkg/account/api/validation_test.go new file mode 100644 index 000000000..2f974f16a --- /dev/null +++ b/pkg/account/api/validation_test.go @@ -0,0 +1,39 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestVerifyEmailFormat(t *testing.T) { + t.Parallel() + testcases := []struct { + email string + ok bool + }{ + {"foo@gmail.com", true}, + {"foo+bar@abc.co.jp", true}, + {"invalid", false}, + {"@invalid", false}, + {"", false}, + } + for _, tc := range testcases { + ok := verifyEmailFormat(tc.email) + assert.Equal(t, tc.ok, ok, tc.email) + } +} diff --git a/pkg/account/apikeycacher/BUILD.bazel b/pkg/account/apikeycacher/BUILD.bazel new file mode 100644 index 000000000..2a29d7a18 --- /dev/null +++ b/pkg/account/apikeycacher/BUILD.bazel @@ -0,0 +1,29 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "apikeycacher.go", + "metrics.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/account/apikeycacher", + visibility = ["//visibility:public"], + deps = [ + "//pkg/account/client:go_default_library", + "//pkg/cache:go_default_library", + "//pkg/cache/v3:go_default_library", + "//pkg/environment/client:go_default_library", + "//pkg/errgroup:go_default_library", + "//pkg/health:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/pubsub/puller:go_default_library", + "//pkg/pubsub/puller/codes:go_default_library", + "//proto/account:go_default_library", + "//proto/environment:go_default_library", + "//proto/event/domain:go_default_library", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_golang_protobuf//ptypes:go_default_library_gen", + "@com_github_prometheus_client_golang//prometheus:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/account/apikeycacher/apikeycacher.go b/pkg/account/apikeycacher/apikeycacher.go new file mode 100644 index 000000000..512cc14b3 --- /dev/null +++ b/pkg/account/apikeycacher/apikeycacher.go @@ -0,0 +1,415 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package apikeycacher + +import ( + "context" + "fmt" + "time" + + "github.com/golang/protobuf/proto" // nolint:staticcheck + "github.com/golang/protobuf/ptypes" + "go.uber.org/zap" + + accountclient "github.com/bucketeer-io/bucketeer/pkg/account/client" + "github.com/bucketeer-io/bucketeer/pkg/cache" + cachev3 "github.com/bucketeer-io/bucketeer/pkg/cache/v3" + environmentclient "github.com/bucketeer-io/bucketeer/pkg/environment/client" + "github.com/bucketeer-io/bucketeer/pkg/errgroup" + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller/codes" + acproto "github.com/bucketeer-io/bucketeer/proto/account" + environmentproto "github.com/bucketeer-io/bucketeer/proto/environment" + domainevent "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +const ( + listRequestPageSize = 500 +) + +type options struct { + maxMPS int + numWorkers int + flushSize int + flushInterval time.Duration + metrics metrics.Registerer + logger *zap.Logger +} + +type Option func(*options) + +func WithMaxMPS(mps int) Option { + return func(opts *options) { + opts.maxMPS = mps + } +} + +func WithNumWorkers(n int) Option { + return func(opts *options) { + opts.numWorkers = n + } +} + +func WithFlushSize(size int) Option { + return func(opts *options) { + opts.flushSize = size + 
} +} + +func WithFlushInterval(interval time.Duration) Option { + return func(opts *options) { + opts.flushInterval = interval + } +} + +func WithMetrics(r metrics.Registerer) Option { + return func(opts *options) { + opts.metrics = r + } +} + +func WithLogger(l *zap.Logger) Option { + return func(opts *options) { + opts.logger = l + } +} + +type EnvAPIKeyCacher struct { + puller puller.RateLimitedPuller + accountClient accountclient.Client + environmentClient environmentclient.Client + envAPIKeyCache cachev3.EnvironmentAPIKeyCache + group errgroup.Group + opts *options + logger *zap.Logger + ctx context.Context + cancel func() + doneCh chan struct{} +} + +func NewEnvironmentAPIKeyCacher( + p puller.Puller, + accountClient accountclient.Client, + environmentClient environmentclient.Client, + v3Cache cache.Cache, + opts ...Option, +) *EnvAPIKeyCacher { + ctx, cancel := context.WithCancel(context.Background()) + dopts := &options{ + maxMPS: 1000, + numWorkers: 1, + flushSize: 100, + flushInterval: time.Minute, + logger: zap.NewNop(), + } + for _, opt := range opts { + opt(dopts) + } + if dopts.metrics != nil { + registerMetrics(dopts.metrics) + } + return &EnvAPIKeyCacher{ + puller: puller.NewRateLimitedPuller(p, dopts.maxMPS), + accountClient: accountClient, + environmentClient: environmentClient, + envAPIKeyCache: cachev3.NewEnvironmentAPIKeyCache(v3Cache), + opts: dopts, + logger: dopts.logger.Named("apikeycacher"), + ctx: ctx, + cancel: cancel, + doneCh: make(chan struct{}), + } +} + +func (c *EnvAPIKeyCacher) Run() error { + defer close(c.doneCh) + c.group.Go(func() error { + return c.puller.Run(c.ctx) + }) + for i := 0; i < c.opts.numWorkers; i++ { + c.group.Go(c.batch) + } + return c.group.Wait() +} + +func (c *EnvAPIKeyCacher) Stop() { + c.cancel() + <-c.doneCh +} + +func (c *EnvAPIKeyCacher) Check(ctx context.Context) health.Status { + select { + case <-c.ctx.Done(): + c.logger.Error("Unhealthy due to context Done is closed", zap.Error(c.ctx.Err())) + return 
health.Unhealthy + default: + if c.group.FinishedCount() > 0 { + c.logger.Error("Unhealthy", zap.Int32("FinishedCount", c.group.FinishedCount())) + return health.Unhealthy + } + return health.Healthy + } +} + +func (c *EnvAPIKeyCacher) batch() error { + chunk := make(map[string]*puller.Message, c.opts.flushSize) + timer := time.NewTimer(c.opts.flushInterval) + defer timer.Stop() + for { + select { + case msg, ok := <-c.puller.MessageCh(): + if !ok { + return nil + } + receivedCounter.Inc() + id := msg.Attributes["id"] + if id == "" { + msg.Ack() + handledCounter.WithLabelValues(codes.MissingID.String()).Inc() + continue + } + if _, ok := chunk[id]; ok { + c.logger.Warn("Message with duplicate id", zap.String("id", id)) + handledCounter.WithLabelValues(codes.DuplicateID.String()).Inc() + } + chunk[id] = msg + if len(chunk) >= c.opts.flushSize { + c.handleChunk(chunk) + chunk = make(map[string]*puller.Message, c.opts.flushSize) + timer.Reset(c.opts.flushInterval) + } + case <-timer.C: + if len(chunk) > 0 { + c.handleChunk(chunk) + chunk = make(map[string]*puller.Message, c.opts.flushSize) + } + timer.Reset(c.opts.flushInterval) + case <-c.ctx.Done(): + return nil + } + } +} + +func (c *EnvAPIKeyCacher) handleChunk(chunk map[string]*puller.Message) { + for _, msg := range chunk { + event, err := c.unmarshalMessage(msg) + if err != nil { + msg.Ack() + handledCounter.WithLabelValues(codes.BadMessage.String()).Inc() + c.logger.Error("Failed to unmarshal message", zap.Error(err), zap.String("msgID", msg.ID)) + continue + } + switch event.EntityType { + case domainevent.Event_APIKEY: + c.handleAPIKeyEvent(msg, event) + case domainevent.Event_PROJECT: + c.handleProjectEvent(msg, event) + case domainevent.Event_ENVIRONMENT: + c.handleEnvironmentEvent(msg, event) + default: + msg.Ack() + handledCounter.WithLabelValues(codes.OK.String()).Inc() + continue + } + } +} + +func (c *EnvAPIKeyCacher) handleAPIKeyEvent(msg *puller.Message, event *domainevent.Event) { + apiKeyID := 
event.EntityId + if apiKeyID == "" { + msg.Ack() + handledCounter.WithLabelValues(codes.BadMessage.String()).Inc() + c.logger.Warn("Message contains an empty apiKeyID", zap.Any("event", event)) + return + } + if err := c.refresh(apiKeyID, event.EnvironmentNamespace, false); err != nil { + msg.Nack() + handledCounter.WithLabelValues(codes.RepeatableError.String()).Inc() + c.logger.Error("Failed to refresh api key", + zap.Error(err), + zap.String("apiKeyID", apiKeyID), + zap.String("environmentNamespace", event.EnvironmentNamespace), + zap.Any("event", event), + ) + return + } + msg.Ack() + handledCounter.WithLabelValues(codes.OK.String()).Inc() +} + +func (c *EnvAPIKeyCacher) handleProjectEvent(msg *puller.Message, event *domainevent.Event) { + if !(event.Type == domainevent.Event_PROJECT_ENABLED || event.Type == domainevent.Event_PROJECT_DISABLED) { + msg.Ack() + handledCounter.WithLabelValues(codes.OK.String()).Inc() + return + } + environmentDisabled := event.Type == domainevent.Event_PROJECT_DISABLED + projectID := event.EntityId + if projectID == "" { + msg.Ack() + handledCounter.WithLabelValues(codes.BadMessage.String()).Inc() + c.logger.Warn("Message contains an empty projectID", zap.Any("event", event)) + return + } + environments, err := c.listEnvironments(projectID) + if err != nil { + msg.Nack() + handledCounter.WithLabelValues(codes.RepeatableError.String()).Inc() + c.logger.Warn("Failed to list environments", zap.Any("event", event)) + return + } + for _, environment := range environments { + if err := c.refreshAll(environment.Namespace, environmentDisabled); err != nil { + msg.Nack() + handledCounter.WithLabelValues(codes.RepeatableError.String()).Inc() + c.logger.Error("Failed to refresh all api keys in the environment", + zap.Error(err), + zap.String("environmentNamespace", environment.Namespace), + zap.Any("event", event), + ) + return + } + } + msg.Ack() + handledCounter.WithLabelValues(codes.OK.String()).Inc() +} + +func (c *EnvAPIKeyCacher) 
handleEnvironmentEvent(msg *puller.Message, event *domainevent.Event) { + if event.Type != domainevent.Event_ENVIRONMENT_DELETED { + msg.Ack() + handledCounter.WithLabelValues(codes.OK.String()).Inc() + return + } + ede := &domainevent.EnvironmentDeletedEvent{} + if err := ptypes.UnmarshalAny(event.Data, ede); err != nil { + msg.Ack() + handledCounter.WithLabelValues(codes.BadMessage.String()).Inc() + c.logger.Warn("Message doesn't contain an environment deleted event", zap.Any("event", event)) + return + } + environmentNamespace := ede.Namespace + if err := c.refreshAll(environmentNamespace, true); err != nil { + msg.Nack() + handledCounter.WithLabelValues(codes.RepeatableError.String()).Inc() + c.logger.Error("Failed to refresh all api keys in the environment", + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + zap.Any("event", event), + ) + return + } + msg.Ack() + handledCounter.WithLabelValues(codes.OK.String()).Inc() +} + +func (c *EnvAPIKeyCacher) unmarshalMessage(msg *puller.Message) (*domainevent.Event, error) { + event := &domainevent.Event{} + err := proto.Unmarshal(msg.Data, event) + if err != nil { + return nil, err + } + return event, nil +} + +func (c *EnvAPIKeyCacher) refresh(apiKeyID, environmentNamespace string, environmentDisabled bool) error { + req := &acproto.GetAPIKeyRequest{ + Id: apiKeyID, + EnvironmentNamespace: environmentNamespace, + } + resp, err := c.accountClient.GetAPIKey(c.ctx, req) + if err != nil { + return fmt.Errorf("failed to get api key: %w", err) + } + envAPIKey := &acproto.EnvironmentAPIKey{ + ApiKey: resp.ApiKey, + EnvironmentNamespace: environmentNamespace, + EnvironmentDisabled: environmentDisabled, + } + return c.upsert(envAPIKey) +} + +func (c *EnvAPIKeyCacher) refreshAll(environmentNamespace string, environmentDisabled bool) error { + apiKeys, err := c.listAPIKeys(environmentNamespace) + if err != nil { + return fmt.Errorf("failed to list api keys: %w", err) + } + for _, key := range 
apiKeys { + envAPIKey := &acproto.EnvironmentAPIKey{ + ApiKey: key, + EnvironmentNamespace: environmentNamespace, + EnvironmentDisabled: environmentDisabled, + } + if err := c.upsert(envAPIKey); err != nil { + return err + } + } + return nil +} + +func (c *EnvAPIKeyCacher) upsert(envAPIKey *acproto.EnvironmentAPIKey) error { + if err := c.envAPIKeyCache.Put(envAPIKey); err != nil { + return fmt.Errorf("failed to cache environment api key: %w", err) + } + c.logger.Info( + "API key upserted successfully", + zap.String("apiKeyID", envAPIKey.ApiKey.Id), + zap.String("environmentNamespace", envAPIKey.EnvironmentNamespace), + ) + return nil +} + +func (c *EnvAPIKeyCacher) listEnvironments(projectID string) ([]*environmentproto.Environment, error) { + environments := []*environmentproto.Environment{} + cursor := "" + for { + resp, err := c.environmentClient.ListEnvironments(c.ctx, &environmentproto.ListEnvironmentsRequest{ + PageSize: listRequestPageSize, + Cursor: cursor, + ProjectId: projectID, + }) + if err != nil { + return nil, err + } + environments = append(environments, resp.Environments...) + environmentSize := len(resp.Environments) + if environmentSize == 0 || environmentSize < listRequestPageSize { + return environments, nil + } + cursor = resp.Cursor + } +} + +func (c *EnvAPIKeyCacher) listAPIKeys(environmentNamespace string) ([]*acproto.APIKey, error) { + apiKeys := []*acproto.APIKey{} + cursor := "" + for { + resp, err := c.accountClient.ListAPIKeys(c.ctx, &acproto.ListAPIKeysRequest{ + PageSize: listRequestPageSize, + Cursor: cursor, + EnvironmentNamespace: environmentNamespace, + }) + if err != nil { + return nil, err + } + apiKeys = append(apiKeys, resp.ApiKeys...) 
+ apiKeySize := len(resp.ApiKeys) + if apiKeySize == 0 || apiKeySize < listRequestPageSize { + return apiKeys, nil + } + cursor = resp.Cursor + } +} diff --git a/pkg/account/apikeycacher/metrics.go b/pkg/account/apikeycacher/metrics.go new file mode 100644 index 000000000..d66700518 --- /dev/null +++ b/pkg/account/apikeycacher/metrics.go @@ -0,0 +1,43 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package apikeycacher + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/bucketeer-io/bucketeer/pkg/metrics" +) + +var ( + receivedCounter = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "account", + Name: "apikeycacher_received_total", + Help: "Total number of received messages", + }) + + handledCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "account", + Name: "apikeycacher_handled_total", + Help: "Total number of handled messages", + }, []string{"code"}) +) + +func registerMetrics(r metrics.Registerer) { + r.MustRegister(receivedCounter, handledCounter) +} diff --git a/pkg/account/client/BUILD.bazel b/pkg/account/client/BUILD.bazel new file mode 100644 index 000000000..a407a1359 --- /dev/null +++ b/pkg/account/client/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["client.go"], + importpath = 
"github.com/bucketeer-io/bucketeer/pkg/account/client", + visibility = ["//visibility:public"], + deps = [ + "//pkg/rpc/client:go_default_library", + "//proto/account:go_default_library", + "@org_golang_google_grpc//:go_default_library", + ], +) diff --git a/pkg/account/client/client.go b/pkg/account/client/client.go new file mode 100644 index 000000000..86f66dd04 --- /dev/null +++ b/pkg/account/client/client.go @@ -0,0 +1,50 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package client + +import ( + "google.golang.org/grpc" + + rpcclient "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + proto "github.com/bucketeer-io/bucketeer/proto/account" +) + +type Client interface { + proto.AccountServiceClient + Close() +} + +type client struct { + proto.AccountServiceClient + address string + connection *grpc.ClientConn +} + +func NewClient(addr, certPath string, opts ...rpcclient.Option) (Client, error) { + conn, err := rpcclient.NewClientConn(addr, certPath, opts...) 
+ if err != nil { + return nil, err + } + return &client{ + AccountServiceClient: proto.NewAccountServiceClient(conn), + address: addr, + connection: conn, + }, nil +} + +func (c *client) Close() { + c.connection.Close() +} diff --git a/pkg/account/client/mock/BUILD.bazel b/pkg/account/client/mock/BUILD.bazel new file mode 100644 index 000000000..5d2438b5b --- /dev/null +++ b/pkg/account/client/mock/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["client.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/account/client/mock", + visibility = ["//visibility:public"], + deps = [ + "//proto/account:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@org_golang_google_grpc//:go_default_library", + ], +) diff --git a/pkg/account/client/mock/client.go b/pkg/account/client/mock/client.go new file mode 100644 index 000000000..6e03c9a0e --- /dev/null +++ b/pkg/account/client/mock/client.go @@ -0,0 +1,470 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: client.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + grpc "google.golang.org/grpc" + + account "github.com/bucketeer-io/bucketeer/proto/account" +) + +// MockClient is a mock of Client interface. +type MockClient struct { + ctrl *gomock.Controller + recorder *MockClientMockRecorder +} + +// MockClientMockRecorder is the mock recorder for MockClient. +type MockClientMockRecorder struct { + mock *MockClient +} + +// NewMockClient creates a new mock instance. +func NewMockClient(ctrl *gomock.Controller) *MockClient { + mock := &MockClient{ctrl: ctrl} + mock.recorder = &MockClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. 
+func (m *MockClient) EXPECT() *MockClientMockRecorder { + return m.recorder +} + +// ChangeAPIKeyName mocks base method. +func (m *MockClient) ChangeAPIKeyName(ctx context.Context, in *account.ChangeAPIKeyNameRequest, opts ...grpc.CallOption) (*account.ChangeAPIKeyNameResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ChangeAPIKeyName", varargs...) + ret0, _ := ret[0].(*account.ChangeAPIKeyNameResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChangeAPIKeyName indicates an expected call of ChangeAPIKeyName. +func (mr *MockClientMockRecorder) ChangeAPIKeyName(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChangeAPIKeyName", reflect.TypeOf((*MockClient)(nil).ChangeAPIKeyName), varargs...) +} + +// ChangeAccountRole mocks base method. +func (m *MockClient) ChangeAccountRole(ctx context.Context, in *account.ChangeAccountRoleRequest, opts ...grpc.CallOption) (*account.ChangeAccountRoleResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ChangeAccountRole", varargs...) + ret0, _ := ret[0].(*account.ChangeAccountRoleResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChangeAccountRole indicates an expected call of ChangeAccountRole. +func (mr *MockClientMockRecorder) ChangeAccountRole(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChangeAccountRole", reflect.TypeOf((*MockClient)(nil).ChangeAccountRole), varargs...) +} + +// Close mocks base method. 
+func (m *MockClient) Close() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Close") +} + +// Close indicates an expected call of Close. +func (mr *MockClientMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockClient)(nil).Close)) +} + +// ConvertAccount mocks base method. +func (m *MockClient) ConvertAccount(ctx context.Context, in *account.ConvertAccountRequest, opts ...grpc.CallOption) (*account.ConvertAccountResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ConvertAccount", varargs...) + ret0, _ := ret[0].(*account.ConvertAccountResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ConvertAccount indicates an expected call of ConvertAccount. +func (mr *MockClientMockRecorder) ConvertAccount(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConvertAccount", reflect.TypeOf((*MockClient)(nil).ConvertAccount), varargs...) +} + +// CreateAPIKey mocks base method. +func (m *MockClient) CreateAPIKey(ctx context.Context, in *account.CreateAPIKeyRequest, opts ...grpc.CallOption) (*account.CreateAPIKeyResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateAPIKey", varargs...) + ret0, _ := ret[0].(*account.CreateAPIKeyResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateAPIKey indicates an expected call of CreateAPIKey. +func (mr *MockClientMockRecorder) CreateAPIKey(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateAPIKey", reflect.TypeOf((*MockClient)(nil).CreateAPIKey), varargs...) +} + +// CreateAccount mocks base method. +func (m *MockClient) CreateAccount(ctx context.Context, in *account.CreateAccountRequest, opts ...grpc.CallOption) (*account.CreateAccountResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateAccount", varargs...) + ret0, _ := ret[0].(*account.CreateAccountResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateAccount indicates an expected call of CreateAccount. +func (mr *MockClientMockRecorder) CreateAccount(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateAccount", reflect.TypeOf((*MockClient)(nil).CreateAccount), varargs...) +} + +// CreateAdminAccount mocks base method. +func (m *MockClient) CreateAdminAccount(ctx context.Context, in *account.CreateAdminAccountRequest, opts ...grpc.CallOption) (*account.CreateAdminAccountResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateAdminAccount", varargs...) + ret0, _ := ret[0].(*account.CreateAdminAccountResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateAdminAccount indicates an expected call of CreateAdminAccount. +func (mr *MockClientMockRecorder) CreateAdminAccount(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateAdminAccount", reflect.TypeOf((*MockClient)(nil).CreateAdminAccount), varargs...) +} + +// DisableAPIKey mocks base method. 
+func (m *MockClient) DisableAPIKey(ctx context.Context, in *account.DisableAPIKeyRequest, opts ...grpc.CallOption) (*account.DisableAPIKeyResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DisableAPIKey", varargs...) + ret0, _ := ret[0].(*account.DisableAPIKeyResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DisableAPIKey indicates an expected call of DisableAPIKey. +func (mr *MockClientMockRecorder) DisableAPIKey(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisableAPIKey", reflect.TypeOf((*MockClient)(nil).DisableAPIKey), varargs...) +} + +// DisableAccount mocks base method. +func (m *MockClient) DisableAccount(ctx context.Context, in *account.DisableAccountRequest, opts ...grpc.CallOption) (*account.DisableAccountResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DisableAccount", varargs...) + ret0, _ := ret[0].(*account.DisableAccountResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DisableAccount indicates an expected call of DisableAccount. +func (mr *MockClientMockRecorder) DisableAccount(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisableAccount", reflect.TypeOf((*MockClient)(nil).DisableAccount), varargs...) +} + +// DisableAdminAccount mocks base method. 
+func (m *MockClient) DisableAdminAccount(ctx context.Context, in *account.DisableAdminAccountRequest, opts ...grpc.CallOption) (*account.DisableAdminAccountResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DisableAdminAccount", varargs...) + ret0, _ := ret[0].(*account.DisableAdminAccountResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DisableAdminAccount indicates an expected call of DisableAdminAccount. +func (mr *MockClientMockRecorder) DisableAdminAccount(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisableAdminAccount", reflect.TypeOf((*MockClient)(nil).DisableAdminAccount), varargs...) +} + +// EnableAPIKey mocks base method. +func (m *MockClient) EnableAPIKey(ctx context.Context, in *account.EnableAPIKeyRequest, opts ...grpc.CallOption) (*account.EnableAPIKeyResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "EnableAPIKey", varargs...) + ret0, _ := ret[0].(*account.EnableAPIKeyResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// EnableAPIKey indicates an expected call of EnableAPIKey. +func (mr *MockClientMockRecorder) EnableAPIKey(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableAPIKey", reflect.TypeOf((*MockClient)(nil).EnableAPIKey), varargs...) +} + +// EnableAccount mocks base method. 
+func (m *MockClient) EnableAccount(ctx context.Context, in *account.EnableAccountRequest, opts ...grpc.CallOption) (*account.EnableAccountResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "EnableAccount", varargs...) + ret0, _ := ret[0].(*account.EnableAccountResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// EnableAccount indicates an expected call of EnableAccount. +func (mr *MockClientMockRecorder) EnableAccount(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableAccount", reflect.TypeOf((*MockClient)(nil).EnableAccount), varargs...) +} + +// EnableAdminAccount mocks base method. +func (m *MockClient) EnableAdminAccount(ctx context.Context, in *account.EnableAdminAccountRequest, opts ...grpc.CallOption) (*account.EnableAdminAccountResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "EnableAdminAccount", varargs...) + ret0, _ := ret[0].(*account.EnableAdminAccountResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// EnableAdminAccount indicates an expected call of EnableAdminAccount. +func (mr *MockClientMockRecorder) EnableAdminAccount(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableAdminAccount", reflect.TypeOf((*MockClient)(nil).EnableAdminAccount), varargs...) +} + +// GetAPIKey mocks base method. 
+func (m *MockClient) GetAPIKey(ctx context.Context, in *account.GetAPIKeyRequest, opts ...grpc.CallOption) (*account.GetAPIKeyResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetAPIKey", varargs...) + ret0, _ := ret[0].(*account.GetAPIKeyResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAPIKey indicates an expected call of GetAPIKey. +func (mr *MockClientMockRecorder) GetAPIKey(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAPIKey", reflect.TypeOf((*MockClient)(nil).GetAPIKey), varargs...) +} + +// GetAPIKeyBySearchingAllEnvironments mocks base method. +func (m *MockClient) GetAPIKeyBySearchingAllEnvironments(ctx context.Context, in *account.GetAPIKeyBySearchingAllEnvironmentsRequest, opts ...grpc.CallOption) (*account.GetAPIKeyBySearchingAllEnvironmentsResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetAPIKeyBySearchingAllEnvironments", varargs...) + ret0, _ := ret[0].(*account.GetAPIKeyBySearchingAllEnvironmentsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAPIKeyBySearchingAllEnvironments indicates an expected call of GetAPIKeyBySearchingAllEnvironments. +func (mr *MockClientMockRecorder) GetAPIKeyBySearchingAllEnvironments(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAPIKeyBySearchingAllEnvironments", reflect.TypeOf((*MockClient)(nil).GetAPIKeyBySearchingAllEnvironments), varargs...) +} + +// GetAccount mocks base method. 
+func (m *MockClient) GetAccount(ctx context.Context, in *account.GetAccountRequest, opts ...grpc.CallOption) (*account.GetAccountResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetAccount", varargs...) + ret0, _ := ret[0].(*account.GetAccountResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccount indicates an expected call of GetAccount. +func (mr *MockClientMockRecorder) GetAccount(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccount", reflect.TypeOf((*MockClient)(nil).GetAccount), varargs...) +} + +// GetAdminAccount mocks base method. +func (m *MockClient) GetAdminAccount(ctx context.Context, in *account.GetAdminAccountRequest, opts ...grpc.CallOption) (*account.GetAdminAccountResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetAdminAccount", varargs...) + ret0, _ := ret[0].(*account.GetAdminAccountResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAdminAccount indicates an expected call of GetAdminAccount. +func (mr *MockClientMockRecorder) GetAdminAccount(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAdminAccount", reflect.TypeOf((*MockClient)(nil).GetAdminAccount), varargs...) +} + +// GetMe mocks base method. 
+func (m *MockClient) GetMe(ctx context.Context, in *account.GetMeRequest, opts ...grpc.CallOption) (*account.GetMeResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetMe", varargs...) + ret0, _ := ret[0].(*account.GetMeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMe indicates an expected call of GetMe. +func (mr *MockClientMockRecorder) GetMe(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMe", reflect.TypeOf((*MockClient)(nil).GetMe), varargs...) +} + +// GetMeByEmail mocks base method. +func (m *MockClient) GetMeByEmail(ctx context.Context, in *account.GetMeByEmailRequest, opts ...grpc.CallOption) (*account.GetMeResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetMeByEmail", varargs...) + ret0, _ := ret[0].(*account.GetMeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMeByEmail indicates an expected call of GetMeByEmail. +func (mr *MockClientMockRecorder) GetMeByEmail(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMeByEmail", reflect.TypeOf((*MockClient)(nil).GetMeByEmail), varargs...) +} + +// ListAPIKeys mocks base method. +func (m *MockClient) ListAPIKeys(ctx context.Context, in *account.ListAPIKeysRequest, opts ...grpc.CallOption) (*account.ListAPIKeysResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListAPIKeys", varargs...) 
+ ret0, _ := ret[0].(*account.ListAPIKeysResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListAPIKeys indicates an expected call of ListAPIKeys. +func (mr *MockClientMockRecorder) ListAPIKeys(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAPIKeys", reflect.TypeOf((*MockClient)(nil).ListAPIKeys), varargs...) +} + +// ListAccounts mocks base method. +func (m *MockClient) ListAccounts(ctx context.Context, in *account.ListAccountsRequest, opts ...grpc.CallOption) (*account.ListAccountsResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListAccounts", varargs...) + ret0, _ := ret[0].(*account.ListAccountsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListAccounts indicates an expected call of ListAccounts. +func (mr *MockClientMockRecorder) ListAccounts(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAccounts", reflect.TypeOf((*MockClient)(nil).ListAccounts), varargs...) +} + +// ListAdminAccounts mocks base method. +func (m *MockClient) ListAdminAccounts(ctx context.Context, in *account.ListAdminAccountsRequest, opts ...grpc.CallOption) (*account.ListAdminAccountsResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListAdminAccounts", varargs...) + ret0, _ := ret[0].(*account.ListAdminAccountsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListAdminAccounts indicates an expected call of ListAdminAccounts. 
+func (mr *MockClientMockRecorder) ListAdminAccounts(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAdminAccounts", reflect.TypeOf((*MockClient)(nil).ListAdminAccounts), varargs...) +} diff --git a/pkg/account/cmd/apikeycacher/BUILD.bazel b/pkg/account/cmd/apikeycacher/BUILD.bazel new file mode 100644 index 000000000..307b30d72 --- /dev/null +++ b/pkg/account/cmd/apikeycacher/BUILD.bazel @@ -0,0 +1,24 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["apikeycacher.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/account/cmd/apikeycacher", + visibility = ["//visibility:public"], + deps = [ + "//pkg/account/apikeycacher:go_default_library", + "//pkg/account/client:go_default_library", + "//pkg/cache/v3:go_default_library", + "//pkg/cli:go_default_library", + "//pkg/environment/client:go_default_library", + "//pkg/health:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/pubsub:go_default_library", + "//pkg/pubsub/puller:go_default_library", + "//pkg/redis/v3:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/rpc/client:go_default_library", + "@in_gopkg_alecthomas_kingpin_v2//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/account/cmd/apikeycacher/apikeycacher.go b/pkg/account/cmd/apikeycacher/apikeycacher.go new file mode 100644 index 000000000..daacdc6d5 --- /dev/null +++ b/pkg/account/cmd/apikeycacher/apikeycacher.go @@ -0,0 +1,193 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package apikeycacher + +import ( + "context" + "time" + + "go.uber.org/zap" + kingpin "gopkg.in/alecthomas/kingpin.v2" + + akc "github.com/bucketeer-io/bucketeer/pkg/account/apikeycacher" + accountclient "github.com/bucketeer-io/bucketeer/pkg/account/client" + cachev3 "github.com/bucketeer-io/bucketeer/pkg/cache/v3" + "github.com/bucketeer-io/bucketeer/pkg/cli" + environmentclient "github.com/bucketeer-io/bucketeer/pkg/environment/client" + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/pubsub" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller" + redisv3 "github.com/bucketeer-io/bucketeer/pkg/redis/v3" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/rpc/client" +) + +const command = "apikey-cacher" + +type apiKeyCacher struct { + *kingpin.CmdClause + port *int + project *string + subscription *string + topic *string + maxMPS *int + numWorkers *int + flushSize *int + flushInterval *time.Duration + accountService *string + environmentService *string + redisServerName *string + redisAddr *string + certPath *string + keyPath *string + serviceTokenPath *string + pullerNumGoroutines *int + pullerMaxOutstandingMessages *int + pullerMaxOutstandingBytes *int +} + +func RegisterCommand(r cli.CommandRegistry, p cli.ParentCommand) cli.Command { + cmd := p.Command(command, "Start api key cacher") + c := &apiKeyCacher{ + CmdClause: cmd, + port: cmd.Flag("port", "Port to bind to.").Default("9090").Int(), + project: 
cmd.Flag("project", "Google Cloud project name.").String(), + subscription: cmd.Flag("subscription", "Google PubSub subscription name.").String(), + topic: cmd.Flag("topic", "Google PubSub topic name.").String(), + maxMPS: cmd.Flag("max-mps", "Maximum messages should be handled in a second.").Default("5000").Int(), + numWorkers: cmd.Flag("num-workers", "Number of workers.").Default("2").Int(), + flushSize: cmd.Flag("flush-size", "Maximum number of messages in one flush.").Default("100").Int(), + accountService: cmd.Flag( + "account-service", + "bucketeer-account-service address.", + ).Default("account:9090").String(), + environmentService: cmd.Flag( + "environment-service", + "bucketeer-environment-service address.", + ).Default("environment:9090").String(), + flushInterval: cmd.Flag("flush-interval", "Maximum interval between two flushes.").Default("1m").Duration(), + redisServerName: cmd.Flag("redis-server-name", "Name of the redis.").Required().String(), + redisAddr: cmd.Flag("redis-addr", "Address of the redis.").Required().String(), + certPath: cmd.Flag("cert", "Path to TLS certificate.").Required().String(), + keyPath: cmd.Flag("key", "Path to TLS key.").Required().String(), + serviceTokenPath: cmd.Flag("service-token", "Path to service token.").Required().String(), + pullerNumGoroutines: cmd.Flag( + "puller-num-goroutines", + "Number of goroutines will be spawned to pull messages.", + ).Int(), + pullerMaxOutstandingMessages: cmd.Flag( + "puller-max-outstanding-messages", + "Maximum number of unprocessed messages.", + ).Int(), + pullerMaxOutstandingBytes: cmd.Flag("puller-max-outstanding-bytes", "Maximum size of unprocessed messages.").Int(), + } + r.RegisterCommand(c) + return c +} + +func (c *apiKeyCacher) Run(ctx context.Context, metrics metrics.Metrics, logger *zap.Logger) error { + registerer := metrics.DefaultRegisterer() + + puller, err := c.createPuller(ctx, logger) + if err != nil { + return err + } + + creds, err := 
client.NewPerRPCCredentials(*c.serviceTokenPath) + if err != nil { + return err + } + + accountClient, err := accountclient.NewClient(*c.accountService, *c.certPath, + client.WithPerRPCCredentials(creds), + client.WithDialTimeout(30*time.Second), + client.WithBlock(), + client.WithMetrics(registerer), + client.WithLogger(logger), + ) + if err != nil { + return err + } + defer accountClient.Close() + + environmentClient, err := environmentclient.NewClient(*c.environmentService, *c.certPath, + client.WithPerRPCCredentials(creds), + client.WithDialTimeout(30*time.Second), + client.WithBlock(), + client.WithMetrics(registerer), + client.WithLogger(logger), + ) + if err != nil { + return err + } + defer environmentClient.Close() + + redisV3Client, err := redisv3.NewClient( + *c.redisAddr, + redisv3.WithServerName(*c.redisServerName), + redisv3.WithMetrics(registerer), + redisv3.WithLogger(logger), + ) + if err != nil { + return err + } + defer redisV3Client.Close() + + redisV3Cache := cachev3.NewRedisCache(redisV3Client) + cacher := akc.NewEnvironmentAPIKeyCacher(puller, accountClient, environmentClient, redisV3Cache, + akc.WithMaxMPS(*c.maxMPS), + akc.WithNumWorkers(*c.numWorkers), + akc.WithFlushSize(*c.flushSize), + akc.WithFlushInterval(*c.flushInterval), + akc.WithMetrics(registerer), + akc.WithLogger(logger), + ) + defer cacher.Stop() + go cacher.Run() // nolint:errcheck + + healthChecker := health.NewGrpcChecker( + health.WithTimeout(time.Second), + health.WithCheck("metrics", metrics.Check), + health.WithCheck("cacher", cacher.Check), + ) + go healthChecker.Run(ctx) + + server := rpc.NewServer(healthChecker, *c.certPath, *c.keyPath, + rpc.WithPort(*c.port), + rpc.WithMetrics(registerer), + rpc.WithLogger(logger), + rpc.WithHandler("/health", healthChecker), + ) + defer server.Stop(10 * time.Second) + go server.Run() + + <-ctx.Done() + return nil +} + +func (c *apiKeyCacher) createPuller(ctx context.Context, logger *zap.Logger) (puller.Puller, error) { + ctx, 
cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + client, err := pubsub.NewClient(ctx, *c.project, pubsub.WithLogger(logger)) + if err != nil { + return nil, err + } + return client.CreatePuller(*c.subscription, *c.topic, + pubsub.WithNumGoroutines(*c.pullerNumGoroutines), + pubsub.WithMaxOutstandingMessages(*c.pullerMaxOutstandingMessages), + pubsub.WithMaxOutstandingBytes(*c.pullerMaxOutstandingBytes), + ) +} diff --git a/pkg/account/cmd/server/BUILD.bazel b/pkg/account/cmd/server/BUILD.bazel new file mode 100644 index 000000000..b82bb5970 --- /dev/null +++ b/pkg/account/cmd/server/BUILD.bazel @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["server.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/account/cmd/server", + visibility = ["//visibility:public"], + deps = [ + "//pkg/account/api:go_default_library", + "//pkg/cli:go_default_library", + "//pkg/environment/client:go_default_library", + "//pkg/health:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/pubsub:go_default_library", + "//pkg/pubsub/publisher:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/rpc/client:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//pkg/token:go_default_library", + "@in_gopkg_alecthomas_kingpin_v2//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/account/cmd/server/server.go b/pkg/account/cmd/server/server.go new file mode 100644 index 000000000..c11b7a3a2 --- /dev/null +++ b/pkg/account/cmd/server/server.go @@ -0,0 +1,184 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + "context" + "time" + + "go.uber.org/zap" + kingpin "gopkg.in/alecthomas/kingpin.v2" + + "github.com/bucketeer-io/bucketeer/pkg/account/api" + "github.com/bucketeer-io/bucketeer/pkg/cli" + environmentclient "github.com/bucketeer-io/bucketeer/pkg/environment/client" + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/pubsub" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + "github.com/bucketeer-io/bucketeer/pkg/token" +) + +const command = "server" + +type server struct { + *kingpin.CmdClause + port *int + project *string + mysqlUser *string + mysqlPass *string + mysqlHost *string + mysqlPort *int + mysqlDBName *string + topic *string + environmentService *string + certPath *string + keyPath *string + serviceTokenPath *string + + oauthKeyPath *string + oauthClientID *string + oauthIssuer *string +} + +func RegisterCommand(r cli.CommandRegistry, p cli.ParentCommand) cli.Command { + cmd := p.Command(command, "Start the server") + server := &server{ + CmdClause: cmd, + port: cmd.Flag("port", "Port to bind to.").Default("9090").Int(), + project: cmd.Flag("project", "Google Cloud project name.").String(), + mysqlUser: cmd.Flag("mysql-user", "MySQL user.").Required().String(), + mysqlPass: cmd.Flag("mysql-pass", "MySQL password.").Required().String(), + mysqlHost: 
cmd.Flag("mysql-host", "MySQL host.").Required().String(), + mysqlPort: cmd.Flag("mysql-port", "MySQL port.").Required().Int(), + mysqlDBName: cmd.Flag("mysql-db-name", "MySQL database name.").Required().String(), + topic: cmd.Flag("topic", "PubSub topic to publish domain events.").Required().String(), + environmentService: cmd.Flag( + "environment-service", + "bucketeer-environment-service address.", + ).Default("environment:9090").String(), + certPath: cmd.Flag("cert", "Path to TLS certificate.").Required().String(), + keyPath: cmd.Flag("key", "Path to TLS key.").Required().String(), + serviceTokenPath: cmd.Flag("service-token", "Path to service token.").Required().String(), + oauthKeyPath: cmd.Flag("oauth-key", "Path to public key used to verify oauth token.").Required().String(), + oauthClientID: cmd.Flag("oauth-client-id", "The oauth clientID registered at dex.").Required().String(), + oauthIssuer: cmd.Flag("oauth-issuer", "The url of dex issuer.").Required().String(), + } + r.RegisterCommand(server) + return server +} + +func (s *server) Run(ctx context.Context, metrics metrics.Metrics, logger *zap.Logger) error { + registerer := metrics.DefaultRegisterer() + + mysqlClient, err := s.createMySQLClient(ctx, registerer, logger) + if err != nil { + return err + } + defer mysqlClient.Close() + + publisher, err := s.createPublisher(ctx, registerer, logger) + if err != nil { + return err + } + defer publisher.Stop() + + creds, err := client.NewPerRPCCredentials(*s.serviceTokenPath) + if err != nil { + return err + } + environmentClient, err := environmentclient.NewClient(*s.environmentService, *s.certPath, + client.WithPerRPCCredentials(creds), + client.WithDialTimeout(30*time.Second), + client.WithBlock(), + client.WithMetrics(registerer), + client.WithLogger(logger), + ) + if err != nil { + return err + } + defer environmentClient.Close() + + service := api.NewAccountService( + environmentClient, + mysqlClient, + publisher, + api.WithLogger(logger), + ) + + 
verifier, err := token.NewVerifier(*s.oauthKeyPath, *s.oauthIssuer, *s.oauthClientID) + if err != nil { + return err + } + + healthChecker := health.NewGrpcChecker( + health.WithTimeout(time.Second), + health.WithCheck("metrics", metrics.Check), + ) + go healthChecker.Run(ctx) + + server := rpc.NewServer(service, *s.certPath, *s.keyPath, + rpc.WithPort(*s.port), + rpc.WithVerifier(verifier), + rpc.WithMetrics(registerer), + rpc.WithLogger(logger), + rpc.WithService(healthChecker), + rpc.WithHandler("/health", healthChecker), + ) + defer server.Stop(10 * time.Second) + go server.Run() + + <-ctx.Done() + return nil +} + +func (s *server) createMySQLClient( + ctx context.Context, + registerer metrics.Registerer, + logger *zap.Logger, +) (mysql.Client, error) { + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + return mysql.NewClient( + ctx, + *s.mysqlUser, *s.mysqlPass, *s.mysqlHost, + *s.mysqlPort, + *s.mysqlDBName, + mysql.WithLogger(logger), + mysql.WithMetrics(registerer), + ) +} + +func (s *server) createPublisher( + ctx context.Context, + registerer metrics.Registerer, + logger *zap.Logger, +) (publisher.Publisher, error) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + client, err := pubsub.NewClient( + ctx, + *s.project, + pubsub.WithMetrics(registerer), + pubsub.WithLogger(logger), + ) + if err != nil { + return nil, err + } + return client.CreatePublisher(*s.topic) +} diff --git a/pkg/account/command/BUILD.bazel b/pkg/account/command/BUILD.bazel new file mode 100644 index 000000000..410c7bcc2 --- /dev/null +++ b/pkg/account/command/BUILD.bazel @@ -0,0 +1,39 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "account.go", + "admin_account.go", + "api_key.go", + "command.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/account/command", + visibility = ["//visibility:public"], + deps = [ + 
"//pkg/account/domain:go_default_library", + "//pkg/domainevent/domain:go_default_library", + "//pkg/pubsub/publisher:go_default_library", + "//proto/account:go_default_library", + "//proto/event/domain:go_default_library", + "@com_github_golang_protobuf//proto:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "account_test.go", + "admin_account_test.go", + "api_key_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//pkg/account/domain:go_default_library", + "//pkg/pubsub/publisher/mock:go_default_library", + "//proto/account:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + ], +) diff --git a/pkg/account/command/account.go b/pkg/account/command/account.go new file mode 100644 index 000000000..8186ddf54 --- /dev/null +++ b/pkg/account/command/account.go @@ -0,0 +1,132 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package command + +import ( + "context" + + "github.com/golang/protobuf/proto" // nolint:staticcheck + + "github.com/bucketeer-io/bucketeer/pkg/account/domain" + domainevent "github.com/bucketeer-io/bucketeer/pkg/domainevent/domain" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +type accountCommandHandler struct { + editor *eventproto.Editor + account *domain.Account + publisher publisher.Publisher + environmentNamespace string +} + +func NewAccountCommandHandler( + editor *eventproto.Editor, + account *domain.Account, + p publisher.Publisher, + environmentNamespace string, +) Handler { + return &accountCommandHandler{ + editor: editor, + account: account, + publisher: p, + environmentNamespace: environmentNamespace, + } +} + +func (h *accountCommandHandler) Handle(ctx context.Context, cmd Command) error { + switch c := cmd.(type) { + case *accountproto.CreateAccountCommand: + return h.create(ctx, c) + case *accountproto.ChangeAccountRoleCommand: + return h.changeRole(ctx, c) + case *accountproto.EnableAccountCommand: + return h.enable(ctx, c) + case *accountproto.DisableAccountCommand: + return h.disable(ctx, c) + case *accountproto.DeleteAccountCommand: + return h.delete(ctx, c) + default: + return ErrBadCommand + } +} + +func (h *accountCommandHandler) create(ctx context.Context, cmd *accountproto.CreateAccountCommand) error { + return h.send(ctx, eventproto.Event_ACCOUNT_CREATED, &eventproto.AccountCreatedEvent{ + Id: h.account.Id, + Email: h.account.Email, + Name: h.account.Name, + Role: h.account.Role, + Disabled: h.account.Disabled, + CreatedAt: h.account.CreatedAt, + UpdatedAt: h.account.UpdatedAt, + }) +} + +func (h *accountCommandHandler) changeRole(ctx context.Context, cmd *accountproto.ChangeAccountRoleCommand) error { + if err := h.account.ChangeRole(cmd.Role); err != nil { + return err + } + return 
h.send(ctx, eventproto.Event_ACCOUNT_ROLE_CHANGED, &eventproto.AccountRoleChangedEvent{ + Id: h.account.Id, + Role: cmd.Role, + }) +} + +func (h *accountCommandHandler) enable(ctx context.Context, cmd *accountproto.EnableAccountCommand) error { + if err := h.account.Enable(); err != nil { + return err + } + return h.send(ctx, eventproto.Event_ACCOUNT_ENABLED, &eventproto.AccountEnabledEvent{ + Id: h.account.Id, + }) +} + +func (h *accountCommandHandler) disable(ctx context.Context, cmd *accountproto.DisableAccountCommand) error { + if err := h.account.Disable(); err != nil { + return err + } + return h.send(ctx, eventproto.Event_ACCOUNT_DISABLED, &eventproto.AccountDisabledEvent{ + Id: h.account.Id, + }) +} + +func (h *accountCommandHandler) delete(ctx context.Context, cmd *accountproto.DeleteAccountCommand) error { + if err := h.account.Delete(); err != nil { + return err + } + return h.send(ctx, eventproto.Event_ACCOUNT_DELETED, &eventproto.AccountDeletedEvent{ + Id: h.account.Id, + }) +} + +func (h *accountCommandHandler) send(ctx context.Context, eventType eventproto.Event_Type, event proto.Message) error { + e, err := domainevent.NewEvent( + h.editor, + eventproto.Event_ACCOUNT, + h.account.Id, + eventType, + event, + h.environmentNamespace, + ) + if err != nil { + return err + } + if err := h.publisher.Publish(ctx, e); err != nil { + return err + } + return nil +} diff --git a/pkg/account/command/account_test.go b/pkg/account/command/account_test.go new file mode 100644 index 000000000..846690865 --- /dev/null +++ b/pkg/account/command/account_test.go @@ -0,0 +1,106 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package command + +import ( + "context" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/bucketeer-io/bucketeer/pkg/account/domain" + publishermock "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher/mock" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" +) + +func TestNewAccountCommandHandler(t *testing.T) { + t.Parallel() + a := NewAccountCommandHandler(nil, nil, nil, "") + assert.IsType(t, &accountCommandHandler{}, a) +} + +func TestHandle(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*accountCommandHandler) + input Command + expectedErr error + }{ + "CreateAccountCommand: success": { + setup: func(h *accountCommandHandler) { + a, err := domain.NewAccount("email", accountproto.Account_VIEWER) + require.NoError(t, err) + h.account = a + h.publisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil) + }, + input: &accountproto.CreateAccountCommand{}, + expectedErr: nil, + }, + "ChangeAccountRoleCommand: success": { + setup: func(h *accountCommandHandler) { + a, err := domain.NewAccount("email", accountproto.Account_VIEWER) + require.NoError(t, err) + h.account = a + h.publisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil) + }, + input: &accountproto.ChangeAccountRoleCommand{}, + expectedErr: nil, + }, + "EnableAccountCommand: success": { + setup: 
func(h *accountCommandHandler) { + a, err := domain.NewAccount("email", accountproto.Account_VIEWER) + require.NoError(t, err) + h.account = a + h.publisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil) + }, + input: &accountproto.EnableAccountCommand{}, + expectedErr: nil, + }, + "DisableAccountCommand: success": { + setup: func(h *accountCommandHandler) { + a, err := domain.NewAccount("email", accountproto.Account_VIEWER) + require.NoError(t, err) + h.account = a + h.publisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil) + }, + input: &accountproto.DisableAccountCommand{}, + expectedErr: nil, + }, + "ErrBadCommand": { + input: nil, + expectedErr: ErrBadCommand, + }, + } + for msg, p := range patterns { + h := newAccountCommandHandlerWithMock(t, mockController) + if p.setup != nil { + p.setup(h) + } + err := h.Handle(context.Background(), p.input) + assert.Equal(t, p.expectedErr, err, "%s", msg) + } +} + +func newAccountCommandHandlerWithMock(t *testing.T, mockController *gomock.Controller) *accountCommandHandler { + return &accountCommandHandler{ + publisher: publishermock.NewMockPublisher(mockController), + environmentNamespace: "ns0", + } +} diff --git a/pkg/account/command/admin_account.go b/pkg/account/command/admin_account.go new file mode 100644 index 000000000..ca641260b --- /dev/null +++ b/pkg/account/command/admin_account.go @@ -0,0 +1,103 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package command + +import ( + "context" + + "github.com/golang/protobuf/proto" // nolint:staticcheck + + "github.com/bucketeer-io/bucketeer/pkg/account/domain" + domainevent "github.com/bucketeer-io/bucketeer/pkg/domainevent/domain" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +type adminAccountCommandHandler struct { + editor *eventproto.Editor + account *domain.Account + publisher publisher.Publisher +} + +func NewAdminAccountCommandHandler( + editor *eventproto.Editor, + account *domain.Account, + p publisher.Publisher, +) Handler { + return &adminAccountCommandHandler{ + editor: editor, + account: account, + publisher: p, + } +} + +func (h *adminAccountCommandHandler) Handle(ctx context.Context, cmd Command) error { + switch c := cmd.(type) { + case *accountproto.CreateAdminAccountCommand: + return h.create(ctx, c) + case *accountproto.EnableAdminAccountCommand: + return h.enable(ctx, c) + case *accountproto.DisableAdminAccountCommand: + return h.disable(ctx, c) + default: + return ErrBadCommand + } +} + +func (h *adminAccountCommandHandler) create(ctx context.Context, cmd *accountproto.CreateAdminAccountCommand) error { + return h.send(ctx, eventproto.Event_ADMIN_ACCOUNT_CREATED, &eventproto.AdminAccountCreatedEvent{ + Id: h.account.Id, + Email: h.account.Email, + Name: h.account.Name, + Role: h.account.Role, + Disabled: h.account.Disabled, + CreatedAt: h.account.CreatedAt, + UpdatedAt: h.account.UpdatedAt, + }) +} + +func (h *adminAccountCommandHandler) enable(ctx context.Context, cmd *accountproto.EnableAdminAccountCommand) error { + if err := h.account.Enable(); err != nil { + return err + } + return h.send(ctx, eventproto.Event_ADMIN_ACCOUNT_ENABLED, &eventproto.AdminAccountEnabledEvent{ + Id: 
h.account.Id, + }) +} + +func (h *adminAccountCommandHandler) disable(ctx context.Context, cmd *accountproto.DisableAdminAccountCommand) error { + if err := h.account.Disable(); err != nil { + return err + } + return h.send(ctx, eventproto.Event_ADMIN_ACCOUNT_DISABLED, &eventproto.AdminAccountDisabledEvent{ + Id: h.account.Id, + }) +} + +func (h *adminAccountCommandHandler) send( + ctx context.Context, + eventType eventproto.Event_Type, + event proto.Message, +) error { + e, err := domainevent.NewAdminEvent(h.editor, eventproto.Event_ADMIN_ACCOUNT, h.account.Id, eventType, event) + if err != nil { + return err + } + if err := h.publisher.Publish(ctx, e); err != nil { + return err + } + return nil +} diff --git a/pkg/account/command/admin_account_test.go b/pkg/account/command/admin_account_test.go new file mode 100644 index 000000000..baeb7bc6a --- /dev/null +++ b/pkg/account/command/admin_account_test.go @@ -0,0 +1,85 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package command + +import ( + "context" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/bucketeer-io/bucketeer/pkg/account/domain" + publishermock "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher/mock" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" +) + +func TestNewAdminAccountCommandHandler(t *testing.T) { + t.Parallel() + a := NewAdminAccountCommandHandler(nil, nil, nil) + assert.IsType(t, &adminAccountCommandHandler{}, a) +} + +func TestHandleAdmin(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*adminAccountCommandHandler) + input Command + expectedErr error + }{ + "CreateAdminAccountCommand: success": { + setup: func(h *adminAccountCommandHandler) { + a, err := domain.NewAccount("email", accountproto.Account_VIEWER) + require.NoError(t, err) + h.account = a + h.publisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil) + }, + input: &accountproto.CreateAdminAccountCommand{}, + expectedErr: nil, + }, + "EnableAdminAccountCommand: success": { + setup: func(h *adminAccountCommandHandler) { + a, err := domain.NewAccount("email", accountproto.Account_VIEWER) + require.NoError(t, err) + h.account = a + h.publisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil) + }, + input: &accountproto.EnableAdminAccountCommand{}, + expectedErr: nil, + }, + "ErrBadCommand": { + input: nil, + expectedErr: ErrBadCommand, + }, + } + for msg, p := range patterns { + h := newAdminAccountCommandHandlerWithMock(t, mockController) + if p.setup != nil { + p.setup(h) + } + err := h.Handle(context.Background(), p.input) + assert.Equal(t, p.expectedErr, err, "%s", msg) + } +} + +func newAdminAccountCommandHandlerWithMock(t *testing.T, mockController *gomock.Controller) 
*adminAccountCommandHandler { + return &adminAccountCommandHandler{ + publisher: publishermock.NewMockPublisher(mockController), + } +} diff --git a/pkg/account/command/api_key.go b/pkg/account/command/api_key.go new file mode 100644 index 000000000..9c006c49d --- /dev/null +++ b/pkg/account/command/api_key.go @@ -0,0 +1,120 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package command + +import ( + "context" + + "github.com/golang/protobuf/proto" // nolint:staticcheck + + "github.com/bucketeer-io/bucketeer/pkg/account/domain" + domainevent "github.com/bucketeer-io/bucketeer/pkg/domainevent/domain" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +type apiKeyCommandHandler struct { + editor *eventproto.Editor + apiKey *domain.APIKey + publisher publisher.Publisher + environmentNamespace string +} + +func NewAPIKeyCommandHandler( + editor *eventproto.Editor, + apiKey *domain.APIKey, + p publisher.Publisher, + environmentNamespace string, +) Handler { + return &apiKeyCommandHandler{ + editor: editor, + apiKey: apiKey, + publisher: p, + environmentNamespace: environmentNamespace, + } +} + +func (h *apiKeyCommandHandler) Handle(ctx context.Context, cmd Command) error { + switch c := cmd.(type) { + case *accountproto.CreateAPIKeyCommand: + return h.create(ctx, c) + 
case *accountproto.ChangeAPIKeyNameCommand: + return h.rename(ctx, c) + case *accountproto.EnableAPIKeyCommand: + return h.enable(ctx, c) + case *accountproto.DisableAPIKeyCommand: + return h.disable(ctx, c) + default: + return ErrBadCommand + } +} + +func (h *apiKeyCommandHandler) create(ctx context.Context, cmd *accountproto.CreateAPIKeyCommand) error { + return h.send(ctx, eventproto.Event_APIKEY_CREATED, &eventproto.APIKeyCreatedEvent{ + Id: h.apiKey.Id, + Name: h.apiKey.Name, + Role: h.apiKey.Role, + Disabled: h.apiKey.Disabled, + CreatedAt: h.apiKey.CreatedAt, + UpdatedAt: h.apiKey.UpdatedAt, + }) +} + +func (h *apiKeyCommandHandler) rename(ctx context.Context, cmd *accountproto.ChangeAPIKeyNameCommand) error { + if err := h.apiKey.Rename(cmd.Name); err != nil { + return err + } + return h.send(ctx, eventproto.Event_APIKEY_NAME_CHANGED, &eventproto.APIKeyNameChangedEvent{ + Id: h.apiKey.Id, + Name: h.apiKey.Name, + }) +} + +func (h *apiKeyCommandHandler) enable(ctx context.Context, cmd *accountproto.EnableAPIKeyCommand) error { + if err := h.apiKey.Enable(); err != nil { + return err + } + return h.send(ctx, eventproto.Event_APIKEY_ENABLED, &eventproto.APIKeyEnabledEvent{ + Id: h.apiKey.Id, + }) +} + +func (h *apiKeyCommandHandler) disable(ctx context.Context, cmd *accountproto.DisableAPIKeyCommand) error { + if err := h.apiKey.Disable(); err != nil { + return err + } + return h.send(ctx, eventproto.Event_APIKEY_DISABLED, &eventproto.APIKeyDisabledEvent{ + Id: h.apiKey.Id, + }) +} + +func (h *apiKeyCommandHandler) send(ctx context.Context, eventType eventproto.Event_Type, event proto.Message) error { + e, err := domainevent.NewEvent( + h.editor, + eventproto.Event_APIKEY, + h.apiKey.Id, + eventType, + event, + h.environmentNamespace, + ) + if err != nil { + return err + } + if err := h.publisher.Publish(ctx, e); err != nil { + return err + } + return nil +} diff --git a/pkg/account/command/api_key_test.go b/pkg/account/command/api_key_test.go new file mode 
100644 index 000000000..743777d12 --- /dev/null +++ b/pkg/account/command/api_key_test.go @@ -0,0 +1,105 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package command + +import ( + "context" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/bucketeer-io/bucketeer/pkg/account/domain" + publishermock "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher/mock" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" +) + +func TestNewAPIKeyCommandHandler(t *testing.T) { + t.Parallel() + a := NewAPIKeyCommandHandler(nil, nil, nil, "") + assert.IsType(t, &apiKeyCommandHandler{}, a) +} + +func newAPIKeyCommandHandlerWithMock(t *testing.T, mockController *gomock.Controller) *apiKeyCommandHandler { + return &apiKeyCommandHandler{ + publisher: publishermock.NewMockPublisher(mockController), + } +} + +func TestAPIKeyHandle(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*apiKeyCommandHandler) + input Command + expectedErr error + }{ + "CreateAPIKeyCommand: success": { + setup: func(h *apiKeyCommandHandler) { + a, err := domain.NewAPIKey("email", accountproto.APIKey_SDK) + require.NoError(t, err) + h.apiKey = a + h.publisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil) 
+ }, + input: &accountproto.CreateAPIKeyCommand{}, + expectedErr: nil, + }, + "ChangeAPIKeyNameCommand: success": { + setup: func(h *apiKeyCommandHandler) { + a, err := domain.NewAPIKey("email", accountproto.APIKey_SDK) + require.NoError(t, err) + h.apiKey = a + h.publisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil) + }, + input: &accountproto.ChangeAPIKeyNameCommand{}, + expectedErr: nil, + }, + "EnableAPIKeyCommand: success": { + setup: func(h *apiKeyCommandHandler) { + a, err := domain.NewAPIKey("email", accountproto.APIKey_SDK) + require.NoError(t, err) + h.apiKey = a + h.publisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil) + }, + input: &accountproto.EnableAPIKeyCommand{}, + expectedErr: nil, + }, + "DisableAPIKeyCommand: success": { + setup: func(h *apiKeyCommandHandler) { + a, err := domain.NewAPIKey("email", accountproto.APIKey_SDK) + require.NoError(t, err) + h.apiKey = a + h.publisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil) + }, + input: &accountproto.DisableAPIKeyCommand{}, + expectedErr: nil, + }, + "ErrBadCommand": { + input: nil, + expectedErr: ErrBadCommand, + }, + } + for msg, p := range patterns { + h := newAPIKeyCommandHandlerWithMock(t, mockController) + if p.setup != nil { + p.setup(h) + } + err := h.Handle(context.Background(), p.input) + assert.Equal(t, p.expectedErr, err, "%s", msg) + } +} diff --git a/pkg/account/command/command.go b/pkg/account/command/command.go new file mode 100644 index 000000000..f212d8887 --- /dev/null +++ b/pkg/account/command/command.go @@ -0,0 +1,30 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package command + +import ( + "context" + "errors" +) + +var ( + ErrBadCommand = errors.New("command: cannot handle command") +) + +type Command interface{} + +type Handler interface { + Handle(ctx context.Context, cmd Command) error +} diff --git a/pkg/account/domain/BUILD.bazel b/pkg/account/domain/BUILD.bazel new file mode 100644 index 000000000..ab2b9e1b2 --- /dev/null +++ b/pkg/account/domain/BUILD.bazel @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "account.go", + "api_key.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/account/domain", + visibility = ["//visibility:public"], + deps = ["//proto/account:go_default_library"], +) + +go_test( + name = "go_default_test", + srcs = [ + "account_test.go", + "api_key_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//proto/account:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + ], +) diff --git a/pkg/account/domain/account.go b/pkg/account/domain/account.go new file mode 100644 index 000000000..067f3bf37 --- /dev/null +++ b/pkg/account/domain/account.go @@ -0,0 +1,60 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package domain + +import ( + "time" + + proto "github.com/bucketeer-io/bucketeer/proto/account" +) + +type Account struct { + *proto.Account +} + +func NewAccount(email string, role proto.Account_Role) (*Account, error) { + now := time.Now().Unix() + return &Account{&proto.Account{ + Id: email, + Email: email, + Role: role, + CreatedAt: now, + UpdatedAt: now, + }}, nil +} + +func (a *Account) ChangeRole(role proto.Account_Role) error { + a.Account.Role = role + a.UpdatedAt = time.Now().Unix() + return nil +} + +func (a *Account) Enable() error { + a.Account.Disabled = false + a.UpdatedAt = time.Now().Unix() + return nil +} + +func (a *Account) Disable() error { + a.Account.Disabled = true + a.UpdatedAt = time.Now().Unix() + return nil +} + +func (a *Account) Delete() error { + a.Account.Deleted = true + a.UpdatedAt = time.Now().Unix() + return nil +} diff --git a/pkg/account/domain/account_test.go b/pkg/account/domain/account_test.go new file mode 100644 index 000000000..c507c5e92 --- /dev/null +++ b/pkg/account/domain/account_test.go @@ -0,0 +1,53 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package domain + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + proto "github.com/bucketeer-io/bucketeer/proto/account" +) + +func TestNewAccount(t *testing.T) { + a, err := NewAccount("email", proto.Account_VIEWER) + assert.NoError(t, err) + assert.Equal(t, "email", a.Id) + assert.Equal(t, "email", a.Email) + assert.Equal(t, proto.Account_VIEWER, a.Role) +} + +func TestChangeRole(t *testing.T) { + a, err := NewAccount("email", proto.Account_VIEWER) + assert.NoError(t, err) + a.ChangeRole(proto.Account_EDITOR) + assert.Equal(t, proto.Account_EDITOR, a.Role) +} + +func TestEnable(t *testing.T) { + a, err := NewAccount("email", proto.Account_VIEWER) + assert.NoError(t, err) + a.Disabled = true + a.Enable() + assert.Equal(t, false, a.Disabled) +} + +func TestDisable(t *testing.T) { + a, err := NewAccount("email", proto.Account_VIEWER) + assert.NoError(t, err) + a.Disable() + assert.Equal(t, true, a.Disabled) +} diff --git a/pkg/account/domain/api_key.go b/pkg/account/domain/api_key.go new file mode 100644 index 000000000..ebe41082c --- /dev/null +++ b/pkg/account/domain/api_key.go @@ -0,0 +1,70 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package domain + +import ( + "crypto/rand" + "encoding/hex" + "time" + + proto "github.com/bucketeer-io/bucketeer/proto/account" +) + +const keyBytes = 32 + +type APIKey struct { + *proto.APIKey +} + +func NewAPIKey(name string, role proto.APIKey_Role) (*APIKey, error) { + key, err := generateKey() + if err != nil { + return nil, err + } + now := time.Now().Unix() + return &APIKey{&proto.APIKey{ + Id: key, + Name: name, + Role: role, + CreatedAt: now, + UpdatedAt: now, + }}, nil +} + +func (a *APIKey) Rename(name string) error { + a.APIKey.Name = name + a.UpdatedAt = time.Now().Unix() + return nil +} + +func (a *APIKey) Enable() error { + a.APIKey.Disabled = false + a.UpdatedAt = time.Now().Unix() + return nil +} + +func (a *APIKey) Disable() error { + a.APIKey.Disabled = true + a.UpdatedAt = time.Now().Unix() + return nil +} +func generateKey() (string, error) { + b := make([]byte, keyBytes) + _, err := rand.Read(b) + if err != nil { + return "", err + } + return hex.EncodeToString(b), nil +} diff --git a/pkg/account/domain/api_key_test.go b/pkg/account/domain/api_key_test.go new file mode 100644 index 000000000..a643f523a --- /dev/null +++ b/pkg/account/domain/api_key_test.go @@ -0,0 +1,59 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package domain + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + proto "github.com/bucketeer-io/bucketeer/proto/account" +) + +func TestNewAPIKey(t *testing.T) { + a, err := NewAPIKey("name", proto.APIKey_SDK) + assert.NoError(t, err) + assert.Equal(t, "name", a.Name) + assert.Equal(t, proto.APIKey_SDK, a.Role) +} + +func TestGenerateKey(t *testing.T) { + key, err := generateKey() + require.NoError(t, err) + require.NotEmpty(t, key) +} + +func TestRename(t *testing.T) { + a, err := NewAPIKey("name", proto.APIKey_SDK) + assert.NoError(t, err) + a.Rename("test") + assert.Equal(t, "test", a.Name) +} + +func TestAPIKeyEnable(t *testing.T) { + a, err := NewAPIKey("name", proto.APIKey_SDK) + assert.NoError(t, err) + a.Disabled = true + a.Enable() + assert.Equal(t, false, a.Disabled) +} + +func TestAPIKeyDisable(t *testing.T) { + a, err := NewAPIKey("name", proto.APIKey_SDK) + assert.NoError(t, err) + a.Disable() + assert.Equal(t, true, a.Disabled) +} diff --git a/pkg/account/storage/v2/BUILD.bazel b/pkg/account/storage/v2/BUILD.bazel new file mode 100644 index 000000000..b7aa3caaa --- /dev/null +++ b/pkg/account/storage/v2/BUILD.bazel @@ -0,0 +1,35 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "account.go", + "admin_account.go", + "api_key.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/account/storage/v2", + visibility = ["//visibility:public"], + deps = [ + "//pkg/account/domain:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//proto/account:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "account_test.go", + "admin_account_test.go", + "api_key_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//pkg/account/domain:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//pkg/storage/v2/mysql/mock:go_default_library", + 
"//proto/account:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + ], +) diff --git a/pkg/account/storage/v2/account.go b/pkg/account/storage/v2/account.go new file mode 100644 index 000000000..951379e14 --- /dev/null +++ b/pkg/account/storage/v2/account.go @@ -0,0 +1,245 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package v2 + +import ( + "context" + "errors" + "fmt" + + "github.com/bucketeer-io/bucketeer/pkg/account/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + proto "github.com/bucketeer-io/bucketeer/proto/account" +) + +var ( + ErrAccountAlreadyExists = errors.New("account: account already exists") + ErrAccountNotFound = errors.New("account: account not found") + ErrAccountUnexpectedAffectedRows = errors.New("account: account unexpected affected rows") +) + +type AccountStorage interface { + CreateAccount(ctx context.Context, a *domain.Account, environmentNamespace string) error + UpdateAccount(ctx context.Context, a *domain.Account, environmentNamespace string) error + GetAccount(ctx context.Context, id, environmentNamespace string) (*domain.Account, error) + ListAccounts( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, + ) ([]*proto.Account, int, int64, 
error) +} + +type accountStorage struct { + qe mysql.QueryExecer +} + +func NewAccountStorage(qe mysql.QueryExecer) AccountStorage { + return &accountStorage{qe} +} + +func (s *accountStorage) CreateAccount(ctx context.Context, a *domain.Account, environmentNamespace string) error { + query := ` + INSERT INTO account ( + id, + email, + name, + role, + disabled, + created_at, + updated_at, + deleted, + environment_namespace + ) VALUES ( + ?, ?, ?, ?, ?, ?, ?, ?, ? + ) + ` + _, err := s.qe.ExecContext( + ctx, + query, + a.Id, + a.Email, + a.Name, + int32(a.Role), + a.Disabled, + a.CreatedAt, + a.UpdatedAt, + a.Deleted, + environmentNamespace, + ) + if err != nil { + if err == mysql.ErrDuplicateEntry { + return ErrAccountAlreadyExists + } + return err + } + return nil +} + +func (s *accountStorage) UpdateAccount(ctx context.Context, a *domain.Account, environmentNamespace string) error { + query := ` + UPDATE + account + SET + email = ?, + name = ?, + role = ?, + disabled = ?, + created_at = ?, + updated_at = ?, + deleted = ? + WHERE + id = ? AND + environment_namespace = ? + ` + result, err := s.qe.ExecContext( + ctx, + query, + a.Email, + a.Name, + int32(a.Role), + a.Disabled, + a.CreatedAt, + a.UpdatedAt, + a.Deleted, + a.Id, + environmentNamespace, + ) + if err != nil { + return err + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return err + } + if rowsAffected != 1 { + return ErrAccountUnexpectedAffectedRows + } + return nil +} + +func (s *accountStorage) GetAccount(ctx context.Context, id, environmentNamespace string) (*domain.Account, error) { + account := proto.Account{} + var role int32 + query := ` + SELECT + id, + email, + name, + role, + disabled, + created_at, + updated_at, + deleted + FROM + account + WHERE + id = ? AND + environment_namespace = ? 
+ ` + err := s.qe.QueryRowContext( + ctx, + query, + id, + environmentNamespace, + ).Scan( + &account.Id, + &account.Email, + &account.Name, + &role, + &account.Disabled, + &account.CreatedAt, + &account.UpdatedAt, + &account.Deleted, + ) + if err != nil { + if err == mysql.ErrNoRows { + return nil, ErrAccountNotFound + } + return nil, err + } + account.Role = proto.Account_Role(role) + return &domain.Account{Account: &account}, nil +} + +func (s *accountStorage) ListAccounts( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, +) ([]*proto.Account, int, int64, error) { + whereSQL, whereArgs := mysql.ConstructWhereSQLString(whereParts) + orderBySQL := mysql.ConstructOrderBySQLString(orders) + limitOffsetSQL := mysql.ConstructLimitOffsetSQLString(limit, offset) + query := fmt.Sprintf(` + SELECT + id, + email, + name, + role, + disabled, + created_at, + updated_at, + deleted + FROM + account + %s %s %s + `, whereSQL, orderBySQL, limitOffsetSQL, + ) + rows, err := s.qe.QueryContext(ctx, query, whereArgs...) 
+ if err != nil { + return nil, 0, 0, err + } + defer rows.Close() + accounts := make([]*proto.Account, 0, limit) + for rows.Next() { + account := proto.Account{} + var role int32 + err := rows.Scan( + &account.Id, + &account.Email, + &account.Name, + &role, + &account.Disabled, + &account.CreatedAt, + &account.UpdatedAt, + &account.Deleted, + ) + if err != nil { + return nil, 0, 0, err + } + account.Role = proto.Account_Role(role) + accounts = append(accounts, &account) + } + if rows.Err() != nil { + return nil, 0, 0, rows.Err() + } + nextOffset := offset + len(accounts) + var totalCount int64 + countQuery := fmt.Sprintf(` + SELECT + COUNT(1) + FROM + account + %s %s + `, whereSQL, orderBySQL, + ) + err = s.qe.QueryRowContext(ctx, countQuery, whereArgs...).Scan(&totalCount) + if err != nil { + return nil, 0, 0, err + } + return accounts, nextOffset, totalCount, nil +} diff --git a/pkg/account/storage/v2/account_test.go b/pkg/account/storage/v2/account_test.go new file mode 100644 index 000000000..5f43d10b8 --- /dev/null +++ b/pkg/account/storage/v2/account_test.go @@ -0,0 +1,300 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v2 + +import ( + "context" + "errors" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/account/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + proto "github.com/bucketeer-io/bucketeer/proto/account" +) + +func TestNewAccountStorage(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + storage := NewAccountStorage(mock.NewMockQueryExecer(mockController)) + assert.IsType(t, &accountStorage{}, storage) +} + +func TestCreateAccount(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*accountStorage) + input *domain.Account + environmentNamespace string + expectedErr error + }{ + "ErrAccountAlreadyExists": { + setup: func(s *accountStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, mysql.ErrDuplicateEntry) + }, + input: &domain.Account{ + Account: &proto.Account{Id: "aid-0"}, + }, + environmentNamespace: "ns0", + expectedErr: ErrAccountAlreadyExists, + }, + "Error": { + setup: func(s *accountStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + }, + input: &domain.Account{ + Account: &proto.Account{Id: "aid-0"}, + }, + environmentNamespace: "ns0", + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *accountStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, nil) + }, + input: &domain.Account{ + Account: &proto.Account{Id: "aid-0"}, + }, + environmentNamespace: "ns0", + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := 
newAccountStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + err := storage.CreateAccount(context.Background(), p.input, p.environmentNamespace) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestUpdateAccount(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*accountStorage) + input *domain.Account + environmentNamespace string + expectedErr error + }{ + "ErrAccountUnexpectedAffectedRows": { + setup: func(s *accountStorage) { + result := mock.NewMockResult(mockController) + result.EXPECT().RowsAffected().Return(int64(0), nil) + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(result, nil) + }, + input: &domain.Account{ + Account: &proto.Account{Id: "aid-0"}, + }, + environmentNamespace: "ns0", + expectedErr: ErrAccountUnexpectedAffectedRows, + }, + "Error": { + setup: func(s *accountStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + }, + input: &domain.Account{ + Account: &proto.Account{Id: "aid-0"}, + }, + environmentNamespace: "ns0", + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *accountStorage) { + result := mock.NewMockResult(mockController) + result.EXPECT().RowsAffected().Return(int64(1), nil) + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(result, nil) + }, + input: &domain.Account{ + Account: &proto.Account{Id: "aid-0"}, + }, + environmentNamespace: "ns0", + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newAccountStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + err := storage.UpdateAccount(context.Background(), p.input, p.environmentNamespace) + assert.Equal(t, p.expectedErr, err) + }) 
+ } +} + +func TestGetAccount(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*accountStorage) + id string + environmentNamespace string + expectedErr error + }{ + "ErrAccountNotFound": { + setup: func(s *accountStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + id: "id-0", + environmentNamespace: "ns0", + expectedErr: ErrAccountNotFound, + }, + "Error": { + setup: func(s *accountStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(errors.New("error")) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + + }, + id: "id-0", + environmentNamespace: "ns0", + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *accountStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + id: "id-0", + environmentNamespace: "ns0", + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newAccountStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + _, err := storage.GetAccount(context.Background(), p.id, p.environmentNamespace) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestListAccounts(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*accountStorage) + whereParts []mysql.WherePart + orders []*mysql.Order + limit int + offset int + expected []*proto.Account + expectedCursor int + expectedErr error + }{ + "Error": { + 
setup: func(s *accountStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + }, + whereParts: nil, + orders: nil, + limit: 0, + offset: 0, + expected: nil, + expectedCursor: 0, + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *accountStorage) { + rows := mock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + whereParts: []mysql.WherePart{ + mysql.NewFilter("num", ">=", 5), + }, + orders: []*mysql.Order{ + mysql.NewOrder("id", mysql.OrderDirectionAsc), + }, + limit: 10, + offset: 5, + expected: []*proto.Account{}, + expectedCursor: 5, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newAccountStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + accounts, cursor, _, err := storage.ListAccounts( + context.Background(), + p.whereParts, + p.orders, + p.limit, + p.offset, + ) + assert.Equal(t, p.expected, accounts) + assert.Equal(t, p.expectedCursor, cursor) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func newAccountStorageWithMock(t *testing.T, mockController *gomock.Controller) *accountStorage { + t.Helper() + return &accountStorage{mock.NewMockQueryExecer(mockController)} +} diff --git a/pkg/account/storage/v2/admin_account.go b/pkg/account/storage/v2/admin_account.go new file mode 100644 index 000000000..6d72389a1 --- /dev/null +++ b/pkg/account/storage/v2/admin_account.go @@ -0,0 +1,239 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package v2 + +import ( + "context" + "errors" + "fmt" + + "github.com/bucketeer-io/bucketeer/pkg/account/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + proto "github.com/bucketeer-io/bucketeer/proto/account" +) + +var ( + ErrAdminAccountAlreadyExists = errors.New("account: admin account already exists") + ErrAdminAccountNotFound = errors.New("account: admin account not found") + ErrAdminAccountUnexpectedAffectedRows = errors.New("account: admin account unexpected affected rows") +) + +type AdminAccountStorage interface { + CreateAdminAccount(ctx context.Context, a *domain.Account) error + UpdateAdminAccount(ctx context.Context, a *domain.Account) error + GetAdminAccount(ctx context.Context, id string) (*domain.Account, error) + ListAdminAccounts( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, + ) ([]*proto.Account, int, int64, error) +} + +type adminAccountStorage struct { + qe mysql.QueryExecer +} + +func NewAdminAccountStorage(qe mysql.QueryExecer) AdminAccountStorage { + return &adminAccountStorage{qe} +} + +func (s *adminAccountStorage) CreateAdminAccount(ctx context.Context, a *domain.Account) error { + query := ` + INSERT INTO admin_account ( + id, + email, + name, + role, + disabled, + created_at, + updated_at, + deleted + ) VALUES ( + ?, ?, ?, 
?, ?, ?, ?, ? + ) + ` + _, err := s.qe.ExecContext( + ctx, + query, + a.Id, + a.Email, + a.Name, + int32(a.Role), + a.Disabled, + a.CreatedAt, + a.UpdatedAt, + a.Deleted, + ) + if err != nil { + if err == mysql.ErrDuplicateEntry { + return ErrAdminAccountAlreadyExists + } + return err + } + return nil +} + +func (s *adminAccountStorage) UpdateAdminAccount(ctx context.Context, a *domain.Account) error { + query := ` + UPDATE + admin_account + SET + email = ?, + name = ?, + role = ?, + disabled = ?, + created_at = ?, + updated_at = ?, + deleted = ? + WHERE + id = ? + ` + result, err := s.qe.ExecContext( + ctx, + query, + a.Email, + a.Name, + int32(a.Role), + a.Disabled, + a.CreatedAt, + a.UpdatedAt, + a.Deleted, + a.Id, + ) + if err != nil { + return err + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return err + } + if rowsAffected != 1 { + return ErrAdminAccountUnexpectedAffectedRows + } + return nil +} + +func (s *adminAccountStorage) GetAdminAccount(ctx context.Context, id string) (*domain.Account, error) { + account := proto.Account{} + var role int32 + query := ` + SELECT + id, + email, + name, + role, + disabled, + created_at, + updated_at, + deleted + FROM + admin_account + WHERE + id = ? 
+ ` + err := s.qe.QueryRowContext( + ctx, + query, + id, + ).Scan( + &account.Id, + &account.Email, + &account.Name, + &role, + &account.Disabled, + &account.CreatedAt, + &account.UpdatedAt, + &account.Deleted, + ) + if err != nil { + if err == mysql.ErrNoRows { + return nil, ErrAdminAccountNotFound + } + return nil, err + } + account.Role = proto.Account_Role(role) + return &domain.Account{Account: &account}, nil +} + +func (s *adminAccountStorage) ListAdminAccounts( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, +) ([]*proto.Account, int, int64, error) { + whereSQL, whereArgs := mysql.ConstructWhereSQLString(whereParts) + orderBySQL := mysql.ConstructOrderBySQLString(orders) + limitOffsetSQL := mysql.ConstructLimitOffsetSQLString(limit, offset) + query := fmt.Sprintf(` + SELECT + id, + email, + name, + role, + disabled, + created_at, + updated_at, + deleted + FROM + admin_account + %s %s %s + `, whereSQL, orderBySQL, limitOffsetSQL, + ) + rows, err := s.qe.QueryContext(ctx, query, whereArgs...) 
+ if err != nil { + return nil, 0, 0, err + } + defer rows.Close() + accounts := make([]*proto.Account, 0, limit) + for rows.Next() { + account := proto.Account{} + var role int32 + err := rows.Scan( + &account.Id, + &account.Email, + &account.Name, + &role, + &account.Disabled, + &account.CreatedAt, + &account.UpdatedAt, + &account.Deleted, + ) + if err != nil { + return nil, 0, 0, err + } + account.Role = proto.Account_Role(role) + accounts = append(accounts, &account) + } + if rows.Err() != nil { + return nil, 0, 0, rows.Err() + } + nextOffset := offset + len(accounts) + var totalCount int64 + countQuery := fmt.Sprintf(` + SELECT + COUNT(1) + FROM + admin_account + %s %s + `, whereSQL, orderBySQL, + ) + err = s.qe.QueryRowContext(ctx, countQuery, whereArgs...).Scan(&totalCount) + if err != nil { + return nil, 0, 0, err + } + return accounts, nextOffset, totalCount, nil +} diff --git a/pkg/account/storage/v2/admin_account_test.go b/pkg/account/storage/v2/admin_account_test.go new file mode 100644 index 000000000..dc303b218 --- /dev/null +++ b/pkg/account/storage/v2/admin_account_test.go @@ -0,0 +1,288 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v2 + +import ( + "context" + "errors" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/account/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + proto "github.com/bucketeer-io/bucketeer/proto/account" +) + +func TestNewAdminAccountStorage(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + storage := NewAdminAccountStorage(mock.NewMockQueryExecer(mockController)) + assert.IsType(t, &adminAccountStorage{}, storage) +} + +func TestCreateAdminAccount(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*adminAccountStorage) + input *domain.Account + expectedErr error + }{ + "ErrAdminAccountAlreadyExists": { + setup: func(s *adminAccountStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, mysql.ErrDuplicateEntry) + }, + input: &domain.Account{ + Account: &proto.Account{Id: "aid-0"}, + }, + expectedErr: ErrAdminAccountAlreadyExists, + }, + "Error": { + setup: func(s *adminAccountStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + }, + input: &domain.Account{ + Account: &proto.Account{Id: "aid-0"}, + }, + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *adminAccountStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, nil) + }, + input: &domain.Account{ + Account: &proto.Account{Id: "aid-0"}, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newAdminAccountStorageWithMock(t, mockController) + if p.setup != nil { + 
p.setup(storage) + } + err := storage.CreateAdminAccount(context.Background(), p.input) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestUpdateAdminAccount(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*adminAccountStorage) + input *domain.Account + expectedErr error + }{ + "ErrAdminAccountUnexpectedAffectedRows": { + setup: func(s *adminAccountStorage) { + result := mock.NewMockResult(mockController) + result.EXPECT().RowsAffected().Return(int64(0), nil) + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(result, nil) + }, + input: &domain.Account{ + Account: &proto.Account{Id: "aid-0"}, + }, + expectedErr: ErrAdminAccountUnexpectedAffectedRows, + }, + "Error": { + setup: func(s *adminAccountStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + }, + input: &domain.Account{ + Account: &proto.Account{Id: "aid-0"}, + }, + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *adminAccountStorage) { + result := mock.NewMockResult(mockController) + result.EXPECT().RowsAffected().Return(int64(1), nil) + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(result, nil) + }, + input: &domain.Account{ + Account: &proto.Account{Id: "aid-0"}, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newAdminAccountStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + err := storage.UpdateAdminAccount(context.Background(), p.input) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestGetAdminAccount(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup 
func(*adminAccountStorage) + id string + expectedErr error + }{ + "ErrAdminAccountNotFound": { + setup: func(s *adminAccountStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + id: "id-0", + expectedErr: ErrAdminAccountNotFound, + }, + "Error": { + setup: func(s *adminAccountStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(errors.New("error")) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + + }, + id: "id-0", + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *adminAccountStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + id: "id-0", + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newAdminAccountStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + _, err := storage.GetAdminAccount(context.Background(), p.id) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestListAdminAccounts(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*adminAccountStorage) + whereParts []mysql.WherePart + orders []*mysql.Order + limit int + offset int + expected []*proto.Account + expectedCursor int + expectedErr error + }{ + "Error": { + setup: func(s *adminAccountStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + }, + whereParts: nil, + orders: nil, + limit: 0, + offset: 0, + expected: nil, + expectedCursor: 0, + 
expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *adminAccountStorage) { + rows := mock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + whereParts: []mysql.WherePart{ + mysql.NewFilter("num", ">=", 5), + }, + orders: []*mysql.Order{ + mysql.NewOrder("id", mysql.OrderDirectionAsc), + }, + limit: 10, + offset: 5, + expected: []*proto.Account{}, + expectedCursor: 5, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newAdminAccountStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + accounts, cursor, _, err := storage.ListAdminAccounts( + context.Background(), + p.whereParts, + p.orders, + p.limit, + p.offset, + ) + assert.Equal(t, p.expected, accounts) + assert.Equal(t, p.expectedCursor, cursor) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func newAdminAccountStorageWithMock(t *testing.T, mockController *gomock.Controller) *adminAccountStorage { + t.Helper() + return &adminAccountStorage{mock.NewMockQueryExecer(mockController)} +} diff --git a/pkg/account/storage/v2/api_key.go b/pkg/account/storage/v2/api_key.go new file mode 100644 index 000000000..04e94c9ab --- /dev/null +++ b/pkg/account/storage/v2/api_key.go @@ -0,0 +1,229 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package v2 + +import ( + "context" + "errors" + "fmt" + + "github.com/bucketeer-io/bucketeer/pkg/account/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + proto "github.com/bucketeer-io/bucketeer/proto/account" +) + +var ( + ErrAPIKeyAlreadyExists = errors.New("apiKey: api key already exists") + ErrAPIKeyNotFound = errors.New("apiKey: api key not found") + ErrAPIKeyUnexpectedAffectedRows = errors.New("apiKey: api key unexpected affected rows") +) + +type APIKeyStorage interface { + CreateAPIKey(ctx context.Context, k *domain.APIKey, environmentNamespace string) error + UpdateAPIKey(ctx context.Context, k *domain.APIKey, environmentNamespace string) error + GetAPIKey(ctx context.Context, id, environmentNamespace string) (*domain.APIKey, error) + ListAPIKeys( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, + ) ([]*proto.APIKey, int, int64, error) +} + +type apiKeyStorage struct { + qe mysql.QueryExecer +} + +func NewAPIKeyStorage(qe mysql.QueryExecer) APIKeyStorage { + return &apiKeyStorage{qe} +} + +func (s *apiKeyStorage) CreateAPIKey(ctx context.Context, k *domain.APIKey, environmentNamespace string) error { + query := ` + INSERT INTO api_key ( + id, + name, + role, + disabled, + created_at, + updated_at, + environment_namespace + ) VALUES ( + ?, ?, ?, ?, ?, ?, ? 
+ ) + ` + _, err := s.qe.ExecContext( + ctx, + query, + k.Id, + k.Name, + int32(k.Role), + k.Disabled, + k.CreatedAt, + k.UpdatedAt, + environmentNamespace, + ) + if err != nil { + if err == mysql.ErrDuplicateEntry { + return ErrAPIKeyAlreadyExists + } + return err + } + return nil +} + +func (s *apiKeyStorage) UpdateAPIKey(ctx context.Context, k *domain.APIKey, environmentNamespace string) error { + query := ` + UPDATE + api_key + SET + name = ?, + role = ?, + disabled = ?, + created_at = ?, + updated_at = ? + WHERE + id = ? AND + environment_namespace = ? + ` + result, err := s.qe.ExecContext( + ctx, + query, + k.Name, + int32(k.Role), + k.Disabled, + k.CreatedAt, + k.UpdatedAt, + k.Id, + environmentNamespace, + ) + if err != nil { + return err + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return err + } + if rowsAffected != 1 { + return ErrAPIKeyUnexpectedAffectedRows + } + return nil +} + +func (s *apiKeyStorage) GetAPIKey(ctx context.Context, id, environmentNamespace string) (*domain.APIKey, error) { + apiKey := proto.APIKey{} + var role int32 + query := ` + SELECT + id, + name, + role, + disabled, + created_at, + updated_at + FROM + api_key + WHERE + id = ? AND + environment_namespace = ? 
+ ` + err := s.qe.QueryRowContext( + ctx, + query, + id, + environmentNamespace, + ).Scan( + &apiKey.Id, + &apiKey.Name, + &role, + &apiKey.Disabled, + &apiKey.CreatedAt, + &apiKey.UpdatedAt, + ) + if err != nil { + if err == mysql.ErrNoRows { + return nil, ErrAPIKeyNotFound + } + return nil, err + } + apiKey.Role = proto.APIKey_Role(role) + return &domain.APIKey{APIKey: &apiKey}, nil +} + +func (s *apiKeyStorage) ListAPIKeys( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, +) ([]*proto.APIKey, int, int64, error) { + whereSQL, whereArgs := mysql.ConstructWhereSQLString(whereParts) + orderBySQL := mysql.ConstructOrderBySQLString(orders) + limitOffsetSQL := mysql.ConstructLimitOffsetSQLString(limit, offset) + query := fmt.Sprintf(` + SELECT + id, + name, + role, + disabled, + created_at, + updated_at + FROM + api_key + %s %s %s + `, whereSQL, orderBySQL, limitOffsetSQL, + ) + rows, err := s.qe.QueryContext(ctx, query, whereArgs...) + if err != nil { + return nil, 0, 0, err + } + defer rows.Close() + apiKeys := make([]*proto.APIKey, 0, limit) + for rows.Next() { + apiKey := proto.APIKey{} + var role int32 + err := rows.Scan( + &apiKey.Id, + &apiKey.Name, + &role, + &apiKey.Disabled, + &apiKey.CreatedAt, + &apiKey.UpdatedAt, + ) + if err != nil { + return nil, 0, 0, err + } + apiKey.Role = proto.APIKey_Role(role) + apiKeys = append(apiKeys, &apiKey) + } + if rows.Err() != nil { + return nil, 0, 0, err + } + nextOffset := offset + len(apiKeys) + var totalCount int64 + countQuery := fmt.Sprintf(` + SELECT + COUNT(1) + FROM + api_key + %s %s + `, whereSQL, orderBySQL, + ) + err = s.qe.QueryRowContext(ctx, countQuery, whereArgs...).Scan(&totalCount) + if err != nil { + return nil, 0, 0, err + } + return apiKeys, nextOffset, totalCount, nil +} diff --git a/pkg/account/storage/v2/api_key_test.go b/pkg/account/storage/v2/api_key_test.go new file mode 100644 index 000000000..f603b782a --- /dev/null +++ 
b/pkg/account/storage/v2/api_key_test.go @@ -0,0 +1,300 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v2 + +import ( + "context" + "errors" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/account/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + proto "github.com/bucketeer-io/bucketeer/proto/account" +) + +func TestNewAPIKeyStorage(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + storage := NewAPIKeyStorage(mock.NewMockQueryExecer(mockController)) + assert.IsType(t, &apiKeyStorage{}, storage) +} + +func TestCreateAPIKey(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*apiKeyStorage) + input *domain.APIKey + environmentNamespace string + expectedErr error + }{ + "ErrAPIKeyAlreadyExists": { + setup: func(s *apiKeyStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, mysql.ErrDuplicateEntry) + }, + input: &domain.APIKey{ + APIKey: &proto.APIKey{Id: "aid-0"}, + }, + environmentNamespace: "ns0", + expectedErr: ErrAPIKeyAlreadyExists, + }, + "Error": { + setup: func(s *apiKeyStorage) { + 
s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + }, + input: &domain.APIKey{ + APIKey: &proto.APIKey{Id: "aid-0"}, + }, + environmentNamespace: "ns0", + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *apiKeyStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, nil) + }, + input: &domain.APIKey{ + APIKey: &proto.APIKey{Id: "aid-0"}, + }, + environmentNamespace: "ns0", + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newAPIKeyStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + err := storage.CreateAPIKey(context.Background(), p.input, p.environmentNamespace) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestUpdateAPIKey(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*apiKeyStorage) + input *domain.APIKey + environmentNamespace string + expectedErr error + }{ + "ErrAPIKeyUnexpectedAffectedRows": { + setup: func(s *apiKeyStorage) { + result := mock.NewMockResult(mockController) + result.EXPECT().RowsAffected().Return(int64(0), nil) + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(result, nil) + }, + input: &domain.APIKey{ + APIKey: &proto.APIKey{Id: "aid-0"}, + }, + environmentNamespace: "ns0", + expectedErr: ErrAPIKeyUnexpectedAffectedRows, + }, + "Error": { + setup: func(s *apiKeyStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + }, + input: &domain.APIKey{ + APIKey: &proto.APIKey{Id: "aid-0"}, + }, + environmentNamespace: "ns0", + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *apiKeyStorage) { + result := 
mock.NewMockResult(mockController) + result.EXPECT().RowsAffected().Return(int64(1), nil) + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(result, nil) + }, + input: &domain.APIKey{ + APIKey: &proto.APIKey{Id: "aid-0"}, + }, + environmentNamespace: "ns0", + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newAPIKeyStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + err := storage.UpdateAPIKey(context.Background(), p.input, p.environmentNamespace) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestGetAPIKey(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*apiKeyStorage) + id string + environmentNamespace string + expectedErr error + }{ + "ErrAPIKeyNotFound": { + setup: func(s *apiKeyStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + id: "id-0", + environmentNamespace: "ns0", + expectedErr: ErrAPIKeyNotFound, + }, + "Error": { + setup: func(s *apiKeyStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(errors.New("error")) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + + }, + id: "id-0", + environmentNamespace: "ns0", + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *apiKeyStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + id: "id-0", + environmentNamespace: "ns0", + expectedErr: nil, + }, + } + for msg, p := range patterns { + 
t.Run(msg, func(t *testing.T) { + storage := newAPIKeyStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + _, err := storage.GetAPIKey(context.Background(), p.id, p.environmentNamespace) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestListAPIKeys(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*apiKeyStorage) + whereParts []mysql.WherePart + orders []*mysql.Order + limit int + offset int + expected []*proto.APIKey + expectedCursor int + expectedErr error + }{ + "Error": { + setup: func(s *apiKeyStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + }, + whereParts: nil, + orders: nil, + limit: 0, + offset: 0, + expected: nil, + expectedCursor: 0, + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *apiKeyStorage) { + rows := mock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + whereParts: []mysql.WherePart{ + mysql.NewFilter("num", ">=", 5), + }, + orders: []*mysql.Order{ + mysql.NewOrder("id", mysql.OrderDirectionAsc), + }, + limit: 10, + offset: 5, + expected: []*proto.APIKey{}, + expectedCursor: 5, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newAPIKeyStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + apiKeys, cursor, _, err := storage.ListAPIKeys( + context.Background(), + p.whereParts, + p.orders, + 
p.limit, + p.offset, + ) + assert.Equal(t, p.expected, apiKeys) + assert.Equal(t, p.expectedCursor, cursor) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func newAPIKeyStorageWithMock(t *testing.T, mockController *gomock.Controller) *apiKeyStorage { + t.Helper() + return &apiKeyStorage{mock.NewMockQueryExecer(mockController)} +} diff --git a/pkg/account/storage/v2/mock/BUILD.bazel b/pkg/account/storage/v2/mock/BUILD.bazel new file mode 100644 index 000000000..07e559174 --- /dev/null +++ b/pkg/account/storage/v2/mock/BUILD.bazel @@ -0,0 +1,18 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "account.go", + "admin_account.go", + "api_key.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/account/storage/v2/mock", + visibility = ["//visibility:public"], + deps = [ + "//pkg/account/domain:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//proto/account:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + ], +) diff --git a/pkg/account/storage/v2/mock/account.go b/pkg/account/storage/v2/mock/account.go new file mode 100644 index 000000000..5e0234c32 --- /dev/null +++ b/pkg/account/storage/v2/mock/account.go @@ -0,0 +1,99 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: account.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + domain "github.com/bucketeer-io/bucketeer/pkg/account/domain" + mysql "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + account "github.com/bucketeer-io/bucketeer/proto/account" +) + +// MockAccountStorage is a mock of AccountStorage interface. +type MockAccountStorage struct { + ctrl *gomock.Controller + recorder *MockAccountStorageMockRecorder +} + +// MockAccountStorageMockRecorder is the mock recorder for MockAccountStorage. 
+type MockAccountStorageMockRecorder struct { + mock *MockAccountStorage +} + +// NewMockAccountStorage creates a new mock instance. +func NewMockAccountStorage(ctrl *gomock.Controller) *MockAccountStorage { + mock := &MockAccountStorage{ctrl: ctrl} + mock.recorder = &MockAccountStorageMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockAccountStorage) EXPECT() *MockAccountStorageMockRecorder { + return m.recorder +} + +// CreateAccount mocks base method. +func (m *MockAccountStorage) CreateAccount(ctx context.Context, a *domain.Account, environmentNamespace string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateAccount", ctx, a, environmentNamespace) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateAccount indicates an expected call of CreateAccount. +func (mr *MockAccountStorageMockRecorder) CreateAccount(ctx, a, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateAccount", reflect.TypeOf((*MockAccountStorage)(nil).CreateAccount), ctx, a, environmentNamespace) +} + +// GetAccount mocks base method. +func (m *MockAccountStorage) GetAccount(ctx context.Context, id, environmentNamespace string) (*domain.Account, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccount", ctx, id, environmentNamespace) + ret0, _ := ret[0].(*domain.Account) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccount indicates an expected call of GetAccount. +func (mr *MockAccountStorageMockRecorder) GetAccount(ctx, id, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccount", reflect.TypeOf((*MockAccountStorage)(nil).GetAccount), ctx, id, environmentNamespace) +} + +// ListAccounts mocks base method. 
+func (m *MockAccountStorage) ListAccounts(ctx context.Context, whereParts []mysql.WherePart, orders []*mysql.Order, limit, offset int) ([]*account.Account, int, int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListAccounts", ctx, whereParts, orders, limit, offset) + ret0, _ := ret[0].([]*account.Account) + ret1, _ := ret[1].(int) + ret2, _ := ret[2].(int64) + ret3, _ := ret[3].(error) + return ret0, ret1, ret2, ret3 +} + +// ListAccounts indicates an expected call of ListAccounts. +func (mr *MockAccountStorageMockRecorder) ListAccounts(ctx, whereParts, orders, limit, offset interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAccounts", reflect.TypeOf((*MockAccountStorage)(nil).ListAccounts), ctx, whereParts, orders, limit, offset) +} + +// UpdateAccount mocks base method. +func (m *MockAccountStorage) UpdateAccount(ctx context.Context, a *domain.Account, environmentNamespace string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateAccount", ctx, a, environmentNamespace) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateAccount indicates an expected call of UpdateAccount. +func (mr *MockAccountStorageMockRecorder) UpdateAccount(ctx, a, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAccount", reflect.TypeOf((*MockAccountStorage)(nil).UpdateAccount), ctx, a, environmentNamespace) +} diff --git a/pkg/account/storage/v2/mock/admin_account.go b/pkg/account/storage/v2/mock/admin_account.go new file mode 100644 index 000000000..16980f90c --- /dev/null +++ b/pkg/account/storage/v2/mock/admin_account.go @@ -0,0 +1,99 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: admin_account.go + +// Package mock is a generated GoMock package. 
+package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + domain "github.com/bucketeer-io/bucketeer/pkg/account/domain" + mysql "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + account "github.com/bucketeer-io/bucketeer/proto/account" +) + +// MockAdminAccountStorage is a mock of AdminAccountStorage interface. +type MockAdminAccountStorage struct { + ctrl *gomock.Controller + recorder *MockAdminAccountStorageMockRecorder +} + +// MockAdminAccountStorageMockRecorder is the mock recorder for MockAdminAccountStorage. +type MockAdminAccountStorageMockRecorder struct { + mock *MockAdminAccountStorage +} + +// NewMockAdminAccountStorage creates a new mock instance. +func NewMockAdminAccountStorage(ctrl *gomock.Controller) *MockAdminAccountStorage { + mock := &MockAdminAccountStorage{ctrl: ctrl} + mock.recorder = &MockAdminAccountStorageMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockAdminAccountStorage) EXPECT() *MockAdminAccountStorageMockRecorder { + return m.recorder +} + +// CreateAdminAccount mocks base method. +func (m *MockAdminAccountStorage) CreateAdminAccount(ctx context.Context, a *domain.Account) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateAdminAccount", ctx, a) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateAdminAccount indicates an expected call of CreateAdminAccount. +func (mr *MockAdminAccountStorageMockRecorder) CreateAdminAccount(ctx, a interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateAdminAccount", reflect.TypeOf((*MockAdminAccountStorage)(nil).CreateAdminAccount), ctx, a) +} + +// GetAdminAccount mocks base method. 
+func (m *MockAdminAccountStorage) GetAdminAccount(ctx context.Context, id string) (*domain.Account, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAdminAccount", ctx, id) + ret0, _ := ret[0].(*domain.Account) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAdminAccount indicates an expected call of GetAdminAccount. +func (mr *MockAdminAccountStorageMockRecorder) GetAdminAccount(ctx, id interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAdminAccount", reflect.TypeOf((*MockAdminAccountStorage)(nil).GetAdminAccount), ctx, id) +} + +// ListAdminAccounts mocks base method. +func (m *MockAdminAccountStorage) ListAdminAccounts(ctx context.Context, whereParts []mysql.WherePart, orders []*mysql.Order, limit, offset int) ([]*account.Account, int, int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListAdminAccounts", ctx, whereParts, orders, limit, offset) + ret0, _ := ret[0].([]*account.Account) + ret1, _ := ret[1].(int) + ret2, _ := ret[2].(int64) + ret3, _ := ret[3].(error) + return ret0, ret1, ret2, ret3 +} + +// ListAdminAccounts indicates an expected call of ListAdminAccounts. +func (mr *MockAdminAccountStorageMockRecorder) ListAdminAccounts(ctx, whereParts, orders, limit, offset interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAdminAccounts", reflect.TypeOf((*MockAdminAccountStorage)(nil).ListAdminAccounts), ctx, whereParts, orders, limit, offset) +} + +// UpdateAdminAccount mocks base method. +func (m *MockAdminAccountStorage) UpdateAdminAccount(ctx context.Context, a *domain.Account) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateAdminAccount", ctx, a) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateAdminAccount indicates an expected call of UpdateAdminAccount. 
+func (mr *MockAdminAccountStorageMockRecorder) UpdateAdminAccount(ctx, a interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAdminAccount", reflect.TypeOf((*MockAdminAccountStorage)(nil).UpdateAdminAccount), ctx, a) +} diff --git a/pkg/account/storage/v2/mock/api_key.go b/pkg/account/storage/v2/mock/api_key.go new file mode 100644 index 000000000..68d1ecfa9 --- /dev/null +++ b/pkg/account/storage/v2/mock/api_key.go @@ -0,0 +1,99 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: api_key.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + domain "github.com/bucketeer-io/bucketeer/pkg/account/domain" + mysql "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + account "github.com/bucketeer-io/bucketeer/proto/account" +) + +// MockAPIKeyStorage is a mock of APIKeyStorage interface. +type MockAPIKeyStorage struct { + ctrl *gomock.Controller + recorder *MockAPIKeyStorageMockRecorder +} + +// MockAPIKeyStorageMockRecorder is the mock recorder for MockAPIKeyStorage. +type MockAPIKeyStorageMockRecorder struct { + mock *MockAPIKeyStorage +} + +// NewMockAPIKeyStorage creates a new mock instance. +func NewMockAPIKeyStorage(ctrl *gomock.Controller) *MockAPIKeyStorage { + mock := &MockAPIKeyStorage{ctrl: ctrl} + mock.recorder = &MockAPIKeyStorageMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockAPIKeyStorage) EXPECT() *MockAPIKeyStorageMockRecorder { + return m.recorder +} + +// CreateAPIKey mocks base method. 
+func (m *MockAPIKeyStorage) CreateAPIKey(ctx context.Context, k *domain.APIKey, environmentNamespace string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateAPIKey", ctx, k, environmentNamespace) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateAPIKey indicates an expected call of CreateAPIKey. +func (mr *MockAPIKeyStorageMockRecorder) CreateAPIKey(ctx, k, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateAPIKey", reflect.TypeOf((*MockAPIKeyStorage)(nil).CreateAPIKey), ctx, k, environmentNamespace) +} + +// GetAPIKey mocks base method. +func (m *MockAPIKeyStorage) GetAPIKey(ctx context.Context, id, environmentNamespace string) (*domain.APIKey, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAPIKey", ctx, id, environmentNamespace) + ret0, _ := ret[0].(*domain.APIKey) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAPIKey indicates an expected call of GetAPIKey. +func (mr *MockAPIKeyStorageMockRecorder) GetAPIKey(ctx, id, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAPIKey", reflect.TypeOf((*MockAPIKeyStorage)(nil).GetAPIKey), ctx, id, environmentNamespace) +} + +// ListAPIKeys mocks base method. +func (m *MockAPIKeyStorage) ListAPIKeys(ctx context.Context, whereParts []mysql.WherePart, orders []*mysql.Order, limit, offset int) ([]*account.APIKey, int, int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListAPIKeys", ctx, whereParts, orders, limit, offset) + ret0, _ := ret[0].([]*account.APIKey) + ret1, _ := ret[1].(int) + ret2, _ := ret[2].(int64) + ret3, _ := ret[3].(error) + return ret0, ret1, ret2, ret3 +} + +// ListAPIKeys indicates an expected call of ListAPIKeys. 
+func (mr *MockAPIKeyStorageMockRecorder) ListAPIKeys(ctx, whereParts, orders, limit, offset interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAPIKeys", reflect.TypeOf((*MockAPIKeyStorage)(nil).ListAPIKeys), ctx, whereParts, orders, limit, offset) +} + +// UpdateAPIKey mocks base method. +func (m *MockAPIKeyStorage) UpdateAPIKey(ctx context.Context, k *domain.APIKey, environmentNamespace string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateAPIKey", ctx, k, environmentNamespace) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateAPIKey indicates an expected call of UpdateAPIKey. +func (mr *MockAPIKeyStorageMockRecorder) UpdateAPIKey(ctx, k, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAPIKey", reflect.TypeOf((*MockAPIKeyStorage)(nil).UpdateAPIKey), ctx, k, environmentNamespace) +} diff --git a/pkg/auditlog/api/BUILD.bazel b/pkg/auditlog/api/BUILD.bazel new file mode 100644 index 000000000..1c5a722c7 --- /dev/null +++ b/pkg/auditlog/api/BUILD.bazel @@ -0,0 +1,52 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "api.go", + "error.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/auditlog/api", + visibility = ["//visibility:public"], + deps = [ + "//pkg/account/client:go_default_library", + "//pkg/auditlog/storage/v2:go_default_library", + "//pkg/domainevent/domain:go_default_library", + "//pkg/locale:go_default_library", + "//pkg/log:go_default_library", + "//pkg/role:go_default_library", + "//pkg/rpc/status:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//proto/account:go_default_library", + "//proto/auditlog:go_default_library", + "//proto/event/domain:go_default_library", + "@go_googleapis//google/rpc:errdetails_go_proto", + "@org_golang_google_grpc//:go_default_library", + 
"@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["api_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/account/client/mock:go_default_library", + "//pkg/auditlog/storage/v2/mock:go_default_library", + "//pkg/domainevent/domain:go_default_library", + "//pkg/locale:go_default_library", + "//pkg/log:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/storage/v2/mysql/mock:go_default_library", + "//pkg/token:go_default_library", + "//proto/account:go_default_library", + "//proto/auditlog:go_default_library", + "//proto/event/domain:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/auditlog/api/api.go b/pkg/auditlog/api/api.go new file mode 100644 index 000000000..0e46f7a0b --- /dev/null +++ b/pkg/auditlog/api/api.go @@ -0,0 +1,415 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "context" + "strconv" + + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + accountclient "github.com/bucketeer-io/bucketeer/pkg/account/client" + v2als "github.com/bucketeer-io/bucketeer/pkg/auditlog/storage/v2" + domainevent "github.com/bucketeer-io/bucketeer/pkg/domainevent/domain" + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/role" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + proto "github.com/bucketeer-io/bucketeer/proto/auditlog" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +type options struct { + logger *zap.Logger +} + +type Option func(*options) + +func WithLogger(l *zap.Logger) Option { + return func(opts *options) { + opts.logger = l + } +} + +type AuditlogService interface { + Register(*grpc.Server) + ListAuditLogs(context.Context, *proto.ListAuditLogsRequest) (*proto.ListAuditLogsResponse, error) + ListAdminAuditLogs( + ctx context.Context, + req *proto.ListAdminAuditLogsRequest, + ) (*proto.ListAdminAuditLogsResponse, error) + ListFeatureHistory( + ctx context.Context, + req *proto.ListFeatureHistoryRequest, + ) (*proto.ListFeatureHistoryResponse, error) +} + +type auditlogService struct { + accountClient accountclient.Client + mysqlStorage v2als.AuditLogStorage + mysqlAdminStorage v2als.AdminAuditLogStorage + opts *options + logger *zap.Logger +} + +func NewAuditLogService( + accountClient accountclient.Client, + mysqlClient mysql.Client, + opts ...Option, +) AuditlogService { + dopts := &options{ + logger: zap.NewNop(), + } + for _, opt := range opts { + opt(dopts) + } + return &auditlogService{ + accountClient: accountClient, + mysqlStorage: v2als.NewAuditLogStorage(mysqlClient), + mysqlAdminStorage: v2als.NewAdminAuditLogStorage(mysqlClient), + opts: dopts, 
+ logger: dopts.logger.Named("api"), + } +} + +func (s *auditlogService) Register(server *grpc.Server) { + proto.RegisterAuditLogServiceServer(server, s) +} + +func (s *auditlogService) ListAuditLogs( + ctx context.Context, + req *proto.ListAuditLogsRequest, +) (*proto.ListAuditLogsResponse, error) { + _, err := s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + limit := int(req.PageSize) + cursor := req.Cursor + if cursor == "" { + cursor = "0" + } + offset, err := strconv.Atoi(cursor) + if err != nil { + return nil, localizedError(statusInvalidCursor, locale.JaJP) + } + whereParts := []mysql.WherePart{ + mysql.NewFilter("environment_namespace", "=", req.EnvironmentNamespace), + } + if req.From != 0 { + whereParts = append(whereParts, mysql.NewFilter("timestamp", ">=", req.From)) + } + if req.To != 0 { + whereParts = append(whereParts, mysql.NewFilter("timestamp", "<=", req.To)) + } + if req.EntityType != nil { + whereParts = append(whereParts, mysql.NewFilter("entity_type", "=", req.EntityType.Value)) + } + if req.SearchKeyword != "" { + whereParts = append(whereParts, mysql.NewSearchQuery([]string{"editor"}, req.SearchKeyword)) + } + orders, err := s.newAuditLogListOrders(req.OrderBy, req.OrderDirection) + if err != nil { + s.logger.Error( + "Invalid argument", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, err + } + auditlogs, nextCursor, totalCount, err := s.mysqlStorage.ListAuditLogs( + ctx, + whereParts, + orders, + limit, + offset, + ) + if err != nil { + s.logger.Error( + "Failed to list auditlogs", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + for _, auditlog := range auditlogs { + auditlog.LocalizedMessage = domainevent.LocalizedMessage(auditlog.Type, locale.JaJP) + } + return &proto.ListAuditLogsResponse{ + AuditLogs: auditlogs, + Cursor: strconv.Itoa(nextCursor), + 
TotalCount: totalCount, + }, nil +} + +func (s *auditlogService) newAuditLogListOrders( + orderBy proto.ListAuditLogsRequest_OrderBy, + orderDirection proto.ListAuditLogsRequest_OrderDirection, +) ([]*mysql.Order, error) { + var column string + switch orderBy { + case proto.ListAuditLogsRequest_DEFAULT, + proto.ListAuditLogsRequest_TIMESTAMP: + column = "timestamp" + default: + return nil, localizedError(statusInvalidOrderBy, locale.JaJP) + } + direction := mysql.OrderDirectionDesc + if orderDirection == proto.ListAuditLogsRequest_ASC { + direction = mysql.OrderDirectionAsc + } + return []*mysql.Order{mysql.NewOrder(column, direction)}, nil +} + +func (s *auditlogService) ListAdminAuditLogs( + ctx context.Context, + req *proto.ListAdminAuditLogsRequest, +) (*proto.ListAdminAuditLogsResponse, error) { + _, err := s.checkAdminRole(ctx) + if err != nil { + return nil, err + } + whereParts := []mysql.WherePart{} + if req.From != 0 { + whereParts = append(whereParts, mysql.NewFilter("timestamp", ">=", req.From)) + } + if req.To != 0 { + whereParts = append(whereParts, mysql.NewFilter("timestamp", "<=", req.To)) + } + if req.EntityType != nil { + whereParts = append(whereParts, mysql.NewFilter("entity_type", "=", req.EntityType.Value)) + } + if req.SearchKeyword != "" { + whereParts = append(whereParts, mysql.NewSearchQuery([]string{"editor"}, req.SearchKeyword)) + } + orders, err := s.newAdminAuditLogListOrders(req.OrderBy, req.OrderDirection) + if err != nil { + s.logger.Error( + "Invalid argument", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, err + } + limit := int(req.PageSize) + cursor := req.Cursor + if cursor == "" { + cursor = "0" + } + offset, err := strconv.Atoi(cursor) + if err != nil { + return nil, localizedError(statusInvalidCursor, locale.JaJP) + } + auditlogs, nextCursor, totalCount, err := s.mysqlAdminStorage.ListAdminAuditLogs( + ctx, + whereParts, + orders, + limit, + offset, + ) + if err != nil { + 
s.logger.Error( + "Failed to list admin auditlogs", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + for _, auditlog := range auditlogs { + auditlog.LocalizedMessage = domainevent.LocalizedMessage(auditlog.Type, locale.JaJP) + } + return &proto.ListAdminAuditLogsResponse{ + AuditLogs: auditlogs, + Cursor: strconv.Itoa(nextCursor), + TotalCount: totalCount, + }, nil +} + +func (s *auditlogService) newAdminAuditLogListOrders( + orderBy proto.ListAdminAuditLogsRequest_OrderBy, + orderDirection proto.ListAdminAuditLogsRequest_OrderDirection, +) ([]*mysql.Order, error) { + var column string + switch orderBy { + case proto.ListAdminAuditLogsRequest_DEFAULT, + proto.ListAdminAuditLogsRequest_TIMESTAMP: + column = "timestamp" + default: + return nil, localizedError(statusInvalidOrderBy, locale.JaJP) + } + direction := mysql.OrderDirectionDesc + if orderDirection == proto.ListAdminAuditLogsRequest_ASC { + direction = mysql.OrderDirectionAsc + } + return []*mysql.Order{mysql.NewOrder(column, direction)}, nil +} + +func (s *auditlogService) ListFeatureHistory( + ctx context.Context, + req *proto.ListFeatureHistoryRequest, +) (*proto.ListFeatureHistoryResponse, error) { + _, err := s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + whereParts := []mysql.WherePart{ + mysql.NewFilter("environment_namespace", "=", req.EnvironmentNamespace), + mysql.NewFilter("entity_type", "=", int32(eventproto.Event_FEATURE)), + mysql.NewFilter("entity_id", "=", req.FeatureId), + } + if req.From != 0 { + whereParts = append(whereParts, mysql.NewFilter("timestamp", ">=", req.From)) + } + if req.To != 0 { + whereParts = append(whereParts, mysql.NewFilter("timestamp", "<=", req.To)) + } + if req.SearchKeyword != "" { + whereParts = append(whereParts, mysql.NewSearchQuery([]string{"editor"}, req.SearchKeyword)) + } + orders, err := 
s.newFeatureHistoryAuditLogListOrders(req.OrderBy, req.OrderDirection) + if err != nil { + s.logger.Error( + "Invalid argument", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, err + } + limit := int(req.PageSize) + cursor := req.Cursor + if cursor == "" { + cursor = "0" + } + offset, err := strconv.Atoi(cursor) + if err != nil { + return nil, localizedError(statusInvalidCursor, locale.JaJP) + } + auditlogs, nextCursor, totalCount, err := s.mysqlStorage.ListAuditLogs( + ctx, + whereParts, + orders, + limit, + offset, + ) + if err != nil { + s.logger.Error( + "Failed to list feature history", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + zap.String("featureId", req.FeatureId), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + for _, auditlog := range auditlogs { + auditlog.LocalizedMessage = domainevent.LocalizedMessage(auditlog.Type, locale.JaJP) + } + return &proto.ListFeatureHistoryResponse{ + AuditLogs: auditlogs, + Cursor: strconv.Itoa(nextCursor), + TotalCount: totalCount, + }, nil +} + +func (s *auditlogService) newFeatureHistoryAuditLogListOrders( + orderBy proto.ListFeatureHistoryRequest_OrderBy, + orderDirection proto.ListFeatureHistoryRequest_OrderDirection, +) ([]*mysql.Order, error) { + var column string + switch orderBy { + case proto.ListFeatureHistoryRequest_DEFAULT, + proto.ListFeatureHistoryRequest_TIMESTAMP: + column = "timestamp" + default: + return nil, localizedError(statusInvalidOrderBy, locale.JaJP) + } + direction := mysql.OrderDirectionDesc + if orderDirection == proto.ListFeatureHistoryRequest_ASC { + direction = mysql.OrderDirectionAsc + } + return []*mysql.Order{mysql.NewOrder(column, direction)}, nil +} + +func (s *auditlogService) checkRole( + ctx context.Context, + requiredRole accountproto.Account_Role, + environmentNamespace string, +) (*eventproto.Editor, error) { + editor, err := 
role.CheckRole(ctx, requiredRole, func(email string) (*accountproto.GetAccountResponse, error) { + return s.accountClient.GetAccount(ctx, &accountproto.GetAccountRequest{ + Email: email, + EnvironmentNamespace: environmentNamespace, + }) + }) + if err != nil { + switch status.Code(err) { + case codes.Unauthenticated: + s.logger.Info( + "Unauthenticated", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, localizedError(statusUnauthenticated, locale.JaJP) + case codes.PermissionDenied: + s.logger.Info( + "Permission denied", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, localizedError(statusPermissionDenied, locale.JaJP) + default: + s.logger.Error( + "Failed to check role", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + } + return editor, nil +} + +func (s *auditlogService) checkAdminRole(ctx context.Context) (*eventproto.Editor, error) { + editor, err := role.CheckAdminRole(ctx) + if err != nil { + switch status.Code(err) { + case codes.Unauthenticated: + s.logger.Info( + "Unauthenticated", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusUnauthenticated, locale.JaJP) + case codes.PermissionDenied: + s.logger.Info( + "Permission denied", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusPermissionDenied, locale.JaJP) + default: + s.logger.Error( + "Failed to check role", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + } + return editor, nil +} diff --git a/pkg/auditlog/api/api_test.go 
b/pkg/auditlog/api/api_test.go new file mode 100644 index 000000000..7e0d51e44 --- /dev/null +++ b/pkg/auditlog/api/api_test.go @@ -0,0 +1,243 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "context" + "errors" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + accountclientmock "github.com/bucketeer-io/bucketeer/pkg/account/client/mock" + v2alsmock "github.com/bucketeer-io/bucketeer/pkg/auditlog/storage/v2/mock" + domainevent "github.com/bucketeer-io/bucketeer/pkg/domainevent/domain" + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + mysqlmock "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + "github.com/bucketeer-io/bucketeer/pkg/token" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + proto "github.com/bucketeer-io/bucketeer/proto/auditlog" + domaineventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +func TestNewAuditLogService(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + accountClientMock := accountclientmock.NewMockClient(mockController) + mysqlClient := mysqlmock.NewMockClient(mockController) + logger := zap.NewNop() + s := NewAuditLogService(accountClientMock, mysqlClient, 
WithLogger(logger)) + assert.IsType(t, &auditlogService{}, s) +} + +func TestListAuditLogsMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*auditlogService) + input *proto.ListAuditLogsRequest + expected *proto.ListAuditLogsResponse + expectedErr error + }{ + "err: ErrInvalidCursor": { + setup: nil, + input: &proto.ListAuditLogsRequest{Cursor: "XXX"}, + expected: nil, + expectedErr: errInvalidCursorJaJP, + }, + "err: ErrInternal": { + setup: func(s *auditlogService) { + s.mysqlStorage.(*v2alsmock.MockAuditLogStorage).EXPECT().ListAuditLogs( + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, 0, int64(0), errors.New("test")) + }, + input: &proto.ListAuditLogsRequest{}, + expected: nil, + expectedErr: errInternalJaJP, + }, + "success": { + setup: func(s *auditlogService) { + s.mysqlStorage.(*v2alsmock.MockAuditLogStorage).EXPECT().ListAuditLogs( + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(createAuditLogs(t), 2, int64(10), nil) + }, + input: &proto.ListAuditLogsRequest{PageSize: 2, Cursor: "", EnvironmentNamespace: "ns0"}, + expected: &proto.ListAuditLogsResponse{AuditLogs: createAuditLogs(t), Cursor: "2", TotalCount: 10}, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + s := newAuditLogService(t, mockController) + if p.setup != nil { + p.setup(s) + } + actual, err := s.ListAuditLogs(createContextWithToken(t, accountproto.Account_UNASSIGNED), p.input) + assert.Equal(t, p.expectedErr, err) + assert.Equal(t, p.expected, actual) + }) + } +} + +func TestListAdminAuditLogsMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*auditlogService) + input *proto.ListAdminAuditLogsRequest + expected *proto.ListAdminAuditLogsResponse + 
expectedErr error + }{ + "err: ErrInvalidCursor": { + setup: nil, + input: &proto.ListAdminAuditLogsRequest{Cursor: "invalid"}, + expected: nil, + expectedErr: errInvalidCursorJaJP, + }, + "err: ErrInternal": { + setup: func(s *auditlogService) { + s.mysqlAdminStorage.(*v2alsmock.MockAdminAuditLogStorage).EXPECT().ListAdminAuditLogs( + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, 0, int64(0), errors.New("test")) + }, + input: &proto.ListAdminAuditLogsRequest{}, + expected: nil, + expectedErr: errInternalJaJP, + }, + "success": { + setup: func(s *auditlogService) { + s.mysqlAdminStorage.(*v2alsmock.MockAdminAuditLogStorage).EXPECT().ListAdminAuditLogs( + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(createAuditLogs(t), 2, int64(10), nil) + }, + input: &proto.ListAdminAuditLogsRequest{PageSize: 2, Cursor: ""}, + expected: &proto.ListAdminAuditLogsResponse{AuditLogs: createAuditLogs(t), Cursor: "2", TotalCount: 10}, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + s := newAuditLogService(t, mockController) + if p.setup != nil { + p.setup(s) + } + actual, err := s.ListAdminAuditLogs(createContextWithToken(t, accountproto.Account_OWNER), p.input) + assert.Equal(t, p.expectedErr, err) + assert.Equal(t, p.expected, actual) + }) + } +} + +func TestListFeatureHistoryMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*auditlogService) + input *proto.ListFeatureHistoryRequest + expected *proto.ListFeatureHistoryResponse + expectedErr error + }{ + "err: ErrInvalidCursor": { + setup: nil, + input: &proto.ListFeatureHistoryRequest{Cursor: "XXX"}, + expected: nil, + expectedErr: errInvalidCursorJaJP, + }, + "err: ErrInternal": { + setup: func(s *auditlogService) { + s.mysqlStorage.(*v2alsmock.MockAuditLogStorage).EXPECT().ListAuditLogs( + 
gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, 0, int64(0), errors.New("test")) + }, + input: &proto.ListFeatureHistoryRequest{}, + expected: nil, + expectedErr: errInternalJaJP, + }, + "success": { + setup: func(s *auditlogService) { + s.mysqlStorage.(*v2alsmock.MockAuditLogStorage).EXPECT().ListAuditLogs( + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(createAuditLogs(t), 2, int64(10), nil) + }, + input: &proto.ListFeatureHistoryRequest{ + FeatureId: "fid-1", PageSize: 2, Cursor: "", EnvironmentNamespace: "ns0", + }, + expected: &proto.ListFeatureHistoryResponse{AuditLogs: createAuditLogs(t), Cursor: "2", TotalCount: int64(10)}, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + s := newAuditLogService(t, mockController) + if p.setup != nil { + p.setup(s) + } + actual, err := s.ListFeatureHistory(createContextWithToken(t, accountproto.Account_UNASSIGNED), p.input) + assert.Equal(t, p.expectedErr, err) + assert.Equal(t, p.expected, actual) + }) + } +} + +func newAuditLogService(t *testing.T, mockController *gomock.Controller) *auditlogService { + t.Helper() + logger, err := log.NewLogger() + require.NoError(t, err) + accountClientMock := accountclientmock.NewMockClient(mockController) + ar := &accountproto.GetAccountResponse{ + Account: &accountproto.Account{ + Email: "email", + Role: accountproto.Account_VIEWER, + }, + } + accountClientMock.EXPECT().GetAccount(gomock.Any(), gomock.Any()).Return(ar, nil).AnyTimes() + return &auditlogService{ + accountClient: accountClientMock, + mysqlStorage: v2alsmock.NewMockAuditLogStorage(mockController), + mysqlAdminStorage: v2alsmock.NewMockAdminAuditLogStorage(mockController), + logger: logger.Named("api"), + } +} + +func createAuditLogs(t *testing.T) []*proto.AuditLog { + t.Helper() + msgUnknown := domainevent.LocalizedMessage(domaineventproto.Event_UNKNOWN, locale.JaJP) + return []*proto.AuditLog{ + {Id: 
"id-0", LocalizedMessage: msgUnknown}, + {Id: "id-1", LocalizedMessage: msgUnknown}, + } +} + +func createContextWithToken(t *testing.T, role accountproto.Account_Role) context.Context { + t.Helper() + token := &token.IDToken{ + Email: "test@example.com", + AdminRole: role, + } + ctx := context.TODO() + return context.WithValue(ctx, rpc.Key, token) +} diff --git a/pkg/auditlog/api/error.go b/pkg/auditlog/api/error.go new file mode 100644 index 000000000..0db38d417 --- /dev/null +++ b/pkg/auditlog/api/error.go @@ -0,0 +1,86 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc/codes" + gstatus "google.golang.org/grpc/status" + + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/rpc/status" +) + +var ( + statusInternal = gstatus.New(codes.Internal, "auditlog: internal") + statusUnauthenticated = gstatus.New(codes.Unauthenticated, "auditlog: unauthenticated") + statusPermissionDenied = gstatus.New(codes.PermissionDenied, "auditlog: permission denied") + statusInvalidCursor = gstatus.New(codes.InvalidArgument, "auditlog: cursor is invalid") + statusInvalidOrderBy = gstatus.New(codes.InvalidArgument, "auditlog: order_by is invalid") + + errInternalJaJP = status.MustWithDetails( + statusInternal, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "内部エラーが発生しました", + }, + ) + errUnauthenticatedJaJP = status.MustWithDetails( + statusUnauthenticated, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "認証されていません", + }, + ) + errPermissionDeniedJaJP = status.MustWithDetails( + statusPermissionDenied, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "権限がありません", + }, + ) + errInvalidCursorJaJP = status.MustWithDetails( + statusInvalidCursor, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不正なcursorです", + }, + ) + errInvalidOrderByJaJP = status.MustWithDetails( + statusInvalidOrderBy, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不正なソート順の指定です", + }, + ) +) + +func localizedError(s *gstatus.Status, loc string) error { + // handle loc if multi-lang is necessary + switch s { + case statusInternal: + return errInternalJaJP + case statusUnauthenticated: + return errUnauthenticatedJaJP + case statusPermissionDenied: + return errPermissionDeniedJaJP + case statusInvalidCursor: + return errInvalidCursorJaJP + case statusInvalidOrderBy: + return errInvalidOrderByJaJP + default: + return errInternalJaJP + } +} diff --git 
a/pkg/auditlog/client/BUILD.bazel b/pkg/auditlog/client/BUILD.bazel new file mode 100644 index 000000000..0bec0b479 --- /dev/null +++ b/pkg/auditlog/client/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["client.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/auditlog/client", + visibility = ["//visibility:public"], + deps = [ + "//pkg/rpc/client:go_default_library", + "//proto/auditlog:go_default_library", + "@org_golang_google_grpc//:go_default_library", + ], +) diff --git a/pkg/auditlog/client/client.go b/pkg/auditlog/client/client.go new file mode 100644 index 000000000..8d30f3de3 --- /dev/null +++ b/pkg/auditlog/client/client.go @@ -0,0 +1,49 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "google.golang.org/grpc" + + rpcclient "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + proto "github.com/bucketeer-io/bucketeer/proto/auditlog" +) + +type Client interface { + proto.AuditLogServiceClient + Close() +} + +type client struct { + proto.AuditLogServiceClient + address string + connection *grpc.ClientConn +} + +func NewClient(addr, certPath string, opts ...rpcclient.Option) (Client, error) { + conn, err := rpcclient.NewClientConn(addr, certPath, opts...) 
+ if err != nil { + return nil, err + } + return &client{ + AuditLogServiceClient: proto.NewAuditLogServiceClient(conn), + address: addr, + connection: conn, + }, nil +} + +func (c *client) Close() { + c.connection.Close() +} diff --git a/pkg/auditlog/cmd/persister/BUILD.bazel b/pkg/auditlog/cmd/persister/BUILD.bazel new file mode 100644 index 000000000..d7ed825ab --- /dev/null +++ b/pkg/auditlog/cmd/persister/BUILD.bazel @@ -0,0 +1,20 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["persister.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/auditlog/cmd/persister", + visibility = ["//visibility:public"], + deps = [ + "//pkg/auditlog/persister:go_default_library", + "//pkg/cli:go_default_library", + "//pkg/health:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/pubsub:go_default_library", + "//pkg/pubsub/puller:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "@in_gopkg_alecthomas_kingpin_v2//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/auditlog/cmd/persister/persister.go b/pkg/auditlog/cmd/persister/persister.go new file mode 100644 index 000000000..464556f20 --- /dev/null +++ b/pkg/auditlog/cmd/persister/persister.go @@ -0,0 +1,171 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package persister + +import ( + "context" + "time" + + "go.uber.org/zap" + kingpin "gopkg.in/alecthomas/kingpin.v2" + + pst "github.com/bucketeer-io/bucketeer/pkg/auditlog/persister" + "github.com/bucketeer-io/bucketeer/pkg/cli" + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/pubsub" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" +) + +const command = "persister" + +type Persister interface { + Run(context.Context, metrics.Metrics, *zap.Logger) error +} + +type persister struct { + *kingpin.CmdClause + port *int + project *string + mysqlUser *string + mysqlPass *string + mysqlHost *string + mysqlPort *int + mysqlDBName *string + subscription *string + maxMPS *int + numWorkers *int + topic *string + flushSize *int + flushInterval *time.Duration + certPath *string + keyPath *string + pullerNumGoroutines *int + pullerMaxOutstandingMessages *int + pullerMaxOutstandingBytes *int +} + +func RegisterCommand(r cli.CommandRegistry, p cli.ParentCommand) cli.Command { + cmd := p.Command(command, "Start auditlog persister") + persister := &persister{ + CmdClause: cmd, + port: cmd.Flag("port", "Port to bind to.").Default("9090").Int(), + project: cmd.Flag("project", "Google Cloud project name.").String(), + mysqlUser: cmd.Flag("mysql-user", "MySQL user.").Required().String(), + mysqlPass: cmd.Flag("mysql-pass", "MySQL password.").Required().String(), + mysqlHost: cmd.Flag("mysql-host", "MySQL host.").Required().String(), + mysqlPort: cmd.Flag("mysql-port", "MySQL port.").Required().Int(), + mysqlDBName: cmd.Flag("mysql-db-name", "MySQL database name.").Required().String(), + subscription: cmd.Flag("subscription", "Google PubSub subscription name.").Required().String(), + topic: cmd.Flag("topic", "Google PubSub topic name.").Required().String(), + maxMPS: 
cmd.Flag("max-mps", "Maximum messages should be handled in a second.").Default("1000").Int(), + numWorkers: cmd.Flag("num-workers", "Number of workers.").Default("2").Int(), + flushSize: cmd.Flag("flush-size", "Maximum number of messages in one flush.").Default("100").Int(), + flushInterval: cmd.Flag("flush-interval", "Maximum interval between two flushes.").Default("1s").Duration(), + certPath: cmd.Flag("cert", "Path to TLS certificate.").Required().String(), + keyPath: cmd.Flag("key", "Path to TLS key.").Required().String(), + pullerNumGoroutines: cmd.Flag( + "puller-num-goroutines", + "Number of goroutines will be spawned to pull messages.", + ).Int(), + pullerMaxOutstandingMessages: cmd.Flag( + "puller-max-outstanding-messages", + "Maximum number of unprocessed messages.", + ).Int(), + pullerMaxOutstandingBytes: cmd.Flag("puller-max-outstanding-bytes", "Maximum size of unprocessed messages.").Int(), + } + r.RegisterCommand(persister) + return persister +} + +func (p *persister) Run(ctx context.Context, metrics metrics.Metrics, logger *zap.Logger) error { + registerer := metrics.DefaultRegisterer() + + mysqlClient, err := p.createMySQLClient(ctx, registerer, logger) + if err != nil { + return err + } + defer mysqlClient.Close() + + puller, err := p.createPuller(ctx, logger) + if err != nil { + return err + } + + persister := pst.NewPersister( + puller, + mysqlClient, + pst.WithMaxMPS(*p.maxMPS), + pst.WithNumWorkers(*p.numWorkers), + pst.WithFlushSize(*p.flushSize), + pst.WithFlushInterval(*p.flushInterval), + pst.WithMetrics(registerer), + pst.WithLogger(logger), + ) + defer persister.Stop() + go persister.Run() // nolint:errcheck + + healthChecker := health.NewGrpcChecker( + health.WithTimeout(time.Second), + health.WithCheck("metrics", metrics.Check), + health.WithCheck("persister", persister.Check), + ) + go healthChecker.Run(ctx) + + server := rpc.NewServer(healthChecker, *p.certPath, *p.keyPath, + rpc.WithPort(*p.port), + rpc.WithMetrics(registerer), + 
rpc.WithLogger(logger), + rpc.WithHandler("/health", healthChecker), + ) + defer server.Stop(10 * time.Second) + go server.Run() + + <-ctx.Done() + return nil +} + +func (p *persister) createMySQLClient( + ctx context.Context, + registerer metrics.Registerer, + logger *zap.Logger, +) (mysql.Client, error) { + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + return mysql.NewClient( + ctx, + *p.mysqlUser, *p.mysqlPass, *p.mysqlHost, + *p.mysqlPort, + *p.mysqlDBName, + mysql.WithLogger(logger), + mysql.WithMetrics(registerer), + ) +} + +func (p *persister) createPuller(ctx context.Context, logger *zap.Logger) (puller.Puller, error) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + client, err := pubsub.NewClient(ctx, *p.project, pubsub.WithLogger(logger)) + if err != nil { + return nil, err + } + return client.CreatePuller(*p.subscription, *p.topic, + pubsub.WithNumGoroutines(*p.pullerNumGoroutines), + pubsub.WithMaxOutstandingMessages(*p.pullerMaxOutstandingMessages), + pubsub.WithMaxOutstandingBytes(*p.pullerMaxOutstandingBytes), + ) +} diff --git a/pkg/auditlog/cmd/server/BUILD.bazel b/pkg/auditlog/cmd/server/BUILD.bazel new file mode 100644 index 000000000..5a9944312 --- /dev/null +++ b/pkg/auditlog/cmd/server/BUILD.bazel @@ -0,0 +1,21 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["server.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/auditlog/cmd/server", + visibility = ["//visibility:public"], + deps = [ + "//pkg/account/client:go_default_library", + "//pkg/auditlog/api:go_default_library", + "//pkg/cli:go_default_library", + "//pkg/health:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/rpc/client:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//pkg/token:go_default_library", + "@in_gopkg_alecthomas_kingpin_v2//:go_default_library", + 
"@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/auditlog/cmd/server/server.go b/pkg/auditlog/cmd/server/server.go new file mode 100644 index 000000000..c776c45bb --- /dev/null +++ b/pkg/auditlog/cmd/server/server.go @@ -0,0 +1,155 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + "context" + "time" + + "go.uber.org/zap" + kingpin "gopkg.in/alecthomas/kingpin.v2" + + accountclient "github.com/bucketeer-io/bucketeer/pkg/account/client" + "github.com/bucketeer-io/bucketeer/pkg/auditlog/api" + "github.com/bucketeer-io/bucketeer/pkg/cli" + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + "github.com/bucketeer-io/bucketeer/pkg/token" +) + +const command = "server" + +type server struct { + *kingpin.CmdClause + port *int + project *string + mysqlUser *string + mysqlPass *string + mysqlHost *string + mysqlPort *int + mysqlDBName *string + accountService *string + certPath *string + keyPath *string + serviceTokenPath *string + + oauthKeyPath *string + oauthClientID *string + oauthIssuer *string +} + +func RegisterCommand(r cli.CommandRegistry, p cli.ParentCommand) cli.Command { + cmd := p.Command(command, "Start the server") + server := &server{ + CmdClause: cmd, + port: 
cmd.Flag("port", "Port to bind to.").Default("9090").Int(), + project: cmd.Flag("project", "Google Cloud project name.").String(), + mysqlUser: cmd.Flag("mysql-user", "MySQL user.").Required().String(), + mysqlPass: cmd.Flag("mysql-pass", "MySQL password.").Required().String(), + mysqlHost: cmd.Flag("mysql-host", "MySQL host.").Required().String(), + mysqlPort: cmd.Flag("mysql-port", "MySQL port.").Required().Int(), + mysqlDBName: cmd.Flag("mysql-db-name", "MySQL database name.").Required().String(), + accountService: cmd.Flag( + "account-service", + "bucketeer-account-service address.", + ).Default("account:9090").String(), + certPath: cmd.Flag("cert", "Path to TLS certificate.").Required().String(), + keyPath: cmd.Flag("key", "Path to TLS key.").Required().String(), + serviceTokenPath: cmd.Flag("service-token", "Path to service token.").Required().String(), + oauthKeyPath: cmd.Flag("oauth-key", "Path to public key used to verify oauth token.").Required().String(), + oauthClientID: cmd.Flag("oauth-client-id", "The oauth clientID registered at dex.").Required().String(), + oauthIssuer: cmd.Flag("oauth-issuer", "The url of dex issuer.").Required().String(), + } + r.RegisterCommand(server) + return server +} + +func (s *server) Run(ctx context.Context, metrics metrics.Metrics, logger *zap.Logger) error { + registerer := metrics.DefaultRegisterer() + + mysqlClient, err := s.createMySQLClient(ctx, registerer, logger) + if err != nil { + return err + } + defer mysqlClient.Close() + + creds, err := client.NewPerRPCCredentials(*s.serviceTokenPath) + if err != nil { + return err + } + + accountClient, err := accountclient.NewClient(*s.accountService, *s.certPath, + client.WithPerRPCCredentials(creds), + client.WithDialTimeout(30*time.Second), + client.WithBlock(), + client.WithMetrics(registerer), + client.WithLogger(logger), + ) + if err != nil { + return err + } + defer accountClient.Close() + + service := api.NewAuditLogService( + accountClient, + mysqlClient, + 
api.WithLogger(logger), + ) + + verifier, err := token.NewVerifier(*s.oauthKeyPath, *s.oauthIssuer, *s.oauthClientID) + if err != nil { + return err + } + + healthChecker := health.NewGrpcChecker( + health.WithTimeout(time.Second), + health.WithCheck("metrics", metrics.Check), + ) + go healthChecker.Run(ctx) + + server := rpc.NewServer(service, *s.certPath, *s.keyPath, + rpc.WithPort(*s.port), + rpc.WithVerifier(verifier), + rpc.WithMetrics(registerer), + rpc.WithLogger(logger), + rpc.WithService(healthChecker), + rpc.WithHandler("/health", healthChecker), + ) + defer server.Stop(10 * time.Second) + go server.Run() + + <-ctx.Done() + return nil +} + +func (s *server) createMySQLClient( + ctx context.Context, + registerer metrics.Registerer, + logger *zap.Logger, +) (mysql.Client, error) { + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + return mysql.NewClient( + ctx, + *s.mysqlUser, *s.mysqlPass, *s.mysqlHost, + *s.mysqlPort, + *s.mysqlDBName, + mysql.WithLogger(logger), + mysql.WithMetrics(registerer), + ) +} diff --git a/pkg/auditlog/domain/BUILD.bazel b/pkg/auditlog/domain/BUILD.bazel new file mode 100644 index 000000000..c664f9dc5 --- /dev/null +++ b/pkg/auditlog/domain/BUILD.bazel @@ -0,0 +1,12 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["auditlog.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/auditlog/domain", + visibility = ["//visibility:public"], + deps = [ + "//proto/auditlog:go_default_library", + "//proto/event/domain:go_default_library", + ], +) diff --git a/pkg/auditlog/domain/auditlog.go b/pkg/auditlog/domain/auditlog.go new file mode 100644 index 000000000..d4b4839cb --- /dev/null +++ b/pkg/auditlog/domain/auditlog.go @@ -0,0 +1,41 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package domain + +import ( + proto "github.com/bucketeer-io/bucketeer/proto/auditlog" + domainevent "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +type AuditLog struct { + *proto.AuditLog + EnvironmentNamespace string +} + +func NewAuditLog(event *domainevent.Event, envirronmentNamespace string) *AuditLog { + return &AuditLog{ + AuditLog: &proto.AuditLog{ + Id: event.Id, + Timestamp: event.Timestamp, + EntityType: event.EntityType, + EntityId: event.EntityId, + Type: event.Type, + Event: event.Data, + Editor: event.Editor, + Options: event.Options, + }, + EnvironmentNamespace: envirronmentNamespace, + } +} diff --git a/pkg/auditlog/persister/BUILD.bazel b/pkg/auditlog/persister/BUILD.bazel new file mode 100644 index 000000000..777511e78 --- /dev/null +++ b/pkg/auditlog/persister/BUILD.bazel @@ -0,0 +1,48 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "metrics.go", + "persister.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/auditlog/persister", + visibility = ["//visibility:public"], + deps = [ + "//pkg/auditlog/domain:go_default_library", + "//pkg/auditlog/storage/v2:go_default_library", + "//pkg/errgroup:go_default_library", + "//pkg/health:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/pubsub/puller:go_default_library", + "//pkg/pubsub/puller/codes:go_default_library", + "//pkg/storage:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//proto/event/domain:go_default_library", + 
"@com_github_golang_protobuf//proto:go_default_library", + "@com_github_prometheus_client_golang//prometheus:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["persister_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/domainevent/domain:go_default_library", + "//pkg/health:go_default_library", + "//pkg/log:go_default_library", + "//pkg/metrics/mock:go_default_library", + "//pkg/pubsub/puller:go_default_library", + "//pkg/pubsub/puller/mock:go_default_library", + "//pkg/storage/v2/mysql/mock:go_default_library", + "//proto/account:go_default_library", + "//proto/event/domain:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/auditlog/persister/metrics.go b/pkg/auditlog/persister/metrics.go new file mode 100644 index 000000000..ea1e6bdda --- /dev/null +++ b/pkg/auditlog/persister/metrics.go @@ -0,0 +1,43 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package persister + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/bucketeer-io/bucketeer/pkg/metrics" +) + +var ( + receivedCounter = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "auditlog", + Name: "persister_received_total", + Help: "Total number of received messages", + }) + + handledCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "auditlog", + Name: "persister_handled_total", + Help: "Total number of handled messages", + }, []string{"code"}) +) + +func registerMetrics(r metrics.Registerer) { + r.MustRegister(receivedCounter, handledCounter) +} diff --git a/pkg/auditlog/persister/persister.go b/pkg/auditlog/persister/persister.go new file mode 100644 index 000000000..edf9c1210 --- /dev/null +++ b/pkg/auditlog/persister/persister.go @@ -0,0 +1,247 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package persister + +import ( + "context" + "time" + + pb "github.com/golang/protobuf/proto" // nolint:staticcheck + "go.uber.org/zap" + + "github.com/bucketeer-io/bucketeer/pkg/auditlog/domain" + v2als "github.com/bucketeer-io/bucketeer/pkg/auditlog/storage/v2" + "github.com/bucketeer-io/bucketeer/pkg/errgroup" + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller/codes" + "github.com/bucketeer-io/bucketeer/pkg/storage" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + domainevent "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +type options struct { + maxMPS int + numWorkers int + flushSize int + flushInterval time.Duration + metrics metrics.Registerer + logger *zap.Logger +} + +type Option func(*options) + +func WithMaxMPS(mps int) Option { + return func(opts *options) { + opts.maxMPS = mps + } +} + +func WithNumWorkers(n int) Option { + return func(opts *options) { + opts.numWorkers = n + } +} + +func WithFlushSize(s int) Option { + return func(opts *options) { + opts.flushSize = s + } +} + +func WithFlushInterval(i time.Duration) Option { + return func(opts *options) { + opts.flushInterval = i + } +} + +func WithMetrics(r metrics.Registerer) Option { + return func(opts *options) { + opts.metrics = r + } +} + +func WithLogger(logger *zap.Logger) Option { + return func(opts *options) { + opts.logger = logger + } +} + +type Persister struct { + puller puller.RateLimitedPuller + mysqlAdminStorage v2als.AdminAuditLogStorage + mysqlStorage v2als.AuditLogStorage + group errgroup.Group + opts *options + logger *zap.Logger + ctx context.Context + cancel func() + doneCh chan struct{} +} + +func NewPersister( + p puller.Puller, + mysqlClient mysql.Client, + opts ...Option, +) *Persister { + dopts := &options{ + maxMPS: 1000, + numWorkers: 1, + flushSize: 100, + flushInterval: time.Second, + 
logger: zap.NewNop(), + } + for _, opt := range opts { + opt(dopts) + } + ctx, cancel := context.WithCancel(context.Background()) + if dopts.metrics != nil { + registerMetrics(dopts.metrics) + } + return &Persister{ + puller: puller.NewRateLimitedPuller(p, dopts.maxMPS), + mysqlAdminStorage: v2als.NewAdminAuditLogStorage(mysqlClient), + mysqlStorage: v2als.NewAuditLogStorage(mysqlClient), + opts: dopts, + logger: dopts.logger.Named("persister"), + ctx: ctx, + cancel: cancel, + doneCh: make(chan struct{}), + } +} + +func (p *Persister) Run() error { + defer close(p.doneCh) + p.group.Go(func() error { + return p.puller.Run(p.ctx) + }) + for i := 0; i < p.opts.numWorkers; i++ { + p.group.Go(p.runWorker) + } + return p.group.Wait() +} + +func (p *Persister) Stop() { + p.cancel() + <-p.doneCh +} + +func (p *Persister) Check(ctx context.Context) health.Status { + select { + case <-p.ctx.Done(): + p.logger.Error("Unhealthy due to context Done is closed", zap.Error(p.ctx.Err())) + return health.Unhealthy + default: + if p.group.FinishedCount() > 0 { + p.logger.Error("Unhealthy", zap.Int32("FinishedCount", p.group.FinishedCount())) + return health.Unhealthy + } + return health.Healthy + } +} + +func (p *Persister) runWorker() error { + chunk := make(map[string]*puller.Message, p.opts.flushSize) + timer := time.NewTimer(p.opts.flushInterval) + defer timer.Stop() + for { + select { + case msg, ok := <-p.puller.MessageCh(): + if !ok { + return nil + } + receivedCounter.Inc() + id := msg.Attributes["id"] + if id == "" { + msg.Ack() + handledCounter.WithLabelValues(codes.MissingID.String()).Inc() + continue + } + if _, ok := chunk[id]; ok { + p.logger.Warn("Message with duplicate id", zap.String("id", id)) + handledCounter.WithLabelValues(codes.DuplicateID.String()).Inc() + } + chunk[id] = msg + if len(chunk) >= p.opts.flushSize { + p.flushChunk(chunk) + chunk = make(map[string]*puller.Message, p.opts.flushSize) + timer.Reset(p.opts.flushInterval) + } + case <-timer.C: + if 
len(chunk) > 0 { + p.flushChunk(chunk) + chunk = make(map[string]*puller.Message, p.opts.flushSize) + } + timer.Reset(p.opts.flushInterval) + case <-p.ctx.Done(): + return nil + } + } +} + +func (p *Persister) flushChunk(chunk map[string]*puller.Message) { + auditlogs, adminAuditLogs, messages, adminMessages := p.extractAuditLogs(chunk) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + // Environment audit logs + p.createAuditLogsMySQL(ctx, auditlogs, messages, p.mysqlStorage.CreateAuditLogs) + // Admin audit logs + p.createAuditLogsMySQL(ctx, adminAuditLogs, adminMessages, p.mysqlAdminStorage.CreateAdminAuditLogs) +} + +func (p *Persister) extractAuditLogs( + chunk map[string]*puller.Message, +) (auditlogs, adminAuditLogs []*domain.AuditLog, messages, adminMessages []*puller.Message) { + for _, msg := range chunk { + event := &domainevent.Event{} + if err := pb.Unmarshal(msg.Data, event); err != nil { + p.logger.Error("Failed to unmarshal message", zap.Error(err)) + msg.Ack() + continue + } + if event.IsAdminEvent { + adminAuditLogs = append(adminAuditLogs, domain.NewAuditLog(event, storage.AdminEnvironmentNamespace)) + adminMessages = append(adminMessages, msg) + } else { + auditlogs = append(auditlogs, domain.NewAuditLog(event, event.EnvironmentNamespace)) + messages = append(messages, msg) + } + } + return +} + +func (p *Persister) createAuditLogsMySQL( + ctx context.Context, + auditlogs []*domain.AuditLog, + messages []*puller.Message, + createFunc func(ctx context.Context, auditLogs []*domain.AuditLog) error, +) { + if len(auditlogs) == 0 { + return + } + if err := createFunc(ctx, auditlogs); err != nil { + p.logger.Error("Failed to put admin audit logs", zap.Error(err)) + for _, msg := range messages { + handledCounter.WithLabelValues(codes.RepeatableError.String()).Inc() + msg.Nack() + } + return + } + for _, msg := range messages { + handledCounter.WithLabelValues(codes.OK.String()).Inc() + msg.Ack() + } +} diff 
--git a/pkg/auditlog/persister/persister_test.go b/pkg/auditlog/persister/persister_test.go new file mode 100644 index 000000000..751242ccc --- /dev/null +++ b/pkg/auditlog/persister/persister_test.go @@ -0,0 +1,156 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package persister + +import ( + "context" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/golang/protobuf/proto" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + domainevent "github.com/bucketeer-io/bucketeer/pkg/domainevent/domain" + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/log" + metricsmock "github.com/bucketeer-io/bucketeer/pkg/metrics/mock" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller" + pullermock "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller/mock" + mysqlmock "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + "github.com/bucketeer-io/bucketeer/proto/event/domain" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +func TestNewPersister(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + puller := pullermock.NewMockPuller(mockController) + mysqlClient := mysqlmock.NewMockClient(mockController) + registerer := 
metricsmock.NewMockRegisterer(mockController) + registerer.EXPECT().MustRegister(gomock.Any()).Return() + p := NewPersister( + puller, + mysqlClient, + WithMaxMPS(1000), + WithNumWorkers(1), + WithFlushSize(100), + WithFlushInterval(time.Second), + WithMetrics(registerer), + WithLogger(zap.NewNop()), + ) + assert.IsType(t, &Persister{}, p) +} + +func TestCheck(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := []struct { + setup func(p *Persister) + expected health.Status + }{ + { + setup: func(p *Persister) { p.cancel() }, + expected: health.Unhealthy, + }, + { + setup: func(p *Persister) { + p.group.Go(func() error { return nil }) + time.Sleep(100 * time.Millisecond) // wait for p.group.FinishedCount() is incremented + }, + expected: health.Unhealthy, + }, + { + setup: nil, + expected: health.Healthy, + }, + } + + for _, pat := range patterns { + p := newPersister(t, mockController) + if pat.setup != nil { + pat.setup(p) + } + assert.Equal(t, pat.expected, p.Check(context.Background())) + } +} + +func TestExtractAuditLogs(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + editor := &eventproto.Editor{Email: "test@example.com", Role: accountproto.Account_EDITOR} + event0, err := domainevent.NewEvent(editor, eventproto.Event_FEATURE, "fId-0", eventproto.Event_FEATURE_CREATED, &eventproto.FeatureCreatedEvent{Id: "fId-0"}, "ns0") + assert.NoError(t, err) + event1, err := domainevent.NewEvent(editor, eventproto.Event_FEATURE, "fId-1", eventproto.Event_FEATURE_CREATED, &eventproto.FeatureCreatedEvent{Id: "fId-1"}, "ns0") + assert.NoError(t, err) + adninEvent0, err := domainevent.NewAdminEvent(editor, eventproto.Event_FEATURE, "fId-2", eventproto.Event_FEATURE_CREATED, &eventproto.FeatureCreatedEvent{Id: "fId-2"}) + assert.NoError(t, err) + chunk := createChunk(t, []*domain.Event{event0, event1, adninEvent0}) + + p := newPersister(t, 
mockController) + auditLogs, adminAuditLogs, messages, adminMessages := p.extractAuditLogs(chunk) + for i, al := range auditLogs { + msg, ok := chunk[al.Id] + assert.True(t, ok) + assert.Equal(t, msg.ID, al.Id) + assert.Equal(t, messages[i].ID, al.Id) + } + for i, al := range adminAuditLogs { + msg, ok := chunk[al.Id] + assert.True(t, ok) + assert.Equal(t, msg.ID, al.Id) + assert.Equal(t, adminMessages[i].ID, al.Id) + } +} + +func newPersister(t *testing.T, mockController *gomock.Controller) *Persister { + t.Helper() + ctx, cancel := context.WithCancel(context.Background()) + logger, err := log.NewLogger() + require.NoError(t, err) + return &Persister{ + puller: pullermock.NewMockRateLimitedPuller(mockController), + logger: logger.Named("persister"), + ctx: ctx, + cancel: cancel, + doneCh: make(chan struct{}), + } +} + +func createChunk(t *testing.T, events []*domain.Event) map[string]*puller.Message { + t.Helper() + chunk := make(map[string]*puller.Message) + for _, e := range events { + data, err := proto.Marshal(e) + require.NoError(t, err) + chunk[e.Id] = &puller.Message{ + ID: e.Id, + Data: data, + Attributes: map[string]string{"id": e.Id}, + Ack: func() {}, + Nack: func() {}, + } + } + return chunk +} diff --git a/pkg/auditlog/storage/v2/BUILD.bazel b/pkg/auditlog/storage/v2/BUILD.bazel new file mode 100644 index 000000000..0e1c1b9b8 --- /dev/null +++ b/pkg/auditlog/storage/v2/BUILD.bazel @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "admin_audit_log.go", + "audit_log.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/auditlog/storage/v2", + visibility = ["//visibility:public"], + deps = [ + "//pkg/auditlog/domain:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//proto/auditlog:go_default_library", + "//proto/event/domain:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "admin_audit_log_test.go", + 
"audit_log_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//pkg/auditlog/domain:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//pkg/storage/v2/mysql/mock:go_default_library", + "//proto/auditlog:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + ], +) diff --git a/pkg/auditlog/storage/v2/admin_audit_log.go b/pkg/auditlog/storage/v2/admin_audit_log.go new file mode 100644 index 000000000..bc950ad82 --- /dev/null +++ b/pkg/auditlog/storage/v2/admin_audit_log.go @@ -0,0 +1,166 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package v2 + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/bucketeer-io/bucketeer/pkg/auditlog/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + proto "github.com/bucketeer-io/bucketeer/proto/auditlog" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +var ( + ErrAdminAuditLogAlreadyExists = errors.New("auditlog: admin auditlog already exists") +) + +type AdminAuditLogStorage interface { + CreateAdminAuditLogs(ctx context.Context, auditLogs []*domain.AuditLog) error + ListAdminAuditLogs( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, + ) ([]*proto.AuditLog, int, int64, error) +} + +type adminAuditLogStorage struct { + qe mysql.QueryExecer +} + +func NewAdminAuditLogStorage(qe mysql.QueryExecer) AdminAuditLogStorage { + return &adminAuditLogStorage{qe} +} + +func (s *adminAuditLogStorage) CreateAdminAuditLogs(ctx context.Context, auditLogs []*domain.AuditLog) error { + if len(auditLogs) == 0 { + return nil + } + var query strings.Builder + query.WriteString(` + INSERT INTO admin_audit_log ( + id, + timestamp, + entity_type, + entity_id, + type, + event, + editor, + options + ) VALUES + `) + args := []interface{}{} + for i, al := range auditLogs { + if i != 0 { + query.WriteString(",") + } + query.WriteString(" (?, ?, ?, ?, ?, ?, ?, ?)") + args = append( + args, + al.Id, + al.Timestamp, + int32(al.EntityType), + al.EntityId, + int32(al.Type), + mysql.JSONObject{Val: al.Event}, + mysql.JSONObject{Val: al.Editor}, + mysql.JSONObject{Val: al.Options}, + ) + } + _, err := s.qe.ExecContext(ctx, query.String(), args...) 
+ if err != nil { + if err == mysql.ErrDuplicateEntry { + return ErrAdminAuditLogAlreadyExists + } + return err + } + return nil +} + +func (s *adminAuditLogStorage) ListAdminAuditLogs( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, +) ([]*proto.AuditLog, int, int64, error) { + whereSQL, whereArgs := mysql.ConstructWhereSQLString(whereParts) + orderBySQL := mysql.ConstructOrderBySQLString(orders) + limitOffsetSQL := mysql.ConstructLimitOffsetSQLString(limit, offset) + query := fmt.Sprintf(` + SELECT + id, + timestamp, + entity_type, + entity_id, + type, + event, + editor, + options + FROM + admin_audit_log + %s %s %s + `, whereSQL, orderBySQL, limitOffsetSQL, + ) + rows, err := s.qe.QueryContext(ctx, query, whereArgs...) + if err != nil { + return nil, 0, 0, err + } + defer rows.Close() + auditLogs := make([]*proto.AuditLog, 0, limit) + for rows.Next() { + auditLog := proto.AuditLog{} + var et int32 + var t int32 + err := rows.Scan( + &auditLog.Id, + &auditLog.Timestamp, + &et, + &auditLog.EntityId, + &t, + &mysql.JSONObject{Val: &auditLog.Event}, + &mysql.JSONObject{Val: &auditLog.Editor}, + &mysql.JSONObject{Val: &auditLog.Options}, + ) + if err != nil { + return nil, 0, 0, err + } + auditLog.EntityType = eventproto.Event_EntityType(et) + auditLog.Type = eventproto.Event_Type(t) + auditLogs = append(auditLogs, &auditLog) + } + if rows.Err() != nil { + return nil, 0, 0, err + } + nextOffset := offset + len(auditLogs) + var totalCount int64 + countQuery := fmt.Sprintf(` + SELECT + COUNT(1) + FROM + admin_audit_log + %s %s + `, whereSQL, orderBySQL, + ) + err = s.qe.QueryRowContext(ctx, countQuery, whereArgs...).Scan(&totalCount) + if err != nil { + return nil, 0, 0, err + } + return auditLogs, nextOffset, totalCount, nil +} diff --git a/pkg/auditlog/storage/v2/admin_audit_log_test.go b/pkg/auditlog/storage/v2/admin_audit_log_test.go new file mode 100644 index 000000000..0e2913f0c --- /dev/null +++ 
b/pkg/auditlog/storage/v2/admin_audit_log_test.go @@ -0,0 +1,179 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v2 + +import ( + "context" + "errors" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/auditlog/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + proto "github.com/bucketeer-io/bucketeer/proto/auditlog" +) + +func TestNewAdminSubscriptionStorage(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + storage := NewAdminAuditLogStorage(mock.NewMockQueryExecer(mockController)) + assert.IsType(t, &adminAuditLogStorage{}, storage) +} + +func TestCreateAdminAuditLogs(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*adminAuditLogStorage) + input []*domain.AuditLog + expectedErr error + }{ + "ErrAdminAuditLogAlreadyExists": { + setup: func(s *adminAuditLogStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, mysql.ErrDuplicateEntry) + }, + input: []*domain.AuditLog{ + {AuditLog: &proto.AuditLog{Id: "id-0"}}, + {AuditLog: &proto.AuditLog{Id: "id-1"}}, + }, + expectedErr: ErrAdminAuditLogAlreadyExists, + }, + 
"Error": { + setup: func(s *adminAuditLogStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + }, + input: []*domain.AuditLog{ + {AuditLog: &proto.AuditLog{Id: "id-0"}}, + {AuditLog: &proto.AuditLog{Id: "id-1"}}, + }, + expectedErr: errors.New("error"), + }, + "Success: len == 0": { + setup: nil, + input: nil, + expectedErr: nil, + }, + "Success": { + setup: func(s *adminAuditLogStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, nil) + }, + input: []*domain.AuditLog{ + {AuditLog: &proto.AuditLog{Id: "id-0"}}, + {AuditLog: &proto.AuditLog{Id: "id-1"}}, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newAdminAuditLogStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + err := storage.CreateAdminAuditLogs(context.Background(), p.input) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestListAdminAuditLogs(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*adminAuditLogStorage) + whereParts []mysql.WherePart + orders []*mysql.Order + limit int + offset int + expected []*proto.AuditLog + expectedCursor int + expectedErr error + }{ + "Error": { + setup: func(s *adminAuditLogStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + }, + whereParts: nil, + orders: nil, + limit: 0, + offset: 0, + expected: nil, + expectedCursor: 0, + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *adminAuditLogStorage) { + rows := mock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + 
s.qe.(*mock.MockQueryExecer).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + whereParts: nil, + orders: []*mysql.Order{ + mysql.NewOrder("timestamp", mysql.OrderDirectionDesc), + }, + limit: 10, + offset: 5, + expected: []*proto.AuditLog{}, + expectedCursor: 5, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newAdminAuditLogStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + auditLogs, cursor, _, err := storage.ListAdminAuditLogs( + context.Background(), + p.whereParts, + p.orders, + p.limit, + p.offset, + ) + assert.Equal(t, p.expected, auditLogs) + assert.Equal(t, p.expectedCursor, cursor) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func newAdminAuditLogStorageWithMock(t *testing.T, mockController *gomock.Controller) *adminAuditLogStorage { + t.Helper() + return &adminAuditLogStorage{mock.NewMockQueryExecer(mockController)} +} diff --git a/pkg/auditlog/storage/v2/audit_log.go b/pkg/auditlog/storage/v2/audit_log.go new file mode 100644 index 000000000..9a55724a2 --- /dev/null +++ b/pkg/auditlog/storage/v2/audit_log.go @@ -0,0 +1,168 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package v2 + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/bucketeer-io/bucketeer/pkg/auditlog/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + proto "github.com/bucketeer-io/bucketeer/proto/auditlog" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +var ( + ErrAuditLogAlreadyExists = errors.New("auditlog: auditlog already exists") +) + +type AuditLogStorage interface { + CreateAuditLogs(ctx context.Context, auditLogs []*domain.AuditLog) error + ListAuditLogs( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, + ) ([]*proto.AuditLog, int, int64, error) +} + +type auditLogStorage struct { + qe mysql.QueryExecer +} + +func NewAuditLogStorage(qe mysql.QueryExecer) AuditLogStorage { + return &auditLogStorage{qe} +} + +func (s *auditLogStorage) CreateAuditLogs(ctx context.Context, auditLogs []*domain.AuditLog) error { + if len(auditLogs) == 0 { + return nil + } + var query strings.Builder + query.WriteString(` + INSERT INTO audit_log ( + id, + timestamp, + entity_type, + entity_id, + type, + event, + editor, + options, + environment_namespace + ) VALUES + `) + args := []interface{}{} + for i, al := range auditLogs { + if i != 0 { + query.WriteString(",") + } + query.WriteString(" (?, ?, ?, ?, ?, ?, ?, ?, ?)") + args = append( + args, + al.Id, + al.Timestamp, + int32(al.EntityType), + al.EntityId, + int32(al.Type), + mysql.JSONObject{Val: al.Event}, + mysql.JSONObject{Val: al.Editor}, + mysql.JSONObject{Val: al.Options}, + al.EnvironmentNamespace, + ) + } + _, err := s.qe.ExecContext(ctx, query.String(), args...) 
+ if err != nil { + if err == mysql.ErrDuplicateEntry { + return ErrAuditLogAlreadyExists + } + return err + } + return nil +} + +func (s *auditLogStorage) ListAuditLogs( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, +) ([]*proto.AuditLog, int, int64, error) { + whereSQL, whereArgs := mysql.ConstructWhereSQLString(whereParts) + orderBySQL := mysql.ConstructOrderBySQLString(orders) + limitOffsetSQL := mysql.ConstructLimitOffsetSQLString(limit, offset) + query := fmt.Sprintf(` + SELECT + id, + timestamp, + entity_type, + entity_id, + type, + event, + editor, + options + FROM + audit_log + %s %s %s + `, whereSQL, orderBySQL, limitOffsetSQL, + ) + rows, err := s.qe.QueryContext(ctx, query, whereArgs...) + if err != nil { + return nil, 0, 0, err + } + defer rows.Close() + auditLogs := make([]*proto.AuditLog, 0, limit) + for rows.Next() { + auditLog := proto.AuditLog{} + var et int32 + var t int32 + err := rows.Scan( + &auditLog.Id, + &auditLog.Timestamp, + &et, + &auditLog.EntityId, + &t, + &mysql.JSONObject{Val: &auditLog.Event}, + &mysql.JSONObject{Val: &auditLog.Editor}, + &mysql.JSONObject{Val: &auditLog.Options}, + ) + if err != nil { + return nil, 0, 0, err + } + auditLog.EntityType = eventproto.Event_EntityType(et) + auditLog.Type = eventproto.Event_Type(t) + auditLogs = append(auditLogs, &auditLog) + } + if rows.Err() != nil { + return nil, 0, 0, err + } + nextOffset := offset + len(auditLogs) + var totalCount int64 + countQuery := fmt.Sprintf(` + SELECT + COUNT(1) + FROM + audit_log + %s %s + `, whereSQL, orderBySQL, + ) + err = s.qe.QueryRowContext(ctx, countQuery, whereArgs...).Scan(&totalCount) + if err != nil { + return nil, 0, 0, err + } + return auditLogs, nextOffset, totalCount, nil +} diff --git a/pkg/auditlog/storage/v2/audit_log_test.go b/pkg/auditlog/storage/v2/audit_log_test.go new file mode 100644 index 000000000..8931d52d0 --- /dev/null +++ b/pkg/auditlog/storage/v2/audit_log_test.go @@ -0,0 
+1,179 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v2 + +import ( + "context" + "errors" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/auditlog/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + proto "github.com/bucketeer-io/bucketeer/proto/auditlog" +) + +func TestNewSubscriptionStorage(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + storage := NewAuditLogStorage(mock.NewMockQueryExecer(mockController)) + assert.IsType(t, &auditLogStorage{}, storage) +} + +func TestCreateAuditLogs(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*auditLogStorage) + input []*domain.AuditLog + expectedErr error + }{ + "ErrAuditLogAlreadyExists": { + setup: func(s *auditLogStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, mysql.ErrDuplicateEntry) + }, + input: []*domain.AuditLog{ + {AuditLog: &proto.AuditLog{Id: "id-0"}}, + {AuditLog: &proto.AuditLog{Id: "id-1"}}, + }, + expectedErr: ErrAuditLogAlreadyExists, + }, + "Error": { + setup: func(s *auditLogStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + 
gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + }, + input: []*domain.AuditLog{ + {AuditLog: &proto.AuditLog{Id: "id-0"}}, + {AuditLog: &proto.AuditLog{Id: "id-1"}}, + }, + expectedErr: errors.New("error"), + }, + "Success: len == 0": { + setup: nil, + input: nil, + expectedErr: nil, + }, + "Success": { + setup: func(s *auditLogStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, nil) + }, + input: []*domain.AuditLog{ + {AuditLog: &proto.AuditLog{Id: "id-0"}}, + {AuditLog: &proto.AuditLog{Id: "id-1"}}, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newAuditLogStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + err := storage.CreateAuditLogs(context.Background(), p.input) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestListAuditLogs(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*auditLogStorage) + whereParts []mysql.WherePart + orders []*mysql.Order + limit int + offset int + expected []*proto.AuditLog + expectedCursor int + expectedErr error + }{ + "Error": { + setup: func(s *auditLogStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + }, + whereParts: nil, + orders: nil, + limit: 0, + offset: 0, + expected: nil, + expectedCursor: 0, + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *auditLogStorage) { + rows := mock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + row := mock.NewMockRow(mockController) + 
row.EXPECT().Scan(gomock.Any()).Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + whereParts: nil, + orders: []*mysql.Order{ + mysql.NewOrder("timestamp", mysql.OrderDirectionDesc), + }, + limit: 10, + offset: 5, + expected: []*proto.AuditLog{}, + expectedCursor: 5, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newAuditLogStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + auditLogs, cursor, _, err := storage.ListAuditLogs( + context.Background(), + p.whereParts, + p.orders, + p.limit, + p.offset, + ) + assert.Equal(t, p.expected, auditLogs) + assert.Equal(t, p.expectedCursor, cursor) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func newAuditLogStorageWithMock(t *testing.T, mockController *gomock.Controller) *auditLogStorage { + t.Helper() + return &auditLogStorage{mock.NewMockQueryExecer(mockController)} +} diff --git a/pkg/auditlog/storage/v2/mock/BUILD.bazel b/pkg/auditlog/storage/v2/mock/BUILD.bazel new file mode 100644 index 000000000..4b4b10f55 --- /dev/null +++ b/pkg/auditlog/storage/v2/mock/BUILD.bazel @@ -0,0 +1,17 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "admin_audit_log.go", + "audit_log.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/auditlog/storage/v2/mock", + visibility = ["//visibility:public"], + deps = [ + "//pkg/auditlog/domain:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//proto/auditlog:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + ], +) diff --git a/pkg/auditlog/storage/v2/mock/admin_audit_log.go b/pkg/auditlog/storage/v2/mock/admin_audit_log.go new file mode 100644 index 000000000..d0b68fbae --- /dev/null +++ b/pkg/auditlog/storage/v2/mock/admin_audit_log.go @@ -0,0 +1,70 @@ +// Code generated by MockGen. 
DO NOT EDIT. +// Source: admin_audit_log.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + domain "github.com/bucketeer-io/bucketeer/pkg/auditlog/domain" + mysql "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + auditlog "github.com/bucketeer-io/bucketeer/proto/auditlog" +) + +// MockAdminAuditLogStorage is a mock of AdminAuditLogStorage interface. +type MockAdminAuditLogStorage struct { + ctrl *gomock.Controller + recorder *MockAdminAuditLogStorageMockRecorder +} + +// MockAdminAuditLogStorageMockRecorder is the mock recorder for MockAdminAuditLogStorage. +type MockAdminAuditLogStorageMockRecorder struct { + mock *MockAdminAuditLogStorage +} + +// NewMockAdminAuditLogStorage creates a new mock instance. +func NewMockAdminAuditLogStorage(ctrl *gomock.Controller) *MockAdminAuditLogStorage { + mock := &MockAdminAuditLogStorage{ctrl: ctrl} + mock.recorder = &MockAdminAuditLogStorageMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockAdminAuditLogStorage) EXPECT() *MockAdminAuditLogStorageMockRecorder { + return m.recorder +} + +// CreateAdminAuditLogs mocks base method. +func (m *MockAdminAuditLogStorage) CreateAdminAuditLogs(ctx context.Context, auditLogs []*domain.AuditLog) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateAdminAuditLogs", ctx, auditLogs) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateAdminAuditLogs indicates an expected call of CreateAdminAuditLogs. +func (mr *MockAdminAuditLogStorageMockRecorder) CreateAdminAuditLogs(ctx, auditLogs interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateAdminAuditLogs", reflect.TypeOf((*MockAdminAuditLogStorage)(nil).CreateAdminAuditLogs), ctx, auditLogs) +} + +// ListAdminAuditLogs mocks base method. 
+func (m *MockAdminAuditLogStorage) ListAdminAuditLogs(ctx context.Context, whereParts []mysql.WherePart, orders []*mysql.Order, limit, offset int) ([]*auditlog.AuditLog, int, int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListAdminAuditLogs", ctx, whereParts, orders, limit, offset) + ret0, _ := ret[0].([]*auditlog.AuditLog) + ret1, _ := ret[1].(int) + ret2, _ := ret[2].(int64) + ret3, _ := ret[3].(error) + return ret0, ret1, ret2, ret3 +} + +// ListAdminAuditLogs indicates an expected call of ListAdminAuditLogs. +func (mr *MockAdminAuditLogStorageMockRecorder) ListAdminAuditLogs(ctx, whereParts, orders, limit, offset interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAdminAuditLogs", reflect.TypeOf((*MockAdminAuditLogStorage)(nil).ListAdminAuditLogs), ctx, whereParts, orders, limit, offset) +} diff --git a/pkg/auditlog/storage/v2/mock/audit_log.go b/pkg/auditlog/storage/v2/mock/audit_log.go new file mode 100644 index 000000000..4c83a46d5 --- /dev/null +++ b/pkg/auditlog/storage/v2/mock/audit_log.go @@ -0,0 +1,70 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: audit_log.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + domain "github.com/bucketeer-io/bucketeer/pkg/auditlog/domain" + mysql "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + auditlog "github.com/bucketeer-io/bucketeer/proto/auditlog" +) + +// MockAuditLogStorage is a mock of AuditLogStorage interface. +type MockAuditLogStorage struct { + ctrl *gomock.Controller + recorder *MockAuditLogStorageMockRecorder +} + +// MockAuditLogStorageMockRecorder is the mock recorder for MockAuditLogStorage. +type MockAuditLogStorageMockRecorder struct { + mock *MockAuditLogStorage +} + +// NewMockAuditLogStorage creates a new mock instance. 
+func NewMockAuditLogStorage(ctrl *gomock.Controller) *MockAuditLogStorage { + mock := &MockAuditLogStorage{ctrl: ctrl} + mock.recorder = &MockAuditLogStorageMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockAuditLogStorage) EXPECT() *MockAuditLogStorageMockRecorder { + return m.recorder +} + +// CreateAuditLogs mocks base method. +func (m *MockAuditLogStorage) CreateAuditLogs(ctx context.Context, auditLogs []*domain.AuditLog) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateAuditLogs", ctx, auditLogs) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateAuditLogs indicates an expected call of CreateAuditLogs. +func (mr *MockAuditLogStorageMockRecorder) CreateAuditLogs(ctx, auditLogs interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateAuditLogs", reflect.TypeOf((*MockAuditLogStorage)(nil).CreateAuditLogs), ctx, auditLogs) +} + +// ListAuditLogs mocks base method. +func (m *MockAuditLogStorage) ListAuditLogs(ctx context.Context, whereParts []mysql.WherePart, orders []*mysql.Order, limit, offset int) ([]*auditlog.AuditLog, int, int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListAuditLogs", ctx, whereParts, orders, limit, offset) + ret0, _ := ret[0].([]*auditlog.AuditLog) + ret1, _ := ret[1].(int) + ret2, _ := ret[2].(int64) + ret3, _ := ret[3].(error) + return ret0, ret1, ret2, ret3 +} + +// ListAuditLogs indicates an expected call of ListAuditLogs. 
+func (mr *MockAuditLogStorageMockRecorder) ListAuditLogs(ctx, whereParts, orders, limit, offset interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAuditLogs", reflect.TypeOf((*MockAuditLogStorage)(nil).ListAuditLogs), ctx, whereParts, orders, limit, offset) +} diff --git a/pkg/auth/api/BUILD.bazel b/pkg/auth/api/BUILD.bazel new file mode 100644 index 000000000..09f8da2c7 --- /dev/null +++ b/pkg/auth/api/BUILD.bazel @@ -0,0 +1,36 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "api.go", + "error.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/auth/api", + visibility = ["//visibility:public"], + deps = [ + "//pkg/account/client:go_default_library", + "//pkg/auth/oidc:go_default_library", + "//pkg/locale:go_default_library", + "//pkg/log:go_default_library", + "//pkg/role:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/rpc/status:go_default_library", + "//pkg/token:go_default_library", + "//proto/account:go_default_library", + "//proto/auth:go_default_library", + "//proto/event/domain:go_default_library", + "@go_googleapis//google/rpc:errdetails_go_proto", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + "@org_golang_x_oauth2//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["api_test.go"], + embed = [":go_default_library"], +) diff --git a/pkg/auth/api/api.go b/pkg/auth/api/api.go new file mode 100644 index 000000000..b1af0cd6f --- /dev/null +++ b/pkg/auth/api/api.go @@ -0,0 +1,369 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "context" + "regexp" + "time" + + "go.uber.org/zap" + "golang.org/x/oauth2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + accountclient "github.com/bucketeer-io/bucketeer/pkg/account/client" + "github.com/bucketeer-io/bucketeer/pkg/auth/oidc" + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/role" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/token" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + authproto "github.com/bucketeer-io/bucketeer/proto/auth" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +type options struct { + refreshTokenTTL time.Duration + emailFilter *regexp.Regexp + logger *zap.Logger +} + +var defaultOptions = options{ + refreshTokenTTL: time.Hour, + logger: zap.NewNop(), +} + +type Option func(*options) + +func WithRefreshTokenTTL(ttl time.Duration) Option { + return func(opts *options) { + opts.refreshTokenTTL = ttl + } +} + +func WithEmailFilter(regexp *regexp.Regexp) Option { + return func(opts *options) { + opts.emailFilter = regexp + } +} + +func WithLogger(logger *zap.Logger) Option { + return func(opts *options) { + opts.logger = logger + } +} + +type authService struct { + oidc *oidc.OIDC + signer token.Signer + accountClient accountclient.Client + opts *options + logger *zap.Logger +} + +func NewAuthService( + oidc *oidc.OIDC, + signer token.Signer, + accountClient 
accountclient.Client, + opts ...Option, +) rpc.Service { + options := defaultOptions + for _, opt := range opts { + opt(&options) + } + return &authService{ + oidc: oidc, + signer: signer, + accountClient: accountClient, + opts: &options, + logger: options.logger.Named("api"), + } +} + +func (s *authService) Register(server *grpc.Server) { + authproto.RegisterAuthServiceServer(server, s) +} + +func (s *authService) GetAuthCodeURL( + ctx context.Context, + req *authproto.GetAuthCodeURLRequest, +) (*authproto.GetAuthCodeURLResponse, error) { + // The state parameter is used to help mitigate CSRF attacks. + // Before sending a request to get authCodeURL, the client has to generate a random string, + // store it in local and set to the state parameter in GetAuthCodeURLRequest. + // When the client is redirected back, the state value will be included in that redirect. + // Client compares the returned state to the one generated before, + // if the values match then send a new request to ExchangeToken, else deny it. 
+ if err := validateGetAuthCodeURLRequest(req); err != nil { + return nil, err + } + url, err := s.oidc.AuthCodeURL(req.State, req.RedirectUrl) + if err != nil { + if err == oidc.ErrUnregisteredRedirectURL { + return nil, localizedError(statusUnregisteredRedirectURL, locale.JaJP) + } + s.logger.Error( + "Failed to get auth code url", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &authproto.GetAuthCodeURLResponse{Url: url}, nil +} + +func validateGetAuthCodeURLRequest(req *authproto.GetAuthCodeURLRequest) error { + if req.State == "" { + return localizedError(statusMissingState, locale.JaJP) + } + if req.RedirectUrl == "" { + return localizedError(statusMissingRedirectURL, locale.JaJP) + } + return nil +} + +func (s *authService) ExchangeToken( + ctx context.Context, + req *authproto.ExchangeTokenRequest, +) (*authproto.ExchangeTokenResponse, error) { + if err := validateExchangeTokenRequest(req); err != nil { + return nil, err + } + authToken, err := s.oidc.Exchange(ctx, req.Code, req.RedirectUrl) + if err != nil { + if err == oidc.ErrUnregisteredRedirectURL { + return nil, localizedError(statusUnregisteredRedirectURL, locale.JaJP) + } + if err == oidc.ErrBadRequest { + return nil, localizedError(statusInvalidCode, locale.JaJP) + } + s.logger.Error( + "Failed to exchange token", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + token, err := s.generateToken(ctx, authToken) + if err != nil { + return nil, err + } + return &authproto.ExchangeTokenResponse{Token: token}, nil +} + +func validateExchangeTokenRequest(req *authproto.ExchangeTokenRequest) error { + if req.Code == "" { + return localizedError(statusMissingCode, locale.JaJP) + } + if req.RedirectUrl == "" { + return localizedError(statusMissingRedirectURL, locale.JaJP) + } + return nil +} + +func (s *authService) RefreshToken( + 
ctx context.Context, + req *authproto.RefreshTokenRequest, +) (*authproto.RefreshTokenResponse, error) { + if err := validateRefreshTokenRequest(req); err != nil { + return nil, err + } + authToken, err := s.oidc.RefreshToken(ctx, req.RefreshToken, s.opts.refreshTokenTTL, req.RedirectUrl) + if err != nil { + if err == oidc.ErrUnregisteredRedirectURL { + return nil, localizedError(statusUnregisteredRedirectURL, locale.JaJP) + } + if err == oidc.ErrBadRequest { + return nil, localizedError(statusInvalidRefreshToken, locale.JaJP) + } + s.logger.Error( + "Failed to refresh token", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + token, err := s.generateToken(ctx, authToken) + if err != nil { + return nil, err + } + return &authproto.RefreshTokenResponse{Token: token}, nil +} + +func validateRefreshTokenRequest(req *authproto.RefreshTokenRequest) error { + if req.RefreshToken == "" { + return localizedError(statusMissingRefreshToken, locale.JaJP) + } + if req.RedirectUrl == "" { + return localizedError(statusMissingRedirectURL, locale.JaJP) + } + return nil +} + +func (s *authService) generateToken(ctx context.Context, t *oauth2.Token) (*authproto.Token, error) { + rawIDToken := oidc.ExtractRawIDToken(t) + if len(rawIDToken) == 0 { + s.logger.Error( + "Token does not contain id_token", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Any("oauth2Token", t))..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + claims, err := s.oidc.Verify(ctx, rawIDToken) + if err != nil { + s.logger.Error( + "Failed to verify id token", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + if err := s.maybeCheckEmail(ctx, claims.Email); err != nil { + return nil, err + } + resp, err := s.accountClient.GetMeByEmail(ctx, &accountproto.GetMeByEmailRequest{ + Email: claims.Email, + }) + if err != nil { + if 
code := status.Code(err); code == codes.NotFound { + s.logger.Warn( + "Unabled to generate token for an unapproved account", + log.FieldsFromImcomingContext(ctx).AddFields(zap.String("email", claims.Email))..., + ) + return nil, localizedError(statusUnapprovedAccount, locale.JaJP) + } + s.logger.Error( + "Failed to get account", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("email", claims.Email), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + adminRole := accountproto.Account_UNASSIGNED + if resp.IsAdmin { + adminRole = accountproto.Account_OWNER + } + idToken := &token.IDToken{ + Issuer: claims.Iss, + Subject: claims.Sub, + Audience: claims.Aud, + Expiry: time.Unix(claims.Exp, 0), + IssuedAt: time.Unix(claims.Iat, 0), + Email: claims.Email, + AdminRole: adminRole, + } + signedIDToken, err := s.signer.Sign(idToken) + if err != nil { + s.logger.Error( + "Failed to sign id token", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &authproto.Token{ + AccessToken: t.AccessToken, + TokenType: t.TokenType, + RefreshToken: t.RefreshToken, + Expiry: t.Expiry.Unix(), + IdToken: signedIDToken, + }, nil +} + +func (s *authService) maybeCheckEmail(ctx context.Context, email string) error { + if s.opts.emailFilter == nil { + return nil + } + if s.opts.emailFilter.MatchString(email) { + return nil + } + s.logger.Info( + "Access denied email", + log.FieldsFromImcomingContext(ctx).AddFields(zap.String("email", email))..., + ) + return localizedError(statusAccessDeniedEmail, locale.JaJP) +} + +func (s *authService) checkAdminRole(ctx context.Context) (*eventproto.Editor, error) { + editor, err := role.CheckAdminRole(ctx) + if err != nil { + switch status.Code(err) { + case codes.Unauthenticated: + s.logger.Info( + "Unauthenticated", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, 
localizedError(statusUnauthenticated, locale.JaJP) + case codes.PermissionDenied: + s.logger.Info( + "Permission denied", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusPermissionDenied, locale.JaJP) + default: + s.logger.Error( + "Failed to check role", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + } + return editor, nil +} + +func (s *authService) checkRole( + ctx context.Context, + requiredRole accountproto.Account_Role, + environmentNamespace string, +) (*eventproto.Editor, error) { + editor, err := role.CheckRole(ctx, requiredRole, func(email string) (*accountproto.GetAccountResponse, error) { + return s.accountClient.GetAccount(ctx, &accountproto.GetAccountRequest{ + Email: email, + EnvironmentNamespace: environmentNamespace, + }) + }) + if err != nil { + switch status.Code(err) { + case codes.Unauthenticated: + s.logger.Info( + "Unauthenticated", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, localizedError(statusUnauthenticated, locale.JaJP) + case codes.PermissionDenied: + s.logger.Info( + "Permission denied", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, localizedError(statusPermissionDenied, locale.JaJP) + default: + s.logger.Error( + "Failed to check role", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + } + return editor, nil +} diff --git a/pkg/auth/api/api_test.go b/pkg/auth/api/api_test.go new file mode 100644 index 000000000..618ba3092 --- /dev/null +++ b/pkg/auth/api/api_test.go @@ -0,0 +1,15 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api diff --git a/pkg/auth/api/error.go b/pkg/auth/api/error.go new file mode 100644 index 000000000..85f06d38d --- /dev/null +++ b/pkg/auth/api/error.go @@ -0,0 +1,166 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc/codes" + gstatus "google.golang.org/grpc/status" + + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/rpc/status" +) + +var ( + statusInternal = gstatus.New(codes.Internal, "auth: internal") + statusMissingCode = gstatus.New(codes.InvalidArgument, "auth: code must not be empty") + statusMissingState = gstatus.New(codes.InvalidArgument, "auth: state must not be empty") + statusMissingRedirectURL = gstatus.New(codes.InvalidArgument, "auth: missing redirectURL") + statusUnregisteredRedirectURL = gstatus.New(codes.InvalidArgument, "auth: unregistered redirectURL") + statusMissingRefreshToken = gstatus.New(codes.InvalidArgument, "auth: refreshToken must not be empty") + statusInvalidCode = gstatus.New(codes.InvalidArgument, "auth: invalid code") + statusInvalidRefreshToken = gstatus.New(codes.InvalidArgument, "auth: invalid refresh token") + statusUnapprovedAccount = gstatus.New(codes.PermissionDenied, "auth: unapproved account") + statusAccessDeniedEmail = gstatus.New(codes.PermissionDenied, "auth: access denied email") + statusMissingEncryptedSecret = gstatus.New(codes.InvalidArgument, "auth: encrypted secret must not be empty") + statusUnauthenticated = gstatus.New(codes.Unauthenticated, "auth: unauthenticated") + statusPermissionDenied = gstatus.New(codes.PermissionDenied, "auth: permission denied") + + errInternalJaJP = status.MustWithDetails( + statusInternal, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "内部エラーが発生しました", + }, + ) + errMissingCodeJaJP = status.MustWithDetails( + statusMissingCode, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "認証codeは必須です", + }, + ) + errMissingStateJaJP = status.MustWithDetails( + statusMissingState, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "stateは必須です", + }, + ) + errMissingRedirectURLJaJP = status.MustWithDetails( + 
statusMissingRedirectURL, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "redirect urlは必須です", + }, + ) + errUnregisteredRedirectURLJaJP = status.MustWithDetails( + statusUnregisteredRedirectURL, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "許可されていないredirect urlです", + }, + ) + errMissingRefreshTokenJaJP = status.MustWithDetails( + statusMissingRefreshToken, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "refresh tokenは必須です", + }, + ) + errInvalidCodeJaJP = status.MustWithDetails( + statusInvalidCode, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不正なcodeです", + }, + ) + errInvalidRefreshTokenJaJP = status.MustWithDetails( + statusInvalidRefreshToken, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不正なrefresh tokenです", + }, + ) + errUnapprovedAccountJaJP = status.MustWithDetails( + statusUnapprovedAccount, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "許可されていないaccountです", + }, + ) + errAccessDeniedEmailJaJP = status.MustWithDetails( + statusAccessDeniedEmail, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "許可されていないemailです", + }, + ) + errMissingEncryptedSecretJaJP = status.MustWithDetails( + statusMissingEncryptedSecret, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "encrypted secretは必須です", + }, + ) + errUnauthenticatedJaJP = status.MustWithDetails( + statusUnauthenticated, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "認証されていません", + }, + ) + errPermissionDeniedJaJP = status.MustWithDetails( + statusPermissionDenied, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "権限がありません", + }, + ) +) + +func localizedError(s *gstatus.Status, loc string) error { + // handle loc if multi-lang is necessary + switch s { + case statusInternal: + return errInternalJaJP + case statusMissingCode: + return errMissingCodeJaJP + case statusMissingState: + return errMissingStateJaJP + case 
statusMissingRedirectURL: + return errMissingRedirectURLJaJP + case statusUnregisteredRedirectURL: + return errUnregisteredRedirectURLJaJP + case statusMissingRefreshToken: + return errMissingRefreshTokenJaJP + case statusInvalidCode: + return errInvalidCodeJaJP + case statusInvalidRefreshToken: + return errInvalidRefreshTokenJaJP + case statusUnapprovedAccount: + return errUnapprovedAccountJaJP + case statusAccessDeniedEmail: + return errAccessDeniedEmailJaJP + case statusMissingEncryptedSecret: + return errMissingEncryptedSecretJaJP + case statusUnauthenticated: + return errUnauthenticatedJaJP + case statusPermissionDenied: + return errPermissionDeniedJaJP + default: + return errInternalJaJP + } +} diff --git a/pkg/auth/client/BUILD.bazel b/pkg/auth/client/BUILD.bazel new file mode 100644 index 000000000..289201cc0 --- /dev/null +++ b/pkg/auth/client/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["client.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/auth/client", + visibility = ["//visibility:public"], + deps = [ + "//pkg/rpc/client:go_default_library", + "//proto/auth:go_default_library", + "@org_golang_google_grpc//:go_default_library", + ], +) diff --git a/pkg/auth/client/client.go b/pkg/auth/client/client.go new file mode 100644 index 000000000..b11c42b8b --- /dev/null +++ b/pkg/auth/client/client.go @@ -0,0 +1,50 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package client + +import ( + "google.golang.org/grpc" + + rpcclient "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + proto "github.com/bucketeer-io/bucketeer/proto/auth" +) + +type Client interface { + proto.AuthServiceClient + Close() +} + +type client struct { + proto.AuthServiceClient + address string + connection *grpc.ClientConn +} + +func NewClient(addr, certPath string, opts ...rpcclient.Option) (Client, error) { + conn, err := rpcclient.NewClientConn(addr, certPath, opts...) + if err != nil { + return nil, err + } + return &client{ + AuthServiceClient: proto.NewAuthServiceClient(conn), + address: addr, + connection: conn, + }, nil +} + +func (c *client) Close() { + c.connection.Close() +} diff --git a/pkg/auth/client/mock/BUILD.bazel b/pkg/auth/client/mock/BUILD.bazel new file mode 100644 index 000000000..6850b419c --- /dev/null +++ b/pkg/auth/client/mock/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["client.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/auth/client/mock", + visibility = ["//visibility:public"], + deps = [ + "//proto/auth:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@org_golang_google_grpc//:go_default_library", + ], +) diff --git a/pkg/auth/client/mock/client.go b/pkg/auth/client/mock/client.go new file mode 100644 index 000000000..2a9bee3d5 --- /dev/null +++ b/pkg/auth/client/mock/client.go @@ -0,0 +1,110 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: client.go + +// Package mock is a generated GoMock package. 
+package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + grpc "google.golang.org/grpc" + + auth "github.com/bucketeer-io/bucketeer/proto/auth" +) + +// MockClient is a mock of Client interface. +type MockClient struct { + ctrl *gomock.Controller + recorder *MockClientMockRecorder +} + +// MockClientMockRecorder is the mock recorder for MockClient. +type MockClientMockRecorder struct { + mock *MockClient +} + +// NewMockClient creates a new mock instance. +func NewMockClient(ctrl *gomock.Controller) *MockClient { + mock := &MockClient{ctrl: ctrl} + mock.recorder = &MockClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockClient) EXPECT() *MockClientMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockClient) Close() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Close") +} + +// Close indicates an expected call of Close. +func (mr *MockClientMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockClient)(nil).Close)) +} + +// ExchangeToken mocks base method. +func (m *MockClient) ExchangeToken(ctx context.Context, in *auth.ExchangeTokenRequest, opts ...grpc.CallOption) (*auth.ExchangeTokenResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ExchangeToken", varargs...) + ret0, _ := ret[0].(*auth.ExchangeTokenResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ExchangeToken indicates an expected call of ExchangeToken. +func (mr *MockClientMockRecorder) ExchangeToken(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExchangeToken", reflect.TypeOf((*MockClient)(nil).ExchangeToken), varargs...) +} + +// GetAuthCodeURL mocks base method. +func (m *MockClient) GetAuthCodeURL(ctx context.Context, in *auth.GetAuthCodeURLRequest, opts ...grpc.CallOption) (*auth.GetAuthCodeURLResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetAuthCodeURL", varargs...) + ret0, _ := ret[0].(*auth.GetAuthCodeURLResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAuthCodeURL indicates an expected call of GetAuthCodeURL. +func (mr *MockClientMockRecorder) GetAuthCodeURL(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAuthCodeURL", reflect.TypeOf((*MockClient)(nil).GetAuthCodeURL), varargs...) +} + +// RefreshToken mocks base method. +func (m *MockClient) RefreshToken(ctx context.Context, in *auth.RefreshTokenRequest, opts ...grpc.CallOption) (*auth.RefreshTokenResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "RefreshToken", varargs...) + ret0, _ := ret[0].(*auth.RefreshTokenResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RefreshToken indicates an expected call of RefreshToken. +func (mr *MockClientMockRecorder) RefreshToken(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RefreshToken", reflect.TypeOf((*MockClient)(nil).RefreshToken), varargs...) 
+} diff --git a/pkg/auth/cmd/server/BUILD.bazel b/pkg/auth/cmd/server/BUILD.bazel new file mode 100644 index 000000000..241a8e849 --- /dev/null +++ b/pkg/auth/cmd/server/BUILD.bazel @@ -0,0 +1,21 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["server.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/auth/cmd/server", + visibility = ["//visibility:public"], + deps = [ + "//pkg/account/client:go_default_library", + "//pkg/auth/api:go_default_library", + "//pkg/auth/oidc:go_default_library", + "//pkg/cli:go_default_library", + "//pkg/health:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/rpc/client:go_default_library", + "//pkg/token:go_default_library", + "@in_gopkg_alecthomas_kingpin_v2//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/auth/cmd/server/server.go b/pkg/auth/cmd/server/server.go new file mode 100644 index 000000000..43c2558de --- /dev/null +++ b/pkg/auth/cmd/server/server.go @@ -0,0 +1,150 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package server + +import ( + "context" + "regexp" + "time" + + "go.uber.org/zap" + kingpin "gopkg.in/alecthomas/kingpin.v2" + + accountclient "github.com/bucketeer-io/bucketeer/pkg/account/client" + "github.com/bucketeer-io/bucketeer/pkg/auth/api" + "github.com/bucketeer-io/bucketeer/pkg/auth/oidc" + "github.com/bucketeer-io/bucketeer/pkg/cli" + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + "github.com/bucketeer-io/bucketeer/pkg/token" +) + +const command = "server" + +type server struct { + *kingpin.CmdClause + port *int + accountService *string + certPath *string + keyPath *string + serviceTokenPath *string + oauthPrivateKeyPath *string + oauthClientID *string + oauthClientSecret *string + oauthRedirectURLs *[]string + oauthIssuer *string + oauthIssuerCertPath *string + emailFilter *string +} + +func RegisterCommand(r cli.CommandRegistry, p cli.ParentCommand) cli.Command { + cmd := p.Command(command, "Start the server") + server := &server{ + CmdClause: cmd, + port: cmd.Flag("port", "Port to bind to.").Default("9090").Int(), + accountService: cmd.Flag( + "account-service", + "bucketeer-account-service address.", + ).Default("account:9090").String(), + certPath: cmd.Flag("cert", "Path to TLS certificate.").Required().String(), + keyPath: cmd.Flag("key", "Path to TLS key.").Required().String(), + serviceTokenPath: cmd.Flag("service-token", "Path to service token.").Required().String(), + oauthPrivateKeyPath: cmd.Flag( + "oauth-private-key", + "Path to private key for signing oauth token.", + ).Required().String(), + oauthClientID: cmd.Flag("oauth-client-id", "The oauth clientID registered at dex.").Required().String(), + oauthClientSecret: cmd.Flag( + "oauth-client-secret", + "The oauth client secret registered at Dex.", + ).Required().String(), + oauthRedirectURLs: cmd.Flag("oauth-redirect-urls", "The redirect 
urls registered at Dex.").Required().Strings(), + oauthIssuer: cmd.Flag("oauth-issuer", "The url of dex issuer.").Required().String(), + oauthIssuerCertPath: cmd.Flag("oauth-issuer-cert", "Path to TLS certificate of issuer.").Required().String(), + emailFilter: cmd.Flag("email-filter", "Regexp pattern for filtering email.").String(), + } + r.RegisterCommand(server) + return server +} + +func (s *server) Run(ctx context.Context, metrics metrics.Metrics, logger *zap.Logger) error { + registerer := metrics.DefaultRegisterer() + + oidc, err := oidc.NewOIDC( + ctx, + *s.oauthIssuer, + *s.oauthIssuerCertPath, + *s.oauthClientID, + *s.oauthClientSecret, + *s.oauthRedirectURLs, + oidc.WithLogger(logger)) + if err != nil { + return err + } + + signer, err := token.NewSigner(*s.oauthPrivateKeyPath) + if err != nil { + return err + } + + creds, err := client.NewPerRPCCredentials(*s.serviceTokenPath) + if err != nil { + return err + } + + accountClient, err := accountclient.NewClient(*s.accountService, *s.certPath, + client.WithPerRPCCredentials(creds), + client.WithDialTimeout(30*time.Second), + client.WithBlock(), + client.WithMetrics(registerer), + client.WithLogger(logger)) + if err != nil { + return err + } + defer accountClient.Close() + + serviceOptions := []api.Option{ + api.WithLogger(logger), + } + if *s.emailFilter != "" { + filter, err := regexp.Compile(*s.emailFilter) + if err != nil { + return err + } + serviceOptions = append(serviceOptions, api.WithEmailFilter(filter)) + } + service := api.NewAuthService(oidc, signer, accountClient, serviceOptions...) 
+ + healthChecker := health.NewGrpcChecker( + health.WithTimeout(time.Second), + health.WithCheck("metrics", metrics.Check), + ) + go healthChecker.Run(ctx) + + server := rpc.NewServer(service, *s.certPath, *s.keyPath, + rpc.WithPort(*s.port), + rpc.WithMetrics(registerer), + rpc.WithLogger(logger), + rpc.WithService(healthChecker), + rpc.WithHandler("/health", healthChecker), + ) + defer server.Stop(10 * time.Second) + go server.Run() + + <-ctx.Done() + return nil +} diff --git a/pkg/auth/oidc/BUILD.bazel b/pkg/auth/oidc/BUILD.bazel new file mode 100644 index 000000000..015c96c5b --- /dev/null +++ b/pkg/auth/oidc/BUILD.bazel @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["oidc.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/auth/oidc", + visibility = ["//visibility:public"], + deps = [ + "@com_github_coreos_go_oidc//:go_default_library", + "@org_golang_x_oauth2//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["oidc_test.go"], + embed = [":go_default_library"], + deps = [ + "@com_github_stretchr_testify//assert:go_default_library", + "@org_golang_x_oauth2//:go_default_library", + ], +) diff --git a/pkg/auth/oidc/oidc.go b/pkg/auth/oidc/oidc.go new file mode 100644 index 000000000..f03acde2b --- /dev/null +++ b/pkg/auth/oidc/oidc.go @@ -0,0 +1,256 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package oidc + +import ( + "context" + "crypto/tls" + "crypto/x509" + "errors" + "io/ioutil" + "net/http" + "time" + + oidc "github.com/coreos/go-oidc" + "go.uber.org/zap" + "golang.org/x/oauth2" +) + +var ( + ErrUnregisteredRedirectURL = errors.New("oidc: unregistered redirectURL") + ErrBadRequest = errors.New("oidc: bad request") +) + +type Claims struct { + Iss string `json:"iss"` + Sub string `json:"sub"` + Aud string `json:"aud"` + Exp int64 `json:"exp"` + Iat int64 `json:"iat"` + Email string `json:"email"` + EmailVerified bool `json:"email_verified"` + Name string `json:"name"` +} + +type options struct { + scopes []string + httpTimeout time.Duration + logger *zap.Logger +} + +var defaultOptions = options{ + scopes: []string{"openid", "profile", "email"}, + httpTimeout: 10 * time.Second, + logger: zap.NewNop(), +} + +type Option func(*options) + +func WithScopes(scopes []string) Option { + return func(opts *options) { + opts.scopes = scopes + } +} + +func WithHTTPTimeout(timeout time.Duration) Option { + return func(opts *options) { + opts.httpTimeout = timeout + } +} + +func WithLogger(logger *zap.Logger) Option { + return func(opts *options) { + opts.logger = logger + } +} + +type OIDC struct { + clientID string + clientSecret string + redirectURLs []string + provider *oidc.Provider + verifier *oidc.IDTokenVerifier + offlineAsScope bool + client *http.Client + opts *options + logger *zap.Logger +} + +func NewOIDC( + ctx context.Context, + issuerURL, issuerCertPath, clientID, clientSecret string, + redirectURLs []string, + opts ...Option, +) (*OIDC, error) { + dopts := defaultOptions + for _, opt := range opts { + opt(&dopts) + } + cert, err := ioutil.ReadFile(issuerCertPath) + if err != nil { + return nil, err + } + certPool := x509.NewCertPool() + if !certPool.AppendCertsFromPEM(cert) { + return nil, errors.New("oidc: Failed to parse issuer cert") 
+ } + httpClient := &http.Client{ + Timeout: dopts.httpTimeout, + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: certPool, + }, + }, + } + logger := dopts.logger.Named("oidc") + ctx = oidc.ClientContext(ctx, httpClient) + provider, err := oidc.NewProvider(ctx, issuerURL) + if err != nil { + logger.Error("Failed to query provider", zap.Error(err), zap.String("issuerURL", issuerURL)) + return nil, err + } + offlineScope, err := checkOfflineScope(provider) + if err != nil { + logger.Error("Failed to check offline scope", zap.Error(err)) + return nil, err + } + return &OIDC{ + clientID: clientID, + clientSecret: clientSecret, + redirectURLs: redirectURLs, + provider: provider, + verifier: provider.Verifier(&oidc.Config{ClientID: clientID}), + offlineAsScope: offlineScope, + client: httpClient, + opts: &dopts, + logger: logger, + }, nil +} + +func checkOfflineScope(provider *oidc.Provider) (bool, error) { + var s struct { + ScopesSupported []string `json:"scopes_supported"` + } + if err := provider.Claims(&s); err != nil { + return false, err + } + if len(s.ScopesSupported) == 0 { + return true, nil + } + for _, scope := range s.ScopesSupported { + if scope == oidc.ScopeOfflineAccess { + return true, nil + } + } + return false, nil +} + +func (o *OIDC) AuthCodeURL(state, redirectURL string) (string, error) { + if err := o.validateRedirectURL(redirectURL); err != nil { + return "", err + } + scopes := o.opts.scopes + if o.offlineAsScope { + scopes = append(scopes, "offline_access") + return o.oauth2Config(scopes, redirectURL).AuthCodeURL(state), nil + } + return o.oauth2Config(scopes, redirectURL).AuthCodeURL(state, oauth2.AccessTypeOffline), nil +} + +func (o *OIDC) Exchange(ctx context.Context, code, redirectURL string) (*oauth2.Token, error) { + if err := o.validateRedirectURL(redirectURL); err != nil { + return nil, err + } + ctx = oidc.ClientContext(ctx, o.client) + token, err := o.oauth2Config(nil, redirectURL).Exchange(ctx, code) + if err 
== nil { + return token, nil + } + if isBadRequestError(err) { + o.logger.Info("failed to exchange token", zap.Error(err)) + return nil, ErrBadRequest + } + return nil, err +} + +func (o *OIDC) RefreshToken( + ctx context.Context, + token string, + expires time.Duration, + redirectURL string, +) (*oauth2.Token, error) { + if err := o.validateRedirectURL(redirectURL); err != nil { + return nil, err + } + t := &oauth2.Token{ + RefreshToken: token, + Expiry: time.Now().Add(expires), + } + ctx = oidc.ClientContext(ctx, o.client) + newToken, err := o.oauth2Config(nil, redirectURL).TokenSource(ctx, t).Token() + if err == nil { + return newToken, nil + } + if isBadRequestError(err) { + o.logger.Info("failed to refresh token", zap.Error(err)) + return nil, ErrBadRequest + } + return nil, err +} + +func (o *OIDC) Verify(ctx context.Context, rawIDToken string) (*Claims, error) { + idToken, err := o.verifier.Verify(ctx, rawIDToken) + if err != nil { + return nil, err + } + claims := &Claims{} + if err := idToken.Claims(claims); err != nil { + return nil, err + } + return claims, nil +} + +func ExtractRawIDToken(token *oauth2.Token) string { + rawIDToken, _ := token.Extra("id_token").(string) + return rawIDToken +} + +func (o *OIDC) validateRedirectURL(url string) error { + for _, r := range o.redirectURLs { + if r == url { + return nil + } + } + return ErrUnregisteredRedirectURL +} + +func (o *OIDC) oauth2Config(scopes []string, redirectURL string) *oauth2.Config { + return &oauth2.Config{ + ClientID: o.clientID, + ClientSecret: o.clientSecret, + Endpoint: o.provider.Endpoint(), + Scopes: scopes, + RedirectURL: redirectURL, + } +} + +func isBadRequestError(err error) bool { + if retrieveErr, ok := err.(*oauth2.RetrieveError); ok { + if code := retrieveErr.Response.StatusCode; code > 200 && code < 500 { + return true + } + } + return false +} diff --git a/pkg/auth/oidc/oidc_test.go b/pkg/auth/oidc/oidc_test.go new file mode 100644 index 000000000..9e98f2262 --- /dev/null +++ 
b/pkg/auth/oidc/oidc_test.go @@ -0,0 +1,50 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oidc + +import ( + "errors" + "fmt" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + "golang.org/x/oauth2" +) + +func TestIsBadRequestError(t *testing.T) { + testcases := []struct { + err error + isBadRequest bool + }{ + { + err: errors.New("test-error"), + isBadRequest: false, + }, + { + err: &oauth2.RetrieveError{Response: &http.Response{StatusCode: 500}}, + isBadRequest: false, + }, + { + err: &oauth2.RetrieveError{Response: &http.Response{StatusCode: 400}}, + isBadRequest: true, + }, + } + for i, tc := range testcases { + des := fmt.Sprintf("case %d", i) + result := isBadRequestError(tc.err) + assert.Equal(t, tc.isBadRequest, result, des) + } +} diff --git a/pkg/autoops/api/BUILD.bazel b/pkg/autoops/api/BUILD.bazel new file mode 100644 index 000000000..ae25d346f --- /dev/null +++ b/pkg/autoops/api/BUILD.bazel @@ -0,0 +1,73 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "api.go", + "error.go", + "operation.go", + "webhook.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/autoops/api", + visibility = ["//visibility:public"], + deps = [ + "//pkg/account/client:go_default_library", + "//pkg/auth/client:go_default_library", + "//pkg/autoops/command:go_default_library", + 
"//pkg/autoops/domain:go_default_library", + "//pkg/autoops/storage/v2:go_default_library", + "//pkg/crypto:go_default_library", + "//pkg/experiment/client:go_default_library", + "//pkg/feature/client:go_default_library", + "//pkg/locale:go_default_library", + "//pkg/log:go_default_library", + "//pkg/opsevent/storage/v2:go_default_library", + "//pkg/pubsub/publisher:go_default_library", + "//pkg/role:go_default_library", + "//pkg/rpc/status:go_default_library", + "//pkg/storage:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//pkg/uuid:go_default_library", + "//proto/account:go_default_library", + "//proto/autoops:go_default_library", + "//proto/event/domain:go_default_library", + "//proto/experiment:go_default_library", + "//proto/feature:go_default_library", + "@go_googleapis//google/rpc:errdetails_go_proto", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "api_test.go", + "webhook_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//pkg/account/client/mock:go_default_library", + "//pkg/auth/client/mock:go_default_library", + "//pkg/experiment/client/mock:go_default_library", + "//pkg/feature/client/mock:go_default_library", + "//pkg/locale:go_default_library", + "//pkg/pubsub/publisher/mock:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/storage:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//pkg/storage/v2/mysql/mock:go_default_library", + "//pkg/token:go_default_library", + "//proto/account:go_default_library", + "//proto/autoops:go_default_library", + "//proto/experiment:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + 
"@go_googleapis//google/rpc:errdetails_go_proto", + "@org_golang_google_grpc//status:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/autoops/api/api.go b/pkg/autoops/api/api.go new file mode 100644 index 000000000..855fb0324 --- /dev/null +++ b/pkg/autoops/api/api.go @@ -0,0 +1,899 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "context" + "errors" + "net/url" + "strconv" + "time" + + "go.uber.org/zap" + "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + accountclient "github.com/bucketeer-io/bucketeer/pkg/account/client" + authclient "github.com/bucketeer-io/bucketeer/pkg/auth/client" + "github.com/bucketeer-io/bucketeer/pkg/autoops/command" + "github.com/bucketeer-io/bucketeer/pkg/autoops/domain" + v2as "github.com/bucketeer-io/bucketeer/pkg/autoops/storage/v2" + "github.com/bucketeer-io/bucketeer/pkg/crypto" + experimentclient "github.com/bucketeer-io/bucketeer/pkg/experiment/client" + featureclient "github.com/bucketeer-io/bucketeer/pkg/feature/client" + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/log" + v2os "github.com/bucketeer-io/bucketeer/pkg/opsevent/storage/v2" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + "github.com/bucketeer-io/bucketeer/pkg/role" + 
"github.com/bucketeer-io/bucketeer/pkg/storage" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + autoopsproto "github.com/bucketeer-io/bucketeer/proto/autoops" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" + experimentproto "github.com/bucketeer-io/bucketeer/proto/experiment" +) + +var errAlreadyTriggered = errors.New("auto ops Rule has already triggered") + +type options struct { + logger *zap.Logger +} + +type Option func(*options) + +func WithLogger(l *zap.Logger) Option { + return func(opts *options) { + opts.logger = l + } +} + +type AutoOpsService struct { + mysqlClient mysql.Client + featureClient featureclient.Client + experimentClient experimentclient.Client + accountClient accountclient.Client + authClient authclient.Client + publisher publisher.Publisher + webhookBaseURL *url.URL + webhookCryptoUtil crypto.EncrypterDecrypter + opts *options + logger *zap.Logger +} + +func NewAutoOpsService( + mysqlClient mysql.Client, + featureClient featureclient.Client, + experimentClient experimentclient.Client, + accountClient accountclient.Client, + authClient authclient.Client, + publisher publisher.Publisher, + webhookBaseURL *url.URL, + webhookCryptoUtil crypto.EncrypterDecrypter, + opts ...Option, +) *AutoOpsService { + dopts := &options{ + logger: zap.NewNop(), + } + for _, opt := range opts { + opt(dopts) + } + return &AutoOpsService{ + mysqlClient: mysqlClient, + featureClient: featureClient, + experimentClient: experimentClient, + accountClient: accountClient, + authClient: authClient, + publisher: publisher, + webhookBaseURL: webhookBaseURL, + opts: dopts, + webhookCryptoUtil: webhookCryptoUtil, + logger: dopts.logger.Named("api"), + } +} + +func (s *AutoOpsService) Register(server *grpc.Server) { + autoopsproto.RegisterAutoOpsServiceServer(server, s) +} + +func (s *AutoOpsService) CreateAutoOpsRule( + ctx context.Context, + req 
*autoopsproto.CreateAutoOpsRuleRequest, +) (*autoopsproto.CreateAutoOpsRuleResponse, error) { + editor, err := s.checkRole(ctx, accountproto.Account_EDITOR, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := s.validateCreateAutoOpsRuleRequest(req); err != nil { + return nil, err + } + autoOpsRule, err := domain.NewAutoOpsRule( + req.Command.FeatureId, + req.Command.OpsType, + req.Command.OpsEventRateClauses, + req.Command.DatetimeClauses, + req.Command.WebhookClauses, + ) + if err != nil { + s.logger.Error( + "Failed to create a new autoOpsRule", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + opsEventRateClauses, err := autoOpsRule.ExtractOpsEventRateClauses() + if err != nil { + s.logger.Error( + "Failed to extract opsEventRateClauses", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + for _, c := range opsEventRateClauses { + exist, err := s.existGoal(ctx, req.EnvironmentNamespace, c.GoalId) + if err != nil { + return nil, localizedError(statusInternal, locale.JaJP) + } + if !exist { + s.logger.Error( + "Goal does not exist", + log.FieldsFromImcomingContext(ctx).AddFields(zap.String("environmentNamespace", req.EnvironmentNamespace))..., + ) + return nil, localizedError(statusOpsEventRateClauseGoalNotFound, locale.JaJP) + } + } + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + autoOpsRuleStorage := v2as.NewAutoOpsRuleStorage(tx) + handler := 
command.NewAutoOpsCommandHandler(editor, autoOpsRule, s.publisher, req.EnvironmentNamespace) + if err := handler.Handle(ctx, req.Command); err != nil { + return err + } + return autoOpsRuleStorage.CreateAutoOpsRule(ctx, autoOpsRule, req.EnvironmentNamespace) + }) + if err != nil { + if err == v2as.ErrAutoOpsRuleAlreadyExists { + return nil, localizedError(statusAlreadyExists, locale.JaJP) + } + s.logger.Error( + "Failed to create autoOps", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &autoopsproto.CreateAutoOpsRuleResponse{}, nil +} + +func (s *AutoOpsService) validateCreateAutoOpsRuleRequest(req *autoopsproto.CreateAutoOpsRuleRequest) error { + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + if req.Command.FeatureId == "" { + return localizedError(statusFeatureIDRequired, locale.JaJP) + } + if len(req.Command.OpsEventRateClauses) == 0 && len(req.Command.DatetimeClauses) == 0 && len(req.Command.WebhookClauses) == 0 { + return localizedError(statusClauseRequired, locale.JaJP) + } + if req.Command.OpsType == autoopsproto.OpsType_ENABLE_FEATURE && len(req.Command.OpsEventRateClauses) > 0 { + return localizedError(statusIncompatibleOpsType, locale.JaJP) + } + if err := s.validateOpsEventRateClauses(req.Command.OpsEventRateClauses); err != nil { + return err + } + if err := s.validateDatetimeClauses(req.Command.DatetimeClauses); err != nil { + return err + } + if err := s.validateWebhookClauses(req.Command.WebhookClauses); err != nil { + return err + } + return nil +} + +func (s *AutoOpsService) validateOpsEventRateClauses(clauses []*autoopsproto.OpsEventRateClause) error { + for _, c := range clauses { + if err := s.validateOpsEventRateClause(c); err != nil { + return err + } + } + return nil +} + +func (s *AutoOpsService) validateOpsEventRateClause(clause 
*autoopsproto.OpsEventRateClause) error { + if clause.VariationId == "" { + return localizedError(statusOpsEventRateClauseVariationIDRequired, locale.JaJP) + } + if clause.GoalId == "" { + return localizedError(statusOpsEventRateClauseGoalIDRequired, locale.JaJP) + } + if clause.MinCount <= 0 { + return localizedError(statusOpsEventRateClauseMinCountRequired, locale.JaJP) + } + if clause.ThreadsholdRate > 1 || clause.ThreadsholdRate <= 0 { + return localizedError(statusOpsEventRateClauseInvalidThredshold, locale.JaJP) + } + return nil +} + +func (s *AutoOpsService) validateDatetimeClauses(clauses []*autoopsproto.DatetimeClause) error { + for _, c := range clauses { + if err := s.validateDatetimeClause(c); err != nil { + return err + } + } + return nil +} + +func (s *AutoOpsService) validateDatetimeClause(clause *autoopsproto.DatetimeClause) error { + if clause.Time <= time.Now().Unix() { + return localizedError(statusDatetimeClauseInvalidTime, locale.JaJP) + } + return nil +} + +func (s *AutoOpsService) validateWebhookClauses(clauses []*autoopsproto.WebhookClause) error { + for _, c := range clauses { + if err := s.validateWebhookClause(c); err != nil { + return err + } + } + return nil +} + +func (s *AutoOpsService) validateWebhookClause(clause *autoopsproto.WebhookClause) error { + if clause.WebhookId == "" { + return localizedError(statusWebhookClauseWebhookIDRequired, locale.JaJP) + } + if len(clause.Conditions) == 0 { + return localizedError(statusWebhookClauseConditionRequired, locale.JaJP) + } + for _, c := range clause.Conditions { + if c.Filter == "" { + return localizedError(statusWebhookClauseConditionFilterRequired, locale.JaJP) + } + if c.Value == "" { + return localizedError(statusWebhookClauseConditionValueRequired, locale.JaJP) + } + _, ok := autoopsproto.WebhookClause_Condition_Operator_name[int32(c.Operator)] + if !ok { + return localizedError(statusWebhookClauseConditionInvalidOperator, locale.JaJP) + } + } + return nil +} + +func (s 
*AutoOpsService) DeleteAutoOpsRule( + ctx context.Context, + req *autoopsproto.DeleteAutoOpsRuleRequest, +) (*autoopsproto.DeleteAutoOpsRuleResponse, error) { + editor, err := s.checkRole(ctx, accountproto.Account_EDITOR, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := validateDeleteAutoOpsRuleRequest(req); err != nil { + return nil, err + } + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + autoOpsRuleStorage := v2as.NewAutoOpsRuleStorage(tx) + autoOpsRule, err := autoOpsRuleStorage.GetAutoOpsRule(ctx, req.Id, req.EnvironmentNamespace) + if err != nil { + return err + } + handler := command.NewAutoOpsCommandHandler(editor, autoOpsRule, s.publisher, req.EnvironmentNamespace) + if err := handler.Handle(ctx, req.Command); err != nil { + return err + } + return autoOpsRuleStorage.UpdateAutoOpsRule(ctx, autoOpsRule, req.EnvironmentNamespace) + }) + if err != nil { + if err == v2as.ErrAutoOpsRuleNotFound || err == v2as.ErrAutoOpsRuleUnexpectedAffectedRows { + return nil, localizedError(statusNotFound, locale.JaJP) + } + s.logger.Error( + "Failed to delete autoOpsRule", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &autoopsproto.DeleteAutoOpsRuleResponse{}, nil +} + +func validateDeleteAutoOpsRuleRequest(req *autoopsproto.DeleteAutoOpsRuleRequest) error { + if req.Id == "" { + return localizedError(statusIDRequired, locale.JaJP) + } + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + return nil +} + +func (s *AutoOpsService) UpdateAutoOpsRule( + ctx context.Context, + req 
*autoopsproto.UpdateAutoOpsRuleRequest,
) (*autoopsproto.UpdateAutoOpsRuleResponse, error) {
	editor, err := s.checkRole(ctx, accountproto.Account_EDITOR, req.EnvironmentNamespace)
	if err != nil {
		return nil, err
	}
	if err := s.validateUpdateAutoOpsRuleRequest(req); err != nil {
		return nil, err
	}
	// Collect every event-rate clause being added or changed so their goals can
	// be verified up front. NOTE(review): this existence check runs outside the
	// transaction below, so a goal deleted in between would not be caught.
	opsEventRateClauses := []*autoopsproto.OpsEventRateClause{}
	for _, c := range req.AddOpsEventRateClauseCommands {
		opsEventRateClauses = append(opsEventRateClauses, c.OpsEventRateClause)
	}
	for _, c := range req.ChangeOpsEventRateClauseCommands {
		opsEventRateClauses = append(opsEventRateClauses, c.OpsEventRateClause)
	}
	for _, c := range opsEventRateClauses {
		exist, err := s.existGoal(ctx, req.EnvironmentNamespace, c.GoalId)
		if err != nil {
			return nil, localizedError(statusInternal, locale.JaJP)
		}
		if !exist {
			s.logger.Error(
				"Goal does not exist",
				log.FieldsFromImcomingContext(ctx).AddFields(zap.String("environmentNamespace", req.EnvironmentNamespace))...,
			)
			return nil, localizedError(statusOpsEventRateClauseGoalNotFound, locale.JaJP)
		}
	}
	commands := s.createUpdateAutoOpsRuleCommands(req)
	tx, err := s.mysqlClient.BeginTx(ctx)
	if err != nil {
		s.logger.Error(
			"Failed to begin transaction",
			log.FieldsFromImcomingContext(ctx).AddFields(
				zap.Error(err),
			)...,
		)
		return nil, localizedError(statusInternal, locale.JaJP)
	}
	err = s.mysqlClient.RunInTransaction(ctx, tx, func() error {
		autoOpsRuleStorage := v2as.NewAutoOpsRuleStorage(tx)
		autoOpsRule, err := autoOpsRuleStorage.GetAutoOpsRule(ctx, req.Id, req.EnvironmentNamespace)
		if err != nil {
			return err
		}
		// Event-rate clauses are incompatible with ENABLE_FEATURE; check the
		// type the rule would end up with (requested change, else current).
		if req.ChangeAutoOpsRuleOpsTypeCommand != nil {
			if req.ChangeAutoOpsRuleOpsTypeCommand.OpsType == autoopsproto.OpsType_ENABLE_FEATURE &&
				len(req.AddOpsEventRateClauseCommands) > 0 {
				return localizedError(statusIncompatibleOpsType, locale.JaJP)
			}
		} else if autoOpsRule.OpsType == autoopsproto.OpsType_ENABLE_FEATURE &&
			len(req.AddOpsEventRateClauseCommands) > 0 {
			return localizedError(statusIncompatibleOpsType, locale.JaJP)
		}
		handler := command.NewAutoOpsCommandHandler(editor, autoOpsRule, s.publisher, req.EnvironmentNamespace)
		for _, command := range commands {
			if err := handler.Handle(ctx, command); err != nil {
				return err
			}
		}
		return autoOpsRuleStorage.UpdateAutoOpsRule(ctx, autoOpsRule, req.EnvironmentNamespace)
	})
	if err != nil {
		if err == v2as.ErrAutoOpsRuleNotFound || err == v2as.ErrAutoOpsRuleUnexpectedAffectedRows {
			return nil, localizedError(statusNotFound, locale.JaJP)
		}
		// InvalidArgument errors raised inside the transaction (e.g. the
		// incompatible ops-type check above) are returned to the caller as-is.
		if status.Code(err) == codes.InvalidArgument {
			return nil, err
		}
		s.logger.Error(
			"Failed to update autoOpsRule",
			log.FieldsFromImcomingContext(ctx).AddFields(
				zap.Error(err),
				zap.String("environmentNamespace", req.EnvironmentNamespace),
			)...,
		)
		return nil, localizedError(statusInternal, locale.JaJP)
	}
	return &autoopsproto.UpdateAutoOpsRuleResponse{}, nil
}

// validateUpdateAutoOpsRuleRequest checks the id, requires at least one
// command, and validates every clause payload carried by the commands.
func (s *AutoOpsService) validateUpdateAutoOpsRuleRequest(req *autoopsproto.UpdateAutoOpsRuleRequest) error {
	if req.Id == "" {
		return localizedError(statusIDRequired, locale.JaJP)
	}
	if s.isNoUpdateAutoOpsRuleCommand(req) {
		return localizedError(statusNoCommand, locale.JaJP)
	}
	for _, c := range req.AddOpsEventRateClauseCommands {
		if c.OpsEventRateClause == nil {
			return localizedError(statusOpsEventRateClauseRequired, locale.JaJP)
		}
		if err := s.validateOpsEventRateClause(c.OpsEventRateClause); err != nil {
			return err
		}
	}
	// Change commands additionally require the id of the clause being changed.
	for _, c := range req.ChangeOpsEventRateClauseCommands {
		if c.Id == "" {
			return localizedError(statusClauseIDRequired, locale.JaJP)
		}
		if c.OpsEventRateClause == nil {
			return localizedError(statusOpsEventRateClauseRequired, locale.JaJP)
		}
		if err := s.validateOpsEventRateClause(c.OpsEventRateClause); err != nil {
			return err
		}
	}
	for _, c := range req.DeleteClauseCommands {
		if c.Id == "" {
			return localizedError(statusClauseIDRequired, locale.JaJP)
		}
	}
	for _, c := range req.AddDatetimeClauseCommands {
		if c.DatetimeClause == nil {
			return localizedError(statusDatetimeClauseRequired, locale.JaJP)
		}
		if err := s.validateDatetimeClause(c.DatetimeClause); err != nil {
			return err
		}
	}
	for _, c := range req.ChangeDatetimeClauseCommands {
		if c.Id == "" {
			return localizedError(statusClauseIDRequired, locale.JaJP)
		}
		if c.DatetimeClause == nil {
			return localizedError(statusDatetimeClauseRequired, locale.JaJP)
		}
		if err := s.validateDatetimeClause(c.DatetimeClause); err != nil {
			return err
		}
	}
	for _, c := range req.AddWebhookClauseCommands {
		if c.WebhookClause == nil {
			return localizedError(statusWebhookClauseRequired, locale.JaJP)
		}
		if err := s.validateWebhookClause(c.WebhookClause); err != nil {
			return err
		}
	}
	for _, c := range req.ChangeWebhookClauseCommands {
		if c.Id == "" {
			return localizedError(statusClauseIDRequired, locale.JaJP)
		}
		if c.WebhookClause == nil {
			return localizedError(statusWebhookClauseRequired, locale.JaJP)
		}
		if err := s.validateWebhookClause(c.WebhookClause); err != nil {
			return err
		}
	}
	return nil
}

// isNoUpdateAutoOpsRuleCommand reports whether the request carries no command
// at all (every command field nil/empty).
func (s *AutoOpsService) isNoUpdateAutoOpsRuleCommand(req *autoopsproto.UpdateAutoOpsRuleRequest) bool {
	return req.ChangeAutoOpsRuleOpsTypeCommand == nil &&
		len(req.AddOpsEventRateClauseCommands) == 0 &&
		len(req.ChangeOpsEventRateClauseCommands) == 0 &&
		len(req.DeleteClauseCommands) == 0 &&
		len(req.AddDatetimeClauseCommands) == 0 &&
		len(req.ChangeDatetimeClauseCommands) == 0 &&
		len(req.AddWebhookClauseCommands) == 0 &&
		len(req.ChangeWebhookClauseCommands) == 0
}

// createUpdateAutoOpsRuleCommands flattens the request's command fields into a
// single ordered slice (ops-type change first, deletions last).
func (s *AutoOpsService) createUpdateAutoOpsRuleCommands(req *autoopsproto.UpdateAutoOpsRuleRequest) []command.Command {
	commands := make([]command.Command, 0)
	if req.ChangeAutoOpsRuleOpsTypeCommand != nil {
		commands = append(commands, req.ChangeAutoOpsRuleOpsTypeCommand)
	}
	for _, c
:= range req.AddOpsEventRateClauseCommands {
		commands = append(commands, c)
	}
	for _, c := range req.ChangeOpsEventRateClauseCommands {
		commands = append(commands, c)
	}
	for _, c := range req.AddDatetimeClauseCommands {
		commands = append(commands, c)
	}
	for _, c := range req.ChangeDatetimeClauseCommands {
		commands = append(commands, c)
	}
	for _, c := range req.AddWebhookClauseCommands {
		commands = append(commands, c)
	}
	for _, c := range req.ChangeWebhookClauseCommands {
		commands = append(commands, c)
	}
	// Deletions are appended last, after all add/change commands.
	for _, c := range req.DeleteClauseCommands {
		commands = append(commands, c)
	}
	return commands
}

// GetAutoOpsRule returns a single rule by id (viewer role is sufficient).
// Soft-deleted rules are reported as AlreadyDeleted rather than returned.
func (s *AutoOpsService) GetAutoOpsRule(
	ctx context.Context,
	req *autoopsproto.GetAutoOpsRuleRequest,
) (*autoopsproto.GetAutoOpsRuleResponse, error) {
	_, err := s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace)
	if err != nil {
		return nil, err
	}
	if err := s.validateGetAutoOpsRuleRequest(req); err != nil {
		return nil, err
	}
	autoOpsRuleStorage := v2as.NewAutoOpsRuleStorage(s.mysqlClient)
	autoOpsRule, err := autoOpsRuleStorage.GetAutoOpsRule(ctx, req.Id, req.EnvironmentNamespace)
	if err != nil {
		if err == v2as.ErrAutoOpsRuleNotFound {
			return nil, localizedError(statusNotFound, locale.JaJP)
		}
		// NOTE(review): unlike the other handlers in this file, this internal
		// error path does not log before returning — confirm if intentional.
		return nil, localizedError(statusInternal, locale.JaJP)
	}
	if autoOpsRule.Deleted {
		return nil, localizedError(statusAlreadyDeleted, locale.JaJP)
	}
	return &autoopsproto.GetAutoOpsRuleResponse{
		AutoOpsRule: autoOpsRule.AutoOpsRule,
	}, nil
}

// validateGetAutoOpsRuleRequest requires the rule id.
func (s *AutoOpsService) validateGetAutoOpsRuleRequest(req *autoopsproto.GetAutoOpsRuleRequest) error {
	if req.Id == "" {
		return localizedError(statusIDRequired, locale.JaJP)
	}
	return nil
}

// ListAutoOpsRules returns a page of rules, optionally filtered by feature ids
// (viewer role is sufficient).
func (s *AutoOpsService) ListAutoOpsRules(
	ctx context.Context,
	req *autoopsproto.ListAutoOpsRulesRequest,
) (*autoopsproto.ListAutoOpsRulesResponse, error) {
	_, err := s.checkRole(ctx, accountproto.Account_VIEWER,
req.EnvironmentNamespace)
	if err != nil {
		return nil, err
	}
	autoOpsRules, cursor, err := s.listAutoOpsRules(
		ctx,
		req.PageSize,
		req.Cursor,
		req.FeatureIds,
		req.EnvironmentNamespace)
	if err != nil {
		return nil, err
	}
	return &autoopsproto.ListAutoOpsRulesResponse{
		AutoOpsRules: autoOpsRules,
		Cursor:       cursor,
	}, nil
}

// listAutoOpsRules queries non-deleted rules in the environment, optionally
// restricted to featureIds. The cursor is a stringified row offset; "" means
// start from the beginning. Returns the page plus the next cursor.
func (s *AutoOpsService) listAutoOpsRules(
	ctx context.Context,
	pageSize int64,
	cursor string,
	featureIds []string,
	environmentNamespace string,
) ([]*autoopsproto.AutoOpsRule, string, error) {
	whereParts := []mysql.WherePart{
		mysql.NewFilter("deleted", "=", false),
		mysql.NewFilter("environment_namespace", "=", environmentNamespace),
	}
	fIDs := make([]interface{}, 0, len(featureIds))
	for _, fID := range featureIds {
		fIDs = append(fIDs, fID)
	}
	if len(fIDs) > 0 {
		whereParts = append(whereParts, mysql.NewInFilter("feature_id", fIDs))
	}
	limit := int(pageSize)
	if cursor == "" {
		cursor = "0"
	}
	// A non-numeric cursor is a client error, not an internal one.
	offset, err := strconv.Atoi(cursor)
	if err != nil {
		return nil, "", localizedError(statusInvalidCursor, locale.JaJP)
	}
	autoOpsRuleStorage := v2as.NewAutoOpsRuleStorage(s.mysqlClient)
	autoOpsRules, nextCursor, err := autoOpsRuleStorage.ListAutoOpsRules(
		ctx,
		whereParts,
		nil,
		limit,
		offset,
	)
	if err != nil {
		s.logger.Error(
			"Failed to list autoOpsRules",
			log.FieldsFromImcomingContext(ctx).AddFields(
				zap.Error(err),
				zap.String("environmentNamespace", environmentNamespace),
			)...,
		)
		return nil, "", localizedError(statusInternal, locale.JaJP)
	}
	return autoOpsRules, strconv.Itoa(nextCursor), nil
}

// ExecuteAutoOps marks a rule as triggered and runs its operation against the
// feature service (editor role required). Executing an already-triggered rule
// is a no-op reported via AlreadyTriggered.
func (s *AutoOpsService) ExecuteAutoOps(
	ctx context.Context,
	req *autoopsproto.ExecuteAutoOpsRequest,
) (*autoopsproto.ExecuteAutoOpsResponse, error) {
	editor, err := s.checkRole(ctx, accountproto.Account_EDITOR, req.EnvironmentNamespace)
	if err != nil {
		return nil, err
	}
	if err := s.validateExecuteAutoOpsRequest(req); err != nil {
		return nil,
err
	}
	tx, err := s.mysqlClient.BeginTx(ctx)
	if err != nil {
		s.logger.Error(
			"Failed to begin transaction",
			log.FieldsFromImcomingContext(ctx).AddFields(
				zap.Error(err),
			)...,
		)
		return nil, localizedError(statusInternal, locale.JaJP)
	}
	err = s.mysqlClient.RunInTransaction(ctx, tx, func() error {
		autoOpsRuleStorage := v2as.NewAutoOpsRuleStorage(tx)
		autoOpsRule, err := autoOpsRuleStorage.GetAutoOpsRule(ctx, req.Id, req.EnvironmentNamespace)
		if err != nil {
			return err
		}
		// A sentinel error aborts the transaction and is translated into a
		// successful AlreadyTriggered response below.
		if autoOpsRule.AlreadyTriggered() {
			return errAlreadyTriggered
		}
		handler := command.NewAutoOpsCommandHandler(editor, autoOpsRule, s.publisher, req.EnvironmentNamespace)
		if err := handler.Handle(ctx, req.ChangeAutoOpsRuleTriggeredAtCommand); err != nil {
			return err
		}
		// Persist the triggered-at change before executing the operation so a
		// failed execution still rolls back the whole transaction.
		if err = autoOpsRuleStorage.UpdateAutoOpsRule(ctx, autoOpsRule, req.EnvironmentNamespace); err != nil {
			return err
		}
		return ExecuteOperation(ctx, req.EnvironmentNamespace, autoOpsRule, s.featureClient, s.logger)
	})
	if err != nil {
		if err == errAlreadyTriggered {
			return &autoopsproto.ExecuteAutoOpsResponse{AlreadyTriggered: true}, nil
		}
		if err == v2as.ErrAutoOpsRuleNotFound || err == v2as.ErrAutoOpsRuleUnexpectedAffectedRows {
			return nil, localizedError(statusNotFound, locale.JaJP)
		}
		s.logger.Error(
			"Failed to execute autoOpsRule",
			log.FieldsFromImcomingContext(ctx).AddFields(
				zap.Error(err),
				zap.String("environmentNamespace", req.EnvironmentNamespace),
			)...,
		)
		return nil, localizedError(statusInternal, locale.JaJP)
	}
	return &autoopsproto.ExecuteAutoOpsResponse{AlreadyTriggered: false}, nil
}

// validateExecuteAutoOpsRequest requires the rule id and the triggered-at command.
func (s *AutoOpsService) validateExecuteAutoOpsRequest(req *autoopsproto.ExecuteAutoOpsRequest) error {
	if req.Id == "" {
		return localizedError(statusIDRequired, locale.JaJP)
	}
	if req.ChangeAutoOpsRuleTriggeredAtCommand == nil {
		return localizedError(statusNoCommand, locale.JaJP)
	}
	return nil
}

// ListOpsCounts returns a page of ops counts, optionally filtered by feature
// ids and/or auto-ops rule ids (viewer role is sufficient).
func (s *AutoOpsService) ListOpsCounts(
	ctx
context.Context,
	req *autoopsproto.ListOpsCountsRequest,
) (*autoopsproto.ListOpsCountsResponse, error) {
	_, err := s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace)
	if err != nil {
		return nil, err
	}
	opsCounts, cursor, err := s.listOpsCounts(
		ctx,
		req.PageSize,
		req.Cursor,
		req.EnvironmentNamespace,
		req.FeatureIds,
		req.AutoOpsRuleIds)
	if err != nil {
		return nil, err
	}
	return &autoopsproto.ListOpsCountsResponse{
		Cursor:    cursor,
		OpsCounts: opsCounts,
	}, nil
}

// listOpsCounts queries ops counts in the environment, optionally restricted
// to featureIDs and/or autoOpsRuleIDs. The cursor is a stringified row offset;
// "" means start from the beginning. Returns the page plus the next cursor.
func (s *AutoOpsService) listOpsCounts(
	ctx context.Context,
	pageSize int64,
	cursor string,
	environmentNamespace string,
	featureIDs []string,
	autoOpsRuleIDs []string,
) ([]*autoopsproto.OpsCount, string, error) {
	whereParts := []mysql.WherePart{
		mysql.NewFilter("environment_namespace", "=", environmentNamespace),
	}
	fIDs := make([]interface{}, 0, len(featureIDs))
	for _, fID := range featureIDs {
		fIDs = append(fIDs, fID)
	}
	if len(fIDs) > 0 {
		whereParts = append(whereParts, mysql.NewInFilter("feature_id", fIDs))
	}
	aIDs := make([]interface{}, 0, len(autoOpsRuleIDs))
	for _, aID := range autoOpsRuleIDs {
		aIDs = append(aIDs, aID)
	}
	if len(aIDs) > 0 {
		whereParts = append(whereParts, mysql.NewInFilter("auto_ops_rule_id", aIDs))
	}
	limit := int(pageSize)
	if cursor == "" {
		cursor = "0"
	}
	// A non-numeric cursor is a client error, not an internal one.
	offset, err := strconv.Atoi(cursor)
	if err != nil {
		return nil, "", localizedError(statusInvalidCursor, locale.JaJP)
	}
	opsCountStorage := v2os.NewOpsCountStorage(s.mysqlClient)
	opsCounts, nextCursor, err := opsCountStorage.ListOpsCounts(
		ctx,
		whereParts,
		nil,
		limit,
		offset,
	)
	if err != nil {
		s.logger.Error(
			"Failed to list opsCounts",
			log.FieldsFromImcomingContext(ctx).AddFields(
				zap.Error(err),
				zap.String("environmentNamespace", environmentNamespace),
			)...,
		)
		return nil, "", localizedError(statusInternal, locale.JaJP)
	}
	return opsCounts, strconv.Itoa(nextCursor), nil
}

func
(s *AutoOpsService) existGoal(ctx context.Context, environmentNamespace string, goalID string) (bool, error) {
	// A missing key means "does not exist" (false, nil); any other error is
	// propagated to the caller.
	_, err := s.getGoal(ctx, environmentNamespace, goalID)
	if err != nil {
		if err == storage.ErrKeyNotFound {
			return false, nil
		}
		return false, err
	}
	return true, nil
}

// getGoal fetches a goal from the experiment service, logging any failure
// with its environment and goal id.
func (s *AutoOpsService) getGoal(
	ctx context.Context,
	environmentNamespace, goalID string,
) (*experimentproto.Goal, error) {
	resp, err := s.experimentClient.GetGoal(ctx, &experimentproto.GetGoalRequest{
		Id:                   goalID,
		EnvironmentNamespace: environmentNamespace,
	})
	if err != nil {
		s.logger.Error(
			"Failed to get goal",
			log.FieldsFromImcomingContext(ctx).AddFields(
				zap.Error(err),
				zap.String("environmentNamespace", environmentNamespace),
				zap.String("goalId", goalID),
			)...,
		)
		return nil, err
	}
	return resp.Goal, nil
}

// checkRole resolves the caller's account in the given environment and checks
// it holds at least requiredRole. Unauthenticated and PermissionDenied are
// logged at Info and mapped to localized errors; anything else is Internal.
func (s *AutoOpsService) checkRole(
	ctx context.Context,
	requiredRole accountproto.Account_Role,
	environmentNamespace string,
) (*eventproto.Editor, error) {
	editor, err := role.CheckRole(ctx, requiredRole, func(email string) (*accountproto.GetAccountResponse, error) {
		return s.accountClient.GetAccount(ctx, &accountproto.GetAccountRequest{
			Email:                email,
			EnvironmentNamespace: environmentNamespace,
		})
	})
	if err != nil {
		switch status.Code(err) {
		case codes.Unauthenticated:
			s.logger.Info(
				"Unauthenticated",
				log.FieldsFromImcomingContext(ctx).AddFields(
					zap.Error(err),
					zap.String("environmentNamespace", environmentNamespace),
				)...,
			)
			return nil, localizedError(statusUnauthenticated, locale.JaJP)
		case codes.PermissionDenied:
			s.logger.Info(
				"Permission denied",
				log.FieldsFromImcomingContext(ctx).AddFields(
					zap.Error(err),
					zap.String("environmentNamespace", environmentNamespace),
				)...,
			)
			return nil, localizedError(statusPermissionDenied, locale.JaJP)
		default:
			s.logger.Error(
				"Failed to check role",
				log.FieldsFromImcomingContext(ctx).AddFields(
					zap.Error(err),
zap.String("environmentNamespace", environmentNamespace),
				)...,
			)
			return nil, localizedError(statusInternal, locale.JaJP)
		}
	}
	return editor, nil
}

// reportInternalServerError logs err with its environment namespace and
// returns statusInternal decorated with a localized message for the caller's
// locale. If attaching the detail fails, the bare statusInternal is returned.
func (s *AutoOpsService) reportInternalServerError(
	ctx context.Context,
	err error,
	environmentNamespace string,
	localizer locale.Localizer,
) error {
	s.logger.Error(
		"Internal server error",
		log.FieldsFromImcomingContext(ctx).AddFields(
			zap.Error(err),
			zap.String("environmentNamespace", environmentNamespace),
		)...,
	)
	dt, err := statusInternal.WithDetails(&errdetails.LocalizedMessage{
		Locale:  localizer.GetLocale(),
		Message: localizer.MustLocalize(locale.InternalServerError),
	})
	if err != nil {
		return statusInternal.Err()
	}
	return dt.Err()
}
diff --git a/pkg/autoops/api/api_test.go b/pkg/autoops/api/api_test.go
new file mode 100644
index 000000000..06e3907e0
--- /dev/null
+++ b/pkg/autoops/api/api_test.go
@@ -0,0 +1,792 @@
+// Copyright 2022 The Bucketeer Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package api + +import ( + "context" + "errors" + "net/url" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + + accountclientmock "github.com/bucketeer-io/bucketeer/pkg/account/client/mock" + authclientmock "github.com/bucketeer-io/bucketeer/pkg/auth/client/mock" + experimentclientmock "github.com/bucketeer-io/bucketeer/pkg/experiment/client/mock" + featureclientmock "github.com/bucketeer-io/bucketeer/pkg/feature/client/mock" + "github.com/bucketeer-io/bucketeer/pkg/locale" + publishermock "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher/mock" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/storage" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + mysqlmock "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + "github.com/bucketeer-io/bucketeer/pkg/token" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + autoopsproto "github.com/bucketeer-io/bucketeer/proto/autoops" + experimentproto "github.com/bucketeer-io/bucketeer/proto/experiment" +) + +var testWebhookURL = func() *url.URL { + u, err := url.Parse("https://bucketeer.io/hook") + if err != nil { + panic(err) + } + return u +}() + +type dummyWebhookCryptoUtil struct{} + +func (u *dummyWebhookCryptoUtil) Encrypt(ctx context.Context, data []byte) ([]byte, error) { + return []byte(data), nil +} + +func (u *dummyWebhookCryptoUtil) Decrypt(ctx context.Context, data []byte) ([]byte, error) { + return []byte(data), nil +} + +func TestNewAutoOpsService(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + mysqlClientMock := mysqlmock.NewMockClient(mockController) + featureClientMock := featureclientmock.NewMockClient(mockController) + experimentClientMock := experimentclientmock.NewMockClient(mockController) + accountClientMock := accountclientmock.NewMockClient(mockController) + authClientMock := 
authclientmock.NewMockClient(mockController) + p := publishermock.NewMockPublisher(mockController) + logger := zap.NewNop() + s := NewAutoOpsService( + mysqlClientMock, + featureClientMock, + experimentClientMock, + accountClientMock, + authClientMock, + p, + testWebhookURL, + &dummyWebhookCryptoUtil{}, + WithLogger(logger), + ) + assert.IsType(t, &AutoOpsService{}, s) +} + +func TestCreateAutoOpsRuleMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*AutoOpsService) + req *autoopsproto.CreateAutoOpsRuleRequest + expectedErr error + }{ + "err: ErrNoCommand": { + req: &autoopsproto.CreateAutoOpsRuleRequest{}, + expectedErr: localizedError(statusNoCommand, locale.JaJP), + }, + "err: ErrFeatureIDRequired": { + req: &autoopsproto.CreateAutoOpsRuleRequest{ + Command: &autoopsproto.CreateAutoOpsRuleCommand{}, + }, + expectedErr: localizedError(statusFeatureIDRequired, locale.JaJP), + }, + "err: ErrClauseRequired": { + req: &autoopsproto.CreateAutoOpsRuleRequest{ + Command: &autoopsproto.CreateAutoOpsRuleCommand{ + FeatureId: "fid", + OpsType: autoopsproto.OpsType_ENABLE_FEATURE, + }, + }, + expectedErr: localizedError(statusClauseRequired, locale.JaJP), + }, + "err: ErrIncompatibleOpsType": { + req: &autoopsproto.CreateAutoOpsRuleRequest{ + Command: &autoopsproto.CreateAutoOpsRuleCommand{ + FeatureId: "fid", + OpsType: autoopsproto.OpsType_ENABLE_FEATURE, + OpsEventRateClauses: []*autoopsproto.OpsEventRateClause{ + { + VariationId: "", + GoalId: "gid", + MinCount: 10, + ThreadsholdRate: 0.5, + Operator: autoopsproto.OpsEventRateClause_GREATER_OR_EQUAL, + }, + }, + }, + }, + expectedErr: localizedError(statusIncompatibleOpsType, locale.JaJP), + }, + "err: ErrOpsEventRateClauseVariationIDRequired": { + req: &autoopsproto.CreateAutoOpsRuleRequest{ + Command: &autoopsproto.CreateAutoOpsRuleCommand{ + FeatureId: "fid", + OpsType: autoopsproto.OpsType_DISABLE_FEATURE, 
+ OpsEventRateClauses: []*autoopsproto.OpsEventRateClause{ + { + VariationId: "", + GoalId: "gid1", + MinCount: 10, + ThreadsholdRate: 0.5, + Operator: autoopsproto.OpsEventRateClause_GREATER_OR_EQUAL, + }, + }, + }, + }, + expectedErr: localizedError(statusOpsEventRateClauseVariationIDRequired, locale.JaJP), + }, + "err: ErrOpsEventRateClauseGoalIDRequired": { + req: &autoopsproto.CreateAutoOpsRuleRequest{ + Command: &autoopsproto.CreateAutoOpsRuleCommand{ + FeatureId: "fid", + OpsType: autoopsproto.OpsType_DISABLE_FEATURE, + OpsEventRateClauses: []*autoopsproto.OpsEventRateClause{ + { + VariationId: "vid", + GoalId: "", + MinCount: 10, + ThreadsholdRate: 0.5, + Operator: autoopsproto.OpsEventRateClause_GREATER_OR_EQUAL, + }, + }, + }, + }, + expectedErr: localizedError(statusOpsEventRateClauseGoalIDRequired, locale.JaJP), + }, + "err: ErrOpsEventRateClauseMinCountRequired": { + req: &autoopsproto.CreateAutoOpsRuleRequest{ + Command: &autoopsproto.CreateAutoOpsRuleCommand{ + FeatureId: "fid", + OpsType: autoopsproto.OpsType_DISABLE_FEATURE, + OpsEventRateClauses: []*autoopsproto.OpsEventRateClause{ + { + VariationId: "vid", + GoalId: "gid", + MinCount: 0, + ThreadsholdRate: 0.5, + Operator: autoopsproto.OpsEventRateClause_GREATER_OR_EQUAL, + }, + }, + }, + }, + expectedErr: localizedError(statusOpsEventRateClauseMinCountRequired, locale.JaJP), + }, + "err: ErrOpsEventRateClauseInvalidThredshold: less": { + req: &autoopsproto.CreateAutoOpsRuleRequest{ + Command: &autoopsproto.CreateAutoOpsRuleCommand{ + FeatureId: "fid", + OpsType: autoopsproto.OpsType_DISABLE_FEATURE, + OpsEventRateClauses: []*autoopsproto.OpsEventRateClause{ + { + VariationId: "vid", + GoalId: "gid", + MinCount: 10, + ThreadsholdRate: -0.1, + Operator: autoopsproto.OpsEventRateClause_GREATER_OR_EQUAL, + }, + }, + }, + }, + expectedErr: localizedError(statusOpsEventRateClauseInvalidThredshold, locale.JaJP), + }, + "err: ErrOpsEventRateClauseInvalidThredshold: greater": { + req: 
&autoopsproto.CreateAutoOpsRuleRequest{ + Command: &autoopsproto.CreateAutoOpsRuleCommand{ + FeatureId: "fid", + OpsType: autoopsproto.OpsType_DISABLE_FEATURE, + OpsEventRateClauses: []*autoopsproto.OpsEventRateClause{ + { + VariationId: "vid", + GoalId: "gid", + MinCount: 10, + ThreadsholdRate: 1.1, + Operator: autoopsproto.OpsEventRateClause_GREATER_OR_EQUAL, + }, + }, + }, + }, + expectedErr: localizedError(statusOpsEventRateClauseInvalidThredshold, locale.JaJP), + }, + "err: ErrDatetimeClauseInvalidTime": { + req: &autoopsproto.CreateAutoOpsRuleRequest{ + Command: &autoopsproto.CreateAutoOpsRuleCommand{ + FeatureId: "fid", + OpsType: autoopsproto.OpsType_ENABLE_FEATURE, + DatetimeClauses: []*autoopsproto.DatetimeClause{ + {Time: 0}, + }, + }, + }, + expectedErr: localizedError(statusDatetimeClauseInvalidTime, locale.JaJP), + }, + "err: ErrWebhookClauseWebhookIDRequired": { + req: &autoopsproto.CreateAutoOpsRuleRequest{ + Command: &autoopsproto.CreateAutoOpsRuleCommand{ + FeatureId: "fid", + OpsType: autoopsproto.OpsType_DISABLE_FEATURE, + WebhookClauses: []*autoopsproto.WebhookClause{ + { + WebhookId: "", + Conditions: []*autoopsproto.WebhookClause_Condition{ + { + Filter: ".foo.bar", + Value: "foobaz", + Operator: autoopsproto.WebhookClause_Condition_EQUAL, + }, + }, + }, + }, + }, + }, + expectedErr: localizedError(statusWebhookClauseWebhookIDRequired, locale.JaJP), + }, + "err: ErrWebhookClauseWebhookClauseConditionRequired": { + req: &autoopsproto.CreateAutoOpsRuleRequest{ + Command: &autoopsproto.CreateAutoOpsRuleCommand{ + FeatureId: "fid", + OpsType: autoopsproto.OpsType_DISABLE_FEATURE, + WebhookClauses: []*autoopsproto.WebhookClause{ + { + WebhookId: "webhook-1", + Conditions: []*autoopsproto.WebhookClause_Condition{}, + }, + }, + }, + }, + expectedErr: localizedError(statusWebhookClauseConditionRequired, locale.JaJP), + }, + "err: ErrWebhookClauseWebhookClauseConditionFilterRequired": { + req: &autoopsproto.CreateAutoOpsRuleRequest{ + Command: 
&autoopsproto.CreateAutoOpsRuleCommand{ + FeatureId: "fid", + OpsType: autoopsproto.OpsType_DISABLE_FEATURE, + WebhookClauses: []*autoopsproto.WebhookClause{ + { + WebhookId: "foo-id", + Conditions: []*autoopsproto.WebhookClause_Condition{ + { + Filter: "", + Value: "foobaz", + Operator: autoopsproto.WebhookClause_Condition_EQUAL, + }, + }, + }, + }, + }, + }, + expectedErr: localizedError(statusWebhookClauseConditionFilterRequired, locale.JaJP), + }, + "success": { + setup: func(s *AutoOpsService) { + s.experimentClient.(*experimentclientmock.MockClient).EXPECT().GetGoal( + gomock.Any(), gomock.Any(), + ).Return(&experimentproto.GetGoalResponse{}, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + req: &autoopsproto.CreateAutoOpsRuleRequest{ + Command: &autoopsproto.CreateAutoOpsRuleCommand{ + FeatureId: "fid", + OpsType: autoopsproto.OpsType_DISABLE_FEATURE, + OpsEventRateClauses: []*autoopsproto.OpsEventRateClause{ + { + VariationId: "vid", + GoalId: "gid", + MinCount: 10, + ThreadsholdRate: 0.5, + Operator: autoopsproto.OpsEventRateClause_GREATER_OR_EQUAL, + }, + }, + DatetimeClauses: []*autoopsproto.DatetimeClause{ + {Time: time.Now().AddDate(0, 0, 1).Unix()}, + }, + WebhookClauses: []*autoopsproto.WebhookClause{ + { + WebhookId: "foo-id", + Conditions: []*autoopsproto.WebhookClause_Condition{ + { + Filter: ".foo.bar", + Value: "foobaz", + Operator: autoopsproto.WebhookClause_Condition_EQUAL, + }, + }, + }, + }, + }, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + s := createAutoOpsService(mockController, nil) + if p.setup != nil { + p.setup(s) + } + _, err := s.CreateAutoOpsRule(createContextWithTokenRoleOwner(t), p.req) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestUpdateAutoOpsRuleMySQL(t *testing.T) { + 
t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*AutoOpsService) + req *autoopsproto.UpdateAutoOpsRuleRequest + expected *autoopsproto.UpdateAutoOpsRuleResponse + expectedErr error + }{ + "err: ErrIDRequired": { + req: &autoopsproto.UpdateAutoOpsRuleRequest{}, + expected: nil, + expectedErr: localizedError(statusIDRequired, locale.JaJP), + }, + "err: ErrNoCommand": { + req: &autoopsproto.UpdateAutoOpsRuleRequest{ + Id: "aid1", + }, + expected: nil, + expectedErr: localizedError(statusNoCommand, locale.JaJP), + }, + "err: ErrOpsEventRateClauseRequired": { + req: &autoopsproto.UpdateAutoOpsRuleRequest{ + Id: "aid1", + AddOpsEventRateClauseCommands: []*autoopsproto.AddOpsEventRateClauseCommand{{}}, + }, + expected: nil, + expectedErr: localizedError(statusOpsEventRateClauseRequired, locale.JaJP), + }, + "err: DeleteClauseCommand: ErrClauseIdRequired": { + req: &autoopsproto.UpdateAutoOpsRuleRequest{ + Id: "aid1", + DeleteClauseCommands: []*autoopsproto.DeleteClauseCommand{{}}, + }, + expected: nil, + expectedErr: localizedError(statusClauseIDRequired, locale.JaJP), + }, + "err: ChangeOpsEventRateClauseCommand: ErrClauseIdRequired": { + req: &autoopsproto.UpdateAutoOpsRuleRequest{ + Id: "aid1", + ChangeOpsEventRateClauseCommands: []*autoopsproto.ChangeOpsEventRateClauseCommand{{}}, + }, + expected: nil, + expectedErr: localizedError(statusClauseIDRequired, locale.JaJP), + }, + "err: ChangeOpsEventRateClauseCommand: ErrOpsEventRateClauseRequired": { + req: &autoopsproto.UpdateAutoOpsRuleRequest{ + Id: "aid1", + ChangeOpsEventRateClauseCommands: []*autoopsproto.ChangeOpsEventRateClauseCommand{{ + Id: "aid", + }}, + }, + expected: nil, + expectedErr: localizedError(statusOpsEventRateClauseRequired, locale.JaJP), + }, + "err: ErrDatetimeClauseReqired": { + req: &autoopsproto.UpdateAutoOpsRuleRequest{ + Id: "aid1", + AddDatetimeClauseCommands: 
[]*autoopsproto.AddDatetimeClauseCommand{{}}, + }, + expected: nil, + expectedErr: localizedError(statusDatetimeClauseRequired, locale.JaJP), + }, + "err: ChangeDatetimeClauseCommand: ErrDatetimeClauseInvalidTime": { + req: &autoopsproto.UpdateAutoOpsRuleRequest{ + Id: "aid1", + ChangeDatetimeClauseCommands: []*autoopsproto.ChangeDatetimeClauseCommand{{ + Id: "aid", + DatetimeClause: &autoopsproto.DatetimeClause{Time: 0}, + }}, + }, + expected: nil, + expectedErr: localizedError(statusDatetimeClauseInvalidTime, locale.JaJP), + }, + "err: ErrWebhookClauseRequired": { + req: &autoopsproto.UpdateAutoOpsRuleRequest{ + Id: "aid1", + AddWebhookClauseCommands: []*autoopsproto.AddWebhookClauseCommand{{}}, + }, + expected: nil, + expectedErr: localizedError(statusWebhookClauseRequired, locale.JaJP), + }, + "err: ChangeWebhookClauseCommand: ErrWebhookClauseWebhookClauseConditionRequired": { + req: &autoopsproto.UpdateAutoOpsRuleRequest{ + Id: "aid1", + ChangeWebhookClauseCommands: []*autoopsproto.ChangeWebhookClauseCommand{ + { + Id: "aid", + WebhookClause: &autoopsproto.WebhookClause{ + WebhookId: "foo-id", + Conditions: []*autoopsproto.WebhookClause_Condition{}, + }, + }, + }, + }, + expected: nil, + expectedErr: localizedError(statusWebhookClauseConditionRequired, locale.JaJP), + }, + "success": { + setup: func(s *AutoOpsService) { + s.experimentClient.(*experimentclientmock.MockClient).EXPECT().GetGoal( + gomock.Any(), gomock.Any(), + ).Return(&experimentproto.GetGoalResponse{}, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + req: &autoopsproto.UpdateAutoOpsRuleRequest{ + Id: "aid1", + EnvironmentNamespace: "ns0", + ChangeAutoOpsRuleOpsTypeCommand: &autoopsproto.ChangeAutoOpsRuleOpsTypeCommand{OpsType: autoopsproto.OpsType_DISABLE_FEATURE}, + AddOpsEventRateClauseCommands: 
[]*autoopsproto.AddOpsEventRateClauseCommand{{ + OpsEventRateClause: &autoopsproto.OpsEventRateClause{ + VariationId: "vid", + GoalId: "gid", + MinCount: 10, + ThreadsholdRate: 0.5, + Operator: autoopsproto.OpsEventRateClause_GREATER_OR_EQUAL, + }, + }}, + DeleteClauseCommands: []*autoopsproto.DeleteClauseCommand{{ + Id: "cid", + }}, + AddDatetimeClauseCommands: []*autoopsproto.AddDatetimeClauseCommand{{ + DatetimeClause: &autoopsproto.DatetimeClause{ + Time: time.Now().AddDate(0, 0, 1).Unix(), + }, + }}, + }, + expected: &autoopsproto.UpdateAutoOpsRuleResponse{}, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + s := createAutoOpsService(mockController, nil) + if p.setup != nil { + p.setup(s) + } + _, err := s.UpdateAutoOpsRule(createContextWithTokenRoleOwner(t), p.req) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestDeleteAutoOpsRuleMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*AutoOpsService) + req *autoopsproto.DeleteAutoOpsRuleRequest + expectedErr error + }{ + "err: ErrIDRequired": { + req: &autoopsproto.DeleteAutoOpsRuleRequest{}, + expectedErr: localizedError(statusIDRequired, locale.JaJP), + }, + "err: ErrNoCommand": { + req: &autoopsproto.DeleteAutoOpsRuleRequest{ + Id: "aid1", + }, + expectedErr: localizedError(statusNoCommand, locale.JaJP), + }, + "success": { + setup: func(s *AutoOpsService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + req: &autoopsproto.DeleteAutoOpsRuleRequest{ + Id: "aid1", + EnvironmentNamespace: "ns0", + Command: &autoopsproto.DeleteAutoOpsRuleCommand{}, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + s := 
createAutoOpsService(mockController, nil) + if p.setup != nil { + p.setup(s) + } + _, err := s.DeleteAutoOpsRule(createContextWithTokenRoleOwner(t), p.req) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestGetAutoOpsRuleMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*AutoOpsService) + req *autoopsproto.GetAutoOpsRuleRequest + expectedErr error + }{ + "err: ErrIDRequired": { + req: &autoopsproto.GetAutoOpsRuleRequest{}, + expectedErr: localizedError(statusIDRequired, locale.JaJP), + }, + "err: ErrNotFound": { + setup: func(s *AutoOpsService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + req: &autoopsproto.GetAutoOpsRuleRequest{Id: "wrongid", EnvironmentNamespace: "ns0"}, + expectedErr: localizedError(statusNotFound, locale.JaJP), + }, + "success": { + setup: func(s *AutoOpsService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + req: &autoopsproto.GetAutoOpsRuleRequest{Id: "aid1", EnvironmentNamespace: "ns0"}, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + s := createAutoOpsService(mockController, nil) + if p.setup != nil { + p.setup(s) + } + _, err := s.GetAutoOpsRule(createContextWithTokenRoleUnassigned(t), p.req) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestListAutoOpsRulesMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := []struct { + setup func(*AutoOpsService) + req *autoopsproto.ListAutoOpsRulesRequest + expectedErr error + }{ 
+ { + setup: func(s *AutoOpsService) { + rows := mysqlmock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + }, + req: &autoopsproto.ListAutoOpsRulesRequest{EnvironmentNamespace: "ns0", Cursor: ""}, + expectedErr: nil, + }, + } + for _, p := range patterns { + service := createAutoOpsService(mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.ListAutoOpsRules(createContextWithTokenRoleUnassigned(t), p.req) + assert.Equal(t, p.expectedErr, err) + } +} + +func TestExecuteAutoOpsRuleMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*AutoOpsService) + req *autoopsproto.ExecuteAutoOpsRequest + expectedErr error + }{ + "err: ErrIDRequired": { + req: &autoopsproto.ExecuteAutoOpsRequest{}, + expectedErr: localizedError(statusIDRequired, locale.JaJP), + }, + "err: ErrNoCommand": { + req: &autoopsproto.ExecuteAutoOpsRequest{ + Id: "aid", + }, + expectedErr: localizedError(statusNoCommand, locale.JaJP), + }, + "success: AlreadyTriggered": { + setup: func(s *AutoOpsService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(errAlreadyTriggered) + }, + req: &autoopsproto.ExecuteAutoOpsRequest{ + Id: "aid3", + EnvironmentNamespace: "ns0", + ChangeAutoOpsRuleTriggeredAtCommand: &autoopsproto.ChangeAutoOpsRuleTriggeredAtCommand{}, + }, + expectedErr: nil, + }, + "success": { + setup: func(s *AutoOpsService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + 
gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + req: &autoopsproto.ExecuteAutoOpsRequest{ + Id: "aid1", + EnvironmentNamespace: "ns0", + ChangeAutoOpsRuleTriggeredAtCommand: &autoopsproto.ChangeAutoOpsRuleTriggeredAtCommand{}, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + s := createAutoOpsService(mockController, nil) + if p.setup != nil { + p.setup(s) + } + _, err := s.ExecuteAutoOps(createContextWithTokenRoleOwner(t), p.req) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestExistGoal(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*AutoOpsService) + goalID string + expected bool + expectedErr error + }{ + "not found": { + setup: func(s *AutoOpsService) { + s.experimentClient.(*experimentclientmock.MockClient).EXPECT().GetGoal(gomock.Any(), gomock.Any()).Return(nil, storage.ErrKeyNotFound) + }, + goalID: "gid-0", + expected: false, + expectedErr: nil, + }, + "fails": { + setup: func(s *AutoOpsService) { + s.experimentClient.(*experimentclientmock.MockClient).EXPECT().GetGoal(gomock.Any(), gomock.Any()).Return(nil, errors.New("test")) + }, + goalID: "gid-0", + expected: false, + expectedErr: errors.New("test"), + }, + "exists": { + setup: func(s *AutoOpsService) { + s.experimentClient.(*experimentclientmock.MockClient).EXPECT().GetGoal(gomock.Any(), gomock.Any()).Return(&experimentproto.GetGoalResponse{}, nil) + }, + goalID: "gid-0", + expected: true, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + s := createAutoOpsService(mockController, nil) + if p.setup != nil { + p.setup(s) + } + actual, err := s.existGoal(context.Background(), "ns-0", p.goalID) + assert.Equal(t, p.expected, actual) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func createAutoOpsService(c *gomock.Controller, db storage.Client) *AutoOpsService { + 
mysqlClientMock := mysqlmock.NewMockClient(c) + featureClientMock := featureclientmock.NewMockClient(c) + accountClientMock := accountclientmock.NewMockClient(c) + ar := &accountproto.GetAccountResponse{ + Account: &accountproto.Account{ + Email: "email", + Role: accountproto.Account_VIEWER, + }, + } + accountClientMock.EXPECT().GetAccount(gomock.Any(), gomock.Any()).Return(ar, nil).AnyTimes() + experimentClientMock := experimentclientmock.NewMockClient(c) + authClientMock := authclientmock.NewMockClient(c) + p := publishermock.NewMockPublisher(c) + logger := zap.NewNop() + return NewAutoOpsService( + mysqlClientMock, + featureClientMock, + experimentClientMock, + accountClientMock, + authClientMock, + p, + testWebhookURL, + &dummyWebhookCryptoUtil{}, + WithLogger(logger), + ) +} + +func createContextWithTokenRoleUnassigned(t *testing.T) context.Context { + t.Helper() + token := &token.IDToken{ + Issuer: "issuer", + Subject: "sub", + Audience: "audience", + Expiry: time.Now().AddDate(100, 0, 0), + IssuedAt: time.Now(), + Email: "email", + AdminRole: accountproto.Account_UNASSIGNED, + } + ctx := context.TODO() + return context.WithValue(ctx, rpc.Key, token) +} + +func createContextWithTokenRoleOwner(t *testing.T) context.Context { + t.Helper() + token := &token.IDToken{ + Issuer: "issuer", + Subject: "sub", + Audience: "audience", + Expiry: time.Now().AddDate(100, 0, 0), + IssuedAt: time.Now(), + Email: "email", + AdminRole: accountproto.Account_OWNER, + } + ctx := context.TODO() + return context.WithValue(ctx, rpc.Key, token) +} diff --git a/pkg/autoops/api/error.go b/pkg/autoops/api/error.go new file mode 100644 index 000000000..1d5bf9a2e --- /dev/null +++ b/pkg/autoops/api/error.go @@ -0,0 +1,396 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc/codes" + gstatus "google.golang.org/grpc/status" + + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/rpc/status" +) + +var ( + statusInternal = gstatus.New(codes.Internal, "autoops: internal") + statusUnknownOpsType = gstatus.New(codes.Internal, "autoops: unknown ops type") + statusInvalidCursor = gstatus.New(codes.InvalidArgument, "autoops: cursor is invalid") + statusInvalidOrderBy = gstatus.New(codes.InvalidArgument, "autoops: order_by is invalid") + statusNoCommand = gstatus.New(codes.InvalidArgument, "autoops: no command") + statusIDRequired = gstatus.New(codes.InvalidArgument, "autoops: id must be specified") + statusFeatureIDRequired = gstatus.New( + codes.InvalidArgument, + "autoops: featureId must be specified", + ) + statusClauseRequired = gstatus.New( + codes.InvalidArgument, + "autoops: at least one clause must be specified", + ) + statusClauseIDRequired = gstatus.New(codes.InvalidArgument, "autoops: clause id must be specified") + statusIncompatibleOpsType = gstatus.New( + codes.InvalidArgument, + "autoops: ops type is incompatible with ops event rate clause", + ) + statusOpsEventRateClauseRequired = gstatus.New( + codes.InvalidArgument, + "autoops: ops event rate clause must be specified", + ) + statusOpsEventRateClauseFeatureVersionRequired = gstatus.New( + codes.InvalidArgument, + "autoops: ops event rate clause feature version must be specified", + ) + 
statusOpsEventRateClauseVariationIDRequired = gstatus.New( + codes.InvalidArgument, + "autoops: ops event rate clause variation id must be specified", + ) + statusOpsEventRateClauseGoalIDRequired = gstatus.New( + codes.InvalidArgument, + "autoops: ops event rate clause goal id is required", + ) + statusOpsEventRateClauseMinCountRequired = gstatus.New( + codes.InvalidArgument, + "autoops: ops event rate clause min count must be specified", + ) + statusOpsEventRateClauseInvalidThredshold = gstatus.New( + codes.InvalidArgument, + "autoops: ops event rate clause thredshold must be >0 and <=1", + ) + statusDatetimeClauseRequired = gstatus.New(codes.InvalidArgument, "autoops: datetime clause must be specified") + statusDatetimeClauseInvalidTime = gstatus.New( + codes.InvalidArgument, + "autoops: datetime clause time must be after now timestamp", + ) + statusNotFound = gstatus.New(codes.NotFound, "autoops: not found") + statusAlreadyDeleted = gstatus.New(codes.NotFound, "autoops: already deleted") + statusOpsEventRateClauseGoalNotFound = gstatus.New( + codes.NotFound, + "autoops: ops event rate clause goal does not exist", + ) + statusWebhookNotFound = gstatus.New(codes.NotFound, "autoops: webhook not found") + statusWebhookClauseRequired = gstatus.New( + codes.InvalidArgument, + "autoops: webhook clause is required", + ) + statusWebhookClauseWebhookIDRequired = gstatus.New( + codes.InvalidArgument, + "autoops: webhook clause wehook id is required", + ) + statusWebhookClauseConditionRequired = gstatus.New( + codes.InvalidArgument, + "autoops: webhook clause condition is required", + ) + statusWebhookClauseConditionFilterRequired = gstatus.New( + codes.InvalidArgument, + "autoops: webhook clause condition filter is required", + ) + statusWebhookClauseConditionValueRequired = gstatus.New( + codes.InvalidArgument, + "autoops: webhook clause condition value is required", + ) + statusWebhookClauseConditionInvalidOperator = gstatus.New( + codes.InvalidArgument, + "autoops: 
webhook clause condition oerator is invalid", + ) + statusAlreadyExists = gstatus.New(codes.AlreadyExists, "autoops: already exists") + statusUnauthenticated = gstatus.New(codes.Unauthenticated, "autoops: unauthenticated") + statusPermissionDenied = gstatus.New(codes.PermissionDenied, "autoops: permission denied") + statusInvalidRequest = gstatus.New(codes.InvalidArgument, "autoops: invalid request") + + errInternalJaJP = status.MustWithDetails( + statusInternal, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "内部エラーが発生しました", + }, + ) + errUnknownOpsTypeJaJP = status.MustWithDetails( + statusUnknownOpsType, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不明なオペレーションタイプです", + }, + ) + errInvalidCursorJaJP = status.MustWithDetails( + statusInvalidCursor, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不正なcursorです", + }, + ) + errInvalidOrderByJaJP = status.MustWithDetails( + statusInvalidOrderBy, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不正なソート順の指定です", + }, + ) + errNoCommandJaJP = status.MustWithDetails( + statusNoCommand, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "commandは必須です", + }, + ) + errIDRequiredJaJP = status.MustWithDetails( + statusIDRequired, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "idは必須です", + }, + ) + errFeatureIDRequiredJaJP = status.MustWithDetails( + statusFeatureIDRequired, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "feature idは必須です", + }, + ) + errClauseRequiredJaJP = status.MustWithDetails( + statusClauseRequired, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "自動オペレーションルールは必須です", + }, + ) + errClauseIDRequiredJaJP = status.MustWithDetails( + statusClauseIDRequired, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "自動オペレーションルールのidは必須です", + }, + ) + errIncompatibleOpsTypeJaJP = status.MustWithDetails( + statusIncompatibleOpsType, + &errdetails.LocalizedMessage{ + 
Locale: locale.JaJP, + Message: "対象のオペレーションタイプに対応していない自動オペレーションルールがあります", + }, + ) + errOpsEventRateClauseRequiredJaJP = status.MustWithDetails( + statusOpsEventRateClauseRequired, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "イベントレートルールは必須です", + }, + ) + errOpsEventRateClauseFeatureVersionRequiredJaJP = status.MustWithDetails( + statusOpsEventRateClauseFeatureVersionRequired, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "イベントレートルールのfeature versionは必須です", + }, + ) + errOpsEventRateClauseVariationIDRequiredJaJP = status.MustWithDetails( + statusOpsEventRateClauseVariationIDRequired, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "イベントレートルールのvariation idは必須です", + }, + ) + errOpsEventRateClauseGoalIDRequiredJaJP = status.MustWithDetails( + statusOpsEventRateClauseGoalIDRequired, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "イベントレートルールのgoal idは必須です", + }, + ) + errOpsEventRateClauseMinCountRequiredJaJP = status.MustWithDetails( + statusOpsEventRateClauseMinCountRequired, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "イベントレートルールのminimum countは必須です", + }, + ) + errOpsEventRateClauseInvalidThredsholdJaJP = status.MustWithDetails( + statusOpsEventRateClauseMinCountRequired, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "イベントレートルールのしきい値が不正です", + }, + ) + errDatetimeClauseRequiredJaJP = status.MustWithDetails( + statusDatetimeClauseRequired, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "日時ルールは必須です", + }, + ) + errDatetimeClauseInvalidTimeJaJP = status.MustWithDetails( + statusDatetimeClauseInvalidTime, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "日時ルールの日時が不正です", + }, + ) + errWebhookNotFoundJaJP = status.MustWithDetails( + statusWebhookNotFound, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "ウェブフックが存在しません", + }, + ) + errWebhookClauseRequiredJaJP = status.MustWithDetails( + 
statusWebhookClauseRequired, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "ウェブフックルールは必須です", + }, + ) + errWebhookClauseWebhookIDRequiredJaJP = status.MustWithDetails( + statusWebhookClauseWebhookIDRequired, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "ウェブフックルールのwebhook idは必須です", + }, + ) + errWebhookClauseConditionRequiredJaJP = status.MustWithDetails( + statusWebhookClauseConditionRequired, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "ウェブフックルールのconditionは必須です", + }, + ) + errWebhookClauseConditionFilterRequiredJaJP = status.MustWithDetails( + statusWebhookClauseConditionFilterRequired, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "ウェブフックルールのconditionのfilterは必須です", + }, + ) + errWebhookClauseConditionValueRequiredJaJP = status.MustWithDetails( + statusWebhookClauseConditionValueRequired, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "ウェブフックルールのconditionのvalueは必須です", + }, + ) + errWebhookClauseConditionInvalidOperatorJaJP = status.MustWithDetails( + statusWebhookClauseConditionInvalidOperator, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "ウェブフックルールのconditionのoperatorが不正です", + }, + ) + errNotFoundJaJP = status.MustWithDetails( + statusNotFound, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "データが存在しません", + }, + ) + errAlreadyDeletedJaJP = status.MustWithDetails( + statusAlreadyDeleted, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "データがすでに削除済みです", + }, + ) + errOpsEventRateClauseGoalNotFoundJaJP = status.MustWithDetails( + statusOpsEventRateClauseGoalNotFound, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "イベントレートルールのgoalが存在しません", + }, + ) + errAlreadyExistsJaJP = status.MustWithDetails( + statusAlreadyExists, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "同じidのデータがすでに存在します", + }, + ) + errUnauthenticatedJaJP = status.MustWithDetails( + statusUnauthenticated, + 
&errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "認証されていません", + }, + ) + errPermissionDeniedJaJP = status.MustWithDetails( + statusPermissionDenied, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "権限がありません", + }, + ) +) + +func localizedError(s *gstatus.Status, loc string) error { + // handle loc if multi-lang is necessary + switch s { + case statusInternal: + return errInternalJaJP + case statusUnknownOpsType: + return errUnknownOpsTypeJaJP + case statusInvalidCursor: + return errInvalidCursorJaJP + case statusInvalidOrderBy: + return errInvalidOrderByJaJP + case statusNoCommand: + return errNoCommandJaJP + case statusIDRequired: + return errIDRequiredJaJP + case statusFeatureIDRequired: + return errFeatureIDRequiredJaJP + case statusClauseRequired: + return errClauseRequiredJaJP + case statusClauseIDRequired: + return errClauseIDRequiredJaJP + case statusIncompatibleOpsType: + return errIncompatibleOpsTypeJaJP + case statusOpsEventRateClauseRequired: + return errOpsEventRateClauseRequiredJaJP + case statusOpsEventRateClauseFeatureVersionRequired: + return errOpsEventRateClauseFeatureVersionRequiredJaJP + case statusOpsEventRateClauseVariationIDRequired: + return errOpsEventRateClauseVariationIDRequiredJaJP + case statusOpsEventRateClauseGoalIDRequired: + return errOpsEventRateClauseGoalIDRequiredJaJP + case statusOpsEventRateClauseMinCountRequired: + return errOpsEventRateClauseMinCountRequiredJaJP + case statusOpsEventRateClauseMinCountRequired: + return errOpsEventRateClauseInvalidThredsholdJaJP + case statusDatetimeClauseRequired: + return errDatetimeClauseRequiredJaJP + case statusDatetimeClauseInvalidTime: + return errDatetimeClauseInvalidTimeJaJP + case statusWebhookNotFound: + return errWebhookNotFoundJaJP + case statusWebhookClauseRequired: + return errWebhookClauseRequiredJaJP + case statusWebhookClauseWebhookIDRequired: + return errWebhookClauseWebhookIDRequiredJaJP + case statusWebhookClauseConditionRequired: + return 
errWebhookClauseConditionRequiredJaJP + case statusWebhookClauseConditionFilterRequired: + return errWebhookClauseConditionFilterRequiredJaJP + case statusWebhookClauseConditionInvalidOperator: + return errWebhookClauseConditionInvalidOperatorJaJP + case statusNotFound: + return errNotFoundJaJP + case statusAlreadyDeleted: + return errAlreadyDeletedJaJP + case statusOpsEventRateClauseGoalNotFound: + return errOpsEventRateClauseGoalNotFoundJaJP + case statusAlreadyExists: + return errAlreadyExistsJaJP + case statusUnauthenticated: + return errUnauthenticatedJaJP + case statusPermissionDenied: + return errPermissionDeniedJaJP + default: + return errInternalJaJP + } +} diff --git a/pkg/autoops/api/operation.go b/pkg/autoops/api/operation.go new file mode 100644 index 000000000..a412f0afc --- /dev/null +++ b/pkg/autoops/api/operation.go @@ -0,0 +1,100 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "context" + + "go.uber.org/zap" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/bucketeer-io/bucketeer/pkg/autoops/domain" + featureclient "github.com/bucketeer-io/bucketeer/pkg/feature/client" + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/log" + autoopsproto "github.com/bucketeer-io/bucketeer/proto/autoops" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +type Command interface{} + +func ExecuteOperation( + ctx context.Context, + environmentNamespace string, + autoOpsRule *domain.AutoOpsRule, + featureClient featureclient.Client, + logger *zap.Logger, +) error { + switch autoOpsRule.OpsType { + case autoopsproto.OpsType_ENABLE_FEATURE: + return enableFeature(ctx, environmentNamespace, autoOpsRule, featureClient, logger) + case autoopsproto.OpsType_DISABLE_FEATURE: + return disableFeature(ctx, environmentNamespace, autoOpsRule, featureClient, logger) + } + return localizedError(statusUnknownOpsType, locale.JaJP) +} + +func enableFeature( + ctx context.Context, + environmentNamespace string, + autoOpsRule *domain.AutoOpsRule, + featureClient featureclient.Client, + logger *zap.Logger, +) error { + req := &featureproto.EnableFeatureRequest{ + Id: autoOpsRule.FeatureId, + Command: &featureproto.EnableFeatureCommand{}, + EnvironmentNamespace: environmentNamespace, + } + _, err := featureClient.EnableFeature(ctx, req) + if code := status.Code(err); code == codes.FailedPrecondition { + logger.Warn( + "Failed to enable feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil + } + return err +} + +func disableFeature( + ctx context.Context, + environmentNamespace string, + autoOpsRule *domain.AutoOpsRule, + featureClient featureclient.Client, + logger *zap.Logger, +) error { + req := &featureproto.DisableFeatureRequest{ + Id: 
autoOpsRule.FeatureId, + Command: &featureproto.DisableFeatureCommand{}, + EnvironmentNamespace: environmentNamespace, + } + _, err := featureClient.DisableFeature(ctx, req) + if code := status.Code(err); code == codes.FailedPrecondition { + logger.Warn( + "Failed to disable feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil + } + return err +} diff --git a/pkg/autoops/api/webhook.go b/pkg/autoops/api/webhook.go new file mode 100644 index 000000000..303d41be0 --- /dev/null +++ b/pkg/autoops/api/webhook.go @@ -0,0 +1,522 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "context" + "encoding/base64" + "strconv" + + "go.uber.org/zap" + "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc/status" + + "github.com/bucketeer-io/bucketeer/pkg/autoops/command" + "github.com/bucketeer-io/bucketeer/pkg/autoops/domain" + v2as "github.com/bucketeer-io/bucketeer/pkg/autoops/storage/v2" + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + "github.com/bucketeer-io/bucketeer/pkg/uuid" + accountpb "github.com/bucketeer-io/bucketeer/proto/account" + autoopspb "github.com/bucketeer-io/bucketeer/proto/autoops" + eventpb "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +const ( + webhookAuthKey = "auth" +) + +func (s *AutoOpsService) CreateWebhook( + ctx context.Context, + req *autoopspb.CreateWebhookRequest, +) (*autoopspb.CreateWebhookResponse, error) { + editor, err := s.checkRole(ctx, accountpb.Account_EDITOR, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + localizer := locale.NewLocalizer(locale.NewLocale(locale.JaJP)) + status, err := validateCreateWebhook(req, localizer) + if err != nil { + return nil, s.reportInternalServerError(ctx, err, req.EnvironmentNamespace, localizer) + } + if status != nil { + s.logger.Error( + "Failed to validate webhook create request", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(status.Err()), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, status.Err() + } + resp, err := s.createWebhook(ctx, req, editor) + if err != nil { + return nil, s.reportInternalServerError(ctx, err, req.EnvironmentNamespace, localizer) + } + return resp, nil +} + +func (s *AutoOpsService) createWebhook( + ctx context.Context, + req *autoopspb.CreateWebhookRequest, + editor *eventpb.Editor, +) (*autoopspb.CreateWebhookResponse, error) { + id, err := uuid.NewUUID() + if err != nil { + return nil, 
err + } + secret, err := s.generateWebhookSecret(ctx, id.String(), req.EnvironmentNamespace) + if err != nil { + return nil, err + } + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + return nil, err + } + + webhook := domain.NewWebhook(id.String(), req.Command.Name, req.Command.Description) + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + webhookStorage := v2as.NewWebhookStorage(tx) + err := webhookStorage.CreateWebhook(ctx, webhook, req.EnvironmentNamespace) + if err != nil { + return err + } + handler := command.NewWebhookCommandHandler( + editor, + s.publisher, + webhook, + req.EnvironmentNamespace, + ) + if err := handler.Handle(ctx, req.Command); err != nil { + return err + } + return nil + }) + if err != nil { + return nil, err + } + return &autoopspb.CreateWebhookResponse{ + Webhook: webhook.Webhook, + Url: s.createWebhookURL(secret), + }, nil +} + +func (s *AutoOpsService) generateWebhookSecret( + ctx context.Context, + id, environmentNamespace string, +) (string, error) { + ws := domain.NewWebhookSecret(id, environmentNamespace) + encoded, err := ws.Marshal() + if err != nil { + s.logger.Error( + "Failed to marshal webhook secret", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return "", err + } + encrypted, err := s.webhookCryptoUtil.Encrypt(ctx, encoded) + if err != nil { + s.logger.Error( + "Failed to encrypt webhook secret", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return "", err + } + return base64.RawURLEncoding.EncodeToString(encrypted), nil +} + +func validateCreateWebhook( + req *autoopspb.CreateWebhookRequest, + localizer locale.Localizer, +) (*status.Status, error) { + if req.Command == nil { + return statusInvalidRequest.WithDetails(&errdetails.LocalizedMessage{ + Locale: localizer.GetLocale(), + Message: localizer.MustLocalizeWithTemplate(locale.RequiredFieldTemplate, "command"), + }) + } + if req.Command.Name == "" { + return 
statusInvalidRequest.WithDetails(&errdetails.LocalizedMessage{ + Locale: localizer.GetLocale(), + Message: localizer.MustLocalizeWithTemplate(locale.RequiredFieldTemplate, "webhook name"), + }) + } + return nil, nil +} + +func (s *AutoOpsService) GetWebhook( + ctx context.Context, + req *autoopspb.GetWebhookRequest, +) (*autoopspb.GetWebhookResponse, error) { + _, err := s.checkRole(ctx, accountpb.Account_VIEWER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + localizer := locale.NewLocalizer(locale.NewLocale(locale.JaJP)) + status, err := validateGetWebhookRequest(req, localizer) + if err != nil { + return nil, s.reportInternalServerError(ctx, err, req.EnvironmentNamespace, localizer) + } + if status != nil { + s.logger.Error( + "Failed to validate webhook get request", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(status.Err()), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, status.Err() + } + secret, err := s.generateWebhookSecret(ctx, req.Id, req.EnvironmentNamespace) + if err != nil { + return nil, s.reportInternalServerError(ctx, err, req.EnvironmentNamespace, localizer) + } + webhookStorage := v2as.NewWebhookStorage(s.mysqlClient) + webhook, err := webhookStorage.GetWebhook(ctx, req.Id, req.EnvironmentNamespace) + if err != nil { + s.logger.Error( + "Failed to get webhook", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("id", req.Id), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + if err == v2as.ErrWebhookNotFound { + dt, err := statusWebhookNotFound.WithDetails(&errdetails.LocalizedMessage{ + Locale: localizer.GetLocale(), + Message: localizer.MustLocalizeWithTemplate(locale.NotFoundError, "webhook"), + }) + if err != nil { + return nil, statusInternal.Err() + } + return nil, dt.Err() + } + return nil, s.reportInternalServerError(ctx, err, req.EnvironmentNamespace, localizer) + } + return &autoopspb.GetWebhookResponse{ 
+ Webhook: webhook.Webhook, + Url: s.createWebhookURL(secret), + }, nil +} + +func validateGetWebhookRequest( + req *autoopspb.GetWebhookRequest, + localizer locale.Localizer, +) (*status.Status, error) { + if req.Id == "" { + return statusInvalidRequest.WithDetails(&errdetails.LocalizedMessage{ + Locale: localizer.GetLocale(), + Message: localizer.MustLocalizeWithTemplate(locale.RequiredFieldTemplate, "id"), + }) + } + return nil, nil +} + +func (s *AutoOpsService) ListWebhooks( + ctx context.Context, + req *autoopspb.ListWebhooksRequest, +) (*autoopspb.ListWebhooksResponse, error) { + _, err := s.checkRole(ctx, accountpb.Account_VIEWER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + localizer := locale.NewLocalizer(locale.NewLocale(locale.JaJP)) + whereParts := []mysql.WherePart{ + mysql.NewFilter("environment_namespace", "=", req.EnvironmentNamespace), + } + if req.SearchKeyword != "" { + whereParts = append(whereParts, mysql.NewSearchQuery([]string{"id", "name", "description"}, req.SearchKeyword)) + } + orders, err := s.newListOrders(req.OrderBy, req.OrderDirection, localizer) + if err != nil { + s.logger.Error( + "Invalid argument", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, err + } + limit := int(req.PageSize) + cursor := req.Cursor + if cursor == "" { + cursor = "0" + } + offset, err := strconv.Atoi(cursor) + if err != nil { + dt, err := statusInvalidCursor.WithDetails(&errdetails.LocalizedMessage{ + Locale: localizer.GetLocale(), + Message: localizer.MustLocalizeWithTemplate(locale.InvalidArgumentError, "cursor"), + }) + if err != nil { + return nil, statusInternal.Err() + } + return nil, dt.Err() + } + webhookStorage := v2as.NewWebhookStorage(s.mysqlClient) + webhooks, nextCursor, totalCount, err := webhookStorage.ListWebhooks( + ctx, + whereParts, + orders, + limit, + offset, + ) + if err != nil { + s.logger.Error( + 
"Failed to list webhooks", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, err + } + return &autoopspb.ListWebhooksResponse{ + Webhooks: webhooks, + Cursor: strconv.Itoa(nextCursor), + TotalCount: totalCount, + }, nil +} + +func (s *AutoOpsService) newListOrders( + orderBy autoopspb.ListWebhooksRequest_OrderBy, + orderDirection autoopspb.ListWebhooksRequest_OrderDirection, + localizer locale.Localizer, +) ([]*mysql.Order, error) { + var column string + switch orderBy { + case autoopspb.ListWebhooksRequest_DEFAULT, + autoopspb.ListWebhooksRequest_NAME: + column = "webhook.name" + case autoopspb.ListWebhooksRequest_CREATED_AT: + column = "webhook.created_at" + case autoopspb.ListWebhooksRequest_UPDATED_AT: + column = "webhook.updated_at" + default: + dt, err := statusInvalidOrderBy.WithDetails(&errdetails.LocalizedMessage{ + Locale: localizer.GetLocale(), + Message: localizer.MustLocalizeWithTemplate(locale.InvalidArgumentError, "order_by"), + }) + if err != nil { + return nil, statusInternal.Err() + } + return nil, dt.Err() + } + direction := mysql.OrderDirectionAsc + if orderDirection == autoopspb.ListWebhooksRequest_DESC { + direction = mysql.OrderDirectionDesc + } + return []*mysql.Order{mysql.NewOrder(column, direction)}, nil +} + +func (s *AutoOpsService) UpdateWebhook( + ctx context.Context, + req *autoopspb.UpdateWebhookRequest, +) (*autoopspb.UpdateWebhookResponse, error) { + editor, err := s.checkRole(ctx, accountpb.Account_EDITOR, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + localizer := locale.NewLocalizer(locale.NewLocale(locale.JaJP)) + status, err := validateUpdateWebhookRequest(req, localizer) + if err != nil { + return nil, s.reportInternalServerError(ctx, err, req.EnvironmentNamespace, localizer) + } + if status != nil { + s.logger.Error( + "Failed to validate webhook update request", + 
log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(status.Err()), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, status.Err() + } + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return nil, s.reportInternalServerError(ctx, err, req.EnvironmentNamespace, localizer) + } + var commands []command.Command + if req.ChangeWebhookDescriptionCommand != nil { + commands = append(commands, req.ChangeWebhookDescriptionCommand) + } + if req.ChangeWebhookNameCommand != nil { + commands = append(commands, req.ChangeWebhookNameCommand) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + webhookStorage := v2as.NewWebhookStorage(tx) + webhook, err := webhookStorage.GetWebhook(ctx, req.Id, req.EnvironmentNamespace) + if err != nil { + return err + } + handler := command.NewWebhookCommandHandler(editor, s.publisher, webhook, req.EnvironmentNamespace) + for _, command := range commands { + if err := handler.Handle(ctx, command); err != nil { + return err + } + } + return webhookStorage.UpdateWebhook(ctx, webhook, req.EnvironmentNamespace) + }) + if err != nil { + if err == v2as.ErrWebhookNotFound || err == v2as.ErrAutoOpsRuleUnexpectedAffectedRows { + dt, err := statusWebhookNotFound.WithDetails(&errdetails.LocalizedMessage{ + Locale: localizer.GetLocale(), + Message: localizer.MustLocalizeWithTemplate(locale.NotFoundError, "webhook"), + }) + if err != nil { + return nil, statusInternal.Err() + } + return nil, dt.Err() + } + s.logger.Error( + "Failed to update webhook", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, s.reportInternalServerError(ctx, err, req.EnvironmentNamespace, localizer) + } + return &autoopspb.UpdateWebhookResponse{}, nil +} + +func 
validateUpdateWebhookRequest( + req *autoopspb.UpdateWebhookRequest, + localizer locale.Localizer, +) (*status.Status, error) { + if req.Id == "" { + return statusInvalidRequest.WithDetails(&errdetails.LocalizedMessage{ + Locale: localizer.GetLocale(), + Message: localizer.MustLocalizeWithTemplate(locale.RequiredFieldTemplate, "id"), + }) + } + if req.ChangeWebhookNameCommand == nil && req.ChangeWebhookDescriptionCommand == nil { + return statusInvalidRequest.WithDetails(&errdetails.LocalizedMessage{ + Locale: localizer.GetLocale(), + Message: localizer.MustLocalizeWithTemplate(locale.RequiredFieldTemplate, "command"), + }) + } + if req.ChangeWebhookNameCommand != nil && req.ChangeWebhookNameCommand.Name == "" { + return statusInvalidRequest.WithDetails(&errdetails.LocalizedMessage{ + Locale: localizer.GetLocale(), + Message: localizer.MustLocalizeWithTemplate(locale.RequiredFieldTemplate, "webhook name"), + }) + } + return nil, nil +} + +func (s *AutoOpsService) DeleteWebhook( + ctx context.Context, + req *autoopspb.DeleteWebhookRequest, +) (*autoopspb.DeleteWebhookResponse, error) { + editor, err := s.checkRole(ctx, accountpb.Account_EDITOR, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + localizer := locale.NewLocalizer(locale.NewLocale(locale.JaJP)) + status, err := validateDeleteWebhookRequest(req, localizer) + if err != nil { + return nil, s.reportInternalServerError(ctx, err, req.EnvironmentNamespace, localizer) + } + if status != nil { + s.logger.Error( + "Failed to validate webhook delete request", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(status.Err()), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, status.Err() + } + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return nil, s.reportInternalServerError(ctx, err, req.EnvironmentNamespace, 
localizer) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + webhookStorage := v2as.NewWebhookStorage(tx) + webhook, err := webhookStorage.GetWebhook(ctx, req.Id, req.EnvironmentNamespace) + if err != nil { + return err + } + handler := command.NewWebhookCommandHandler(editor, s.publisher, webhook, req.EnvironmentNamespace) + if err := handler.Handle(ctx, req.Command); err != nil { + return err + } + return webhookStorage.DeleteWebhook(ctx, req.Id, req.EnvironmentNamespace) + }) + if err != nil { + if err == v2as.ErrWebhookNotFound || err == v2as.ErrAutoOpsRuleUnexpectedAffectedRows { + dt, err := statusWebhookNotFound.WithDetails(&errdetails.LocalizedMessage{ + Locale: localizer.GetLocale(), + Message: localizer.MustLocalizeWithTemplate(locale.NotFoundError, "webhook"), + }) + if err != nil { + return nil, statusInternal.Err() + } + return nil, dt.Err() + } + s.logger.Error( + "Failed to delete webhook", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, s.reportInternalServerError(ctx, err, req.EnvironmentNamespace, localizer) + } + return &autoopspb.DeleteWebhookResponse{}, nil +} + +func validateDeleteWebhookRequest( + req *autoopspb.DeleteWebhookRequest, + localizer locale.Localizer, +) (*status.Status, error) { + if req.Id == "" { + return statusInvalidRequest.WithDetails(&errdetails.LocalizedMessage{ + Locale: localizer.GetLocale(), + Message: localizer.MustLocalizeWithTemplate(locale.RequiredFieldTemplate, "id"), + }) + } + if req.Command == nil { + return statusInvalidRequest.WithDetails(&errdetails.LocalizedMessage{ + Locale: localizer.GetLocale(), + Message: localizer.MustLocalizeWithTemplate(locale.RequiredFieldTemplate, "command"), + }) + } + return nil, nil +} + +func (s *AutoOpsService) createWebhookURL(secret string) string { + url := s.webhookBaseURL + q := url.Query() + q.Set(webhookAuthKey, secret) + url.RawQuery = 
q.Encode() + return url.String() +} diff --git a/pkg/autoops/api/webhook_test.go b/pkg/autoops/api/webhook_test.go new file mode 100644 index 000000000..7f1489ec5 --- /dev/null +++ b/pkg/autoops/api/webhook_test.go @@ -0,0 +1,441 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "context" + "encoding/base64" + "encoding/json" + "errors" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc/status" + + "github.com/bucketeer-io/bucketeer/pkg/locale" + mysqlmock "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + autoopspb "github.com/bucketeer-io/bucketeer/proto/autoops" +) + +func TestCreateWebhook(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + localizer := locale.NewLocalizer(locale.NewLocale(locale.JaJP)) + createError := func(msg string) error { + st, err := statusInvalidRequest.WithDetails(&errdetails.LocalizedMessage{ + Locale: localizer.GetLocale(), + Message: msg, + }) + require.NoError(t, err) + return st.Err() + } + baseSetup := func(s *AutoOpsService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + 
).Return(nil) + } + + patterns := map[string]struct { + setup func(*AutoOpsService) + req *autoopspb.CreateWebhookRequest + resp *autoopspb.CreateWebhookResponse + expectedErr error + }{ + "err: ErrNoCommand": { + req: &autoopspb.CreateWebhookRequest{}, + expectedErr: createError(localizer.MustLocalizeWithTemplate(locale.RequiredFieldTemplate, "command")), + }, + "err: ErrWebhookNameRequired": { + req: &autoopspb.CreateWebhookRequest{ + Command: &autoopspb.CreateWebhookCommand{}, + }, + expectedErr: createError(localizer.MustLocalizeWithTemplate(locale.RequiredFieldTemplate, "webhook name")), + }, + "success": { + setup: baseSetup, + req: &autoopspb.CreateWebhookRequest{ + Command: &autoopspb.CreateWebhookCommand{ + Name: "name", + Description: "description", + }, + }, + resp: &autoopspb.CreateWebhookResponse{ + Webhook: &autoopspb.Webhook{ + Name: "name", + Description: "description", + }, + Url: "https://bucketeer.io/hook?auth=secret", + }, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + s := createAutoOpsService(mockController, nil) + if p.setup != nil { + p.setup(s) + } + resp, err := s.CreateWebhook(createContextWithTokenRoleOwner(t), p.req) + if p.resp != nil { + assert.Equal(t, p.resp.Webhook.Name, resp.Webhook.Name) + assert.Equal(t, p.resp.Webhook.Description, resp.Webhook.Description) + } + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestGetWebhook(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + localizer := locale.NewLocalizer(locale.NewLocale(locale.JaJP)) + createError := func(msg string) error { + status, err := statusInvalidRequest.WithDetails(&errdetails.LocalizedMessage{ + Locale: localizer.GetLocale(), + Message: msg, + }) + require.NoError(t, err) + return status.Err() + } + ctx := createContextWithTokenRoleOwner(t) + service := createAutoOpsService(mockController, nil) + + patterns := map[string]struct { + setup func(*AutoOpsService) + req 
*autoopspb.GetWebhookRequest + res *autoopspb.GetWebhookResponse + expectedErr error + }{ + "err: ErrNoId": { + req: &autoopspb.GetWebhookRequest{ + EnvironmentNamespace: "ns0", + }, + expectedErr: createError(localizer.MustLocalizeWithTemplate(locale.RequiredFieldTemplate, "id")), + }, + "success": { + setup: func(s *AutoOpsService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + req: &autoopspb.GetWebhookRequest{ + Id: "id-0", + EnvironmentNamespace: "ns0", + }, + res: &autoopspb.GetWebhookResponse{ + Webhook: &autoopspb.Webhook{ + Name: "", + Description: "", + }, + Url: "https://bucketeer.io/hook?auth=secret", + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + if p.setup != nil { + p.setup(service) + } + resp, err := service.GetWebhook(ctx, p.req) + if p.res != nil { + assert.Equal(t, p.res.Webhook.Name, resp.Webhook.Name) + assert.Equal(t, p.res.Webhook.Description, resp.Webhook.Name) + } + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestListWebhooks(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + ctx := createContextWithTokenRoleOwner(t) + localizer := locale.NewLocalizer(locale.NewLocale(locale.JaJP)) + createError := func(msg string) error { + status, err := statusInvalidCursor.WithDetails(&errdetails.LocalizedMessage{ + Locale: localizer.GetLocale(), + Message: msg, + }) + require.NoError(t, err) + return status.Err() + } + service := createAutoOpsService(mockController, nil) + + patterns := map[string]struct { + setup func(*AutoOpsService) + req *autoopspb.ListWebhooksRequest + res *autoopspb.ListWebhooksResponse + expectedErr error + }{ + "err: ErrInvalidArgument": { + req: &autoopspb.ListWebhooksRequest{ + EnvironmentNamespace: "ns0", + PageSize: 
int64(500), + Cursor: "abc", + OrderBy: autoopspb.ListWebhooksRequest_DEFAULT, + OrderDirection: autoopspb.ListWebhooksRequest_ASC, + SearchKeyword: "", + }, + expectedErr: createError(localizer.MustLocalizeWithTemplate(locale.InvalidArgumentError, "cursor")), + }, + "success": { + setup: func(s *AutoOpsService) { + rows := mysqlmock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + req: &autoopspb.ListWebhooksRequest{ + EnvironmentNamespace: "ns0", + PageSize: int64(500), + Cursor: "", + OrderBy: autoopspb.ListWebhooksRequest_DEFAULT, + OrderDirection: autoopspb.ListWebhooksRequest_ASC, + SearchKeyword: "", + }, + res: &autoopspb.ListWebhooksResponse{ + Webhooks: []*autoopspb.Webhook{}, + Cursor: "0", + TotalCount: 0, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + if p.setup != nil { + p.setup(service) + } + resp, err := service.ListWebhooks(ctx, p.req) + assert.Equal(t, p.res, resp) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestUpdateWebhook(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + localizer := locale.NewLocalizer(locale.NewLocale(locale.JaJP)) + createError := func(msg string) error { + status, err := statusInvalidRequest.WithDetails(&errdetails.LocalizedMessage{ + Locale: localizer.GetLocale(), + Message: msg, + }) + require.NoError(t, err) + return status.Err() + } + ctx := createContextWithTokenRoleOwner(t) + service := createAutoOpsService(mockController, nil) + + patterns := 
map[string]struct { + setup func(*AutoOpsService) + req *autoopspb.UpdateWebhookRequest + res *autoopspb.UpdateWebhookResponse + expectedErr error + }{ + "err: ErrNoId": { + req: &autoopspb.UpdateWebhookRequest{ + EnvironmentNamespace: "ns0", + ChangeWebhookDescriptionCommand: &autoopspb.ChangeWebhookDescriptionCommand{}, + }, + expectedErr: createError(localizer.MustLocalizeWithTemplate(locale.RequiredFieldTemplate, "id")), + }, + "err: ErrNoCommand": { + req: &autoopspb.UpdateWebhookRequest{ + Id: "id-0", + EnvironmentNamespace: "ns0", + }, + expectedErr: createError(localizer.MustLocalizeWithTemplate(locale.RequiredFieldTemplate, "command")), + }, + "err: ErrNoName": { + req: &autoopspb.UpdateWebhookRequest{ + Id: "id-0", + EnvironmentNamespace: "ns0", + ChangeWebhookNameCommand: &autoopspb.ChangeWebhookNameCommand{}, + }, + expectedErr: createError(localizer.MustLocalizeWithTemplate(locale.RequiredFieldTemplate, "webhook name")), + }, + "success": { + setup: func(s *AutoOpsService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + req: &autoopspb.UpdateWebhookRequest{ + Id: "id-0", + EnvironmentNamespace: "ns0", + ChangeWebhookNameCommand: &autoopspb.ChangeWebhookNameCommand{ + Name: "name", + }, + ChangeWebhookDescriptionCommand: &autoopspb.ChangeWebhookDescriptionCommand{}, + }, + res: &autoopspb.UpdateWebhookResponse{}, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + if p.setup != nil { + p.setup(service) + } + resp, err := service.UpdateWebhook(ctx, p.req) + assert.Equal(t, p.res, resp) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestDeleteWebhook(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + localizer := locale.NewLocalizer(locale.NewLocale(locale.JaJP)) + 
createError := func(msg string, status *status.Status) error { + status, err := status.WithDetails(&errdetails.LocalizedMessage{ + Locale: localizer.GetLocale(), + Message: msg, + }) + require.NoError(t, err) + return status.Err() + } + ctx := createContextWithTokenRoleOwner(t) + service := createAutoOpsService(mockController, nil) + + patterns := map[string]struct { + setup func(*AutoOpsService) + req *autoopspb.DeleteWebhookRequest + res *autoopspb.DeleteWebhookResponse + expectedErr error + }{ + "err: ErrNoId": { + req: &autoopspb.DeleteWebhookRequest{ + EnvironmentNamespace: "ns0", + Command: &autoopspb.DeleteWebhookCommand{}, + }, + expectedErr: createError( + localizer.MustLocalizeWithTemplate(locale.RequiredFieldTemplate, "id"), + statusInvalidRequest, + ), + }, + "err: ErrNoCommand": { + req: &autoopspb.DeleteWebhookRequest{ + Id: "id-0", + EnvironmentNamespace: "ns0", + }, + expectedErr: createError( + localizer.MustLocalizeWithTemplate(locale.RequiredFieldTemplate, "command"), + statusInvalidRequest, + ), + }, + "err: InternalErr": { + setup: func(s *AutoOpsService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(errors.New("Internal error")) + }, + req: &autoopspb.DeleteWebhookRequest{ + Id: "id-0", + EnvironmentNamespace: "ns0", + Command: &autoopspb.DeleteWebhookCommand{}, + }, + expectedErr: createError( + localizer.MustLocalize(locale.InternalServerError), + statusInternal, + ), + }, + "success": { + setup: func(s *AutoOpsService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + req: &autoopspb.DeleteWebhookRequest{ + Id: "id-0", + EnvironmentNamespace: "ns0", + Command: &autoopspb.DeleteWebhookCommand{}, + }, + res: 
&autoopspb.DeleteWebhookResponse{}, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + if p.setup != nil { + p.setup(service) + } + resp, err := service.DeleteWebhook(ctx, p.req) + assert.Equal(t, p.res, resp) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +type dummyWebhookSecret struct { + WebhookID string `json:"webhook_id"` + EnvironmentNamespace string `json:"environment_namespace"` +} + +func TestGenerateWebhookSecret(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + service := createAutoOpsService(mockController, nil) + ctx := context.TODO() + + testcases := map[string]struct { + id string + environmentNamespace string + }{ + "success": { + id: "id-1", + environmentNamespace: "ns-1", + }, + } + for msg, p := range testcases { + t.Run(msg, func(t *testing.T) { + secret, err := service.generateWebhookSecret(ctx, p.id, p.environmentNamespace) + require.NoError(t, err) + ws := dummyWebhookSecret{} + decoded, err := base64.RawURLEncoding.DecodeString(secret) + require.NoError(t, err) + err = json.Unmarshal(decoded, &ws) + require.NoError(t, err) + assert.Equal(t, p.environmentNamespace, ws.EnvironmentNamespace) + assert.Equal(t, p.id, ws.WebhookID) + }) + } +} diff --git a/pkg/autoops/client/BUILD.bazel b/pkg/autoops/client/BUILD.bazel new file mode 100644 index 000000000..1e4c23b4e --- /dev/null +++ b/pkg/autoops/client/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["client.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/autoops/client", + visibility = ["//visibility:public"], + deps = [ + "//pkg/rpc/client:go_default_library", + "//proto/autoops:go_default_library", + "@org_golang_google_grpc//:go_default_library", + ], +) diff --git a/pkg/autoops/client/client.go b/pkg/autoops/client/client.go new file mode 100644 index 000000000..7197144da --- 
/dev/null +++ b/pkg/autoops/client/client.go @@ -0,0 +1,50 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package client + +import ( + "google.golang.org/grpc" + + rpcclient "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + proto "github.com/bucketeer-io/bucketeer/proto/autoops" +) + +type Client interface { + proto.AutoOpsServiceClient + Close() +} + +type client struct { + proto.AutoOpsServiceClient + address string + connection *grpc.ClientConn +} + +func NewClient(addr, certPath string, opts ...rpcclient.Option) (Client, error) { + conn, err := rpcclient.NewClientConn(addr, certPath, opts...) 
+ if err != nil { + return nil, err + } + return &client{ + AutoOpsServiceClient: proto.NewAutoOpsServiceClient(conn), + address: addr, + connection: conn, + }, nil +} + +func (c *client) Close() { + c.connection.Close() +} diff --git a/pkg/autoops/client/mock/BUILD.bazel b/pkg/autoops/client/mock/BUILD.bazel new file mode 100644 index 000000000..876b9fb77 --- /dev/null +++ b/pkg/autoops/client/mock/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["client.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/autoops/client/mock", + visibility = ["//visibility:public"], + deps = [ + "//proto/autoops:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@org_golang_google_grpc//:go_default_library", + ], +) diff --git a/pkg/autoops/client/mock/client.go b/pkg/autoops/client/mock/client.go new file mode 100644 index 000000000..883ff1fa0 --- /dev/null +++ b/pkg/autoops/client/mock/client.go @@ -0,0 +1,290 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: client.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + grpc "google.golang.org/grpc" + + autoops "github.com/bucketeer-io/bucketeer/proto/autoops" +) + +// MockClient is a mock of Client interface. +type MockClient struct { + ctrl *gomock.Controller + recorder *MockClientMockRecorder +} + +// MockClientMockRecorder is the mock recorder for MockClient. +type MockClientMockRecorder struct { + mock *MockClient +} + +// NewMockClient creates a new mock instance. +func NewMockClient(ctrl *gomock.Controller) *MockClient { + mock := &MockClient{ctrl: ctrl} + mock.recorder = &MockClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. 
+func (m *MockClient) EXPECT() *MockClientMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockClient) Close() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Close") +} + +// Close indicates an expected call of Close. +func (mr *MockClientMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockClient)(nil).Close)) +} + +// CreateAutoOpsRule mocks base method. +func (m *MockClient) CreateAutoOpsRule(ctx context.Context, in *autoops.CreateAutoOpsRuleRequest, opts ...grpc.CallOption) (*autoops.CreateAutoOpsRuleResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateAutoOpsRule", varargs...) + ret0, _ := ret[0].(*autoops.CreateAutoOpsRuleResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateAutoOpsRule indicates an expected call of CreateAutoOpsRule. +func (mr *MockClientMockRecorder) CreateAutoOpsRule(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateAutoOpsRule", reflect.TypeOf((*MockClient)(nil).CreateAutoOpsRule), varargs...) +} + +// CreateWebhook mocks base method. +func (m *MockClient) CreateWebhook(ctx context.Context, in *autoops.CreateWebhookRequest, opts ...grpc.CallOption) (*autoops.CreateWebhookResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateWebhook", varargs...) + ret0, _ := ret[0].(*autoops.CreateWebhookResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateWebhook indicates an expected call of CreateWebhook. 
+func (mr *MockClientMockRecorder) CreateWebhook(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateWebhook", reflect.TypeOf((*MockClient)(nil).CreateWebhook), varargs...) +} + +// DeleteAutoOpsRule mocks base method. +func (m *MockClient) DeleteAutoOpsRule(ctx context.Context, in *autoops.DeleteAutoOpsRuleRequest, opts ...grpc.CallOption) (*autoops.DeleteAutoOpsRuleResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteAutoOpsRule", varargs...) + ret0, _ := ret[0].(*autoops.DeleteAutoOpsRuleResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteAutoOpsRule indicates an expected call of DeleteAutoOpsRule. +func (mr *MockClientMockRecorder) DeleteAutoOpsRule(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAutoOpsRule", reflect.TypeOf((*MockClient)(nil).DeleteAutoOpsRule), varargs...) +} + +// DeleteWebhook mocks base method. +func (m *MockClient) DeleteWebhook(ctx context.Context, in *autoops.DeleteWebhookRequest, opts ...grpc.CallOption) (*autoops.DeleteWebhookResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteWebhook", varargs...) + ret0, _ := ret[0].(*autoops.DeleteWebhookResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteWebhook indicates an expected call of DeleteWebhook. +func (mr *MockClientMockRecorder) DeleteWebhook(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteWebhook", reflect.TypeOf((*MockClient)(nil).DeleteWebhook), varargs...) +} + +// ExecuteAutoOps mocks base method. +func (m *MockClient) ExecuteAutoOps(ctx context.Context, in *autoops.ExecuteAutoOpsRequest, opts ...grpc.CallOption) (*autoops.ExecuteAutoOpsResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ExecuteAutoOps", varargs...) + ret0, _ := ret[0].(*autoops.ExecuteAutoOpsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ExecuteAutoOps indicates an expected call of ExecuteAutoOps. +func (mr *MockClientMockRecorder) ExecuteAutoOps(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteAutoOps", reflect.TypeOf((*MockClient)(nil).ExecuteAutoOps), varargs...) +} + +// GetAutoOpsRule mocks base method. +func (m *MockClient) GetAutoOpsRule(ctx context.Context, in *autoops.GetAutoOpsRuleRequest, opts ...grpc.CallOption) (*autoops.GetAutoOpsRuleResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetAutoOpsRule", varargs...) + ret0, _ := ret[0].(*autoops.GetAutoOpsRuleResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAutoOpsRule indicates an expected call of GetAutoOpsRule. +func (mr *MockClientMockRecorder) GetAutoOpsRule(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAutoOpsRule", reflect.TypeOf((*MockClient)(nil).GetAutoOpsRule), varargs...) +} + +// GetWebhook mocks base method. 
+func (m *MockClient) GetWebhook(ctx context.Context, in *autoops.GetWebhookRequest, opts ...grpc.CallOption) (*autoops.GetWebhookResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetWebhook", varargs...) + ret0, _ := ret[0].(*autoops.GetWebhookResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetWebhook indicates an expected call of GetWebhook. +func (mr *MockClientMockRecorder) GetWebhook(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWebhook", reflect.TypeOf((*MockClient)(nil).GetWebhook), varargs...) +} + +// ListAutoOpsRules mocks base method. +func (m *MockClient) ListAutoOpsRules(ctx context.Context, in *autoops.ListAutoOpsRulesRequest, opts ...grpc.CallOption) (*autoops.ListAutoOpsRulesResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListAutoOpsRules", varargs...) + ret0, _ := ret[0].(*autoops.ListAutoOpsRulesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListAutoOpsRules indicates an expected call of ListAutoOpsRules. +func (mr *MockClientMockRecorder) ListAutoOpsRules(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAutoOpsRules", reflect.TypeOf((*MockClient)(nil).ListAutoOpsRules), varargs...) +} + +// ListOpsCounts mocks base method. 
+func (m *MockClient) ListOpsCounts(ctx context.Context, in *autoops.ListOpsCountsRequest, opts ...grpc.CallOption) (*autoops.ListOpsCountsResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListOpsCounts", varargs...) + ret0, _ := ret[0].(*autoops.ListOpsCountsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListOpsCounts indicates an expected call of ListOpsCounts. +func (mr *MockClientMockRecorder) ListOpsCounts(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListOpsCounts", reflect.TypeOf((*MockClient)(nil).ListOpsCounts), varargs...) +} + +// ListWebhooks mocks base method. +func (m *MockClient) ListWebhooks(ctx context.Context, in *autoops.ListWebhooksRequest, opts ...grpc.CallOption) (*autoops.ListWebhooksResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListWebhooks", varargs...) + ret0, _ := ret[0].(*autoops.ListWebhooksResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListWebhooks indicates an expected call of ListWebhooks. +func (mr *MockClientMockRecorder) ListWebhooks(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListWebhooks", reflect.TypeOf((*MockClient)(nil).ListWebhooks), varargs...) +} + +// UpdateAutoOpsRule mocks base method. 
+func (m *MockClient) UpdateAutoOpsRule(ctx context.Context, in *autoops.UpdateAutoOpsRuleRequest, opts ...grpc.CallOption) (*autoops.UpdateAutoOpsRuleResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdateAutoOpsRule", varargs...) + ret0, _ := ret[0].(*autoops.UpdateAutoOpsRuleResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateAutoOpsRule indicates an expected call of UpdateAutoOpsRule. +func (mr *MockClientMockRecorder) UpdateAutoOpsRule(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAutoOpsRule", reflect.TypeOf((*MockClient)(nil).UpdateAutoOpsRule), varargs...) +} + +// UpdateWebhook mocks base method. +func (m *MockClient) UpdateWebhook(ctx context.Context, in *autoops.UpdateWebhookRequest, opts ...grpc.CallOption) (*autoops.UpdateWebhookResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdateWebhook", varargs...) + ret0, _ := ret[0].(*autoops.UpdateWebhookResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateWebhook indicates an expected call of UpdateWebhook. +func (mr *MockClientMockRecorder) UpdateWebhook(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateWebhook", reflect.TypeOf((*MockClient)(nil).UpdateWebhook), varargs...) 
+} diff --git a/pkg/autoops/cmd/server/BUILD.bazel b/pkg/autoops/cmd/server/BUILD.bazel new file mode 100644 index 000000000..51f658f91 --- /dev/null +++ b/pkg/autoops/cmd/server/BUILD.bazel @@ -0,0 +1,29 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["server.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/autoops/cmd/server", + visibility = ["//visibility:public"], + deps = [ + "//pkg/account/client:go_default_library", + "//pkg/auth/client:go_default_library", + "//pkg/autoops/api:go_default_library", + "//pkg/autoops/webhookhandler:go_default_library", + "//pkg/cli:go_default_library", + "//pkg/crypto:go_default_library", + "//pkg/experiment/client:go_default_library", + "//pkg/feature/client:go_default_library", + "//pkg/health:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/pubsub:go_default_library", + "//pkg/pubsub/publisher:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/rpc/client:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//pkg/token:go_default_library", + "@com_google_cloud_go_kms//apiv1:go_default_library", + "@in_gopkg_alecthomas_kingpin_v2//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/autoops/cmd/server/server.go b/pkg/autoops/cmd/server/server.go new file mode 100644 index 000000000..cddf1cd1f --- /dev/null +++ b/pkg/autoops/cmd/server/server.go @@ -0,0 +1,298 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + "context" + "net/url" + "os" + "path" + "time" + + kms "cloud.google.com/go/kms/apiv1" + "go.uber.org/zap" + kingpin "gopkg.in/alecthomas/kingpin.v2" + + accountclient "github.com/bucketeer-io/bucketeer/pkg/account/client" + authclient "github.com/bucketeer-io/bucketeer/pkg/auth/client" + "github.com/bucketeer-io/bucketeer/pkg/autoops/api" + "github.com/bucketeer-io/bucketeer/pkg/autoops/webhookhandler" + "github.com/bucketeer-io/bucketeer/pkg/cli" + "github.com/bucketeer-io/bucketeer/pkg/crypto" + experimentclient "github.com/bucketeer-io/bucketeer/pkg/experiment/client" + featureclient "github.com/bucketeer-io/bucketeer/pkg/feature/client" + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/pubsub" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + "github.com/bucketeer-io/bucketeer/pkg/token" +) + +const ( + command = "server" + webhookPath = "hook" +) + +type server struct { + *kingpin.CmdClause + port *int + project *string + mysqlUser *string + mysqlPass *string + mysqlHost *string + mysqlPort *int + mysqlDBName *string + domainEventTopic *string + accountService *string + authService *string + featureService *string + experimentService *string + certPath *string + keyPath *string + serviceTokenPath *string + + oauthKeyPath *string + oauthClientID *string + oauthIssuer *string + + webhookBaseURL *string + webhookKMSResourceName *string +} + +func RegisterServerCommand(r cli.CommandRegistry, p cli.ParentCommand) cli.Command { + cmd := p.Command(command, "Start the gRPC server") + server := &server{ + CmdClause: cmd, + port: cmd.Flag("port", "Port to bind 
to.").Default("9090").Int(), + project: cmd.Flag("project", "Google Cloud project name.").Required().String(), + mysqlUser: cmd.Flag("mysql-user", "MySQL user.").Required().String(), + mysqlPass: cmd.Flag("mysql-pass", "MySQL password.").Required().String(), + mysqlHost: cmd.Flag("mysql-host", "MySQL host.").Required().String(), + mysqlPort: cmd.Flag("mysql-port", "MySQL port.").Required().Int(), + mysqlDBName: cmd.Flag("mysql-db-name", "MySQL database name.").Required().String(), + domainEventTopic: cmd.Flag("domain-event-topic", "PubSub topic to publish domain events.").Required().String(), + accountService: cmd.Flag("account-service", "bucketeer-account-service address.").Default("account:9090").String(), + authService: cmd.Flag("auth-service", "bucketeer-auth-service address.").Default("auth:9090").String(), + featureService: cmd.Flag("feature-service", "bucketeer-feature-service address.").Default("feature:9090").String(), + experimentService: cmd.Flag( + "experiment-service", + "bucketeer-experiment-service address.", + ).Default("experiment:9090").String(), + certPath: cmd.Flag("cert", "Path to TLS certificate.").Required().String(), + keyPath: cmd.Flag("key", "Path to TLS key.").Required().String(), + serviceTokenPath: cmd.Flag("service-token", "Path to service token.").Required().String(), + oauthKeyPath: cmd.Flag("oauth-key", "Path to public key used to verify oauth token.").Required().String(), + oauthClientID: cmd.Flag("oauth-client-id", "The oauth clientID registered at dex.").Required().String(), + oauthIssuer: cmd.Flag("oauth-issuer", "The url of dex issuer.").Required().String(), + webhookBaseURL: cmd.Flag("webhook-base-url", "the base url for incoming webhooks.").Required().String(), + webhookKMSResourceName: cmd.Flag( + "webhook-kms-resource-name", + "Cloud KMS resource name to encrypt and decrypt webhook credentials.", + ).Required().String(), + } + r.RegisterCommand(server) + return server +} + +func (s *server) Run(ctx context.Context, metrics 
metrics.Metrics, logger *zap.Logger) error { + registerer := metrics.DefaultRegisterer() + + *s.serviceTokenPath = s.insertTelepresenceMoutRoot(*s.serviceTokenPath) + *s.oauthKeyPath = s.insertTelepresenceMoutRoot(*s.oauthKeyPath) + *s.keyPath = s.insertTelepresenceMoutRoot(*s.keyPath) + *s.certPath = s.insertTelepresenceMoutRoot(*s.certPath) + + mysqlClient, err := s.createMySQLClient(ctx, registerer, logger) + if err != nil { + return err + } + defer mysqlClient.Close() + + publisher, err := s.createDomainEventPublisher(ctx, registerer, logger) + if err != nil { + return err + } + defer publisher.Stop() + + creds, err := client.NewPerRPCCredentials(*s.serviceTokenPath) + if err != nil { + return err + } + + featureClient, err := featureclient.NewClient(*s.featureService, *s.certPath, + client.WithPerRPCCredentials(creds), + client.WithDialTimeout(30*time.Second), + client.WithBlock(), + client.WithMetrics(registerer), + client.WithLogger(logger), + ) + if err != nil { + return err + } + defer featureClient.Close() + + experimentClient, err := experimentclient.NewClient(*s.experimentService, *s.certPath, + client.WithPerRPCCredentials(creds), + client.WithDialTimeout(30*time.Second), + client.WithBlock(), + client.WithMetrics(registerer), + client.WithLogger(logger), + ) + if err != nil { + return err + } + defer experimentClient.Close() + + accountClient, err := accountclient.NewClient(*s.accountService, *s.certPath, + client.WithPerRPCCredentials(creds), + client.WithDialTimeout(30*time.Second), + client.WithBlock(), + client.WithMetrics(registerer), + client.WithLogger(logger), + ) + if err != nil { + return err + } + defer accountClient.Close() + + authClient, err := authclient.NewClient(*s.authService, *s.certPath, + client.WithPerRPCCredentials(creds), + client.WithDialTimeout(30*time.Second), + client.WithBlock(), + client.WithMetrics(registerer), + client.WithLogger(logger), + ) + if err != nil { + return err + } + defer authClient.Close() + + u, err := 
url.Parse(*s.webhookBaseURL) + if err != nil { + return err + } + u.Path = path.Join(u.Path, webhookPath) + + kmsClient, err := kms.NewKeyManagementClient(ctx) + if err != nil { + return err + } + webhookCryptoUtil := crypto.NewCloudKMSCrypto( + kmsClient, + *s.webhookKMSResourceName, + ) + service := api.NewAutoOpsService( + mysqlClient, + featureClient, + experimentClient, + accountClient, + authClient, + publisher, + u, + webhookCryptoUtil, + api.WithLogger(logger), + ) + + verifier, err := token.NewVerifier(*s.oauthKeyPath, *s.oauthIssuer, *s.oauthClientID) + if err != nil { + return err + } + + healthChecker := health.NewGrpcChecker( + health.WithTimeout(time.Second), + health.WithCheck("metrics", metrics.Check), + ) + go healthChecker.Run(ctx) + + webhookHandler, err := webhookhandler.NewHandler( + mysqlClient, + authClient, + featureClient, + publisher, + verifier, + *s.serviceTokenPath, + webhookCryptoUtil, + webhookhandler.WithLogger(logger), + ) + if err != nil { + return err + } + + server := rpc.NewServer(service, *s.certPath, *s.keyPath, + rpc.WithPort(*s.port), + rpc.WithVerifier(verifier), + rpc.WithMetrics(registerer), + rpc.WithLogger(logger), + rpc.WithService(healthChecker), + rpc.WithHandler("/health", healthChecker), + rpc.WithHandler("/hook", webhookHandler), + ) + defer server.Stop(10 * time.Second) + go server.Run() + + <-ctx.Done() + return nil +} + +func (s *server) createMySQLClient( + ctx context.Context, + registerer metrics.Registerer, + logger *zap.Logger, +) (mysql.Client, error) { + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + return mysql.NewClient( + ctx, + *s.mysqlUser, *s.mysqlPass, *s.mysqlHost, + *s.mysqlPort, + *s.mysqlDBName, + mysql.WithLogger(logger), + mysql.WithMetrics(registerer), + ) +} + +func (s *server) createDomainEventPublisher( + ctx context.Context, + registerer metrics.Registerer, + logger *zap.Logger, +) (publisher.Publisher, error) { + ctx, cancel := context.WithTimeout(ctx, 
5*time.Second) + defer cancel() + client, err := pubsub.NewClient( + ctx, + *s.project, + pubsub.WithMetrics(registerer), + pubsub.WithLogger(logger), + ) + if err != nil { + return nil, err + } + domainPublisher, err := client.CreatePublisher(*s.domainEventTopic) + if err != nil { + return nil, err + } + return domainPublisher, nil +} + +func (s *server) insertTelepresenceMoutRoot(path string) string { + volumeRoot := os.Getenv("TELEPRESENCE_ROOT") + if volumeRoot == "" { + return path + } + return volumeRoot + path +} diff --git a/pkg/autoops/command/BUILD.bazel b/pkg/autoops/command/BUILD.bazel new file mode 100644 index 000000000..d85316f47 --- /dev/null +++ b/pkg/autoops/command/BUILD.bazel @@ -0,0 +1,37 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "auto_ops_rule.go", + "command.go", + "webhook.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/autoops/command", + visibility = ["//visibility:public"], + deps = [ + "//pkg/autoops/domain:go_default_library", + "//pkg/domainevent/domain:go_default_library", + "//pkg/pubsub/publisher:go_default_library", + "//proto/autoops:go_default_library", + "//proto/event/domain:go_default_library", + "@com_github_golang_protobuf//proto:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["auto_ops_rule_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/autoops/domain:go_default_library", + "//pkg/pubsub/publisher:go_default_library", + "//pkg/pubsub/publisher/mock:go_default_library", + "//proto/account:go_default_library", + "//proto/autoops:go_default_library", + "//proto/event/domain:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + ], +) diff --git a/pkg/autoops/command/auto_ops_rule.go b/pkg/autoops/command/auto_ops_rule.go new file mode 
100644 index 000000000..c14e545a9 --- /dev/null +++ b/pkg/autoops/command/auto_ops_rule.go @@ -0,0 +1,219 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package command + +import ( + "context" + + pb "github.com/golang/protobuf/proto" // nolint:staticcheck + + "github.com/bucketeer-io/bucketeer/pkg/autoops/domain" + domainevent "github.com/bucketeer-io/bucketeer/pkg/domainevent/domain" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + proto "github.com/bucketeer-io/bucketeer/proto/autoops" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +type autoOpsRuleCommandHandler struct { + editor *eventproto.Editor + autoOpsRule *domain.AutoOpsRule + publisher publisher.Publisher + environmentNamespace string +} + +func NewAutoOpsCommandHandler( + editor *eventproto.Editor, + autoOpsRule *domain.AutoOpsRule, + p publisher.Publisher, + environmentNamespace string, +) Handler { + return &autoOpsRuleCommandHandler{ + editor: editor, + autoOpsRule: autoOpsRule, + publisher: p, + environmentNamespace: environmentNamespace, + } +} + +func (h *autoOpsRuleCommandHandler) Handle(ctx context.Context, cmd Command) error { + switch c := cmd.(type) { + case *proto.CreateAutoOpsRuleCommand: + return h.create(ctx, c) + case *proto.ChangeAutoOpsRuleOpsTypeCommand: + return h.changeOpsType(ctx, c) + case *proto.DeleteAutoOpsRuleCommand: + return h.delete(ctx, c) + case 
*proto.ChangeAutoOpsRuleTriggeredAtCommand: + return h.changeTriggeredAt(ctx, c) + case *proto.AddOpsEventRateClauseCommand: + return h.addOpsEventRateClause(ctx, c) + case *proto.ChangeOpsEventRateClauseCommand: + return h.changeOpsEventRateClause(ctx, c) + case *proto.DeleteClauseCommand: + return h.deleteClause(ctx, c) + case *proto.AddDatetimeClauseCommand: + return h.addDatetimeClause(ctx, c) + case *proto.ChangeDatetimeClauseCommand: + return h.changeDatetimeClause(ctx, c) + case *proto.AddWebhookClauseCommand: + return h.addWebhookClause(ctx, c) + case *proto.ChangeWebhookClauseCommand: + return h.changeWebhookClause(ctx, c) + } + return errUnknownCommand +} + +func (h *autoOpsRuleCommandHandler) create(ctx context.Context, cmd *proto.CreateAutoOpsRuleCommand) error { + return h.send(ctx, eventproto.Event_AUTOOPS_RULE_CREATED, &eventproto.AutoOpsRuleCreatedEvent{ + FeatureId: h.autoOpsRule.FeatureId, + OpsType: h.autoOpsRule.OpsType, + Clauses: h.autoOpsRule.Clauses, + TriggeredAt: h.autoOpsRule.TriggeredAt, + CreatedAt: h.autoOpsRule.CreatedAt, + UpdatedAt: h.autoOpsRule.UpdatedAt, + }) +} + +func (h *autoOpsRuleCommandHandler) changeOpsType( + ctx context.Context, + cmd *proto.ChangeAutoOpsRuleOpsTypeCommand, +) error { + h.autoOpsRule.SetOpsType(cmd.OpsType) + return h.send(ctx, eventproto.Event_AUTOOPS_RULE_OPS_TYPE_CHANGED, &eventproto.AutoOpsRuleOpsTypeChangedEvent{ + OpsType: h.autoOpsRule.OpsType, + }) +} + +func (h *autoOpsRuleCommandHandler) delete(ctx context.Context, cmd *proto.DeleteAutoOpsRuleCommand) error { + h.autoOpsRule.SetDeleted() + return h.send(ctx, eventproto.Event_AUTOOPS_RULE_DELETED, &eventproto.AutoOpsRuleDeletedEvent{}) +} + +func (h *autoOpsRuleCommandHandler) changeTriggeredAt( + ctx context.Context, + cmd *proto.ChangeAutoOpsRuleTriggeredAtCommand, +) error { + h.autoOpsRule.SetTriggeredAt() + return h.send( + ctx, + eventproto.Event_AUTOOPS_RULE_TRIGGERED_AT_CHANGED, + &eventproto.AutoOpsRuleTriggeredAtChangedEvent{}, + ) +} 
+ +func (h *autoOpsRuleCommandHandler) addOpsEventRateClause( + ctx context.Context, + cmd *proto.AddOpsEventRateClauseCommand, +) error { + clause, err := h.autoOpsRule.AddOpsEventRateClause(cmd.OpsEventRateClause) + if err != nil { + return err + } + return h.send(ctx, eventproto.Event_OPS_EVENT_RATE_CLAUSE_ADDED, &eventproto.OpsEventRateClauseAddedEvent{ + ClauseId: clause.Id, + OpsEventRateClause: cmd.OpsEventRateClause, + }) +} + +func (h *autoOpsRuleCommandHandler) changeOpsEventRateClause( + ctx context.Context, + cmd *proto.ChangeOpsEventRateClauseCommand, +) error { + if err := h.autoOpsRule.ChangeOpsEventRateClause(cmd.Id, cmd.OpsEventRateClause); err != nil { + return err + } + return h.send(ctx, eventproto.Event_OPS_EVENT_RATE_CLAUSE_CHANGED, &eventproto.OpsEventRateClauseChangedEvent{ + ClauseId: cmd.Id, + OpsEventRateClause: cmd.OpsEventRateClause, + }) +} + +func (h *autoOpsRuleCommandHandler) deleteClause(ctx context.Context, cmd *proto.DeleteClauseCommand) error { + if err := h.autoOpsRule.DeleteClause(cmd.Id); err != nil { + return err + } + return h.send(ctx, eventproto.Event_AUTOOPS_RULE_CLAUSE_DELETED, &eventproto.AutoOpsRuleClauseDeletedEvent{ + ClauseId: cmd.Id, + }) +} + +func (h *autoOpsRuleCommandHandler) addDatetimeClause(ctx context.Context, cmd *proto.AddDatetimeClauseCommand) error { + clause, err := h.autoOpsRule.AddDatetimeClause(cmd.DatetimeClause) + if err != nil { + return err + } + return h.send(ctx, eventproto.Event_DATETIME_CLAUSE_ADDED, &eventproto.DatetimeClauseAddedEvent{ + ClauseId: clause.Id, + DatetimeClause: cmd.DatetimeClause, + }) +} + +func (h *autoOpsRuleCommandHandler) changeDatetimeClause( + ctx context.Context, + cmd *proto.ChangeDatetimeClauseCommand, +) error { + if err := h.autoOpsRule.ChangeDatetimeClause(cmd.Id, cmd.DatetimeClause); err != nil { + return err + } + return h.send(ctx, eventproto.Event_DATETIME_CLAUSE_CHANGED, &eventproto.DatetimeClauseChangedEvent{ + ClauseId: cmd.Id, + DatetimeClause: 
cmd.DatetimeClause, + }) +} + +func (h *autoOpsRuleCommandHandler) addWebhookClause( + ctx context.Context, + cmd *proto.AddWebhookClauseCommand, +) error { + clause, err := h.autoOpsRule.AddWebhookClause(cmd.WebhookClause) + if err != nil { + return err + } + return h.send(ctx, eventproto.Event_WEBHOOK_CLAUSE_ADDED, &eventproto.WebhookClauseAddedEvent{ + ClauseId: clause.Id, + WebhookClause: cmd.WebhookClause, + }) +} + +func (h *autoOpsRuleCommandHandler) changeWebhookClause( + ctx context.Context, + cmd *proto.ChangeWebhookClauseCommand, +) error { + if err := h.autoOpsRule.ChangeWebhookClause(cmd.Id, cmd.WebhookClause); err != nil { + return err + } + return h.send(ctx, eventproto.Event_WEBHOOK_CLAUSE_CHANGED, &eventproto.WebhookClauseChangedEvent{ + ClauseId: cmd.Id, + WebhookClause: cmd.WebhookClause, + }) +} + +func (h *autoOpsRuleCommandHandler) send(ctx context.Context, eventType eventproto.Event_Type, event pb.Message) error { + e, err := domainevent.NewEvent( + h.editor, + eventproto.Event_AUTOOPS_RULE, + h.autoOpsRule.Id, + eventType, + event, + h.environmentNamespace, + ) + if err != nil { + return err + } + if err := h.publisher.Publish(ctx, e); err != nil { + return err + } + return nil +} diff --git a/pkg/autoops/command/auto_ops_rule_test.go b/pkg/autoops/command/auto_ops_rule_test.go new file mode 100644 index 000000000..024b01b33 --- /dev/null +++ b/pkg/autoops/command/auto_ops_rule_test.go @@ -0,0 +1,318 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package command + +import ( + "context" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + domain "github.com/bucketeer-io/bucketeer/pkg/autoops/domain" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + publishermock "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher/mock" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + proto "github.com/bucketeer-io/bucketeer/proto/autoops" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +func TestChangeOpsType(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := []*struct { + input proto.OpsType + expected error + }{ + { + input: proto.OpsType_DISABLE_FEATURE, + expected: nil, + }, + } + for _, p := range patterns { + m := publishermock.NewMockPublisher(mockController) + a := newAutoOpsRule(t) + h := newAutoOpsRuleCommandHandler(m, a) + if p.expected == nil { + m.EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil) + } + cmd := &proto.ChangeAutoOpsRuleOpsTypeCommand{OpsType: proto.OpsType_DISABLE_FEATURE} + err := h.Handle(context.Background(), cmd) + assert.Equal(t, p.expected, err) + assert.Equal(t, p.input, a.OpsType) + } +} + +func TestDelete(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := []*struct { + expected error + }{ + { + expected: nil, + }, + } + for _, p := range patterns { + m := publishermock.NewMockPublisher(mockController) + a := newAutoOpsRule(t) + h := newAutoOpsRuleCommandHandler(m, a) + if p.expected == nil { + m.EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil) + } + cmd := &proto.DeleteAutoOpsRuleCommand{} + err := h.Handle(context.Background(), cmd) + assert.Equal(t, p.expected, err) + } +} + +func TestChangeTriggeredAt(t *testing.T) { + 
mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := []*struct { + expected error + }{ + { + expected: nil, + }, + } + for _, p := range patterns { + m := publishermock.NewMockPublisher(mockController) + a := newAutoOpsRule(t) + h := newAutoOpsRuleCommandHandler(m, a) + if p.expected == nil { + m.EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil) + } + cmd := &proto.ChangeAutoOpsRuleTriggeredAtCommand{} + err := h.Handle(context.Background(), cmd) + assert.Equal(t, p.expected, err) + } +} + +func TestAddOpsEventRateClause(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := []*struct { + input *proto.OpsEventRateClause + expected error + }{ + { + input: &proto.OpsEventRateClause{}, + expected: nil, + }, + } + for _, p := range patterns { + m := publishermock.NewMockPublisher(mockController) + a := newAutoOpsRule(t) + l := len(a.Clauses) + h := newAutoOpsRuleCommandHandler(m, a) + if p.expected == nil { + m.EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil) + } + cmd := &proto.AddOpsEventRateClauseCommand{OpsEventRateClause: p.input} + err := h.Handle(context.Background(), cmd) + assert.Equal(t, p.expected, err) + assert.Equal(t, l+1, len(a.Clauses)) + } +} + +func TestChangeOpsEventRateClause(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := []*struct { + input *proto.OpsEventRateClause + expected error + }{ + { + input: &proto.OpsEventRateClause{}, + expected: nil, + }, + } + for _, p := range patterns { + m := publishermock.NewMockPublisher(mockController) + a := newAutoOpsRule(t) + h := newAutoOpsRuleCommandHandler(m, a) + if p.expected == nil { + m.EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil) + } + cmd := &proto.ChangeOpsEventRateClauseCommand{Id: a.Clauses[0].Id, OpsEventRateClause: p.input} + err := h.Handle(context.Background(), cmd) + assert.Equal(t, p.expected, err) + } +} + +func 
TestDeleteClause(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := []*struct { + expected error + }{ + { + expected: nil, + }, + } + for _, p := range patterns { + m := publishermock.NewMockPublisher(mockController) + a := newAutoOpsRule(t) + l := len(a.Clauses) + h := newAutoOpsRuleCommandHandler(m, a) + if p.expected == nil { + m.EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil) + } + cmd := &proto.DeleteClauseCommand{Id: a.Clauses[0].Id} + err := h.Handle(context.Background(), cmd) + assert.Equal(t, p.expected, err) + assert.Equal(t, l-1, len(a.Clauses)) + } +} + +func TestAddDatetimeClause(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := []*struct { + input *proto.DatetimeClause + expected error + }{ + { + input: &proto.DatetimeClause{}, + expected: nil, + }, + } + for _, p := range patterns { + m := publishermock.NewMockPublisher(mockController) + a := newAutoOpsRule(t) + l := len(a.Clauses) + h := newAutoOpsRuleCommandHandler(m, a) + if p.expected == nil { + m.EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil) + } + cmd := &proto.AddDatetimeClauseCommand{DatetimeClause: p.input} + err := h.Handle(context.Background(), cmd) + assert.Equal(t, p.expected, err) + assert.Equal(t, l+1, len(a.Clauses)) + } +} + +func TestChangeDatetimeClause(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := []*struct { + input *proto.DatetimeClause + expected error + }{ + { + input: &proto.DatetimeClause{}, + expected: nil, + }, + } + for _, p := range patterns { + m := publishermock.NewMockPublisher(mockController) + a := newAutoOpsRule(t) + h := newAutoOpsRuleCommandHandler(m, a) + if p.expected == nil { + m.EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil) + } + cmd := &proto.ChangeDatetimeClauseCommand{Id: a.Clauses[0].Id, DatetimeClause: p.input} + err := h.Handle(context.Background(), cmd) + 
assert.Equal(t, p.expected, err) + } +} + +func TestAddWebhookClause(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := []*struct { + input *proto.WebhookClause + expected error + }{ + { + input: &proto.WebhookClause{}, + expected: nil, + }, + } + for _, p := range patterns { + m := publishermock.NewMockPublisher(mockController) + a := newAutoOpsRule(t) + h := newAutoOpsRuleCommandHandler(m, a) + if p.expected == nil { + m.EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil) + } + cmd := &proto.AddWebhookClauseCommand{WebhookClause: p.input} + err := h.Handle(context.Background(), cmd) + assert.Equal(t, p.expected, err) + } +} + +func TestChangeWebhookClause(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := []*struct { + input *proto.WebhookClause + expected error + }{ + { + input: &proto.WebhookClause{}, + expected: nil, + }, + } + for _, p := range patterns { + m := publishermock.NewMockPublisher(mockController) + a := newAutoOpsRule(t) + h := newAutoOpsRuleCommandHandler(m, a) + if p.expected == nil { + m.EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil) + } + cmd := &proto.ChangeWebhookClauseCommand{Id: a.Clauses[0].Id, WebhookClause: p.input} + err := h.Handle(context.Background(), cmd) + assert.Equal(t, p.expected, err) + } +} + +func newAutoOpsRule(t *testing.T) *domain.AutoOpsRule { + oerc1 := &proto.OpsEventRateClause{ + GoalId: "gid", + MinCount: 10, + ThreadsholdRate: 0.5, + Operator: proto.OpsEventRateClause_GREATER_OR_EQUAL, + } + oerc2 := &proto.OpsEventRateClause{ + GoalId: "gid", + MinCount: 10, + ThreadsholdRate: 0.5, + Operator: proto.OpsEventRateClause_GREATER_OR_EQUAL, + } + dc1 := &proto.DatetimeClause{ + Time: 1000000001, + } + dc2 := &proto.DatetimeClause{ + Time: 1000000002, + } + aor, err := domain.NewAutoOpsRule("fid", proto.OpsType_ENABLE_FEATURE, []*proto.OpsEventRateClause{oerc1, oerc2}, []*proto.DatetimeClause{dc1, 
dc2}, []*proto.WebhookClause{}) + require.NoError(t, err) + return aor +} + +func newAutoOpsRuleCommandHandler(publisher publisher.Publisher, autoOpsRule *domain.AutoOpsRule) Handler { + return NewAutoOpsCommandHandler( + &eventproto.Editor{ + Email: "email", + Role: accountproto.Account_EDITOR, + }, + autoOpsRule, + publisher, + "ns0", + ) +} diff --git a/pkg/autoops/command/command.go b/pkg/autoops/command/command.go new file mode 100644 index 000000000..352f9f6d9 --- /dev/null +++ b/pkg/autoops/command/command.go @@ -0,0 +1,30 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package command + +import ( + "context" + "errors" +) + +var ( + errUnknownCommand = errors.New("command: unknown command") +) + +type Command interface{} + +type Handler interface { + Handle(ctx context.Context, cmd Command) error +} diff --git a/pkg/autoops/command/webhook.go b/pkg/autoops/command/webhook.go new file mode 100644 index 000000000..61f3bc0b4 --- /dev/null +++ b/pkg/autoops/command/webhook.go @@ -0,0 +1,141 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package command + +import ( + "context" + + "github.com/bucketeer-io/bucketeer/pkg/autoops/domain" + domainevent "github.com/bucketeer-io/bucketeer/pkg/domainevent/domain" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + autoopspb "github.com/bucketeer-io/bucketeer/proto/autoops" + eventpb "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +type webhookCommandHandler struct { + editor *eventpb.Editor + publisher publisher.Publisher + webhook *domain.Webhook + environmentNamespace string +} + +func NewWebhookCommandHandler( + editor *eventpb.Editor, + p publisher.Publisher, + webhook *domain.Webhook, + environmentNamespace string, +) Handler { + return &webhookCommandHandler{ + editor: editor, + publisher: p, + webhook: webhook, + environmentNamespace: environmentNamespace, + } +} + +func (h *webhookCommandHandler) Handle(ctx context.Context, cmd Command) error { + switch c := cmd.(type) { + case *autoopspb.CreateWebhookCommand: + return h.CreateWebhook(ctx, c) + case *autoopspb.DeleteWebhookCommand: + return h.DeleteWebhook(ctx, c) + case *autoopspb.ChangeWebhookNameCommand: + return h.ChangeWebhookName(ctx, c) + case *autoopspb.ChangeWebhookDescriptionCommand: + return h.ChangeWebhookDescription(ctx, c) + default: + return errUnknownCommand + } +} + +func (h *webhookCommandHandler) CreateWebhook(ctx context.Context, cmd *autoopspb.CreateWebhookCommand) error { + event, err := domainevent.NewEvent( + h.editor, + eventpb.Event_WEBHOOK, + h.webhook.Id, + eventpb.Event_WEBHOOK_CREATED, + &eventpb.WebhookCreatedEvent{ + Id: 
h.webhook.Id, + Name: h.webhook.Name, + Description: h.webhook.Description, + CreatedAt: h.webhook.CreatedAt, + UpdatedAt: h.webhook.UpdatedAt, + }, + h.environmentNamespace, + ) + if err != nil { + return err + } + return h.publisher.Publish(ctx, event) +} + +func (h *webhookCommandHandler) DeleteWebhook(ctx context.Context, cmd *autoopspb.DeleteWebhookCommand) error { + event, err := domainevent.NewEvent( + h.editor, + eventpb.Event_WEBHOOK, + h.webhook.Id, + eventpb.Event_WEBHOOK_DELETED, + &eventpb.WebhookDeletedEvent{ + Id: h.webhook.Id, + }, + h.environmentNamespace, + ) + if err != nil { + return err + } + return h.publisher.Publish(ctx, event) +} + +func (h *webhookCommandHandler) ChangeWebhookName(ctx context.Context, cmd *autoopspb.ChangeWebhookNameCommand) error { + if err := h.webhook.ChangeName(cmd.Name); err != nil { + return err + } + event, err := domainevent.NewEvent( + h.editor, + eventpb.Event_WEBHOOK, + h.webhook.Id, + eventpb.Event_WEBHOOK_NAME_CHANGED, + &eventpb.WebhookNameChangedEvent{ + Id: h.webhook.Id, + Name: cmd.Name, + }, + h.environmentNamespace, + ) + if err != nil { + return err + } + return h.publisher.Publish(ctx, event) +} + +func (h *webhookCommandHandler) ChangeWebhookDescription(ctx context.Context, cmd *autoopspb.ChangeWebhookDescriptionCommand) error { + if err := h.webhook.ChangeDescription(cmd.Description); err != nil { + return err + } + event, err := domainevent.NewEvent( + h.editor, + eventpb.Event_WEBHOOK, + h.webhook.Id, + eventpb.Event_WEBHOOK_DESCRIPTION_CHANGED, + &eventpb.WebhookDescriptionChangedEvent{ + Id: h.webhook.Id, + Description: cmd.Description, + }, + h.environmentNamespace, + ) + if err != nil { + return err + } + return h.publisher.Publish(ctx, event) +} diff --git a/pkg/autoops/domain/BUILD.bazel b/pkg/autoops/domain/BUILD.bazel new file mode 100644 index 000000000..de06c405a --- /dev/null +++ b/pkg/autoops/domain/BUILD.bazel @@ -0,0 +1,32 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", 
"go_test") + +go_library( + name = "go_default_library", + srcs = [ + "auto_ops_rule.go", + "webhook.go", + "webhook_secret.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/autoops/domain", + visibility = ["//visibility:public"], + deps = [ + "//pkg/uuid:go_default_library", + "//proto/autoops:go_default_library", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_golang_protobuf//ptypes:go_default_library_gen", + "@io_bazel_rules_go//proto/wkt:any_go_proto", + ], +) + +go_test( + name = "go_default_test", + srcs = ["auto_ops_rule_test.go"], + embed = [":go_default_library"], + deps = [ + "//proto/autoops:go_default_library", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_golang_protobuf//ptypes:go_default_library_gen", + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + ], +) diff --git a/pkg/autoops/domain/auto_ops_rule.go b/pkg/autoops/domain/auto_ops_rule.go new file mode 100644 index 000000000..672f162fc --- /dev/null +++ b/pkg/autoops/domain/auto_ops_rule.go @@ -0,0 +1,268 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package domain + +import ( + "errors" + "time" + + pb "github.com/golang/protobuf/proto" // nolint:staticcheck + "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/ptypes/any" + + "github.com/bucketeer-io/bucketeer/pkg/uuid" + proto "github.com/bucketeer-io/bucketeer/proto/autoops" +) + +var ( + errClauseNotFound = errors.New("autoOpsRule: clause not found") + errClauseEmpty = errors.New("autoOpsRule: clause cannot be empty") + + opsEventRateClause = &proto.OpsEventRateClause{} + datetimeClause = &proto.DatetimeClause{} + webhookClause = &proto.WebhookClause{} +) + +type AutoOpsRule struct { + *proto.AutoOpsRule +} + +func NewAutoOpsRule( + featureID string, + opsType proto.OpsType, + opsEventRateClauses []*proto.OpsEventRateClause, + datetimeClauses []*proto.DatetimeClause, + webhookClauses []*proto.WebhookClause, +) (*AutoOpsRule, error) { + now := time.Now().Unix() + id, err := uuid.NewUUID() + if err != nil { + return nil, err + } + autoOpsRule := &AutoOpsRule{&proto.AutoOpsRule{ + Id: id.String(), + FeatureId: featureID, + OpsType: opsType, + Clauses: []*proto.Clause{}, + CreatedAt: now, + UpdatedAt: now, + }} + for _, c := range opsEventRateClauses { + if _, err := autoOpsRule.AddOpsEventRateClause(c); err != nil { + return nil, err + } + } + for _, c := range datetimeClauses { + if _, err := autoOpsRule.AddDatetimeClause(c); err != nil { + return nil, err + } + } + for _, c := range webhookClauses { + if _, err := autoOpsRule.AddWebhookClause(c); err != nil { + return nil, err + } + } + if len(autoOpsRule.Clauses) == 0 { + return nil, errClauseEmpty + } + return autoOpsRule, nil + +} + +func (a *AutoOpsRule) SetDeleted() { + a.AutoOpsRule.Deleted = true + a.AutoOpsRule.UpdatedAt = time.Now().Unix() +} + +func (a *AutoOpsRule) SetTriggeredAt() { + now := time.Now().Unix() + a.AutoOpsRule.TriggeredAt = now + a.AutoOpsRule.UpdatedAt = now +} + +func (a *AutoOpsRule) AlreadyTriggered() bool { + return a.TriggeredAt > 0 +} + +func (a 
*AutoOpsRule) SetOpsType(opsType proto.OpsType) { + a.AutoOpsRule.OpsType = opsType + a.AutoOpsRule.UpdatedAt = time.Now().Unix() + a.AutoOpsRule.TriggeredAt = 0 +} + +func (a *AutoOpsRule) AddOpsEventRateClause(oerc *proto.OpsEventRateClause) (*proto.Clause, error) { + ac, err := ptypes.MarshalAny(oerc) + if err != nil { + return nil, err + } + return a.addClause(ac) +} + +func (a *AutoOpsRule) AddDatetimeClause(dc *proto.DatetimeClause) (*proto.Clause, error) { + ac, err := ptypes.MarshalAny(dc) + if err != nil { + return nil, err + } + return a.addClause(ac) +} + +func (a *AutoOpsRule) AddWebhookClause(wc *proto.WebhookClause) (*proto.Clause, error) { + ac, err := ptypes.MarshalAny(wc) + if err != nil { + return nil, err + } + return a.addClause(ac) +} + +func (a *AutoOpsRule) ChangeWebhookClause(id string, wc *proto.WebhookClause) error { + return a.changeClause(id, wc) +} + +func (a *AutoOpsRule) addClause(ac *any.Any) (*proto.Clause, error) { + id, err := uuid.NewUUID() + if err != nil { + return nil, err + } + clause := &proto.Clause{ + Id: id.String(), + Clause: ac, + } + a.AutoOpsRule.Clauses = append(a.AutoOpsRule.Clauses, clause) + a.AutoOpsRule.UpdatedAt = time.Now().Unix() + a.AutoOpsRule.TriggeredAt = 0 + return clause, nil +} + +func (a *AutoOpsRule) ChangeOpsEventRateClause(id string, oerc *proto.OpsEventRateClause) error { + return a.changeClause(id, oerc) +} + +func (a *AutoOpsRule) ChangeDatetimeClause(id string, dc *proto.DatetimeClause) error { + return a.changeClause(id, dc) +} + +func (a *AutoOpsRule) changeClause(id string, mc pb.Message) error { + a.AutoOpsRule.UpdatedAt = time.Now().Unix() + a.AutoOpsRule.TriggeredAt = 0 + for _, c := range a.Clauses { + if c.Id == id { + clause, err := ptypes.MarshalAny(mc) + if err != nil { + return err + } + c.Clause = clause + return nil + } + } + return errClauseNotFound +} + +func (a *AutoOpsRule) DeleteClause(id string) error { + if len(a.Clauses) <= 1 { + return errClauseEmpty + } + 
a.AutoOpsRule.UpdatedAt = time.Now().Unix() + a.AutoOpsRule.TriggeredAt = 0 + var clauses []*proto.Clause + for i, c := range a.Clauses { + if c.Id == id { + clauses = append(a.Clauses[:i], a.Clauses[i+1:]...) + continue + } + } + if len(clauses) > 0 { + a.Clauses = clauses + return nil + } + return errClauseNotFound +} + +func (a *AutoOpsRule) ExtractOpsEventRateClauses() (map[string]*proto.OpsEventRateClause, error) { + opsEventRateClauses := map[string]*proto.OpsEventRateClause{} + for _, c := range a.Clauses { + opsEventRateClause, err := a.unmarshalOpsEventRateClause(c) + if err != nil { + return nil, err + } + if opsEventRateClause == nil { + continue + } + opsEventRateClauses[c.Id] = opsEventRateClause + } + return opsEventRateClauses, nil +} + +func (a *AutoOpsRule) unmarshalOpsEventRateClause(clause *proto.Clause) (*proto.OpsEventRateClause, error) { + if ptypes.Is(clause.Clause, opsEventRateClause) { + c := &proto.OpsEventRateClause{} + if err := ptypes.UnmarshalAny(clause.Clause, c); err != nil { + return nil, err + } + return c, nil + } + return nil, nil +} + +func (a *AutoOpsRule) ExtractDatetimeClauses() ([]*proto.DatetimeClause, error) { + datetimeClauses := []*proto.DatetimeClause{} + for _, c := range a.Clauses { + datetimeClause, err := a.unmarshalDatetimeClause(c) + if err != nil { + return nil, err + } + if datetimeClause == nil { + continue + } + datetimeClauses = append(datetimeClauses, datetimeClause) + } + return datetimeClauses, nil +} + +func (a *AutoOpsRule) unmarshalDatetimeClause(clause *proto.Clause) (*proto.DatetimeClause, error) { + if ptypes.Is(clause.Clause, datetimeClause) { + c := &proto.DatetimeClause{} + if err := ptypes.UnmarshalAny(clause.Clause, c); err != nil { + return nil, err + } + return c, nil + } + return nil, nil +} + +func (a *AutoOpsRule) ExtractWebhookClauses() ([]*proto.WebhookClause, error) { + webhookClauses := []*proto.WebhookClause{} + for _, c := range a.Clauses { + webhookClause, err := 
a.unmarshalWebhookClause(c) + if err != nil { + return nil, err + } + if webhookClause == nil { + continue + } + webhookClauses = append(webhookClauses, webhookClause) + } + return webhookClauses, nil +} + +func (a *AutoOpsRule) unmarshalWebhookClause(clause *proto.Clause) (*proto.WebhookClause, error) { + if ptypes.Is(clause.Clause, webhookClause) { + c := &proto.WebhookClause{} + if err := ptypes.UnmarshalAny(clause.Clause, c); err != nil { + return nil, err + } + return c, nil + } + return nil, nil +} diff --git a/pkg/autoops/domain/auto_ops_rule_test.go b/pkg/autoops/domain/auto_ops_rule_test.go new file mode 100644 index 000000000..7e12783dd --- /dev/null +++ b/pkg/autoops/domain/auto_ops_rule_test.go @@ -0,0 +1,296 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package domain + +import ( + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + autoopsproto "github.com/bucketeer-io/bucketeer/proto/autoops" +) + +func TestNewAutoOpsRule(t *testing.T) { + t.Parallel() + aor := createAutoOpsRule(t) + assert.IsType(t, &AutoOpsRule{}, aor) + assert.Equal(t, "feature-id", aor.FeatureId) + assert.Equal(t, autoopsproto.OpsType_ENABLE_FEATURE, aor.OpsType) + assert.NotZero(t, aor.Clauses) + assert.Zero(t, aor.TriggeredAt) + assert.NotZero(t, aor.CreatedAt) + assert.NotZero(t, aor.UpdatedAt) +} + +func TestSetDeleted(t *testing.T) { + t.Parallel() + aor := createAutoOpsRule(t) + aor.SetDeleted() + assert.Equal(t, true, aor.Deleted) +} + +func TestSetTriggeredAt(t *testing.T) { + t.Parallel() + aor := createAutoOpsRule(t) + aor.SetTriggeredAt() + assert.NotZero(t, aor.TriggeredAt) +} + +func TestAlreadyTriggeredAt(t *testing.T) { + t.Parallel() + aor := createAutoOpsRule(t) + assert.False(t, aor.AlreadyTriggered()) + aor.SetTriggeredAt() + assert.True(t, aor.AlreadyTriggered()) +} + +func TestSetOpsType(t *testing.T) { + t.Parallel() + aor := createAutoOpsRule(t) + aor.TriggeredAt = 1 + aor.SetOpsType(autoopsproto.OpsType_DISABLE_FEATURE) + assert.Equal(t, autoopsproto.OpsType_DISABLE_FEATURE, aor.OpsType) + assert.Zero(t, aor.TriggeredAt) +} + +func TestAddOpsEventRateClause(t *testing.T) { + t.Parallel() + aor := createAutoOpsRule(t) + aor.TriggeredAt = 1 + l := len(aor.Clauses) + c := &autoopsproto.OpsEventRateClause{ + GoalId: "goalid01", + MinCount: 10, + ThreadsholdRate: 0.5, + Operator: autoopsproto.OpsEventRateClause_GREATER_OR_EQUAL, + } + clause, err := aor.AddOpsEventRateClause(c) + require.NoError(t, err) + assert.NotNil(t, clause) + assert.NotEmpty(t, aor.Clauses[l].Id) + _, err = aor.unmarshalOpsEventRateClause(aor.Clauses[l]) + require.NoError(t, err) + assert.Zero(t, aor.TriggeredAt) +} + +func 
TestAddDatetimeClause(t *testing.T) { + t.Parallel() + aor := createAutoOpsRule(t) + aor.TriggeredAt = 1 + l := len(aor.Clauses) + c := &autoopsproto.DatetimeClause{ + Time: 1000000001, + } + clause, err := aor.AddDatetimeClause(c) + require.NoError(t, err) + assert.NotNil(t, clause) + assert.NotEmpty(t, aor.Clauses[l].Id) + dc, err := aor.unmarshalDatetimeClause(aor.Clauses[l]) + require.NoError(t, err) + assert.Equal(t, c.Time, dc.Time) + assert.Zero(t, aor.TriggeredAt) +} + +func TestChangeOpsEventRateClause(t *testing.T) { + t.Parallel() + aor := createAutoOpsRule(t) + aor.TriggeredAt = 1 + l := len(aor.Clauses) + c := &autoopsproto.OpsEventRateClause{ + GoalId: "goalid01", + MinCount: 10, + ThreadsholdRate: 0.5, + Operator: autoopsproto.OpsEventRateClause_GREATER_OR_EQUAL, + } + err := aor.ChangeOpsEventRateClause(aor.Clauses[0].Id, c) + require.NoError(t, err) + assert.Equal(t, l, len(aor.Clauses)) + _, err = aor.unmarshalOpsEventRateClause(aor.Clauses[0]) + require.NoError(t, err) + assert.Zero(t, aor.TriggeredAt) +} + +func TestDatetimeClause(t *testing.T) { + t.Parallel() + aor := createAutoOpsRule(t) + aor.TriggeredAt = 1 + l := len(aor.Clauses) + c := &autoopsproto.DatetimeClause{ + Time: 1000000001, + } + err := aor.ChangeDatetimeClause(aor.Clauses[0].Id, c) + require.NoError(t, err) + assert.Equal(t, l, len(aor.Clauses)) + dc, err := aor.unmarshalDatetimeClause(aor.Clauses[0]) + require.NoError(t, err) + assert.Equal(t, c.Time, dc.Time) + assert.Zero(t, aor.TriggeredAt) +} + +func TestDeleteClause(t *testing.T) { + t.Parallel() + aor := createAutoOpsRule(t) + aor.TriggeredAt = 1 + l := len(aor.Clauses) + c := &autoopsproto.OpsEventRateClause{ + GoalId: "goalid01", + MinCount: 10, + ThreadsholdRate: 0.5, + Operator: autoopsproto.OpsEventRateClause_GREATER_OR_EQUAL, + } + _, err := aor.AddOpsEventRateClause(c) + require.NoError(t, err) + err = aor.DeleteClause(aor.Clauses[l].Id) + require.NoError(t, err) + assert.Equal(t, l, len(aor.Clauses)) + 
assert.Zero(t, aor.TriggeredAt) +} + +func createAutoOpsRule(t *testing.T) *AutoOpsRule { + aor, err := NewAutoOpsRule( + "feature-id", + autoopsproto.OpsType_ENABLE_FEATURE, + []*autoopsproto.OpsEventRateClause{}, + []*autoopsproto.DatetimeClause{ + {Time: 0}, + }, + []*autoopsproto.WebhookClause{}, + ) + require.NoError(t, err) + return aor +} + +func TestExtractOpsEventRateClauses(t *testing.T) { + oerc1 := &autoopsproto.OpsEventRateClause{ + VariationId: "vid1", + GoalId: "gid1", + MinCount: int64(10), + ThreadsholdRate: float64(0.5), + Operator: autoopsproto.OpsEventRateClause_GREATER_OR_EQUAL, + } + c1, err := ptypes.MarshalAny(oerc1) + require.NoError(t, err) + oerc2 := &autoopsproto.OpsEventRateClause{ + VariationId: "vid1", + GoalId: "gid2", + MinCount: int64(10), + ThreadsholdRate: float64(0.5), + Operator: autoopsproto.OpsEventRateClause_GREATER_OR_EQUAL, + } + c2, err := ptypes.MarshalAny(oerc2) + require.NoError(t, err) + dc1 := &autoopsproto.DatetimeClause{ + Time: 1000000001, + } + c3, err := ptypes.MarshalAny(dc1) + require.NoError(t, err) + autoOpsRule := &AutoOpsRule{&autoopsproto.AutoOpsRule{ + Id: "id-0", + FeatureId: "fid-0", + Clauses: []*autoopsproto.Clause{{Id: "c1", Clause: c1}, {Id: "c2", Clause: c2}, {Id: "c3", Clause: c3}}, + }} + expected := map[string]*autoopsproto.OpsEventRateClause{"c1": oerc1, "c2": oerc2} + actual, err := autoOpsRule.ExtractOpsEventRateClauses() + assert.NoError(t, err) + assert.Equal(t, len(expected), len(actual)) + for i, a := range actual { + assert.True(t, proto.Equal(expected[i], a)) + } +} + +func TestExtractDatetimeClauses(t *testing.T) { + dc1 := &autoopsproto.DatetimeClause{ + Time: 1000000001, + } + c1, err := ptypes.MarshalAny(dc1) + require.NoError(t, err) + dc2 := &autoopsproto.DatetimeClause{ + Time: 1000000002, + } + c2, err := ptypes.MarshalAny(dc2) + require.NoError(t, err) + oerc1 := &autoopsproto.OpsEventRateClause{ + VariationId: "vid1", + GoalId: "gid1", + MinCount: int64(10), + 
ThreadsholdRate: float64(0.5), + Operator: autoopsproto.OpsEventRateClause_GREATER_OR_EQUAL, + } + c3, err := ptypes.MarshalAny(oerc1) + require.NoError(t, err) + autoOpsRule := &AutoOpsRule{&autoopsproto.AutoOpsRule{ + Id: "id-0", + FeatureId: "fid-0", + Clauses: []*autoopsproto.Clause{{Clause: c1}, {Clause: c2}, {Clause: c3}}, + }} + expected := []*autoopsproto.DatetimeClause{dc1, dc2} + actual, err := autoOpsRule.ExtractDatetimeClauses() + assert.NoError(t, err) + assert.Equal(t, len(expected), len(actual)) + for i, a := range actual { + assert.True(t, proto.Equal(expected[i], a)) + } +} + +func TestExtractWebhookClauses(t *testing.T) { + wc1 := &autoopsproto.WebhookClause{ + WebhookId: "foo-id", + Conditions: []*autoopsproto.WebhookClause_Condition{ + { + Filter: ".foo.bar", + Value: "foobaz", + Operator: autoopsproto.WebhookClause_Condition_EQUAL, + }, + }, + } + c1, err := ptypes.MarshalAny(wc1) + require.NoError(t, err) + wc2 := &autoopsproto.WebhookClause{ + WebhookId: "bar-id", + Conditions: []*autoopsproto.WebhookClause_Condition{ + { + Filter: ".bar.foo", + Value: "barbaz", + Operator: autoopsproto.WebhookClause_Condition_NOT_EQUAL, + }, + }, + } + c2, err := ptypes.MarshalAny(wc2) + require.NoError(t, err) + oerc1 := &autoopsproto.OpsEventRateClause{ + VariationId: "vid1", + GoalId: "gid1", + MinCount: int64(10), + ThreadsholdRate: float64(0.5), + Operator: autoopsproto.OpsEventRateClause_GREATER_OR_EQUAL, + } + c3, err := ptypes.MarshalAny(oerc1) + require.NoError(t, err) + autoOpsRule := &AutoOpsRule{&autoopsproto.AutoOpsRule{ + Id: "id-0", + FeatureId: "fid-0", + Clauses: []*autoopsproto.Clause{{Clause: c1}, {Clause: c2}, {Clause: c3}}, + }} + expected := []*autoopsproto.WebhookClause{wc1, wc2} + actual, err := autoOpsRule.ExtractWebhookClauses() + assert.NoError(t, err) + assert.Equal(t, len(expected), len(actual)) + for i, a := range actual { + assert.True(t, proto.Equal(expected[i], a)) + } +} diff --git a/pkg/autoops/domain/webhook.go 
b/pkg/autoops/domain/webhook.go new file mode 100644 index 000000000..6b13c172a --- /dev/null +++ b/pkg/autoops/domain/webhook.go @@ -0,0 +1,49 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package domain + +import ( + "time" + + proto "github.com/bucketeer-io/bucketeer/proto/autoops" +) + +// Webhook holds the settings for accepting webhooks from alert systems, etc. +type Webhook struct { + *proto.Webhook +} + +func NewWebhook(id, name, description string) *Webhook { + now := time.Now().Unix() + return &Webhook{&proto.Webhook{ + Id: id, + Name: name, + Description: description, + CreatedAt: now, + UpdatedAt: now, + }} +} + +func (w *Webhook) ChangeName(name string) error { + w.Name = name + w.UpdatedAt = time.Now().Unix() + return nil +} + +func (w *Webhook) ChangeDescription(description string) error { + w.Description = description + w.UpdatedAt = time.Now().Unix() + return nil +} diff --git a/pkg/autoops/domain/webhook_secret.go b/pkg/autoops/domain/webhook_secret.go new file mode 100644 index 000000000..719453c6f --- /dev/null +++ b/pkg/autoops/domain/webhook_secret.go @@ -0,0 +1,57 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package domain + +import ( + "encoding/json" +) + +type webhookSecret struct { + WebhookID string `json:"webhook_id"` + EnvironmentNamespace string `json:"environment_namespace"` +} + +type WebhookSecret interface { + Marshal() ([]byte, error) + GetWebhookID() string + GetEnvironmentNamespace() string +} + +func NewWebhookSecret(webhookID, environmentNamespace string) WebhookSecret { + return &webhookSecret{ + EnvironmentNamespace: environmentNamespace, + WebhookID: webhookID, + } +} + +func UnmarshalWebhookSecret(data []byte) (WebhookSecret, error) { + ws := webhookSecret{} + if err := json.Unmarshal(data, &ws); err != nil { + return nil, err + } + return &ws, nil +} + +func (ws *webhookSecret) Marshal() ([]byte, error) { + return json.Marshal(ws) +} + +func (ws *webhookSecret) GetWebhookID() string { + return ws.WebhookID +} + +func (ws *webhookSecret) GetEnvironmentNamespace() string { + return ws.EnvironmentNamespace +} diff --git a/pkg/autoops/storage/v2/BUILD.bazel b/pkg/autoops/storage/v2/BUILD.bazel new file mode 100644 index 000000000..09cdab7f6 --- /dev/null +++ b/pkg/autoops/storage/v2/BUILD.bazel @@ -0,0 +1,30 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "auto_ops_rule.go", + "webhook.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/autoops/storage/v2", + visibility = ["//visibility:public"], + deps = [ + "//pkg/autoops/domain:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//proto/autoops:go_default_library", + ], 
+) + +go_test( + name = "go_default_test", + srcs = ["auto_ops_rule_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/autoops/domain:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//pkg/storage/v2/mysql/mock:go_default_library", + "//proto/autoops:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + ], +) diff --git a/pkg/autoops/storage/v2/auto_ops_rule.go b/pkg/autoops/storage/v2/auto_ops_rule.go new file mode 100644 index 000000000..2e769a2f3 --- /dev/null +++ b/pkg/autoops/storage/v2/auto_ops_rule.go @@ -0,0 +1,243 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package v2 + +import ( + "context" + "errors" + "fmt" + + "github.com/bucketeer-io/bucketeer/pkg/autoops/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + proto "github.com/bucketeer-io/bucketeer/proto/autoops" +) + +var ( + ErrAutoOpsRuleAlreadyExists = errors.New("autoOpsRule: already exists") + ErrAutoOpsRuleNotFound = errors.New("autoOpsRule: not found") + ErrAutoOpsRuleUnexpectedAffectedRows = errors.New("autoOpsRule: unexpected affected rows") +) + +type AutoOpsRuleStorage interface { + CreateAutoOpsRule(ctx context.Context, e *domain.AutoOpsRule, environmentNamespace string) error + UpdateAutoOpsRule(ctx context.Context, e *domain.AutoOpsRule, environmentNamespace string) error + GetAutoOpsRule(ctx context.Context, id, environmentNamespace string) (*domain.AutoOpsRule, error) + ListAutoOpsRules( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, + ) ([]*proto.AutoOpsRule, int, error) +} + +type autoOpsRuleStorage struct { + qe mysql.QueryExecer +} + +func NewAutoOpsRuleStorage(qe mysql.QueryExecer) AutoOpsRuleStorage { + return &autoOpsRuleStorage{qe: qe} +} + +func (s *autoOpsRuleStorage) CreateAutoOpsRule( + ctx context.Context, + e *domain.AutoOpsRule, + environmentNamespace string, +) error { + query := ` + INSERT INTO auto_ops_rule ( + id, + feature_id, + ops_type, + clauses, + triggered_at, + created_at, + updated_at, + deleted, + environment_namespace + ) VALUES ( + ?, ?, ?, ?, ?, ?, ?, ?, ? 
+ ) + ` + _, err := s.qe.ExecContext( + ctx, + query, + e.Id, + e.FeatureId, + int32(e.OpsType), + mysql.JSONObject{Val: e.Clauses}, + e.TriggeredAt, + e.CreatedAt, + e.UpdatedAt, + e.Deleted, + environmentNamespace, + ) + if err != nil { + if err == mysql.ErrDuplicateEntry { + return ErrAutoOpsRuleAlreadyExists + } + return err + } + return nil +} + +func (s *autoOpsRuleStorage) UpdateAutoOpsRule( + ctx context.Context, + e *domain.AutoOpsRule, + environmentNamespace string, +) error { + query := ` + UPDATE + auto_ops_rule + SET + feature_id = ?, + ops_type = ?, + clauses = ?, + triggered_at = ?, + created_at = ?, + updated_at = ?, + deleted = ? + WHERE + id = ? AND + environment_namespace = ? + ` + result, err := s.qe.ExecContext( + ctx, + query, + e.FeatureId, + int32(e.OpsType), + mysql.JSONObject{Val: e.Clauses}, + e.TriggeredAt, + e.CreatedAt, + e.UpdatedAt, + e.Deleted, + e.Id, + environmentNamespace, + ) + if err != nil { + return err + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return err + } + if rowsAffected != 1 { + return ErrAutoOpsRuleUnexpectedAffectedRows + } + return nil +} + +func (s *autoOpsRuleStorage) GetAutoOpsRule( + ctx context.Context, + id, environmentNamespace string, +) (*domain.AutoOpsRule, error) { + autoOpsRule := proto.AutoOpsRule{} + var opsType int32 + query := ` + SELECT + id, + feature_id, + ops_type, + clauses, + triggered_at, + created_at, + updated_at, + deleted + FROM + auto_ops_rule + WHERE + id = ? AND + environment_namespace = ? 
+ ` + err := s.qe.QueryRowContext( + ctx, + query, + id, + environmentNamespace, + ).Scan( + &autoOpsRule.Id, + &autoOpsRule.FeatureId, + &opsType, + &mysql.JSONObject{Val: &autoOpsRule.Clauses}, + &autoOpsRule.TriggeredAt, + &autoOpsRule.CreatedAt, + &autoOpsRule.UpdatedAt, + &autoOpsRule.Deleted, + ) + if err != nil { + if err == mysql.ErrNoRows { + return nil, ErrAutoOpsRuleNotFound + } + return nil, err + } + autoOpsRule.OpsType = proto.OpsType(opsType) + return &domain.AutoOpsRule{AutoOpsRule: &autoOpsRule}, nil +} + +func (s *autoOpsRuleStorage) ListAutoOpsRules( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, +) ([]*proto.AutoOpsRule, int, error) { + whereSQL, whereArgs := mysql.ConstructWhereSQLString(whereParts) + orderBySQL := mysql.ConstructOrderBySQLString(orders) + limitOffsetSQL := mysql.ConstructLimitOffsetSQLString(limit, offset) + query := fmt.Sprintf(` + SELECT + id, + feature_id, + ops_type, + clauses, + triggered_at, + created_at, + updated_at, + deleted + FROM + auto_ops_rule + %s %s %s + `, whereSQL, orderBySQL, limitOffsetSQL, + ) + rows, err := s.qe.QueryContext(ctx, query, whereArgs...) 
+ if err != nil { + return nil, 0, err + } + defer rows.Close() + autoOpsRules := make([]*proto.AutoOpsRule, 0, limit) + for rows.Next() { + autoOpsRule := proto.AutoOpsRule{} + var opsType int32 + err := rows.Scan( + &autoOpsRule.Id, + &autoOpsRule.FeatureId, + &opsType, + &mysql.JSONObject{Val: &autoOpsRule.Clauses}, + &autoOpsRule.TriggeredAt, + &autoOpsRule.CreatedAt, + &autoOpsRule.UpdatedAt, + &autoOpsRule.Deleted, + ) + if err != nil { + return nil, 0, err + } + autoOpsRule.OpsType = proto.OpsType(opsType) + autoOpsRules = append(autoOpsRules, &autoOpsRule) + } + if rows.Err() != nil { + return nil, 0, err + } + nextOffset := offset + len(autoOpsRules) + return autoOpsRules, nextOffset, nil +} diff --git a/pkg/autoops/storage/v2/auto_ops_rule_test.go b/pkg/autoops/storage/v2/auto_ops_rule_test.go new file mode 100644 index 000000000..2f1b1f7ec --- /dev/null +++ b/pkg/autoops/storage/v2/auto_ops_rule_test.go @@ -0,0 +1,258 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v2 + +import ( + "context" + "errors" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/autoops/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + proto "github.com/bucketeer-io/bucketeer/proto/autoops" +) + +func TestNewAutoOpsRuleStorage(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + db := NewAutoOpsRuleStorage(mock.NewMockQueryExecer(mockController)) + assert.IsType(t, &autoOpsRuleStorage{}, db) +} + +func TestCreateAutoOpsRule(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := []struct { + setup func(*autoOpsRuleStorage) + input *domain.AutoOpsRule + environmentNamespace string + expectedErr error + }{ + { + setup: func(s *autoOpsRuleStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, mysql.ErrDuplicateEntry) + }, + input: &domain.AutoOpsRule{ + AutoOpsRule: &proto.AutoOpsRule{Id: "id-0"}, + }, + environmentNamespace: "ns0", + expectedErr: ErrAutoOpsRuleAlreadyExists, + }, + { + setup: func(s *autoOpsRuleStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, nil) + }, + input: &domain.AutoOpsRule{ + AutoOpsRule: &proto.AutoOpsRule{Id: "id-1"}, + }, + environmentNamespace: "ns0", + expectedErr: nil, + }, + } + for _, p := range patterns { + storage := newAutoOpsRuleStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + err := storage.CreateAutoOpsRule(context.Background(), p.input, p.environmentNamespace) + assert.Equal(t, p.expectedErr, err) + } +} + +func TestUpdateAutoOpsRule(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns 
:= []struct { + setup func(*autoOpsRuleStorage) + input *domain.AutoOpsRule + environmentNamespace string + expectedErr error + }{ + { + setup: func(s *autoOpsRuleStorage) { + result := mock.NewMockResult(mockController) + result.EXPECT().RowsAffected().Return(int64(0), nil) + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(result, nil) + }, + input: &domain.AutoOpsRule{ + AutoOpsRule: &proto.AutoOpsRule{Id: "id-0"}, + }, + environmentNamespace: "ns", + expectedErr: ErrAutoOpsRuleUnexpectedAffectedRows, + }, + { + setup: func(s *autoOpsRuleStorage) { + result := mock.NewMockResult(mockController) + result.EXPECT().RowsAffected().Return(int64(1), nil) + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(result, nil) + }, + input: &domain.AutoOpsRule{ + AutoOpsRule: &proto.AutoOpsRule{Id: "id-0"}, + }, + environmentNamespace: "ns", + expectedErr: nil, + }, + } + for _, p := range patterns { + storage := newAutoOpsRuleStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + err := storage.UpdateAutoOpsRule(context.Background(), p.input, p.environmentNamespace) + assert.Equal(t, p.expectedErr, err) + } +} + +func TestGetAutoOpsRule(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := []struct { + setup func(*autoOpsRuleStorage) + input string + environmentNamespace string + expected *domain.AutoOpsRule + expectedErr error + }{ + { + setup: func(s *autoOpsRuleStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + input: "", + environmentNamespace: "ns0", + expected: nil, + expectedErr: ErrAutoOpsRuleNotFound, + }, + { + setup: func(s *autoOpsRuleStorage) { + row := 
mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + input: "id-0", + environmentNamespace: "ns0", + expected: &domain.AutoOpsRule{ + AutoOpsRule: &proto.AutoOpsRule{Id: "id-0"}, + }, + expectedErr: nil, + }, + } + for _, p := range patterns { + storage := newAutoOpsRuleStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + _, err := storage.GetAutoOpsRule(context.Background(), p.input, p.environmentNamespace) + assert.Equal(t, p.expectedErr, err) + } +} + +func TestListAutoOpsRules(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := []struct { + setup func(*autoOpsRuleStorage) + whereParts []mysql.WherePart + orders []*mysql.Order + limit int + offset int + expected []*proto.AutoOpsRule + expectedCursor int + expectedErr error + }{ + { + setup: func(s *autoOpsRuleStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + }, + whereParts: nil, + orders: nil, + limit: 0, + offset: 0, + expected: nil, + expectedCursor: 0, + expectedErr: errors.New("error"), + }, + { + setup: func(s *autoOpsRuleStorage) { + rows := mock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + }, + whereParts: []mysql.WherePart{ + mysql.NewFilter("num", ">=", 5), + }, + orders: []*mysql.Order{ + mysql.NewOrder("id", mysql.OrderDirectionAsc), + }, + limit: 10, + offset: 5, + expected: []*proto.AutoOpsRule{}, + expectedCursor: 5, + expectedErr: nil, + }, + } + for _, p := range patterns { + storage := newAutoOpsRuleStorageWithMock(t, mockController) + if p.setup 
!= nil { + p.setup(storage) + } + autoOpsRules, cursor, err := storage.ListAutoOpsRules( + context.Background(), + p.whereParts, + p.orders, + p.limit, + p.offset, + ) + assert.Equal(t, p.expected, autoOpsRules) + assert.Equal(t, p.expectedCursor, cursor) + assert.Equal(t, p.expectedErr, err) + } +} + +func newAutoOpsRuleStorageWithMock(t *testing.T, mockController *gomock.Controller) *autoOpsRuleStorage { + t.Helper() + return &autoOpsRuleStorage{mock.NewMockQueryExecer(mockController)} +} diff --git a/pkg/autoops/storage/v2/mock/BUILD.bazel b/pkg/autoops/storage/v2/mock/BUILD.bazel new file mode 100644 index 000000000..64ecedc1f --- /dev/null +++ b/pkg/autoops/storage/v2/mock/BUILD.bazel @@ -0,0 +1,17 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "auto_ops_rule.go", + "webhook.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/autoops/storage/v2/mock", + visibility = ["//visibility:public"], + deps = [ + "//pkg/autoops/domain:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//proto/autoops:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + ], +) diff --git a/pkg/autoops/storage/v2/mock/auto_ops_rule.go b/pkg/autoops/storage/v2/mock/auto_ops_rule.go new file mode 100644 index 000000000..5db029481 --- /dev/null +++ b/pkg/autoops/storage/v2/mock/auto_ops_rule.go @@ -0,0 +1,98 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: auto_ops_rule.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + domain "github.com/bucketeer-io/bucketeer/pkg/autoops/domain" + mysql "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + autoops "github.com/bucketeer-io/bucketeer/proto/autoops" +) + +// MockAutoOpsRuleStorage is a mock of AutoOpsRuleStorage interface. 
+type MockAutoOpsRuleStorage struct { + ctrl *gomock.Controller + recorder *MockAutoOpsRuleStorageMockRecorder +} + +// MockAutoOpsRuleStorageMockRecorder is the mock recorder for MockAutoOpsRuleStorage. +type MockAutoOpsRuleStorageMockRecorder struct { + mock *MockAutoOpsRuleStorage +} + +// NewMockAutoOpsRuleStorage creates a new mock instance. +func NewMockAutoOpsRuleStorage(ctrl *gomock.Controller) *MockAutoOpsRuleStorage { + mock := &MockAutoOpsRuleStorage{ctrl: ctrl} + mock.recorder = &MockAutoOpsRuleStorageMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockAutoOpsRuleStorage) EXPECT() *MockAutoOpsRuleStorageMockRecorder { + return m.recorder +} + +// CreateAutoOpsRule mocks base method. +func (m *MockAutoOpsRuleStorage) CreateAutoOpsRule(ctx context.Context, e *domain.AutoOpsRule, environmentNamespace string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateAutoOpsRule", ctx, e, environmentNamespace) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateAutoOpsRule indicates an expected call of CreateAutoOpsRule. +func (mr *MockAutoOpsRuleStorageMockRecorder) CreateAutoOpsRule(ctx, e, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateAutoOpsRule", reflect.TypeOf((*MockAutoOpsRuleStorage)(nil).CreateAutoOpsRule), ctx, e, environmentNamespace) +} + +// GetAutoOpsRule mocks base method. +func (m *MockAutoOpsRuleStorage) GetAutoOpsRule(ctx context.Context, id, environmentNamespace string) (*domain.AutoOpsRule, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAutoOpsRule", ctx, id, environmentNamespace) + ret0, _ := ret[0].(*domain.AutoOpsRule) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAutoOpsRule indicates an expected call of GetAutoOpsRule. 
+func (mr *MockAutoOpsRuleStorageMockRecorder) GetAutoOpsRule(ctx, id, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAutoOpsRule", reflect.TypeOf((*MockAutoOpsRuleStorage)(nil).GetAutoOpsRule), ctx, id, environmentNamespace) +} + +// ListAutoOpsRules mocks base method. +func (m *MockAutoOpsRuleStorage) ListAutoOpsRules(ctx context.Context, whereParts []mysql.WherePart, orders []*mysql.Order, limit, offset int) ([]*autoops.AutoOpsRule, int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListAutoOpsRules", ctx, whereParts, orders, limit, offset) + ret0, _ := ret[0].([]*autoops.AutoOpsRule) + ret1, _ := ret[1].(int) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// ListAutoOpsRules indicates an expected call of ListAutoOpsRules. +func (mr *MockAutoOpsRuleStorageMockRecorder) ListAutoOpsRules(ctx, whereParts, orders, limit, offset interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAutoOpsRules", reflect.TypeOf((*MockAutoOpsRuleStorage)(nil).ListAutoOpsRules), ctx, whereParts, orders, limit, offset) +} + +// UpdateAutoOpsRule mocks base method. +func (m *MockAutoOpsRuleStorage) UpdateAutoOpsRule(ctx context.Context, e *domain.AutoOpsRule, environmentNamespace string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateAutoOpsRule", ctx, e, environmentNamespace) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateAutoOpsRule indicates an expected call of UpdateAutoOpsRule. 
+func (mr *MockAutoOpsRuleStorageMockRecorder) UpdateAutoOpsRule(ctx, e, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAutoOpsRule", reflect.TypeOf((*MockAutoOpsRuleStorage)(nil).UpdateAutoOpsRule), ctx, e, environmentNamespace) +} diff --git a/pkg/autoops/storage/v2/mock/webhook.go b/pkg/autoops/storage/v2/mock/webhook.go new file mode 100644 index 000000000..fe017ddf2 --- /dev/null +++ b/pkg/autoops/storage/v2/mock/webhook.go @@ -0,0 +1,113 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: webhook.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + domain "github.com/bucketeer-io/bucketeer/pkg/autoops/domain" + mysql "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + autoops "github.com/bucketeer-io/bucketeer/proto/autoops" +) + +// MockWebhookStorage is a mock of WebhookStorage interface. +type MockWebhookStorage struct { + ctrl *gomock.Controller + recorder *MockWebhookStorageMockRecorder +} + +// MockWebhookStorageMockRecorder is the mock recorder for MockWebhookStorage. +type MockWebhookStorageMockRecorder struct { + mock *MockWebhookStorage +} + +// NewMockWebhookStorage creates a new mock instance. +func NewMockWebhookStorage(ctrl *gomock.Controller) *MockWebhookStorage { + mock := &MockWebhookStorage{ctrl: ctrl} + mock.recorder = &MockWebhookStorageMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockWebhookStorage) EXPECT() *MockWebhookStorageMockRecorder { + return m.recorder +} + +// CreateWebhook mocks base method. 
+func (m *MockWebhookStorage) CreateWebhook(ctx context.Context, webhook *domain.Webhook, environmentNamespace string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateWebhook", ctx, webhook, environmentNamespace) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateWebhook indicates an expected call of CreateWebhook. +func (mr *MockWebhookStorageMockRecorder) CreateWebhook(ctx, webhook, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateWebhook", reflect.TypeOf((*MockWebhookStorage)(nil).CreateWebhook), ctx, webhook, environmentNamespace) +} + +// DeleteWebhook mocks base method. +func (m *MockWebhookStorage) DeleteWebhook(ctx context.Context, id, environmentNamespace string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteWebhook", ctx, id, environmentNamespace) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteWebhook indicates an expected call of DeleteWebhook. +func (mr *MockWebhookStorageMockRecorder) DeleteWebhook(ctx, id, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteWebhook", reflect.TypeOf((*MockWebhookStorage)(nil).DeleteWebhook), ctx, id, environmentNamespace) +} + +// GetWebhook mocks base method. +func (m *MockWebhookStorage) GetWebhook(ctx context.Context, id, environmentNamespace string) (*domain.Webhook, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetWebhook", ctx, id, environmentNamespace) + ret0, _ := ret[0].(*domain.Webhook) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetWebhook indicates an expected call of GetWebhook. 
+func (mr *MockWebhookStorageMockRecorder) GetWebhook(ctx, id, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWebhook", reflect.TypeOf((*MockWebhookStorage)(nil).GetWebhook), ctx, id, environmentNamespace) +} + +// ListWebhooks mocks base method. +func (m *MockWebhookStorage) ListWebhooks(ctx context.Context, whereParts []mysql.WherePart, orders []*mysql.Order, limit, offset int) ([]*autoops.Webhook, int, int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListWebhooks", ctx, whereParts, orders, limit, offset) + ret0, _ := ret[0].([]*autoops.Webhook) + ret1, _ := ret[1].(int) + ret2, _ := ret[2].(int64) + ret3, _ := ret[3].(error) + return ret0, ret1, ret2, ret3 +} + +// ListWebhooks indicates an expected call of ListWebhooks. +func (mr *MockWebhookStorageMockRecorder) ListWebhooks(ctx, whereParts, orders, limit, offset interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListWebhooks", reflect.TypeOf((*MockWebhookStorage)(nil).ListWebhooks), ctx, whereParts, orders, limit, offset) +} + +// UpdateWebhook mocks base method. +func (m *MockWebhookStorage) UpdateWebhook(ctx context.Context, webhook *domain.Webhook, environmentNamespace string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateWebhook", ctx, webhook, environmentNamespace) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateWebhook indicates an expected call of UpdateWebhook. 
+func (mr *MockWebhookStorageMockRecorder) UpdateWebhook(ctx, webhook, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateWebhook", reflect.TypeOf((*MockWebhookStorage)(nil).UpdateWebhook), ctx, webhook, environmentNamespace) +} diff --git a/pkg/autoops/storage/v2/webhook.go b/pkg/autoops/storage/v2/webhook.go new file mode 100644 index 000000000..2d7bfd16b --- /dev/null +++ b/pkg/autoops/storage/v2/webhook.go @@ -0,0 +1,256 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package v2 + +import ( + "context" + "errors" + "fmt" + + "github.com/bucketeer-io/bucketeer/pkg/autoops/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + proto "github.com/bucketeer-io/bucketeer/proto/autoops" +) + +var ( + ErrWebhookAlreadyExists = errors.New("webhook: already exists") + ErrWebhookNotFound = errors.New("webhook: not found") + ErrWebhookUnexpectedAffectedRows = errors.New("webhook: unexpected affected rows") +) + +type WebhookStorage interface { + CreateWebhook(ctx context.Context, webhook *domain.Webhook, environmentNamespace string) error + UpdateWebhook(ctx context.Context, webhook *domain.Webhook, environmentNamespace string) error + DeleteWebhook(ctx context.Context, id, environmentNamespace string) error + GetWebhook(ctx context.Context, id, environmentNamespace string) (*domain.Webhook, error) + ListWebhooks( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, + ) ([]*proto.Webhook, int, int64, error) +} + +type webhookStorage struct { + qe mysql.QueryExecer +} + +func NewWebhookStorage( + qe mysql.QueryExecer, +) WebhookStorage { + return &webhookStorage{qe: qe} +} + +func (s *webhookStorage) CreateWebhook( + ctx context.Context, + webhook *domain.Webhook, + environmentNamespace string, +) error { + query := ` + INSERT INTO webhook ( + id, + name, + description, + environment_namespace, + created_at, + updated_at + ) VALUES (?, ?, ?, ?, ?, ?) 
+ ` + _, err := s.qe.ExecContext( + ctx, + query, + webhook.Id, + webhook.Name, + webhook.Description, + environmentNamespace, + webhook.CreatedAt, + webhook.UpdatedAt, + ) + if err != nil { + if err == mysql.ErrDuplicateEntry { + return ErrWebhookAlreadyExists + } + return err + } + return nil +} + +func (s *webhookStorage) UpdateWebhook( + ctx context.Context, + webhook *domain.Webhook, + environmentNamespace string, +) error { + query := ` + UPDATE + webhook + SET + name = ?, + description = ?, + updated_at = ? + WHERE + id = ? AND + environment_namespace = ? + ` + result, err := s.qe.ExecContext( + ctx, + query, + webhook.Name, + webhook.Description, + webhook.UpdatedAt, + webhook.Id, + environmentNamespace, + ) + if err != nil { + return err + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return err + } + if rowsAffected != 1 { + return ErrWebhookUnexpectedAffectedRows + } + return nil +} + +func (s *webhookStorage) DeleteWebhook( + ctx context.Context, + id, environmentNamespace string, +) error { + query := ` + DELETE FROM + webhook + WHERE + id = ? AND + environment_namespace = ? + ` + result, err := s.qe.ExecContext( + ctx, + query, + id, + environmentNamespace, + ) + if err != nil { + return err + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return err + } + if rowsAffected != 1 { + return ErrWebhookUnexpectedAffectedRows + } + return nil +} + +func (s *webhookStorage) GetWebhook( + ctx context.Context, + id, environmentNamespace string, +) (*domain.Webhook, error) { + query := ` + SELECT + id, + name, + description, + created_at, + updated_at + FROM + webhook + WHERE + id = ? AND + environment_namespace = ? 
+ ` + webhook := proto.Webhook{} + err := s.qe.QueryRowContext( + ctx, + query, + id, + environmentNamespace, + ).Scan( + &webhook.Id, + &webhook.Name, + &webhook.Description, + &webhook.CreatedAt, + &webhook.UpdatedAt, + ) + if err != nil { + if err == mysql.ErrNoRows { + return nil, ErrWebhookNotFound + } + return nil, err + } + return &domain.Webhook{Webhook: &webhook}, nil +} + +func (s *webhookStorage) ListWebhooks( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, +) ([]*proto.Webhook, int, int64, error) { + whereSQL, whereArgs := mysql.ConstructWhereSQLString(whereParts) + orderBySQL := mysql.ConstructOrderBySQLString(orders) + limitOffsetSQL := mysql.ConstructLimitOffsetSQLString(limit, offset) + query := fmt.Sprintf(` + SELECT + id, + name, + description, + created_at, + updated_at + FROM + webhook + %s %s %s + `, whereSQL, orderBySQL, limitOffsetSQL, + ) + rows, err := s.qe.QueryContext(ctx, query, whereArgs...) + if err != nil { + return nil, 0, 0, err + } + defer rows.Close() + webhooks := make([]*proto.Webhook, 0, limit) + for rows.Next() { + webhook := proto.Webhook{} + err := rows.Scan( + &webhook.Id, + &webhook.Name, + &webhook.Description, + &webhook.CreatedAt, + &webhook.UpdatedAt, + ) + if err != nil { + return nil, 0, 0, err + } + webhooks = append(webhooks, &webhook) + } + if rows.Err() != nil { + return nil, 0, 0, err + } + nextOffset := offset + len(webhooks) + countQuery := fmt.Sprintf(` + SELECT + COUNT(1) + FROM + webhook + %s %s + `, whereSQL, orderBySQL, + ) + var totalCount int64 + if err := s.qe.QueryRowContext(ctx, countQuery, whereArgs...).Scan(&totalCount); err != nil { + return nil, 0, 0, err + } + return webhooks, nextOffset, totalCount, nil +} diff --git a/pkg/autoops/webhookhandler/BUILD.bazel b/pkg/autoops/webhookhandler/BUILD.bazel new file mode 100644 index 000000000..cf7ec5d6b --- /dev/null +++ b/pkg/autoops/webhookhandler/BUILD.bazel @@ -0,0 +1,50 @@ 
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "evaluation.go", + "handler.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/autoops/webhookhandler", + visibility = ["//visibility:public"], + deps = [ + "//pkg/auth/client:go_default_library", + "//pkg/autoops/api:go_default_library", + "//pkg/autoops/command:go_default_library", + "//pkg/autoops/domain:go_default_library", + "//pkg/autoops/storage/v2:go_default_library", + "//pkg/crypto:go_default_library", + "//pkg/feature/client:go_default_library", + "//pkg/log:go_default_library", + "//pkg/pubsub/publisher:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//pkg/token:go_default_library", + "//proto/account:go_default_library", + "//proto/autoops:go_default_library", + "//proto/event/domain:go_default_library", + "@com_github_itchyny_gojq//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["handler_test.go"], + data = glob(["testdata/**"]), + embed = [":go_default_library"], + deps = [ + "//pkg/auth/client/mock:go_default_library", + "//pkg/autoops/domain:go_default_library", + "//pkg/feature/client/mock:go_default_library", + "//pkg/log:go_default_library", + "//pkg/pubsub/publisher/mock:go_default_library", + "//pkg/storage/v2/mysql/mock:go_default_library", + "//pkg/token:go_default_library", + "//proto/autoops:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_golang_protobuf//ptypes:go_default_library_gen", + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + ], +) diff --git a/pkg/autoops/webhookhandler/evaluation.go b/pkg/autoops/webhookhandler/evaluation.go new file mode 100644 index 000000000..c918711f5 --- /dev/null +++ b/pkg/autoops/webhookhandler/evaluation.go @@ -0,0 +1,207 @@ +// Copyright 2022 The 
Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package webhookhandler + +import ( + "context" + "encoding/json" + "fmt" + "reflect" + "strconv" + + "github.com/itchyny/gojq" + + autoopsproto "github.com/bucketeer-io/bucketeer/proto/autoops" +) + +func evaluateClause( + ctx context.Context, + clause *autoopsproto.WebhookClause, + payload interface{}, +) (bool, error) { + if len(clause.Conditions) == 0 { + return false, fmt.Errorf("WebhookClause has no conditions") + } + // All conditions are combined with implicit AND + for _, condition := range clause.Conditions { + asmt, err := evaluateCondition(ctx, condition, payload) + if err != nil { + return false, err + } + if asmt { + continue + } + return false, nil + } + return true, nil +} + +func evaluateCondition( + ctx context.Context, + condition *autoopsproto.WebhookClause_Condition, + payload interface{}, +) (bool, error) { + filtered, err := filterWebhookPayload( + ctx, + condition.Filter, + payload, + ) + if err != nil { + return false, err + } + if filtered == nil { + return false, nil + } + var specified interface{} + if err := json.Unmarshal([]byte(condition.Value), &specified); err != nil { + return false, err + } + cFiltered, err := convertType(reflect.TypeOf(specified), filtered) + if err != nil { + return false, err + } + switch op := condition.Operator; op { + case autoopsproto.WebhookClause_Condition_EQUAL: + return specified == cFiltered, nil + case 
autoopsproto.WebhookClause_Condition_NOT_EQUAL: + return specified != cFiltered, nil + case autoopsproto.WebhookClause_Condition_MORE_THAN: + specifiedF, cFilteredF, ok := extractFloat64Values(specified, cFiltered) + if ok { + return specifiedF < cFilteredF, nil + } + specifiedS, cFilteredS, ok := extractStringValues(specified, cFiltered) + if ok { + return specifiedS < cFilteredS, nil + } + return false, fmt.Errorf("Failed to evaluate %v < %v", specified, cFiltered) + case autoopsproto.WebhookClause_Condition_MORE_THAN_OR_EQUAL: + specifiedF, cFilteredF, ok := extractFloat64Values(specified, cFiltered) + if ok { + return specifiedF <= cFilteredF, nil + } + specifiedS, cFilteredS, ok := extractStringValues(specified, cFiltered) + if ok { + return specifiedS <= cFilteredS, nil + } + return false, fmt.Errorf("Failed to evaluate %v <= %v", specified, cFiltered) + case autoopsproto.WebhookClause_Condition_LESS_THAN: + specifiedF, cFilteredF, ok := extractFloat64Values(specified, cFiltered) + if ok { + return specifiedF > cFilteredF, nil + } + specifiedS, cFilteredS, ok := extractStringValues(specified, cFiltered) + if ok { + return specifiedS > cFilteredS, nil + } + return false, fmt.Errorf("Failed to evaluate %v > %v", specified, cFiltered) + case autoopsproto.WebhookClause_Condition_LESS_THAN_OR_EQUAL: + specifiedF, cFilteredF, ok := extractFloat64Values(specified, cFiltered) + if ok { + return specifiedF >= cFilteredF, nil + } + specifiedS, cFilteredS, ok := extractStringValues(specified, cFiltered) + if ok { + return specifiedS >= cFilteredS, nil + } + return false, fmt.Errorf("Failed to evaluate %v >= %v", specified, cFiltered) + default: + return false, fmt.Errorf("Unknown operation: %s", op) + } +} + +func filterWebhookPayload( + ctx context.Context, + filter string, + payload interface{}, +) (interface{}, error) { + query, err := gojq.Parse(filter) + if err != nil { + return nil, err + } + iter := query.RunWithContext(ctx, payload) + var results []interface{} + 
for { + v, ok := iter.Next() + if !ok { + break + } + if err, ok := v.(error); ok { + return nil, err + } + if v == nil { + continue + } + results = append(results, v) + } + if len(results) > 1 { + return false, fmt.Errorf("Got multiple values: %s", filter) + } + if len(results) == 1 { + return results[0], nil + } + return nil, nil +} + +func extractFloat64Values(i1, i2 interface{}) (float64, float64, bool) { + // json.Unmarshal make JSON number into float64 + var ( + f1, f2 float64 + ok bool + ) + if f1, ok = i1.(float64); !ok { + return f1, f2, false + } + if f2, ok = i2.(float64); !ok { + return f1, f2, false + } + return f1, f2, true +} + +func extractStringValues(i1, i2 interface{}) (string, string, bool) { + var ( + s1, s2 string + ok bool + ) + if s1, ok = i1.(string); !ok { + return s1, s2, false + } + if s2, ok = i2.(string); !ok { + return s1, s2, false + } + return s1, s2, true +} + +func convertType(targetType reflect.Type, original interface{}) (interface{}, error) { + if targetType == reflect.TypeOf(original) { + return original, nil + } + switch targetType.Kind() { + case reflect.String: + if f, ok := original.(float64); ok { + converted := strconv.FormatFloat(f, 'f', -1, 64) + return converted, nil + } + return nil, fmt.Errorf("Failed to convert %v to %v", original, targetType) + case reflect.Float64: + if s, ok := original.(string); ok { + converted, err := strconv.ParseFloat(s, 64) + return converted, err + } + return nil, fmt.Errorf("Failed to convert %v to %v", original, targetType) + default: + return nil, fmt.Errorf("Not supported type %v", targetType) + } +} diff --git a/pkg/autoops/webhookhandler/handler.go b/pkg/autoops/webhookhandler/handler.go new file mode 100644 index 000000000..3413ca3fc --- /dev/null +++ b/pkg/autoops/webhookhandler/handler.go @@ -0,0 +1,319 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package webhookhandler + +import ( + "context" + "encoding/base64" + "encoding/json" + "errors" + "io/ioutil" + "net/http" + "strings" + + "go.uber.org/zap" + + "github.com/bucketeer-io/bucketeer/pkg/autoops/command" + "github.com/bucketeer-io/bucketeer/pkg/autoops/domain" + "github.com/bucketeer-io/bucketeer/pkg/crypto" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + + authclient "github.com/bucketeer-io/bucketeer/pkg/auth/client" + autoopsapi "github.com/bucketeer-io/bucketeer/pkg/autoops/api" + autoopsdomain "github.com/bucketeer-io/bucketeer/pkg/autoops/domain" + v2as "github.com/bucketeer-io/bucketeer/pkg/autoops/storage/v2" + featureclient "github.com/bucketeer-io/bucketeer/pkg/feature/client" + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + "github.com/bucketeer-io/bucketeer/pkg/token" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + autoopsproto "github.com/bucketeer-io/bucketeer/proto/autoops" + event "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +const ( + urlParamKeyAuth = "auth" +) + +var ( + errAuthKeyEmpty = errors.New("autoops: auth key is empty") + errAlreadyTriggered = errors.New("autoops: rule has already triggered") + errPermissionDenied = errors.New("autoops: permission denied") +) + +func WithLogger(logger *zap.Logger) Option { + return func(o *options) { + o.logger = logger + } +} + 
+type handler struct { + mysqlClient mysql.Client + authClient authclient.Client + featureClient featureclient.Client + publisher publisher.Publisher + editor *event.Editor + webhookCryptoUtil crypto.EncrypterDecrypter + logger *zap.Logger +} + +type Option func(*options) + +type options struct { + logger *zap.Logger +} + +func NewHandler( + mysqlClient mysql.Client, + authClient authclient.Client, + featureClient featureclient.Client, + publisher publisher.Publisher, + verifier token.Verifier, + tokenPath string, + webhookCryptoUtil crypto.EncrypterDecrypter, + opts ...Option, +) (*handler, error) { + options := &options{ + logger: zap.NewNop(), + } + for _, opt := range opts { + opt(options) + } + data, err := ioutil.ReadFile(tokenPath) + if err != nil { + return nil, err + } + token, err := verifier.Verify(strings.TrimSpace(string(data))) + if err != nil { + return nil, err + } + if !token.IsAdmin() { + return nil, errPermissionDenied + } + editor := &event.Editor{ + Email: token.Email, + Role: accountproto.Account_OWNER, + IsAdmin: true, + } + return &handler{ + mysqlClient: mysqlClient, + authClient: authClient, + featureClient: featureClient, + publisher: publisher, + editor: editor, + webhookCryptoUtil: webhookCryptoUtil, + logger: options.logger.Named("webhookhandler"), + }, nil +} + +func (h *handler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { + ctx := req.Context() + if ctx.Err() == context.Canceled { + h.logger.Warn( + "Request was canceled", + log.FieldsFromImcomingContext(ctx)..., + ) + resp.WriteHeader(http.StatusBadRequest) + return + } + secret, err := validateParams(req) + if err != nil { + h.logger.Warn( + "Invalid url parameters", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + resp.WriteHeader(http.StatusBadRequest) + return + } + ws, err := h.authWebhook(ctx, secret) + if err != nil { + h.logger.Error( + "Failed to get webhook configuration", + log.FieldsFromImcomingContext(ctx).AddFields( + 
zap.Error(err), + )..., + ) + resp.WriteHeader(http.StatusInternalServerError) + return + } + tx, err := h.mysqlClient.BeginTx(ctx) + if err != nil { + h.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + resp.WriteHeader(http.StatusInternalServerError) + return + } + err = h.mysqlClient.RunInTransaction(ctx, tx, func() error { + webhookStorage := v2as.NewWebhookStorage(tx) + webhook, err := webhookStorage.GetWebhook(ctx, ws.GetWebhookID(), ws.GetEnvironmentNamespace()) + if err != nil { + return err + } + autoOpsRuleStorage := v2as.NewAutoOpsRuleStorage(tx) + whereParts := []mysql.WherePart{ + mysql.NewFilter("deleted", "=", false), + mysql.NewFilter("environment_namespace", "=", ws.GetEnvironmentNamespace()), + } + autoOpsRules, _, err := autoOpsRuleStorage.ListAutoOpsRules( + ctx, + whereParts, + nil, + mysql.QueryNoLimit, + mysql.QueryNoOffset, + ) + if err != nil { + return err + } + var payload interface{} + if err := json.NewDecoder(req.Body).Decode(&payload); err != nil { + return err + } + // Handle webhook and assesses all rules + // and return the last occurred error. 
+ var lastErr error + for _, r := range autoOpsRules { + rule := &autoopsdomain.AutoOpsRule{AutoOpsRule: r} + asmt, err := h.assessAutoOpsRule(ctx, rule, webhook.Id, payload) + if err != nil { + lastErr = err + } + if asmt { + if err = h.executeAutoOps(ctx, rule, ws.GetEnvironmentNamespace(), autoOpsRuleStorage); err != nil { + lastErr = err + } + } + } + return lastErr + }) + if err != nil { + h.logger.Error( + "Failed to execute autoOpsRule", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", ws.GetEnvironmentNamespace()), + )..., + ) + resp.WriteHeader(http.StatusInternalServerError) + return + } + resp.WriteHeader(http.StatusOK) +} + +func validateParams(req *http.Request) (string, error) { + secret := req.URL.Query().Get(urlParamKeyAuth) + if secret == "" { + return "", errAuthKeyEmpty + } + return secret, nil +} + +func (h *handler) authWebhook( + ctx context.Context, + secret string, +) (domain.WebhookSecret, error) { + decoded, err := base64.RawURLEncoding.DecodeString(secret) + if err != nil { + h.logger.Error( + "Failed to decode encrypted secret", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, err + } + decrypted, err := h.webhookCryptoUtil.Decrypt(ctx, decoded) + if err != nil { + h.logger.Error( + "Failed to decrypt encrypted secret", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, err + } + ws, err := domain.UnmarshalWebhookSecret(decrypted) + if err != nil { + h.logger.Error( + "Failed to unmarshal webhook secret", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, err + } + return ws, nil +} + +func (h *handler) assessAutoOpsRule( + ctx context.Context, + a *autoopsdomain.AutoOpsRule, + tarId string, + payload interface{}, +) (bool, error) { + webhookClauses, err := a.ExtractWebhookClauses() + if err != nil { + h.logger.Error("Failed to extract webhook clauses", + zap.Error(err), + 
zap.String("featureId", a.FeatureId), + zap.String("autoOpsRuleId", a.Id), + ) + return false, err + } + var lastErr error + // All clauses are combined with implicit OR + for _, w := range webhookClauses { + if w.WebhookId != tarId { + continue + } + asmt, err := evaluateClause(ctx, w, payload) + if err != nil { + h.logger.Error("Skipping evaluation because an error has occurred", + zap.Error(err), + zap.String("featureId", a.FeatureId), + zap.String("autoOpsRuleId", a.Id), + ) + lastErr = err + continue + } + if asmt { + h.logger.Info("Clause satisfies condition", + zap.String("featureId", a.FeatureId), + zap.String("autoOpsRuleId", a.Id), + zap.Any("webhookClause", w), + ) + return true, lastErr + } + } + return false, lastErr +} + +func (h *handler) executeAutoOps( + ctx context.Context, + rule *autoopsdomain.AutoOpsRule, + environmentNamespace string, + storage v2as.AutoOpsRuleStorage, +) error { + if rule.AlreadyTriggered() { + return errAlreadyTriggered + } + handler := command.NewAutoOpsCommandHandler(h.editor, rule, h.publisher, environmentNamespace) + if err := handler.Handle(ctx, &autoopsproto.ChangeAutoOpsRuleTriggeredAtCommand{}); err != nil { + return err + } + if err := storage.UpdateAutoOpsRule(ctx, rule, environmentNamespace); err != nil { + return err + } + return autoopsapi.ExecuteOperation(ctx, environmentNamespace, rule, h.featureClient, h.logger) +} diff --git a/pkg/autoops/webhookhandler/handler_test.go b/pkg/autoops/webhookhandler/handler_test.go new file mode 100644 index 000000000..e10250d10 --- /dev/null +++ b/pkg/autoops/webhookhandler/handler_test.go @@ -0,0 +1,571 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package webhookhandler + +import ( + "context" + "encoding/base64" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + "github.com/golang/mock/gomock" + "github.com/golang/protobuf/ptypes" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + authclientmock "github.com/bucketeer-io/bucketeer/pkg/auth/client/mock" + autoopsdomain "github.com/bucketeer-io/bucketeer/pkg/autoops/domain" + featureclientmock "github.com/bucketeer-io/bucketeer/pkg/feature/client/mock" + "github.com/bucketeer-io/bucketeer/pkg/log" + publishermock "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher/mock" + mysqlmock "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + "github.com/bucketeer-io/bucketeer/pkg/token" + autoopsproto "github.com/bucketeer-io/bucketeer/proto/autoops" +) + +type dummyWebhookCryptoUtil struct{} + +func (u *dummyWebhookCryptoUtil) Encrypt(ctx context.Context, data []byte) ([]byte, error) { + return []byte(data), nil +} + +func (u *dummyWebhookCryptoUtil) Decrypt(ctx context.Context, data []byte) ([]byte, error) { + return []byte(data), nil +} + +func TestNewHandler(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + issuer := "test_issuer" + clientID := "test_client_id" + testcases := map[string]struct { + rawIDToken string + valid bool + }{ + "err: malformed jwt": { + rawIDToken: "", + valid: false, + }, + "err: invalid jwt": { + rawIDToken: "testdata/invalid-token", + valid: false, + }, + "success": { + rawIDToken: 
"testdata/valid-token", + valid: true, + }, + } + verifier, err := token.NewVerifier("testdata/valid-public.pem", issuer, clientID) + require.NoError(t, err) + for msg, p := range testcases { + t.Run(msg, func(t *testing.T) { + h, err := NewHandler( + mysqlmock.NewMockClient(mockController), + authclientmock.NewMockClient(mockController), + featureclientmock.NewMockClient(mockController), + publishermock.NewMockPublisher(mockController), + verifier, + p.rawIDToken, + &dummyWebhookCryptoUtil{}, + ) + if p.valid { + assert.NotNil(t, h) + assert.NoError(t, err) + } else { + assert.Nil(t, h) + assert.Error(t, err) + } + }) + } +} + +func TestServeHTTP(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + logger, err := log.NewLogger() + require.NoError(t, err) + h := &handler{ + mysqlClient: mysqlmock.NewMockClient(mockController), + authClient: authclientmock.NewMockClient(mockController), + featureClient: featureclientmock.NewMockClient(mockController), + webhookCryptoUtil: &dummyWebhookCryptoUtil{}, + logger: logger, + } + patterns := map[string]struct { + setup func(*testing.T, *handler) + input *http.Request + expected int + }{ + "fail: bad params": { + input: httptest.NewRequest("POST", + "/hook?foo=bar", + nil), + expected: http.StatusBadRequest, + }, + "fail: auth error": { + input: httptest.NewRequest("POST", + "/hook?auth=secret", + nil), + expected: http.StatusInternalServerError, + }, + "success": { + setup: func(t *testing.T, h *handler) { + h.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + h.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + }, + // The test secret below will return the following proto + // &autoopsdomain.webhookSecret { + // WebhookID: "id-0", + // EnvironmentNamespace: "ns0", + // } + input: httptest.NewRequest("POST", + 
"/hook?auth=eyJ3ZWJob29rX2lkIjoiaWQtMCIsImVudmlyb25tZW50X25hbWVzcGFjZSI6Im5zMCJ9", + nil), + expected: http.StatusOK, + }, + } + + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + if p.setup != nil { + p.setup(t, h) + } + actual := httptest.NewRecorder() + h.ServeHTTP(actual, p.input) + assert.Equal(t, p.expected, actual.Code) + }) + } +} + +func TestHandleWebhook(t *testing.T) { + t.Parallel() + convert := func(wcs []autoopsproto.WebhookClause) []*autoopsproto.Clause { + var clauses []*autoopsproto.Clause + for _, w := range wcs { + c, err := ptypes.MarshalAny(&w) + require.NoError(t, err) + clauses = append(clauses, &autoopsproto.Clause{Clause: c}) + } + return clauses + } + ctx := context.TODO() + logger, err := log.NewLogger() + require.NoError(t, err) + mockController := gomock.NewController(t) + defer mockController.Finish() + + cases := []struct { + name string + webhookId string + payload string + autoOpsRule *autoopsdomain.AutoOpsRule + wantErr bool + expected bool + }{ + { + name: "1. execute rule-1", + webhookId: "webhook-1", + payload: `{"body":{"Alert id": 123}}`, + expected: true, + autoOpsRule: &autoopsdomain.AutoOpsRule{ + AutoOpsRule: &autoopsproto.AutoOpsRule{ + Id: "rule-1", + Clauses: convert([]autoopsproto.WebhookClause{ + { + WebhookId: "webhook-1", + Conditions: []*autoopsproto.WebhookClause_Condition{ + { + Filter: `.body."Alert id"`, + Value: `123`, + Operator: autoopsproto.WebhookClause_Condition_EQUAL, + }, + }, + }, + }), + }, + }, + }, + { + name: "2. 
execute rule-1", + webhookId: "webhook-1", + payload: `{"body":{"Alert id": 123, "Status": "Open", "Impacted plays": 10}}`, + expected: true, + autoOpsRule: &autoopsdomain.AutoOpsRule{ + AutoOpsRule: &autoopsproto.AutoOpsRule{ + Id: "rule-1", + Clauses: convert([]autoopsproto.WebhookClause{ + { + WebhookId: "webhook-1", + Conditions: []*autoopsproto.WebhookClause_Condition{ + { + Filter: `.body."Alert id"`, + Value: `123`, + Operator: autoopsproto.WebhookClause_Condition_EQUAL, + }, + { + Filter: `.body.Status`, + Value: `"Open"`, + Operator: autoopsproto.WebhookClause_Condition_EQUAL, + }, + { + Filter: `.body."Impacted plays"`, + Value: `5`, + Operator: autoopsproto.WebhookClause_Condition_MORE_THAN_OR_EQUAL, + }, + }, + }, + }), + }, + }, + }, + { + name: "execute rule-1 with converting", + webhookId: "webhook-1", + payload: `{"body":{"Alert id": 123, "Status": "Open", "Impacted users": "100"}}`, + expected: true, + autoOpsRule: &autoopsdomain.AutoOpsRule{ + AutoOpsRule: &autoopsproto.AutoOpsRule{ + Id: "rule-1", + Clauses: convert([]autoopsproto.WebhookClause{ + { + WebhookId: "webhook-1", + Conditions: []*autoopsproto.WebhookClause_Condition{ + { + Filter: `.body."Alert id"`, + Value: `"123"`, + Operator: autoopsproto.WebhookClause_Condition_EQUAL, + }, + { + Filter: `.body.Status`, + Value: `"Open"`, + Operator: autoopsproto.WebhookClause_Condition_EQUAL, + }, + { + Filter: `.body."Impacted users"`, + Value: `50`, + Operator: autoopsproto.WebhookClause_Condition_MORE_THAN_OR_EQUAL, + }, + }, + }, + }), + }, + }, + }, + { + name: "execute rule-1 with string comparison for lexical order", + webhookId: "webhook-1", + payload: `{"body":{"foo": "abc", "bar": "ABC"}}`, + expected: true, + autoOpsRule: &autoopsdomain.AutoOpsRule{ + AutoOpsRule: &autoopsproto.AutoOpsRule{ + Id: "rule-1", + Clauses: convert([]autoopsproto.WebhookClause{ + { + WebhookId: "webhook-1", + Conditions: []*autoopsproto.WebhookClause_Condition{ + { + Filter: `.body.foo`, + Value: `"ab"`, + 
Operator: autoopsproto.WebhookClause_Condition_MORE_THAN, + }, + { + Filter: `.body.bar`, + Value: `"XYZ"`, + Operator: autoopsproto.WebhookClause_Condition_LESS_THAN, + }, + }, + }, + }), + }, + }, + }, + { + name: "execute nothing because of conditions not matched", + webhookId: "webhook-1", + payload: `{"body":{"Alert id": 123, "Status": "Open", "Impacted plays": 3}}`, + expected: false, + autoOpsRule: &autoopsdomain.AutoOpsRule{ + AutoOpsRule: &autoopsproto.AutoOpsRule{ + Id: "rule-1", + Clauses: convert([]autoopsproto.WebhookClause{ + { + WebhookId: "webhook-1", + Conditions: []*autoopsproto.WebhookClause_Condition{ + { + Filter: `.body."Alert id"`, + Value: `123`, + Operator: autoopsproto.WebhookClause_Condition_EQUAL, + }, + { + Filter: `.body.Status`, + Value: `"Open"`, + Operator: autoopsproto.WebhookClause_Condition_EQUAL, + }, + { + Filter: `.body."Impacted plays"`, + Value: `10`, + Operator: autoopsproto.WebhookClause_Condition_MORE_THAN, + }, + }, + }, + }), + }, + }, + }, + { + name: "execute nothing because of conditions not matched", + webhookId: "webhook-1", + payload: `{"body":{"Alert id": 123, "Status": "Open", "Impacted plays": 10}}`, + expected: false, + autoOpsRule: &autoopsdomain.AutoOpsRule{ + AutoOpsRule: &autoopsproto.AutoOpsRule{ + Id: "rule-1", + Clauses: convert([]autoopsproto.WebhookClause{ + { + WebhookId: "webhook-1", + Conditions: []*autoopsproto.WebhookClause_Condition{ + { + Filter: `.body."Alert id"`, + Value: `123`, + Operator: autoopsproto.WebhookClause_Condition_EQUAL, + }, + { + Filter: `.body.Status`, + Value: `"Close"`, + Operator: autoopsproto.WebhookClause_Condition_EQUAL, + }, + { + Filter: `.body."Impacted plays"`, + Value: `5`, + Operator: autoopsproto.WebhookClause_Condition_MORE_THAN_OR_EQUAL, + }, + }, + }, + }), + }, + }, + }, + { + name: "execute nothing because of conditions not matched", + webhookId: "webhook-1", + payload: `{"body":{"Alert id": 123, "Status": "Open", "Impacted plays": 10}}`, + expected: false, 
+ autoOpsRule: &autoopsdomain.AutoOpsRule{ + AutoOpsRule: &autoopsproto.AutoOpsRule{ + Id: "rule-1", + Clauses: convert([]autoopsproto.WebhookClause{ + { + WebhookId: "webhook-1", + Conditions: []*autoopsproto.WebhookClause_Condition{ + { + Filter: `.body."Alert id"`, + Value: `321`, + Operator: autoopsproto.WebhookClause_Condition_EQUAL, + }, + { + Filter: `.body.Status`, + Value: `"Open"`, + Operator: autoopsproto.WebhookClause_Condition_EQUAL, + }, + { + Filter: `.body."Impacted plays"`, + Value: `5`, + Operator: autoopsproto.WebhookClause_Condition_MORE_THAN_OR_EQUAL, + }, + }, + }, + }), + }, + }, + }, + { + name: "execute nothing because of webhook id is not matched", + webhookId: "webhook-1", + payload: `{"body":{"Alert id": 123}}`, + expected: false, + autoOpsRule: &autoopsdomain.AutoOpsRule{ + AutoOpsRule: &autoopsproto.AutoOpsRule{ + Id: "rule-1", + Clauses: convert([]autoopsproto.WebhookClause{ + { + WebhookId: "webhook-2", + Conditions: []*autoopsproto.WebhookClause_Condition{ + { + Filter: `.body."Alert id"`, + Value: `123`, + Operator: autoopsproto.WebhookClause_Condition_EQUAL, + }, + }, + }, + }), + }, + }, + }, + { + name: `execute nothing because of typo "Alert id" with "Alert Id"`, + webhookId: "webhook-1", + payload: `{"body":{"Alert id": 123, "Status": "Open", "Impacted plays": 10}}`, + expected: false, + autoOpsRule: &autoopsdomain.AutoOpsRule{ + AutoOpsRule: &autoopsproto.AutoOpsRule{ + Id: "rule-1", + Clauses: convert([]autoopsproto.WebhookClause{ + { + WebhookId: "webhook-1", + Conditions: []*autoopsproto.WebhookClause_Condition{ + { + Filter: `.body."Alert Id"`, + Value: `123`, + Operator: autoopsproto.WebhookClause_Condition_EQUAL, + }, + }, + }, + }), + }, + }, + }, + { + name: `execute nothing because of invalid filter`, + webhookId: "webhook-1", + payload: `{"body":{"Alert id": 123, "Status": "Open", "Impacted plays": 10}}`, + expected: false, + wantErr: true, + autoOpsRule: &autoopsdomain.AutoOpsRule{ + AutoOpsRule: 
&autoopsproto.AutoOpsRule{ + Id: "rule-1", + Clauses: convert([]autoopsproto.WebhookClause{ + { + WebhookId: "webhook-1", + Conditions: []*autoopsproto.WebhookClause_Condition{ + { + Filter: `.body | ..`, + Value: `123`, + Operator: autoopsproto.WebhookClause_Condition_EQUAL, + }, + }, + }, + }), + }, + }, + }, + { + name: `execute nothing because of invalid data`, + webhookId: "webhook-1", + payload: `{"body":{"Alert id": 123, "Status": "Open", "Impacted plays": 10}}`, + expected: false, + wantErr: true, + autoOpsRule: &autoopsdomain.AutoOpsRule{ + AutoOpsRule: &autoopsproto.AutoOpsRule{ + Id: "rule-1", + Clauses: convert([]autoopsproto.WebhookClause{ + { + WebhookId: "webhook-1", + Conditions: []*autoopsproto.WebhookClause_Condition{}, + }, + }), + }, + }, + }, + { + name: `execute nothing because of converting error`, + webhookId: "webhook-1", + payload: `{"body":{"foo": true}}`, + expected: false, + wantErr: true, + autoOpsRule: &autoopsdomain.AutoOpsRule{ + AutoOpsRule: &autoopsproto.AutoOpsRule{ + Id: "rule-1", + Clauses: convert([]autoopsproto.WebhookClause{ + { + WebhookId: "webhook-1", + Conditions: []*autoopsproto.WebhookClause_Condition{ + { + Filter: `.body.foo`, + Value: `123`, + Operator: autoopsproto.WebhookClause_Condition_EQUAL, + }, + }, + }, + }), + }, + }, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + var payload interface{} + err := json.Unmarshal([]byte(c.payload), &payload) + require.NoError(t, err) + h := &handler{ + mysqlClient: mysqlmock.NewMockClient(mockController), + authClient: authclientmock.NewMockClient(mockController), + featureClient: featureclientmock.NewMockClient(mockController), + logger: logger, + } + result, err := h.assessAutoOpsRule( + ctx, + c.autoOpsRule, + c.webhookId, + payload, + ) + assert.Equal(t, c.expected, result) + assert.Equal(t, c.wantErr, err != nil) + if err != nil { + t.Log(err) + } + }) + } +} + +func TestAuthWebhook(t *testing.T) { + t.Parallel() + mockController := 
gomock.NewController(t) + defer mockController.Finish() + logger, err := log.NewLogger() + require.NoError(t, err) + h := &handler{ + mysqlClient: mysqlmock.NewMockClient(mockController), + authClient: authclientmock.NewMockClient(mockController), + featureClient: featureclientmock.NewMockClient(mockController), + webhookCryptoUtil: &dummyWebhookCryptoUtil{}, + logger: logger, + } + ctx := context.TODO() + + testcases := map[string]struct { + id string + environmentNamespace string + }{ + "success": { + id: "id-1", + environmentNamespace: "ns-1", + }, + } + for msg, p := range testcases { + t.Run(msg, func(t *testing.T) { + ws := autoopsdomain.NewWebhookSecret(p.id, p.environmentNamespace) + encoded, err := json.Marshal(ws) + require.NoError(t, err) + actual, err := h.authWebhook(ctx, base64.RawURLEncoding.EncodeToString(encoded)) + require.NoError(t, err) + assert.Equal(t, ws, actual) + }) + } +} diff --git a/pkg/autoops/webhookhandler/testdata/invalid-token b/pkg/autoops/webhookhandler/testdata/invalid-token new file mode 100644 index 000000000..408e48fff --- /dev/null +++ b/pkg/autoops/webhookhandler/testdata/invalid-token @@ -0,0 +1 @@ +eyJhbGciOiJSUzI1NiJ9.eyJhdWQiOiJ0ZXN0X2NsaWVudF9pZCIsImVtYWlsIjoidGVzdEBlbWFpbCIsImV4cCI6IjIxMjItMDUtMjVUMDU6MDU6MDYuODUzNTIxWiIsImlhdCI6IjAwMDEtMDEtMDFUMDA6MDA6MDBaIiwiaXNzIjoidGVzdF9pc3N1ZXIiLCJyb2xlIjowLCJzdWIiOiJzdWJqZWN0In0.aW52YWxpZC1zaWduYXR1cmU \ No newline at end of file diff --git a/pkg/autoops/webhookhandler/testdata/valid-public.pem b/pkg/autoops/webhookhandler/testdata/valid-public.pem new file mode 100644 index 000000000..9eb5a1d26 --- /dev/null +++ b/pkg/autoops/webhookhandler/testdata/valid-public.pem @@ -0,0 +1,14 @@ +-----BEGIN PUBLIC KEY----- +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA+tMNIgc1RCQRAcdH8E1y +SbkmHdhpJK5Y9tKGrr5jghSnYg1FxCOUxcBJ/SBRUEnCAL0DCc3Jr1LU1PdL+ad7 +k43jNYFQu8wdi0Lh4SypiRKco8W3UafmXRMW2SaD/74XZKW0KFsr/BQJ3Ounle+u +kT5kNXe+T3fodzfzok5ib1XdgMLYLSVQ4HMvFDIr7xCAth712Hvv6bIJvKifTcb8 
+rHbCaEog3oLRVfuQubUHQHOP4a6T0kUUWgIBUzDQxqmekrEdLGxlnhS94o259cPE +9VMUnnQPbgQOby7fPGSwPJT3BEiHdZkH8IH+MWXDSRIMAR6Y2LsOjTU5PUaMwrbN +p05zKY/rRnZpSc2g/ZviQ4CK4GWWMRsnzOLnHAykgeWC9JK/5QZwpmPwqrovW+LJ +UkNG7mCfw6gjkvxBDcCXzOyb7gX/2J9zZSKRjYybCrcJkKnVWoGdnAbYy1Nso3DU +BHIOSM3yWSVanYZTLSqIAIOMqdVajTVzB3nVz5M77Uuz6GhvF1cWH10jkNGvp0Ez +Qnrq/6CrOotvv04GOkuxBQ6kj9ir5J0rKQIhvRZ299C+/Sli1VPUPpAI8LtSOPFe +CC9ryFghEA/KibUAO/DpXdmNWtiddRuqYCxjdGrCmcMLS8xDe9Aage0DS6I/9g9P +4xVHLpWRMgh3PP/D3cIa4a0CAwEAAQ== +-----END PUBLIC KEY----- diff --git a/pkg/autoops/webhookhandler/testdata/valid-token b/pkg/autoops/webhookhandler/testdata/valid-token new file mode 100644 index 000000000..6638d7f94 --- /dev/null +++ b/pkg/autoops/webhookhandler/testdata/valid-token @@ -0,0 +1 @@ +eyJhbGciOiJSUzI1NiJ9.eyJhdWQiOiJ0ZXN0X2NsaWVudF9pZCIsImVtYWlsIjoidGVzdEBlbWFpbCIsImV4cCI6IjIxMjItMDUtMjVUMDU6MDM6NTAuMzkyMjE4WiIsImlhdCI6IjAwMDEtMDEtMDFUMDA6MDA6MDBaIiwiaXNzIjoidGVzdF9pc3N1ZXIiLCJyb2xlIjowLCJzdWIiOiJzdWJqZWN0In0.lnAmpahDIWNk06PtSZMFMcbZFycrCCX9Ireotq5E6_psQ6z9FgRVGYO2anJ4bb3HHkSM-o-57_VOOKCpVTERCi8xccEbZq2StpiayaG99Mg3nVXqDd3exjhJIoWEQ6bwlhDdDuvv8EqQfLYrFKJjXGpqlpLDpbq31GLbqlxM3dO6mItEzZ6RRNEEDtOKYYEQZhtGKyHEnowDBwdK70M6y7y09hB_uIX1PD6Ycw7eJy6zse01QOEpqWNaUtSotKjfa8LdtjQnriyGh0VNrjH6lNSbv8WRLOeUiv8X35fEfI8h2b-22ViWFnJgwnzu90PlS7GxjbuwoBdHreX61y-2bqg3cWLbzlp4olJQEWFicdEOhVvDrBqPcfrxquxkeYqp2yXGCU6vw1MwHVvaz2xw8AJRy1S2WY5oCwcfNkLeK4I53D1gcVgy-I45eXxUHBnHdPCumICsM6EsHcaWUxHSlZ46VUT2IXQFnjSI2O6j8R8vVjj4QqKM4y-Vm1sdll-3yx32IW56dxbr5E_cY8tPbX8BZ1PpBuiextrah4bTmVaISk7QEV__ksa1XioC1RXDt7mYgnbIqdyg4ru6bVMwATMKuD9VbXViDc3I3YWhvo-NfjCcsuVsgZUN2EvmadEd2eQcDi6tdkHJTIngOD0wP9BztuP-8mOt_hgEq8kWybU \ No newline at end of file diff --git a/pkg/backoff/BUILD.bazel b/pkg/backoff/BUILD.bazel new file mode 100644 index 000000000..45a801f8b --- /dev/null +++ b/pkg/backoff/BUILD.bazel @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ 
+ "backoff.go", + "constant.go", + "exponential.go", + "retry.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/backoff", + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = [ + "constant_test.go", + "exponential_test.go", + ], + embed = [":go_default_library"], + deps = [ + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + ], +) diff --git a/pkg/backoff/backoff.go b/pkg/backoff/backoff.go new file mode 100644 index 000000000..3172aa8bf --- /dev/null +++ b/pkg/backoff/backoff.go @@ -0,0 +1,26 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package backoff + +import ( + "time" +) + +type Backoff interface { + Next() time.Duration + Calls() int + Reset() + Clone() Backoff +} diff --git a/pkg/backoff/constant.go b/pkg/backoff/constant.go new file mode 100644 index 000000000..65a02e473 --- /dev/null +++ b/pkg/backoff/constant.go @@ -0,0 +1,54 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package backoff + +import ( + "time" +) + +type constant struct { + calls int + interval time.Duration +} + +func NewConstant(interval time.Duration) Backoff { + return &constant{ + interval: interval, + } +} + +func (b *constant) Next() time.Duration { + defer func() { + b.calls++ + }() + if b.calls == 0 { + return 0 + } + return b.interval +} + +func (b *constant) Calls() int { + return b.calls +} + +func (b *constant) Reset() { + b.calls = 0 +} + +func (b *constant) Clone() Backoff { + return &constant{ + interval: b.interval, + } +} diff --git a/pkg/backoff/constant_test.go b/pkg/backoff/constant_test.go new file mode 100644 index 000000000..056bd13c9 --- /dev/null +++ b/pkg/backoff/constant_test.go @@ -0,0 +1,34 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package backoff + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestConstant(t *testing.T) { + bo := NewConstant(time.Millisecond) + assert.Equal(t, 0, bo.Calls()) + assert.Equal(t, time.Duration(0), bo.Next()) + assert.Equal(t, 1, bo.Calls()) + for i := 2; i < 10; i++ { + d := bo.Next() + assert.Equal(t, time.Millisecond, d) + assert.Equal(t, i, bo.Calls()) + } +} diff --git a/pkg/backoff/exponential.go b/pkg/backoff/exponential.go new file mode 100644 index 000000000..d4cfe4398 --- /dev/null +++ b/pkg/backoff/exponential.go @@ -0,0 +1,66 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package backoff + +import ( + "math" + "math/rand" + "time" +) + +type exponential struct { + base time.Duration + max time.Duration + calls int + rand *rand.Rand +} + +func NewExponential(base, max time.Duration) Backoff { + return &exponential{ + base: base, + max: max, + rand: rand.New(rand.NewSource(time.Now().UTC().UnixNano())), + } +} + +// Implemented FullJitter algorithm +// https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/ +func (b *exponential) Next() time.Duration { + defer func() { + b.calls++ + }() + if b.calls == 0 { + return 0 + } + d := math.Min(float64(b.max), float64(b.base)*math.Pow(2, float64(b.calls-1))) + d = d * b.rand.Float64() + return time.Duration(d) +} + +func (b *exponential) Calls() int { + return b.calls +} + +func (b *exponential) Reset() { + b.calls = 0 +} + +func (b *exponential) Clone() Backoff { + return &exponential{ + base: b.base, + max: b.max, + rand: b.rand, + } +} diff --git a/pkg/backoff/exponential_test.go b/pkg/backoff/exponential_test.go new file mode 100644 index 000000000..99c143579 --- /dev/null +++ b/pkg/backoff/exponential_test.go @@ -0,0 +1,40 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package backoff + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestExponential(t *testing.T) { + eb := NewExponential(time.Millisecond, time.Second) + assert.Equal(t, 0, eb.Calls()) + assert.Equal(t, time.Duration(0), eb.Next()) + assert.Equal(t, 1, eb.Calls()) + for i := 2; i < 100; i++ { + d := eb.Next() + des := fmt.Sprintf("i = %d duration: %v", i, d) + require.True(t, d >= 0, des) + require.True(t, d <= time.Second, des) + require.True(t, eb.Calls() == i, des) + } + eb.Reset() + assert.Equal(t, 0, eb.Calls()) +} diff --git a/pkg/backoff/retry.go b/pkg/backoff/retry.go new file mode 100644 index 000000000..8e79bd67c --- /dev/null +++ b/pkg/backoff/retry.go @@ -0,0 +1,64 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package backoff + +import ( + "context" + "time" +) + +type Retry interface { + WaitNext() bool + Calls() int +} + +func NewRetry(ctx context.Context, max int, backoff Backoff) Retry { + return &retry{ + max: max, + backoff: backoff, + ctx: ctx, + } +} + +type retry struct { + max int + calls int + ctx context.Context + backoff Backoff +} + +func (r *retry) WaitNext() bool { + defer func() { + r.calls++ + }() + if r.calls >= r.max { + return false + } + d := r.backoff.Next() + if d == 0 { + return true + } + t := time.NewTimer(d) + select { + case <-r.ctx.Done(): + return false + case <-t.C: + } + return true +} + +func (r *retry) Calls() int { + return r.calls +} diff --git a/pkg/cache/BUILD.bazel b/pkg/cache/BUILD.bazel new file mode 100644 index 000000000..403c4298f --- /dev/null +++ b/pkg/cache/BUILD.bazel @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "cache.go", + "redis_cache.go", + "ttl_cache.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/cache", + visibility = ["//visibility:public"], + deps = [ + "//pkg/redis:go_default_library", + "//pkg/storage:go_default_library", + ], +) diff --git a/pkg/cache/cache.go b/pkg/cache/cache.go new file mode 100644 index 000000000..cc321e00e --- /dev/null +++ b/pkg/cache/cache.go @@ -0,0 +1,96 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE
+package cache
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/bucketeer-io/bucketeer/pkg/storage"
+)
+
+var (
+	// ErrNotFound is returned when the key is absent from the cache.
+	ErrNotFound = errors.New("cache: not found")
+	// ErrInvalidType is returned when a cached value has an unexpected type.
+	ErrInvalidType = errors.New("cache: not expected type")
+)
+
+// Cache is a basic read/write cache.
+type Cache interface {
+	Getter
+	Putter
+}
+
+// MultiGetCache adds batch reads and cursor scanning to Cache.
+type MultiGetCache interface {
+	Cache
+	MultiGetter
+}
+
+// MultiGetDeleteCache adds deletion to MultiGetCache.
+type MultiGetDeleteCache interface {
+	MultiGetCache
+	Deleter
+}
+
+type Getter interface {
+	Get(key interface{}) (interface{}, error)
+}
+
+type MultiGetter interface {
+	GetMulti(keys interface{}) ([]interface{}, error)
+	Scan(cursor, key, count interface{}) (uint64, []string, error)
+}
+
+type Putter interface {
+	Put(key interface{}, value interface{}) error
+}
+
+type Deleter interface {
+	Delete(key string) error
+}
+
+// FIXME: remove after persistent-redis migration
+type Lister interface {
+	Keys(pattern string, maxSize int) ([]string, error)
+}
+
+// MakeKey builds "<kind>:<id>" for the admin namespace and
+// "<environmentNamespace>:<kind>:<id>" for every other namespace.
+func MakeKey(kind, id, environmentNamespace string) string {
+	if environmentNamespace == storage.AdminEnvironmentNamespace {
+		return fmt.Sprintf("%s:%s", kind, id)
+	}
+	return fmt.Sprintf("%s:%s:%s", environmentNamespace, kind, id)
+}
+
+// MakeKeyPrefix builds the prefix that matches every key MakeKey would
+// produce for the given kind and namespace.
+func MakeKeyPrefix(kind, environmentNamespace string) string {
+	if environmentNamespace == storage.AdminEnvironmentNamespace {
+		return fmt.Sprintf("%s:", kind)
+	}
+	return fmt.Sprintf("%s:%s:", environmentNamespace, kind)
+}
+
+// MakeHashSlotKey creates a key to ensure that multiple keys are allocated in the same hash slot.
+// https://redis.io/topics/cluster-spec#keys-hash-tags +func MakeHashSlotKey(hashTag, id, environmentNamespace string) string { + if environmentNamespace == storage.AdminEnvironmentNamespace { + return fmt.Sprintf("{%s}%s", hashTag, id) + } + return fmt.Sprintf("{%s:%s}%s", environmentNamespace, hashTag, id) +} + +func Bytes(value interface{}) ([]byte, error) { + b, ok := value.([]byte) + if !ok { + return nil, ErrInvalidType + } + return b, nil +} diff --git a/pkg/cache/mock/BUILD.bazel b/pkg/cache/mock/BUILD.bazel new file mode 100644 index 000000000..b5840aced --- /dev/null +++ b/pkg/cache/mock/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["cache.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/cache/mock", + visibility = ["//visibility:public"], + deps = ["@com_github_golang_mock//gomock:go_default_library"], +) diff --git a/pkg/cache/mock/cache.go b/pkg/cache/mock/cache.go new file mode 100644 index 000000000..d126cd723 --- /dev/null +++ b/pkg/cache/mock/cache.go @@ -0,0 +1,447 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: cache.go + +// Package mock is a generated GoMock package. +package mock + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockCache is a mock of Cache interface. +type MockCache struct { + ctrl *gomock.Controller + recorder *MockCacheMockRecorder +} + +// MockCacheMockRecorder is the mock recorder for MockCache. +type MockCacheMockRecorder struct { + mock *MockCache +} + +// NewMockCache creates a new mock instance. +func NewMockCache(ctrl *gomock.Controller) *MockCache { + mock := &MockCache{ctrl: ctrl} + mock.recorder = &MockCacheMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockCache) EXPECT() *MockCacheMockRecorder { + return m.recorder +} + +// Get mocks base method. 
+func (m *MockCache) Get(key interface{}) (interface{}, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", key) + ret0, _ := ret[0].(interface{}) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockCacheMockRecorder) Get(key interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockCache)(nil).Get), key) +} + +// Put mocks base method. +func (m *MockCache) Put(key, value interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Put", key, value) + ret0, _ := ret[0].(error) + return ret0 +} + +// Put indicates an expected call of Put. +func (mr *MockCacheMockRecorder) Put(key, value interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockCache)(nil).Put), key, value) +} + +// MockMultiGetCache is a mock of MultiGetCache interface. +type MockMultiGetCache struct { + ctrl *gomock.Controller + recorder *MockMultiGetCacheMockRecorder +} + +// MockMultiGetCacheMockRecorder is the mock recorder for MockMultiGetCache. +type MockMultiGetCacheMockRecorder struct { + mock *MockMultiGetCache +} + +// NewMockMultiGetCache creates a new mock instance. +func NewMockMultiGetCache(ctrl *gomock.Controller) *MockMultiGetCache { + mock := &MockMultiGetCache{ctrl: ctrl} + mock.recorder = &MockMultiGetCacheMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockMultiGetCache) EXPECT() *MockMultiGetCacheMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockMultiGetCache) Get(key interface{}) (interface{}, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", key) + ret0, _ := ret[0].(interface{}) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. 
+func (mr *MockMultiGetCacheMockRecorder) Get(key interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockMultiGetCache)(nil).Get), key) +} + +// GetMulti mocks base method. +func (m *MockMultiGetCache) GetMulti(keys interface{}) ([]interface{}, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMulti", keys) + ret0, _ := ret[0].([]interface{}) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMulti indicates an expected call of GetMulti. +func (mr *MockMultiGetCacheMockRecorder) GetMulti(keys interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMulti", reflect.TypeOf((*MockMultiGetCache)(nil).GetMulti), keys) +} + +// Put mocks base method. +func (m *MockMultiGetCache) Put(key, value interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Put", key, value) + ret0, _ := ret[0].(error) + return ret0 +} + +// Put indicates an expected call of Put. +func (mr *MockMultiGetCacheMockRecorder) Put(key, value interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockMultiGetCache)(nil).Put), key, value) +} + +// Scan mocks base method. +func (m *MockMultiGetCache) Scan(cursor, key, count interface{}) (uint64, []string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Scan", cursor, key, count) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].([]string) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// Scan indicates an expected call of Scan. +func (mr *MockMultiGetCacheMockRecorder) Scan(cursor, key, count interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Scan", reflect.TypeOf((*MockMultiGetCache)(nil).Scan), cursor, key, count) +} + +// MockMultiGetDeleteCache is a mock of MultiGetDeleteCache interface. 
+type MockMultiGetDeleteCache struct { + ctrl *gomock.Controller + recorder *MockMultiGetDeleteCacheMockRecorder +} + +// MockMultiGetDeleteCacheMockRecorder is the mock recorder for MockMultiGetDeleteCache. +type MockMultiGetDeleteCacheMockRecorder struct { + mock *MockMultiGetDeleteCache +} + +// NewMockMultiGetDeleteCache creates a new mock instance. +func NewMockMultiGetDeleteCache(ctrl *gomock.Controller) *MockMultiGetDeleteCache { + mock := &MockMultiGetDeleteCache{ctrl: ctrl} + mock.recorder = &MockMultiGetDeleteCacheMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockMultiGetDeleteCache) EXPECT() *MockMultiGetDeleteCacheMockRecorder { + return m.recorder +} + +// Delete mocks base method. +func (m *MockMultiGetDeleteCache) Delete(key string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", key) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete. +func (mr *MockMultiGetDeleteCacheMockRecorder) Delete(key interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockMultiGetDeleteCache)(nil).Delete), key) +} + +// Get mocks base method. +func (m *MockMultiGetDeleteCache) Get(key interface{}) (interface{}, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", key) + ret0, _ := ret[0].(interface{}) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockMultiGetDeleteCacheMockRecorder) Get(key interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockMultiGetDeleteCache)(nil).Get), key) +} + +// GetMulti mocks base method. 
+func (m *MockMultiGetDeleteCache) GetMulti(keys interface{}) ([]interface{}, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMulti", keys) + ret0, _ := ret[0].([]interface{}) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMulti indicates an expected call of GetMulti. +func (mr *MockMultiGetDeleteCacheMockRecorder) GetMulti(keys interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMulti", reflect.TypeOf((*MockMultiGetDeleteCache)(nil).GetMulti), keys) +} + +// Put mocks base method. +func (m *MockMultiGetDeleteCache) Put(key, value interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Put", key, value) + ret0, _ := ret[0].(error) + return ret0 +} + +// Put indicates an expected call of Put. +func (mr *MockMultiGetDeleteCacheMockRecorder) Put(key, value interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockMultiGetDeleteCache)(nil).Put), key, value) +} + +// Scan mocks base method. +func (m *MockMultiGetDeleteCache) Scan(cursor, key, count interface{}) (uint64, []string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Scan", cursor, key, count) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].([]string) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// Scan indicates an expected call of Scan. +func (mr *MockMultiGetDeleteCacheMockRecorder) Scan(cursor, key, count interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Scan", reflect.TypeOf((*MockMultiGetDeleteCache)(nil).Scan), cursor, key, count) +} + +// MockGetter is a mock of Getter interface. +type MockGetter struct { + ctrl *gomock.Controller + recorder *MockGetterMockRecorder +} + +// MockGetterMockRecorder is the mock recorder for MockGetter. +type MockGetterMockRecorder struct { + mock *MockGetter +} + +// NewMockGetter creates a new mock instance. 
+func NewMockGetter(ctrl *gomock.Controller) *MockGetter { + mock := &MockGetter{ctrl: ctrl} + mock.recorder = &MockGetterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockGetter) EXPECT() *MockGetterMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockGetter) Get(key interface{}) (interface{}, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", key) + ret0, _ := ret[0].(interface{}) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockGetterMockRecorder) Get(key interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockGetter)(nil).Get), key) +} + +// MockMultiGetter is a mock of MultiGetter interface. +type MockMultiGetter struct { + ctrl *gomock.Controller + recorder *MockMultiGetterMockRecorder +} + +// MockMultiGetterMockRecorder is the mock recorder for MockMultiGetter. +type MockMultiGetterMockRecorder struct { + mock *MockMultiGetter +} + +// NewMockMultiGetter creates a new mock instance. +func NewMockMultiGetter(ctrl *gomock.Controller) *MockMultiGetter { + mock := &MockMultiGetter{ctrl: ctrl} + mock.recorder = &MockMultiGetterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockMultiGetter) EXPECT() *MockMultiGetterMockRecorder { + return m.recorder +} + +// GetMulti mocks base method. +func (m *MockMultiGetter) GetMulti(keys interface{}) ([]interface{}, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMulti", keys) + ret0, _ := ret[0].([]interface{}) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMulti indicates an expected call of GetMulti. 
+func (mr *MockMultiGetterMockRecorder) GetMulti(keys interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMulti", reflect.TypeOf((*MockMultiGetter)(nil).GetMulti), keys) +} + +// Scan mocks base method. +func (m *MockMultiGetter) Scan(cursor, key, count interface{}) (uint64, []string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Scan", cursor, key, count) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].([]string) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// Scan indicates an expected call of Scan. +func (mr *MockMultiGetterMockRecorder) Scan(cursor, key, count interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Scan", reflect.TypeOf((*MockMultiGetter)(nil).Scan), cursor, key, count) +} + +// MockPutter is a mock of Putter interface. +type MockPutter struct { + ctrl *gomock.Controller + recorder *MockPutterMockRecorder +} + +// MockPutterMockRecorder is the mock recorder for MockPutter. +type MockPutterMockRecorder struct { + mock *MockPutter +} + +// NewMockPutter creates a new mock instance. +func NewMockPutter(ctrl *gomock.Controller) *MockPutter { + mock := &MockPutter{ctrl: ctrl} + mock.recorder = &MockPutterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockPutter) EXPECT() *MockPutterMockRecorder { + return m.recorder +} + +// Put mocks base method. +func (m *MockPutter) Put(key, value interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Put", key, value) + ret0, _ := ret[0].(error) + return ret0 +} + +// Put indicates an expected call of Put. +func (mr *MockPutterMockRecorder) Put(key, value interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockPutter)(nil).Put), key, value) +} + +// MockDeleter is a mock of Deleter interface. 
+type MockDeleter struct { + ctrl *gomock.Controller + recorder *MockDeleterMockRecorder +} + +// MockDeleterMockRecorder is the mock recorder for MockDeleter. +type MockDeleterMockRecorder struct { + mock *MockDeleter +} + +// NewMockDeleter creates a new mock instance. +func NewMockDeleter(ctrl *gomock.Controller) *MockDeleter { + mock := &MockDeleter{ctrl: ctrl} + mock.recorder = &MockDeleterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDeleter) EXPECT() *MockDeleterMockRecorder { + return m.recorder +} + +// Delete mocks base method. +func (m *MockDeleter) Delete(key string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", key) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete. +func (mr *MockDeleterMockRecorder) Delete(key interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockDeleter)(nil).Delete), key) +} + +// MockLister is a mock of Lister interface. +type MockLister struct { + ctrl *gomock.Controller + recorder *MockListerMockRecorder +} + +// MockListerMockRecorder is the mock recorder for MockLister. +type MockListerMockRecorder struct { + mock *MockLister +} + +// NewMockLister creates a new mock instance. +func NewMockLister(ctrl *gomock.Controller) *MockLister { + mock := &MockLister{ctrl: ctrl} + mock.recorder = &MockListerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockLister) EXPECT() *MockListerMockRecorder { + return m.recorder +} + +// Keys mocks base method. +func (m *MockLister) Keys(pattern string, maxSize int) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Keys", pattern, maxSize) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Keys indicates an expected call of Keys. 
+func (mr *MockListerMockRecorder) Keys(pattern, maxSize interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Keys", reflect.TypeOf((*MockLister)(nil).Keys), pattern, maxSize) +} diff --git a/pkg/cache/redis_cache.go b/pkg/cache/redis_cache.go new file mode 100644 index 000000000..dbb50a8cd --- /dev/null +++ b/pkg/cache/redis_cache.go @@ -0,0 +1,49 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package cache
+
+import (
+	"github.com/bucketeer-io/bucketeer/pkg/redis"
+)
+
+// redisCache implements Cache on top of the v1 Redis cluster client.
+type redisCache struct {
+	cluster redis.Cluster
+}
+
+// NewRedisCache wraps the given Redis cluster in the Cache interface.
+func NewRedisCache(cluster redis.Cluster) Cache {
+	return &redisCache{
+		cluster: cluster,
+	}
+}
+
+// Get runs GET on a read-only connection and maps redis.ErrNil to
+// ErrNotFound; any other error is returned as-is.
+func (r *redisCache) Get(key interface{}) (interface{}, error) {
+	conn := r.cluster.Get(redis.WithReadOnly())
+	defer conn.Close()
+	value, err := conn.Do("GET", key)
+	if err != nil {
+		if err == redis.ErrNil {
+			return nil, ErrNotFound
+		}
+		return nil, err
+	}
+	return value, nil
+}
+
+// Put stores the value with a plain SET (no expiration is set here).
+func (r *redisCache) Put(key interface{}, value interface{}) error {
+	conn := r.cluster.Get()
+	defer conn.Close()
+	_, err := conn.Do("SET", key, value)
+	return err
+}
diff --git a/pkg/cache/testing/BUILD.bazel b/pkg/cache/testing/BUILD.bazel
new file mode 100644
index 000000000..65e6a1dc2
--- /dev/null
+++ b/pkg/cache/testing/BUILD.bazel
@@ -0,0 +1,9 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "go_default_library",
+    srcs = ["cache.go"],
+    importpath = "github.com/bucketeer-io/bucketeer/pkg/cache/testing",
+    visibility = ["//visibility:public"],
+    deps = ["//pkg/cache:go_default_library"],
+)
diff --git a/pkg/cache/testing/cache.go b/pkg/cache/testing/cache.go
new file mode 100644
index 000000000..4d7b56d4d
--- /dev/null
+++ b/pkg/cache/testing/cache.go
@@ -0,0 +1,65 @@
+// Copyright 2022 The Bucketeer Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package testing
+
+import (
+	"sync"
+
+	"github.com/bucketeer-io/bucketeer/pkg/cache"
+)
+
+// inMemoryCache is a mutex-guarded map serving as a test double for
+// cache.MultiGetDeleteCache.
+type inMemoryCache struct {
+	data  map[interface{}]interface{}
+	mutex sync.Mutex
+}
+
+// NewInMemoryCache returns an empty in-memory cache for use in tests.
+func NewInMemoryCache() cache.MultiGetDeleteCache {
+	return &inMemoryCache{
+		data: make(map[interface{}]interface{}),
+	}
+}
+
+// Get returns the stored value, or cache.ErrNotFound when absent.
+func (c *inMemoryCache) Get(key interface{}) (interface{}, error) {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+	if val, ok := c.data[key]; ok {
+		return val, nil
+	}
+	return nil, cache.ErrNotFound
+}
+
+// Put stores value under key, overwriting any previous value.
+func (c *inMemoryCache) Put(key interface{}, value interface{}) error {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+	c.data[key] = value
+	return nil
+}
+
+// GetMulti is an unimplemented stub: it always returns (nil, nil).
+func (c *inMemoryCache) GetMulti(keys interface{}) ([]interface{}, error) {
+	// TODO: implement
+	return nil, nil
+}
+
+// Scan is an unimplemented stub: it always returns (0, nil, nil).
+func (c *inMemoryCache) Scan(cursor, key, count interface{}) (uint64, []string, error) {
+	// TODO: implement
+	return 0, nil, nil
+}
+
+// Delete removes key; deleting a missing key is a no-op.
+func (c *inMemoryCache) Delete(key string) error {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+	delete(c.data, key)
+	return nil
+}
diff --git a/pkg/cache/ttl_cache.go b/pkg/cache/ttl_cache.go
new file mode 100644
index 000000000..f437cb773
--- /dev/null
+++ b/pkg/cache/ttl_cache.go
@@ -0,0 +1,90 @@
+// Copyright 2022 The Bucketeer Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cache
+
+import (
+	"sync"
+	"time"
+)
+
+// entry is a cached value together with its absolute expiration time.
+type entry struct {
+	value      interface{}
+	expiration time.Time
+}
+
+// TTLCache is an in-memory cache whose entries expire ttl after
+// insertion. Expired entries are only removed by the background evicter
+// goroutine (when an eviction interval is given); Get does not check
+// expiration itself.
+type TTLCache struct {
+	entries sync.Map
+	ttl     time.Duration
+	doneCh  chan struct{}
+}
+
+// NewTTLCache creates a TTLCache. When evictionInterval > 0, a goroutine
+// is started that periodically drops expired entries; stop it with Destroy.
+func NewTTLCache(ttl time.Duration, evictionInterval time.Duration) *TTLCache {
+	c := &TTLCache{
+		ttl:    ttl,
+		doneCh: make(chan struct{}),
+	}
+	if evictionInterval > 0 {
+		go c.startEvicter(evictionInterval)
+	}
+	return c
+}
+
+// startEvicter drops expired entries every interval until Destroy closes doneCh.
+func (c *TTLCache) startEvicter(interval time.Duration) {
+	ticker := time.NewTicker(interval)
+	defer ticker.Stop()
+	for {
+		select {
+		case now := <-ticker.C:
+			c.evictExpired(now)
+		case <-c.doneCh:
+			return
+		}
+	}
+}
+
+// evictExpired removes every entry whose expiration is before t.
+func (c *TTLCache) evictExpired(t time.Time) {
+	c.entries.Range(func(key interface{}, value interface{}) bool {
+		e := value.(*entry)
+		if e.expiration.Before(t) {
+			c.entries.Delete(key)
+		}
+		return true
+	})
+}
+
+// Get returns the cached value or ErrNotFound. It intentionally does not
+// consult the entry's expiration; eviction is the evicter's job.
+func (c *TTLCache) Get(key interface{}) (interface{}, error) {
+	item, ok := c.entries.Load(key)
+	if !ok {
+		return nil, ErrNotFound
+	}
+	return item.(*entry).value, nil
+}
+
+// Put stores value under key with a fresh ttl from now.
+func (c *TTLCache) Put(key interface{}, value interface{}) error {
+	e := &entry{
+		value:      value,
+		expiration: time.Now().Add(c.ttl),
+	}
+	c.entries.Store(key, e)
+	return nil
+}
+
+// Destroy stops the evicter goroutine and clears all entries. It must be
+// called at most once: a second call would close doneCh again and panic.
+func (c *TTLCache) Destroy() {
+	close(c.doneCh)
+	c.entries.Range(func(key interface{}, value interface{}) bool {
+		c.entries.Delete(key)
+		return true
+	})
+}
diff --git a/pkg/cache/v2/BUILD.bazel b/pkg/cache/v2/BUILD.bazel
new file mode 100644
index 000000000..752cdc46b
--- /dev/null
+++ b/pkg/cache/v2/BUILD.bazel
@@ -0,0 +1,14 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "go_default_library",
+    srcs = ["redis_cache.go"],
+    importpath = "github.com/bucketeer-io/bucketeer/pkg/cache/v2",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//pkg/cache:go_default_library",
+        "//pkg/redis/v2:go_default_library",
+        "@com_github_go_redis_redis//:go_default_library",
+        "@org_uber_go_zap//:go_default_library",
+    ],
+)
diff --git a/pkg/cache/v2/redis_cache.go b/pkg/cache/v2/redis_cache.go
new file mode 100644
index 000000000..8343966bf
--- /dev/null
+++ b/pkg/cache/v2/redis_cache.go
@@ -0,0 +1,103 @@
+// Copyright 2022 The Bucketeer Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2
+
+import (
+	"fmt"
+
+	goredis "github.com/go-redis/redis"
+	"go.uber.org/zap"
+
+	"github.com/bucketeer-io/bucketeer/pkg/cache"
+	redis "github.com/bucketeer-io/bucketeer/pkg/redis/v2"
+)
+
+const (
+	// defaultKeysMaxSize caps Keys results when the caller passes no limit.
+	defaultKeysMaxSize = 3000000
+)
+
+type redisCache struct {
+	cluster redis.Cluster
+	logger  *zap.Logger
+}
+
+// NewRedisCache returns a cache.Cache backed by the v2 Redis cluster client.
+func NewRedisCache(cluster redis.Cluster, logger *zap.Logger) cache.Cache {
+	return &redisCache{
+		cluster: cluster,
+		logger:  logger.Named("redis_cache_v2"),
+	}
+}
+
+// NewRedisCacheLister exposes only the Lister (Keys) side of the cache.
+func NewRedisCacheLister(cluster redis.Cluster, logger *zap.Logger) cache.Lister {
+	return &redisCache{
+		cluster: cluster,
+		logger:  logger.Named("redis_cache_lister_v2"),
+	}
+}
+
+// NewRedisCacheDeleter exposes only the Deleter side of the cache.
+func NewRedisCacheDeleter(cluster redis.Cluster, logger *zap.Logger) cache.Deleter {
+	return &redisCache{
+		cluster: cluster,
+		logger:  logger.Named("redis_cache_deleter_v2"),
+	}
+}
+
+// Get fetches key (must be a string) and maps redis.ErrNil to cache.ErrNotFound.
+func (r *redisCache) Get(key interface{}) (interface{}, error) {
+	value, err := r.cluster.Get(key.(string))
+	if err != nil {
+		if err == redis.ErrNil {
+			return nil, cache.ErrNotFound
+		}
+		return nil, err
+	}
+	return value, nil
+}
+
+// Put stores value under key (must be a string) with no expiration.
+func (r *redisCache) Put(key interface{}, value interface{}) error {
+	return r.cluster.Set(key.(string), value, 0)
+}
+
+// Delete removes key from the cluster.
+func (r *redisCache) Delete(key string) error {
+	return r.cluster.Del(key)
+}
+
+// Keys returns at most maxSize keys matching pattern, scanning every master.
+// If maxSize is less than or equals to zero, it is regarded as the default.
+// NOTE(review): if Cluster.ForEachMaster invokes fn concurrently (as
+// go-redis's ClusterClient.ForEachMaster does), the appends to keys would
+// race and need a mutex — confirm the wrapper's semantics.
+func (r *redisCache) Keys(pattern string, maxSize int) ([]string, error) {
+	keys := []string{}
+	if maxSize <= 0 {
+		maxSize = defaultKeysMaxSize
+	}
+	fn := func(client *goredis.Client) error {
+		// Fix: a previously scanned master may already have filled the
+		// limit; the original appended before checking, overshooting
+		// maxSize by up to one key per additional master.
+		if len(keys) >= maxSize {
+			return nil
+		}
+		r.logger.Debug(fmt.Sprintf("keys runs for %s", client.String()),
+			zap.String("pattern", pattern),
+		)
+		iter := client.Scan(0, pattern, 0).Iterator()
+		for iter.Next() {
+			keys = append(keys, iter.Val())
+			if len(keys) >= maxSize {
+				return nil
+			}
+		}
+		return iter.Err()
+	}
+	err := r.cluster.ForEachMaster(fn)
+	if err != nil {
+		return nil, err
+	}
+	return keys, nil
+}
diff --git a/pkg/cache/v3/BUILD.bazel b/pkg/cache/v3/BUILD.bazel
new file mode 100644
index 000000000..47ab3f4ca
--- /dev/null
+++ b/pkg/cache/v3/BUILD.bazel
@@ -0,0 +1,41 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "environment_api_key.go",
+        "experiments.go",
+        "features.go",
+        "redis_cache.go",
+        "segment_users.go",
+    ],
+    importpath = "github.com/bucketeer-io/bucketeer/pkg/cache/v3",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//pkg/cache:go_default_library",
+        "//pkg/redis/v3:go_default_library",
+        "//pkg/storage:go_default_library",
+        "//proto/account:go_default_library",
+        "//proto/experiment:go_default_library",
+        "//proto/feature:go_default_library",
+        "@com_github_golang_protobuf//proto:go_default_library",
+    ],
+)
+
+go_test(
+    name = "go_default_test",
+    srcs = [
+        "features_test.go",
+        "segment_users_test.go",
+    ],
+    embed = [":go_default_library"],
+    deps = [
+        "//pkg/cache:go_default_library",
+        "//pkg/cache/mock:go_default_library",
+        "//proto/feature:go_default_library",
+        "@com_github_golang_mock//gomock:go_default_library",
+        "@com_github_golang_protobuf//proto:go_default_library",
+        "@com_github_stretchr_testify//assert:go_default_library",
+        "@com_github_stretchr_testify//require:go_default_library",
+    ],
+)
diff --git a/pkg/cache/v3/environment_api_key.go b/pkg/cache/v3/environment_api_key.go
new file mode 100644
index 000000000..6f4bc5ea3
--- /dev/null
+++ b/pkg/cache/v3/environment_api_key.go
@@ -0,0 +1,68 @@
+// Copyright 2022 The Bucketeer Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE
+package v3
+
+import (
+	"github.com/golang/protobuf/proto" // nolint:staticcheck
+
+	"github.com/bucketeer-io/bucketeer/pkg/cache"
+	"github.com/bucketeer-io/bucketeer/pkg/storage"
+	accountproto "github.com/bucketeer-io/bucketeer/proto/account"
+)
+
+// EnvironmentAPIKeyCache caches EnvironmentAPIKey protos keyed by API key ID.
+type EnvironmentAPIKeyCache interface {
+	Get(string) (*accountproto.EnvironmentAPIKey, error)
+	Put(*accountproto.EnvironmentAPIKey) error
+}
+
+type environmentAPIKeyCache struct {
+	cache cache.Cache
+}
+
+// NewEnvironmentAPIKeyCache wraps the given cache with proto (de)serialization.
+func NewEnvironmentAPIKeyCache(c cache.Cache) EnvironmentAPIKeyCache {
+	return &environmentAPIKeyCache{cache: c}
+}
+
+// Get loads and unmarshals the EnvironmentAPIKey stored under id. Errors
+// from the underlying cache (e.g. cache.ErrNotFound) are passed through.
+func (c *environmentAPIKeyCache) Get(id string) (*accountproto.EnvironmentAPIKey, error) {
+	key := c.key(id)
+	value, err := c.cache.Get(key)
+	if err != nil {
+		return nil, err
+	}
+	b, err := cache.Bytes(value)
+	if err != nil {
+		return nil, err
+	}
+	environmentAPIKey := &accountproto.EnvironmentAPIKey{}
+	if err := proto.Unmarshal(b, environmentAPIKey); err != nil {
+		return nil, err
+	}
+	return environmentAPIKey, nil
+}
+
+// Put marshals and stores the EnvironmentAPIKey under its API key's ID.
+// NOTE(review): this dereferences environmentAPIKey.ApiKey without a nil
+// check — confirm callers always set it, or add a guard.
+func (c *environmentAPIKeyCache) Put(environmentAPIKey *accountproto.EnvironmentAPIKey) error {
+	buffer, err := proto.Marshal(environmentAPIKey)
+	if err != nil {
+		return err
+	}
+	key := c.key(environmentAPIKey.ApiKey.Id)
+	return c.cache.Put(key, buffer)
+}
+
+func (c *environmentAPIKeyCache) key(id string) string {
+	// always use AdminEnvironmentNamespace because we'd like to get APIKey and environment_namespace only by id
+	return cache.MakeKey("environment_apikey", id, storage.AdminEnvironmentNamespace)
+}
diff --git a/pkg/cache/v3/experiments.go b/pkg/cache/v3/experiments.go
new file mode 100644
index 000000000..9c7ef2d2c
--- /dev/null
+++ b/pkg/cache/v3/experiments.go
@@ -0,0 +1,87 @@
+// Copyright 2022 The Bucketeer Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE
+package v3
+
+import (
+	"fmt"
+
+	"github.com/golang/protobuf/proto" // nolint:staticcheck
+
+	"github.com/bucketeer-io/bucketeer/pkg/cache"
+	experimentproto "github.com/bucketeer-io/bucketeer/proto/experiment"
+)
+
+// ExperimentsCache caches the Experiments tied to a feature version.
+type ExperimentsCache interface {
+	Get(featureID string, featureVersion int32, environmentNamespace string) (*experimentproto.Experiments, error)
+	Put(
+		featureID string,
+		featureVersion int32,
+		experiments *experimentproto.Experiments,
+		environmentNamespace string,
+	) error
+}
+
+type experimentsCache struct {
+	cache cache.Cache
+}
+
+// NewExperimentsCache wraps the given cache with proto (de)serialization.
+func NewExperimentsCache(c cache.Cache) ExperimentsCache {
+	return &experimentsCache{cache: c}
+}
+
+// Get loads and unmarshals the Experiments cached for the feature
+// version. Errors from the underlying cache (e.g. cache.ErrNotFound)
+// are passed through.
+func (c *experimentsCache) Get(
+	featureID string,
+	featureVersion int32,
+	environmentNamespace string,
+) (*experimentproto.Experiments, error) {
+	key := c.key(featureID, featureVersion, environmentNamespace)
+	value, err := c.cache.Get(key)
+	if err != nil {
+		return nil, err
+	}
+	b, err := cache.Bytes(value)
+	if err != nil {
+		return nil, err
+	}
+	experiments := &experimentproto.Experiments{}
+	err = proto.Unmarshal(b, experiments)
+	if err != nil {
+		return nil, err
+	}
+	return experiments, nil
+}
+
+// Put marshals and stores the Experiments for the feature version.
+func (c *experimentsCache) Put(
+	featureID string,
+	featureVersion int32,
+	experiments *experimentproto.Experiments,
+	environmentNamespace string,
+) error {
+	buffer, err := proto.Marshal(experiments)
+	if err != nil {
+		return err
+	}
+	key := c.key(featureID, featureVersion, environmentNamespace)
+	return c.cache.Put(key, buffer)
+}
+
+// key delegates to cache.MakeKey with kind "event_transformer:cache:experiments"
+// and id "<featureID>:<featureVersion>".
+func (c *experimentsCache) key(featureID string, featureVersion int32, environmentNamespace string) string {
+	return cache.MakeKey(
+		"event_transformer:cache:experiments",
+		fmt.Sprintf("%s:%d", featureID, featureVersion),
+		environmentNamespace,
+	)
+}
diff --git a/pkg/cache/v3/features.go b/pkg/cache/v3/features.go
new file mode 100644
index 000000000..2195ee133
--- /dev/null
+++ b/pkg/cache/v3/features.go
@@ -0,0 +1,73 @@
+// Copyright 2022 The Bucketeer Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package v3 + +import ( + "fmt" + + "github.com/golang/protobuf/proto" // nolint:staticcheck + + "github.com/bucketeer-io/bucketeer/pkg/cache" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +const ( + featuresKind = "features" +) + +type FeaturesCache interface { + Get(environmentNamespace string) (*featureproto.Features, error) + Put(features *featureproto.Features, environmentNamespace string) error +} + +type featuresCache struct { + cache cache.MultiGetCache +} + +func NewFeaturesCache(c cache.MultiGetCache) FeaturesCache { + return &featuresCache{cache: c} +} + +func (c *featuresCache) Get(environmentNamespace string) (*featureproto.Features, error) { + key := c.key(environmentNamespace) + value, err := c.cache.Get(key) + if err != nil { + return nil, err + } + b, err := cache.Bytes(value) + if err != nil { + return nil, err + } + features := &featureproto.Features{} + err = proto.Unmarshal(b, features) + if err != nil { + return nil, err + } + return features, nil +} + +func (c *featuresCache) Put(features *featureproto.Features, environmentNamespace string) error { + buffer, err := proto.Marshal(features) + if err != nil { + return err + } + key := c.key(environmentNamespace) + return c.cache.Put(key, buffer) +} + +func (c *featuresCache) key(environmentNamespace string) string { + return fmt.Sprintf("%s:%s", environmentNamespace, featuresKind) +} diff --git a/pkg/cache/v3/features_test.go b/pkg/cache/v3/features_test.go new file mode 100644 index 000000000..32bf3bd0a --- /dev/null +++ b/pkg/cache/v3/features_test.go @@ -0,0 +1,148 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v3 + +import ( + "fmt" + "testing" + + "github.com/golang/mock/gomock" + "github.com/golang/protobuf/proto" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/bucketeer-io/bucketeer/pkg/cache" + cachemock "github.com/bucketeer-io/bucketeer/pkg/cache/mock" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +const ( + tag = "bucketeer-tag" + environmentNamespace = "bucketeer-environment" +) + +func TestGetFeatures(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + features := createFeatures(t) + dataFeatures := marshalMessage(t, features) + key := fmt.Sprintf("%s:%s", environmentNamespace, featuresKind) + + patterns := map[string]struct { + setup func(*featuresCache) + expectedErr error + }{ + "error_get_not_found": { + setup: func(tf *featuresCache) { + tf.cache.(*cachemock.MockMultiGetCache).EXPECT().Get(key).Return(nil, cache.ErrNotFound) + }, + expectedErr: cache.ErrNotFound, + }, + "error_invalid_type": { + setup: func(tf *featuresCache) { + tf.cache.(*cachemock.MockMultiGetCache).EXPECT().Get(key).Return("test", nil) + }, + expectedErr: cache.ErrInvalidType, + }, + "success": { + setup: func(tf *featuresCache) { + tf.cache.(*cachemock.MockMultiGetCache).EXPECT().Get(key).Return(dataFeatures, nil) + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + tf := newFeaturesCache(t, mockController) + p.setup(tf) + features, err := tf.Get(environmentNamespace) + if err == 
nil { + assert.Equal(t, features.Features[0].Id, features.Features[0].Id) + assert.Equal(t, features.Features[0].Name, features.Features[0].Name) + } + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestPutFeatures(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + features := createFeatures(t) + dataFeatures := marshalMessage(t, features) + key := fmt.Sprintf("%s:%s", environmentNamespace, featuresKind) + + patterns := map[string]struct { + setup func(*featuresCache) + input *featureproto.Features + expectedErr error + }{ + "error_proto_message_nil": { + setup: nil, + input: nil, + expectedErr: proto.ErrNil, + }, + "success": { + setup: func(tf *featuresCache) { + tf.cache.(*cachemock.MockMultiGetCache).EXPECT().Put(key, dataFeatures).Return(nil) + }, + input: features, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + tf := newFeaturesCache(t, mockController) + if p.setup != nil { + p.setup(tf) + } + err := tf.Put(p.input, environmentNamespace) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func createFeatures(t *testing.T) *featureproto.Features { + t.Helper() + f := []*featureproto.Feature{} + for i := 0; i < 5; i++ { + feature := &featureproto.Feature{ + Id: fmt.Sprintf("feature-id-%d", i), + Name: fmt.Sprintf("feature-name-%d", i), + } + f = append(f, feature) + } + return &featureproto.Features{ + Features: f, + } +} + +func marshalMessage(t *testing.T, pb proto.Message) interface{} { + t.Helper() + buffer, err := proto.Marshal(pb) + require.NoError(t, err) + return buffer +} + +func newFeaturesCache(t *testing.T, mockController *gomock.Controller) *featuresCache { + t.Helper() + return &featuresCache{ + cache: cachemock.NewMockMultiGetCache(mockController), + } +} diff --git a/pkg/cache/v3/mock/BUILD.bazel b/pkg/cache/v3/mock/BUILD.bazel new file mode 100644 index 000000000..5ee0919d3 --- /dev/null +++ b/pkg/cache/v3/mock/BUILD.bazel @@ 
-0,0 +1,19 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "environment_api_key.go", + "experiments.go", + "features.go", + "segment_users.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/cache/v3/mock", + visibility = ["//visibility:public"], + deps = [ + "//proto/account:go_default_library", + "//proto/experiment:go_default_library", + "//proto/feature:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + ], +) diff --git a/pkg/cache/v3/mock/environment_api_key.go b/pkg/cache/v3/mock/environment_api_key.go new file mode 100644 index 000000000..b8a5b2e2a --- /dev/null +++ b/pkg/cache/v3/mock/environment_api_key.go @@ -0,0 +1,65 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: environment_api_key.go + +// Package mock is a generated GoMock package. +package mock + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + account "github.com/bucketeer-io/bucketeer/proto/account" +) + +// MockEnvironmentAPIKeyCache is a mock of EnvironmentAPIKeyCache interface. +type MockEnvironmentAPIKeyCache struct { + ctrl *gomock.Controller + recorder *MockEnvironmentAPIKeyCacheMockRecorder +} + +// MockEnvironmentAPIKeyCacheMockRecorder is the mock recorder for MockEnvironmentAPIKeyCache. +type MockEnvironmentAPIKeyCacheMockRecorder struct { + mock *MockEnvironmentAPIKeyCache +} + +// NewMockEnvironmentAPIKeyCache creates a new mock instance. +func NewMockEnvironmentAPIKeyCache(ctrl *gomock.Controller) *MockEnvironmentAPIKeyCache { + mock := &MockEnvironmentAPIKeyCache{ctrl: ctrl} + mock.recorder = &MockEnvironmentAPIKeyCacheMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockEnvironmentAPIKeyCache) EXPECT() *MockEnvironmentAPIKeyCacheMockRecorder { + return m.recorder +} + +// Get mocks base method. 
+func (m *MockEnvironmentAPIKeyCache) Get(arg0 string) (*account.EnvironmentAPIKey, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", arg0) + ret0, _ := ret[0].(*account.EnvironmentAPIKey) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockEnvironmentAPIKeyCacheMockRecorder) Get(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockEnvironmentAPIKeyCache)(nil).Get), arg0) +} + +// Put mocks base method. +func (m *MockEnvironmentAPIKeyCache) Put(arg0 *account.EnvironmentAPIKey) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Put", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Put indicates an expected call of Put. +func (mr *MockEnvironmentAPIKeyCacheMockRecorder) Put(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockEnvironmentAPIKeyCache)(nil).Put), arg0) +} diff --git a/pkg/cache/v3/mock/experiments.go b/pkg/cache/v3/mock/experiments.go new file mode 100644 index 000000000..c300ddac9 --- /dev/null +++ b/pkg/cache/v3/mock/experiments.go @@ -0,0 +1,65 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: experiments.go + +// Package mock is a generated GoMock package. +package mock + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + experiment "github.com/bucketeer-io/bucketeer/proto/experiment" +) + +// MockExperimentsCache is a mock of ExperimentsCache interface. +type MockExperimentsCache struct { + ctrl *gomock.Controller + recorder *MockExperimentsCacheMockRecorder +} + +// MockExperimentsCacheMockRecorder is the mock recorder for MockExperimentsCache. +type MockExperimentsCacheMockRecorder struct { + mock *MockExperimentsCache +} + +// NewMockExperimentsCache creates a new mock instance. 
+func NewMockExperimentsCache(ctrl *gomock.Controller) *MockExperimentsCache { + mock := &MockExperimentsCache{ctrl: ctrl} + mock.recorder = &MockExperimentsCacheMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockExperimentsCache) EXPECT() *MockExperimentsCacheMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockExperimentsCache) Get(featureID string, featureVersion int32, environmentNamespace string) (*experiment.Experiments, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", featureID, featureVersion, environmentNamespace) + ret0, _ := ret[0].(*experiment.Experiments) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockExperimentsCacheMockRecorder) Get(featureID, featureVersion, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockExperimentsCache)(nil).Get), featureID, featureVersion, environmentNamespace) +} + +// Put mocks base method. +func (m *MockExperimentsCache) Put(featureID string, featureVersion int32, experiments *experiment.Experiments, environmentNamespace string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Put", featureID, featureVersion, experiments, environmentNamespace) + ret0, _ := ret[0].(error) + return ret0 +} + +// Put indicates an expected call of Put. 
+func (mr *MockExperimentsCacheMockRecorder) Put(featureID, featureVersion, experiments, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockExperimentsCache)(nil).Put), featureID, featureVersion, experiments, environmentNamespace) +} diff --git a/pkg/cache/v3/mock/features.go b/pkg/cache/v3/mock/features.go new file mode 100644 index 000000000..d56698d03 --- /dev/null +++ b/pkg/cache/v3/mock/features.go @@ -0,0 +1,65 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: features.go + +// Package mock is a generated GoMock package. +package mock + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + feature "github.com/bucketeer-io/bucketeer/proto/feature" +) + +// MockFeaturesCache is a mock of FeaturesCache interface. +type MockFeaturesCache struct { + ctrl *gomock.Controller + recorder *MockFeaturesCacheMockRecorder +} + +// MockFeaturesCacheMockRecorder is the mock recorder for MockFeaturesCache. +type MockFeaturesCacheMockRecorder struct { + mock *MockFeaturesCache +} + +// NewMockFeaturesCache creates a new mock instance. +func NewMockFeaturesCache(ctrl *gomock.Controller) *MockFeaturesCache { + mock := &MockFeaturesCache{ctrl: ctrl} + mock.recorder = &MockFeaturesCacheMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockFeaturesCache) EXPECT() *MockFeaturesCacheMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockFeaturesCache) Get(environmentNamespace string) (*feature.Features, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", environmentNamespace) + ret0, _ := ret[0].(*feature.Features) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. 
+func (mr *MockFeaturesCacheMockRecorder) Get(environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockFeaturesCache)(nil).Get), environmentNamespace) +} + +// Put mocks base method. +func (m *MockFeaturesCache) Put(features *feature.Features, environmentNamespace string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Put", features, environmentNamespace) + ret0, _ := ret[0].(error) + return ret0 +} + +// Put indicates an expected call of Put. +func (mr *MockFeaturesCacheMockRecorder) Put(features, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockFeaturesCache)(nil).Put), features, environmentNamespace) +} diff --git a/pkg/cache/v3/mock/segment_users.go b/pkg/cache/v3/mock/segment_users.go new file mode 100644 index 000000000..53cbd7940 --- /dev/null +++ b/pkg/cache/v3/mock/segment_users.go @@ -0,0 +1,80 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: segment_users.go + +// Package mock is a generated GoMock package. +package mock + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + feature "github.com/bucketeer-io/bucketeer/proto/feature" +) + +// MockSegmentUsersCache is a mock of SegmentUsersCache interface. +type MockSegmentUsersCache struct { + ctrl *gomock.Controller + recorder *MockSegmentUsersCacheMockRecorder +} + +// MockSegmentUsersCacheMockRecorder is the mock recorder for MockSegmentUsersCache. +type MockSegmentUsersCacheMockRecorder struct { + mock *MockSegmentUsersCache +} + +// NewMockSegmentUsersCache creates a new mock instance. 
+func NewMockSegmentUsersCache(ctrl *gomock.Controller) *MockSegmentUsersCache { + mock := &MockSegmentUsersCache{ctrl: ctrl} + mock.recorder = &MockSegmentUsersCacheMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSegmentUsersCache) EXPECT() *MockSegmentUsersCacheMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockSegmentUsersCache) Get(segmentID, environmentNamespace string) (*feature.SegmentUsers, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", segmentID, environmentNamespace) + ret0, _ := ret[0].(*feature.SegmentUsers) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockSegmentUsersCacheMockRecorder) Get(segmentID, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockSegmentUsersCache)(nil).Get), segmentID, environmentNamespace) +} + +// GetAll mocks base method. +func (m *MockSegmentUsersCache) GetAll(environmentNamespace string) ([]*feature.SegmentUsers, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAll", environmentNamespace) + ret0, _ := ret[0].([]*feature.SegmentUsers) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAll indicates an expected call of GetAll. +func (mr *MockSegmentUsersCacheMockRecorder) GetAll(environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAll", reflect.TypeOf((*MockSegmentUsersCache)(nil).GetAll), environmentNamespace) +} + +// Put mocks base method. +func (m *MockSegmentUsersCache) Put(segmentUsers *feature.SegmentUsers, environmentNamespace string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Put", segmentUsers, environmentNamespace) + ret0, _ := ret[0].(error) + return ret0 +} + +// Put indicates an expected call of Put. 
+func (mr *MockSegmentUsersCacheMockRecorder) Put(segmentUsers, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockSegmentUsersCache)(nil).Put), segmentUsers, environmentNamespace) +} diff --git a/pkg/cache/v3/redis_cache.go b/pkg/cache/v3/redis_cache.go new file mode 100644 index 000000000..fd4b80415 --- /dev/null +++ b/pkg/cache/v3/redis_cache.go @@ -0,0 +1,75 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v3 + +import ( + "github.com/bucketeer-io/bucketeer/pkg/cache" + redis "github.com/bucketeer-io/bucketeer/pkg/redis/v3" +) + +type redisCache struct { + client redis.Client +} + +func NewRedisCache(client redis.Client) cache.MultiGetDeleteCache { + return &redisCache{ + client: client, + } +} + +func (r *redisCache) Get(key interface{}) (interface{}, error) { + value, err := r.client.Get(key.(string)) + if err != nil { + if err == redis.ErrNil { + return nil, cache.ErrNotFound + } + return nil, err + } + return value, nil +} + +func (r *redisCache) Put(key interface{}, value interface{}) error { + return r.client.Set(key.(string), value, 0) +} + +func (r *redisCache) GetMulti(keys interface{}) ([]interface{}, error) { + value, err := r.client.GetMulti(keys.([]string)) + switch err { + case nil: + return value, nil + case redis.ErrNil: + return nil, cache.ErrNotFound + case redis.ErrInvalidType: + return nil, cache.ErrInvalidType + default: + return nil, err + } +} + +func (r *redisCache) Scan(cursor, key, count interface{}) (uint64, []string, error) { + c, keys, err := r.client.Scan(cursor.(uint64), key.(string), count.(int64)) + switch err { + case nil: + return c, keys, nil + case redis.ErrNil: + return 0, nil, cache.ErrNotFound + default: + return 0, nil, err + } +} + +func (r *redisCache) Delete(key string) error { + return r.client.Del(key) +} diff --git a/pkg/cache/v3/segment_users.go b/pkg/cache/v3/segment_users.go new file mode 100644 index 000000000..d54da736e --- /dev/null +++ b/pkg/cache/v3/segment_users.go @@ -0,0 +1,121 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package v3 + +import ( + "github.com/golang/protobuf/proto" // nolint:staticcheck + + "github.com/bucketeer-io/bucketeer/pkg/cache" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +const ( + segmentUsersKind = "segment_users" + segmentUsersMaxSize = int64(100) +) + +type SegmentUsersCache interface { + Get(segmentID, environmentNamespace string) (*featureproto.SegmentUsers, error) + GetAll(environmentNamespace string) ([]*featureproto.SegmentUsers, error) + Put(segmentUsers *featureproto.SegmentUsers, environmentNamespace string) error +} + +type segmentUsersCache struct { + cache cache.MultiGetCache +} + +func NewSegmentUsersCache(c cache.MultiGetCache) SegmentUsersCache { + return &segmentUsersCache{cache: c} +} + +func (c *segmentUsersCache) Get(segmentID, environmentNamespace string) (*featureproto.SegmentUsers, error) { + key := c.key(segmentID, environmentNamespace) + value, err := c.cache.Get(key) + if err != nil { + return nil, err + } + b, err := cache.Bytes(value) + if err != nil { + return nil, err + } + segmentUsers := &featureproto.SegmentUsers{} + err = proto.Unmarshal(b, segmentUsers) + if err != nil { + return nil, err + } + return segmentUsers, nil +} + +func (c *segmentUsersCache) GetAll(environmentNamespace string) ([]*featureproto.SegmentUsers, error) { + keys, err := c.scan(environmentNamespace) + if err != nil { + return nil, err + } + users, err := c.cache.GetMulti(keys) + if err != nil { + return nil, err + } + 
segmentUsers := []*featureproto.SegmentUsers{} + for _, value := range users { + b, err := cache.Bytes(value) + if err != nil { + return nil, err + } + su := &featureproto.SegmentUsers{} + err = proto.Unmarshal(b, su) + if err != nil { + return nil, err + } + segmentUsers = append(segmentUsers, su) + } + return segmentUsers, nil +} + +func (c *segmentUsersCache) Put(segmentUsers *featureproto.SegmentUsers, environmentNamespace string) error { + buffer, err := proto.Marshal(segmentUsers) + if err != nil { + return err + } + key := c.key(segmentUsers.SegmentId, environmentNamespace) + return c.cache.Put(key, buffer) +} + +func (c *segmentUsersCache) scan(environmentNamespace string) ([]string, error) { + keyPrefix := cache.MakeKeyPrefix(segmentUsersKind, environmentNamespace) + key := keyPrefix + "*" + var cursor uint64 + var k []string + var err error + keys := []string{} + for { + cursor, k, err = c.cache.Scan(cursor, key, segmentUsersMaxSize) + if err != nil { + break + } + keys = append(keys, k...) + if cursor == 0 { + break + } + } + if err != nil { + return nil, err + } + return keys, nil +} + +func (c *segmentUsersCache) key(segmentID, environmentNamespace string) string { + return cache.MakeKey(segmentUsersKind, segmentID, environmentNamespace) +} diff --git a/pkg/cache/v3/segment_users_test.go b/pkg/cache/v3/segment_users_test.go new file mode 100644 index 000000000..5d82a0baf --- /dev/null +++ b/pkg/cache/v3/segment_users_test.go @@ -0,0 +1,220 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package v3 + +import ( + "fmt" + "testing" + + "github.com/golang/mock/gomock" + "github.com/golang/protobuf/proto" + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/cache" + cachemock "github.com/bucketeer-io/bucketeer/pkg/cache/mock" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +const ( + segmentID = "segment-id" +) + +func TestGetSegmentUser(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + segmentUsers := createSegmentUsersCache(t) + dataSegmentUsers := marshalMessage(t, segmentUsers) + key := cache.MakeKey(segmentUsersKind, segmentID, environmentNamespace) + + patterns := map[string]struct { + setup func(*segmentUsersCache) + expectedErr error + }{ + "error_get_not_found": { + setup: func(sc *segmentUsersCache) { + sc.cache.(*cachemock.MockMultiGetCache).EXPECT().Get(key).Return(nil, cache.ErrNotFound) + }, + expectedErr: cache.ErrNotFound, + }, + "error_invalid_type": { + setup: func(sc *segmentUsersCache) { + sc.cache.(*cachemock.MockMultiGetCache).EXPECT().Get(key).Return("test", nil) + }, + expectedErr: cache.ErrInvalidType, + }, + "success": { + setup: func(sc *segmentUsersCache) { + sc.cache.(*cachemock.MockMultiGetCache).EXPECT().Get(key).Return(dataSegmentUsers, nil) + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + sc := newSegmentUsersCache(t, mockController) + p.setup(sc) + cache, err := sc.Get(segmentID, environmentNamespace) + if err == nil { + assert.Equal(t, segmentUsers.SegmentId, cache.SegmentId) + assert.Equal(t, segmentUsers.Users[0].Id, cache.Users[0].Id) + assert.Equal(t, segmentUsers.Users[0].SegmentId, cache.Users[0].SegmentId) + assert.Equal(t, segmentUsers.Users[0].UserId, cache.Users[0].UserId) + assert.Equal(t, segmentUsers.Users[0].State, 
cache.Users[0].State) + assert.Equal(t, segmentUsers.Users[0].Deleted, cache.Users[0].Deleted) + } + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestGetAllSegmentUser(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + segmentUsers := createSegmentUsersCache(t) + dataSegmentUsers := marshalMessage(t, segmentUsers) + keys := []string{ + fmt.Sprintf("%s:%s:segment-id-1", environmentNamespace, segmentUsersKind), + fmt.Sprintf("%s:%s:segment-id-2", environmentNamespace, segmentUsersKind), + } + + keyPrefix := cache.MakeKeyPrefix(segmentUsersKind, environmentNamespace) + key := keyPrefix + "*" + var cursor uint64 + + patterns := map[string]struct { + setup func(*segmentUsersCache) + expectedErr error + }{ + "error_scan_not_found": { + setup: func(sc *segmentUsersCache) { + sc.cache.(*cachemock.MockMultiGetCache).EXPECT().Scan(cursor, key, segmentUsersMaxSize).Return( + cursor, nil, cache.ErrNotFound) + }, + expectedErr: cache.ErrNotFound, + }, + "error_get_multi_not_found": { + setup: func(sc *segmentUsersCache) { + sc.cache.(*cachemock.MockMultiGetCache).EXPECT().Scan(cursor, key, segmentUsersMaxSize).Return( + cursor, keys, nil) + sc.cache.(*cachemock.MockMultiGetCache).EXPECT().GetMulti(keys).Return(nil, cache.ErrNotFound) + }, + expectedErr: cache.ErrNotFound, + }, + "error_invalid_type": { + setup: func(sc *segmentUsersCache) { + sc.cache.(*cachemock.MockMultiGetCache).EXPECT().Scan(cursor, key, segmentUsersMaxSize).Return( + cursor, keys, nil) + sc.cache.(*cachemock.MockMultiGetCache).EXPECT().GetMulti(keys).Return([]interface{}{"test"}, nil) + }, + expectedErr: cache.ErrInvalidType, + }, + "success": { + setup: func(sc *segmentUsersCache) { + sc.cache.(*cachemock.MockMultiGetCache).EXPECT().Scan(cursor, key, segmentUsersMaxSize).Return( + cursor, keys, nil) + sc.cache.(*cachemock.MockMultiGetCache).EXPECT().GetMulti(keys).Return([]interface{}{dataSegmentUsers}, nil) + }, + expectedErr: nil, + 
}, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + sc := newSegmentUsersCache(t, mockController) + p.setup(sc) + allUsers, err := sc.GetAll(environmentNamespace) + if err == nil { + users := allUsers[0] + assert.Equal(t, segmentUsers.SegmentId, users.SegmentId) + for i := 0; i < len(segmentUsers.Users); i++ { + assert.Equal(t, segmentUsers.Users[i].Id, users.Users[i].Id) + assert.Equal(t, segmentUsers.Users[i].SegmentId, users.Users[i].SegmentId) + assert.Equal(t, segmentUsers.Users[i].UserId, users.Users[i].UserId) + assert.Equal(t, segmentUsers.Users[i].State, users.Users[i].State) + assert.Equal(t, segmentUsers.Users[i].Deleted, users.Users[i].Deleted) + } + } + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestPutSegmentUser(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + segmentUsers := createSegmentUsersCache(t) + dataSegmentUsers := marshalMessage(t, segmentUsers) + key := cache.MakeKey(segmentUsersKind, segmentID, environmentNamespace) + + patterns := map[string]struct { + setup func(*segmentUsersCache) + input *featureproto.SegmentUsers + expectedErr error + }{ + "error_proto_message_nil": { + setup: nil, + input: nil, + expectedErr: proto.ErrNil, + }, + "success": { + setup: func(sc *segmentUsersCache) { + sc.cache.(*cachemock.MockMultiGetCache).EXPECT().Put(key, dataSegmentUsers).Return(nil) + }, + input: segmentUsers, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + sc := newSegmentUsersCache(t, mockController) + if p.setup != nil { + p.setup(sc) + } + err := sc.Put(p.input, environmentNamespace) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func createSegmentUsersCache(t *testing.T) *featureproto.SegmentUsers { + t.Helper() + u := []*featureproto.SegmentUser{} + for i := 0; i < 5; i++ { + user := &featureproto.SegmentUser{ + Id: fmt.Sprintf("segment-user-id-%d", i), + SegmentId: segmentID, + UserId: 
fmt.Sprintf("user-id-%d", i), + State: featureproto.SegmentUser_INCLUDED, + Deleted: false, + } + u = append(u, user) + } + return &featureproto.SegmentUsers{ + SegmentId: segmentID, + Users: u, + } +} + +func newSegmentUsersCache(t *testing.T, mockController *gomock.Controller) *segmentUsersCache { + t.Helper() + return &segmentUsersCache{ + cache: cachemock.NewMockMultiGetCache(mockController), + } +} diff --git a/pkg/cli/BUILD.bazel b/pkg/cli/BUILD.bazel new file mode 100644 index 000000000..e1144a660 --- /dev/null +++ b/pkg/cli/BUILD.bazel @@ -0,0 +1,20 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "app.go", + "cmd.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/cli", + visibility = ["//visibility:public"], + deps = [ + "//pkg/log:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/trace:go_default_library", + "@com_google_cloud_go_profiler//:go_default_library", + "@in_gopkg_alecthomas_kingpin_v2//:go_default_library", + "@io_opencensus_go//trace:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/cli/app.go b/pkg/cli/app.go new file mode 100644 index 000000000..4e4a1f4f6 --- /dev/null +++ b/pkg/cli/app.go @@ -0,0 +1,165 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cli + +import ( + "context" + "errors" + "fmt" + "os" + "os/signal" + "syscall" + + "cloud.google.com/go/profiler" + octrace "go.opencensus.io/trace" + "go.uber.org/zap" + kingpin "gopkg.in/alecthomas/kingpin.v2" + + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/trace" +) + +var ( + errCommandNotFound = errors.New("command not found") + + healthCheckSpanName = "grpc.health.v1.Health.Check" + pubsubAckSpanName = "google.pubsub.v1.Subscriber.Acknowledge" + pubsubModifyAckSpanName = "google.pubsub.v1.Subscriber.ModifyAckDeadline" +) + +type App struct { + name string + version string + cmds map[string]Command + app *kingpin.Application +} + +func NewApp(name, desc, version, build string) *App { + app := &App{ + name: name, + version: fmt.Sprintf("%s-%s", version, build), + app: kingpin.New(name, desc), + cmds: make(map[string]Command), + } + app.app.Version(app.version) + app.app.DefaultEnvars() + return app +} + +func (a *App) Command(name string, desc string) *kingpin.CmdClause { + return a.app.Command(name, desc) +} + +func (a *App) RegisterCommand(cmd Command) { + a.cmds[cmd.FullCommand()] = cmd +} + +func (a *App) Run() error { + logLevel := a.app.Flag("log-level", "The level of logging.").Default("info").Enum(log.Levels...) 
+ profile := a.app.Flag("profile", "If true enables uploading the profiles to Stackdriver.").Default("true").Bool() + metricsPort := a.app.Flag("metrics-port", "Port to bind metrics server to.").Default("9002").Int() + traceSamplingProbability := a.app.Flag( + "trace-sampling-probability", + "How often we send traces to exporters.", + ).Default("0.01").Float() + tracePubsubAckSamplingProbability := a.app.Flag( + "trace-pubsub-ack-sampling-probability", + "How often we send traces of pubsub ack to exporters.", + ).Default("0.0001").Float() + gcpTraceEnabled := a.app.Flag( + "gcp-trace-enabled", + "Enables sending trace data to GCP Trace service.", + ).Default("true").Bool() + + cmd, err := a.app.Parse(os.Args[1:]) + if err != nil { + return err + } + if a.cmds[cmd] == nil { + return errCommandNotFound + } + + serviceName := fmt.Sprintf("%s.%s", a.name, cmd) + logger, err := log.NewLogger( + log.WithLevel(*logLevel), + log.WithServiceContext(serviceName, a.version), + ) + if err != nil { + return err + } + defer logger.Sync() // nolint:errcheck + + if *profile { + err = profiler.Start(profiler.Config{ + Service: serviceName, + ServiceVersion: a.version}, + ) + if err != nil { + logger.Error("Failed to start profiler", zap.Error(err)) + return err + } + } + + metrics := metrics.NewMetrics( + *metricsPort, + "/metrics", + metrics.WithLogger(logger), + ) + defer metrics.Stop() + go metrics.Run() // nolint:errcheck + + if *gcpTraceEnabled { + sd, err := trace.NewStackdriverExporter(serviceName, a.version, logger) + if err != nil { + logger.Error("Failed to create the Stackdriver exporter", zap.Error(err)) + return err + } + defer sd.Flush() + octrace.RegisterExporter(sd) + } + octrace.ApplyConfig(octrace.Config{ + DefaultSampler: trace.NewSampler( + trace.WithDefaultProbability(*traceSamplingProbability), + trace.WithFilteringSampler(healthCheckSpanName, octrace.NeverSample()), + trace.WithFilteringSampler( + pubsubAckSpanName, +
octrace.ProbabilitySampler(*tracePubsubAckSamplingProbability), + ), + trace.WithFilteringSampler( + pubsubModifyAckSpanName, + octrace.ProbabilitySampler(*tracePubsubAckSamplingProbability), + ), + ), + }) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ch := make(chan os.Signal, 1) + signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM) + defer signal.Stop(ch) + + go func() { + select { + case s := <-ch: + logger.Info("App is stopping due to signal", zap.Stringer("signal", s)) + cancel() + case <-ctx.Done(): + } + }() + logger.Info(fmt.Sprintf("Running %s", serviceName)) + return a.cmds[cmd].Run(ctx, metrics, logger) +} diff --git a/pkg/cli/cmd.go b/pkg/cli/cmd.go new file mode 100644 index 000000000..092fb2b42 --- /dev/null +++ b/pkg/cli/cmd.go @@ -0,0 +1,37 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cli + +import ( + "context" + + "go.uber.org/zap" + kingpin "gopkg.in/alecthomas/kingpin.v2" + + "github.com/bucketeer-io/bucketeer/pkg/metrics" +) + +type ParentCommand interface { + Command(name string, desc string) *kingpin.CmdClause +} + +type Command interface { + FullCommand() string + Run(ctx context.Context, metrics metrics.Metrics, logger *zap.Logger) error +} + +type CommandRegistry interface { + RegisterCommand(Command) +} diff --git a/pkg/crypto/BUILD.bazel b/pkg/crypto/BUILD.bazel new file mode 100644 index 000000000..5ea7b4016 --- /dev/null +++ b/pkg/crypto/BUILD.bazel @@ -0,0 +1,15 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "cloudkmscrypto.go", + "crypto.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/crypto", + visibility = ["//visibility:public"], + deps = [ + "@com_google_cloud_go_kms//apiv1:go_default_library", + "@go_googleapis//google/cloud/kms/v1:kms_go_proto", + ], +) diff --git a/pkg/crypto/cloudkmscrypto.go b/pkg/crypto/cloudkmscrypto.go new file mode 100644 index 000000000..1082745f1 --- /dev/null +++ b/pkg/crypto/cloudkmscrypto.go @@ -0,0 +1,60 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package crypto + +import ( + "context" + + cloudkms "cloud.google.com/go/kms/apiv1" + kms "cloud.google.com/go/kms/apiv1" + kmsproto "google.golang.org/genproto/googleapis/cloud/kms/v1" +) + +type cloudKMSCrypto struct { + client *cloudkms.KeyManagementClient + keyName string +} + +func NewCloudKMSCrypto( + client *kms.KeyManagementClient, + keyName string, +) EncrypterDecrypter { + return cloudKMSCrypto{ + client: client, + keyName: keyName, + } +} + +func (c cloudKMSCrypto) Encrypt(ctx context.Context, data []byte) ([]byte, error) { + resp, err := c.client.Encrypt(ctx, &kmsproto.EncryptRequest{ + Name: c.keyName, + Plaintext: data, + }) + if err != nil { + return nil, err + } + return resp.Ciphertext, nil +} + +func (c cloudKMSCrypto) Decrypt(ctx context.Context, data []byte) ([]byte, error) { + resp, err := c.client.Decrypt(ctx, &kmsproto.DecryptRequest{ + Name: c.keyName, + Ciphertext: data, + }) + if err != nil { + return nil, err + } + return resp.Plaintext, nil +} diff --git a/pkg/crypto/crypto.go b/pkg/crypto/crypto.go new file mode 100644 index 000000000..ec169e881 --- /dev/null +++ b/pkg/crypto/crypto.go @@ -0,0 +1,22 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package crypto + +import "context" + +type EncrypterDecrypter interface { + Encrypt(ctx context.Context, data []byte) ([]byte, error) + Decrypt(ctx context.Context, data []byte) ([]byte, error) +} diff --git a/pkg/domainevent/BUILD.bazel b/pkg/domainevent/BUILD.bazel new file mode 100644 index 000000000..e69de29bb diff --git a/pkg/domainevent/domain/BUILD.bazel b/pkg/domainevent/domain/BUILD.bazel new file mode 100644 index 000000000..d8d4dc467 --- /dev/null +++ b/pkg/domainevent/domain/BUILD.bazel @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "event.go", + "message.go", + "url.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/domainevent/domain", + visibility = ["//visibility:public"], + deps = [ + "//pkg/locale:go_default_library", + "//pkg/storage:go_default_library", + "//pkg/uuid:go_default_library", + "//proto/event/domain:go_default_library", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_golang_protobuf//ptypes:go_default_library_gen", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "message_test.go", + "url_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//pkg/locale:go_default_library", + "//proto/event/domain:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + ], +) diff --git a/pkg/domainevent/domain/event.go b/pkg/domainevent/domain/event.go new file mode 100644 index 000000000..3f3591a91 --- /dev/null +++ b/pkg/domainevent/domain/event.go @@ -0,0 +1,104 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package domain + +import ( + "time" + + pb "github.com/golang/protobuf/proto" // nolint:staticcheck + "github.com/golang/protobuf/ptypes" + + "github.com/bucketeer-io/bucketeer/pkg/storage" + "github.com/bucketeer-io/bucketeer/pkg/uuid" + "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +var defaultOptions = domain.Options{ + Comment: "", + NewVersion: 1, +} + +type Option func(*domain.Options) + +func WithComment(c string) Option { + return func(opts *domain.Options) { + opts.Comment = c + } +} + +func WithNewVersion(ver int32) Option { + return func(opts *domain.Options) { + opts.NewVersion = ver + } +} + +func NewEvent( + editor *domain.Editor, + entityType domain.Event_EntityType, + entityID string, + eventType domain.Event_Type, + event pb.Message, + environmentNamespace string, + opts ...Option, +) (*domain.Event, error) { + return newEvent(editor, entityType, entityID, eventType, event, environmentNamespace, false, opts...) +} + +func NewAdminEvent( + editor *domain.Editor, + entityType domain.Event_EntityType, + entityID string, + eventType domain.Event_Type, + event pb.Message, + opts ...Option, +) (*domain.Event, error) { + return newEvent(editor, entityType, entityID, eventType, event, storage.AdminEnvironmentNamespace, true, opts...) 
+} + +func newEvent( + editor *domain.Editor, + entityType domain.Event_EntityType, + entityID string, + eventType domain.Event_Type, + event pb.Message, + environmentNamespace string, + isAdminEvent bool, + opts ...Option, +) (*domain.Event, error) { + options := defaultOptions + for _, opt := range opts { + opt(&options) + } + buf, err := ptypes.MarshalAny(event) + if err != nil { + return nil, err + } + id, err := uuid.NewUUID() + if err != nil { + return nil, err + } + return &domain.Event{ + Id: id.String(), + Timestamp: time.Now().Unix(), + EntityType: entityType, + EntityId: entityID, + Type: eventType, + Editor: editor, + Data: buf, + EnvironmentNamespace: environmentNamespace, + IsAdminEvent: isAdminEvent, + Options: &options, + }, nil +} diff --git a/pkg/domainevent/domain/message.go b/pkg/domainevent/domain/message.go new file mode 100644 index 000000000..2fc40a9ed --- /dev/null +++ b/pkg/domainevent/domain/message.go @@ -0,0 +1,665 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package domain + +import ( + "github.com/bucketeer-io/bucketeer/pkg/locale" + proto "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +func LocalizedMessage(eventType proto.Event_Type, loc string) *proto.LocalizedMessage { + // handle loc if multi-lang is necessary + switch eventType { + case proto.Event_UNKNOWN: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不明な操作を実行しました", + } + case proto.Event_FEATURE_CREATED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "feature flagを作成しました", + } + case proto.Event_FEATURE_RENAMED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "feature flagの名前を変更しました", + } + case proto.Event_FEATURE_ENABLED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "feature flagを有効化しました", + } + case proto.Event_FEATURE_DISABLED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "feature flagを無効化しました", + } + case proto.Event_FEATURE_ARCHIVED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "feature flagをアーカイブしました", + } + case proto.Event_FEATURE_UNARCHIVED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "feature flagをアーカイブから解除しました", + } + case proto.Event_FEATURE_DELETED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "feature flagを削除しました", + } + case proto.Event_FEATURE_EVALUATION_DELAYABLE_SET: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "feature flagを初回リクエスト時にキューに入れるように変更されました", + } + case proto.Event_FEATURE_EVALUATION_UNDELAYABLE_SET: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "feature flagを初回リクエスト時にキューに入れないように変更されました", + } + case proto.Event_FEATURE_DESCRIPTION_CHANGED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "feature flagの説明文を変更しました", + } + case proto.Event_FEATURE_VARIATION_ADDED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "feature flagにvariationを追加しました", + } + case 
proto.Event_FEATURE_VARIATION_REMOVED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "feature flagのvariationを削除しました", + } + case proto.Event_FEATURE_OFF_VARIATION_CHANGED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "feature flagの無効時のvariationを変更しました", + } + case proto.Event_VARIATION_VALUE_CHANGED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "variationの値を変更しました", + } + case proto.Event_VARIATION_NAME_CHANGED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "variationの名前を変更しました", + } + case proto.Event_VARIATION_DESCRIPTION_CHANGED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "variationの説明文を変更しました", + } + case proto.Event_VARIATION_USER_ADDED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "variationを適用するユーザーを追加しました", + } + case proto.Event_VARIATION_USER_REMOVED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "variationを適用するユーザーを削除しました", + } + case proto.Event_FEATURE_RULE_ADDED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "ruleを追加しました", + } + case proto.Event_FEATURE_RULE_STRATEGY_CHANGED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "ruleの適用するvariationの選択方法を変更しました", + } + case proto.Event_FEATURE_RULE_DELETED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "ruleを削除しました", + } + case proto.Event_RULE_CLAUSE_ADDED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "ruleの条件を追加しました", + } + case proto.Event_RULE_CLAUSE_DELETED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "ruleの条件を削除しました", + } + case proto.Event_RULE_FIXED_STRATEGY_CHANGED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "ruleの適用するvariationの種類を変更しました", + } + case proto.Event_RULE_ROLLOUT_STRATEGY_CHANGED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "ruleの適用するvariationの適用割合を変更しました", + } + case 
proto.Event_CLAUSE_ATTRIBUTE_CHANGED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "ruleの条件のattributeを変更しました", + } + case proto.Event_CLAUSE_OPERATOR_CHANGED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "ruleの条件のoperatorを変更しました", + } + case proto.Event_CLAUSE_VALUE_ADDED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "ruleの条件の対象の値を追加しました", + } + case proto.Event_CLAUSE_VALUE_REMOVED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "ruleの条件の対象の値を削除しました", + } + case proto.Event_FEATURE_DEFAULT_STRATEGY_CHANGED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "feature flagがデフォルトで適用する条件を変更しました", + } + case proto.Event_FEATURE_TAG_ADDED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "タグを追加しました", + } + case proto.Event_FEATURE_TAG_REMOVED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "タグを削除しました", + } + case proto.Event_FEATURE_VERSION_INCREMENTED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "feature flagのバージョンを更新しました", + } + case proto.Event_FEATURE_CLONED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "feature flagをクローンしました", + } + case proto.Event_SAMPLING_SEED_RESET: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "ランダムサンプリングをリセットしました", + } + case proto.Event_PREREQUISITE_ADDED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "prerequisiteを追加しました", + } + case proto.Event_PREREQUISITE_REMOVED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "prerequisiteを削除しました", + } + case proto.Event_PREREQUISITE_VARIATION_CHANGED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "prerequisiteのvariationを変更しました", + } + case proto.Event_GOAL_CREATED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "goalを作成しました", + } + case proto.Event_GOAL_RENAMED: + return &proto.LocalizedMessage{ + Locale: 
locale.JaJP, + Message: "goalの名前を変更しました", + } + case proto.Event_GOAL_DESCRIPTION_CHANGED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "goalの説明文を変更しました", + } + case proto.Event_GOAL_ARCHIVED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "goalをアーカイブしました", + } + case proto.Event_GOAL_DELETED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "goalを削除しました", + } + case proto.Event_EXPERIMENT_CREATED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "experimentを作成しました", + } + case proto.Event_EXPERIMENT_STOPPED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "experimentを停止しました", + } + case proto.Event_EXPERIMENT_START_AT_CHANGED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "experimentの開始時間を変更しました", + } + case proto.Event_EXPERIMENT_STOP_AT_CHANGED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "experimentの終了時間を変更しました", + } + case proto.Event_EXPERIMENT_NAME_CHANGED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "experimentの名前を変更しました", + } + case proto.Event_EXPERIMENT_DESCRIPTION_CHANGED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "experimentの説明文を変更しました", + } + case proto.Event_EXPERIMENT_ARCHIVED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "experimentをアーカイブしました", + } + case proto.Event_EXPERIMENT_DELETED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "experimentを削除しました", + } + case proto.Event_EXPERIMENT_PERIOD_CHANGED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "experimentの期間を変更しました", + } + case proto.Event_EXPERIMENT_STARTED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "experimentが開始しました", + } + case proto.Event_EXPERIMENT_FINISHED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "experimentが終了しました", + } + case proto.Event_ACCOUNT_CREATED: + return 
&proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "アカウントを作成しました", + } + case proto.Event_ACCOUNT_ROLE_CHANGED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "アカウントの権限を変更しました", + } + case proto.Event_ACCOUNT_ENABLED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "アカウントを有効化しました", + } + case proto.Event_ACCOUNT_DISABLED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "アカウントを無効化しました", + } + case proto.Event_ACCOUNT_DELETED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "アカウントを削除しました", + } + case proto.Event_APIKEY_CREATED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "APIキーを作成しました", + } + case proto.Event_APIKEY_NAME_CHANGED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "APIキーの名前を変更しました", + } + case proto.Event_APIKEY_ENABLED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "APIキーを有効化しました", + } + case proto.Event_APIKEY_DISABLED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "APIキーを無効化しました", + } + case proto.Event_SEGMENT_CREATED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "segmentを作成しました", + } + case proto.Event_SEGMENT_DELETED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "segmentを削除しました", + } + case proto.Event_SEGMENT_NAME_CHANGED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "segmentの名前を変更しました", + } + case proto.Event_SEGMENT_DESCRIPTION_CHANGED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "segmentの説明文を変更しました", + } + case proto.Event_SEGMENT_RULE_ADDED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "segmentにruleを追加しました", + } + case proto.Event_SEGMENT_RULE_DELETED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "segmentからruleを削除しました", + } + case proto.Event_SEGMENT_RULE_CLAUSE_ADDED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + 
Message: "segmentのruleに条件を追加しました", + } + case proto.Event_SEGMENT_RULE_CLAUSE_DELETED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "segmentのruleから条件を削除しました", + } + case proto.Event_SEGMENT_CLAUSE_ATTRIBUTE_CHANGED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "segmentのruleの条件のattributeを変更しました", + } + case proto.Event_SEGMENT_CLAUSE_OPERATOR_CHANGED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "segmentのruleの条件のoperatorを変更しました", + } + case proto.Event_SEGMENT_CLAUSE_VALUE_ADDED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "segmentのruleの条件の対象の値を追加しました", + } + case proto.Event_SEGMENT_CLAUSE_VALUE_REMOVED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "segmentのruleの条件の対象の値を削除しました", + } + case proto.Event_SEGMENT_USER_ADDED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "segmentにユーザーを追加しました", + } + case proto.Event_SEGMENT_USER_DELETED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "segmentからユーザーを削除しました", + } + case proto.Event_SEGMENT_BULK_UPLOAD_USERS: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "ユーザーセグメントファイルをアップロードしました", + } + case proto.Event_SEGMENT_BULK_UPLOAD_USERS_STATUS_CHANGED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "ユーザーセグメントファイルのアップロードステータスが変わりました", + } + case proto.Event_ENVIRONMENT_CREATED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "Environmentを作成しました", + } + case proto.Event_ENVIRONMENT_RENAMED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "Environmentの名前を変更しました", + } + case proto.Event_ENVIRONMENT_DESCRIPTION_CHANGED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "Environmentの説明文を変更しました", + } + case proto.Event_ENVIRONMENT_DELETED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "Environmentを削除しました", + } + case proto.Event_ADMIN_ACCOUNT_CREATED: + return 
&proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "管理者アカウントを作成しました", + } + case proto.Event_ADMIN_ACCOUNT_ENABLED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "管理者アカウントを有効化しました", + } + case proto.Event_ADMIN_ACCOUNT_DISABLED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "管理者アカウントを無効化しました", + } + case proto.Event_AUTOOPS_RULE_CREATED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "自動オペレーションルールを作成しました", + } + case proto.Event_AUTOOPS_RULE_DELETED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "自動オペレーションルールを削除しました", + } + case proto.Event_AUTOOPS_RULE_OPS_TYPE_CHANGED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "オペレーションタイプを変更しました", + } + case proto.Event_AUTOOPS_RULE_CLAUSE_DELETED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "オペレーションルールを削除しました", + } + case proto.Event_AUTOOPS_RULE_TRIGGERED_AT_CHANGED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "自動オペレーションの実行時間が変更されました", + } + case proto.Event_OPS_EVENT_RATE_CLAUSE_ADDED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "イベントレートルールが追加されました", + } + case proto.Event_OPS_EVENT_RATE_CLAUSE_CHANGED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "イベントレートルールが変更されました", + } + case proto.Event_DATETIME_CLAUSE_ADDED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "日時ルールが追加されました", + } + case proto.Event_DATETIME_CLAUSE_CHANGED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "日時ルールが変更されました", + } + case proto.Event_PUSH_CREATED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "プッシュ設定を作成しました", + } + case proto.Event_PUSH_DELETED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "プッシュ設定を削除しました", + } + case proto.Event_PUSH_TAGS_ADDED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "プッシュ設定にタグを追加しました", + } + case 
proto.Event_PUSH_TAGS_DELETED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "プッシュ設定からタグを削除しました", + } + case proto.Event_PUSH_RENAMED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "プッシュ設定の名前を変更しました", + } + case proto.Event_SUBSCRIPTION_CREATED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "通知設定を作成しました", + } + case proto.Event_SUBSCRIPTION_DELETED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "通知設定を削除しました", + } + case proto.Event_SUBSCRIPTION_ENABLED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "通知設定を有効化しました", + } + case proto.Event_SUBSCRIPTION_DISABLED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "通知設定を無効化しました", + } + case proto.Event_SUBSCRIPTION_SOURCE_TYPE_ADDED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "通知設定に通知設定ソースを追加しました", + } + case proto.Event_SUBSCRIPTION_SOURCE_TYPE_DELETED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "通知設定から通知設定ソースを削除しました", + } + case proto.Event_SUBSCRIPTION_RENAMED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "通知設定の名前を変更しました", + } + case proto.Event_ADMIN_SUBSCRIPTION_CREATED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "管理者用通知設定を作成しました", + } + case proto.Event_ADMIN_SUBSCRIPTION_DELETED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "管理者用通知設定を削除しました", + } + case proto.Event_ADMIN_SUBSCRIPTION_ENABLED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "管理者用通知設定を有効化しました", + } + case proto.Event_ADMIN_SUBSCRIPTION_DISABLED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "管理者用通知設定を無効化しました", + } + case proto.Event_ADMIN_SUBSCRIPTION_SOURCE_TYPE_ADDED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "管理者用通知設定に通知設定ソースを追加しました", + } + case proto.Event_ADMIN_SUBSCRIPTION_SOURCE_TYPE_DELETED: + return &proto.LocalizedMessage{ + 
Locale: locale.JaJP, + Message: "管理者用通知設定から通知設定ソースを削除しました", + } + case proto.Event_ADMIN_SUBSCRIPTION_RENAMED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "管理者用通知設定の名前を変更しました", + } + case proto.Event_PROJECT_CREATED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "Projectを作成しました", + } + case proto.Event_PROJECT_DESCRIPTION_CHANGED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "Projectの説明文を変更しました", + } + case proto.Event_PROJECT_ENABLED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "Projectを有効化しました", + } + case proto.Event_PROJECT_DISABLED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "Projectを無効化しました", + } + case proto.Event_PROJECT_TRIAL_CREATED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "Trial Projectを作成しました", + } + case proto.Event_PROJECT_TRIAL_CONVERTED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "Trialを正式なProjectに変換しました", + } + case proto.Event_WEBHOOK_CREATED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "webhookを作成しました", + } + case proto.Event_WEBHOOK_DELETED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "webhookを削除しました", + } + case proto.Event_WEBHOOK_NAME_CHANGED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "webhookの名前を変更しました", + } + case proto.Event_WEBHOOK_DESCRIPTION_CHANGED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "webhookの説明を変更しました", + } + case proto.Event_WEBHOOK_CLAUSE_ADDED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "webhookのルールが追加されました", + } + case proto.Event_WEBHOOK_CLAUSE_CHANGED: + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "webhookのルールが変更されました", + } + } + return &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不明な操作を実行しました", + } +} diff --git a/pkg/domainevent/domain/message_test.go b/pkg/domainevent/domain/message_test.go 
new file mode 100644 index 000000000..476fe2a83 --- /dev/null +++ b/pkg/domainevent/domain/message_test.go @@ -0,0 +1,71 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package domain + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/locale" + proto "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +func TestLocalizedMessage(t *testing.T) { + t.Parallel() + patterns := map[string]struct { + inputEventType proto.Event_Type + expected *proto.LocalizedMessage + }{ + "unknown match": { + inputEventType: proto.Event_UNKNOWN, + expected: &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不明な操作を実行しました", + }, + }, + "unmatch": { + inputEventType: proto.Event_Type(-1), + expected: &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不明な操作を実行しました", + }, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + actual := LocalizedMessage(p.inputEventType, locale.JaJP) + assert.Equal(t, p.expected, actual) + }) + } +} + +// TestImplementedLocalizedMessage checks if every domain event type has a message. 
+func TestImplementedLocalizedMessage(t *testing.T) { + t.Parallel() + unknown := &proto.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不明な操作を実行しました", + } + for k, v := range proto.Event_Type_name { + if v == "UNKNOWN" { + continue + } + t.Run(v, func(t *testing.T) { + actual := LocalizedMessage(proto.Event_Type(k), locale.JaJP) + assert.NotEqual(t, unknown, actual) + }) + } +} diff --git a/pkg/domainevent/domain/url.go b/pkg/domainevent/domain/url.go new file mode 100644 index 000000000..ff9df4f67 --- /dev/null +++ b/pkg/domainevent/domain/url.go @@ -0,0 +1,80 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package domain + +import ( + "errors" + "fmt" + + proto "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +const ( + urlTemplateFeature = "%s/%s/features/%s" + urlTemplateGoal = "%s/%s/goals/%s" + urlTemplateExperiment = "%s/%s/experiments/%s" + urlTemplateAccount = "%s/%s/accounts/%s" + urlTemplateAPIKey = "%s/%s/apikeys/%s" + urlTemplateSegment = "%s/%s/segments/%s" + urlTemplateAutoOpsRule = "%s/%s/features/%s/settings" + urlTemplatePush = "%s/%s/settings/pushes/%s" + urlTemplateSubscription = "%s/%s/settings/notifications/%s" + + // FIXME: url templates for admin will not require defaultEnvironmentID after environmentID is removed from admin page. 
+ urlTemplateAdminSubscription = "%s/%s/admin/notifications/%s" + urlTemplateEnvironment = "%s/%s/admin/environments/%s" + urlTemplateAdminAccount = "%s/%s/admin/accounts/%s" + urlTemplateProject = "%s/%s/admin/projects/%s" + urlTemplateWebhook = "%s/%s/settings/intergrations/webhook/%s" + defaultEnvironmentID = "bucketeer" +) + +var ( + ErrUnknownEntityType = errors.New("domain: unknown entity type") +) + +func URL(entityType proto.Event_EntityType, url, environmentID, id string) (string, error) { + switch entityType { + case proto.Event_FEATURE: + return fmt.Sprintf(urlTemplateFeature, url, environmentID, id), nil + case proto.Event_GOAL: + return fmt.Sprintf(urlTemplateGoal, url, environmentID, id), nil + case proto.Event_EXPERIMENT: + return fmt.Sprintf(urlTemplateExperiment, url, environmentID, id), nil + case proto.Event_ACCOUNT: + return fmt.Sprintf(urlTemplateAccount, url, environmentID, id), nil + case proto.Event_APIKEY: + return fmt.Sprintf(urlTemplateAPIKey, url, environmentID, id), nil + case proto.Event_SEGMENT: + return fmt.Sprintf(urlTemplateSegment, url, environmentID, id), nil + case proto.Event_AUTOOPS_RULE: + return fmt.Sprintf(urlTemplateAutoOpsRule, url, environmentID, id), nil + case proto.Event_PUSH: + return fmt.Sprintf(urlTemplatePush, url, environmentID, id), nil + case proto.Event_SUBSCRIPTION: + return fmt.Sprintf(urlTemplateSubscription, url, environmentID, id), nil + case proto.Event_ADMIN_SUBSCRIPTION: + return fmt.Sprintf(urlTemplateAdminSubscription, url, defaultEnvironmentID, id), nil + case proto.Event_ENVIRONMENT: + return fmt.Sprintf(urlTemplateEnvironment, url, defaultEnvironmentID, id), nil + case proto.Event_ADMIN_ACCOUNT: + return fmt.Sprintf(urlTemplateAdminAccount, url, defaultEnvironmentID, id), nil + case proto.Event_PROJECT: + return fmt.Sprintf(urlTemplateProject, url, defaultEnvironmentID, id), nil + case proto.Event_WEBHOOK: + return fmt.Sprintf(urlTemplateWebhook, url, defaultEnvironmentID, id), nil + } + return 
"", ErrUnknownEntityType +} diff --git a/pkg/domainevent/domain/url_test.go b/pkg/domainevent/domain/url_test.go new file mode 100644 index 000000000..91f89336a --- /dev/null +++ b/pkg/domainevent/domain/url_test.go @@ -0,0 +1,54 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package domain + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + proto "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +func TestURL(t *testing.T) { + t.Parallel() + patterns := map[string]struct { + inputEntityType proto.Event_EntityType + expected string + }{ + "feature": { + inputEntityType: proto.Event_FEATURE, + expected: "url/env/features/id", + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + actual, err := URL(p.inputEntityType, "url", "env", "id") + assert.NoError(t, err) + assert.Equal(t, p.expected, actual) + }) + } +} + +// TestImplementedURL checks if every domain entity type has a url. 
+func TestImplementedURL(t *testing.T) { + t.Parallel() + for k, v := range proto.Event_EntityType_name { + t.Run(v, func(t *testing.T) { + _, err := URL(proto.Event_EntityType(k), "url", "env", "id") + assert.NoError(t, err) + }) + } +} diff --git a/pkg/druid/BUILD.bazel b/pkg/druid/BUILD.bazel new file mode 100644 index 000000000..f0bd66e2f --- /dev/null +++ b/pkg/druid/BUILD.bazel @@ -0,0 +1,17 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "supervisor.go", + "supervisor_creator.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/druid", + visibility = ["//visibility:public"], + deps = [ + "//pkg/storage/druid:go_default_library", + "//pkg/storage/kafka:go_default_library", + "@com_github_ca_dp_godruid//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/druid/mock/BUILD.bazel b/pkg/druid/mock/BUILD.bazel new file mode 100644 index 000000000..0bcba4954 --- /dev/null +++ b/pkg/druid/mock/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["supervisor_creator.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/druid/mock", + visibility = ["//visibility:public"], + deps = ["@com_github_golang_mock//gomock:go_default_library"], +) diff --git a/pkg/druid/mock/supervisor_creator.go b/pkg/druid/mock/supervisor_creator.go new file mode 100644 index 000000000..3b72d48bf --- /dev/null +++ b/pkg/druid/mock/supervisor_creator.go @@ -0,0 +1,49 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: supervisor_creator.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockSupervisorCreator is a mock of SupervisorCreator interface. 
+type MockSupervisorCreator struct { + ctrl *gomock.Controller + recorder *MockSupervisorCreatorMockRecorder +} + +// MockSupervisorCreatorMockRecorder is the mock recorder for MockSupervisorCreator. +type MockSupervisorCreatorMockRecorder struct { + mock *MockSupervisorCreator +} + +// NewMockSupervisorCreator creates a new mock instance. +func NewMockSupervisorCreator(ctrl *gomock.Controller) *MockSupervisorCreator { + mock := &MockSupervisorCreator{ctrl: ctrl} + mock.recorder = &MockSupervisorCreatorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSupervisorCreator) EXPECT() *MockSupervisorCreatorMockRecorder { + return m.recorder +} + +// CreateSupervisors mocks base method. +func (m *MockSupervisorCreator) CreateSupervisors(ctx context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateSupervisors", ctx) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateSupervisors indicates an expected call of CreateSupervisors. +func (mr *MockSupervisorCreatorMockRecorder) CreateSupervisors(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSupervisors", reflect.TypeOf((*MockSupervisorCreator)(nil).CreateSupervisors), ctx) +} diff --git a/pkg/druid/supervisor.go b/pkg/druid/supervisor.go new file mode 100644 index 000000000..85fc96cb7 --- /dev/null +++ b/pkg/druid/supervisor.go @@ -0,0 +1,171 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package druid + +import ( + "fmt" + + "github.com/ca-dp/godruid" + + storagedruid "github.com/bucketeer-io/bucketeer/pkg/storage/druid" + storagekafka "github.com/bucketeer-io/bucketeer/pkg/storage/kafka" +) + +type supervisor struct { + dataType string + kafkaTopicDataType string + metricsSpec []*godruid.MetricsSpec +} + +var ( + eventSupervisors = []supervisor{ + { + dataType: "evaluation_events", + kafkaTopicDataType: "evaluation-events", + metricsSpec: []*godruid.MetricsSpec{ + { + Type: "count", + Name: "count", + }, + { + Name: "userIdHllSketch", + Type: "HLLSketchBuild", + FieldName: "metric.userId", + }, + { + Name: "userIdThetaSketch", + Type: "thetaSketch", + FieldName: "metric.userId", + }, + }, + }, + { + dataType: "goal_events", + kafkaTopicDataType: "goal-events", + metricsSpec: []*godruid.MetricsSpec{ + { + Name: "count", + Type: "count", + }, + { + Name: "valueSum", + Type: "doubleSum", + FieldName: "value", + }, + { + Name: "valueVariance", + Type: "variance", + FieldName: "value", + InputType: "double", + Estimator: "population", + }, + { + Name: "userIdHllSketch", + Type: "HLLSketchBuild", + FieldName: "metric.userId", + }, + { + Name: "userIdThetaSketch", + Type: "thetaSketch", + FieldName: "metric.userId", + }, + }, + }, + { + dataType: "user_events", + kafkaTopicDataType: "user-events", + }, + } +) + +func EventSupervisors( + druidDatasourcePrefix, + kafkaTopicPrefix, + kafkaURL, + kafkaUsername, + kafkaPassword string, + maxRowsPerSegment int, +) []*godruid.SupervisorKafka { + supervisors := []*godruid.SupervisorKafka{} + for _, es := range eventSupervisors { + kafkaTopic := storagekafka.TopicName(kafkaTopicPrefix, es.kafkaTopicDataType) + datasource := storagedruid.Datasource(druidDatasourcePrefix, es.dataType) + supervisors = append( + supervisors, + eventSupervisor( + kafkaTopic, + kafkaURL, + kafkaUsername, + kafkaPassword, + 
datasource, + maxRowsPerSegment, + es.metricsSpec, + ), + ) + } + return supervisors +} + +func eventSupervisor( + kafkaTopic, kafkaURL, kafkaUsername, kafkaPassword, datasource string, + maxRowsPerSegment int, + metricsSpec []*godruid.MetricsSpec, +) *godruid.SupervisorKafka { + sasLJAASConfig := fmt.Sprintf( + "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"%s\" password=\"%s\";", + kafkaUsername, + kafkaPassword, + ) + return &godruid.SupervisorKafka{ + Type: "kafka", + IOConfig: godruid.IOConfigKafka( + kafkaTopic, + "PT24H", + "PT25H", + kafkaURL, + "SCRAM-SHA-512", + "SASL_PLAINTEXT", + sasLJAASConfig, + ), + TuningConfig: godruid.TuningConfigKafka(maxRowsPerSegment), + DataSchema: &godruid.DataSchema{ + Datasource: datasource, + GranularitySpec: godruid.GranUniform("HOUR", "HOUR", true), + Parser: &godruid.Parser{ + Type: "string", + ParseSpec: &godruid.ParseSpec{ + Format: "json", + TimestampSpec: &godruid.TimestampSpec{Column: "timestamp", Format: "iso"}, + DimensionSpec: &godruid.EmptyDimension{}, + }, + }, + MetricsSpec: metricsSpec, + }, + } +} + +func retentionRule(period string) *godruid.RetentionRules { + return &godruid.RetentionRules{ + Rules: []*godruid.RetentionRule{ + { + Type: "loadByPeriod", + Period: period, + IncludeFuture: false, + TieredReplicants: &godruid.TieredReplicants{DefaultTier: 2}, + }, + {Type: "dropForever"}, + }, + } +} diff --git a/pkg/druid/supervisor_creator.go b/pkg/druid/supervisor_creator.go new file mode 100644 index 000000000..627ace105 --- /dev/null +++ b/pkg/druid/supervisor_creator.go @@ -0,0 +1,124 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package druid + +import ( + "context" + + "go.uber.org/zap" + + storagedruid "github.com/bucketeer-io/bucketeer/pkg/storage/druid" +) + +type SupervisorCreator interface { + CreateSupervisors(ctx context.Context) error +} + +type options struct { + maxRowsPerSegment int + retentionPeriod string + logger *zap.Logger +} + +type Option func(*options) + +func WithMaxRowsPerSegment(r int) Option { + return func(opts *options) { + opts.maxRowsPerSegment = r + } +} + +func WithRetentionPeriod(r string) Option { + return func(opts *options) { + opts.retentionPeriod = r + } +} + +func WithLogger(l *zap.Logger) Option { + return func(opts *options) { + opts.logger = l + } +} + +type supervisorCreator struct { + coordinatorClient *storagedruid.CoordinatorClient + overlordClient *storagedruid.OverlordClient + datasourcePrefix string + kafkaURL string + kafkaTopicPrefix string + kafkaUsername string + kafkaPassword string + opts *options + logger *zap.Logger +} + +func NewSupervisorCreator( + coordinatorClient *storagedruid.CoordinatorClient, + overlordClient *storagedruid.OverlordClient, + datasourcePrefix, + kafkaURL, + kafkaTopicPrefix, + kafkaUsername, + kafkaPassword string, + opts ...Option) SupervisorCreator { + + dopts := &options{ + maxRowsPerSegment: 3000000, + retentionPeriod: "P2M", + logger: zap.NewNop(), + } + for _, opt := range opts { + opt(dopts) + } + return &supervisorCreator{ + coordinatorClient: coordinatorClient, + overlordClient: overlordClient, + 
datasourcePrefix: datasourcePrefix, + kafkaURL: kafkaURL, + kafkaTopicPrefix: kafkaTopicPrefix, + kafkaUsername: kafkaUsername, + kafkaPassword: kafkaPassword, + opts: dopts, + logger: dopts.logger.Named("druid"), + } +} + +func (c *supervisorCreator) CreateSupervisors(ctx context.Context) error { + for _, sv := range EventSupervisors( + c.datasourcePrefix, + c.kafkaTopicPrefix, + c.kafkaURL, + c.kafkaUsername, + c.kafkaPassword, + c.opts.maxRowsPerSegment, + ) { + if err := c.overlordClient.CreateOrUpdateSupervisor(ctx, sv, ""); err != nil { + c.logger.Error("Failed to create suprvisor", zap.Error(err), + zap.Any("supervisor", sv)) + return err + } + datasource := sv.DataSchema.Datasource + retentionRule := retentionRule(c.opts.retentionPeriod) + if err := c.coordinatorClient.CreateOrUpdateRetentionRule(ctx, datasource, retentionRule, ""); err != nil { + c.logger.Error("Failed to set retention rule", zap.Error(err), + zap.String("datastore", datasource)) + return err + } + c.logger.Info("Succeeded to create suprvisor", + zap.Any("supervisor", sv)) + } + return nil +} diff --git a/pkg/environment/api/BUILD.bazel b/pkg/environment/api/BUILD.bazel new file mode 100644 index 000000000..07db5f912 --- /dev/null +++ b/pkg/environment/api/BUILD.bazel @@ -0,0 +1,63 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "api.go", + "environment.go", + "error.go", + "project.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/environment/api", + visibility = ["//visibility:public"], + deps = [ + "//pkg/account/client:go_default_library", + "//pkg/environment/command:go_default_library", + "//pkg/environment/domain:go_default_library", + "//pkg/environment/storage/v2:go_default_library", + "//pkg/locale:go_default_library", + "//pkg/log:go_default_library", + "//pkg/pubsub/publisher:go_default_library", + "//pkg/role:go_default_library", + "//pkg/rpc/status:go_default_library", + 
"//pkg/storage/v2/mysql:go_default_library", + "//proto/account:go_default_library", + "//proto/environment:go_default_library", + "//proto/event/domain:go_default_library", + "@go_googleapis//google/rpc:errdetails_go_proto", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "api_test.go", + "environment_test.go", + "project_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//pkg/account/client/mock:go_default_library", + "//pkg/environment/storage/v2:go_default_library", + "//pkg/locale:go_default_library", + "//pkg/log:go_default_library", + "//pkg/pubsub/publisher/mock:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/storage:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//pkg/storage/v2/mysql/mock:go_default_library", + "//pkg/token:go_default_library", + "//proto/account:go_default_library", + "//proto/environment:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/environment/api/api.go b/pkg/environment/api/api.go new file mode 100644 index 000000000..aee996f5a --- /dev/null +++ b/pkg/environment/api/api.go @@ -0,0 +1,105 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "context" + + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + accountclient "github.com/bucketeer-io/bucketeer/pkg/account/client" + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + "github.com/bucketeer-io/bucketeer/pkg/role" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + environmentproto "github.com/bucketeer-io/bucketeer/proto/environment" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +type options struct { + logger *zap.Logger +} + +type Option func(*options) + +func WithLogger(l *zap.Logger) Option { + return func(opts *options) { + opts.logger = l + } +} + +type EnvironmentService struct { + accountClient accountclient.Client + mysqlClient mysql.Client + publisher publisher.Publisher + opts *options + logger *zap.Logger +} + +func NewEnvironmentService( + ac accountclient.Client, + mysqlClient mysql.Client, + publisher publisher.Publisher, + opts ...Option, +) *EnvironmentService { + dopts := &options{ + logger: zap.NewNop(), + } + for _, opt := range opts { + opt(dopts) + } + return &EnvironmentService{ + accountClient: ac, + mysqlClient: mysqlClient, + publisher: publisher, + opts: dopts, + logger: dopts.logger.Named("api"), + } +} + +func (s *EnvironmentService) Register(server *grpc.Server) { + environmentproto.RegisterEnvironmentServiceServer(server, s) +} + +func (s *EnvironmentService) 
checkAdminRole(ctx context.Context) (*eventproto.Editor, error) { + editor, err := role.CheckAdminRole(ctx) + if err != nil { + switch status.Code(err) { + case codes.Unauthenticated: + s.logger.Info( + "Unauthenticated", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusUnauthenticated, locale.JaJP) + case codes.PermissionDenied: + s.logger.Info( + "Permission denied", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusPermissionDenied, locale.JaJP) + default: + s.logger.Error( + "Failed to check role", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + } + return editor, nil +} diff --git a/pkg/environment/api/api_test.go b/pkg/environment/api/api_test.go new file mode 100644 index 000000000..4b7ff8275 --- /dev/null +++ b/pkg/environment/api/api_test.go @@ -0,0 +1,89 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "context" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + acmock "github.com/bucketeer-io/bucketeer/pkg/account/client/mock" + "github.com/bucketeer-io/bucketeer/pkg/log" + publishermock "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher/mock" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/storage" + mysqlmock "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + "github.com/bucketeer-io/bucketeer/pkg/token" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" +) + +func TestNewEnvironmentService(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + ac := acmock.NewMockClient(mockController) + mysqlClient := mysqlmock.NewMockClient(mockController) + p := publishermock.NewMockPublisher(mockController) + logger := zap.NewNop() + s := NewEnvironmentService(ac, mysqlClient, p, WithLogger(logger)) + assert.IsType(t, &EnvironmentService{}, s) +} + +func createContextWithToken(t *testing.T) context.Context { + t.Helper() + token := &token.IDToken{ + Issuer: "issuer", + Subject: "sub", + Audience: "audience", + Expiry: time.Now().AddDate(100, 0, 0), + IssuedAt: time.Now(), + Email: "email", + AdminRole: accountproto.Account_EDITOR, + } + ctx := context.TODO() + return context.WithValue(ctx, rpc.Key, token) +} + +func createContextWithTokenRoleUnassigned(t *testing.T) context.Context { + t.Helper() + token := &token.IDToken{ + Issuer: "issuer", + Subject: "sub", + Audience: "audience", + Expiry: time.Now().AddDate(100, 0, 0), + IssuedAt: time.Now(), + Email: "email", + AdminRole: accountproto.Account_UNASSIGNED, + } + ctx := context.TODO() + return context.WithValue(ctx, rpc.Key, token) +} +func newEnvironmentService(t *testing.T, mockController *gomock.Controller, s storage.Client) *EnvironmentService { + 
t.Helper() + logger, err := log.NewLogger() + require.NoError(t, err) + return &EnvironmentService{ + accountClient: acmock.NewMockClient(mockController), + mysqlClient: mysqlmock.NewMockClient(mockController), + publisher: publishermock.NewMockPublisher(mockController), + logger: logger.Named("api"), + } +} diff --git a/pkg/environment/api/environment.go b/pkg/environment/api/environment.go new file mode 100644 index 000000000..91151126e --- /dev/null +++ b/pkg/environment/api/environment.go @@ -0,0 +1,388 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "context" + "regexp" + "strconv" + + "go.uber.org/zap" + + "github.com/bucketeer-io/bucketeer/pkg/environment/command" + "github.com/bucketeer-io/bucketeer/pkg/environment/domain" + v2es "github.com/bucketeer-io/bucketeer/pkg/environment/storage/v2" + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + environmentproto "github.com/bucketeer-io/bucketeer/proto/environment" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +var environmentIDRegex = regexp.MustCompile("^[a-z0-9-]{1,50}$") + +func (s *EnvironmentService) GetEnvironment( + ctx context.Context, + req *environmentproto.GetEnvironmentRequest, +) (*environmentproto.GetEnvironmentResponse, error) { + _, err := s.checkAdminRole(ctx) + if err != nil { + return nil, err + } + if err := validateGetEnvironmentRequest(req); err != nil { + return nil, err + } + environmentStorage := v2es.NewEnvironmentStorage(s.mysqlClient) + environment, err := environmentStorage.GetEnvironment(ctx, req.Id) + if err != nil { + if err == v2es.ErrEnvironmentNotFound { + return nil, localizedError(statusEnvironmentNotFound, locale.JaJP) + } + return nil, localizedError(statusInternal, locale.JaJP) + } + if environment.Deleted { + return nil, localizedError(statusEnvironmentAlreadyDeleted, locale.JaJP) + } + return &environmentproto.GetEnvironmentResponse{ + Environment: environment.Environment, + }, nil +} + +func validateGetEnvironmentRequest(req *environmentproto.GetEnvironmentRequest) error { + if req.Id == "" { + return localizedError(statusEnvironmentIDRequired, locale.JaJP) + } + return nil +} + +func (s *EnvironmentService) GetEnvironmentByNamespace( + ctx context.Context, + req *environmentproto.GetEnvironmentByNamespaceRequest, +) (*environmentproto.GetEnvironmentByNamespaceResponse, error) { + _, err := s.checkAdminRole(ctx) + if err != nil { + return nil, err + } + 
environment, err := s.getEnvironmentByNamespace(ctx, req.Namespace) + if err != nil { + return nil, err + } + return &environmentproto.GetEnvironmentByNamespaceResponse{ + Environment: environment, + }, nil +} + +func (s *EnvironmentService) getEnvironmentByNamespace( + ctx context.Context, + namespace string, +) (*environmentproto.Environment, error) { + environmentStorage := v2es.NewEnvironmentStorage(s.mysqlClient) + environment, err := environmentStorage.GetEnvironmentByNamespace(ctx, namespace, false) + if err != nil { + if err == v2es.ErrEnvironmentNotFound { + return nil, localizedError(statusEnvironmentNotFound, locale.JaJP) + } + return nil, localizedError(statusInternal, locale.JaJP) + } + return environment.Environment, nil +} + +func (s *EnvironmentService) ListEnvironments( + ctx context.Context, + req *environmentproto.ListEnvironmentsRequest, +) (*environmentproto.ListEnvironmentsResponse, error) { + _, err := s.checkAdminRole(ctx) + if err != nil { + return nil, err + } + whereParts := []mysql.WherePart{mysql.NewFilter("deleted", "=", false)} + if req.ProjectId != "" { + whereParts = append(whereParts, mysql.NewFilter("project_id", "=", req.ProjectId)) + } + if req.SearchKeyword != "" { + whereParts = append(whereParts, mysql.NewSearchQuery([]string{"id", "description"}, req.SearchKeyword)) + } + orders, err := s.newEnvironmentListOrders(req.OrderBy, req.OrderDirection) + if err != nil { + s.logger.Error( + "Invalid argument", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, err + } + limit := int(req.PageSize) + cursor := req.Cursor + if cursor == "" { + cursor = "0" + } + offset, err := strconv.Atoi(cursor) + if err != nil { + return nil, localizedError(statusInvalidCursor, locale.JaJP) + } + environmentStorage := v2es.NewEnvironmentStorage(s.mysqlClient) + environments, nextCursor, totalCount, err := environmentStorage.ListEnvironments( + ctx, + whereParts, + orders, + limit, + offset, + ) + if err != nil { + 
s.logger.Error( + "Failed to list environments", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &environmentproto.ListEnvironmentsResponse{ + Environments: environments, + Cursor: strconv.Itoa(nextCursor), + TotalCount: totalCount, + }, nil +} + +func (s *EnvironmentService) newEnvironmentListOrders( + orderBy environmentproto.ListEnvironmentsRequest_OrderBy, + orderDirection environmentproto.ListEnvironmentsRequest_OrderDirection, +) ([]*mysql.Order, error) { + var column string + switch orderBy { + case environmentproto.ListEnvironmentsRequest_DEFAULT, + environmentproto.ListEnvironmentsRequest_ID: + column = "id" + case environmentproto.ListEnvironmentsRequest_CREATED_AT: + column = "created_at" + case environmentproto.ListEnvironmentsRequest_UPDATED_AT: + column = "updated_at" + default: + return nil, localizedError(statusInvalidOrderBy, locale.JaJP) + } + direction := mysql.OrderDirectionAsc + if orderDirection == environmentproto.ListEnvironmentsRequest_DESC { + direction = mysql.OrderDirectionDesc + } + return []*mysql.Order{mysql.NewOrder(column, direction)}, nil +} + +func (s *EnvironmentService) CreateEnvironment( + ctx context.Context, + req *environmentproto.CreateEnvironmentRequest, +) (*environmentproto.CreateEnvironmentResponse, error) { + editor, err := s.checkAdminRole(ctx) + if err != nil { + return nil, err + } + if err := validateCreateEnvironmentRequest(req); err != nil { + return nil, err + } + if err := s.checkProjectExistence(ctx, req.Command.ProjectId); err != nil { + return nil, err + } + newEnvironment := domain.NewEnvironment(req.Command.Id, req.Command.Description, req.Command.ProjectId) + if err := s.createEnvironment(ctx, req.Command, newEnvironment, editor); err != nil { + return nil, err + } + return &environmentproto.CreateEnvironmentResponse{}, nil +} + +func (s *EnvironmentService) createEnvironment( + ctx context.Context, + cmd 
command.Command, + environment *domain.Environment, + editor *eventproto.Editor, +) error { + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return localizedError(statusInternal, locale.JaJP) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + environmentStorage := v2es.NewEnvironmentStorage(tx) + handler := command.NewEnvironmentCommandHandler(editor, environment, s.publisher) + if err := handler.Handle(ctx, cmd); err != nil { + return err + } + return environmentStorage.CreateEnvironment(ctx, environment) + }) + if err != nil { + if err == v2es.ErrEnvironmentAlreadyExists { + return localizedError(statusEnvironmentAlreadyExists, locale.JaJP) + } + s.logger.Error( + "Failed to create environment", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return localizedError(statusInternal, locale.JaJP) + } + return nil +} + +func validateCreateEnvironmentRequest(req *environmentproto.CreateEnvironmentRequest) error { + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + if !environmentIDRegex.MatchString(req.Command.Id) { + return localizedError(statusInvalidEnvironmentID, locale.JaJP) + } + if req.Command.ProjectId == "" { + return localizedError(statusProjectIDRequired, locale.JaJP) + } + return nil +} + +func (s *EnvironmentService) checkProjectExistence(ctx context.Context, projectID string) error { + // enabled project must exist + existingProject, err := s.getProject(ctx, projectID) + if err != nil { + return err + } + if existingProject.Disabled { + return localizedError(statusProjectDisabled, locale.JaJP) + } + return nil +} + +func (s *EnvironmentService) UpdateEnvironment( + ctx context.Context, + req *environmentproto.UpdateEnvironmentRequest, +) (*environmentproto.UpdateEnvironmentResponse, error) { + editor, err := s.checkAdminRole(ctx) + if err != 
nil { + return nil, err + } + commands := getUpdateEnvironmentCommands(req) + if err := validateUpdateEnvironmentRequest(req.Id, commands); err != nil { + return nil, err + } + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + environmentStorage := v2es.NewEnvironmentStorage(tx) + environment, err := environmentStorage.GetEnvironment(ctx, req.Id) + if err != nil { + return err + } + handler := command.NewEnvironmentCommandHandler(editor, environment, s.publisher) + for _, command := range commands { + if err := handler.Handle(ctx, command); err != nil { + return err + } + } + return environmentStorage.UpdateEnvironment(ctx, environment) + }) + if err != nil { + if err == v2es.ErrEnvironmentNotFound || err == v2es.ErrEnvironmentUnexpectedAffectedRows { + return nil, localizedError(statusEnvironmentNotFound, locale.JaJP) + } + s.logger.Error( + "Failed to update environment", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &environmentproto.UpdateEnvironmentResponse{}, nil +} + +func getUpdateEnvironmentCommands(req *environmentproto.UpdateEnvironmentRequest) []command.Command { + commands := make([]command.Command, 0) + if req.RenameCommand != nil { + commands = append(commands, req.RenameCommand) + } + if req.ChangeDescriptionCommand != nil { + commands = append(commands, req.ChangeDescriptionCommand) + } + return commands +} + +func validateUpdateEnvironmentRequest(id string, commands []command.Command) error { + if len(commands) == 0 { + return localizedError(statusNoCommand, locale.JaJP) + } + if id == "" { + return localizedError(statusEnvironmentIDRequired, locale.JaJP) + } + return nil +} + +func (s 
*EnvironmentService) DeleteEnvironment( + ctx context.Context, + req *environmentproto.DeleteEnvironmentRequest, +) (*environmentproto.DeleteEnvironmentResponse, error) { + editor, err := s.checkAdminRole(ctx) + if err != nil { + return nil, err + } + if err := validateDeleteEnvironmentRequest(req); err != nil { + return nil, err + } + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + environmentStorage := v2es.NewEnvironmentStorage(tx) + environment, err := environmentStorage.GetEnvironment(ctx, req.Id) + if err != nil { + return err + } + handler := command.NewEnvironmentCommandHandler(editor, environment, s.publisher) + if err := handler.Handle(ctx, req.Command); err != nil { + return err + } + return environmentStorage.UpdateEnvironment(ctx, environment) + }) + if err != nil { + if err == v2es.ErrEnvironmentNotFound || err == v2es.ErrEnvironmentUnexpectedAffectedRows { + return nil, localizedError(statusEnvironmentNotFound, locale.JaJP) + } + s.logger.Error( + "Failed to update environment", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &environmentproto.DeleteEnvironmentResponse{}, nil +} + +func validateDeleteEnvironmentRequest(req *environmentproto.DeleteEnvironmentRequest) error { + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + if req.Id == "" { + return localizedError(statusEnvironmentIDRequired, locale.JaJP) + } + return nil +} diff --git a/pkg/environment/api/environment_test.go b/pkg/environment/api/environment_test.go new file mode 100644 index 000000000..d8baf4240 --- /dev/null +++ b/pkg/environment/api/environment_test.go @@ -0,0 +1,529 @@ +// Copyright 2022 The 
Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "context" + "errors" + "strings" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + v2es "github.com/bucketeer-io/bucketeer/pkg/environment/storage/v2" + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + mysqlmock "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + proto "github.com/bucketeer-io/bucketeer/proto/environment" +) + +func TestGetEnvironmentMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*EnvironmentService) + id string + expectedErr error + }{ + "err: ErrEnvironmentIDRequired": { + setup: nil, + id: "", + expectedErr: localizedError(statusEnvironmentIDRequired, locale.JaJP), + }, + "err: ErrEnvironmentNotFound": { + setup: func(s *EnvironmentService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + id: "id-0", + expectedErr: localizedError(statusEnvironmentNotFound, locale.JaJP), + }, + "err: ErrInternal": { + setup: func(s *EnvironmentService) { + row := mysqlmock.NewMockRow(mockController) + 
row.EXPECT().Scan(gomock.Any()).Return(errors.New("error")) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + id: "id-1", + expectedErr: localizedError(statusInternal, locale.JaJP), + }, + "success": { + setup: func(s *EnvironmentService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + id: "id-3", + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + s := newEnvironmentService(t, mockController, nil) + if p.setup != nil { + p.setup(s) + } + req := &proto.GetEnvironmentRequest{Id: p.id} + resp, err := s.GetEnvironment(createContextWithToken(t), req) + assert.Equal(t, p.expectedErr, err) + if err == nil { + assert.NotNil(t, resp) + } + }) + } +} + +func TestGetEnvironmentByNamespaceMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*EnvironmentService) + namespace string + expectedErr error + }{ + "err: ErrInternal": { + setup: func(s *EnvironmentService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(errors.New("error")) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + namespace: "ns-0", + expectedErr: localizedError(statusInternal, locale.JaJP), + }, + "err: ErrEnvironmentNotFound": { + setup: func(s *EnvironmentService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + namespace: "ns-1", + expectedErr: localizedError(statusEnvironmentNotFound, 
locale.JaJP), + }, + "success": { + setup: func(s *EnvironmentService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + namespace: "ns-2", + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + s := newEnvironmentService(t, mockController, nil) + if p.setup != nil { + p.setup(s) + } + req := &proto.GetEnvironmentByNamespaceRequest{Namespace: p.namespace} + resp, err := s.GetEnvironmentByNamespace(createContextWithToken(t), req) + assert.Equal(t, p.expectedErr, err) + if err == nil { + assert.NotNil(t, resp) + } + }) + } +} + +func TestListEnvironmentsMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*EnvironmentService) + input *proto.ListEnvironmentsRequest + expected *proto.ListEnvironmentsResponse + expectedErr error + }{ + "err: ErrInvalidCursor": { + setup: nil, + input: &proto.ListEnvironmentsRequest{Cursor: "XXX"}, + expected: nil, + expectedErr: localizedError(statusInvalidCursor, locale.JaJP), + }, + "err: ErrInternal": { + setup: func(s *EnvironmentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + }, + input: &proto.ListEnvironmentsRequest{}, + expected: nil, + expectedErr: localizedError(statusInternal, locale.JaJP), + }, + "success": { + setup: func(s *EnvironmentService) { + rows := mysqlmock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + row := mysqlmock.NewMockRow(mockController) + 
row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + input: &proto.ListEnvironmentsRequest{PageSize: 2, Cursor: ""}, + expected: &proto.ListEnvironmentsResponse{Environments: []*proto.Environment{}, Cursor: "0"}, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + s := newEnvironmentService(t, mockController, nil) + if p.setup != nil { + p.setup(s) + } + actual, err := s.ListEnvironments(createContextWithToken(t), p.input) + assert.Equal(t, p.expectedErr, err) + assert.Equal(t, p.expected, actual) + }) + } +} + +func TestCreateEnvironmentMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*EnvironmentService) + req *proto.CreateEnvironmentRequest + expectedErr error + }{ + "err: ErrNoCommand": { + setup: nil, + req: &proto.CreateEnvironmentRequest{ + Command: nil, + }, + expectedErr: localizedError(statusNoCommand, locale.JaJP), + }, + "err: ErrInvalidEnvironmentID: empty id": { + setup: nil, + req: &proto.CreateEnvironmentRequest{ + Command: &proto.CreateEnvironmentCommand{Id: ""}, + }, + expectedErr: localizedError(statusInvalidEnvironmentID, locale.JaJP), + }, + "err: ErrInvalidEnvironmentID: can't use uppercase": { + setup: nil, + req: &proto.CreateEnvironmentRequest{ + Command: &proto.CreateEnvironmentCommand{Id: "NS-1"}, + }, + expectedErr: localizedError(statusInvalidEnvironmentID, locale.JaJP), + }, + "err: ErrInvalidEnvironmentID: max id length exceeded": { + setup: nil, + req: &proto.CreateEnvironmentRequest{ + Command: &proto.CreateEnvironmentCommand{Id: strings.Repeat("a", 51)}, + }, + expectedErr: localizedError(statusInvalidEnvironmentID, locale.JaJP), + }, + "err: ErrProjectIDRequired": { + setup: nil, + req: &proto.CreateEnvironmentRequest{ + Command: 
&proto.CreateEnvironmentCommand{Id: "ns-0", ProjectId: ""}, + }, + expectedErr: localizedError(statusProjectIDRequired, locale.JaJP), + }, + "err: ErrProjectNotFound": { + setup: func(s *EnvironmentService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + req: &proto.CreateEnvironmentRequest{ + Command: &proto.CreateEnvironmentCommand{Id: "ns-0", ProjectId: "project-id-0"}, + }, + expectedErr: localizedError(statusProjectNotFound, locale.JaJP), + }, + "err: ErrEnvironmentAlreadyExists": { + setup: func(s *EnvironmentService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2es.ErrEnvironmentAlreadyExists) + }, + req: &proto.CreateEnvironmentRequest{ + Command: &proto.CreateEnvironmentCommand{Id: "ns-0", ProjectId: "project-id-0"}, + }, + expectedErr: localizedError(statusEnvironmentAlreadyExists, locale.JaJP), + }, + "err: ErrInternal": { + setup: func(s *EnvironmentService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(errors.New("error")) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + req: &proto.CreateEnvironmentRequest{ + Command: &proto.CreateEnvironmentCommand{Id: "ns-1", ProjectId: "project-id-0"}, + }, + expectedErr: localizedError(statusInternal, locale.JaJP), + }, + "success": { + setup: func(s *EnvironmentService) { + row := mysqlmock.NewMockRow(mockController) + 
row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + req: &proto.CreateEnvironmentRequest{ + Command: &proto.CreateEnvironmentCommand{Id: "ns-2", ProjectId: "project-id-0"}, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithToken(t) + service := newEnvironmentService(t, mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.CreateEnvironment(ctx, p.req) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestUpdateEnvironmentMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*EnvironmentService) + req *proto.UpdateEnvironmentRequest + expectedErr error + }{ + "err: ErrNoCommand": { + setup: nil, + req: &proto.UpdateEnvironmentRequest{ + Id: "ns0", + }, + expectedErr: localizedError(statusNoCommand, locale.JaJP), + }, + "err: ErrEnvironmentIDRequired": { + setup: nil, + req: &proto.UpdateEnvironmentRequest{ + RenameCommand: &proto.RenameEnvironmentCommand{Name: "name-0"}, + }, + expectedErr: localizedError(statusEnvironmentIDRequired, locale.JaJP), + }, + "err: ErrEnvironmentNotFound": { + setup: func(s *EnvironmentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2es.ErrEnvironmentNotFound) + }, + req: &proto.UpdateEnvironmentRequest{ + Id: "ns0", + RenameCommand: &proto.RenameEnvironmentCommand{Name: "name-0"}, + }, + expectedErr: 
localizedError(statusEnvironmentNotFound, locale.JaJP), + }, + "err: ErrInternal": { + setup: func(s *EnvironmentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(errors.New("error")) + }, + req: &proto.UpdateEnvironmentRequest{ + Id: "ns1", + RenameCommand: &proto.RenameEnvironmentCommand{Name: "name-1"}, + }, + expectedErr: localizedError(statusInternal, locale.JaJP), + }, + "success": { + setup: func(s *EnvironmentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + req: &proto.UpdateEnvironmentRequest{ + Id: "ns1", + RenameCommand: &proto.RenameEnvironmentCommand{Name: "name-1"}, + ChangeDescriptionCommand: &proto.ChangeDescriptionEnvironmentCommand{Description: "desc-1"}, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithToken(t) + service := newEnvironmentService(t, mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.UpdateEnvironment(ctx, p.req) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestDeleteEnvironmentMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*EnvironmentService) + req *proto.DeleteEnvironmentRequest + expectedErr error + }{ + "err: ErrNoCommand": { + setup: nil, + req: &proto.DeleteEnvironmentRequest{}, + expectedErr: localizedError(statusNoCommand, locale.JaJP), + }, + "err: ErrEnvironmentIDRequired": { + setup: nil, + req: &proto.DeleteEnvironmentRequest{ + Command: &proto.DeleteEnvironmentCommand{}, + }, + expectedErr: localizedError(statusEnvironmentIDRequired, 
locale.JaJP), + }, + "err: ErrEnvironmentNotFound": { + setup: func(s *EnvironmentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2es.ErrEnvironmentNotFound) + }, + req: &proto.DeleteEnvironmentRequest{ + Id: "ns0", + Command: &proto.DeleteEnvironmentCommand{}, + }, + expectedErr: localizedError(statusEnvironmentNotFound, locale.JaJP), + }, + "err: ErrInternal": { + setup: func(s *EnvironmentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(errors.New("error")) + }, + req: &proto.DeleteEnvironmentRequest{ + Id: "ns1", + Command: &proto.DeleteEnvironmentCommand{}, + }, + expectedErr: localizedError(statusInternal, locale.JaJP), + }, + "success": { + setup: func(s *EnvironmentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + req: &proto.DeleteEnvironmentRequest{ + Id: "ns1", + Command: &proto.DeleteEnvironmentCommand{}, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithToken(t) + service := newEnvironmentService(t, mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.DeleteEnvironment(ctx, p.req) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestEnvironmentPermissionDeniedMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + action func(context.Context, *EnvironmentService) error + expected error + }{ + "CreateEnvironment": { + 
action: func(ctx context.Context, es *EnvironmentService) error { + _, err := es.CreateEnvironment(ctx, &proto.CreateEnvironmentRequest{}) + return err + }, + expected: localizedError(statusPermissionDenied, locale.JaJP), + }, + "UpdateEnvironment": { + action: func(ctx context.Context, es *EnvironmentService) error { + _, err := es.UpdateEnvironment(ctx, &proto.UpdateEnvironmentRequest{}) + return err + }, + expected: localizedError(statusPermissionDenied, locale.JaJP), + }, + "DeleteEnvironment": { + action: func(ctx context.Context, es *EnvironmentService) error { + _, err := es.DeleteEnvironment(ctx, &proto.DeleteEnvironmentRequest{}) + return err + }, + expected: localizedError(statusPermissionDenied, locale.JaJP), + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithTokenRoleUnassigned(t) + service := newEnvironmentService(t, mockController, nil) + actual := p.action(ctx, service) + assert.Equal(t, p.expected, actual) + }) + } +} diff --git a/pkg/environment/api/error.go b/pkg/environment/api/error.go new file mode 100644 index 000000000..6e8047f96 --- /dev/null +++ b/pkg/environment/api/error.go @@ -0,0 +1,206 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc/codes" + gstatus "google.golang.org/grpc/status" + + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/rpc/status" +) + +var ( + statusInternal = gstatus.New(codes.Internal, "environment: internal") + statusNoCommand = gstatus.New(codes.InvalidArgument, "environment: no command") + statusInvalidCursor = gstatus.New(codes.InvalidArgument, "environment: cursor is invalid") + statusEnvironmentIDRequired = gstatus.New(codes.InvalidArgument, "environment: environment id must be specified") + statusInvalidEnvironmentID = gstatus.New(codes.InvalidArgument, "environment: invalid environment id") + statusProjectIDRequired = gstatus.New(codes.InvalidArgument, "environment: project id must be specified") + statusInvalidProjectID = gstatus.New(codes.InvalidArgument, "environment: invalid project id") + statusInvalidProjectCreatorEmail = gstatus.New(codes.InvalidArgument, "environment: invalid project creator email") + statusInvalidOrderBy = gstatus.New(codes.InvalidArgument, "environment: order_by is invalid") + statusEnvironmentNotFound = gstatus.New(codes.NotFound, "environment: environment not found") + statusProjectNotFound = gstatus.New(codes.NotFound, "environment: project not found") + statusEnvironmentAlreadyDeleted = gstatus.New(codes.NotFound, "environment: environment already deleted") + statusEnvironmentAlreadyExists = gstatus.New(codes.AlreadyExists, "environment: environment already exists") + statusProjectAlreadyExists = gstatus.New(codes.AlreadyExists, "environment: project already exists") + statusProjectDisabled = gstatus.New(codes.FailedPrecondition, "environment: project disabled") + statusUnauthenticated = gstatus.New(codes.Unauthenticated, "environment: unauthenticated") + statusPermissionDenied = gstatus.New(codes.PermissionDenied, "environment: permission denied") + + errInternalJaJP = 
status.MustWithDetails( + statusInternal, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "内部エラーが発生しました", + }, + ) + errNoCommandJaJP = status.MustWithDetails( + statusNoCommand, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "commandは必須です", + }, + ) + errInvalidCursorJaJP = status.MustWithDetails( + statusInvalidCursor, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不正なcursorです", + }, + ) + errEnvironmentIDRequiredJaJP = status.MustWithDetails( + statusEnvironmentIDRequired, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "environment idは必須です", + }, + ) + errInvalidEnvironmentIDJaJP = status.MustWithDetails( + statusInvalidEnvironmentID, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不正なenvironment idです", + }, + ) + errProjectIDRequiredJaJP = status.MustWithDetails( + statusProjectIDRequired, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "project idは必須です", + }, + ) + errInvalidProjectIDJaJP = status.MustWithDetails( + statusInvalidProjectID, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不正なproject idです", + }, + ) + errInvalidProjectCreatorEmailJaJP = status.MustWithDetails( + statusInvalidProjectCreatorEmail, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "Project作成者のemailが不正です", + }, + ) + errInvalidOrderByJaJP = status.MustWithDetails( + statusInvalidOrderBy, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不正なソート順の指定です", + }, + ) + errEnvironmentNotFoundJaJP = status.MustWithDetails( + statusEnvironmentNotFound, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "environmentのデータが存在しません", + }, + ) + errProjectNotFoundJaJP = status.MustWithDetails( + statusProjectNotFound, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "projectのデータが存在しません", + }, + ) + errEnvironmentAlreadyDeletedJaJP = status.MustWithDetails( + statusEnvironmentAlreadyDeleted, + 
&errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "environmentのデータがすでに削除済みです", + }, + ) + errEnvironmentAlreadyExistsJaJP = status.MustWithDetails( + statusEnvironmentAlreadyExists, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "同じidまたはnamespaceのenvironmentのデータがすでに存在します", + }, + ) + errProjectAlreadyExistsJaJP = status.MustWithDetails( + statusProjectAlreadyExists, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "同じidのprojectのデータがすでに存在します", + }, + ) + errProjectDisabledJaJp = status.MustWithDetails( + statusProjectDisabled, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "projectのデータが無効化されています", + }, + ) + errUnauthenticatedJaJP = status.MustWithDetails( + statusUnauthenticated, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "認証されていません", + }, + ) + errPermissionDeniedJaJP = status.MustWithDetails( + statusPermissionDenied, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "権限がありません", + }, + ) +) + +func localizedError(s *gstatus.Status, loc string) error { + // handle loc if multi-lang is necessary + switch s { + case statusInternal: + return errInternalJaJP + case statusNoCommand: + return errNoCommandJaJP + case statusInvalidCursor: + return errInvalidCursorJaJP + case statusEnvironmentIDRequired: + return errEnvironmentIDRequiredJaJP + case statusInvalidEnvironmentID: + return errInvalidEnvironmentIDJaJP + case statusProjectIDRequired: + return errProjectIDRequiredJaJP + case statusInvalidProjectID: + return errInvalidProjectIDJaJP + case statusInvalidProjectCreatorEmail: + return errInvalidProjectCreatorEmailJaJP + case statusInvalidOrderBy: + return errInvalidOrderByJaJP + case statusEnvironmentNotFound: + return errEnvironmentNotFoundJaJP + case statusProjectNotFound: + return errProjectNotFoundJaJP + case statusEnvironmentAlreadyDeleted: + return errEnvironmentAlreadyDeletedJaJP + case statusEnvironmentAlreadyExists: + return 
errEnvironmentAlreadyExistsJaJP + case statusProjectAlreadyExists: + return errProjectAlreadyExistsJaJP + case statusProjectDisabled: + return errProjectDisabledJaJp + case statusUnauthenticated: + return errUnauthenticatedJaJP + case statusPermissionDenied: + return errPermissionDeniedJaJP + default: + return errInternalJaJP + } +} diff --git a/pkg/environment/api/project.go b/pkg/environment/api/project.go new file mode 100644 index 000000000..b1710b7c2 --- /dev/null +++ b/pkg/environment/api/project.go @@ -0,0 +1,494 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "context" + "fmt" + "regexp" + "strconv" + + "go.uber.org/zap" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/bucketeer-io/bucketeer/pkg/environment/command" + "github.com/bucketeer-io/bucketeer/pkg/environment/domain" + v2es "github.com/bucketeer-io/bucketeer/pkg/environment/storage/v2" + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + environmentproto "github.com/bucketeer-io/bucketeer/proto/environment" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +var ( + projectIDRegex = regexp.MustCompile("^[a-z0-9-]{1,50}$") + + //nolint:lll + emailRegex = regexp.MustCompile("^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$") +) + +func (s *EnvironmentService) GetProject( + ctx context.Context, + req *environmentproto.GetProjectRequest, +) (*environmentproto.GetProjectResponse, error) { + _, err := s.checkAdminRole(ctx) + if err != nil { + return nil, err + } + if err := validateGetProjectRequest(req); err != nil { + return nil, err + } + project, err := s.getProject(ctx, req.Id) + if err != nil { + return nil, err + } + return &environmentproto.GetProjectResponse{ + Project: project.Project, + }, nil +} + +func validateGetProjectRequest(req *environmentproto.GetProjectRequest) error { + if req.Id == "" { + return localizedError(statusProjectIDRequired, locale.JaJP) + } + return nil +} + +func (s *EnvironmentService) getProject(ctx context.Context, id string) (*domain.Project, error) { + projectStorage := v2es.NewProjectStorage(s.mysqlClient) + project, err := projectStorage.GetProject(ctx, id) + if err != nil { + if err == v2es.ErrProjectNotFound { + return nil, localizedError(statusProjectNotFound, locale.JaJP) + } + return nil, 
localizedError(statusInternal, locale.JaJP) + } + return project, nil +} + +func (s *EnvironmentService) ListProjects( + ctx context.Context, + req *environmentproto.ListProjectsRequest, +) (*environmentproto.ListProjectsResponse, error) { + _, err := s.checkAdminRole(ctx) + if err != nil { + return nil, err + } + whereParts := []mysql.WherePart{} + if req.Disabled != nil { + whereParts = append(whereParts, mysql.NewFilter("disabled", "=", req.Disabled.Value)) + } + if req.SearchKeyword != "" { + whereParts = append(whereParts, mysql.NewSearchQuery([]string{"id", "creator_email"}, req.SearchKeyword)) + } + orders, err := s.newProjectListOrders(req.OrderBy, req.OrderDirection) + if err != nil { + s.logger.Error( + "Invalid argument", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, err + } + limit := int(req.PageSize) + cursor := req.Cursor + if cursor == "" { + cursor = "0" + } + offset, err := strconv.Atoi(cursor) + if err != nil { + return nil, localizedError(statusInvalidCursor, locale.JaJP) + } + projectStorage := v2es.NewProjectStorage(s.mysqlClient) + projects, nextCursor, totalCount, err := projectStorage.ListProjects( + ctx, + whereParts, + orders, + limit, + offset, + ) + if err != nil { + s.logger.Error( + "Failed to list projects", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &environmentproto.ListProjectsResponse{ + Projects: projects, + Cursor: strconv.Itoa(nextCursor), + TotalCount: totalCount, + }, nil +} + +func (s *EnvironmentService) newProjectListOrders( + orderBy environmentproto.ListProjectsRequest_OrderBy, + orderDirection environmentproto.ListProjectsRequest_OrderDirection, +) ([]*mysql.Order, error) { + var column string + switch orderBy { + case environmentproto.ListProjectsRequest_DEFAULT, + environmentproto.ListProjectsRequest_ID: + column = "id" + case environmentproto.ListProjectsRequest_CREATED_AT: 
+ column = "created_at" + case environmentproto.ListProjectsRequest_UPDATED_AT: + column = "updated_at" + default: + return nil, localizedError(statusInvalidOrderBy, locale.JaJP) + } + direction := mysql.OrderDirectionAsc + if orderDirection == environmentproto.ListProjectsRequest_DESC { + direction = mysql.OrderDirectionDesc + } + return []*mysql.Order{mysql.NewOrder(column, direction)}, nil +} + +func (s *EnvironmentService) CreateProject( + ctx context.Context, + req *environmentproto.CreateProjectRequest, +) (*environmentproto.CreateProjectResponse, error) { + editor, err := s.checkAdminRole(ctx) + if err != nil { + return nil, err + } + if err := validateCreateProjectRequest(req); err != nil { + return nil, err + } + project := domain.NewProject(req.Command.Id, req.Command.Description, editor.Email, false) + if err := s.createProject(ctx, req.Command, project, editor); err != nil { + return nil, err + } + return &environmentproto.CreateProjectResponse{}, nil +} + +func validateCreateProjectRequest(req *environmentproto.CreateProjectRequest) error { + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + if !projectIDRegex.MatchString(req.Command.Id) { + return localizedError(statusInvalidProjectID, locale.JaJP) + } + return nil +} + +func (s *EnvironmentService) createProject( + ctx context.Context, + cmd command.Command, + project *domain.Project, + editor *eventproto.Editor, +) error { + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return localizedError(statusInternal, locale.JaJP) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + projectStorage := v2es.NewProjectStorage(tx) + handler := command.NewProjectCommandHandler(editor, project, s.publisher) + if err := handler.Handle(ctx, cmd); err != nil { + return err + } + return projectStorage.CreateProject(ctx, project) + 
}) + if err != nil { + if err == v2es.ErrProjectAlreadyExists { + return localizedError(statusProjectAlreadyExists, locale.JaJP) + } + s.logger.Error( + "Failed to create project", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return localizedError(statusInternal, locale.JaJP) + } + return nil +} + +func (s *EnvironmentService) CreateTrialProject( + ctx context.Context, + req *environmentproto.CreateTrialProjectRequest, +) (*environmentproto.CreateTrialProjectResponse, error) { + _, err := s.checkAdminRole(ctx) + if err != nil { + return nil, err + } + if err := validateCreateTrialProjectRequest(req); err != nil { + return nil, err + } + editor := &eventproto.Editor{ + Email: req.Command.Email, + Role: accountproto.Account_UNASSIGNED, + IsAdmin: false, + } + existingProject, err := s.getTrialProjectByEmail(ctx, editor.Email) + if err != nil && status.Code(err) != codes.NotFound { + return nil, err + } + if existingProject != nil { + return nil, localizedError(statusProjectAlreadyExists, locale.JaJP) + } + project := domain.NewProject(req.Command.Id, "", editor.Email, true) + if err := s.createProject(ctx, req.Command, project, editor); err != nil { + return nil, err + } + if err := s.createTrialEnvironmentsAndAccounts(ctx, project, editor); err != nil { + return nil, err + } + return &environmentproto.CreateTrialProjectResponse{}, nil +} + +func validateCreateTrialProjectRequest(req *environmentproto.CreateTrialProjectRequest) error { + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + if !projectIDRegex.MatchString(req.Command.Id) { + return localizedError(statusInvalidProjectID, locale.JaJP) + } + if !emailRegex.MatchString(req.Command.Email) { + return localizedError(statusInvalidProjectCreatorEmail, locale.JaJP) + } + return nil +} + +func (s *EnvironmentService) getTrialProjectByEmail( + ctx context.Context, + email string, +) (*environmentproto.Project, error) { + projectStorage := 
v2es.NewProjectStorage(s.mysqlClient) + project, err := projectStorage.GetTrialProjectByEmail(ctx, email, false, true) + if err != nil { + if err == v2es.ErrProjectNotFound { + return nil, localizedError(statusProjectNotFound, locale.JaJP) + } + return nil, localizedError(statusInternal, locale.JaJP) + } + return project.Project, nil +} + +func (s *EnvironmentService) createTrialEnvironmentsAndAccounts( + ctx context.Context, + project *domain.Project, + editor *eventproto.Editor, +) error { + getAdminAccountReq := &accountproto.GetAdminAccountRequest{ + Email: editor.Email, + } + getAdminAccountRes, err := s.accountClient.GetAdminAccount(ctx, getAdminAccountReq) + if err != nil && status.Code(err) != codes.NotFound { + return localizedError(statusInternal, locale.JaJP) + } + adminAccountExists := false + if getAdminAccountRes != nil && getAdminAccountRes.Account != nil { + adminAccountExists = true + } + envIDs := []string{ + fmt.Sprintf("%s-development", project.Id), + fmt.Sprintf("%s-staging", project.Id), + fmt.Sprintf("%s-production", project.Id), + } + for _, envID := range envIDs { + createEnvCmd := &environmentproto.CreateEnvironmentCommand{ + Id: envID, + ProjectId: project.Id, + Description: "", + } + env := domain.NewEnvironment(envID, "", project.Id) + if err := s.createEnvironment(ctx, createEnvCmd, env, editor); err != nil { + return err + } + if !adminAccountExists { + createAccountReq := &accountproto.CreateAccountRequest{ + Command: &accountproto.CreateAccountCommand{ + Email: editor.Email, + Role: accountproto.Account_OWNER, + }, + EnvironmentNamespace: env.Namespace, + } + if _, err := s.accountClient.CreateAccount(ctx, createAccountReq); err != nil { + return localizedError(statusInternal, locale.JaJP) + } + } + } + return nil +} + +func (s *EnvironmentService) UpdateProject( + ctx context.Context, + req *environmentproto.UpdateProjectRequest, +) (*environmentproto.UpdateProjectResponse, error) { + editor, err := s.checkAdminRole(ctx) + if err 
!= nil { + return nil, err + } + commands := getUpdateProjectCommands(req) + if err := validateUpdateProjectRequest(req.Id, commands); err != nil { + return nil, err + } + if err := s.updateProject(ctx, req.Id, editor, commands...); err != nil { + return nil, err + } + return &environmentproto.UpdateProjectResponse{}, nil +} + +func getUpdateProjectCommands(req *environmentproto.UpdateProjectRequest) []command.Command { + commands := make([]command.Command, 0) + if req.ChangeDescriptionCommand != nil { + commands = append(commands, req.ChangeDescriptionCommand) + } + return commands +} + +func validateUpdateProjectRequest(id string, commands []command.Command) error { + if len(commands) == 0 { + return localizedError(statusNoCommand, locale.JaJP) + } + if id == "" { + return localizedError(statusProjectIDRequired, locale.JaJP) + } + return nil +} + +func (s *EnvironmentService) updateProject( + ctx context.Context, + id string, + editor *eventproto.Editor, + commands ...command.Command, +) error { + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return localizedError(statusInternal, locale.JaJP) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + projectStorage := v2es.NewProjectStorage(tx) + project, err := projectStorage.GetProject(ctx, id) + if err != nil { + return err + } + handler := command.NewProjectCommandHandler(editor, project, s.publisher) + for _, command := range commands { + if err := handler.Handle(ctx, command); err != nil { + return err + } + } + return projectStorage.UpdateProject(ctx, project) + }) + if err != nil { + if err == v2es.ErrProjectNotFound || err == v2es.ErrProjectUnexpectedAffectedRows { + return localizedError(statusProjectNotFound, locale.JaJP) + } + s.logger.Error( + "Failed to update project", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return 
localizedError(statusInternal, locale.JaJP) + } + return nil +} + +func (s *EnvironmentService) EnableProject( + ctx context.Context, + req *environmentproto.EnableProjectRequest, +) (*environmentproto.EnableProjectResponse, error) { + editor, err := s.checkAdminRole(ctx) + if err != nil { + return nil, err + } + if err := validateEnableProjectRequest(req); err != nil { + return nil, err + } + if err := s.updateProject(ctx, req.Id, editor, req.Command); err != nil { + return nil, err + } + return &environmentproto.EnableProjectResponse{}, nil +} + +func validateEnableProjectRequest(req *environmentproto.EnableProjectRequest) error { + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + if req.Id == "" { + return localizedError(statusProjectIDRequired, locale.JaJP) + } + return nil +} + +func (s *EnvironmentService) DisableProject( + ctx context.Context, + req *environmentproto.DisableProjectRequest, +) (*environmentproto.DisableProjectResponse, error) { + editor, err := s.checkAdminRole(ctx) + if err != nil { + return nil, err + } + if err := validateDisableProjectRequest(req); err != nil { + return nil, err + } + if err := s.updateProject(ctx, req.Id, editor, req.Command); err != nil { + return nil, err + } + return &environmentproto.DisableProjectResponse{}, nil +} + +func validateDisableProjectRequest(req *environmentproto.DisableProjectRequest) error { + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + if req.Id == "" { + return localizedError(statusProjectIDRequired, locale.JaJP) + } + return nil +} + +func (s *EnvironmentService) ConvertTrialProject( + ctx context.Context, + req *environmentproto.ConvertTrialProjectRequest, +) (*environmentproto.ConvertTrialProjectResponse, error) { + editor, err := s.checkAdminRole(ctx) + if err != nil { + return nil, err + } + if err := validateConvertTrialProjectRequest(req); err != nil { + return nil, err + } + if err := s.updateProject(ctx, req.Id, 
editor, req.Command); err != nil { + return nil, err + } + return &environmentproto.ConvertTrialProjectResponse{}, nil +} + +func validateConvertTrialProjectRequest(req *environmentproto.ConvertTrialProjectRequest) error { + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + if req.Id == "" { + return localizedError(statusProjectIDRequired, locale.JaJP) + } + return nil +} diff --git a/pkg/environment/api/project_test.go b/pkg/environment/api/project_test.go new file mode 100644 index 000000000..3874347c6 --- /dev/null +++ b/pkg/environment/api/project_test.go @@ -0,0 +1,734 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "context" + "errors" + "strings" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + acmock "github.com/bucketeer-io/bucketeer/pkg/account/client/mock" + v2es "github.com/bucketeer-io/bucketeer/pkg/environment/storage/v2" + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + mysqlmock "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + proto "github.com/bucketeer-io/bucketeer/proto/environment" +) + +func TestGetProjectMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*EnvironmentService) + id string + expectedErr error + }{ + "err: ErrProjectIDRequired": { + setup: nil, + id: "", + expectedErr: localizedError(statusProjectIDRequired, locale.JaJP), + }, + "err: ErrProjectNotFound": { + setup: func(s *EnvironmentService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + id: "err-id-0", + expectedErr: localizedError(statusProjectNotFound, locale.JaJP), + }, + "err: ErrInternal": { + setup: func(s *EnvironmentService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(errors.New("error")) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + id: "err-id-1", + expectedErr: localizedError(statusInternal, locale.JaJP), + }, + "success": { + setup: func(s *EnvironmentService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + 
s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + id: "success-id-0", + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + s := newEnvironmentService(t, mockController, nil) + if p.setup != nil { + p.setup(s) + } + req := &proto.GetProjectRequest{Id: p.id} + resp, err := s.GetProject(createContextWithToken(t), req) + assert.Equal(t, p.expectedErr, err) + if err == nil { + assert.NotNil(t, resp) + } + }) + } +} + +func TestListProjectsMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*EnvironmentService) + input *proto.ListProjectsRequest + expected *proto.ListProjectsResponse + expectedErr error + }{ + "err: ErrInvalidCursor": { + setup: nil, + input: &proto.ListProjectsRequest{Cursor: "XXX"}, + expected: nil, + expectedErr: localizedError(statusInvalidCursor, locale.JaJP), + }, + "err: ErrInternal": { + setup: func(s *EnvironmentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + }, + input: &proto.ListProjectsRequest{}, + expected: nil, + expectedErr: localizedError(statusInternal, locale.JaJP), + }, + "success": { + setup: func(s *EnvironmentService) { + rows := mysqlmock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + input: &proto.ListProjectsRequest{PageSize: 2, Cursor: ""}, + expected: 
&proto.ListProjectsResponse{Projects: []*proto.Project{}, Cursor: "0", TotalCount: 0}, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + s := newEnvironmentService(t, mockController, nil) + if p.setup != nil { + p.setup(s) + } + actual, err := s.ListProjects(createContextWithToken(t), p.input) + assert.Equal(t, p.expectedErr, err) + assert.Equal(t, p.expected, actual) + }) + } +} + +func TestCreateProjectMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*EnvironmentService) + req *proto.CreateProjectRequest + expectedErr error + }{ + "err: ErrNoCommand": { + setup: nil, + req: &proto.CreateProjectRequest{ + Command: nil, + }, + expectedErr: localizedError(statusNoCommand, locale.JaJP), + }, + "err: ErrInvalidProjectID: empty id": { + setup: nil, + req: &proto.CreateProjectRequest{ + Command: &proto.CreateProjectCommand{Id: ""}, + }, + expectedErr: localizedError(statusInvalidProjectID, locale.JaJP), + }, + "err: ErrInvalidProjectID: can't use uppercase": { + setup: nil, + req: &proto.CreateProjectRequest{ + Command: &proto.CreateProjectCommand{Id: "ID-1"}, + }, + expectedErr: localizedError(statusInvalidProjectID, locale.JaJP), + }, + "err: ErrInvalidProjectID: max id length exceeded": { + setup: nil, + req: &proto.CreateProjectRequest{ + Command: &proto.CreateProjectCommand{Id: strings.Repeat("a", 51)}, + }, + expectedErr: localizedError(statusInvalidProjectID, locale.JaJP), + }, + "err: ErrProjectAlreadyExists: duplicate id": { + setup: func(s *EnvironmentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2es.ErrProjectAlreadyExists) + }, + req: &proto.CreateProjectRequest{ + Command: &proto.CreateProjectCommand{Id: "id-0"}, + }, + 
expectedErr: localizedError(statusProjectAlreadyExists, locale.JaJP), + }, + "err: ErrInternal": { + setup: func(s *EnvironmentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(errors.New("error")) + }, + req: &proto.CreateProjectRequest{ + Command: &proto.CreateProjectCommand{Id: "id-1"}, + }, + expectedErr: localizedError(statusInternal, locale.JaJP), + }, + "success": { + setup: func(s *EnvironmentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + req: &proto.CreateProjectRequest{ + Command: &proto.CreateProjectCommand{Id: "id-2"}, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithToken(t) + service := newEnvironmentService(t, mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.CreateProject(ctx, p.req) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestCreateTrialProjectMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*EnvironmentService) + req *proto.CreateTrialProjectRequest + expectedErr error + }{ + "err: ErrNoCommand": { + setup: nil, + req: &proto.CreateTrialProjectRequest{ + Command: nil, + }, + expectedErr: localizedError(statusNoCommand, locale.JaJP), + }, + "err: ErrInvalidProjectID: empty id": { + setup: nil, + req: &proto.CreateTrialProjectRequest{ + Command: &proto.CreateTrialProjectCommand{Id: ""}, + }, + expectedErr: localizedError(statusInvalidProjectID, locale.JaJP), + }, + "err: ErrInvalidProjectID: can't use uppercase": { + setup: nil, + req: 
&proto.CreateTrialProjectRequest{ + Command: &proto.CreateTrialProjectCommand{Id: "ID-1"}, + }, + expectedErr: localizedError(statusInvalidProjectID, locale.JaJP), + }, + "err: ErrInvalidProjectID: max id length exceeded": { + setup: nil, + req: &proto.CreateTrialProjectRequest{ + Command: &proto.CreateTrialProjectCommand{Id: strings.Repeat("a", 51)}, + }, + expectedErr: localizedError(statusInvalidProjectID, locale.JaJP), + }, + "err: ErrInvalidProjectCreatorEmail": { + setup: nil, + req: &proto.CreateTrialProjectRequest{ + Command: &proto.CreateTrialProjectCommand{Id: "id-0", Email: "email"}, + }, + expectedErr: localizedError(statusInvalidProjectCreatorEmail, locale.JaJP), + }, + "err: ErrProjectAlreadyExists: trial exists": { + setup: func(s *EnvironmentService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + req: &proto.CreateTrialProjectRequest{ + Command: &proto.CreateTrialProjectCommand{Id: "id-0", Email: "test@example.com"}, + }, + expectedErr: localizedError(statusProjectAlreadyExists, locale.JaJP), + }, + "err: ErrProjectAlreadyExists: duplicated id": { + setup: func(s *EnvironmentService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2es.ErrProjectAlreadyExists) + }, + req: &proto.CreateTrialProjectRequest{ + Command: &proto.CreateTrialProjectCommand{Id: "id-0", Email: "test@example.com"}, + }, + expectedErr: localizedError(statusProjectAlreadyExists, locale.JaJP), + }, + "err: 
ErrInternal": { + setup: func(s *EnvironmentService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(errors.New("error")) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + req: &proto.CreateTrialProjectRequest{ + Command: &proto.CreateTrialProjectCommand{Id: "id-1", Email: "test@example.com"}, + }, + expectedErr: localizedError(statusInternal, locale.JaJP), + }, + "success": { + setup: func(s *EnvironmentService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil).Times(4) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil).Times(4) + s.accountClient.(*acmock.MockClient).EXPECT().GetAdminAccount(gomock.Any(), gomock.Any()).Return( + nil, status.Error(codes.NotFound, "not found")) + s.accountClient.(*acmock.MockClient).EXPECT().CreateAccount(gomock.Any(), gomock.Any()).Return( + &accountproto.CreateAccountResponse{}, nil).Times(3) + }, + req: &proto.CreateTrialProjectRequest{ + Command: &proto.CreateTrialProjectCommand{Id: "id-2", Email: "test@example.com"}, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithToken(t) + service := newEnvironmentService(t, mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.CreateTrialProject(ctx, p.req) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestUpdateProjectMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*EnvironmentService) + req 
*proto.UpdateProjectRequest + expectedErr error + }{ + "err: ErrNoCommand": { + setup: nil, + req: &proto.UpdateProjectRequest{ + Id: "id-0", + }, + expectedErr: localizedError(statusNoCommand, locale.JaJP), + }, + "err: ErrProjectIDRequired": { + setup: nil, + req: &proto.UpdateProjectRequest{ + ChangeDescriptionCommand: &proto.ChangeDescriptionProjectCommand{Description: "desc"}, + }, + expectedErr: localizedError(statusProjectIDRequired, locale.JaJP), + }, + "err: ErrProjectNotFound": { + setup: func(s *EnvironmentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2es.ErrProjectNotFound) + }, + req: &proto.UpdateProjectRequest{ + Id: "id-0", + ChangeDescriptionCommand: &proto.ChangeDescriptionProjectCommand{Description: "desc"}, + }, + expectedErr: localizedError(statusProjectNotFound, locale.JaJP), + }, + "err: ErrInternal": { + setup: func(s *EnvironmentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(errors.New("error")) + }, + req: &proto.UpdateProjectRequest{ + Id: "id-1", + ChangeDescriptionCommand: &proto.ChangeDescriptionProjectCommand{Description: "desc"}, + }, + expectedErr: localizedError(statusInternal, locale.JaJP), + }, + "success": { + setup: func(s *EnvironmentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + req: &proto.UpdateProjectRequest{ + Id: "id-1", + ChangeDescriptionCommand: &proto.ChangeDescriptionProjectCommand{Description: "desc"}, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t 
*testing.T) { + ctx := createContextWithToken(t) + service := newEnvironmentService(t, mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.UpdateProject(ctx, p.req) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestEnableProjectMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*EnvironmentService) + req *proto.EnableProjectRequest + expectedErr error + }{ + "err: ErrNoCommand": { + setup: nil, + req: &proto.EnableProjectRequest{ + Id: "id-0", + }, + expectedErr: localizedError(statusNoCommand, locale.JaJP), + }, + "err: ErrProjectIDRequired": { + setup: nil, + req: &proto.EnableProjectRequest{ + Command: &proto.EnableProjectCommand{}, + }, + expectedErr: localizedError(statusProjectIDRequired, locale.JaJP), + }, + "err: ErrProjectNotFound": { + setup: func(s *EnvironmentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2es.ErrProjectNotFound) + }, + req: &proto.EnableProjectRequest{ + Id: "id-0", + Command: &proto.EnableProjectCommand{}, + }, + expectedErr: localizedError(statusProjectNotFound, locale.JaJP), + }, + "err: ErrInternal": { + setup: func(s *EnvironmentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(errors.New("error")) + }, + req: &proto.EnableProjectRequest{ + Id: "id-1", + Command: &proto.EnableProjectCommand{}, + }, + expectedErr: localizedError(statusInternal, locale.JaJP), + }, + "success": { + setup: func(s *EnvironmentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + 
s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + req: &proto.EnableProjectRequest{ + Id: "id-1", + Command: &proto.EnableProjectCommand{}, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithToken(t) + service := newEnvironmentService(t, mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.EnableProject(ctx, p.req) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestDisableProjectMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*EnvironmentService) + req *proto.DisableProjectRequest + expectedErr error + }{ + "err: ErrNoCommand": { + setup: nil, + req: &proto.DisableProjectRequest{ + Id: "id-0", + }, + expectedErr: localizedError(statusNoCommand, locale.JaJP), + }, + "err: ErrProjectIDRequired": { + setup: nil, + req: &proto.DisableProjectRequest{ + Command: &proto.DisableProjectCommand{}, + }, + expectedErr: localizedError(statusProjectIDRequired, locale.JaJP), + }, + "err: ErrProjectNotFound": { + setup: func(s *EnvironmentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2es.ErrProjectNotFound) + }, + req: &proto.DisableProjectRequest{ + Id: "id-0", + Command: &proto.DisableProjectCommand{}, + }, + expectedErr: localizedError(statusProjectNotFound, locale.JaJP), + }, + "err: ErrInternal": { + setup: func(s *EnvironmentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(errors.New("error")) + }, + req: 
&proto.DisableProjectRequest{ + Id: "id-1", + Command: &proto.DisableProjectCommand{}, + }, + expectedErr: localizedError(statusInternal, locale.JaJP), + }, + "success": { + setup: func(s *EnvironmentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + req: &proto.DisableProjectRequest{ + Id: "id-1", + Command: &proto.DisableProjectCommand{}, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithToken(t) + service := newEnvironmentService(t, mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.DisableProject(ctx, p.req) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestConvertTrialProjectMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*EnvironmentService) + req *proto.ConvertTrialProjectRequest + expectedErr error + }{ + "err: ErrNoCommand": { + setup: nil, + req: &proto.ConvertTrialProjectRequest{ + Id: "id-0", + }, + expectedErr: localizedError(statusNoCommand, locale.JaJP), + }, + "err: ErrProjectIDRequired": { + setup: nil, + req: &proto.ConvertTrialProjectRequest{ + Command: &proto.ConvertTrialProjectCommand{}, + }, + expectedErr: localizedError(statusProjectIDRequired, locale.JaJP), + }, + "err: ErrProjectNotFound": { + setup: func(s *EnvironmentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2es.ErrProjectNotFound) + }, + req: &proto.ConvertTrialProjectRequest{ + Id: "id-0", + Command: &proto.ConvertTrialProjectCommand{}, + }, + expectedErr: localizedError(statusProjectNotFound, 
locale.JaJP), + }, + "err: ErrInternal": { + setup: func(s *EnvironmentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(errors.New("error")) + }, + req: &proto.ConvertTrialProjectRequest{ + Id: "id-1", + Command: &proto.ConvertTrialProjectCommand{}, + }, + expectedErr: localizedError(statusInternal, locale.JaJP), + }, + "success": { + setup: func(s *EnvironmentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + req: &proto.ConvertTrialProjectRequest{ + Id: "id-1", + Command: &proto.ConvertTrialProjectCommand{}, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithToken(t) + service := newEnvironmentService(t, mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.ConvertTrialProject(ctx, p.req) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestProjectPermissionDeniedMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + action func(context.Context, *EnvironmentService) error + expected error + }{ + "CreateProject": { + action: func(ctx context.Context, es *EnvironmentService) error { + _, err := es.CreateProject(ctx, &proto.CreateProjectRequest{}) + return err + }, + expected: localizedError(statusPermissionDenied, locale.JaJP), + }, + "CreateTrialProject": { + action: func(ctx context.Context, es *EnvironmentService) error { + _, err := es.CreateTrialProject(ctx, &proto.CreateTrialProjectRequest{}) + return err + }, + expected: localizedError(statusPermissionDenied, locale.JaJP), + }, + "UpdateProject": { + 
action: func(ctx context.Context, es *EnvironmentService) error { + _, err := es.UpdateProject(ctx, &proto.UpdateProjectRequest{}) + return err + }, + expected: localizedError(statusPermissionDenied, locale.JaJP), + }, + "EnableProject": { + action: func(ctx context.Context, es *EnvironmentService) error { + _, err := es.EnableProject(ctx, &proto.EnableProjectRequest{}) + return err + }, + expected: localizedError(statusPermissionDenied, locale.JaJP), + }, + "DisableProject": { + action: func(ctx context.Context, es *EnvironmentService) error { + _, err := es.DisableProject(ctx, &proto.DisableProjectRequest{}) + return err + }, + expected: localizedError(statusPermissionDenied, locale.JaJP), + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithTokenRoleUnassigned(t) + service := newEnvironmentService(t, mockController, nil) + actual := p.action(ctx, service) + assert.Equal(t, p.expected, actual) + }) + } +} diff --git a/pkg/environment/client/BUILD.bazel b/pkg/environment/client/BUILD.bazel new file mode 100644 index 000000000..9a0fe3135 --- /dev/null +++ b/pkg/environment/client/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["client.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/environment/client", + visibility = ["//visibility:public"], + deps = [ + "//pkg/rpc/client:go_default_library", + "//proto/environment:go_default_library", + "@org_golang_google_grpc//:go_default_library", + ], +) diff --git a/pkg/environment/client/client.go b/pkg/environment/client/client.go new file mode 100644 index 000000000..0f5b84fd1 --- /dev/null +++ b/pkg/environment/client/client.go @@ -0,0 +1,50 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package client + +import ( + "google.golang.org/grpc" + + rpcclient "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + proto "github.com/bucketeer-io/bucketeer/proto/environment" +) + +type Client interface { + proto.EnvironmentServiceClient + Close() +} + +type client struct { + proto.EnvironmentServiceClient + address string + connection *grpc.ClientConn +} + +func NewClient(addr, certPath string, opts ...rpcclient.Option) (Client, error) { + conn, err := rpcclient.NewClientConn(addr, certPath, opts...) 
+ if err != nil { + return nil, err + } + return &client{ + EnvironmentServiceClient: proto.NewEnvironmentServiceClient(conn), + address: addr, + connection: conn, + }, nil +} + +func (c *client) Close() { + c.connection.Close() +} diff --git a/pkg/environment/client/mock/BUILD.bazel b/pkg/environment/client/mock/BUILD.bazel new file mode 100644 index 000000000..bdb13452e --- /dev/null +++ b/pkg/environment/client/mock/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["client.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/environment/client/mock", + visibility = ["//visibility:public"], + deps = [ + "//proto/environment:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@org_golang_google_grpc//:go_default_library", + ], +) diff --git a/pkg/environment/client/mock/client.go b/pkg/environment/client/mock/client.go new file mode 100644 index 000000000..53e1a65d6 --- /dev/null +++ b/pkg/environment/client/mock/client.go @@ -0,0 +1,330 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: client.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + grpc "google.golang.org/grpc" + + environment "github.com/bucketeer-io/bucketeer/proto/environment" +) + +// MockClient is a mock of Client interface. +type MockClient struct { + ctrl *gomock.Controller + recorder *MockClientMockRecorder +} + +// MockClientMockRecorder is the mock recorder for MockClient. +type MockClientMockRecorder struct { + mock *MockClient +} + +// NewMockClient creates a new mock instance. +func NewMockClient(ctrl *gomock.Controller) *MockClient { + mock := &MockClient{ctrl: ctrl} + mock.recorder = &MockClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. 
+func (m *MockClient) EXPECT() *MockClientMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockClient) Close() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Close") +} + +// Close indicates an expected call of Close. +func (mr *MockClientMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockClient)(nil).Close)) +} + +// ConvertTrialProject mocks base method. +func (m *MockClient) ConvertTrialProject(ctx context.Context, in *environment.ConvertTrialProjectRequest, opts ...grpc.CallOption) (*environment.ConvertTrialProjectResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ConvertTrialProject", varargs...) + ret0, _ := ret[0].(*environment.ConvertTrialProjectResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ConvertTrialProject indicates an expected call of ConvertTrialProject. +func (mr *MockClientMockRecorder) ConvertTrialProject(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConvertTrialProject", reflect.TypeOf((*MockClient)(nil).ConvertTrialProject), varargs...) +} + +// CreateEnvironment mocks base method. +func (m *MockClient) CreateEnvironment(ctx context.Context, in *environment.CreateEnvironmentRequest, opts ...grpc.CallOption) (*environment.CreateEnvironmentResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateEnvironment", varargs...) + ret0, _ := ret[0].(*environment.CreateEnvironmentResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateEnvironment indicates an expected call of CreateEnvironment. 
+func (mr *MockClientMockRecorder) CreateEnvironment(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateEnvironment", reflect.TypeOf((*MockClient)(nil).CreateEnvironment), varargs...) +} + +// CreateProject mocks base method. +func (m *MockClient) CreateProject(ctx context.Context, in *environment.CreateProjectRequest, opts ...grpc.CallOption) (*environment.CreateProjectResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateProject", varargs...) + ret0, _ := ret[0].(*environment.CreateProjectResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateProject indicates an expected call of CreateProject. +func (mr *MockClientMockRecorder) CreateProject(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateProject", reflect.TypeOf((*MockClient)(nil).CreateProject), varargs...) +} + +// CreateTrialProject mocks base method. +func (m *MockClient) CreateTrialProject(ctx context.Context, in *environment.CreateTrialProjectRequest, opts ...grpc.CallOption) (*environment.CreateTrialProjectResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateTrialProject", varargs...) + ret0, _ := ret[0].(*environment.CreateTrialProjectResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateTrialProject indicates an expected call of CreateTrialProject. +func (mr *MockClientMockRecorder) CreateTrialProject(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateTrialProject", reflect.TypeOf((*MockClient)(nil).CreateTrialProject), varargs...) +} + +// DeleteEnvironment mocks base method. +func (m *MockClient) DeleteEnvironment(ctx context.Context, in *environment.DeleteEnvironmentRequest, opts ...grpc.CallOption) (*environment.DeleteEnvironmentResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteEnvironment", varargs...) + ret0, _ := ret[0].(*environment.DeleteEnvironmentResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteEnvironment indicates an expected call of DeleteEnvironment. +func (mr *MockClientMockRecorder) DeleteEnvironment(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteEnvironment", reflect.TypeOf((*MockClient)(nil).DeleteEnvironment), varargs...) +} + +// DisableProject mocks base method. +func (m *MockClient) DisableProject(ctx context.Context, in *environment.DisableProjectRequest, opts ...grpc.CallOption) (*environment.DisableProjectResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DisableProject", varargs...) + ret0, _ := ret[0].(*environment.DisableProjectResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DisableProject indicates an expected call of DisableProject. +func (mr *MockClientMockRecorder) DisableProject(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisableProject", reflect.TypeOf((*MockClient)(nil).DisableProject), varargs...) +} + +// EnableProject mocks base method. 
+func (m *MockClient) EnableProject(ctx context.Context, in *environment.EnableProjectRequest, opts ...grpc.CallOption) (*environment.EnableProjectResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "EnableProject", varargs...) + ret0, _ := ret[0].(*environment.EnableProjectResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// EnableProject indicates an expected call of EnableProject. +func (mr *MockClientMockRecorder) EnableProject(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableProject", reflect.TypeOf((*MockClient)(nil).EnableProject), varargs...) +} + +// GetEnvironment mocks base method. +func (m *MockClient) GetEnvironment(ctx context.Context, in *environment.GetEnvironmentRequest, opts ...grpc.CallOption) (*environment.GetEnvironmentResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetEnvironment", varargs...) + ret0, _ := ret[0].(*environment.GetEnvironmentResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetEnvironment indicates an expected call of GetEnvironment. +func (mr *MockClientMockRecorder) GetEnvironment(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEnvironment", reflect.TypeOf((*MockClient)(nil).GetEnvironment), varargs...) +} + +// GetEnvironmentByNamespace mocks base method. 
+func (m *MockClient) GetEnvironmentByNamespace(ctx context.Context, in *environment.GetEnvironmentByNamespaceRequest, opts ...grpc.CallOption) (*environment.GetEnvironmentByNamespaceResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetEnvironmentByNamespace", varargs...) + ret0, _ := ret[0].(*environment.GetEnvironmentByNamespaceResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetEnvironmentByNamespace indicates an expected call of GetEnvironmentByNamespace. +func (mr *MockClientMockRecorder) GetEnvironmentByNamespace(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEnvironmentByNamespace", reflect.TypeOf((*MockClient)(nil).GetEnvironmentByNamespace), varargs...) +} + +// GetProject mocks base method. +func (m *MockClient) GetProject(ctx context.Context, in *environment.GetProjectRequest, opts ...grpc.CallOption) (*environment.GetProjectResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetProject", varargs...) + ret0, _ := ret[0].(*environment.GetProjectResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetProject indicates an expected call of GetProject. +func (mr *MockClientMockRecorder) GetProject(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProject", reflect.TypeOf((*MockClient)(nil).GetProject), varargs...) +} + +// ListEnvironments mocks base method. 
+func (m *MockClient) ListEnvironments(ctx context.Context, in *environment.ListEnvironmentsRequest, opts ...grpc.CallOption) (*environment.ListEnvironmentsResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListEnvironments", varargs...) + ret0, _ := ret[0].(*environment.ListEnvironmentsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListEnvironments indicates an expected call of ListEnvironments. +func (mr *MockClientMockRecorder) ListEnvironments(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListEnvironments", reflect.TypeOf((*MockClient)(nil).ListEnvironments), varargs...) +} + +// ListProjects mocks base method. +func (m *MockClient) ListProjects(ctx context.Context, in *environment.ListProjectsRequest, opts ...grpc.CallOption) (*environment.ListProjectsResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListProjects", varargs...) + ret0, _ := ret[0].(*environment.ListProjectsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListProjects indicates an expected call of ListProjects. +func (mr *MockClientMockRecorder) ListProjects(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListProjects", reflect.TypeOf((*MockClient)(nil).ListProjects), varargs...) +} + +// UpdateEnvironment mocks base method. 
+func (m *MockClient) UpdateEnvironment(ctx context.Context, in *environment.UpdateEnvironmentRequest, opts ...grpc.CallOption) (*environment.UpdateEnvironmentResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdateEnvironment", varargs...) + ret0, _ := ret[0].(*environment.UpdateEnvironmentResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateEnvironment indicates an expected call of UpdateEnvironment. +func (mr *MockClientMockRecorder) UpdateEnvironment(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateEnvironment", reflect.TypeOf((*MockClient)(nil).UpdateEnvironment), varargs...) +} + +// UpdateProject mocks base method. +func (m *MockClient) UpdateProject(ctx context.Context, in *environment.UpdateProjectRequest, opts ...grpc.CallOption) (*environment.UpdateProjectResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdateProject", varargs...) + ret0, _ := ret[0].(*environment.UpdateProjectResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateProject indicates an expected call of UpdateProject. +func (mr *MockClientMockRecorder) UpdateProject(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateProject", reflect.TypeOf((*MockClient)(nil).UpdateProject), varargs...) 
+} diff --git a/pkg/environment/cmd/server/BUILD.bazel b/pkg/environment/cmd/server/BUILD.bazel new file mode 100644 index 000000000..14f7ab040 --- /dev/null +++ b/pkg/environment/cmd/server/BUILD.bazel @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["server.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/environment/cmd/server", + visibility = ["//visibility:public"], + deps = [ + "//pkg/account/client:go_default_library", + "//pkg/cli:go_default_library", + "//pkg/environment/api:go_default_library", + "//pkg/health:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/pubsub:go_default_library", + "//pkg/pubsub/publisher:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/rpc/client:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//pkg/token:go_default_library", + "@in_gopkg_alecthomas_kingpin_v2//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/environment/cmd/server/server.go b/pkg/environment/cmd/server/server.go new file mode 100644 index 000000000..27932f56e --- /dev/null +++ b/pkg/environment/cmd/server/server.go @@ -0,0 +1,187 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package server + +import ( + "context" + "time" + + "go.uber.org/zap" + kingpin "gopkg.in/alecthomas/kingpin.v2" + + accountclient "github.com/bucketeer-io/bucketeer/pkg/account/client" + "github.com/bucketeer-io/bucketeer/pkg/cli" + "github.com/bucketeer-io/bucketeer/pkg/environment/api" + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/pubsub" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + "github.com/bucketeer-io/bucketeer/pkg/token" +) + +const command = "server" + +type server struct { + *kingpin.CmdClause + port *int + project *string + mysqlUser *string + mysqlPass *string + mysqlHost *string + mysqlPort *int + mysqlDBName *string + domainEventTopic *string + accountService *string + certPath *string + keyPath *string + serviceTokenPath *string + oauthKeyPath *string + oauthClientID *string + oauthIssuer *string +} + +func RegisterServerCommand(r cli.CommandRegistry, p cli.ParentCommand) cli.Command { + cmd := p.Command(command, "Start the gRPC server") + server := &server{ + CmdClause: cmd, + port: cmd.Flag("port", "Port to bind to.").Default("9090").Int(), + project: cmd.Flag("project", "Google Cloud project name.").Required().String(), + mysqlUser: cmd.Flag("mysql-user", "MySQL user.").Required().String(), + mysqlPass: cmd.Flag("mysql-pass", "MySQL password.").Required().String(), + mysqlHost: cmd.Flag("mysql-host", "MySQL host.").Required().String(), + mysqlPort: cmd.Flag("mysql-port", "MySQL port.").Required().Int(), + mysqlDBName: cmd.Flag("mysql-db-name", "MySQL database name.").Required().String(), + domainEventTopic: cmd.Flag("domain-event-topic", "PubSub topic to publish domain events.").Required().String(), + accountService: cmd.Flag( + "account-service", + "bucketeer-account-service 
address.", + ).Default("account:9090").String(), + certPath: cmd.Flag("cert", "Path to TLS certificate.").Required().String(), + keyPath: cmd.Flag("key", "Path to TLS key.").Required().String(), + serviceTokenPath: cmd.Flag("service-token", "Path to service token.").Required().String(), + oauthKeyPath: cmd.Flag("oauth-key", "Path to public key used to verify oauth token.").Required().String(), + oauthClientID: cmd.Flag("oauth-client-id", "The oauth clientID registered at dex.").Required().String(), + oauthIssuer: cmd.Flag("oauth-issuer", "The url of dex issuer.").Required().String(), + } + r.RegisterCommand(server) + return server +} + +func (s *server) Run(ctx context.Context, metrics metrics.Metrics, logger *zap.Logger) error { + registerer := metrics.DefaultRegisterer() + + mysqlClient, err := s.createMySQLClient(ctx, registerer, logger) + if err != nil { + return err + } + defer mysqlClient.Close() + + publisher, err := s.createDomainEventPublisher(ctx, registerer, logger) + if err != nil { + return err + } + defer publisher.Stop() + + creds, err := client.NewPerRPCCredentials(*s.serviceTokenPath) + if err != nil { + return err + } + accountClient, err := accountclient.NewClient(*s.accountService, *s.certPath, + client.WithPerRPCCredentials(creds), + client.WithDialTimeout(30*time.Second), + client.WithBlock(), + client.WithMetrics(registerer), + client.WithLogger(logger), + ) + if err != nil { + return err + } + defer accountClient.Close() + + service := api.NewEnvironmentService( + accountClient, + mysqlClient, + publisher, + api.WithLogger(logger), + ) + + verifier, err := token.NewVerifier(*s.oauthKeyPath, *s.oauthIssuer, *s.oauthClientID) + if err != nil { + return err + } + + healthChecker := health.NewGrpcChecker( + health.WithTimeout(time.Second), + health.WithCheck("metrics", metrics.Check), + ) + go healthChecker.Run(ctx) + + server := rpc.NewServer(service, *s.certPath, *s.keyPath, + rpc.WithPort(*s.port), + rpc.WithVerifier(verifier), + 
rpc.WithMetrics(registerer), + rpc.WithLogger(logger), + rpc.WithService(healthChecker), + rpc.WithHandler("/health", healthChecker), + ) + defer server.Stop(10 * time.Second) + go server.Run() + + <-ctx.Done() + return nil +} + +func (s *server) createMySQLClient( + ctx context.Context, + registerer metrics.Registerer, + logger *zap.Logger, +) (mysql.Client, error) { + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + return mysql.NewClient( + ctx, + *s.mysqlUser, *s.mysqlPass, *s.mysqlHost, + *s.mysqlPort, + *s.mysqlDBName, + mysql.WithLogger(logger), + mysql.WithMetrics(registerer), + ) +} + +func (s *server) createDomainEventPublisher( + ctx context.Context, + registerer metrics.Registerer, + logger *zap.Logger, +) (publisher.Publisher, error) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + client, err := pubsub.NewClient( + ctx, + *s.project, + pubsub.WithMetrics(registerer), + pubsub.WithLogger(logger), + ) + if err != nil { + return nil, err + } + domainPublisher, err := client.CreatePublisher(*s.domainEventTopic) + if err != nil { + return nil, err + } + return domainPublisher, nil +} diff --git a/pkg/environment/command/BUILD.bazel b/pkg/environment/command/BUILD.bazel new file mode 100644 index 000000000..cca0aaccf --- /dev/null +++ b/pkg/environment/command/BUILD.bazel @@ -0,0 +1,39 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "command.go", + "environment.go", + "project.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/environment/command", + visibility = ["//visibility:public"], + deps = [ + "//pkg/domainevent/domain:go_default_library", + "//pkg/environment/domain:go_default_library", + "//pkg/pubsub/publisher:go_default_library", + "//proto/environment:go_default_library", + "//proto/event/domain:go_default_library", + "@com_github_golang_protobuf//proto:go_default_library", + ], +) + +go_test( + name 
= "go_default_test", + srcs = [ + "environment_test.go", + "project_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//pkg/environment/domain:go_default_library", + "//pkg/pubsub/publisher:go_default_library", + "//pkg/pubsub/publisher/mock:go_default_library", + "//proto/account:go_default_library", + "//proto/environment:go_default_library", + "//proto/event/domain:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + ], +) diff --git a/pkg/environment/command/command.go b/pkg/environment/command/command.go new file mode 100644 index 000000000..352f9f6d9 --- /dev/null +++ b/pkg/environment/command/command.go @@ -0,0 +1,30 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package command + +import ( + "context" + "errors" +) + +var ( + errUnknownCommand = errors.New("command: unknown command") +) + +type Command interface{} + +type Handler interface { + Handle(ctx context.Context, cmd Command) error +} diff --git a/pkg/environment/command/environment.go b/pkg/environment/command/environment.go new file mode 100644 index 000000000..c0cca6289 --- /dev/null +++ b/pkg/environment/command/environment.go @@ -0,0 +1,111 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package command + +import ( + "context" + + pb "github.com/golang/protobuf/proto" // nolint:staticcheck + + domainevent "github.com/bucketeer-io/bucketeer/pkg/domainevent/domain" + "github.com/bucketeer-io/bucketeer/pkg/environment/domain" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + proto "github.com/bucketeer-io/bucketeer/proto/environment" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +type environmentCommandHandler struct { + editor *eventproto.Editor + environment *domain.Environment + publisher publisher.Publisher +} + +func NewEnvironmentCommandHandler( + editor *eventproto.Editor, + environment *domain.Environment, + p publisher.Publisher, +) Handler { + return &environmentCommandHandler{ + editor: editor, + environment: environment, + publisher: p, + } +} + +func (h *environmentCommandHandler) Handle(ctx context.Context, cmd Command) error { + switch c := cmd.(type) { + case *proto.CreateEnvironmentCommand: + return h.create(ctx, c) + case *proto.RenameEnvironmentCommand: + return h.rename(ctx, c) + case *proto.ChangeDescriptionEnvironmentCommand: + return h.changeDescription(ctx, c) + case *proto.DeleteEnvironmentCommand: + return h.delete(ctx, c) + default: + return errUnknownCommand + } +} + +func (h *environmentCommandHandler) create(ctx context.Context, cmd *proto.CreateEnvironmentCommand) error { + return h.send(ctx, eventproto.Event_ENVIRONMENT_CREATED, &eventproto.EnvironmentCreatedEvent{ + Id: h.environment.Id, + Namespace: h.environment.Namespace, + Name: h.environment.Name, + 
Description: h.environment.Description, + Deleted: h.environment.Deleted, + CreatedAt: h.environment.CreatedAt, + UpdatedAt: h.environment.UpdatedAt, + ProjectId: h.environment.ProjectId, + }) +} + +func (h *environmentCommandHandler) rename(ctx context.Context, cmd *proto.RenameEnvironmentCommand) error { + h.environment.Rename(cmd.Name) + return h.send(ctx, eventproto.Event_ENVIRONMENT_RENAMED, &eventproto.EnvironmentRenamedEvent{ + Id: h.environment.Id, + Name: cmd.Name, + }) +} + +func (h *environmentCommandHandler) changeDescription( + ctx context.Context, + cmd *proto.ChangeDescriptionEnvironmentCommand, +) error { + h.environment.ChangeDescription(cmd.Description) + return h.send(ctx, eventproto.Event_ENVIRONMENT_DESCRIPTION_CHANGED, &eventproto.EnvironmentDescriptionChangedEvent{ + Id: h.environment.Id, + Description: cmd.Description, + }) +} + +func (h *environmentCommandHandler) delete(ctx context.Context, cmd *proto.DeleteEnvironmentCommand) error { + h.environment.SetDeleted() + return h.send(ctx, eventproto.Event_ENVIRONMENT_DELETED, &eventproto.EnvironmentDeletedEvent{ + Id: h.environment.Id, + Namespace: h.environment.Namespace, + }) +} + +func (h *environmentCommandHandler) send(ctx context.Context, eventType eventproto.Event_Type, event pb.Message) error { + e, err := domainevent.NewAdminEvent(h.editor, eventproto.Event_ENVIRONMENT, h.environment.Id, eventType, event) + if err != nil { + return err + } + if err := h.publisher.Publish(ctx, e); err != nil { + return err + } + return nil +} diff --git a/pkg/environment/command/environment_test.go b/pkg/environment/command/environment_test.go new file mode 100644 index 000000000..fe305dfb0 --- /dev/null +++ b/pkg/environment/command/environment_test.go @@ -0,0 +1,103 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package command + +import ( + "context" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/environment/domain" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + publishermock "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher/mock" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + environmentproto "github.com/bucketeer-io/bucketeer/proto/environment" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +func TestHandleCreateEnvironmentCommand(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + publisher := publishermock.NewMockPublisher(mockController) + env := domain.NewEnvironment("env-id", "env desc", "project-id") + + h := newEnvironmentCommandHandler(t, publisher, env) + publisher.EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil) + cmd := &environmentproto.CreateEnvironmentCommand{Id: env.Id, Description: env.Description} + err := h.Handle(context.Background(), cmd) + assert.NoError(t, err) +} + +func TestHandleRenameEnvironmentCommand(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + publisher := publishermock.NewMockPublisher(mockController) + env := domain.NewEnvironment("env-id", "env desc", "project-id") + + h := newEnvironmentCommandHandler(t, publisher, env) + publisher.EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil) + newName := "new-env-name" + cmd := 
&environmentproto.RenameEnvironmentCommand{Name: newName} + err := h.Handle(context.Background(), cmd) + assert.NoError(t, err) + assert.Equal(t, newName, env.Name) +} + +func TestHandleChangeDescriptionEnvironmentCommand(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + publisher := publishermock.NewMockPublisher(mockController) + env := domain.NewEnvironment("env-id", "env desc", "project-id") + + h := newEnvironmentCommandHandler(t, publisher, env) + publisher.EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil) + newDesc := "new env desc" + cmd := &environmentproto.ChangeDescriptionEnvironmentCommand{Description: newDesc} + err := h.Handle(context.Background(), cmd) + assert.NoError(t, err) + assert.Equal(t, newDesc, env.Description) +} + +func TestHandleDeleteEnvironmentCommand(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + publisher := publishermock.NewMockPublisher(mockController) + env := domain.NewEnvironment("env-id", "env desc", "project-id") + + h := newEnvironmentCommandHandler(t, publisher, env) + publisher.EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil) + cmd := &environmentproto.DeleteEnvironmentCommand{} + err := h.Handle(context.Background(), cmd) + assert.NoError(t, err) + assert.True(t, env.Deleted) +} + +func newEnvironmentCommandHandler(t *testing.T, publisher publisher.Publisher, env *domain.Environment) Handler { + t.Helper() + return NewEnvironmentCommandHandler( + &eventproto.Editor{ + Email: "email", + Role: accountproto.Account_EDITOR, + }, + env, + publisher, + ) +} diff --git a/pkg/environment/command/project.go b/pkg/environment/command/project.go new file mode 100644 index 000000000..07b43a632 --- /dev/null +++ b/pkg/environment/command/project.go @@ -0,0 +1,131 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package command + +import ( + "context" + + pb "github.com/golang/protobuf/proto" // nolint:staticcheck + + domainevent "github.com/bucketeer-io/bucketeer/pkg/domainevent/domain" + "github.com/bucketeer-io/bucketeer/pkg/environment/domain" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + proto "github.com/bucketeer-io/bucketeer/proto/environment" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +type projectCommandHandler struct { + editor *eventproto.Editor + project *domain.Project + publisher publisher.Publisher +} + +func NewProjectCommandHandler( + editor *eventproto.Editor, + project *domain.Project, + p publisher.Publisher, +) Handler { + return &projectCommandHandler{ + editor: editor, + project: project, + publisher: p, + } +} + +func (h *projectCommandHandler) Handle(ctx context.Context, cmd Command) error { + switch c := cmd.(type) { + case *proto.CreateProjectCommand: + return h.create(ctx, c) + case *proto.CreateTrialProjectCommand: + return h.createTrial(ctx, c) + case *proto.ChangeDescriptionProjectCommand: + return h.changeDescription(ctx, c) + case *proto.EnableProjectCommand: + return h.enable(ctx, c) + case *proto.DisableProjectCommand: + return h.disable(ctx, c) + case *proto.ConvertTrialProjectCommand: + return h.convertTrial(ctx, c) + default: + return errUnknownCommand + } +} + +func (h *projectCommandHandler) create(ctx context.Context, cmd 
*proto.CreateProjectCommand) error { + return h.send(ctx, eventproto.Event_PROJECT_CREATED, &eventproto.ProjectCreatedEvent{ + Id: h.project.Id, + Description: h.project.Description, + Disabled: h.project.Disabled, + Trial: h.project.Trial, + CreatorEmail: h.project.CreatorEmail, + CreatedAt: h.project.CreatedAt, + UpdatedAt: h.project.UpdatedAt, + }) +} + +func (h *projectCommandHandler) createTrial(ctx context.Context, cmd *proto.CreateTrialProjectCommand) error { + return h.send(ctx, eventproto.Event_PROJECT_TRIAL_CREATED, &eventproto.ProjectTrialCreatedEvent{ + Id: h.project.Id, + Description: h.project.Description, + Disabled: h.project.Disabled, + Trial: h.project.Trial, + CreatorEmail: h.project.CreatorEmail, + CreatedAt: h.project.CreatedAt, + UpdatedAt: h.project.UpdatedAt, + }) +} + +func (h *projectCommandHandler) changeDescription( + ctx context.Context, + cmd *proto.ChangeDescriptionProjectCommand, +) error { + h.project.ChangeDescription(cmd.Description) + return h.send(ctx, eventproto.Event_PROJECT_DESCRIPTION_CHANGED, &eventproto.ProjectDescriptionChangedEvent{ + Id: h.project.Id, + Description: cmd.Description, + }) +} + +func (h *projectCommandHandler) enable(ctx context.Context, cmd *proto.EnableProjectCommand) error { + h.project.Enable() + return h.send(ctx, eventproto.Event_PROJECT_ENABLED, &eventproto.ProjectEnabledEvent{ + Id: h.project.Id, + }) +} + +func (h *projectCommandHandler) disable(ctx context.Context, cmd *proto.DisableProjectCommand) error { + h.project.Disable() + return h.send(ctx, eventproto.Event_PROJECT_DISABLED, &eventproto.ProjectDisabledEvent{ + Id: h.project.Id, + }) +} + +func (h *projectCommandHandler) convertTrial(ctx context.Context, cmd *proto.ConvertTrialProjectCommand) error { + h.project.ConvertTrial() + return h.send(ctx, eventproto.Event_PROJECT_TRIAL_CONVERTED, &eventproto.ProjectTrialConvertedEvent{ + Id: h.project.Id, + }) +} + +func (h *projectCommandHandler) send(ctx context.Context, eventType 
eventproto.Event_Type, event pb.Message) error { + e, err := domainevent.NewAdminEvent(h.editor, eventproto.Event_PROJECT, h.project.Id, eventType, event) + if err != nil { + return err + } + if err := h.publisher.Publish(ctx, e); err != nil { + return err + } + return nil +} diff --git a/pkg/environment/command/project_test.go b/pkg/environment/command/project_test.go new file mode 100644 index 000000000..f5a521903 --- /dev/null +++ b/pkg/environment/command/project_test.go @@ -0,0 +1,132 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package command + +import ( + "context" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/environment/domain" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + publishermock "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher/mock" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + environmentproto "github.com/bucketeer-io/bucketeer/proto/environment" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +func TestHandleCreateProjectCommand(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + publisher := publishermock.NewMockPublisher(mockController) + project := domain.NewProject("project-id", "project desc", "test@example.com", false) + + h := newProjectCommandHandler(t, publisher, project) + publisher.EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil) + cmd := &environmentproto.CreateProjectCommand{Id: project.Id, Description: project.Description} + err := h.Handle(context.Background(), cmd) + assert.NoError(t, err) +} + +func TestHandleCreateTrialProjectCommand(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + publisher := publishermock.NewMockPublisher(mockController) + project := domain.NewProject("project-id", "", "test@example.com", true) + + h := newProjectCommandHandler(t, publisher, project) + publisher.EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil) + cmd := &environmentproto.CreateTrialProjectCommand{Id: project.Id, Email: project.CreatorEmail} + err := h.Handle(context.Background(), cmd) + assert.NoError(t, err) +} + +func TestHandleChangeDescriptionProjectCommand(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + publisher := publishermock.NewMockPublisher(mockController) + project := domain.NewProject("project-id", "project desc", 
"test@example.com", false) + + h := newProjectCommandHandler(t, publisher, project) + publisher.EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil) + newDesc := "new project desc" + cmd := &environmentproto.ChangeDescriptionProjectCommand{Description: newDesc} + err := h.Handle(context.Background(), cmd) + assert.NoError(t, err) + assert.Equal(t, newDesc, project.Description) +} + +func TestHandleEnableProjectCommand(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + publisher := publishermock.NewMockPublisher(mockController) + project := domain.NewProject("project-id", "project desc", "test@example.com", false) + project.Disabled = true + + h := newProjectCommandHandler(t, publisher, project) + publisher.EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil) + cmd := &environmentproto.EnableProjectCommand{} + err := h.Handle(context.Background(), cmd) + assert.NoError(t, err) + assert.False(t, project.Disabled) +} + +func TestHandleDisableProjectCommand(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + publisher := publishermock.NewMockPublisher(mockController) + project := domain.NewProject("project-id", "project desc", "test@example.com", false) + + h := newProjectCommandHandler(t, publisher, project) + publisher.EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil) + cmd := &environmentproto.DisableProjectCommand{} + err := h.Handle(context.Background(), cmd) + assert.NoError(t, err) + assert.True(t, project.Disabled) +} + +func TestHandleConvertTrialProjectCommand(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + publisher := publishermock.NewMockPublisher(mockController) + project := domain.NewProject("project-id", "project desc", "test@example.com", true) + + h := newProjectCommandHandler(t, publisher, project) + publisher.EXPECT().Publish(gomock.Any(), 
gomock.Any()).Return(nil) + cmd := &environmentproto.ConvertTrialProjectCommand{} + err := h.Handle(context.Background(), cmd) + assert.NoError(t, err) + assert.False(t, project.Trial) +} + +func newProjectCommandHandler(t *testing.T, publisher publisher.Publisher, project *domain.Project) Handler { + t.Helper() + return NewProjectCommandHandler( + &eventproto.Editor{ + Email: "email", + Role: accountproto.Account_EDITOR, + }, + project, + publisher, + ) +} diff --git a/pkg/environment/domain/BUILD.bazel b/pkg/environment/domain/BUILD.bazel new file mode 100644 index 000000000..a9b9e3d55 --- /dev/null +++ b/pkg/environment/domain/BUILD.bazel @@ -0,0 +1,22 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "environment.go", + "project.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/environment/domain", + visibility = ["//visibility:public"], + deps = ["//proto/environment:go_default_library"], +) + +go_test( + name = "go_default_test", + srcs = [ + "environment_test.go", + "project_test.go", + ], + embed = [":go_default_library"], + deps = ["@com_github_stretchr_testify//assert:go_default_library"], +) diff --git a/pkg/environment/domain/environment.go b/pkg/environment/domain/environment.go new file mode 100644 index 000000000..b69e5f061 --- /dev/null +++ b/pkg/environment/domain/environment.go @@ -0,0 +1,56 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package domain + +import ( + "strings" + "time" + + proto "github.com/bucketeer-io/bucketeer/proto/environment" +) + +type Environment struct { + *proto.Environment +} + +func NewEnvironment(id, description, projectID string) *Environment { + now := time.Now().Unix() + namespace := strings.Replace(id, "-", "", -1) + return &Environment{&proto.Environment{ + Id: id, + Namespace: namespace, + Name: id, + Description: description, + Deleted: false, + CreatedAt: now, + UpdatedAt: now, + ProjectId: projectID, + }} +} + +func (e *Environment) Rename(name string) { + e.Environment.Name = name + e.Environment.UpdatedAt = time.Now().Unix() +} + +func (e *Environment) ChangeDescription(description string) { + e.Environment.Description = description + e.Environment.UpdatedAt = time.Now().Unix() +} + +func (e *Environment) SetDeleted() { + e.Environment.Deleted = true + e.Environment.UpdatedAt = time.Now().Unix() +} diff --git a/pkg/environment/domain/environment_test.go b/pkg/environment/domain/environment_test.go new file mode 100644 index 000000000..8d6b811f1 --- /dev/null +++ b/pkg/environment/domain/environment_test.go @@ -0,0 +1,52 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package domain + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewEnvironment(t *testing.T) { + t.Parallel() + env := NewEnvironment("env-id", "env desc", "project-id") + expectedNamespace := "envid" + assert.IsType(t, &Environment{}, env) + assert.Equal(t, expectedNamespace, env.Namespace) +} + +func TestRenameEnvironment(t *testing.T) { + t.Parallel() + env := NewEnvironment("env-id", "env desc", "project-id") + newName := "new-env-name" + env.Rename(newName) + assert.Equal(t, newName, env.Name) +} + +func TestChangeDescriptionEnvironment(t *testing.T) { + t.Parallel() + env := NewEnvironment("env-id", "env desc", "project-id") + newDesc := "new env desc" + env.ChangeDescription(newDesc) + assert.Equal(t, newDesc, env.Description) +} + +func TestSetDeletedEnvironment(t *testing.T) { + t.Parallel() + env := NewEnvironment("env-id", "env desc", "project-id") + env.SetDeleted() + assert.True(t, env.Deleted) +} diff --git a/pkg/environment/domain/project.go b/pkg/environment/domain/project.go new file mode 100644 index 000000000..50bcfe493 --- /dev/null +++ b/pkg/environment/domain/project.go @@ -0,0 +1,58 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package domain + +import ( + "time" + + proto "github.com/bucketeer-io/bucketeer/proto/environment" +) + +type Project struct { + *proto.Project +} + +func NewProject(id, description, creatorEmail string, trial bool) *Project { + now := time.Now().Unix() + return &Project{&proto.Project{ + Id: id, + Description: description, + Disabled: false, + Trial: trial, + CreatorEmail: creatorEmail, + CreatedAt: now, + UpdatedAt: now, + }} +} + +func (p *Project) ChangeDescription(description string) { + p.Project.Description = description + p.Project.UpdatedAt = time.Now().Unix() +} + +func (p *Project) Enable() { + p.Project.Disabled = false + p.Project.UpdatedAt = time.Now().Unix() +} + +func (p *Project) Disable() { + p.Project.Disabled = true + p.Project.UpdatedAt = time.Now().Unix() +} + +func (p *Project) ConvertTrial() { + p.Project.Trial = false + p.Project.UpdatedAt = time.Now().Unix() +} diff --git a/pkg/environment/domain/project_test.go b/pkg/environment/domain/project_test.go new file mode 100644 index 000000000..0d7d5054d --- /dev/null +++ b/pkg/environment/domain/project_test.go @@ -0,0 +1,57 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package domain + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewProject(t *testing.T) { + t.Parallel() + project := NewProject("project-id", "project desc", "test@example.com", false) + assert.IsType(t, &Project{}, project) +} + +func TestChangeDescriptionProject(t *testing.T) { + t.Parallel() + project := NewProject("project-id", "project desc", "test@example.com", false) + newDesc := "new env desc" + project.ChangeDescription(newDesc) + assert.Equal(t, newDesc, project.Description) +} + +func TestEnableProject(t *testing.T) { + t.Parallel() + project := NewProject("project-id", "project desc", "test@example.com", false) + project.Disabled = true + project.Enable() + assert.False(t, project.Disabled) +} + +func TestDisableProject(t *testing.T) { + t.Parallel() + project := NewProject("project-id", "project desc", "test@example.com", false) + project.Disable() + assert.True(t, project.Disabled) +} + +func TestConvertTrialProject(t *testing.T) { + t.Parallel() + project := NewProject("project-id", "project desc", "test@example.com", true) + project.ConvertTrial() + assert.False(t, project.Trial) +} diff --git a/pkg/environment/storage/v2/BUILD.bazel b/pkg/environment/storage/v2/BUILD.bazel new file mode 100644 index 000000000..785c995c4 --- /dev/null +++ b/pkg/environment/storage/v2/BUILD.bazel @@ -0,0 +1,33 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "environment.go", + "project.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/environment/storage/v2", + visibility = ["//visibility:public"], + deps = [ + "//pkg/environment/domain:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//proto/environment:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "environment_test.go", + "project_test.go", + ], + embed = [":go_default_library"], + deps = [ + 
"//pkg/environment/domain:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//pkg/storage/v2/mysql/mock:go_default_library", + "//proto/environment:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + ], +) diff --git a/pkg/environment/storage/v2/environment.go b/pkg/environment/storage/v2/environment.go new file mode 100644 index 000000000..0f4211a8b --- /dev/null +++ b/pkg/environment/storage/v2/environment.go @@ -0,0 +1,286 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package v2 + +import ( + "context" + "errors" + "fmt" + + "github.com/bucketeer-io/bucketeer/pkg/environment/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + proto "github.com/bucketeer-io/bucketeer/proto/environment" +) + +var ( + ErrEnvironmentAlreadyExists = errors.New("environment: already exists") + ErrEnvironmentNotFound = errors.New("environment: not found") + ErrEnvironmentUnexpectedAffectedRows = errors.New("environment: unexpected affected rows") +) + +type EnvironmentStorage interface { + CreateEnvironment(ctx context.Context, e *domain.Environment) error + UpdateEnvironment(ctx context.Context, e *domain.Environment) error + GetEnvironment(ctx context.Context, id string) (*domain.Environment, error) + GetEnvironmentByNamespace( + ctx context.Context, + namespace string, + deleted bool, + ) (*domain.Environment, error) + ListEnvironments( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, + ) ([]*proto.Environment, int, int64, error) +} + +type environmentStorage struct { + qe mysql.QueryExecer +} + +func NewEnvironmentStorage(qe mysql.QueryExecer) EnvironmentStorage { + return &environmentStorage{qe} +} + +func (s *environmentStorage) CreateEnvironment(ctx context.Context, e *domain.Environment) error { + query := ` + INSERT INTO environment ( + id, + namespace, + name, + description, + deleted, + created_at, + updated_at, + project_id + ) VALUES ( + ?, ?, ?, ?, ?, ?, ?, ? 
+ ) + ` + _, err := s.qe.ExecContext( + ctx, + query, + e.Id, + e.Namespace, + e.Name, + e.Description, + e.Deleted, + e.CreatedAt, + e.UpdatedAt, + e.ProjectId, + ) + if err != nil { + if err == mysql.ErrDuplicateEntry { + return ErrEnvironmentAlreadyExists + } + return err + } + return nil +} + +func (s *environmentStorage) UpdateEnvironment(ctx context.Context, e *domain.Environment) error { + query := ` + UPDATE + environment + SET + namespace = ?, + name = ?, + description = ?, + deleted = ?, + created_at = ?, + updated_at = ?, + project_id = ? + WHERE + id = ? + ` + result, err := s.qe.ExecContext( + ctx, + query, + e.Namespace, + e.Name, + e.Description, + e.Deleted, + e.CreatedAt, + e.UpdatedAt, + e.ProjectId, + e.Id, + ) + if err != nil { + return err + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return err + } + if rowsAffected != 1 { + return ErrEnvironmentUnexpectedAffectedRows + } + return nil +} + +func (s *environmentStorage) GetEnvironment(ctx context.Context, id string) (*domain.Environment, error) { + e := proto.Environment{} + query := ` + SELECT + id, + namespace, + name, + description, + deleted, + created_at, + updated_at, + project_id + FROM + environment + WHERE + id = ? + ` + err := s.qe.QueryRowContext( + ctx, + query, + id, + ).Scan( + &e.Id, + &e.Namespace, + &e.Name, + &e.Description, + &e.Deleted, + &e.CreatedAt, + &e.UpdatedAt, + &e.ProjectId, + ) + if err != nil { + if err == mysql.ErrNoRows { + return nil, ErrEnvironmentNotFound + } + return nil, err + } + return &domain.Environment{Environment: &e}, nil +} + +func (s *environmentStorage) GetEnvironmentByNamespace( + ctx context.Context, + namespace string, + deleted bool, +) (*domain.Environment, error) { + e := proto.Environment{} + query := ` + SELECT + id, + namespace, + name, + description, + deleted, + created_at, + updated_at, + project_id + FROM + environment + WHERE + namespace = ? AND + deleted = ? 
+ ` + err := s.qe.QueryRowContext( + ctx, + query, + namespace, + deleted, + ).Scan( + &e.Id, + &e.Namespace, + &e.Name, + &e.Description, + &e.Deleted, + &e.CreatedAt, + &e.UpdatedAt, + &e.ProjectId, + ) + if err != nil { + if err == mysql.ErrNoRows { + return nil, ErrEnvironmentNotFound + } + return nil, err + } + return &domain.Environment{Environment: &e}, nil + +} + +func (s *environmentStorage) ListEnvironments(ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, +) ([]*proto.Environment, int, int64, error) { + whereSQL, whereArgs := mysql.ConstructWhereSQLString(whereParts) + orderBySQL := mysql.ConstructOrderBySQLString(orders) + limitOffsetSQL := mysql.ConstructLimitOffsetSQLString(limit, offset) + query := fmt.Sprintf(` + SELECT + id, + namespace, + name, + description, + deleted, + created_at, + updated_at, + project_id + FROM + environment + %s %s %s + `, whereSQL, orderBySQL, limitOffsetSQL, + ) + rows, err := s.qe.QueryContext(ctx, query, whereArgs...) 
+ if err != nil { + return nil, 0, 0, err + } + defer rows.Close() + environments := make([]*proto.Environment, 0, limit) + for rows.Next() { + e := proto.Environment{} + err := rows.Scan( + &e.Id, + &e.Namespace, + &e.Name, + &e.Description, + &e.Deleted, + &e.CreatedAt, + &e.UpdatedAt, + &e.ProjectId, + ) + if err != nil { + return nil, 0, 0, err + } + environments = append(environments, &e) + } + if err := rows.Err(); err != nil { + return nil, 0, 0, err + } + nextOffset := offset + len(environments) + var totalCount int64 + countQuery := fmt.Sprintf(` + SELECT + COUNT(1) + FROM + environment + %s %s + `, whereSQL, orderBySQL, + ) + err = s.qe.QueryRowContext(ctx, countQuery, whereArgs...).Scan(&totalCount) + if err != nil { + return nil, 0, 0, err + } + return environments, nextOffset, totalCount, nil +} diff --git a/pkg/environment/storage/v2/environment_test.go b/pkg/environment/storage/v2/environment_test.go new file mode 100644 index 000000000..6a8a8c917 --- /dev/null +++ b/pkg/environment/storage/v2/environment_test.go @@ -0,0 +1,348 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v2 + +import ( + "context" + "errors" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/environment/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + proto "github.com/bucketeer-io/bucketeer/proto/environment" +) + +func TestNewEnvironmentStorage(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + storage := NewEnvironmentStorage(mock.NewMockQueryExecer(mockController)) + assert.IsType(t, &environmentStorage{}, storage) +} + +func TestCreateEnvironment(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*environmentStorage) + input *domain.Environment + expectedErr error + }{ + "ErrEnvironmentAlreadyExists": { + setup: func(s *environmentStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, mysql.ErrDuplicateEntry) + }, + input: &domain.Environment{ + Environment: &proto.Environment{Id: "eid-0"}, + }, + expectedErr: ErrEnvironmentAlreadyExists, + }, + "Error": { + setup: func(s *environmentStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + }, + input: &domain.Environment{ + Environment: &proto.Environment{Id: "eid-0"}, + }, + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *environmentStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, nil) + }, + input: &domain.Environment{ + Environment: &proto.Environment{Id: "eid-0"}, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newEnvironmentStorageWithMock(t, mockController) + 
if p.setup != nil { + p.setup(storage) + } + err := storage.CreateEnvironment(context.Background(), p.input) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestUpdateEnvironment(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*environmentStorage) + input *domain.Environment + expectedErr error + }{ + "ErrEnvironmentUnexpectedAffectedRows": { + setup: func(s *environmentStorage) { + result := mock.NewMockResult(mockController) + result.EXPECT().RowsAffected().Return(int64(0), nil) + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(result, nil) + }, + input: &domain.Environment{ + Environment: &proto.Environment{Id: "eid-0"}, + }, + expectedErr: ErrEnvironmentUnexpectedAffectedRows, + }, + "Error": { + setup: func(s *environmentStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + }, + input: &domain.Environment{ + Environment: &proto.Environment{Id: "eid-0"}, + }, + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *environmentStorage) { + result := mock.NewMockResult(mockController) + result.EXPECT().RowsAffected().Return(int64(1), nil) + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(result, nil) + }, + input: &domain.Environment{ + Environment: &proto.Environment{Id: "eid-0"}, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newEnvironmentStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + err := storage.UpdateEnvironment(context.Background(), p.input) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestGetEnvironment(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer 
mockController.Finish() + patterns := map[string]struct { + setup func(*environmentStorage) + id string + expectedErr error + }{ + "ErrEnvironmentNotFound": { + setup: func(s *environmentStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + id: "id-0", + expectedErr: ErrEnvironmentNotFound, + }, + "Error": { + setup: func(s *environmentStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(errors.New("error")) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + + }, + id: "id-0", + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *environmentStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + id: "id-0", + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newEnvironmentStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + _, err := storage.GetEnvironment(context.Background(), p.id) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestGetEnvironmentByNamespace(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*environmentStorage) + ns string + deleted bool + expectedErr error + }{ + "ErrEnvironmentNotFound": { + setup: func(s *environmentStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + ns: "ns-0", + deleted: false, + 
expectedErr: ErrEnvironmentNotFound, + }, + "Error": { + setup: func(s *environmentStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(errors.New("error")) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + + }, + ns: "ns-0", + deleted: false, + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *environmentStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + ns: "ns-0", + deleted: false, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newEnvironmentStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + _, err := storage.GetEnvironmentByNamespace(context.Background(), p.ns, p.deleted) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestListEnvironments(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*environmentStorage) + whereParts []mysql.WherePart + orders []*mysql.Order + limit int + offset int + expected []*proto.Environment + expectedCursor int + expectedErr error + }{ + "Error": { + setup: func(s *environmentStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + }, + whereParts: nil, + orders: nil, + limit: 0, + offset: 0, + expected: nil, + expectedCursor: 0, + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *environmentStorage) { + rows := mock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryContext( + gomock.Any(), 
gomock.Any(), gomock.Any(), + ).Return(rows, nil) + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + whereParts: []mysql.WherePart{ + mysql.NewFilter("num", ">=", 5), + }, + orders: []*mysql.Order{ + mysql.NewOrder("id", mysql.OrderDirectionAsc), + }, + limit: 10, + offset: 5, + expected: []*proto.Environment{}, + expectedCursor: 5, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newEnvironmentStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + accounts, cursor, _, err := storage.ListEnvironments( + context.Background(), + p.whereParts, + p.orders, + p.limit, + p.offset, + ) + assert.Equal(t, p.expected, accounts) + assert.Equal(t, p.expectedCursor, cursor) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func newEnvironmentStorageWithMock(t *testing.T, mockController *gomock.Controller) *environmentStorage { + t.Helper() + return &environmentStorage{mock.NewMockQueryExecer(mockController)} +} diff --git a/pkg/environment/storage/v2/mock/BUILD.bazel b/pkg/environment/storage/v2/mock/BUILD.bazel new file mode 100644 index 000000000..87b7afdc4 --- /dev/null +++ b/pkg/environment/storage/v2/mock/BUILD.bazel @@ -0,0 +1,17 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "environment.go", + "project.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/environment/storage/v2/mock", + visibility = ["//visibility:public"], + deps = [ + "//pkg/environment/domain:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//proto/environment:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + ], +) diff --git a/pkg/environment/storage/v2/mock/environment.go b/pkg/environment/storage/v2/mock/environment.go new 
file mode 100644 index 000000000..8b8d0d70c --- /dev/null +++ b/pkg/environment/storage/v2/mock/environment.go @@ -0,0 +1,114 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: environment.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + domain "github.com/bucketeer-io/bucketeer/pkg/environment/domain" + mysql "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + environment "github.com/bucketeer-io/bucketeer/proto/environment" +) + +// MockEnvironmentStorage is a mock of EnvironmentStorage interface. +type MockEnvironmentStorage struct { + ctrl *gomock.Controller + recorder *MockEnvironmentStorageMockRecorder +} + +// MockEnvironmentStorageMockRecorder is the mock recorder for MockEnvironmentStorage. +type MockEnvironmentStorageMockRecorder struct { + mock *MockEnvironmentStorage +} + +// NewMockEnvironmentStorage creates a new mock instance. +func NewMockEnvironmentStorage(ctrl *gomock.Controller) *MockEnvironmentStorage { + mock := &MockEnvironmentStorage{ctrl: ctrl} + mock.recorder = &MockEnvironmentStorageMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockEnvironmentStorage) EXPECT() *MockEnvironmentStorageMockRecorder { + return m.recorder +} + +// CreateEnvironment mocks base method. +func (m *MockEnvironmentStorage) CreateEnvironment(ctx context.Context, e *domain.Environment) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateEnvironment", ctx, e) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateEnvironment indicates an expected call of CreateEnvironment. 
+func (mr *MockEnvironmentStorageMockRecorder) CreateEnvironment(ctx, e interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateEnvironment", reflect.TypeOf((*MockEnvironmentStorage)(nil).CreateEnvironment), ctx, e) +} + +// GetEnvironment mocks base method. +func (m *MockEnvironmentStorage) GetEnvironment(ctx context.Context, id string) (*domain.Environment, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEnvironment", ctx, id) + ret0, _ := ret[0].(*domain.Environment) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetEnvironment indicates an expected call of GetEnvironment. +func (mr *MockEnvironmentStorageMockRecorder) GetEnvironment(ctx, id interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEnvironment", reflect.TypeOf((*MockEnvironmentStorage)(nil).GetEnvironment), ctx, id) +} + +// GetEnvironmentByNamespace mocks base method. +func (m *MockEnvironmentStorage) GetEnvironmentByNamespace(ctx context.Context, namespace string, deleted bool) (*domain.Environment, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEnvironmentByNamespace", ctx, namespace, deleted) + ret0, _ := ret[0].(*domain.Environment) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetEnvironmentByNamespace indicates an expected call of GetEnvironmentByNamespace. +func (mr *MockEnvironmentStorageMockRecorder) GetEnvironmentByNamespace(ctx, namespace, deleted interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEnvironmentByNamespace", reflect.TypeOf((*MockEnvironmentStorage)(nil).GetEnvironmentByNamespace), ctx, namespace, deleted) +} + +// ListEnvironments mocks base method. 
+func (m *MockEnvironmentStorage) ListEnvironments(ctx context.Context, whereParts []mysql.WherePart, orders []*mysql.Order, limit, offset int) ([]*environment.Environment, int, int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListEnvironments", ctx, whereParts, orders, limit, offset) + ret0, _ := ret[0].([]*environment.Environment) + ret1, _ := ret[1].(int) + ret2, _ := ret[2].(int64) + ret3, _ := ret[3].(error) + return ret0, ret1, ret2, ret3 +} + +// ListEnvironments indicates an expected call of ListEnvironments. +func (mr *MockEnvironmentStorageMockRecorder) ListEnvironments(ctx, whereParts, orders, limit, offset interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListEnvironments", reflect.TypeOf((*MockEnvironmentStorage)(nil).ListEnvironments), ctx, whereParts, orders, limit, offset) +} + +// UpdateEnvironment mocks base method. +func (m *MockEnvironmentStorage) UpdateEnvironment(ctx context.Context, e *domain.Environment) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateEnvironment", ctx, e) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateEnvironment indicates an expected call of UpdateEnvironment. +func (mr *MockEnvironmentStorageMockRecorder) UpdateEnvironment(ctx, e interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateEnvironment", reflect.TypeOf((*MockEnvironmentStorage)(nil).UpdateEnvironment), ctx, e) +} diff --git a/pkg/environment/storage/v2/mock/project.go b/pkg/environment/storage/v2/mock/project.go new file mode 100644 index 000000000..9208ac418 --- /dev/null +++ b/pkg/environment/storage/v2/mock/project.go @@ -0,0 +1,114 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: project.go + +// Package mock is a generated GoMock package. 
+package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + domain "github.com/bucketeer-io/bucketeer/pkg/environment/domain" + mysql "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + environment "github.com/bucketeer-io/bucketeer/proto/environment" +) + +// MockProjectStorage is a mock of ProjectStorage interface. +type MockProjectStorage struct { + ctrl *gomock.Controller + recorder *MockProjectStorageMockRecorder +} + +// MockProjectStorageMockRecorder is the mock recorder for MockProjectStorage. +type MockProjectStorageMockRecorder struct { + mock *MockProjectStorage +} + +// NewMockProjectStorage creates a new mock instance. +func NewMockProjectStorage(ctrl *gomock.Controller) *MockProjectStorage { + mock := &MockProjectStorage{ctrl: ctrl} + mock.recorder = &MockProjectStorageMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockProjectStorage) EXPECT() *MockProjectStorageMockRecorder { + return m.recorder +} + +// CreateProject mocks base method. +func (m *MockProjectStorage) CreateProject(ctx context.Context, p *domain.Project) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateProject", ctx, p) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateProject indicates an expected call of CreateProject. +func (mr *MockProjectStorageMockRecorder) CreateProject(ctx, p interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateProject", reflect.TypeOf((*MockProjectStorage)(nil).CreateProject), ctx, p) +} + +// GetProject mocks base method. +func (m *MockProjectStorage) GetProject(ctx context.Context, id string) (*domain.Project, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetProject", ctx, id) + ret0, _ := ret[0].(*domain.Project) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetProject indicates an expected call of GetProject. 
+func (mr *MockProjectStorageMockRecorder) GetProject(ctx, id interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProject", reflect.TypeOf((*MockProjectStorage)(nil).GetProject), ctx, id) +} + +// GetTrialProjectByEmail mocks base method. +func (m *MockProjectStorage) GetTrialProjectByEmail(ctx context.Context, email string, disabled, trial bool) (*domain.Project, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTrialProjectByEmail", ctx, email, disabled, trial) + ret0, _ := ret[0].(*domain.Project) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTrialProjectByEmail indicates an expected call of GetTrialProjectByEmail. +func (mr *MockProjectStorageMockRecorder) GetTrialProjectByEmail(ctx, email, disabled, trial interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTrialProjectByEmail", reflect.TypeOf((*MockProjectStorage)(nil).GetTrialProjectByEmail), ctx, email, disabled, trial) +} + +// ListProjects mocks base method. +func (m *MockProjectStorage) ListProjects(ctx context.Context, whereParts []mysql.WherePart, orders []*mysql.Order, limit, offset int) ([]*environment.Project, int, int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListProjects", ctx, whereParts, orders, limit, offset) + ret0, _ := ret[0].([]*environment.Project) + ret1, _ := ret[1].(int) + ret2, _ := ret[2].(int64) + ret3, _ := ret[3].(error) + return ret0, ret1, ret2, ret3 +} + +// ListProjects indicates an expected call of ListProjects. +func (mr *MockProjectStorageMockRecorder) ListProjects(ctx, whereParts, orders, limit, offset interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListProjects", reflect.TypeOf((*MockProjectStorage)(nil).ListProjects), ctx, whereParts, orders, limit, offset) +} + +// UpdateProject mocks base method. 
+func (m *MockProjectStorage) UpdateProject(ctx context.Context, p *domain.Project) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateProject", ctx, p) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateProject indicates an expected call of UpdateProject. +func (mr *MockProjectStorageMockRecorder) UpdateProject(ctx, p interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateProject", reflect.TypeOf((*MockProjectStorage)(nil).UpdateProject), ctx, p) +} diff --git a/pkg/environment/storage/v2/project.go b/pkg/environment/storage/v2/project.go new file mode 100644 index 000000000..1d4b32cb6 --- /dev/null +++ b/pkg/environment/storage/v2/project.go @@ -0,0 +1,279 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package v2 + +import ( + "context" + "errors" + "fmt" + + "github.com/bucketeer-io/bucketeer/pkg/environment/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + proto "github.com/bucketeer-io/bucketeer/proto/environment" +) + +var ( + ErrProjectAlreadyExists = errors.New("project: already exists") + ErrProjectNotFound = errors.New("project: not found") + ErrProjectUnexpectedAffectedRows = errors.New("project: unexpected affected rows") +) + +type ProjectStorage interface { + CreateProject(ctx context.Context, p *domain.Project) error + UpdateProject(ctx context.Context, p *domain.Project) error + GetProject(ctx context.Context, id string) (*domain.Project, error) + GetTrialProjectByEmail( + ctx context.Context, + email string, + disabled, trial bool, + ) (*domain.Project, error) + ListProjects( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, + ) ([]*proto.Project, int, int64, error) +} + +type projectStorage struct { + qe mysql.QueryExecer +} + +func NewProjectStorage(qe mysql.QueryExecer) ProjectStorage { + return &projectStorage{qe} +} + +func (s *projectStorage) CreateProject(ctx context.Context, p *domain.Project) error { + query := ` + INSERT INTO project ( + id, + description, + disabled, + trial, + creator_email, + created_at, + updated_at + ) VALUES ( + ?, ?, ?, ?, ?, ?, ? + ) + ` + _, err := s.qe.ExecContext( + ctx, + query, + p.Id, + p.Description, + p.Disabled, + p.Trial, + p.CreatorEmail, + p.CreatedAt, + p.UpdatedAt, + ) + if err != nil { + if err == mysql.ErrDuplicateEntry { + return ErrProjectAlreadyExists + } + return err + } + return nil +} + +func (s *projectStorage) UpdateProject(ctx context.Context, p *domain.Project) error { + query := ` + UPDATE + project + SET + description = ?, + disabled = ?, + trial = ?, + creator_email = ?, + created_at = ?, + updated_at = ? + WHERE + id = ? 
+ ` + result, err := s.qe.ExecContext( + ctx, + query, + p.Description, + p.Disabled, + p.Trial, + p.CreatorEmail, + p.CreatedAt, + p.UpdatedAt, + p.Id, + ) + if err != nil { + return err + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return err + } + if rowsAffected != 1 { + return ErrProjectUnexpectedAffectedRows + } + return nil +} + +func (s *projectStorage) GetProject(ctx context.Context, id string) (*domain.Project, error) { + project := proto.Project{} + query := ` + SELECT + id, + description, + disabled, + trial, + creator_email, + created_at, + updated_at + FROM + project + WHERE + id = ? + ` + err := s.qe.QueryRowContext( + ctx, + query, + id, + ).Scan( + &project.Id, + &project.Description, + &project.Disabled, + &project.Trial, + &project.CreatorEmail, + &project.CreatedAt, + &project.UpdatedAt, + ) + if err != nil { + if err == mysql.ErrNoRows { + return nil, ErrProjectNotFound + } + return nil, err + } + return &domain.Project{Project: &project}, nil +} + +func (s *projectStorage) GetTrialProjectByEmail( + ctx context.Context, + email string, + disabled, trial bool, +) (*domain.Project, error) { + project := proto.Project{} + query := ` + SELECT + id, + description, + disabled, + trial, + creator_email, + created_at, + updated_at + FROM + project + WHERE + creator_email = ? AND + disabled = ? AND + trial = ? 
+ ` + err := s.qe.QueryRowContext( + ctx, + query, + email, + disabled, + trial, + ).Scan( + &project.Id, + &project.Description, + &project.Disabled, + &project.Trial, + &project.CreatorEmail, + &project.CreatedAt, + &project.UpdatedAt, + ) + if err != nil { + if err == mysql.ErrNoRows { + return nil, ErrProjectNotFound + } + return nil, err + } + return &domain.Project{Project: &project}, nil + +} + +func (s *projectStorage) ListProjects( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, +) ([]*proto.Project, int, int64, error) { + whereSQL, whereArgs := mysql.ConstructWhereSQLString(whereParts) + orderBySQL := mysql.ConstructOrderBySQLString(orders) + limitOffsetSQL := mysql.ConstructLimitOffsetSQLString(limit, offset) + query := fmt.Sprintf(` + SELECT + id, + description, + disabled, + trial, + creator_email, + created_at, + updated_at + FROM + project + %s %s %s + `, whereSQL, orderBySQL, limitOffsetSQL, + ) + rows, err := s.qe.QueryContext(ctx, query, whereArgs...) 
+ if err != nil { + return nil, 0, 0, err + } + defer rows.Close() + projects := make([]*proto.Project, 0, limit) + for rows.Next() { + project := proto.Project{} + err := rows.Scan( + &project.Id, + &project.Description, + &project.Disabled, + &project.Trial, + &project.CreatorEmail, + &project.CreatedAt, + &project.UpdatedAt, + ) + if err != nil { + return nil, 0, 0, err + } + projects = append(projects, &project) + } + if rows.Err() != nil { + return nil, 0, 0, err + } + nextOffset := offset + len(projects) + var totalCount int64 + countQuery := fmt.Sprintf(` + SELECT + COUNT(1) + FROM + project + %s %s + `, whereSQL, orderBySQL, + ) + err = s.qe.QueryRowContext(ctx, countQuery, whereArgs...).Scan(&totalCount) + if err != nil { + return nil, 0, 0, err + } + return projects, nextOffset, totalCount, nil +} diff --git a/pkg/environment/storage/v2/project_test.go b/pkg/environment/storage/v2/project_test.go new file mode 100644 index 000000000..8d75167af --- /dev/null +++ b/pkg/environment/storage/v2/project_test.go @@ -0,0 +1,352 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v2 + +import ( + "context" + "errors" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/environment/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + proto "github.com/bucketeer-io/bucketeer/proto/environment" +) + +func TestNewProjectStorage(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + storage := NewProjectStorage(mock.NewMockQueryExecer(mockController)) + assert.IsType(t, &projectStorage{}, storage) +} + +func TestCreateProject(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*projectStorage) + input *domain.Project + expectedErr error + }{ + "ErrProjectAlreadyExists": { + setup: func(s *projectStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, mysql.ErrDuplicateEntry) + }, + input: &domain.Project{ + Project: &proto.Project{Id: "pid-0"}, + }, + expectedErr: ErrProjectAlreadyExists, + }, + "Error": { + setup: func(s *projectStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + }, + input: &domain.Project{ + Project: &proto.Project{Id: "pid-0"}, + }, + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *projectStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, nil) + }, + input: &domain.Project{ + Project: &proto.Project{Id: "pid-0"}, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newProjectStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + err := 
storage.CreateProject(context.Background(), p.input) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestUpdateProject(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*projectStorage) + input *domain.Project + expectedErr error + }{ + "ErrProjectUnexpectedAffectedRows": { + setup: func(s *projectStorage) { + result := mock.NewMockResult(mockController) + result.EXPECT().RowsAffected().Return(int64(0), nil) + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(result, nil) + }, + input: &domain.Project{ + Project: &proto.Project{Id: "pid-0"}, + }, + expectedErr: ErrProjectUnexpectedAffectedRows, + }, + "Error": { + setup: func(s *projectStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + }, + input: &domain.Project{ + Project: &proto.Project{Id: "pid-0"}, + }, + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *projectStorage) { + result := mock.NewMockResult(mockController) + result.EXPECT().RowsAffected().Return(int64(1), nil) + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(result, nil) + }, + input: &domain.Project{ + Project: &proto.Project{Id: "pid-0"}, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newProjectStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + err := storage.UpdateProject(context.Background(), p.input) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestGetProject(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*projectStorage) + id string + expectedErr error + }{ + "ErrProjectNotFound": { + 
setup: func(s *projectStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + id: "id-0", + expectedErr: ErrProjectNotFound, + }, + "Error": { + setup: func(s *projectStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(errors.New("error")) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + + }, + id: "id-0", + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *projectStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + id: "id-0", + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newProjectStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + _, err := storage.GetProject(context.Background(), p.id) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestGetTrialProjectByEmail(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*projectStorage) + email string + disabled bool + trial bool + expectedErr error + }{ + "ErrProjectNotFound": { + setup: func(s *projectStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + email: "test@example.com", + disabled: false, + trial: false, + expectedErr: ErrProjectNotFound, + }, + "Error": { + setup: func(s *projectStorage) { + row := mock.NewMockRow(mockController) + 
row.EXPECT().Scan(gomock.Any()).Return(errors.New("error")) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + + }, + email: "test@example.com", + disabled: false, + trial: false, + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *projectStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + email: "test@example.com", + disabled: false, + trial: false, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newProjectStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + _, err := storage.GetTrialProjectByEmail(context.Background(), p.email, p.disabled, p.trial) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestListProjects(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*projectStorage) + whereParts []mysql.WherePart + orders []*mysql.Order + limit int + offset int + expected []*proto.Project + expectedCursor int + expectedErr error + }{ + "Error": { + setup: func(s *projectStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + }, + whereParts: nil, + orders: nil, + limit: 0, + offset: 0, + expected: nil, + expectedCursor: 0, + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *projectStorage) { + rows := mock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + row := mock.NewMockRow(mockController) + 
row.EXPECT().Scan(gomock.Any()).Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + whereParts: []mysql.WherePart{ + mysql.NewFilter("num", ">=", 5), + }, + orders: []*mysql.Order{ + mysql.NewOrder("id", mysql.OrderDirectionAsc), + }, + limit: 10, + offset: 5, + expected: []*proto.Project{}, + expectedCursor: 5, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newProjectStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + accounts, cursor, _, err := storage.ListProjects( + context.Background(), + p.whereParts, + p.orders, + p.limit, + p.offset, + ) + assert.Equal(t, p.expected, accounts) + assert.Equal(t, p.expectedCursor, cursor) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func newProjectStorageWithMock(t *testing.T, mockController *gomock.Controller) *projectStorage { + t.Helper() + return &projectStorage{mock.NewMockQueryExecer(mockController)} +} diff --git a/pkg/errgroup/BUILD.bazel b/pkg/errgroup/BUILD.bazel new file mode 100644 index 000000000..5680c97f4 --- /dev/null +++ b/pkg/errgroup/BUILD.bazel @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["errgroup.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/errgroup", + visibility = ["//visibility:public"], + deps = ["@org_golang_x_sync//errgroup:go_default_library"], +) + +go_test( + name = "go_default_test", + srcs = ["errgroup_test.go"], + embed = [":go_default_library"], + deps = ["@com_github_stretchr_testify//assert:go_default_library"], +) diff --git a/pkg/errgroup/errgroup.go b/pkg/errgroup/errgroup.go new file mode 100644 index 000000000..d819e4ef6 --- /dev/null +++ b/pkg/errgroup/errgroup.go @@ -0,0 +1,75 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errgroup + +import ( + "errors" + "fmt" + "runtime" + "strings" + "sync/atomic" + + eg "golang.org/x/sync/errgroup" +) + +type Group struct { + eg.Group + finishedCount int32 + failedCount int32 +} + +func (g *Group) Go(f func() error) <-chan struct{} { + doneCh := make(chan struct{}) + g.Group.Go(func() (err error) { + defer func() { + atomic.AddInt32(&g.finishedCount, 1) + if r := recover(); r != nil { + fmt.Printf("errgroup: recovered, err: %v, stacktrace: %s\n", r, takeStacktrace()) + err = errors.New("errgroup: panic") + } + if err != nil { + atomic.AddInt32(&g.failedCount, 1) + } + close(doneCh) + }() + err = f() + return + }) + return doneCh +} + +func (g *Group) FinishedCount() int32 { + return atomic.LoadInt32(&g.finishedCount) +} + +func (g *Group) FailedCount() int32 { + return atomic.LoadInt32(&g.failedCount) +} + +func takeStacktrace() string { + callers := make([]string, 0, 16) + var pc [16]uintptr + n := runtime.Callers(2, pc[:]) + for _, pc := range pc[:n] { + fn := runtime.FuncForPC(pc) + if fn == nil { + continue + } + file, line := fn.FileLine(pc) + name := fn.Name() + callers = append(callers, fmt.Sprintf("%s\n\t%s:%d", file, name, line)) + } + return strings.Join(callers, "\n") +} diff --git a/pkg/errgroup/errgroup_test.go b/pkg/errgroup/errgroup_test.go new file mode 100644 index 000000000..1a6d29435 --- /dev/null +++ b/pkg/errgroup/errgroup_test.go @@ -0,0 +1,73 @@ +// 
Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errgroup + +import ( + "errors" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestGroup(t *testing.T) { + testcases := []struct { + err error + finished int32 + failed int32 + }{ + { + err: nil, + finished: 1, + failed: 0, + }, + { + err: errors.New("test"), + finished: 1, + failed: 1, + }, + } + for i, tc := range testcases { + g := Group{} + doneCh := g.Go(func() error { + return tc.err + }) + <-doneCh + des := fmt.Sprintf("index: %d", i) + assert.Equal(t, tc.finished, g.FinishedCount(), des) + assert.Equal(t, tc.failed, g.FailedCount(), des) + } +} + +func TestGroupNoFinished(t *testing.T) { + g := Group{} + g.Go(func() error { + time.Sleep(time.Second) + return nil + }) + assert.Equal(t, int32(0), g.FinishedCount()) + assert.Equal(t, int32(0), g.FailedCount()) +} + +func TestGroupPanic(t *testing.T) { + g := Group{} + doneCh := g.Go(func() error { + panic("test") + }) + <-doneCh + assert.Equal(t, int32(1), g.FinishedCount()) + assert.Equal(t, int32(1), g.FailedCount()) +} diff --git a/pkg/eventcounter/api/BUILD.bazel b/pkg/eventcounter/api/BUILD.bazel new file mode 100644 index 000000000..be9154b4b --- /dev/null +++ b/pkg/eventcounter/api/BUILD.bazel @@ -0,0 +1,69 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "api.go", + "error.go", + 
"metrics.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/eventcounter/api", + visibility = ["//visibility:public"], + deps = [ + "//pkg/account/client:go_default_library", + "//pkg/eventcounter/druid:go_default_library", + "//pkg/eventcounter/storage/v2:go_default_library", + "//pkg/experiment/client:go_default_library", + "//pkg/feature/client:go_default_library", + "//pkg/locale:go_default_library", + "//pkg/log:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/role:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/rpc/status:go_default_library", + "//pkg/storage:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//proto/account:go_default_library", + "//proto/event/domain:go_default_library", + "//proto/eventcounter:go_default_library", + "//proto/experiment:go_default_library", + "//proto/feature:go_default_library", + "@com_github_prometheus_client_golang//prometheus:go_default_library", + "@go_googleapis//google/rpc:errdetails_go_proto", + "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["api_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/account/client/mock:go_default_library", + "//pkg/eventcounter/domain:go_default_library", + "//pkg/eventcounter/druid:go_default_library", + "//pkg/eventcounter/druid/mock:go_default_library", + "//pkg/eventcounter/storage/v2:go_default_library", + "//pkg/eventcounter/storage/v2/mock:go_default_library", + "//pkg/experiment/client/mock:go_default_library", + "//pkg/feature/client/mock:go_default_library", + "//pkg/locale:go_default_library", + "//pkg/log:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/rpc:go_default_library", + 
"//pkg/storage:go_default_library", + "//pkg/token:go_default_library", + "//proto/account:go_default_library", + "//proto/eventcounter:go_default_library", + "//proto/experiment:go_default_library", + "//proto/feature:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", + ], +) diff --git a/pkg/eventcounter/api/api.go b/pkg/eventcounter/api/api.go new file mode 100644 index 000000000..ebb8789de --- /dev/null +++ b/pkg/eventcounter/api/api.go @@ -0,0 +1,673 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "context" + "errors" + "sort" + "time" + + "github.com/golang/protobuf/ptypes/wrappers" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + accountclient "github.com/bucketeer-io/bucketeer/pkg/account/client" + ecdruid "github.com/bucketeer-io/bucketeer/pkg/eventcounter/druid" + v2ecstorage "github.com/bucketeer-io/bucketeer/pkg/eventcounter/storage/v2" + experimentclient "github.com/bucketeer-io/bucketeer/pkg/experiment/client" + featureclient "github.com/bucketeer-io/bucketeer/pkg/feature/client" + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/role" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/storage" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" + ecproto "github.com/bucketeer-io/bucketeer/proto/eventcounter" + experimentproto "github.com/bucketeer-io/bucketeer/proto/experiment" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +const listRequestPageSize = 500 + +var ( + jpLocation = time.FixedZone("Asia/Tokyo", 9*60*60) +) + +type eventCounterService struct { + experimentClient experimentclient.Client + featureClient featureclient.Client + accountClient accountclient.Client + druidQuerier ecdruid.Querier + mysqlExperimentResultStorage v2ecstorage.ExperimentResultStorage + metrics metrics.Registerer + logger *zap.Logger +} + +func NewEventCounterService( + mc mysql.Client, + e experimentclient.Client, + f featureclient.Client, + a accountclient.Client, + d ecdruid.Querier, + r metrics.Registerer, + l *zap.Logger, +) rpc.Service { + registerMetrics(r) + return &eventCounterService{ + experimentClient: e, + featureClient: f, + 
accountClient: a, + druidQuerier: d, + mysqlExperimentResultStorage: v2ecstorage.NewExperimentResultStorage(mc), + metrics: r, + logger: l.Named("api"), + } +} + +func (s *eventCounterService) Register(server *grpc.Server) { + ecproto.RegisterEventCounterServiceServer(server, s) +} + +func (s *eventCounterService) GetEvaluationCountV2( + ctx context.Context, + req *ecproto.GetEvaluationCountV2Request, +) (*ecproto.GetEvaluationCountV2Response, error) { + _, err := s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err = validateGetEvaluationCountV2Request(req); err != nil { + return nil, err + } + startAt := time.Unix(req.StartAt, 0) + endAt := time.Unix(req.EndAt, 0) + headers, rows, err := s.druidQuerier.QueryEvaluationCount( + ctx, + req.EnvironmentNamespace, + startAt, + endAt, + req.FeatureId, + req.FeatureVersion, + "", + []string{}, []*ecproto.Filter{}, + ) + if err != nil { + s.logger.Error( + "Failed to query evaluation counts", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + zap.Time("startAt", startAt), + zap.Time("endAt", endAt), + zap.String("featureId", req.FeatureId), + zap.Int32("featureVersion", req.FeatureVersion), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + vcs, err := convToVariationCounts(headers, rows, req.VariationIds) + if err != nil { + return nil, localizedError(statusInternal, locale.JaJP) + } + return &ecproto.GetEvaluationCountV2Response{ + Count: &ecproto.EvaluationCount{ + FeatureId: req.FeatureId, + FeatureVersion: req.FeatureVersion, + RealtimeCounts: vcs, + }, + }, nil +} + +func validateGetEvaluationCountV2Request(req *ecproto.GetEvaluationCountV2Request) error { + if req.StartAt == 0 { + return localizedError(statusStartAtRequired, locale.JaJP) + } + if req.EndAt == 0 { + return localizedError(statusEndAtRequired, locale.JaJP) + } + if req.StartAt > 
req.EndAt { + return localizedError(statusStartAtIsAfterEndAt, locale.JaJP) + } + if req.FeatureId == "" { + return localizedError(statusFeatureIDRequired, locale.JaJP) + } + return nil +} + +func convToVariationCounts( + headers *ecproto.Row, + rows []*ecproto.Row, + variationIDs []string, +) ([]*ecproto.VariationCount, error) { + vcsMap := map[string]*ecproto.VariationCount{} + for _, id := range variationIDs { + vcsMap[id] = &ecproto.VariationCount{VariationId: id} + } + varIdx, err := variationIdx(headers) + if err != nil { + return nil, err + } + for _, row := range rows { + vid := row.Cells[varIdx].Value + vc, ok := vcsMap[vid] + if !ok { + continue + } + for i, cell := range row.Cells { + switch headers.Cells[i].Value { + // Evaluation. + case ecdruid.ColumnEvaluationTotal: + vc.EventCount = int64(cell.ValueDouble) + case ecdruid.ColumnEvaluationUser: + vc.UserCount = int64(cell.ValueDouble) + // Goal. + case ecdruid.ColumnGoalTotal: + vc.EventCount = int64(cell.ValueDouble) + case ecdruid.ColumnGoalUser: + vc.UserCount = int64(cell.ValueDouble) + case ecdruid.ColumnGoalValueTotal: + vc.ValueSum = cell.ValueDouble + case ecdruid.ColumnGoalValueMean: + vc.ValueSumPerUserMean = cell.ValueDouble + case ecdruid.ColumnGoalValueVariance: + vc.ValueSumPerUserVariance = cell.ValueDouble + } + } + vcsMap[vid] = vc + } + vcs := []*ecproto.VariationCount{} + for _, vc := range vcsMap { + vcs = append(vcs, vc) + } + sort.SliceStable(vcs, func(i, j int) bool { return vcs[i].VariationId < vcs[j].VariationId }) + return vcs, nil +} + +func variationIdx(headers *ecproto.Row) (int, error) { + for i, cell := range headers.Cells { + if cell.Value == ecdruid.ColumnVariation { + return i, nil + } + } + return 0, errors.New("eventcounter: variation header not found") +} + +func (s *eventCounterService) GetEvaluationTimeseriesCount( + ctx context.Context, + req *ecproto.GetEvaluationTimeseriesCountRequest, +) (*ecproto.GetEvaluationTimeseriesCountResponse, error) { + _, err := 
s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if req.FeatureId == "" { + return nil, localizedError(statusFeatureIDRequired, locale.JaJP) + } + resp, err := s.featureClient.GetFeature(ctx, &featureproto.GetFeatureRequest{ + EnvironmentNamespace: req.EnvironmentNamespace, + Id: req.FeatureId, + }) + if err != nil { + s.logger.Error( + "Failed to get feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + zap.String("featureId", req.FeatureId), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + endAt := time.Now() + startAt, err := genInterval(jpLocation, endAt, 30) + if err != nil { + return nil, localizedError(statusInternal, locale.JaJP) + } + variationTSEvents := []*ecproto.VariationTimeseries{} + variationTSUsers := []*ecproto.VariationTimeseries{} + for _, variation := range resp.Feature.Variations { + varTS, err := s.druidQuerier.QueryEvaluationTimeseriesCount( + ctx, + req.EnvironmentNamespace, + startAt, + endAt, + req.FeatureId, + 0, + variation.Id, + ) + if err != nil { + s.logger.Error( + "Failed to query goal counts", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + zap.Time("startAt", startAt), + zap.Time("endAt", endAt), + zap.String("featureId", req.FeatureId), + zap.Int32("featureVersion", resp.Feature.Version), + zap.String("variationId", variation.Id), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + variationTSEvents = append(variationTSEvents, varTS[ecdruid.ColumnEvaluationTotal]) + variationTSUsers = append(variationTSUsers, varTS[ecdruid.ColumnEvaluationUser]) + } + return &ecproto.GetEvaluationTimeseriesCountResponse{ + EventCounts: variationTSEvents, + UserCounts: variationTSUsers, + }, nil +} + +func genInterval(loc *time.Location, endAt time.Time, 
durationDays int) (time.Time, error) { + year, month, day := endAt.In(loc).AddDate(0, 0, -durationDays).Date() + return time.Date(year, month, day, 0, 0, 0, 0, loc), nil +} + +func (s *eventCounterService) GetExperimentResult( + ctx context.Context, + req *ecproto.GetExperimentResultRequest, +) (*ecproto.GetExperimentResultResponse, error) { + _, err := s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if req.ExperimentId == "" { + return nil, localizedError(statusExperimentIDRequired, locale.JaJP) + } + result, err := s.mysqlExperimentResultStorage.GetExperimentResult(ctx, req.ExperimentId, req.EnvironmentNamespace) + if err != nil { + if err == v2ecstorage.ErrExperimentResultNotFound { + return nil, localizedError(statusNotFound, locale.JaJP) + } + s.logger.Error( + "Failed to get experiment result", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + zap.String("experimentId", req.ExperimentId), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &ecproto.GetExperimentResultResponse{ + ExperimentResult: result.ExperimentResult, + }, nil +} + +func (s *eventCounterService) ListExperimentResults( + ctx context.Context, + req *ecproto.ListExperimentResultsRequest, +) (*ecproto.ListExperimentResultsResponse, error) { + _, err := s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if req.FeatureId == "" { + return nil, localizedError(statusFeatureIDRequired, locale.JaJP) + } + experiments, err := s.listExperiments(ctx, req.FeatureId, req.FeatureVersion, req.EnvironmentNamespace) + if err != nil { + if err == storage.ErrKeyNotFound { + listExperimentCountsCounter.WithLabelValues(codeSuccess).Inc() + return nil, localizedError(statusNotFound, locale.JaJP) + } + s.logger.Error( + "Failed to get Experiment list", + 
log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + zap.String("featureID", req.FeatureId), + zap.Int32("featureVersion", req.FeatureVersion.Value), + )..., + ) + listExperimentCountsCounter.WithLabelValues(codeFail).Inc() + return nil, localizedError(statusInternal, locale.JaJP) + } + results := make(map[string]*ecproto.ExperimentResult, len(experiments)) + for _, e := range experiments { + er, err := s.getExperimentResultMySQL(ctx, e.Id, req.EnvironmentNamespace) + if err != nil { + if err == v2ecstorage.ErrExperimentResultNotFound { + getExperimentCountsCounter.WithLabelValues(codeSuccess).Inc() + } else { + s.logger.Error( + "Failed to get Experiment result", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + zap.String("experimentID", e.Id), + )..., + ) + getExperimentCountsCounter.WithLabelValues(codeFail).Inc() + } + continue + } + getExperimentCountsCounter.WithLabelValues(codeSuccess).Inc() + results[e.Id] = er + } + listExperimentCountsCounter.WithLabelValues(codeSuccess).Inc() + return &ecproto.ListExperimentResultsResponse{Results: results}, nil +} + +func (s *eventCounterService) listExperiments( + ctx context.Context, + featureID string, + featureVersion *wrappers.Int32Value, + environmentNamespace string, +) ([]*experimentproto.Experiment, error) { + experiments := []*experimentproto.Experiment{} + cursor := "" + for { + resp, err := s.experimentClient.ListExperiments(ctx, &experimentproto.ListExperimentsRequest{ + FeatureId: featureID, + FeatureVersion: featureVersion, + PageSize: listRequestPageSize, + Cursor: cursor, + EnvironmentNamespace: environmentNamespace, + }) + if err != nil { + return nil, err + } + experiments = append(experiments, resp.Experiments...) 
+ featureSize := len(resp.Experiments) + if featureSize == 0 || featureSize < listRequestPageSize { + return experiments, nil + } + cursor = resp.Cursor + } +} + +func (s *eventCounterService) GetGoalCount( + ctx context.Context, + req *ecproto.GetGoalCountRequest, +) (*ecproto.GetGoalCountResponse, error) { + _, err := s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := validateGetGoalCountsRequest(req); err != nil { + return nil, err + } + startAt := time.Unix(req.StartAt, 0) + endAt := time.Unix(req.EndAt, 0) + headers, rows, err := s.druidQuerier.QueryCount( + ctx, + req.EnvironmentNamespace, + startAt, + endAt, + req.GoalId, + req.FeatureId, + req.FeatureVersion, + req.Reason, + req.Segments, + req.Filters, + ) + if err != nil { + s.logger.Error( + "Failed to query goal counts", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + zap.Time("startAt", startAt), + zap.Time("endAt", endAt), + zap.String("featureId", req.FeatureId), + zap.Int32("featureVersion", req.FeatureVersion), + zap.Strings("segments", req.Segments), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &ecproto.GetGoalCountResponse{Headers: headers, Rows: rows}, nil +} + +func validateGetGoalCountsRequest(req *ecproto.GetGoalCountRequest) error { + if req.StartAt == 0 { + return localizedError(statusStartAtRequired, locale.JaJP) + } + if req.EndAt == 0 { + return localizedError(statusEndAtRequired, locale.JaJP) + } + if req.StartAt > req.EndAt { + return localizedError(statusStartAtIsAfterEndAt, locale.JaJP) + } + if req.StartAt < time.Now().Add(-31*24*time.Hour).Unix() { + return localizedError(statusPeriodOutOfRange, locale.JaJP) + } + if req.GoalId == "" { + return localizedError(statusGoalIDRequired, locale.JaJP) + } + return nil +} + +func (s *eventCounterService) GetGoalCountV2( + ctx context.Context, + req 
*ecproto.GetGoalCountV2Request, +) (*ecproto.GetGoalCountV2Response, error) { + _, err := s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err = validateGetGoalCountV2Request(req); err != nil { + return nil, err + } + startAt := time.Unix(req.StartAt, 0) + endAt := time.Unix(req.EndAt, 0) + headers, rows, err := s.druidQuerier.QueryGoalCount( + ctx, + req.EnvironmentNamespace, + startAt, + endAt, + req.GoalId, + req.FeatureId, + req.FeatureVersion, + "", + []string{}, []*ecproto.Filter{}, + ) + if err != nil { + s.logger.Error( + "Failed to query goal counts", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + zap.Time("startAt", startAt), + zap.Time("endAt", endAt), + zap.String("featureId", req.FeatureId), + zap.Int32("featureVersion", req.FeatureVersion), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + vcs, err := convToVariationCounts(headers, rows, req.VariationIds) + if err != nil { + return nil, localizedError(statusInternal, locale.JaJP) + } + return &ecproto.GetGoalCountV2Response{ + GoalCounts: &ecproto.GoalCounts{ + GoalId: req.GoalId, + RealtimeCounts: vcs, + }, + }, nil +} + +func validateGetGoalCountV2Request(req *ecproto.GetGoalCountV2Request) error { + if req.StartAt == 0 { + return localizedError(statusStartAtRequired, locale.JaJP) + } + if req.EndAt == 0 { + return localizedError(statusEndAtRequired, locale.JaJP) + } + if req.StartAt > req.EndAt { + return localizedError(statusStartAtIsAfterEndAt, locale.JaJP) + } + if req.GoalId == "" { + return localizedError(statusGoalIDRequired, locale.JaJP) + } + return nil +} + +func (s *eventCounterService) GetUserCountV2( + ctx context.Context, + req *ecproto.GetUserCountV2Request, +) (*ecproto.GetUserCountV2Response, error) { + _, err := s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace) + if err != nil { + 
return nil, err + } + if err = validateGetUserCountV2Request(req); err != nil { + return nil, err + } + startAt := time.Unix(req.StartAt, 0) + endAt := time.Unix(req.EndAt, 0) + headers, rows, err := s.druidQuerier.QueryUserCount(ctx, req.EnvironmentNamespace, startAt, endAt) + if err != nil { + s.logger.Error( + "Failed to query user count", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + zap.Time("startAt", startAt), + zap.Time("endAt", endAt), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + eventCount, userCount := convToUserCount(headers, rows) + return &ecproto.GetUserCountV2Response{ + EventCount: eventCount, + UserCount: userCount, + }, nil +} + +func validateGetUserCountV2Request(req *ecproto.GetUserCountV2Request) error { + if req.StartAt == 0 { + return localizedError(statusStartAtRequired, locale.JaJP) + } + if req.EndAt == 0 { + return localizedError(statusEndAtRequired, locale.JaJP) + } + if req.StartAt > req.EndAt { + return localizedError(statusStartAtIsAfterEndAt, locale.JaJP) + } + return nil +} + +func convToUserCount(headers *ecproto.Row, rows []*ecproto.Row) (eventCount, userCount int64) { + for _, row := range rows { + for i, cell := range row.Cells { + switch headers.Cells[i].Value { + case ecdruid.ColumnUserTotal: + eventCount = int64(cell.ValueDouble) + case ecdruid.ColumnUserCount: + userCount = int64(cell.ValueDouble) + } + } + } + return +} + +func (s *eventCounterService) ListUserMetadata( + ctx context.Context, + req *ecproto.ListUserMetadataRequest, +) (*ecproto.ListUserMetadataResponse, error) { + _, err := s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + data, err := s.druidQuerier.QuerySegmentMetadata(ctx, req.EnvironmentNamespace, ecdruid.DataTypeGoalEvents) + if err != nil { + s.logger.Error( + "Failed to query segment metadata", + 
log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &ecproto.ListUserMetadataResponse{Data: data}, nil +} + +func (s *eventCounterService) getExperimentResultMySQL( + ctx context.Context, + id, environmentNamespace string, +) (*ecproto.ExperimentResult, error) { + result, err := s.mysqlExperimentResultStorage.GetExperimentResult(ctx, id, environmentNamespace) + if err != nil { + if err == v2ecstorage.ErrExperimentResultNotFound { + return nil, err + } + s.logger.Error( + "Failed to get experiment count", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, err + } + return result.ExperimentResult, nil +} + +func (s *eventCounterService) checkRole( + ctx context.Context, + requiredRole accountproto.Account_Role, + environmentNamespace string, +) (*eventproto.Editor, error) { + editor, err := role.CheckRole(ctx, requiredRole, func(email string) (*accountproto.GetAccountResponse, error) { + return s.accountClient.GetAccount(ctx, &accountproto.GetAccountRequest{ + Email: email, + EnvironmentNamespace: environmentNamespace, + }) + }) + if err != nil { + switch status.Code(err) { + case codes.Unauthenticated: + s.logger.Info( + "Unauthenticated", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, localizedError(statusUnauthenticated, locale.JaJP) + case codes.PermissionDenied: + s.logger.Info( + "Permission denied", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, localizedError(statusPermissionDenied, locale.JaJP) + default: + s.logger.Error( + "Failed to check role", + log.FieldsFromImcomingContext(ctx).AddFields( + 
zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + } + return editor, nil +} diff --git a/pkg/eventcounter/api/api_test.go b/pkg/eventcounter/api/api_test.go new file mode 100644 index 000000000..137bc3a5d --- /dev/null +++ b/pkg/eventcounter/api/api_test.go @@ -0,0 +1,1006 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/golang/protobuf/ptypes/wrappers" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/storage" + + accountclientmock "github.com/bucketeer-io/bucketeer/pkg/account/client/mock" + "github.com/bucketeer-io/bucketeer/pkg/eventcounter/domain" + ecdruid "github.com/bucketeer-io/bucketeer/pkg/eventcounter/druid" + dmock "github.com/bucketeer-io/bucketeer/pkg/eventcounter/druid/mock" + v2ecs "github.com/bucketeer-io/bucketeer/pkg/eventcounter/storage/v2" + v2ecsmock "github.com/bucketeer-io/bucketeer/pkg/eventcounter/storage/v2/mock" + experimentclientmock "github.com/bucketeer-io/bucketeer/pkg/experiment/client/mock" + featureclientmock "github.com/bucketeer-io/bucketeer/pkg/feature/client/mock" + 
"github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/token" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + ecproto "github.com/bucketeer-io/bucketeer/proto/eventcounter" + experimentproto "github.com/bucketeer-io/bucketeer/proto/experiment" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +func TestNewEventCounterService(t *testing.T) { + metrics := metrics.NewMetrics( + 9999, + "/metrics", + ) + reg := metrics.DefaultRegisterer() + logger, err := log.NewLogger() + require.NoError(t, err) + g := NewEventCounterService(nil, nil, nil, nil, nil, reg, logger) + assert.IsType(t, &eventCounterService{}, g) +} + +func TestGetEvaluationCountV2(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + now := time.Now() + + patterns := map[string]struct { + setup func(*eventCounterService) + input *ecproto.GetEvaluationCountV2Request + expected *ecproto.GetEvaluationCountV2Response + expectedErr error + }{ + "error: ErrStartAtRequired": { + input: &ecproto.GetEvaluationCountV2Request{ + EnvironmentNamespace: "ns0", + }, + expectedErr: localizedError(statusStartAtRequired, locale.JaJP), + }, + "error: ErrEndAtRequired": { + input: &ecproto.GetEvaluationCountV2Request{ + EnvironmentNamespace: "ns0", + StartAt: now.Add(-7 * 24 * time.Hour).Unix(), + }, + expectedErr: localizedError(statusEndAtRequired, locale.JaJP), + }, + "error: ErrStartAtIsAfterEndAt": { + input: &ecproto.GetEvaluationCountV2Request{ + EnvironmentNamespace: "ns0", + StartAt: now.Unix(), + EndAt: now.Add(-31 * 24 * time.Hour).Unix(), + }, + expectedErr: localizedError(statusStartAtIsAfterEndAt, locale.JaJP), + }, + "error: ErrFeatureIDRequired": { + input: &ecproto.GetEvaluationCountV2Request{ + EnvironmentNamespace: "ns0", + StartAt: now.Add(-30 * 24 * time.Hour).Unix(), + EndAt: now.Unix(), + }, + expectedErr: 
localizedError(statusFeatureIDRequired, locale.JaJP), + }, + "success: one variation": { + setup: func(s *eventCounterService) { + s.druidQuerier.(*dmock.MockQuerier).EXPECT().QueryEvaluationCount(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return( + &ecproto.Row{Cells: []*ecproto.Cell{ + {Value: ecdruid.ColumnVariation}, + {Value: ecdruid.ColumnEvaluationUser}, + {Value: ecdruid.ColumnEvaluationTotal}, + }}, + []*ecproto.Row{ + {Cells: []*ecproto.Cell{ + {Value: "vid0", Type: ecproto.Cell_STRING}, + {ValueDouble: float64(1), Type: ecproto.Cell_DOUBLE}, + {ValueDouble: float64(2), Type: ecproto.Cell_DOUBLE}, + }}, + {Cells: []*ecproto.Cell{ + {Value: "vid1", Type: ecproto.Cell_STRING}, + {ValueDouble: float64(12), Type: ecproto.Cell_DOUBLE}, + {ValueDouble: float64(123), Type: ecproto.Cell_DOUBLE}, + }}, + }, + nil) + }, + input: &ecproto.GetEvaluationCountV2Request{ + EnvironmentNamespace: "ns0", + StartAt: now.Add(-30 * 24 * time.Hour).Unix(), + EndAt: now.Unix(), + FeatureId: "fid", + FeatureVersion: int32(1), + VariationIds: []string{"vid1"}, + }, + expected: &ecproto.GetEvaluationCountV2Response{ + Count: &ecproto.EvaluationCount{ + FeatureId: "fid", + FeatureVersion: int32(1), + RealtimeCounts: []*ecproto.VariationCount{ + { + VariationId: "vid1", + UserCount: int64(12), + EventCount: int64(123), + }, + }, + }, + }, + expectedErr: nil, + }, + "success: all variations": { + setup: func(s *eventCounterService) { + s.druidQuerier.(*dmock.MockQuerier).EXPECT().QueryEvaluationCount(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return( + &ecproto.Row{Cells: []*ecproto.Cell{ + {Value: ecdruid.ColumnVariation}, + {Value: ecdruid.ColumnEvaluationUser}, + {Value: ecdruid.ColumnEvaluationTotal}, + }}, + []*ecproto.Row{ + {Cells: []*ecproto.Cell{ + {Value: "vid0", Type: ecproto.Cell_STRING}, + {ValueDouble: 
float64(1), Type: ecproto.Cell_DOUBLE}, + {ValueDouble: float64(2), Type: ecproto.Cell_DOUBLE}, + }}, + {Cells: []*ecproto.Cell{ + {Value: "vid1", Type: ecproto.Cell_STRING}, + {ValueDouble: float64(12), Type: ecproto.Cell_DOUBLE}, + {ValueDouble: float64(123), Type: ecproto.Cell_DOUBLE}, + }}, + }, + nil) + }, + input: &ecproto.GetEvaluationCountV2Request{ + EnvironmentNamespace: "ns0", + StartAt: now.Add(-30 * 24 * time.Hour).Unix(), + EndAt: now.Unix(), + FeatureId: "fid", + FeatureVersion: int32(1), + VariationIds: []string{"vid0", "vid1"}, + }, + expected: &ecproto.GetEvaluationCountV2Response{ + Count: &ecproto.EvaluationCount{ + FeatureId: "fid", + FeatureVersion: int32(1), + RealtimeCounts: []*ecproto.VariationCount{ + { + VariationId: "vid0", + UserCount: int64(1), + EventCount: int64(2), + }, + { + VariationId: "vid1", + UserCount: int64(12), + EventCount: int64(123), + }, + }, + }, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + gs := newEventCounterService(t, mockController) + if p.setup != nil { + p.setup(gs) + } + actual, err := gs.GetEvaluationCountV2(createContextWithToken(t, accountproto.Account_UNASSIGNED), p.input) + assert.Equal(t, p.expected, actual, "%s", msg) + assert.Equal(t, p.expectedErr, err, "%s", msg) + } +} + +func TestListExperiments(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*eventCounterService) + inputFeatureID string + inputFeatureVersion *wrappers.Int32Value + expected []*experimentproto.Experiment + environmentNamespace string + expectedErr error + }{ + "no error": { + setup: func(s *eventCounterService) { + s.experimentClient.(*experimentclientmock.MockClient).EXPECT().ListExperiments(gomock.Any(), &experimentproto.ListExperimentsRequest{ + FeatureId: "fid", + FeatureVersion: &wrappers.Int32Value{Value: int32(1)}, + PageSize: listRequestPageSize, + Cursor: "", + EnvironmentNamespace: "ns0", + 
}).Return(&experimentproto.ListExperimentsResponse{}, nil)
			},
			inputFeatureID:       "fid",
			inputFeatureVersion:  &wrappers.Int32Value{Value: int32(1)},
			environmentNamespace: "ns0",
			expected:             []*experimentproto.Experiment{},
			expectedErr:          nil,
		},
		"error": {
			setup: func(s *eventCounterService) {
				s.experimentClient.(*experimentclientmock.MockClient).EXPECT().ListExperiments(gomock.Any(), &experimentproto.ListExperimentsRequest{
					FeatureId:            "fid",
					FeatureVersion:       &wrappers.Int32Value{Value: int32(1)},
					PageSize:             listRequestPageSize,
					Cursor:               "",
					EnvironmentNamespace: "ns0",
				}).Return(nil, errors.New("test"))
			},
			inputFeatureID:       "fid",
			inputFeatureVersion:  &wrappers.Int32Value{Value: int32(1)},
			environmentNamespace: "ns0",
			expected:             nil,
			// The helper is expected to return the client error verbatim,
			// not a localized status.
			expectedErr: errors.New("test"),
		},
	}
	for msg, p := range patterns {
		t.Run(msg, func(t *testing.T) {
			s := newEventCounterService(t, mockController)
			p.setup(s)
			actual, err := s.listExperiments(context.Background(), p.inputFeatureID, p.inputFeatureVersion, p.environmentNamespace)
			assert.Equal(t, p.expected, actual)
			assert.Equal(t, p.expectedErr, err)
		})
	}
}

// TestGetExperimentResultMySQL verifies request validation and the mapping of
// storage errors (ErrExperimentResultNotFound -> statusNotFound) for
// GetExperimentResult backed by MySQL storage.
func TestGetExperimentResultMySQL(t *testing.T) {
	t.Parallel()
	mockController := gomock.NewController(t)
	defer mockController.Finish()

	patterns := map[string]struct {
		setup       func(*eventCounterService)
		input       *ecproto.GetExperimentResultRequest
		expectedErr error
	}{
		"error: ErrExperimentIDRequired": {
			input:       &ecproto.GetExperimentResultRequest{EnvironmentNamespace: "ns0"},
			expectedErr: localizedError(statusExperimentIDRequired, locale.JaJP),
		},
		"err: ErrNotFound": {
			setup: func(s *eventCounterService) {
				s.mysqlExperimentResultStorage.(*v2ecsmock.MockExperimentResultStorage).EXPECT().GetExperimentResult(
					gomock.Any(), gomock.Any(), gomock.Any(),
				).Return(nil, v2ecs.ErrExperimentResultNotFound)
			},
			input: &ecproto.GetExperimentResultRequest{
				ExperimentId:         "eid",
				EnvironmentNamespace: "ns0",
			},
			expectedErr: localizedError(statusNotFound, locale.JaJP),
		},
		"success: get the result from storage": {
			setup: func(s *eventCounterService) {
				s.mysqlExperimentResultStorage.(*v2ecsmock.MockExperimentResultStorage).EXPECT().GetExperimentResult(
					gomock.Any(), gomock.Any(), gomock.Any(),
				).Return(&domain.ExperimentResult{}, nil)
			},
			input: &ecproto.GetExperimentResultRequest{
				ExperimentId:         "eid",
				EnvironmentNamespace: "ns0",
			},
			expectedErr: nil,
		},
	}
	// NOTE(review): no t.Run here; on success only non-nilness of the
	// response is asserted (the stored result above is empty).
	for msg, p := range patterns {
		gs := newEventCounterService(t, mockController)
		if p.setup != nil {
			p.setup(gs)
		}
		actual, err := gs.GetExperimentResult(createContextWithToken(t, accountproto.Account_UNASSIGNED), p.input)
		assert.Equal(t, p.expectedErr, err, "%s", msg)
		if err == nil {
			assert.NotNil(t, actual)
		}
	}
}

// TestListExperimentResultsMySQL checks validation, mapping of experiment
// service errors (ErrKeyNotFound -> statusNotFound, anything else ->
// statusInternal), and assembly of per-experiment results from MySQL storage.
func TestListExperimentResultsMySQL(t *testing.T) {
	t.Parallel()
	mockController := gomock.NewController(t)
	defer mockController.Finish()

	patterns := map[string]struct {
		setup       func(*eventCounterService)
		input       *ecproto.ListExperimentResultsRequest
		expected    *ecproto.ListExperimentResultsResponse
		expectedErr error
	}{
		"error: ErrFeatureIDRequired": {
			input:       &ecproto.ListExperimentResultsRequest{EnvironmentNamespace: "ns0"},
			expectedErr: localizedError(statusFeatureIDRequired, locale.JaJP),
		},
		"err: ErrNotFound": {
			setup: func(s *eventCounterService) {
				s.experimentClient.(*experimentclientmock.MockClient).EXPECT().ListExperiments(
					gomock.Any(), gomock.Any(),
				).Return(nil, storage.ErrKeyNotFound)
			},
			input: &ecproto.ListExperimentResultsRequest{
				FeatureId:            "fid",
				EnvironmentNamespace: "ns0",
			},
			expected:    nil,
			expectedErr: localizedError(statusNotFound, locale.JaJP),
		},
		"err: ErrInternal": {
			setup: func(s *eventCounterService) {
				s.experimentClient.(*experimentclientmock.MockClient).EXPECT().ListExperiments(
					gomock.Any(), gomock.Any(),
				).Return(nil, errors.New("test"))
			},
			input:
&ecproto.ListExperimentResultsRequest{
				FeatureId:            "fid",
				FeatureVersion:       &wrappers.Int32Value{Value: int32(1)},
				EnvironmentNamespace: "ns0",
			},
			expected:    nil,
			expectedErr: localizedError(statusInternal, locale.JaJP),
		},
		// A missing stored result is not an error for the list API: the
		// experiment is omitted and an empty (non-nil) map is returned.
		"success: no results": {
			setup: func(s *eventCounterService) {
				s.experimentClient.(*experimentclientmock.MockClient).EXPECT().ListExperiments(
					gomock.Any(), gomock.Any(),
				).Return(
					&experimentproto.ListExperimentsResponse{
						Experiments: []*experimentproto.Experiment{
							{
								Id:             "eid",
								GoalId:         "gid",
								FeatureId:      "fid",
								FeatureVersion: int32(1),
							},
						},
					},
					nil,
				)
				s.mysqlExperimentResultStorage.(*v2ecsmock.MockExperimentResultStorage).EXPECT().GetExperimentResult(
					gomock.Any(), "eid", gomock.Any(),
				).Return(nil, v2ecs.ErrExperimentResultNotFound)
			},
			input: &ecproto.ListExperimentResultsRequest{
				FeatureId:            "fid",
				FeatureVersion:       &wrappers.Int32Value{Value: int32(1)},
				EnvironmentNamespace: "ns0",
			},
			expected: &ecproto.ListExperimentResultsResponse{
				Results: make(map[string]*ecproto.ExperimentResult, 0),
			},
			expectedErr: nil,
		},
		"success: get results from storage": {
			setup: func(s *eventCounterService) {
				s.experimentClient.(*experimentclientmock.MockClient).EXPECT().ListExperiments(
					gomock.Any(), gomock.Any(),
				).Return(
					&experimentproto.ListExperimentsResponse{
						Experiments: []*experimentproto.Experiment{
							{
								Id:             "eid",
								GoalId:         "gid",
								FeatureId:      "fid",
								FeatureVersion: int32(1),
							},
						},
					},
					nil,
				)
				s.mysqlExperimentResultStorage.(*v2ecsmock.MockExperimentResultStorage).EXPECT().GetExperimentResult(
					gomock.Any(), "eid", gomock.Any(),
				).Return(
					&domain.ExperimentResult{
						ExperimentResult: &ecproto.ExperimentResult{
							Id:          "eid",
							GoalResults: []*ecproto.GoalResult{{GoalId: "gid"}},
						},
					},
					nil,
				)
			},
			input: &ecproto.ListExperimentResultsRequest{
				FeatureId:            "fid",
				FeatureVersion:       &wrappers.Int32Value{Value: int32(1)},
				EnvironmentNamespace: "ns0",
			},
			// Results are keyed by experiment id.
			expected: &ecproto.ListExperimentResultsResponse{
				Results: map[string]*ecproto.ExperimentResult{
					"eid": {
						Id:          "eid",
						GoalResults: []*ecproto.GoalResult{{GoalId: "gid"}},
					},
				},
			},
			expectedErr: nil,
		},
	}
	for msg, p := range patterns {
		t.Run(msg, func(t *testing.T) {
			s := newEventCounterService(t, mockController)
			if p.setup != nil {
				p.setup(s)
			}
			actual, err := s.ListExperimentResults(createContextWithToken(t, accountproto.Account_UNASSIGNED), p.input)
			assert.Equal(t, p.expected, actual)
			assert.Equal(t, p.expectedErr, err)
		})
	}
}

// TestGetGoalCount exercises the validation chain (start/end required,
// start <= end, period within 30 days, goal id required — in that order) and
// the happy path, where druid header/rows are passed through unchanged.
func TestGetGoalCount(t *testing.T) {
	t.Parallel()
	mockController := gomock.NewController(t)
	defer mockController.Finish()
	now := time.Now()

	patterns := map[string]struct {
		setup       func(*eventCounterService)
		input       *ecproto.GetGoalCountRequest
		expected    *ecproto.GetGoalCountResponse
		expectedErr error
	}{
		"error: ErrStartAtRequired": {
			input: &ecproto.GetGoalCountRequest{
				EnvironmentNamespace: "ns0",
				GoalId:               "gid",
			},
			expectedErr: localizedError(statusStartAtRequired, locale.JaJP),
		},
		"error: ErrEndAtRequired": {
			input: &ecproto.GetGoalCountRequest{
				EnvironmentNamespace: "ns0",
				GoalId:               "gid",
				StartAt:              now.Add(-7 * 24 * time.Hour).Unix(),
			},
			expectedErr: localizedError(statusEndAtRequired, locale.JaJP),
		},
		"error: ErrStartAtIsAfterEndAt": {
			input: &ecproto.GetGoalCountRequest{
				EnvironmentNamespace: "ns0",
				GoalId:               "gid",
				StartAt:              now.Unix(),
				EndAt:                now.Add(-31 * 24 * time.Hour).Unix(),
			},
			expectedErr: localizedError(statusStartAtIsAfterEndAt, locale.JaJP),
		},
		"error: ErrPeriodOutOfRange": {
			input: &ecproto.GetGoalCountRequest{
				EnvironmentNamespace: "ns0",
				GoalId:               "gid",
				StartAt:              now.Add(-32 * 24 * time.Hour).Unix(),
				EndAt:                now.Unix(),
			},
			expectedErr: localizedError(statusPeriodOutOfRange, locale.JaJP),
		},
		"error: ErrGoalIDRequired": {
			input: &ecproto.GetGoalCountRequest{
				EnvironmentNamespace: "ns0",
StartAt: now.Add(-30 * 24 * time.Hour).Unix(),
				EndAt:   now.Unix(),
			},
			expectedErr: localizedError(statusGoalIDRequired, locale.JaJP),
		},
		"success": {
			setup: func(s *eventCounterService) {
				s.druidQuerier.(*dmock.MockQuerier).EXPECT().QueryCount(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
					&ecproto.Row{Cells: []*ecproto.Cell{{Value: "val"}}}, []*ecproto.Row{{Cells: []*ecproto.Cell{{Value: "123"}}}}, nil)
			},
			input: &ecproto.GetGoalCountRequest{
				EnvironmentNamespace: "ns0",
				GoalId:               "gid",
				StartAt:              now.Add(-30 * 24 * time.Hour).Unix(),
				EndAt:                now.Unix(),
			},
			// V1 passes druid output through as raw header/rows.
			expected: &ecproto.GetGoalCountResponse{
				Headers: &ecproto.Row{Cells: []*ecproto.Cell{{Value: "val"}}},
				Rows:    []*ecproto.Row{{Cells: []*ecproto.Cell{{Value: "123"}}}},
			},
			expectedErr: nil,
		},
	}
	for msg, p := range patterns {
		t.Run(msg, func(t *testing.T) {
			s := newEventCounterService(t, mockController)
			if p.setup != nil {
				p.setup(s)
			}
			actual, err := s.GetGoalCount(createContextWithToken(t, accountproto.Account_UNASSIGNED), p.input)
			assert.Equal(t, p.expected, actual)
			assert.Equal(t, p.expectedErr, err)
		})
	}
}

// TestGetGoalCountV2 validates the structured goal-count API: per-variation
// user/event counts plus value-sum statistics parsed from druid columns.
// NOTE(review): unlike V1 there is no ErrPeriodOutOfRange case here — the
// ErrGoalIDRequired input even uses a 31-day range; presumably V2 has no
// 30-day period limit — confirm against the handler.
func TestGetGoalCountV2(t *testing.T) {
	t.Parallel()
	mockController := gomock.NewController(t)
	defer mockController.Finish()
	now := time.Now()

	patterns := map[string]struct {
		setup       func(*eventCounterService)
		input       *ecproto.GetGoalCountV2Request
		expected    *ecproto.GetGoalCountV2Response
		expectedErr error
	}{
		"error: ErrStartAtRequired": {
			input: &ecproto.GetGoalCountV2Request{
				EnvironmentNamespace: "ns0",
				GoalId:               "gid",
			},
			expectedErr: localizedError(statusStartAtRequired, locale.JaJP),
		},
		"error: ErrEndAtRequired": {
			input: &ecproto.GetGoalCountV2Request{
				EnvironmentNamespace: "ns0",
				GoalId:               "gid",
				StartAt:              now.Add(-7 * 24 * time.Hour).Unix(),
			},
			expectedErr: localizedError(statusEndAtRequired, locale.JaJP),
		},
		"error: ErrStartAtIsAfterEndAt": {
			input: &ecproto.GetGoalCountV2Request{
				EnvironmentNamespace: "ns0",
				GoalId:               "gid",
				StartAt:              now.Unix(),
				EndAt:                now.Add(-31 * 24 * time.Hour).Unix(),
			},
			expectedErr: localizedError(statusStartAtIsAfterEndAt, locale.JaJP),
		},
		"error: ErrGoalIDRequired": {
			input: &ecproto.GetGoalCountV2Request{
				EnvironmentNamespace: "ns0",
				StartAt:              now.Add(-31 * 24 * time.Hour).Unix(),
				EndAt:                now.Unix(),
			},
			expectedErr: localizedError(statusGoalIDRequired, locale.JaJP),
		},
		"success: one variation": {
			setup: func(s *eventCounterService) {
				s.druidQuerier.(*dmock.MockQuerier).EXPECT().QueryGoalCount(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
					// Header: variation, user, total, value sum, mean, variance.
					&ecproto.Row{Cells: []*ecproto.Cell{
						{Value: ecdruid.ColumnVariation},
						{Value: ecdruid.ColumnGoalUser},
						{Value: ecdruid.ColumnGoalTotal},
						{Value: ecdruid.ColumnGoalValueTotal},
						{Value: ecdruid.ColumnGoalValueMean},
						{Value: ecdruid.ColumnGoalValueVariance},
					}},
					[]*ecproto.Row{
						{Cells: []*ecproto.Cell{
							{Value: "vid0", Type: ecproto.Cell_STRING},
							{ValueDouble: float64(1), Type: ecproto.Cell_DOUBLE},
							{ValueDouble: float64(2), Type: ecproto.Cell_DOUBLE},
							{ValueDouble: float64(1.23), Type: ecproto.Cell_DOUBLE},
							{ValueDouble: float64(1.234), Type: ecproto.Cell_DOUBLE},
							{ValueDouble: float64(1.2345), Type: ecproto.Cell_DOUBLE},
						}},
						{Cells: []*ecproto.Cell{
							{Value: "vid1", Type: ecproto.Cell_STRING},
							{ValueDouble: float64(12), Type: ecproto.Cell_DOUBLE},
							{ValueDouble: float64(123), Type: ecproto.Cell_DOUBLE},
							{ValueDouble: float64(123.45), Type: ecproto.Cell_DOUBLE},
							{ValueDouble: float64(123.456), Type: ecproto.Cell_DOUBLE},
							{ValueDouble: float64(123.4567), Type: ecproto.Cell_DOUBLE},
						}},
					},
					nil)
			},
			input: &ecproto.GetGoalCountV2Request{
				EnvironmentNamespace: "ns0",
				GoalId:               "gid",
				FeatureId:            "fid",
				FeatureVersion:       int32(1),
				VariationIds:         []string{"vid1"},
				StartAt:              now.Add(-30 * 24 * time.Hour).Unix(),
				EndAt:                now.Unix(),
			},
			expected: &ecproto.GetGoalCountV2Response{
				GoalCounts: &ecproto.GoalCounts{
					GoalId: "gid",
					RealtimeCounts: []*ecproto.VariationCount{
						{
							VariationId:             "vid1",
							UserCount:               int64(12),
							EventCount:              int64(123),
							ValueSum:                float64(123.45),
							ValueSumPerUserMean:     float64(123.456),
							ValueSumPerUserVariance: float64(123.4567),
						},
					},
				},
			},
			expectedErr: nil,
		},
		"success: all variations": {
			setup: func(s *eventCounterService) {
				s.druidQuerier.(*dmock.MockQuerier).EXPECT().QueryGoalCount(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
					&ecproto.Row{Cells: []*ecproto.Cell{
						{Value: ecdruid.ColumnVariation},
						{Value: ecdruid.ColumnGoalUser},
						{Value: ecdruid.ColumnGoalTotal},
						{Value: ecdruid.ColumnGoalValueTotal},
						{Value: ecdruid.ColumnGoalValueMean},
						{Value: ecdruid.ColumnGoalValueVariance},
					}},
					[]*ecproto.Row{
						{Cells: []*ecproto.Cell{
							{Value: "vid0", Type: ecproto.Cell_STRING},
							{ValueDouble: float64(1), Type: ecproto.Cell_DOUBLE},
							{ValueDouble: float64(2), Type: ecproto.Cell_DOUBLE},
							{ValueDouble: float64(1.23), Type: ecproto.Cell_DOUBLE},
							{ValueDouble: float64(1.234), Type: ecproto.Cell_DOUBLE},
							{ValueDouble: float64(1.2345), Type: ecproto.Cell_DOUBLE},
						}},
						{Cells: []*ecproto.Cell{
							{Value: "vid1", Type: ecproto.Cell_STRING},
							{ValueDouble: float64(12), Type: ecproto.Cell_DOUBLE},
							{ValueDouble: float64(123), Type: ecproto.Cell_DOUBLE},
							{ValueDouble: float64(123.45), Type: ecproto.Cell_DOUBLE},
							{ValueDouble: float64(123.456), Type: ecproto.Cell_DOUBLE},
							{ValueDouble: float64(123.4567), Type: ecproto.Cell_DOUBLE},
						}},
					},
					nil)
			},
			input: &ecproto.GetGoalCountV2Request{
				EnvironmentNamespace: "ns0",
				GoalId:               "gid",
				FeatureId:            "fid",
				FeatureVersion:       int32(1),
VariationIds: []string{"vid0", "vid1"},
				StartAt:      now.Add(-30 * 24 * time.Hour).Unix(),
				EndAt:        now.Unix(),
			},
			expected: &ecproto.GetGoalCountV2Response{
				GoalCounts: &ecproto.GoalCounts{
					GoalId: "gid",
					RealtimeCounts: []*ecproto.VariationCount{
						{
							VariationId:             "vid0",
							UserCount:               int64(1),
							EventCount:              int64(2),
							ValueSum:                float64(1.23),
							ValueSumPerUserMean:     float64(1.234),
							ValueSumPerUserVariance: float64(1.2345),
						},
						{
							VariationId:             "vid1",
							UserCount:               int64(12),
							EventCount:              int64(123),
							ValueSum:                float64(123.45),
							ValueSumPerUserMean:     float64(123.456),
							ValueSumPerUserVariance: float64(123.4567),
						},
					},
				},
			},
			expectedErr: nil,
		},
	}
	for msg, p := range patterns {
		t.Run(msg, func(t *testing.T) {
			s := newEventCounterService(t, mockController)
			if p.setup != nil {
				p.setup(s)
			}
			actual, err := s.GetGoalCountV2(createContextWithToken(t, accountproto.Account_UNASSIGNED), p.input)
			assert.Equal(t, p.expected, actual)
			assert.Equal(t, p.expectedErr, err)
		})
	}
}

// TestGetUserCountV2 checks the time-range validation shared with the other
// V2 endpoints and the mapping of druid user-total/user-count columns onto
// the flat EventCount/UserCount response fields.
func TestGetUserCountV2(t *testing.T) {
	t.Parallel()
	mockController := gomock.NewController(t)
	defer mockController.Finish()
	now := time.Now()

	patterns := map[string]struct {
		setup       func(*eventCounterService)
		input       *ecproto.GetUserCountV2Request
		expected    *ecproto.GetUserCountV2Response
		expectedErr error
	}{
		"error: ErrStartAtRequired": {
			input: &ecproto.GetUserCountV2Request{
				EnvironmentNamespace: "ns0",
			},
			expectedErr: localizedError(statusStartAtRequired, locale.JaJP),
		},
		"error: ErrEndAtRequired": {
			input: &ecproto.GetUserCountV2Request{
				EnvironmentNamespace: "ns0",
				StartAt:              now.Add(-7 * 24 * time.Hour).Unix(),
			},
			expectedErr: localizedError(statusEndAtRequired, locale.JaJP),
		},
		"error: ErrStartAtIsAfterEndAt": {
			input: &ecproto.GetUserCountV2Request{
				EnvironmentNamespace: "ns0",
				StartAt:              now.Unix(),
				EndAt:                now.Add(-31 * 24 * time.Hour).Unix(),
			},
			expectedErr: localizedError(statusStartAtIsAfterEndAt, locale.JaJP),
		},
		"success": {
			setup: func(s *eventCounterService) {
				s.druidQuerier.(*dmock.MockQuerier).EXPECT().QueryUserCount(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
					&ecproto.Row{Cells: []*ecproto.Cell{
						{Value: ecdruid.ColumnUserTotal},
						{Value: ecdruid.ColumnUserCount},
					}},
					[]*ecproto.Row{
						{Cells: []*ecproto.Cell{
							{ValueDouble: float64(4), Type: ecproto.Cell_DOUBLE},
							{ValueDouble: float64(2), Type: ecproto.Cell_DOUBLE},
						}},
					},
					nil)
			},
			input: &ecproto.GetUserCountV2Request{
				EnvironmentNamespace: "ns0",
				StartAt:              now.Add(-30 * 24 * time.Hour).Unix(),
				EndAt:                now.Unix(),
			},
			expected: &ecproto.GetUserCountV2Response{
				EventCount: 4,
				UserCount:  2,
			},
			expectedErr: nil,
		},
	}
	for msg, p := range patterns {
		t.Run(msg, func(t *testing.T) {
			s := newEventCounterService(t, mockController)
			if p.setup != nil {
				p.setup(s)
			}
			actual, err := s.GetUserCountV2(createContextWithToken(t, accountproto.Account_UNASSIGNED), p.input)
			assert.Equal(t, p.expected, actual)
			assert.Equal(t, p.expectedErr, err)
		})
	}
}

// TestListUserMetadata covers the pass-through of druid segment-metadata
// dimensions; only the happy path is exercised here.
func TestListUserMetadata(t *testing.T) {
	t.Parallel()
	mockController := gomock.NewController(t)
	defer mockController.Finish()

	patterns := map[string]struct {
		setup       func(*eventCounterService)
		input       *ecproto.ListUserMetadataRequest
		expected    *ecproto.ListUserMetadataResponse
		expectedErr error
	}{
		"success": {
			setup: func(s *eventCounterService) {
				s.druidQuerier.(*dmock.MockQuerier).EXPECT().QuerySegmentMetadata(gomock.Any(), gomock.Any(), gomock.Any()).Return([]string{"d1", "d2"}, nil)
			},
			input: &ecproto.ListUserMetadataRequest{
				EnvironmentNamespace: "ns0",
			},
			expected: &ecproto.ListUserMetadataResponse{
				Data: []string{"d1", "d2"},
			},
			expectedErr: nil,
		},
	}
	for msg, p := range patterns {
		t.Run(msg, func(t *testing.T) {
			s := newEventCounterService(t, mockController)
			if p.setup != nil {
p.setup(s)
			}
			actual, err := s.ListUserMetadata(createContextWithToken(t, accountproto.Account_UNASSIGNED), p.input)
			assert.Equal(t, p.expected, actual)
			assert.Equal(t, p.expectedErr, err)
		})
	}
}

// TestGenInterval checks interval computation in a non-UTC location
// (jpLocation): the end time is converted to local midnight before stepping
// back inputDurationDays days, which is why a 23:00 UTC end lands one local
// day later than a 00:00 UTC end.
func TestGenInterval(t *testing.T) {
	t.Parallel()
	patterns := map[string]struct {
		inputLocation     *time.Location
		inputEndAt        time.Time
		inputDurationDays int
		expected          time.Time
		expectedErr       error
	}{
		"success": {
			inputLocation:     jpLocation,
			inputEndAt:        time.Date(2020, 12, 25, 0, 0, 0, 0, time.UTC),
			inputDurationDays: 10,
			expected:          time.Date(2020, 12, 15, 0, 0, 0, 0, jpLocation),
			expectedErr:       nil,
		},
		"over prime meridian": {
			inputLocation:     jpLocation,
			inputEndAt:        time.Date(2020, 12, 25, 23, 0, 0, 0, time.UTC),
			inputDurationDays: 10,
			expected:          time.Date(2020, 12, 16, 0, 0, 0, 0, jpLocation),
			expectedErr:       nil,
		},
	}
	for msg, p := range patterns {
		t.Run(msg, func(t *testing.T) {
			actual, err := genInterval(p.inputLocation, p.inputEndAt, p.inputDurationDays)
			// Compare as Unix seconds to avoid Location-pointer inequality.
			assert.Equal(t, p.expected.Unix(), actual.Unix())
			assert.Equal(t, p.expectedErr, err)
		})
	}
}

// TestGetEvaluationTimeseriesCount verifies that the service queries the
// timeseries once per variation of the feature and splits the druid columns
// into EventCounts/UserCounts in variation order.
func TestGetEvaluationTimeseriesCount(t *testing.T) {
	t.Parallel()
	mockController := gomock.NewController(t)
	defer mockController.Finish()

	patterns := map[string]struct {
		setup       func(*eventCounterService)
		input       *ecproto.GetEvaluationTimeseriesCountRequest
		expected    *ecproto.GetEvaluationTimeseriesCountResponse
		expectedErr error
	}{
		"error: ErrFeatureIDRequired": {
			input: &ecproto.GetEvaluationTimeseriesCountRequest{
				EnvironmentNamespace: "ns0",
			},
			expectedErr: localizedError(statusFeatureIDRequired, locale.JaJP),
		},
		"success": {
			setup: func(s *eventCounterService) {
				s.featureClient.(*featureclientmock.MockClient).EXPECT().GetFeature(gomock.Any(), gomock.Any()).Return(
					&featureproto.GetFeatureResponse{
						Feature: &featureproto.Feature{
							Id:         "fid",
							Variations: []*featureproto.Variation{{Id: "vid0"}, {Id: "vid1"}},
						},
					},
					nil)
				// NOTE(review): the two expectations below have identical
				// matchers; the expected output assumes gomock consumes them
				// in declaration order (vid0 first, then vid1) — presumably
				// matching the order of Feature.Variations. Confirm this is
				// guaranteed by the service's iteration order.
				s.druidQuerier.(*dmock.MockQuerier).EXPECT().QueryEvaluationTimeseriesCount(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
					map[string]*ecproto.VariationTimeseries{
						ecdruid.ColumnEvaluationTotal: {
							VariationId: "vid0",
							Timeseries: &ecproto.Timeseries{
								Timestamps: []int64{int64(1)},
								Values:     []float64{float64(1.2)},
							},
						},
						ecdruid.ColumnEvaluationUser: {
							VariationId: "vid0",
							Timeseries: &ecproto.Timeseries{
								Timestamps: []int64{int64(2)},
								Values:     []float64{float64(2.3)},
							},
						},
					}, nil)
				s.druidQuerier.(*dmock.MockQuerier).EXPECT().QueryEvaluationTimeseriesCount(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
					map[string]*ecproto.VariationTimeseries{
						ecdruid.ColumnEvaluationTotal: {
							VariationId: "vid1",
							Timeseries: &ecproto.Timeseries{
								Timestamps: []int64{int64(3)},
								Values:     []float64{float64(3.4)},
							},
						},
						ecdruid.ColumnEvaluationUser: {
							VariationId: "vid1",
							Timeseries: &ecproto.Timeseries{
								Timestamps: []int64{int64(4)},
								Values:     []float64{float64(4.5)},
							},
						},
					}, nil)
			},
			input: &ecproto.GetEvaluationTimeseriesCountRequest{
				EnvironmentNamespace: "ns0",
				FeatureId:            "fid",
			},
			expected: &ecproto.GetEvaluationTimeseriesCountResponse{
				EventCounts: []*ecproto.VariationTimeseries{
					{
						VariationId: "vid0",
						Timeseries: &ecproto.Timeseries{
							Timestamps: []int64{int64(1)},
							Values:     []float64{float64(1.2)},
						},
					},
					{
						VariationId: "vid1",
						Timeseries: &ecproto.Timeseries{
							Timestamps: []int64{int64(3)},
							Values:     []float64{float64(3.4)},
						},
					},
				},
				UserCounts: []*ecproto.VariationTimeseries{
					{
						VariationId: "vid0",
						Timeseries: &ecproto.Timeseries{
							Timestamps: []int64{int64(2)},
							Values:     []float64{float64(2.3)},
						},
					},
					{
						VariationId: "vid1",
						Timeseries: &ecproto.Timeseries{
							Timestamps: []int64{int64(4)},
							Values:     []float64{float64(4.5)},
						},
					},
				},
			},
			expectedErr: nil,
		},
	}
	for msg, p := range patterns {
		t.Run(msg, func(t *testing.T) {
			s := newEventCounterService(t, mockController)
			if p.setup != nil {
				p.setup(s)
			}
			actual, err := s.GetEvaluationTimeseriesCount(createContextWithToken(t, accountproto.Account_UNASSIGNED), p.input)
			assert.Equal(t, p.expected, actual)
			assert.Equal(t, p.expectedErr, err)
		})
	}
}

// newEventCounterService builds a service wired entirely to mocks. The
// account client always answers GetAccount with a VIEWER account so role
// checks pass for any test context.
func newEventCounterService(t *testing.T, mockController *gomock.Controller) *eventCounterService {
	logger, err := log.NewLogger()
	require.NoError(t, err)
	// NOTE(review): the local variable shadows the imported metrics package.
	metrics := metrics.NewMetrics(
		9999,
		"/metrics",
	)
	reg := metrics.DefaultRegisterer()
	accountClientMock := accountclientmock.NewMockClient(mockController)
	ar := &accountproto.GetAccountResponse{
		Account: &accountproto.Account{
			Email: "email",
			Role:  accountproto.Account_VIEWER,
		},
	}
	accountClientMock.EXPECT().GetAccount(gomock.Any(), gomock.Any()).Return(ar, nil).AnyTimes()
	return &eventCounterService{
		experimentClient:             experimentclientmock.NewMockClient(mockController),
		featureClient:                featureclientmock.NewMockClient(mockController),
		accountClient:                accountClientMock,
		mysqlExperimentResultStorage: v2ecsmock.NewMockExperimentResultStorage(mockController),
		druidQuerier:                 dmock.NewMockQuerier(mockController),
		metrics:                      reg,
		logger:                       logger.Named("api"),
	}
}

// createContextWithToken returns a context carrying an ID token with the
// given admin role, as the RPC auth interceptor would.
func createContextWithToken(t *testing.T, role accountproto.Account_Role) context.Context {
	t.Helper()
	// NOTE(review): the local variable shadows the imported token package.
	token := &token.IDToken{
		Email:     "test@example.com",
		AdminRole: role,
	}
	ctx := context.TODO()
	return context.WithValue(ctx, rpc.Key, token)
}
diff --git a/pkg/eventcounter/api/error.go b/pkg/eventcounter/api/error.go
new file mode 100644
index 000000000..b56373961
--- /dev/null
+++ b/pkg/eventcounter/api/error.go
@@ -0,0 +1,146 @@
// Copyright 2022 The Bucketeer Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc/codes" + gstatus "google.golang.org/grpc/status" + + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/rpc/status" +) + +var ( + statusInternal = gstatus.New(codes.Internal, "eventcounter: internal") + statusFeatureIDRequired = gstatus.New(codes.InvalidArgument, "eventcounter: feature id is required") + statusExperimentIDRequired = gstatus.New(codes.InvalidArgument, "eventcounter: experiment id is required") + statusGoalIDRequired = gstatus.New(codes.InvalidArgument, "eventcounter: goal id is required") + statusStartAtRequired = gstatus.New(codes.InvalidArgument, "eventcounter: start at is required") + statusEndAtRequired = gstatus.New(codes.InvalidArgument, "eventcounter: end at is required") + statusPeriodOutOfRange = gstatus.New(codes.InvalidArgument, "eventcounter: period out of range") + statusStartAtIsAfterEndAt = gstatus.New(codes.InvalidArgument, "eventcounter: start at is after end at") + statusNotFound = gstatus.New(codes.NotFound, "eventcounter: not found") + statusUnauthenticated = gstatus.New(codes.Unauthenticated, "feature: unauthenticated") + statusPermissionDenied = gstatus.New(codes.PermissionDenied, "feature: permission denied") + + errInternalJaJP = status.MustWithDetails( + statusInternal, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "内部エラーが発生しました", + }, + ) + errFeatureIDRequiredJaJP = status.MustWithDetails( + statusFeatureIDRequired, + 
&errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "feature idは必須です", + }, + ) + errExperimentIDRequiredJaJP = status.MustWithDetails( + statusExperimentIDRequired, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "experiment idは必須です", + }, + ) + errGoalIDRequiredJaJP = status.MustWithDetails( + statusGoalIDRequired, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "goal idは必須です", + }, + ) + errStartAtRequiredJaJP = status.MustWithDetails( + statusStartAtRequired, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "start atは必須です", + }, + ) + errEndAtRequiredJaJP = status.MustWithDetails( + statusEndAtRequired, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "end atは必須です", + }, + ) + errStartAtIsAfterEndAtJaJP = status.MustWithDetails( + statusStartAtIsAfterEndAt, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "start at はend at以前を指定してください。", + }, + ) + errPeroidOutOfRangeJaJP = status.MustWithDetails( + statusPeriodOutOfRange, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "期間は過去30日以内を選択してください。", + }, + ) + errNotFoundJaJP = status.MustWithDetails( + statusNotFound, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "データが存在しません", + }, + ) + errUnauthenticatedJaJP = status.MustWithDetails( + statusUnauthenticated, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "認証されていません", + }, + ) + errPermissionDeniedJaJP = status.MustWithDetails( + statusPermissionDenied, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "権限がありません", + }, + ) +) + +func localizedError(s *gstatus.Status, loc string) error { + // handle loc if multi-lang is necessary + switch s { + case statusInternal: + return errInternalJaJP + case statusFeatureIDRequired: + return errFeatureIDRequiredJaJP + case statusExperimentIDRequired: + return errExperimentIDRequiredJaJP + case statusGoalIDRequired: + return errGoalIDRequiredJaJP + case 
statusStartAtRequired: + return errStartAtRequiredJaJP + case statusEndAtRequired: + return errEndAtRequiredJaJP + case statusPeriodOutOfRange: + return errPeroidOutOfRangeJaJP + case statusStartAtIsAfterEndAt: + return errStartAtIsAfterEndAtJaJP + case statusNotFound: + return errNotFoundJaJP + case statusUnauthenticated: + return errUnauthenticatedJaJP + case statusPermissionDenied: + return errPermissionDeniedJaJP + default: + return errInternalJaJP + } +} diff --git a/pkg/eventcounter/api/metrics.go b/pkg/eventcounter/api/metrics.go new file mode 100644 index 000000000..b5cc4cc88 --- /dev/null +++ b/pkg/eventcounter/api/metrics.go @@ -0,0 +1,78 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/bucketeer-io/bucketeer/pkg/metrics" +) + +const ( + codeFail = "Fail" + codeSuccess = "Success" +) + +var ( + cacheCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "event_counter", + Name: "api_cache_requests_total", + Help: "Total number of cache requests", + }, []string{"type", "layer", "code"}) + + listExperimentCountsCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "event_counter", + Name: "api_list_experiment_counts_calls_total", + Help: "Total number of ListExperimentCounts calls", + }, []string{"code"}) + + listExperimentResultsCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "event_counter", + Name: "api_list_experiment_results_calls_total", + Help: "Total number of ListExperimentResults calls", + }, []string{"code"}) + + getExperimentCountsCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "event_counter", + Name: "get_experiment_counts_calls_total", + Help: "Total number of GetExperimentCounts calls", + }, []string{"code"}) + + getExperimentResultCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "event_counter", + Name: "get_experiment_result_calls_total", + Help: "Total number of GetExperimentResult calls", + }, []string{"code"}) +) + +func registerMetrics(r metrics.Registerer) { + r.MustRegister( + cacheCounter, + listExperimentCountsCounter, + listExperimentResultsCounter, + getExperimentCountsCounter, + getExperimentResultCounter, + ) +} diff --git a/pkg/eventcounter/client/BUILD.bazel b/pkg/eventcounter/client/BUILD.bazel new file mode 100644 index 000000000..857a54b67 --- /dev/null +++ b/pkg/eventcounter/client/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + 
+go_library(
+    name = "go_default_library",
+    srcs = ["client.go"],
+    importpath = "github.com/bucketeer-io/bucketeer/pkg/eventcounter/client",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//pkg/rpc/client:go_default_library",
+        "//proto/eventcounter:go_default_library",
+        "@org_golang_google_grpc//:go_default_library",
+    ],
+)
diff --git a/pkg/eventcounter/client/client.go b/pkg/eventcounter/client/client.go
new file mode 100644
index 000000000..9086e400b
--- /dev/null
+++ b/pkg/eventcounter/client/client.go
@@ -0,0 +1,50 @@
+// Copyright 2022 The Bucketeer Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE
+package client
+
+import (
+    "google.golang.org/grpc"
+
+    rpcclient "github.com/bucketeer-io/bucketeer/pkg/rpc/client"
+    proto "github.com/bucketeer-io/bucketeer/proto/eventcounter"
+)
+
+// Client is an EventCounterService gRPC client that can also close its
+// underlying connection.
+type Client interface {
+    proto.EventCounterServiceClient
+    Close()
+}
+
+// client embeds the generated service client and keeps the connection so
+// Close can tear it down.
+type client struct {
+    proto.EventCounterServiceClient
+    address    string // target address the connection was dialed with
+    connection *grpc.ClientConn
+}
+
+// NewClient dials addr using the TLS certificate at certPath plus any
+// rpcclient options, and returns a Client bound to that connection.
+func NewClient(addr, certPath string, opts ...rpcclient.Option) (Client, error) {
+    conn, err := rpcclient.NewClientConn(addr, certPath, opts...)
+    if err != nil {
+        return nil, err
+    }
+    return &client{
+        EventCounterServiceClient: proto.NewEventCounterServiceClient(conn),
+        address:                   addr,
+        connection:                conn,
+    }, nil
+}
+
+// Close closes the underlying gRPC connection.
+// NOTE(review): the error from connection.Close() is discarded — confirm a
+// best-effort close is intended.
+func (c *client) Close() {
+    c.connection.Close()
+}
diff --git a/pkg/eventcounter/client/mock/BUILD.bazel b/pkg/eventcounter/client/mock/BUILD.bazel
new file mode 100644
index 000000000..66304b838
--- /dev/null
+++ b/pkg/eventcounter/client/mock/BUILD.bazel
@@ -0,0 +1,13 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "go_default_library",
+    srcs = ["client.go"],
+    importpath = "github.com/bucketeer-io/bucketeer/pkg/eventcounter/client/mock",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//proto/eventcounter:go_default_library",
+        "@com_github_golang_mock//gomock:go_default_library",
+        "@org_golang_google_grpc//:go_default_library",
+    ],
+)
diff --git a/pkg/eventcounter/client/mock/client.go b/pkg/eventcounter/client/mock/client.go
new file mode 100644
index 000000000..5b33bbdf1
--- /dev/null
+++ b/pkg/eventcounter/client/mock/client.go
@@ -0,0 +1,210 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: client.go
+
+// Package mock is a generated GoMock package.
+package mock
+
+import (
+    context "context"
+    reflect "reflect"
+
+    gomock "github.com/golang/mock/gomock"
+    grpc "google.golang.org/grpc"
+
+    eventcounter "github.com/bucketeer-io/bucketeer/proto/eventcounter"
+)
+
+// MockClient is a mock of Client interface.
+type MockClient struct {
+    ctrl     *gomock.Controller
+    recorder *MockClientMockRecorder
+}
+
+// MockClientMockRecorder is the mock recorder for MockClient.
+type MockClientMockRecorder struct {
+    mock *MockClient
+}
+
+// NewMockClient creates a new mock instance.
+func NewMockClient(ctrl *gomock.Controller) *MockClient {
+    mock := &MockClient{ctrl: ctrl}
+    mock.recorder = &MockClientMockRecorder{mock}
+    return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockClient) EXPECT() *MockClientMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockClient) Close() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Close") +} + +// Close indicates an expected call of Close. +func (mr *MockClientMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockClient)(nil).Close)) +} + +// GetEvaluationCountV2 mocks base method. +func (m *MockClient) GetEvaluationCountV2(ctx context.Context, in *eventcounter.GetEvaluationCountV2Request, opts ...grpc.CallOption) (*eventcounter.GetEvaluationCountV2Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetEvaluationCountV2", varargs...) + ret0, _ := ret[0].(*eventcounter.GetEvaluationCountV2Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetEvaluationCountV2 indicates an expected call of GetEvaluationCountV2. +func (mr *MockClientMockRecorder) GetEvaluationCountV2(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEvaluationCountV2", reflect.TypeOf((*MockClient)(nil).GetEvaluationCountV2), varargs...) +} + +// GetEvaluationTimeseriesCount mocks base method. +func (m *MockClient) GetEvaluationTimeseriesCount(ctx context.Context, in *eventcounter.GetEvaluationTimeseriesCountRequest, opts ...grpc.CallOption) (*eventcounter.GetEvaluationTimeseriesCountResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetEvaluationTimeseriesCount", varargs...) 
+ ret0, _ := ret[0].(*eventcounter.GetEvaluationTimeseriesCountResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetEvaluationTimeseriesCount indicates an expected call of GetEvaluationTimeseriesCount. +func (mr *MockClientMockRecorder) GetEvaluationTimeseriesCount(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEvaluationTimeseriesCount", reflect.TypeOf((*MockClient)(nil).GetEvaluationTimeseriesCount), varargs...) +} + +// GetExperimentResult mocks base method. +func (m *MockClient) GetExperimentResult(ctx context.Context, in *eventcounter.GetExperimentResultRequest, opts ...grpc.CallOption) (*eventcounter.GetExperimentResultResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetExperimentResult", varargs...) + ret0, _ := ret[0].(*eventcounter.GetExperimentResultResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetExperimentResult indicates an expected call of GetExperimentResult. +func (mr *MockClientMockRecorder) GetExperimentResult(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetExperimentResult", reflect.TypeOf((*MockClient)(nil).GetExperimentResult), varargs...) +} + +// GetGoalCount mocks base method. +func (m *MockClient) GetGoalCount(ctx context.Context, in *eventcounter.GetGoalCountRequest, opts ...grpc.CallOption) (*eventcounter.GetGoalCountResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetGoalCount", varargs...) 
+ ret0, _ := ret[0].(*eventcounter.GetGoalCountResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetGoalCount indicates an expected call of GetGoalCount. +func (mr *MockClientMockRecorder) GetGoalCount(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGoalCount", reflect.TypeOf((*MockClient)(nil).GetGoalCount), varargs...) +} + +// GetGoalCountV2 mocks base method. +func (m *MockClient) GetGoalCountV2(ctx context.Context, in *eventcounter.GetGoalCountV2Request, opts ...grpc.CallOption) (*eventcounter.GetGoalCountV2Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetGoalCountV2", varargs...) + ret0, _ := ret[0].(*eventcounter.GetGoalCountV2Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetGoalCountV2 indicates an expected call of GetGoalCountV2. +func (mr *MockClientMockRecorder) GetGoalCountV2(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGoalCountV2", reflect.TypeOf((*MockClient)(nil).GetGoalCountV2), varargs...) +} + +// GetUserCountV2 mocks base method. +func (m *MockClient) GetUserCountV2(ctx context.Context, in *eventcounter.GetUserCountV2Request, opts ...grpc.CallOption) (*eventcounter.GetUserCountV2Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetUserCountV2", varargs...) + ret0, _ := ret[0].(*eventcounter.GetUserCountV2Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetUserCountV2 indicates an expected call of GetUserCountV2. 
+func (mr *MockClientMockRecorder) GetUserCountV2(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUserCountV2", reflect.TypeOf((*MockClient)(nil).GetUserCountV2), varargs...) +} + +// ListExperimentResults mocks base method. +func (m *MockClient) ListExperimentResults(ctx context.Context, in *eventcounter.ListExperimentResultsRequest, opts ...grpc.CallOption) (*eventcounter.ListExperimentResultsResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListExperimentResults", varargs...) + ret0, _ := ret[0].(*eventcounter.ListExperimentResultsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListExperimentResults indicates an expected call of ListExperimentResults. +func (mr *MockClientMockRecorder) ListExperimentResults(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListExperimentResults", reflect.TypeOf((*MockClient)(nil).ListExperimentResults), varargs...) +} + +// ListUserMetadata mocks base method. +func (m *MockClient) ListUserMetadata(ctx context.Context, in *eventcounter.ListUserMetadataRequest, opts ...grpc.CallOption) (*eventcounter.ListUserMetadataResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListUserMetadata", varargs...) + ret0, _ := ret[0].(*eventcounter.ListUserMetadataResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListUserMetadata indicates an expected call of ListUserMetadata. 
+func (mr *MockClientMockRecorder) ListUserMetadata(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListUserMetadata", reflect.TypeOf((*MockClient)(nil).ListUserMetadata), varargs...) +} diff --git a/pkg/eventcounter/cmd/server/BUILD.bazel b/pkg/eventcounter/cmd/server/BUILD.bazel new file mode 100644 index 000000000..c2469098a --- /dev/null +++ b/pkg/eventcounter/cmd/server/BUILD.bazel @@ -0,0 +1,25 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["server.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/eventcounter/cmd/server", + visibility = ["//visibility:public"], + deps = [ + "//pkg/account/client:go_default_library", + "//pkg/cli:go_default_library", + "//pkg/eventcounter/api:go_default_library", + "//pkg/eventcounter/druid:go_default_library", + "//pkg/experiment/client:go_default_library", + "//pkg/feature/client:go_default_library", + "//pkg/health:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/rpc/client:go_default_library", + "//pkg/storage/druid:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//pkg/token:go_default_library", + "@in_gopkg_alecthomas_kingpin_v2//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/eventcounter/cmd/server/server.go b/pkg/eventcounter/cmd/server/server.go new file mode 100644 index 000000000..a76205a73 --- /dev/null +++ b/pkg/eventcounter/cmd/server/server.go @@ -0,0 +1,224 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package server
+
+import (
+    "context"
+    "time"
+
+    "go.uber.org/zap"
+    kingpin "gopkg.in/alecthomas/kingpin.v2"
+
+    accountclient "github.com/bucketeer-io/bucketeer/pkg/account/client"
+    "github.com/bucketeer-io/bucketeer/pkg/cli"
+    "github.com/bucketeer-io/bucketeer/pkg/eventcounter/api"
+    "github.com/bucketeer-io/bucketeer/pkg/eventcounter/druid"
+    experimentclient "github.com/bucketeer-io/bucketeer/pkg/experiment/client"
+    featureclient "github.com/bucketeer-io/bucketeer/pkg/feature/client"
+    "github.com/bucketeer-io/bucketeer/pkg/health"
+    "github.com/bucketeer-io/bucketeer/pkg/metrics"
+    "github.com/bucketeer-io/bucketeer/pkg/rpc"
+    rpcclient "github.com/bucketeer-io/bucketeer/pkg/rpc/client"
+    storagedruid "github.com/bucketeer-io/bucketeer/pkg/storage/druid"
+    "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql"
+    "github.com/bucketeer-io/bucketeer/pkg/token"
+)
+
+const command = "server"
+
+// server holds the kingpin flag bindings for the event-counter server
+// subcommand; each field is populated when the CLI parses its flags.
+type server struct {
+    *kingpin.CmdClause
+    port                  *int
+    project               *string
+    mysqlUser             *string
+    mysqlPass             *string
+    mysqlHost             *string
+    mysqlPort             *int
+    mysqlDBName           *string
+    experimentService     *string
+    featureService        *string
+    accountService        *string
+    certPath              *string
+    keyPath               *string
+    serviceTokenPath      *string
+    oauthKeyPath          *string
+    oauthClientID         *string
+    oauthIssuer           *string
+    druidURL              *string
+    druidDatasourcePrefix *string
+    druidUsername         *string
+    druidPassword         *string
+}
+
+// RegisterCommand declares the "server" subcommand and all of its flags on
+// the parent command, registers it with r, and returns it.
+func RegisterCommand(r cli.CommandRegistry, p cli.ParentCommand) cli.Command {
+    cmd := p.Command(command, "Start the server")
+    server := &server{
+        CmdClause:   cmd,
+        port:        cmd.Flag("port", "Port to bind to.").Default("9090").Int(),
+        project:     cmd.Flag("project", "Google Cloud project name.").String(),
+        mysqlUser:   cmd.Flag("mysql-user", "MySQL user.").Required().String(),
+        mysqlPass:   cmd.Flag("mysql-pass", "MySQL password.").Required().String(),
+        mysqlHost:   cmd.Flag("mysql-host", "MySQL host.").Required().String(),
+        mysqlPort:   cmd.Flag("mysql-port", "MySQL port.").Required().Int(),
+        mysqlDBName: cmd.Flag("mysql-db-name", "MySQL database name.").Required().String(),
+        experimentService: cmd.Flag(
+            "experiment-service",
+            "bucketeer-experiment-service address.",
+        ).Default("experiment:9090").String(),
+        featureService: cmd.Flag(
+            "feature-service",
+            "bucketeer-feature-service address.",
+        ).Default("feature:9090").String(),
+        accountService: cmd.Flag(
+            "account-service",
+            "bucketeer-account-service address.",
+        ).Default("account:9090").String(),
+        certPath:         cmd.Flag("cert", "Path to TLS certificate.").Required().String(),
+        keyPath:          cmd.Flag("key", "Path to TLS key.").Required().String(),
+        serviceTokenPath: cmd.Flag("service-token", "Path to service token.").Required().String(),
+        oauthKeyPath: cmd.Flag(
+            "oauth-key",
+            "Path to public key used to verify oauth token.",
+        ).Required().String(),
+        oauthClientID:         cmd.Flag("oauth-client-id", "The oauth clientID registered at dex.").Required().String(),
+        oauthIssuer:           cmd.Flag("oauth-issuer", "The url of dex issuer.").Required().String(),
+        druidURL:              cmd.Flag("druid-url", "Druid URL.").String(),
+        druidDatasourcePrefix: cmd.Flag("druid-datasource-prefix", "Druid datasource prefix.").String(),
+        druidUsername:         cmd.Flag("druid-username", "Druid username.").String(),
+        druidPassword:         cmd.Flag("druid-password", "Druid password.").String(),
+    }
+    r.RegisterCommand(server)
+    return server
+}
+
+// Run wires up every dependency (MySQL, experiment/feature/account gRPC
+// clients, the Druid querier, the token verifier and health checker), starts
+// the gRPC server in a goroutine, and blocks until ctx is cancelled.
+// Clients are closed and the server stopped via the deferred calls on return.
+// NOTE(review): the `metrics` parameter shadows the imported `metrics`
+// package inside this function body — consider renaming for clarity.
+func (s *server) Run(ctx context.Context, metrics metrics.Metrics, logger *zap.Logger) error {
+    registerer := metrics.DefaultRegisterer()
+
+    mysqlClient, err := s.createMySQLClient(ctx, registerer, logger)
+    if err != nil {
+        return err
+    }
+    defer mysqlClient.Close()
+
+    // Per-RPC credentials derived from the service token are shared by all
+    // outgoing service clients below.
+    creds, err := rpcclient.NewPerRPCCredentials(*s.serviceTokenPath)
+    if err != nil {
+        return err
+    }
+
+    experimentClient, err := experimentclient.NewClient(*s.experimentService, *s.certPath,
+        rpcclient.WithPerRPCCredentials(creds),
+        rpcclient.WithDialTimeout(30*time.Second),
+        rpcclient.WithBlock(),
+        rpcclient.WithMetrics(registerer),
+        rpcclient.WithLogger(logger),
+    )
+    if err != nil {
+        return err
+    }
+    defer experimentClient.Close()
+
+    featureClient, err := featureclient.NewClient(*s.featureService, *s.certPath,
+        rpcclient.WithPerRPCCredentials(creds),
+        rpcclient.WithDialTimeout(30*time.Second),
+        rpcclient.WithBlock(),
+        rpcclient.WithMetrics(registerer),
+        rpcclient.WithLogger(logger),
+    )
+    if err != nil {
+        return err
+    }
+    defer featureClient.Close()
+
+    accountClient, err := accountclient.NewClient(*s.accountService, *s.certPath,
+        rpcclient.WithPerRPCCredentials(creds),
+        rpcclient.WithDialTimeout(30*time.Second),
+        rpcclient.WithBlock(),
+        rpcclient.WithMetrics(registerer),
+        rpcclient.WithLogger(logger),
+    )
+    if err != nil {
+        return err
+    }
+    defer accountClient.Close()
+
+    druidQuerier, err := s.createDruidQuerier(ctx, logger)
+    if err != nil {
+        logger.Error("Failed to create druid querier", zap.Error(err))
+        return err
+    }
+
+    service := api.NewEventCounterService(
+        mysqlClient,
+        experimentClient,
+        featureClient,
+        accountClient,
+        druidQuerier,
+        registerer,
+        logger,
+    )
+
+    verifier, err := token.NewVerifier(*s.oauthKeyPath, *s.oauthIssuer, *s.oauthClientID)
+    if err != nil {
+        return err
+    }
+
+    healthChecker := health.NewGrpcChecker(
+        health.WithTimeout(time.Second),
+        health.WithCheck("metrics", metrics.Check),
+    )
+    go healthChecker.Run(ctx)
+
+    server := rpc.NewServer(service, *s.certPath, *s.keyPath,
+        rpc.WithPort(*s.port),
+        rpc.WithVerifier(verifier),
+        rpc.WithMetrics(registerer),
+        rpc.WithLogger(logger),
+        rpc.WithService(healthChecker),
+        rpc.WithHandler("/health", healthChecker),
+    )
+    defer server.Stop(10 * time.Second)
+    go server.Run()
+
+    <-ctx.Done()
+    return nil
+}
+
+// createMySQLClient opens a MySQL client using the parsed flags, bounded by
+// a 10s connection timeout.
+func (s *server) createMySQLClient(
+    ctx context.Context,
+    registerer metrics.Registerer,
+    logger *zap.Logger,
+) (mysql.Client, error) {
+    ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
+    defer cancel()
+    return mysql.NewClient(
+        ctx,
+        *s.mysqlUser, *s.mysqlPass, *s.mysqlHost,
+        *s.mysqlPort,
+        *s.mysqlDBName,
+        mysql.WithLogger(logger),
+        mysql.WithMetrics(registerer),
+    )
+}
+
+// createDruidQuerier builds a Druid broker client (5s timeout) and wraps it
+// in a druid.Querier using the configured datasource prefix.
+func (s *server) createDruidQuerier(ctx context.Context, logger *zap.Logger) (druid.Querier, error) {
+    ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
+    defer cancel()
+    brokerClient, err := storagedruid.NewBrokerClient(ctx, *s.druidURL, *s.druidUsername, *s.druidPassword)
+    if err != nil {
+        logger.Error("Failed to create druid broker client", zap.Error(err))
+        return nil, err
+    }
+    return druid.NewDruidQuerier(brokerClient, *s.druidDatasourcePrefix, druid.WithLogger(logger)), nil
+}
diff --git a/pkg/eventcounter/domain/BUILD.bazel b/pkg/eventcounter/domain/BUILD.bazel
new file mode 100644
index 000000000..910ff51d9
--- /dev/null
+++ b/pkg/eventcounter/domain/BUILD.bazel
@@ -0,0 +1,9 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "go_default_library",
+    srcs = ["experiment_result.go"],
+    importpath = "github.com/bucketeer-io/bucketeer/pkg/eventcounter/domain",
+    visibility = ["//visibility:public"],
+    deps = ["//proto/eventcounter:go_default_library"],
+)
diff --git a/pkg/eventcounter/domain/experiment_result.go b/pkg/eventcounter/domain/experiment_result.go
new file mode 100644
index 000000000..d7bc24b0f
--- /dev/null
+++ b/pkg/eventcounter/domain/experiment_result.go
@@ -0,0 +1,23 @@
+// Copyright 2022 The Bucketeer Authors.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package domain + +import ( + eventcounterproto "github.com/bucketeer-io/bucketeer/proto/eventcounter" +) + +type ExperimentResult struct { + *eventcounterproto.ExperimentResult +} diff --git a/pkg/eventcounter/druid/BUILD.bazel b/pkg/eventcounter/druid/BUILD.bazel new file mode 100644 index 000000000..868a336aa --- /dev/null +++ b/pkg/eventcounter/druid/BUILD.bazel @@ -0,0 +1,33 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "querier.go", + "query.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/eventcounter/druid", + visibility = ["//visibility:public"], + deps = [ + "//pkg/storage/druid:go_default_library", + "//proto/eventcounter:go_default_library", + "@com_github_ca_dp_godruid//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "querier_test.go", + "query_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//proto/eventcounter:go_default_library", + "@com_github_ca_dp_godruid//:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + ], +) diff --git a/pkg/eventcounter/druid/mock/BUILD.bazel b/pkg/eventcounter/druid/mock/BUILD.bazel new file mode 100644 index 000000000..3b7b55835 
--- /dev/null +++ b/pkg/eventcounter/druid/mock/BUILD.bazel @@ -0,0 +1,12 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["querier.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/eventcounter/druid/mock", + visibility = ["//visibility:public"], + deps = [ + "//proto/eventcounter:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + ], +) diff --git a/pkg/eventcounter/druid/mock/querier.go b/pkg/eventcounter/druid/mock/querier.go new file mode 100644 index 000000000..ebf04ac41 --- /dev/null +++ b/pkg/eventcounter/druid/mock/querier.go @@ -0,0 +1,132 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: querier.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + time "time" + + gomock "github.com/golang/mock/gomock" + + eventcounter "github.com/bucketeer-io/bucketeer/proto/eventcounter" +) + +// MockQuerier is a mock of Querier interface. +type MockQuerier struct { + ctrl *gomock.Controller + recorder *MockQuerierMockRecorder +} + +// MockQuerierMockRecorder is the mock recorder for MockQuerier. +type MockQuerierMockRecorder struct { + mock *MockQuerier +} + +// NewMockQuerier creates a new mock instance. +func NewMockQuerier(ctrl *gomock.Controller) *MockQuerier { + mock := &MockQuerier{ctrl: ctrl} + mock.recorder = &MockQuerierMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockQuerier) EXPECT() *MockQuerierMockRecorder { + return m.recorder +} + +// QueryCount mocks base method. 
+func (m *MockQuerier) QueryCount(ctx context.Context, environmentNamespace string, startAt, endAt time.Time, goalID, featureID string, featureVersion int32, reason string, segments []string, filters []*eventcounter.Filter) (*eventcounter.Row, []*eventcounter.Row, error) {
+    m.ctrl.T.Helper()
+    ret := m.ctrl.Call(m, "QueryCount", ctx, environmentNamespace, startAt, endAt, goalID, featureID, featureVersion, reason, segments, filters)
+    ret0, _ := ret[0].(*eventcounter.Row)
+    ret1, _ := ret[1].([]*eventcounter.Row)
+    ret2, _ := ret[2].(error)
+    return ret0, ret1, ret2
+}
+
+// QueryCount indicates an expected call of QueryCount.
+func (mr *MockQuerierMockRecorder) QueryCount(ctx, environmentNamespace, startAt, endAt, goalID, featureID, featureVersion, reason, segments, filters interface{}) *gomock.Call {
+    mr.mock.ctrl.T.Helper()
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "QueryCount", reflect.TypeOf((*MockQuerier)(nil).QueryCount), ctx, environmentNamespace, startAt, endAt, goalID, featureID, featureVersion, reason, segments, filters)
+}
+
+// QueryEvaluationCount mocks base method.
+func (m *MockQuerier) QueryEvaluationCount(ctx context.Context, environmentNamespace string, startAt, endAt time.Time, featureID string, featureVersion int32, reason string, segments []string, filters []*eventcounter.Filter) (*eventcounter.Row, []*eventcounter.Row, error) {
+    m.ctrl.T.Helper()
+    ret := m.ctrl.Call(m, "QueryEvaluationCount", ctx, environmentNamespace, startAt, endAt, featureID, featureVersion, reason, segments, filters)
+    ret0, _ := ret[0].(*eventcounter.Row)
+    ret1, _ := ret[1].([]*eventcounter.Row)
+    ret2, _ := ret[2].(error)
+    return ret0, ret1, ret2
+}
+
+// QueryEvaluationCount indicates an expected call of QueryEvaluationCount.
+func (mr *MockQuerierMockRecorder) QueryEvaluationCount(ctx, environmentNamespace, startAt, endAt, featureID, featureVersion, reason, segments, filters interface{}) *gomock.Call {
+    mr.mock.ctrl.T.Helper()
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "QueryEvaluationCount", reflect.TypeOf((*MockQuerier)(nil).QueryEvaluationCount), ctx, environmentNamespace, startAt, endAt, featureID, featureVersion, reason, segments, filters)
+}
+
+// QueryEvaluationTimeseriesCount mocks base method.
+func (m *MockQuerier) QueryEvaluationTimeseriesCount(ctx context.Context, environmentNamespace string, startAt, endAt time.Time, featureID string, featureVersion int32, variationID string) (map[string]*eventcounter.VariationTimeseries, error) {
+    m.ctrl.T.Helper()
+    ret := m.ctrl.Call(m, "QueryEvaluationTimeseriesCount", ctx, environmentNamespace, startAt, endAt, featureID, featureVersion, variationID)
+    ret0, _ := ret[0].(map[string]*eventcounter.VariationTimeseries)
+    ret1, _ := ret[1].(error)
+    return ret0, ret1
+}
+
+// QueryEvaluationTimeseriesCount indicates an expected call of QueryEvaluationTimeseriesCount.
+func (mr *MockQuerierMockRecorder) QueryEvaluationTimeseriesCount(ctx, environmentNamespace, startAt, endAt, featureID, featureVersion, variationID interface{}) *gomock.Call {
+    mr.mock.ctrl.T.Helper()
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "QueryEvaluationTimeseriesCount", reflect.TypeOf((*MockQuerier)(nil).QueryEvaluationTimeseriesCount), ctx, environmentNamespace, startAt, endAt, featureID, featureVersion, variationID)
+}
+
+// QueryGoalCount mocks base method.
+func (m *MockQuerier) QueryGoalCount(ctx context.Context, environmentNamespace string, startAt, endAt time.Time, goalID, featureID string, featureVersion int32, reason string, segments []string, filters []*eventcounter.Filter) (*eventcounter.Row, []*eventcounter.Row, error) {
+    m.ctrl.T.Helper()
+    ret := m.ctrl.Call(m, "QueryGoalCount", ctx, environmentNamespace, startAt, endAt, goalID, featureID, featureVersion, reason, segments, filters)
+    ret0, _ := ret[0].(*eventcounter.Row)
+    ret1, _ := ret[1].([]*eventcounter.Row)
+    ret2, _ := ret[2].(error)
+    return ret0, ret1, ret2
+}
+
+// QueryGoalCount indicates an expected call of QueryGoalCount.
+func (mr *MockQuerierMockRecorder) QueryGoalCount(ctx, environmentNamespace, startAt, endAt, goalID, featureID, featureVersion, reason, segments, filters interface{}) *gomock.Call {
+    mr.mock.ctrl.T.Helper()
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "QueryGoalCount", reflect.TypeOf((*MockQuerier)(nil).QueryGoalCount), ctx, environmentNamespace, startAt, endAt, goalID, featureID, featureVersion, reason, segments, filters)
+}
+
+// QuerySegmentMetadata mocks base method.
+func (m *MockQuerier) QuerySegmentMetadata(ctx context.Context, environmentNamespace, dataType string) ([]string, error) {
+    m.ctrl.T.Helper()
+    ret := m.ctrl.Call(m, "QuerySegmentMetadata", ctx, environmentNamespace, dataType)
+    ret0, _ := ret[0].([]string)
+    ret1, _ := ret[1].(error)
+    return ret0, ret1
+}
+
+// QuerySegmentMetadata indicates an expected call of QuerySegmentMetadata.
+func (mr *MockQuerierMockRecorder) QuerySegmentMetadata(ctx, environmentNamespace, dataType interface{}) *gomock.Call {
+    mr.mock.ctrl.T.Helper()
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "QuerySegmentMetadata", reflect.TypeOf((*MockQuerier)(nil).QuerySegmentMetadata), ctx, environmentNamespace, dataType)
+}
+
+// QueryUserCount mocks base method.
+func (m *MockQuerier) QueryUserCount(ctx context.Context, environmentNamespace string, startAt, endAt time.Time) (*eventcounter.Row, []*eventcounter.Row, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "QueryUserCount", ctx, environmentNamespace, startAt, endAt) + ret0, _ := ret[0].(*eventcounter.Row) + ret1, _ := ret[1].([]*eventcounter.Row) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// QueryUserCount indicates an expected call of QueryUserCount. +func (mr *MockQuerierMockRecorder) QueryUserCount(ctx, environmentNamespace, startAt, endAt interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "QueryUserCount", reflect.TypeOf((*MockQuerier)(nil).QueryUserCount), ctx, environmentNamespace, startAt, endAt) +} diff --git a/pkg/eventcounter/druid/querier.go b/pkg/eventcounter/druid/querier.go new file mode 100644 index 000000000..a008c2a15 --- /dev/null +++ b/pkg/eventcounter/druid/querier.go @@ -0,0 +1,650 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE
+package druid
+
+import (
+    "context"
+    "encoding/json"
+    "fmt"
+    "regexp"
+    "strings"
+    "time"
+
+    "github.com/ca-dp/godruid"
+    "go.uber.org/zap"
+
+    storagedruid "github.com/bucketeer-io/bucketeer/pkg/storage/druid"
+    ecproto "github.com/bucketeer-io/bucketeer/proto/eventcounter"
+)
+
+// Druid datasource data types and the human-readable column headers used in
+// the tabular results returned by the queriers in this package.
+const (
+    DataTypeEvaluationEvents = "evaluation_events"
+    DataTypeGoalEvents       = "goal_events"
+    DataTypeUserEvents       = "user_events"
+    ColumnVariation          = "Variation"
+    ColumnFeatureVersion     = "Feature version"
+    ColumnCVR                = "Conversion rate"
+    ColumnGoalUser           = "Goal user"
+    ColumnGoalTotal          = "Goal total"
+    ColumnGoalValueMean      = "Goal value mean"
+    ColumnGoalValueTotal     = "Goal value total"
+    ColumnGoalValueVariance  = "Goal value variance"
+    ColumnEvaluationUser     = "Evaluation user"
+    ColumnEvaluationTotal    = "Evaluation total"
+    ColumnUser               = "User"
+    ColumnUserCount          = "User count"
+    ColumnUserTotal          = "User total"
+)
+
+var (
+    // variationRegex captures the last segment of a colon-separated key.
+    // NOTE(review): presumably keys look like "<prefix>:<middle>:<variation>" —
+    // confirm against where the keys are produced.
+    variationRegex = regexp.MustCompile(`^.*:.*:(.*)$`)
+)
+
+// Querier runs event-count aggregation queries against Druid.
+// Count methods return a header row followed by data rows.
+type Querier interface {
+    // QuerySegmentMetadata returns the user-data segment keys available for
+    // dataType in the given environment over recent data.
+    QuerySegmentMetadata(ctx context.Context, environmentNamespace, dataType string) ([]string, error)
+    // QueryGoalCount aggregates goal events for a goal/feature pair.
+    QueryGoalCount(
+        ctx context.Context,
+        environmentNamespace string,
+        startAt, endAt time.Time,
+        goalID, featureID string,
+        featureVersion int32,
+        reason string,
+        segments []string,
+        filters []*ecproto.Filter,
+    ) (*ecproto.Row, []*ecproto.Row, error)
+    // QueryEvaluationCount aggregates evaluation events for a feature.
+    QueryEvaluationCount(
+        ctx context.Context,
+        environmentNamespace string,
+        startAt, endAt time.Time,
+        featureID string,
+        featureVersion int32,
+        reason string,
+        segments []string,
+        filters []*ecproto.Filter,
+    ) (*ecproto.Row, []*ecproto.Row, error)
+    // QueryEvaluationTimeseriesCount returns per-variation timeseries counts.
+    QueryEvaluationTimeseriesCount(
+        ctx context.Context,
+        environmentNamespace string,
+        startAt, endAt time.Time,
+        featureID string,
+        featureVersion int32,
+        variationID string,
+    ) (map[string]*ecproto.VariationTimeseries, error)
+    // QueryUserCount aggregates user events for an environment.
+    QueryUserCount(
+        ctx context.Context,
+        environmentNamespace string,
+        startAt, endAt time.Time,
+    ) (*ecproto.Row, []*ecproto.Row, error)
+    // QueryCount aggregates events for an optional goal/feature pair.
+    QueryCount(
+        ctx context.Context,
+        environmentNamespace string,
+        startAt, endAt time.Time,
+        goalID, featureID string,
+        featureVersion int32,
+        reason string,
+        segments []string,
+        filters []*ecproto.Filter,
+    ) (*ecproto.Row, []*ecproto.Row, error)
+}
+
+// options holds optional dependencies for druidQuerier.
+type options struct {
+    logger *zap.Logger
+}
+
+// Option configures NewDruidQuerier.
+type Option func(*options)
+
+// WithLogger sets the logger used by the querier.
+func WithLogger(l *zap.Logger) Option {
+    return func(opts *options) {
+        opts.logger = l
+    }
+}
+
+type druidQuerier struct {
+    brokerClient     *storagedruid.BrokerClient
+    datasourcePrefix string
+    opts             *options
+    logger           *zap.Logger
+}
+
+// NewDruidQuerier returns a Querier backed by the given Druid broker client.
+// Datasource names are derived from datasourcePrefix and the data type.
+func NewDruidQuerier(
+    brokerClient *storagedruid.BrokerClient,
+    datasourcePrefix string,
+    opts ...Option,
+) Querier {
+    dopts := &options{
+        logger: zap.NewNop(),
+    }
+    for _, opt := range opts {
+        opt(dopts)
+    }
+    return &druidQuerier{
+        brokerClient:     brokerClient,
+        datasourcePrefix: datasourcePrefix,
+        opts:             dopts,
+        logger:           dopts.logger.Named("druid-querier"),
+    }
+}
+
+func (q *druidQuerier) QuerySegmentMetadata(
+    ctx context.Context,
+    environmentNamespace, dataType string,
+) ([]string, error) {
+    datasource := storagedruid.Datasource(q.datasourcePrefix, dataType)
+    endAt := time.Now()
+    // Fetch metadata from last 7 days data.
+ startAt := endAt.Add(-7 * 24 * time.Hour) + query := querySegmentMetadata(datasource, startAt, endAt) + if err := q.brokerClient.Query(query, ""); err != nil { + b, _ := json.Marshal(query) + q.logger.Error("Failed to query segment metadata", zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + zap.String("datastore", datasource), + zap.String("query", string(b))) + return nil, err + } + data := []string{} + if len(query.QueryResult) == 0 { + return data, nil + } + userDataRegex := regexp.MustCompile(userDataPattern(environmentNamespace)) + for k := range query.QueryResult[0].Columns { + if userDataRegex.MatchString(k) { + data = append(data, removeEnvFromUserData(k, userDataRegex)) + } + } + return data, nil +} + +func (q *druidQuerier) QueryGoalCount( + ctx context.Context, + environmentNamespace string, + startAt, endAt time.Time, + goalID, featureID string, + featureVersion int32, + reason string, + segments []string, + filters []*ecproto.Filter, +) (*ecproto.Row, []*ecproto.Row, error) { + datasource := storagedruid.Datasource(q.datasourcePrefix, DataTypeGoalEvents) + envSegments := convToEnvSegments(environmentNamespace, segments) + envFilters := convToEnvFilters(environmentNamespace, filters) + query := queryGoalGroupBy( + datasource, + startAt, + endAt, + environmentNamespace, + goalID, + featureID, + featureVersion, + reason, + envSegments, + envFilters, + ) + if err := q.brokerClient.Query(query, ""); err != nil { + b, _ := json.Marshal(query) + q.logger.Error("Failed to query goal counts", zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + zap.String("datastore", datasource), + zap.String("query", string(b))) + return nil, nil, err + } + columns := generateColumns( + featureID, + envSegments, + []string{ + ColumnGoalUser, + ColumnGoalTotal, + ColumnGoalValueTotal, + ColumnGoalValueMean, + ColumnGoalValueVariance, + }, + ) + headers := q.convHeaders(environmentNamespace, columns) + rows, errs := 
q.convToTable(query.QueryResult, columns) + if len(errs) > 0 { + q.logger.Error("Failed to convert query result to table", + zap.Errors("errs", errs), + zap.String("environmentNamespace", environmentNamespace), + zap.String("goalID", goalID), + zap.String("featureID", featureID), + zap.Strings("segments", segments), + zap.Time("startAt", startAt), + zap.Time("endAt", endAt)) + } + return headers, rows, nil +} + +func (q *druidQuerier) QueryEvaluationCount( + ctx context.Context, + environmentNamespace string, + startAt, endAt time.Time, + featureID string, + featureVersion int32, + reason string, + segments []string, + filters []*ecproto.Filter, +) (*ecproto.Row, []*ecproto.Row, error) { + datasource := storagedruid.Datasource(q.datasourcePrefix, DataTypeEvaluationEvents) + envSegments := convToEnvSegments(environmentNamespace, segments) + envFilters := convToEnvFilters(environmentNamespace, filters) + query := queryEvaluationGroupBy( + datasource, + startAt, + endAt, + environmentNamespace, + featureID, + featureVersion, + reason, + envSegments, + envFilters, + ) + if err := q.brokerClient.Query(query, ""); err != nil { + b, _ := json.Marshal(query) + q.logger.Error("Failed to query evaluation counts", zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + zap.String("datastore", datasource), + zap.String("query", string(b))) + return nil, nil, err + } + columns := generateColumns(featureID, envSegments, []string{ColumnEvaluationUser, ColumnEvaluationTotal}) + headers := q.convHeaders(environmentNamespace, columns) + rows, errs := q.convToTable(query.QueryResult, columns) + if len(errs) > 0 { + q.logger.Error("Failed to convert query result to table", + zap.Errors("errs", errs), + zap.String("environmentNamespace", environmentNamespace), + zap.String("featureID", featureID), + zap.Strings("segments", segments), + zap.Time("startAt", startAt), + zap.Time("endAt", endAt)) + } + return headers, rows, nil +} + +func (q *druidQuerier) 
QueryEvaluationTimeseriesCount( + ctx context.Context, + environmentNamespace string, + startAt, endAt time.Time, + featureID string, + featureVersion int32, + variationID string, +) (map[string]*ecproto.VariationTimeseries, error) { + datasource := storagedruid.Datasource(q.datasourcePrefix, DataTypeEvaluationEvents) + query := queryEvaluationTimeseries( + datasource, + startAt, + endAt, + environmentNamespace, + featureID, + featureVersion, + variationID, + ) + if err := q.brokerClient.Query(query, ""); err != nil { + b, _ := json.Marshal(query) + q.logger.Error("Failed to query evaluation counts", zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + zap.String("datastore", datasource), + zap.String("query", string(b))) + return nil, err + } + ts, errs := q.convToTimeseries( + variationID, + query.QueryResult, + []string{ColumnEvaluationTotal, ColumnEvaluationUser}, + ) + if len(errs) > 0 { + q.logger.Error("Failed to convert query result to table", + zap.Errors("errs", errs), + zap.String("environmentNamespace", environmentNamespace), + zap.String("featureID", featureID), + zap.Int32("featureVersion", featureVersion), + zap.String("variationID", variationID), + zap.Time("startAt", startAt), + zap.Time("endAt", endAt)) + } + return ts, nil +} + +func (q *druidQuerier) QueryUserCount( + ctx context.Context, + environmentNamespace string, + startAt, endAt time.Time, +) (*ecproto.Row, []*ecproto.Row, error) { + datasource := storagedruid.Datasource(q.datasourcePrefix, DataTypeUserEvents) + query := queryUserGroupBy(datasource, environmentNamespace, startAt, endAt) + if err := q.brokerClient.Query(query, ""); err != nil { + b, _ := json.Marshal(query) + q.logger.Error("Failed to query user count", zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + zap.String("datastore", datasource), + zap.String("query", string(b))) + return nil, nil, err + } + columns := []string{ColumnUserTotal, ColumnUserCount} + headers := 
q.convHeaders(environmentNamespace, columns) + rows, errs := q.convToTable(query.QueryResult, columns) + if len(errs) > 0 { + q.logger.Error("Failed to convert query result to table for user count", + zap.Errors("errs", errs), + zap.String("environmentNamespace", environmentNamespace), + zap.Time("startAt", startAt), + zap.Time("endAt", endAt)) + } + return headers, rows, nil +} + +func (q *druidQuerier) QueryCount( + ctx context.Context, + environmentNamespace string, + startAt, endAt time.Time, + goalID string, + featureID string, + featureVersion int32, + reason string, + segments []string, + filters []*ecproto.Filter, +) (*ecproto.Row, []*ecproto.Row, error) { + goalHeader, goalRows, err := q.QueryGoalCount( + ctx, + environmentNamespace, + startAt, + endAt, + goalID, + featureID, + featureVersion, + reason, + segments, + filters, + ) + if err != nil { + return nil, nil, err + } + // If featureID is not provided, return goal count. + if featureID == "" { + return goalHeader, goalRows, nil + } + evalHeader, evalRows, err := q.QueryEvaluationCount( + ctx, + environmentNamespace, + startAt, + endAt, + featureID, + featureVersion, + reason, + segments, + filters, + ) + if err != nil { + return nil, nil, err + } + headers, rows := convToResult(ctx, evalHeader, goalHeader, evalRows, goalRows, segments) + return headers, rows, nil +} + +func (q *druidQuerier) convToTable(queryResult []godruid.GroupbyItem, columns []string) ([]*ecproto.Row, []error) { + rows := []*ecproto.Row{} + errs := []error{} + for _, item := range queryResult { + cells := []*ecproto.Cell{} + for _, column := range columns { + value, ok := item.Event[column] + if !ok { + errs = append(errs, fmt.Errorf("Column %s does not exist", column)) + cells = append(cells, &ecproto.Cell{Type: ecproto.Cell_STRING, Value: ""}) + continue + } + var cell *ecproto.Cell + switch value := value.(type) { + case float64: + cell = &ecproto.Cell{Type: ecproto.Cell_DOUBLE, ValueDouble: value} + case string: + if column 
== ColumnVariation { + groups := variationRegex.FindStringSubmatch(value) + if groups != nil { + cell = &ecproto.Cell{Type: ecproto.Cell_STRING, Value: groups[1]} + } else { + cell = &ecproto.Cell{Type: ecproto.Cell_STRING, Value: value} + } + } else { + cell = &ecproto.Cell{Type: ecproto.Cell_STRING, Value: value} + } + default: + cell = &ecproto.Cell{Type: ecproto.Cell_STRING, Value: ""} + errs = append(errs, fmt.Errorf("Value %v type of column %s is unknown", value, column)) + } + cells = append(cells, cell) + } + rows = append(rows, &ecproto.Row{Cells: cells}) + } + return rows, errs +} + +func (q *druidQuerier) convHeaders(environmentNamespace string, columns []string) *ecproto.Row { + userDataRegex := regexp.MustCompile(userDataPattern(environmentNamespace)) + headers := &ecproto.Row{Cells: []*ecproto.Cell{}} + for _, column := range columns { + if userDataRegex.MatchString(column) { + headers.Cells = append( + headers.Cells, + &ecproto.Cell{Type: ecproto.Cell_STRING, Value: removeEnvFromUserData(column, userDataRegex)}, + ) + continue + } + headers.Cells = append(headers.Cells, &ecproto.Cell{Type: ecproto.Cell_STRING, Value: column}) + } + return headers +} + +func generateColumns(featureID string, segments, valueColumns []string) []string { + columns := make([]string, len(valueColumns)) + copy(columns, valueColumns) + if featureID != "" { + columns = append([]string{ColumnVariation}, valueColumns...) + } + return append(segments, columns...) 
+} + +func convToResult( + ctx context.Context, + evalHeader, goalHeader *ecproto.Row, + evaluationRows, goalRows []*ecproto.Row, + segments []string, +) (*ecproto.Row, []*ecproto.Row) { + evalVarIdx := cellIndex(evalHeader, ColumnVariation) + evalMap := make(map[string]*ecproto.Row, len(evaluationRows)) + for _, evalRow := range evaluationRows { + key := newKey(evalRow.Cells[evalVarIdx].Value, segmentValuesNew(evalHeader, evalRow, segments)) + evalMap[key] = evalRow + } + + evalUUIdx := cellIndex(evalHeader, ColumnEvaluationUser) + goalUUIdx := cellIndex(goalHeader, ColumnGoalUser) + goalVarIdx := cellIndex(goalHeader, ColumnVariation) + genRows := []*ecproto.Row{} + for _, goalRow := range goalRows { + key := newKey(goalRow.Cells[goalVarIdx].Value, segmentValuesNew(goalHeader, goalRow, segments)) + evalRow, ok := evalMap[key] + if !ok { + continue + } + cells := []*ecproto.Cell{} + + // Copy evaluation count. + cells = append(cells, evalRow.Cells...) + + // Copy goal count. + for i, goalCell := range goalRow.Cells { + excludes := append(segments, ColumnVariation) + if contains(excludes, goalHeader.Cells[i].Value) { + continue + } + cells = append(cells, goalCell) + } + + // Calculate CVR. 
+ var cvr float64 + if evalRow.Cells[evalUUIdx].ValueDouble > float64(0) { + euu := evalRow.Cells[evalUUIdx].ValueDouble + guu := goalRow.Cells[goalUUIdx].ValueDouble + cvr = guu / euu * 100 + } + cells = append(cells, &ecproto.Cell{Type: ecproto.Cell_DOUBLE, ValueDouble: cvr}) + + genRows = append(genRows, &ecproto.Row{Cells: cells}) + } + genHeaders := joinHeaders(evalHeader, goalHeader, segments) + return genHeaders, genRows +} + +func contains(haystack []string, needle string) bool { + for _, h := range haystack { + if h == needle { + return true + } + } + return false +} + +func segmentValuesNew(header, row *ecproto.Row, segments []string) []string { + segmentIdx := []int{} + for _, s := range segments { + segmentIdx = append(segmentIdx, cellIndex(header, s)) + } + ss := []string{} + for _, i := range segmentIdx { + ss = append(ss, row.Cells[i].Value) + } + return ss +} + +func newKey(variation string, segments []string) string { + return strings.Join(append(segments, variation), ",") +} + +func cellIndex(header *ecproto.Row, value string) int { + for i, cell := range header.Cells { + if cell.Value == value { + return i + } + } + return -1 +} + +func joinHeaders(evalHeader, goalHeader *ecproto.Row, segments []string) *ecproto.Row { + newHeader := []*ecproto.Cell{} + newHeader = append(newHeader, evalHeader.Cells...) 
+ excludes := append(segments, ColumnVariation) + for _, h := range goalHeader.Cells { + if contains(excludes, h.Value) { + continue + } + newHeader = append(newHeader, h) + } + newHeader = append(newHeader, &ecproto.Cell{Value: ColumnCVR}) + return &ecproto.Row{Cells: newHeader} +} + +func userDataPattern(environmentNamespace string) string { + if environmentNamespace == "" { + return `^user\.data\.(.*)$` + } + return fmt.Sprintf(`^%s\.user\.data\.(.*)$`, environmentNamespace) +} + +func removeEnvFromUserData(key string, r *regexp.Regexp) string { + group := r.FindStringSubmatch(key) + return fmt.Sprintf("user.data.%s", group[1]) +} + +func convToEnvSegments(environmentNamespace string, segments []string) []string { + if environmentNamespace == "" { + return segments + } + userDataRegex := regexp.MustCompile(userDataPattern("")) + sgmt := []string{} + for _, s := range segments { + if userDataRegex.MatchString(s) { + sgmt = append(sgmt, fmt.Sprintf("%s.%s", environmentNamespace, s)) + continue + } + sgmt = append(sgmt, s) + } + return sgmt +} + +func convToEnvFilters(environmentNamespace string, filters []*ecproto.Filter) []*ecproto.Filter { + fls := []*ecproto.Filter{} + userDataRegex := regexp.MustCompile(userDataPattern("")) + for _, f := range filters { + key := f.Key + if environmentNamespace != "" && userDataRegex.MatchString(f.Key) { + key = fmt.Sprintf("%s.%s", environmentNamespace, key) + } + switch f.Operator { + case ecproto.Filter_EQUALS: + fls = append(fls, &ecproto.Filter{ + Operator: ecproto.Filter_EQUALS, + Key: key, + Values: f.Values, + }) + } + } + return fls +} + +func (q *druidQuerier) convToTimeseries( + variationID string, + godruidTS []godruid.Timeseries, + columns []string, +) (map[string]*ecproto.VariationTimeseries, []error) { + ts := []int64{} + values := map[string][]float64{} + errs := []error{} + for _, gdts := range godruidTS { + t, err := time.Parse(time.RFC3339, gdts.Timestamp) + if err != nil { + errs = append(errs, 
fmt.Errorf("time %s cannot be parsed", gdts.Timestamp)) + return nil, errs + } + ts = append(ts, t.Unix()) + for _, column := range columns { + value, ok := gdts.Result[column] + if !ok { + errs = append(errs, fmt.Errorf("Column %s does not exist", column)) + values[column] = append(values[column], 0) + continue + } + switch value := value.(type) { + case float64: + values[column] = append(values[column], value) + default: + values[column] = append(values[column], 0) + errs = append(errs, fmt.Errorf("Value %s type is unknown", value)) + } + } + } + variationTS := map[string]*ecproto.VariationTimeseries{} + for column, vals := range values { + variationTS[column] = &ecproto.VariationTimeseries{ + VariationId: variationID, + Timeseries: &ecproto.Timeseries{ + Timestamps: ts, + Values: vals, + }, + } + } + return variationTS, errs +} diff --git a/pkg/eventcounter/druid/querier_test.go b/pkg/eventcounter/druid/querier_test.go new file mode 100644 index 000000000..e3f64bd48 --- /dev/null +++ b/pkg/eventcounter/druid/querier_test.go @@ -0,0 +1,153 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package druid + +import ( + "regexp" + "testing" + + "github.com/stretchr/testify/assert" + + ecproto "github.com/bucketeer-io/bucketeer/proto/eventcounter" +) + +func TestConvToEnvSegments(t *testing.T) { + t.Parallel() + + patterns := map[string]struct { + inputNamespace string + inputSegments []string + expected []string + }{ + "empty environment namespace": { + inputNamespace: "", + inputSegments: []string{ + "tag", + "user.data.sgmt", + }, + expected: []string{ + "tag", + "user.data.sgmt", + }, + }, + "non empty environment namespace": { + inputNamespace: "ns", + inputSegments: []string{ + "tag", + "user.data.sgmt", + }, + expected: []string{ + "tag", + "ns.user.data.sgmt", + }, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + actual := convToEnvSegments(p.inputNamespace, p.inputSegments) + assert.Equal(t, p.expected, actual) + }) + } +} + +func TestConvToEnvFilters(t *testing.T) { + t.Parallel() + + patterns := map[string]struct { + inputNamespace string + inputFilters []*ecproto.Filter + expected []*ecproto.Filter + }{ + "empty environment namespace": { + inputNamespace: "", + inputFilters: []*ecproto.Filter{ + {Key: "tag", Operator: ecproto.Filter_EQUALS, Values: []string{"t0"}}, + {Key: "user.data.sgmt", Operator: ecproto.Filter_EQUALS, Values: []string{"d0"}}, + }, + expected: []*ecproto.Filter{ + {Key: "tag", Operator: ecproto.Filter_EQUALS, Values: []string{"t0"}}, + {Key: "user.data.sgmt", Operator: ecproto.Filter_EQUALS, Values: []string{"d0"}}, + }, + }, + "non empty environment namespace": { + inputNamespace: "ns", + inputFilters: []*ecproto.Filter{ + {Key: "tag", Operator: ecproto.Filter_EQUALS, Values: []string{"t0"}}, + {Key: "user.data.sgmt", Operator: ecproto.Filter_EQUALS, Values: []string{"d0"}}, + }, + expected: []*ecproto.Filter{ + {Key: "tag", Operator: ecproto.Filter_EQUALS, Values: []string{"t0"}}, + {Key: "ns.user.data.sgmt", Operator: ecproto.Filter_EQUALS, Values: []string{"d0"}}, + }, + }, + } + for 
msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + actual := convToEnvFilters(p.inputNamespace, p.inputFilters) + assert.Equal(t, p.expected, actual) + }) + } +} + +func TestUserDataPattern(t *testing.T) { + t.Parallel() + + patterns := map[string]struct { + inputNamespace string + expected string + }{ + "empty environment namespace": { + inputNamespace: "", + expected: `^user\.data\.(.*)$`, + }, + "non empty environment namespace": { + inputNamespace: "ns", + expected: `^ns\.user\.data\.(.*)$`, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + actual := userDataPattern(p.inputNamespace) + assert.Equal(t, p.expected, actual) + }) + } +} + +func TestRemoveEnvFromUserData(t *testing.T) { + t.Parallel() + + patterns := map[string]struct { + inputKey string + inputRegexp *regexp.Regexp + expected string + }{ + "empty environment namespace": { + inputKey: "user.data.attr", + inputRegexp: regexp.MustCompile(userDataPattern("")), + expected: "user.data.attr", + }, + "non empty environment namespace": { + inputKey: "ns.user.data.attr", + inputRegexp: regexp.MustCompile(userDataPattern("ns")), + expected: "user.data.attr", + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + actual := removeEnvFromUserData(p.inputKey, p.inputRegexp) + assert.Equal(t, p.expected, actual) + }) + } +} diff --git a/pkg/eventcounter/druid/query.go b/pkg/eventcounter/druid/query.go new file mode 100644 index 000000000..a8e1b5ccf --- /dev/null +++ b/pkg/eventcounter/druid/query.go @@ -0,0 +1,236 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package druid + +import ( + "fmt" + "time" + + "github.com/ca-dp/godruid" + + ecproto "github.com/bucketeer-io/bucketeer/proto/eventcounter" +) + +const ( + intervalStr = "2006-01-02T15:04" +) + +func querySegmentMetadata(datasource string, startAt, endAt time.Time) *godruid.QuerySegmentMetadata { + query := &godruid.QuerySegmentMetadata{ + QueryType: godruid.SEGMENTMETADATA, + DataSource: godruid.DataSourceTable(datasource), + Intervals: toConvInterval(startAt, endAt), + Merge: "true", + } + return query +} + +func toConvInterval(start, end time.Time) string { + return fmt.Sprintf("%s/%s", start.Format(intervalStr), end.Format(intervalStr)) +} + +func queryGoalGroupBy( + datasource string, + startAt, endAt time.Time, + environmentNamespace string, + goalID string, + featureID string, + featureVersion int32, + reason string, + segments []string, + fls []*ecproto.Filter, +) *godruid.QueryGroupBy { + filters := []*godruid.Filter{} + innerDimensions := []godruid.DimSpec{} + outerDimensions := []godruid.DimSpec{} + limitColumns := []godruid.Column{} + filters = append(filters, godruid.FilterSelector("environmentNamespace", environmentNamespace)) + filters = append(filters, godruid.FilterSelector("goalId", goalID)) + filters = append(filters, convToDruidFilters(fls)...) 
+ innerDimensions = append(innerDimensions, godruid.DimDefault("userId", ColumnUser)) + for _, segment := range segments { + limitColumns = append(limitColumns, godruid.Column{Dimension: segment, Direction: godruid.DirectionASC}) + innerDimensions = append(innerDimensions, godruid.DimDefault(segment, segment)) + outerDimensions = append(outerDimensions, godruid.DimDefault(segment, segment)) + } + if featureID != "" { + filterEvaluationPattern := "" + if reason != "" { + filterEvaluationPattern = fmt.Sprintf("^%s:%d:.*:%s$", featureID, featureVersion, reason) + } else { + filterEvaluationPattern = fmt.Sprintf("^%s:%d:.*$", featureID, featureVersion) + } + innerEvaluationPattern := fmt.Sprintf("^%s:%d:.*$", featureID, featureVersion) + filters = append(filters, godruid.FilterRegex("evaluations", filterEvaluationPattern)) + innerDimensions = append(innerDimensions, evaluationsDim(innerEvaluationPattern)) + outerDimensions = append(outerDimensions, godruid.DimDefault(ColumnVariation, ColumnVariation)) + limitColumns = append(limitColumns, godruid.Column{Dimension: ColumnVariation, Direction: godruid.DirectionASC}) + } + innerQuery := &godruid.QueryGroupBy{ + QueryType: godruid.GROUPBY, + DataSource: godruid.DataSourceTable(datasource), + Intervals: toConvInterval(startAt, endAt), + Granularity: godruid.GranAll, + Filter: godruid.FilterAnd(filters...), + Dimensions: innerDimensions, + Aggregations: []godruid.Aggregation{ + *godruid.AggLongSum("count", "count"), + *godruid.AggDoubleSum("valueSum", "valueSum"), + }, + } + query := &godruid.QueryGroupBy{ + QueryType: "groupBy", + DataSource: godruid.DataSourceQuery(innerQuery), + Intervals: toConvInterval(startAt, endAt), + Granularity: godruid.GranAll, + LimitSpec: godruid.LimitDefault(10000, limitColumns), + Dimensions: outerDimensions, + Aggregations: []godruid.Aggregation{ + *godruid.AggLongSum(ColumnGoalTotal, "count"), + *godruid.AggDoubleSum(ColumnGoalValueTotal, "valueSum"), + *godruid.AggCount(ColumnGoalUser), + 
*godruid.AggRawJson( + fmt.Sprintf(`{ "type": "doubleMean", "name": "%s", "fieldName": "valueSum" }`, ColumnGoalValueMean), + ), + *godruid.ExtAggVariance(ColumnGoalValueVariance, "valueSum"), + }, + } + return query +} + +func queryEvaluationGroupBy( + datasource string, + startAt, endAt time.Time, + environmentNamespace string, + featureID string, + featureVersion int32, + reason string, + segments []string, + fls []*ecproto.Filter, +) *godruid.QueryGroupBy { + + filters := []*godruid.Filter{} + dimensions := []godruid.DimSpec{} + limitColumns := []godruid.Column{} + for _, segment := range segments { + limitColumns = append(limitColumns, godruid.Column{Dimension: segment, Direction: godruid.DirectionASC}) + dimensions = append(dimensions, godruid.DimDefault(segment, segment)) + } + filters = append(filters, godruid.FilterSelector("environmentNamespace", environmentNamespace)) + filters = append(filters, godruid.FilterSelector("featureId", featureID)) + if featureVersion == 0 { + dimensions = append(dimensions, godruid.DimDefault("featureVersion", ColumnFeatureVersion)) + } else { + filters = append(filters, godruid.FilterSelector("featureVersion", featureVersion)) + } + if reason != "" { + filters = append(filters, godruid.FilterSelector("reason", reason)) + } + filters = append(filters, convToDruidFilters(fls)...) 
+ dimensions = append(dimensions, godruid.DimDefault("variationId", ColumnVariation)) + limitColumns = append(limitColumns, godruid.Column{Dimension: ColumnVariation, Direction: godruid.DirectionASC}) + query := &godruid.QueryGroupBy{ + QueryType: godruid.GROUPBY, + DataSource: godruid.DataSourceTable(datasource), + Intervals: toConvInterval(startAt, endAt), + Granularity: godruid.GranAll, + Filter: godruid.FilterAnd(filters...), + LimitSpec: godruid.LimitDefault(10000, limitColumns), + Dimensions: dimensions, + Aggregations: []godruid.Aggregation{ + *godruid.AggLongSum(ColumnEvaluationTotal, "count"), + *godruid.AggRawJson( + fmt.Sprintf(`{ "type": "thetaSketch", "name": "%s", "fieldName": "userIdThetaSketch" }`, ColumnEvaluationUser), + ), + }, + } + return query +} + +func queryUserGroupBy(datasource, environmentNamespace string, startAt, endAt time.Time) *godruid.QueryGroupBy { + countFieldName := "count" + query := &godruid.QueryGroupBy{ + QueryType: godruid.GROUPBY, + DataSource: godruid.DataSourceTable(datasource), + Intervals: toConvInterval(startAt, endAt), + Granularity: godruid.GranAll, + Filter: godruid.FilterAnd(godruid.FilterSelector("environmentNamespace", environmentNamespace)), + Dimensions: []godruid.DimSpec{godruid.DimDefault("userId", ColumnUser)}, + Aggregations: []godruid.Aggregation{ + *godruid.AggCount(countFieldName), + }, + } + query = &godruid.QueryGroupBy{ + QueryType: godruid.GROUPBY, + DataSource: godruid.DataSourceQuery(query), + Intervals: toConvInterval(startAt, endAt), + Granularity: godruid.GranAll, + Aggregations: []godruid.Aggregation{ + *godruid.AggLongSum(ColumnUserTotal, countFieldName), + *godruid.AggCount(ColumnUserCount), + }, + } + return query +} + +func evaluationsDim(evaluationPattern string) *godruid.DimFiltered { + variationExFn := godruid.DimExFnRegex("^(.*):.*$") + variationDelegate := godruid.DimExtraction("evaluations", ColumnVariation, variationExFn) + return godruid.DimFilteredRegex(variationDelegate, 
evaluationPattern) +} + +func queryEvaluationTimeseries( + datasource string, + startAt, endAt time.Time, + environmentNamespace string, + featureID string, + featureVersion int32, + variationID string, +) *godruid.QueryTimeseries { + filters := []*godruid.Filter{} + filters = append(filters, godruid.FilterSelector("environmentNamespace", environmentNamespace)) + filters = append(filters, godruid.FilterSelector("featureId", featureID)) + if featureVersion != 0 { + filters = append(filters, godruid.FilterSelector("featureVersion", featureVersion)) + } + filters = append(filters, godruid.FilterSelector("variationId", variationID)) + query := &godruid.QueryTimeseries{ + QueryType: godruid.TIMESERIES, + DataSource: godruid.DataSourceTable(datasource), + Intervals: toConvInterval(startAt, endAt), + Granularity: godruid.GranPeriod("P1D", "Asia/Tokyo", ""), + Filter: godruid.FilterAnd(filters...), + Aggregations: []godruid.Aggregation{ + *godruid.AggLongSum(ColumnEvaluationTotal, "count"), + *godruid.AggRawJson( + fmt.Sprintf(`{ "type": "thetaSketch", "name": "%s", "fieldName": "userIdThetaSketch" }`, ColumnEvaluationUser), + ), + }, + } + return query +} + +func convToDruidFilters(filters []*ecproto.Filter) []*godruid.Filter { + fls := []*godruid.Filter{} + for _, f := range filters { + switch f.Operator { + case ecproto.Filter_EQUALS: + for _, v := range f.Values { + fls = append(fls, godruid.FilterSelector(f.Key, v)) + } + } + } + return fls +} diff --git a/pkg/eventcounter/druid/query_test.go b/pkg/eventcounter/druid/query_test.go new file mode 100644 index 000000000..02deba3ed --- /dev/null +++ b/pkg/eventcounter/druid/query_test.go @@ -0,0 +1,693 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package druid + +import ( + "fmt" + "testing" + "time" + + "github.com/ca-dp/godruid" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + ecproto "github.com/bucketeer-io/bucketeer/proto/eventcounter" +) + +func TestQuerySegmentMetadata(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + layout := "2006-01-02 15:04:05 -0700 MST" + t1, err := time.Parse(layout, "2014-01-17 23:02:03 +0000 UTC") + require.NoError(t, err) + t2, err := time.Parse(layout, "2014-01-18 23:02:03 +0000 UTC") + require.NoError(t, err) + + patterns := map[string]struct { + inputDatasource string + inputStartAt time.Time + inputEndAt time.Time + expected *godruid.QuerySegmentMetadata + }{ + "success": { + inputDatasource: "ds", + inputStartAt: t1, + inputEndAt: t2, + expected: &godruid.QuerySegmentMetadata{ + QueryType: godruid.SEGMENTMETADATA, + DataSource: godruid.DataSourceTable("ds"), + Intervals: "2014-01-17T23:02/2014-01-18T23:02", + Merge: "true", + }, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + actual := querySegmentMetadata(p.inputDatasource, p.inputStartAt, p.inputEndAt) + assert.Equal(t, p.expected, actual) + }) + } +} + +func TestQueryGoalGroupBy(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + layout := "2006-01-02 15:04:05 -0700 MST" + t1, err := time.Parse(layout, "2014-01-17 23:02:03 +0000 UTC") + require.NoError(t, err) + t2, err := 
time.Parse(layout, "2014-01-18 23:02:03 +0000 UTC") + require.NoError(t, err) + + patterns := map[string]struct { + inputDatasource string + inputStartAt time.Time + inputEndAt time.Time + inputEnvironmentNamespace string + inputGoalID string + inputFeatureID string + inputFeatureVersion int32 + inputReason string + inputSegments []string + inputFilters []*ecproto.Filter + expected *godruid.QueryGroupBy + }{ + "no feature, no segments": { + inputDatasource: "ds", + inputStartAt: t1, + inputEndAt: t2, + inputEnvironmentNamespace: "ns", + inputGoalID: "gid", + expected: &godruid.QueryGroupBy{ + QueryType: "groupBy", + DataSource: &godruid.DataSource{ + Type: "query", + Query: &godruid.QueryGroupBy{ + QueryType: godruid.GROUPBY, + DataSource: godruid.DataSourceTable("ds"), + Intervals: "2014-01-17T23:02/2014-01-18T23:02", + Granularity: godruid.GranAll, + Filter: godruid.FilterAnd( + godruid.FilterSelector("environmentNamespace", "ns"), + &godruid.Filter{Type: "selector", Dimension: "goalId", Value: "gid"}, + ), + Dimensions: []godruid.DimSpec{ + godruid.DimDefault("userId", ColumnUser), + }, + Aggregations: []godruid.Aggregation{ + *godruid.AggRawJson(`{ "type": "longSum", "name": "count", "fieldName": "count" }`), + *godruid.AggRawJson(`{ "type": "doubleSum", "name": "valueSum", "fieldName": "valueSum" }`), + }, + }, + }, + Intervals: "2014-01-17T23:02/2014-01-18T23:02", + Granularity: godruid.GranAll, + LimitSpec: godruid.LimitDefault(10000, []godruid.Column{}), + Dimensions: []godruid.DimSpec{}, + Aggregations: []godruid.Aggregation{ + *godruid.AggRawJson(`{ "type": "longSum", "name": "Goal total", "fieldName": "count" }`), + *godruid.AggRawJson(`{ "type": "doubleSum", "name": "Goal value total", "fieldName": "valueSum" }`), + *godruid.AggRawJson(`{ "type": "count", "name": "Goal user"}`), + *godruid.AggRawJson(`{ "type": "doubleMean", "name": "Goal value mean", "fieldName": "valueSum" }`), + *godruid.AggRawJson(`{ "type": "variance", "name": "Goal value 
variance", "fieldName": "valueSum" }`), + }, + }, + }, + "no feature, segments": { + inputDatasource: "ds", + inputStartAt: t1, + inputEndAt: t2, + inputEnvironmentNamespace: "ns", + inputGoalID: "gid", + inputSegments: []string{"s1", "s2"}, + expected: &godruid.QueryGroupBy{ + QueryType: "groupBy", + DataSource: &godruid.DataSource{ + Type: "query", + Query: &godruid.QueryGroupBy{ + QueryType: godruid.GROUPBY, + DataSource: godruid.DataSourceTable("ds"), + Intervals: "2014-01-17T23:02/2014-01-18T23:02", + Granularity: godruid.GranAll, + Filter: godruid.FilterAnd( + godruid.FilterSelector("environmentNamespace", "ns"), + &godruid.Filter{Type: "selector", Dimension: "goalId", Value: "gid"}, + ), + Dimensions: []godruid.DimSpec{ + godruid.DimDefault("userId", ColumnUser), + godruid.DimDefault("s1", "s1"), + godruid.DimDefault("s2", "s2"), + }, + Aggregations: []godruid.Aggregation{ + *godruid.AggRawJson(`{ "type": "longSum", "name": "count", "fieldName": "count" }`), + *godruid.AggRawJson(`{ "type": "doubleSum", "name": "valueSum", "fieldName": "valueSum" }`), + }, + }, + }, + Intervals: "2014-01-17T23:02/2014-01-18T23:02", + Granularity: godruid.GranAll, + LimitSpec: godruid.LimitDefault(10000, []godruid.Column{ + {Dimension: "s1", Direction: godruid.DirectionASC}, + {Dimension: "s2", Direction: godruid.DirectionASC}, + }), + Dimensions: []godruid.DimSpec{ + godruid.DimDefault("s1", "s1"), + godruid.DimDefault("s2", "s2"), + }, + Aggregations: []godruid.Aggregation{ + *godruid.AggRawJson(`{ "type": "longSum", "name": "Goal total", "fieldName": "count" }`), + *godruid.AggRawJson(`{ "type": "doubleSum", "name": "Goal value total", "fieldName": "valueSum" }`), + *godruid.AggRawJson(`{ "type": "count", "name": "Goal user"}`), + *godruid.AggRawJson(`{ "type": "doubleMean", "name": "Goal value mean", "fieldName": "valueSum" }`), + *godruid.AggRawJson(`{ "type": "variance", "name": "Goal value variance", "fieldName": "valueSum" }`), + }, + }, + }, + "feature, no segments": 
{ + inputDatasource: "ds", + inputStartAt: t1, + inputEndAt: t2, + inputEnvironmentNamespace: "ns", + inputGoalID: "gid", + inputFeatureID: "fid", + inputFeatureVersion: int32(1), + expected: &godruid.QueryGroupBy{ + QueryType: "groupBy", + DataSource: &godruid.DataSource{ + Type: "query", + Query: &godruid.QueryGroupBy{ + QueryType: godruid.GROUPBY, + DataSource: godruid.DataSourceTable("ds"), + Intervals: "2014-01-17T23:02/2014-01-18T23:02", + Granularity: godruid.GranAll, + Filter: godruid.FilterAnd( + godruid.FilterSelector("environmentNamespace", "ns"), + &godruid.Filter{Type: "selector", Dimension: "goalId", Value: "gid"}, + &godruid.Filter{Type: "regex", Dimension: "evaluations", Pattern: "^fid:1:.*$"}, + ), + Dimensions: []godruid.DimSpec{ + godruid.DimDefault("userId", ColumnUser), + &godruid.DimFiltered{ + Type: "regexFiltered", + Pattern: "^fid:1:.*$", + Delegate: &godruid.Dimension{ + Type: "extraction", + Dimension: "evaluations", + OutputName: "Variation", + ExtractionFn: &godruid.DimExtractionFn{ + Type: "regex", + Expr: "^(.*):.*$", + }, + }, + }, + }, + Aggregations: []godruid.Aggregation{ + *godruid.AggRawJson(`{ "type": "longSum", "name": "count", "fieldName": "count" }`), + *godruid.AggRawJson(`{ "type": "doubleSum", "name": "valueSum", "fieldName": "valueSum" }`), + }, + }, + }, + Intervals: "2014-01-17T23:02/2014-01-18T23:02", + Granularity: godruid.GranAll, + LimitSpec: godruid.LimitDefault(10000, []godruid.Column{{Dimension: "Variation", Direction: godruid.DirectionASC}}), + Dimensions: []godruid.DimSpec{ + godruid.DimDefault(ColumnVariation, ColumnVariation), + }, + Aggregations: []godruid.Aggregation{ + *godruid.AggRawJson(`{ "type": "longSum", "name": "Goal total", "fieldName": "count" }`), + *godruid.AggRawJson(`{ "type": "doubleSum", "name": "Goal value total", "fieldName": "valueSum" }`), + *godruid.AggRawJson(`{ "type": "count", "name": "Goal user"}`), + *godruid.AggRawJson(`{ "type": "doubleMean", "name": "Goal value mean", 
"fieldName": "valueSum" }`), + *godruid.AggRawJson(`{ "type": "variance", "name": "Goal value variance", "fieldName": "valueSum" }`), + }, + }, + }, + "feature, segments": { + inputDatasource: "ds", + inputStartAt: t1, + inputEndAt: t2, + inputEnvironmentNamespace: "ns", + inputGoalID: "gid", + inputSegments: []string{"s1", "s2"}, + inputFeatureID: "fid", + inputFeatureVersion: int32(1), + inputFilters: []*ecproto.Filter{ + {Key: "f0", Operator: ecproto.Filter_EQUALS, Values: []string{"v0"}}, + {Key: "f0", Operator: ecproto.Filter_EQUALS, Values: []string{"v1"}}, + {Key: "f1", Operator: ecproto.Filter_EQUALS, Values: []string{"v0"}}, + }, + expected: &godruid.QueryGroupBy{ + QueryType: "groupBy", + DataSource: &godruid.DataSource{ + Type: "query", + Query: &godruid.QueryGroupBy{ + QueryType: godruid.GROUPBY, + DataSource: godruid.DataSourceTable("ds"), + Intervals: "2014-01-17T23:02/2014-01-18T23:02", + Granularity: godruid.GranAll, + Filter: godruid.FilterAnd( + godruid.FilterSelector("environmentNamespace", "ns"), + &godruid.Filter{Type: "selector", Dimension: "goalId", Value: "gid"}, + &godruid.Filter{Type: "selector", Dimension: "f0", Value: "v0"}, + &godruid.Filter{Type: "selector", Dimension: "f0", Value: "v1"}, + &godruid.Filter{Type: "selector", Dimension: "f1", Value: "v0"}, + &godruid.Filter{Type: "regex", Dimension: "evaluations", Pattern: "^fid:1:.*$"}, + ), + Dimensions: []godruid.DimSpec{ + godruid.DimDefault("userId", ColumnUser), + godruid.DimDefault("s1", "s1"), + godruid.DimDefault("s2", "s2"), + &godruid.DimFiltered{ + Type: "regexFiltered", + Pattern: "^fid:1:.*$", + Delegate: &godruid.Dimension{ + Type: "extraction", + Dimension: "evaluations", + OutputName: "Variation", + ExtractionFn: &godruid.DimExtractionFn{ + Type: "regex", + Expr: "^(.*):.*$", + }, + }, + }, + }, + Aggregations: []godruid.Aggregation{ + *godruid.AggRawJson(`{ "type": "longSum", "name": "count", "fieldName": "count" }`), + *godruid.AggRawJson(`{ "type": "doubleSum", 
"name": "valueSum", "fieldName": "valueSum" }`), + }, + }, + }, + Intervals: "2014-01-17T23:02/2014-01-18T23:02", + Granularity: godruid.GranAll, + LimitSpec: godruid.LimitDefault(10000, []godruid.Column{ + {Dimension: "s1", Direction: godruid.DirectionASC}, + {Dimension: "s2", Direction: godruid.DirectionASC}, + {Dimension: "Variation", Direction: godruid.DirectionASC}, + }), + Dimensions: []godruid.DimSpec{ + godruid.DimDefault("s1", "s1"), + godruid.DimDefault("s2", "s2"), + godruid.DimDefault(ColumnVariation, ColumnVariation), + }, + Aggregations: []godruid.Aggregation{ + *godruid.AggRawJson(`{ "type": "longSum", "name": "Goal total", "fieldName": "count" }`), + *godruid.AggRawJson(`{ "type": "doubleSum", "name": "Goal value total", "fieldName": "valueSum" }`), + *godruid.AggRawJson(`{ "type": "count", "name": "Goal user"}`), + *godruid.AggRawJson(`{ "type": "doubleMean", "name": "Goal value mean", "fieldName": "valueSum" }`), + *godruid.AggRawJson(`{ "type": "variance", "name": "Goal value variance", "fieldName": "valueSum" }`), + }, + }, + }, + "feature, reason, segments": { + inputDatasource: "ds", + inputStartAt: t1, + inputEndAt: t2, + inputEnvironmentNamespace: "ns", + inputGoalID: "gid", + inputSegments: []string{"s1", "s2"}, + inputFeatureID: "fid", + inputReason: "DEFAULT", + inputFeatureVersion: int32(1), + inputFilters: []*ecproto.Filter{ + {Key: "f0", Operator: ecproto.Filter_EQUALS, Values: []string{"v0"}}, + {Key: "f0", Operator: ecproto.Filter_EQUALS, Values: []string{"v1"}}, + {Key: "f1", Operator: ecproto.Filter_EQUALS, Values: []string{"v0"}}, + }, + expected: &godruid.QueryGroupBy{ + QueryType: "groupBy", + DataSource: &godruid.DataSource{ + Type: "query", + Query: &godruid.QueryGroupBy{ + QueryType: godruid.GROUPBY, + DataSource: godruid.DataSourceTable("ds"), + Intervals: "2014-01-17T23:02/2014-01-18T23:02", + Granularity: godruid.GranAll, + Filter: godruid.FilterAnd( + godruid.FilterSelector("environmentNamespace", "ns"), + 
&godruid.Filter{Type: "selector", Dimension: "goalId", Value: "gid"}, + &godruid.Filter{Type: "selector", Dimension: "f0", Value: "v0"}, + &godruid.Filter{Type: "selector", Dimension: "f0", Value: "v1"}, + &godruid.Filter{Type: "selector", Dimension: "f1", Value: "v0"}, + &godruid.Filter{Type: "regex", Dimension: "evaluations", Pattern: "^fid:1:.*:DEFAULT$"}, + ), + Dimensions: []godruid.DimSpec{ + godruid.DimDefault("userId", ColumnUser), + godruid.DimDefault("s1", "s1"), + godruid.DimDefault("s2", "s2"), + &godruid.DimFiltered{ + Type: "regexFiltered", + Pattern: "^fid:1:.*$", + Delegate: &godruid.Dimension{ + Type: "extraction", + Dimension: "evaluations", + OutputName: "Variation", + ExtractionFn: &godruid.DimExtractionFn{ + Type: "regex", + Expr: "^(.*):.*$", + }, + }, + }, + }, + Aggregations: []godruid.Aggregation{ + *godruid.AggRawJson(`{ "type": "longSum", "name": "count", "fieldName": "count" }`), + *godruid.AggRawJson(`{ "type": "doubleSum", "name": "valueSum", "fieldName": "valueSum" }`), + }, + }, + }, + Intervals: "2014-01-17T23:02/2014-01-18T23:02", + Granularity: godruid.GranAll, + LimitSpec: godruid.LimitDefault(10000, []godruid.Column{ + {Dimension: "s1", Direction: godruid.DirectionASC}, + {Dimension: "s2", Direction: godruid.DirectionASC}, + {Dimension: "Variation", Direction: godruid.DirectionASC}, + }), + Dimensions: []godruid.DimSpec{ + godruid.DimDefault("s1", "s1"), + godruid.DimDefault("s2", "s2"), + godruid.DimDefault(ColumnVariation, ColumnVariation), + }, + Aggregations: []godruid.Aggregation{ + *godruid.AggRawJson(`{ "type": "longSum", "name": "Goal total", "fieldName": "count" }`), + *godruid.AggRawJson(`{ "type": "doubleSum", "name": "Goal value total", "fieldName": "valueSum" }`), + *godruid.AggRawJson(`{ "type": "count", "name": "Goal user"}`), + *godruid.AggRawJson(`{ "type": "doubleMean", "name": "Goal value mean", "fieldName": "valueSum" }`), + *godruid.AggRawJson(`{ "type": "variance", "name": "Goal value variance", 
"fieldName": "valueSum" }`), + }, + }, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + actual := queryGoalGroupBy( + p.inputDatasource, + p.inputStartAt, + p.inputEndAt, + p.inputEnvironmentNamespace, + p.inputGoalID, + p.inputFeatureID, + p.inputFeatureVersion, + p.inputReason, + p.inputSegments, + p.inputFilters) + assert.Equal(t, p.expected, actual) + }) + } +} + +func TestQueryEvaluationGroupBy(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + layout := "2006-01-02 15:04:05 -0700 MST" + t1, err := time.Parse(layout, "2014-01-17 23:02:03 +0000 UTC") + require.NoError(t, err) + t2, err := time.Parse(layout, "2014-01-18 23:02:03 +0000 UTC") + require.NoError(t, err) + + patterns := map[string]struct { + inputDatasource string + inputStartAt time.Time + inputEndAt time.Time + inputEnvironmentNamespace string + inputGoalID string + inputFeatureID string + inputFeatureVersion int32 + inputReason string + inputSegments []string + inputFilters []*ecproto.Filter + expected *godruid.QueryGroupBy + }{ + "feature, no segments": { + inputDatasource: "ds", + inputStartAt: t1, + inputEndAt: t2, + inputEnvironmentNamespace: "ns", + inputGoalID: "gid", + inputFeatureID: "fid", + inputFeatureVersion: int32(1), + expected: &godruid.QueryGroupBy{ + QueryType: "groupBy", + DataSource: godruid.DataSourceTable("ds"), + Intervals: "2014-01-17T23:02/2014-01-18T23:02", + Granularity: godruid.GranAll, + Filter: godruid.FilterAnd( + godruid.FilterSelector("environmentNamespace", "ns"), + godruid.FilterSelector("featureId", "fid"), + godruid.FilterSelector("featureVersion", int32(1)), + ), + LimitSpec: godruid.LimitDefault(10000, []godruid.Column{ + {Dimension: "Variation", Direction: godruid.DirectionASC}, + }), + Dimensions: []godruid.DimSpec{ + godruid.DimDefault("variationId", "Variation"), + }, + Aggregations: []godruid.Aggregation{ + *godruid.AggRawJson(`{ "type": "longSum", "name": "Evaluation 
total", "fieldName": "count" }`), + *godruid.AggRawJson(`{ "type": "thetaSketch", "name": "Evaluation user", "fieldName": "userIdThetaSketch" }`), + }, + }, + }, + "feature, segments": { + inputDatasource: "ds", + inputStartAt: t1, + inputEndAt: t2, + inputEnvironmentNamespace: "ns", + inputSegments: []string{"s1", "s2"}, + inputFeatureID: "fid", + inputFeatureVersion: int32(1), + inputFilters: []*ecproto.Filter{ + {Key: "f0", Operator: ecproto.Filter_EQUALS, Values: []string{"v0"}}, + {Key: "f0", Operator: ecproto.Filter_EQUALS, Values: []string{"v1"}}, + {Key: "f1", Operator: ecproto.Filter_EQUALS, Values: []string{"v0"}}, + }, + expected: &godruid.QueryGroupBy{ + QueryType: "groupBy", + DataSource: godruid.DataSourceTable("ds"), + Intervals: "2014-01-17T23:02/2014-01-18T23:02", + Granularity: godruid.GranAll, + Filter: godruid.FilterAnd( + godruid.FilterSelector("environmentNamespace", "ns"), + godruid.FilterSelector("featureId", "fid"), + godruid.FilterSelector("featureVersion", int32(1)), + &godruid.Filter{Type: "selector", Dimension: "f0", Value: "v0"}, + &godruid.Filter{Type: "selector", Dimension: "f0", Value: "v1"}, + &godruid.Filter{Type: "selector", Dimension: "f1", Value: "v0"}, + ), + LimitSpec: godruid.LimitDefault(10000, []godruid.Column{ + {Dimension: "s1", Direction: godruid.DirectionASC}, + {Dimension: "s2", Direction: godruid.DirectionASC}, + {Dimension: "Variation", Direction: godruid.DirectionASC}, + }), + Dimensions: []godruid.DimSpec{ + godruid.DimDefault("s1", "s1"), + godruid.DimDefault("s2", "s2"), + godruid.DimDefault("variationId", "Variation"), + }, + Aggregations: []godruid.Aggregation{ + *godruid.AggRawJson(`{ "type": "longSum", "name": "Evaluation total", "fieldName": "count" }`), + *godruid.AggRawJson(`{ "type": "thetaSketch", "name": "Evaluation user", "fieldName": "userIdThetaSketch" }`), + }, + }, + }, + "feature, reason, segments": { + inputDatasource: "ds", + inputStartAt: t1, + inputEndAt: t2, + inputEnvironmentNamespace: 
"ns", + inputSegments: []string{"s1", "s2"}, + inputFeatureID: "fid", + inputFeatureVersion: int32(1), + inputReason: "DEFAULT", + inputFilters: []*ecproto.Filter{ + {Key: "f0", Operator: ecproto.Filter_EQUALS, Values: []string{"v0"}}, + {Key: "f0", Operator: ecproto.Filter_EQUALS, Values: []string{"v1"}}, + {Key: "f1", Operator: ecproto.Filter_EQUALS, Values: []string{"v0"}}, + }, + expected: &godruid.QueryGroupBy{ + QueryType: "groupBy", + DataSource: godruid.DataSourceTable("ds"), + Intervals: "2014-01-17T23:02/2014-01-18T23:02", + Granularity: godruid.GranAll, + Filter: godruid.FilterAnd( + godruid.FilterSelector("environmentNamespace", "ns"), + godruid.FilterSelector("featureId", "fid"), + godruid.FilterSelector("featureVersion", int32(1)), + godruid.FilterSelector("reason", "DEFAULT"), + &godruid.Filter{Type: "selector", Dimension: "f0", Value: "v0"}, + &godruid.Filter{Type: "selector", Dimension: "f0", Value: "v1"}, + &godruid.Filter{Type: "selector", Dimension: "f1", Value: "v0"}, + ), + LimitSpec: godruid.LimitDefault(10000, []godruid.Column{ + {Dimension: "s1", Direction: godruid.DirectionASC}, + {Dimension: "s2", Direction: godruid.DirectionASC}, + {Dimension: "Variation", Direction: godruid.DirectionASC}, + }), + Dimensions: []godruid.DimSpec{ + godruid.DimDefault("s1", "s1"), + godruid.DimDefault("s2", "s2"), + godruid.DimDefault("variationId", "Variation"), + }, + Aggregations: []godruid.Aggregation{ + *godruid.AggRawJson(`{ "type": "longSum", "name": "Evaluation total", "fieldName": "count" }`), + *godruid.AggRawJson(`{ "type": "thetaSketch", "name": "Evaluation user", "fieldName": "userIdThetaSketch" }`), + }, + }, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + actual := queryEvaluationGroupBy( + p.inputDatasource, + p.inputStartAt, + p.inputEndAt, + p.inputEnvironmentNamespace, + p.inputFeatureID, + p.inputFeatureVersion, + p.inputReason, + p.inputSegments, + p.inputFilters, + ) + assert.Equal(t, p.expected, actual) + 
}) + } +} + +func TestQueryUserGroupBy(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + layout := "2006-01-02 15:04:05 -0700 MST" + t1, err := time.Parse(layout, "2014-01-17 23:02:03 +0000 UTC") + require.NoError(t, err) + t2, err := time.Parse(layout, "2014-01-18 23:02:03 +0000 UTC") + require.NoError(t, err) + patterns := map[string]struct { + inputDatasource string + inputStartAt time.Time + inputEndAt time.Time + inputEnvironmentNamespace string + inputFilters []*ecproto.Filter + expected *godruid.QueryGroupBy + }{ + "success": { + inputDatasource: "ds", + inputStartAt: t1, + inputEndAt: t2, + inputEnvironmentNamespace: "ns", + expected: &godruid.QueryGroupBy{ + QueryType: godruid.GROUPBY, + DataSource: &godruid.DataSource{ + Type: "query", + Query: &godruid.QueryGroupBy{ + QueryType: godruid.GROUPBY, + DataSource: godruid.DataSourceTable("ds"), + Intervals: toConvInterval(t1, t2), + Granularity: godruid.GranAll, + Filter: godruid.FilterAnd( + godruid.FilterSelector("environmentNamespace", "ns"), + ), + Dimensions: []godruid.DimSpec{ + godruid.DimDefault("userId", ColumnUser), + }, + Aggregations: []godruid.Aggregation{ + *godruid.AggRawJson(fmt.Sprintf(`{ "type": "count", "name": "count" }`)), + }, + }, + }, + Intervals: "2014-01-17T23:02/2014-01-18T23:02", + Granularity: godruid.GranAll, + Aggregations: []godruid.Aggregation{ + *godruid.AggRawJson(fmt.Sprintf(`{ "type": "longSum", "name": "%s", "fieldName": "count" }`, ColumnUserTotal)), + *godruid.AggRawJson(fmt.Sprintf(`{ "type": "count", "name": "%s" }`, ColumnUserCount)), + }, + }, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + actual := queryUserGroupBy( + p.inputDatasource, + p.inputEnvironmentNamespace, + p.inputStartAt, + p.inputEndAt, + ) + assert.Equal(t, p.expected, actual) + }) + } +} + +func TestQueryEvaluationTimeseries(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer 
mockController.Finish() + layout := "2006-01-02 15:04:05 -0700 MST" + t1, err := time.Parse(layout, "2014-01-17 23:02:03 +0000 UTC") + require.NoError(t, err) + t2, err := time.Parse(layout, "2014-01-18 23:02:03 +0000 UTC") + require.NoError(t, err) + patterns := map[string]struct { + inputDatasource string + inputStartAt time.Time + inputEndAt time.Time + inputEnvironmentNamespace string + inputFeatureID string + inputFeatureVersion int32 + inputVariationID string + expected *godruid.QueryTimeseries + }{ + "success": { + inputDatasource: "ds", + inputStartAt: t1, + inputEndAt: t2, + inputEnvironmentNamespace: "ns", + inputFeatureID: "fid", + inputFeatureVersion: int32(0), + inputVariationID: "vid", + expected: &godruid.QueryTimeseries{ + QueryType: godruid.TIMESERIES, + DataSource: godruid.DataSourceTable("ds"), + Intervals: toConvInterval(t1, t2), + Granularity: godruid.GranPeriod("P1D", "Asia/Tokyo", ""), + Filter: godruid.FilterAnd( + godruid.FilterSelector("environmentNamespace", "ns"), + godruid.FilterSelector("featureId", "fid"), + godruid.FilterSelector("variationId", "vid"), + ), + Aggregations: []godruid.Aggregation{ + *godruid.AggLongSum(ColumnEvaluationTotal, "count"), + *godruid.AggRawJson( + fmt.Sprintf(`{ "type": "thetaSketch", "name": "%s", "fieldName": "userIdThetaSketch" }`, ColumnEvaluationUser), + ), + }, + }, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + actual := queryEvaluationTimeseries( + p.inputDatasource, + p.inputStartAt, + p.inputEndAt, + p.inputEnvironmentNamespace, + p.inputFeatureID, + p.inputFeatureVersion, + p.inputVariationID, + ) + assert.Equal(t, p.expected, actual) + }) + } +} diff --git a/pkg/eventcounter/storage/v2/BUILD.bazel b/pkg/eventcounter/storage/v2/BUILD.bazel new file mode 100644 index 000000000..f5c9629e7 --- /dev/null +++ b/pkg/eventcounter/storage/v2/BUILD.bazel @@ -0,0 +1,25 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = 
"go_default_library", + srcs = ["experiment_result.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/eventcounter/storage/v2", + visibility = ["//visibility:public"], + deps = [ + "//pkg/eventcounter/domain:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//proto/eventcounter:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["experiment_result_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/storage/v2/mysql:go_default_library", + "//pkg/storage/v2/mysql/mock:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + ], +) diff --git a/pkg/eventcounter/storage/v2/experiment_result.go b/pkg/eventcounter/storage/v2/experiment_result.go new file mode 100644 index 000000000..27419639f --- /dev/null +++ b/pkg/eventcounter/storage/v2/experiment_result.go @@ -0,0 +1,78 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package v2 + +import ( + "context" + "errors" + + "github.com/bucketeer-io/bucketeer/pkg/eventcounter/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + proto "github.com/bucketeer-io/bucketeer/proto/eventcounter" +) + +var ErrExperimentResultNotFound = errors.New("experimentResult: experiment result not found") + +type ExperimentResultStorage interface { + GetExperimentResult(ctx context.Context, id, environmentNamespace string) (*domain.ExperimentResult, error) +} + +type experimentResultStorage struct { + qe mysql.QueryExecer +} + +func NewExperimentResultStorage(qe mysql.QueryExecer) ExperimentResultStorage { + return &experimentResultStorage{qe} +} + +func (s *experimentResultStorage) GetExperimentResult( + ctx context.Context, + id, environmentNamespace string, +) (*domain.ExperimentResult, error) { + er := proto.ExperimentResult{} + er_for_goal_results := proto.ExperimentResult{} + query := ` + SELECT + id, + experiment_id, + updated_at, + data + FROM + experiment_result + WHERE + id = ? AND + environment_namespace = ? + ` + err := s.qe.QueryRowContext( + ctx, + query, + id, + environmentNamespace, + ).Scan( + &er.Id, + &er.ExperimentId, + &er.UpdatedAt, + &mysql.JSONPBObject{Val: &er_for_goal_results}, + ) + if err != nil { + if err == mysql.ErrNoRows { + return nil, ErrExperimentResultNotFound + } + return nil, err + } + er.GoalResults = er_for_goal_results.GoalResults + return &domain.ExperimentResult{ExperimentResult: &er}, nil +} diff --git a/pkg/eventcounter/storage/v2/experiment_result_test.go b/pkg/eventcounter/storage/v2/experiment_result_test.go new file mode 100644 index 000000000..fd359630d --- /dev/null +++ b/pkg/eventcounter/storage/v2/experiment_result_test.go @@ -0,0 +1,100 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v2 + +import ( + "context" + "errors" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" +) + +func TestNewExperimentResultStorage(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + storage := NewExperimentResultStorage(mock.NewMockQueryExecer(mockController)) + assert.IsType(t, &experimentResultStorage{}, storage) +} + +func TestGetExperimentResult(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*experimentResultStorage) + id string + environmentNamespace string + expectedErr error + }{ + "ErrExperimentResultNotFound": { + setup: func(s *experimentResultStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + id: "id-0", + environmentNamespace: "ns", + expectedErr: ErrExperimentResultNotFound, + }, + "Error": { + setup: func(s *experimentResultStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(errors.New("error")) + 
s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + + }, + id: "id-0", + environmentNamespace: "ns", + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *experimentResultStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + id: "id-0", + environmentNamespace: "ns", + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newExperimentResultStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + _, err := storage.GetExperimentResult(context.Background(), p.id, p.environmentNamespace) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func newExperimentResultStorageWithMock(t *testing.T, mockController *gomock.Controller) *experimentResultStorage { + t.Helper() + return &experimentResultStorage{mock.NewMockQueryExecer(mockController)} +} diff --git a/pkg/eventcounter/storage/v2/mock/BUILD.bazel b/pkg/eventcounter/storage/v2/mock/BUILD.bazel new file mode 100644 index 000000000..0c6cb0390 --- /dev/null +++ b/pkg/eventcounter/storage/v2/mock/BUILD.bazel @@ -0,0 +1,12 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["experiment_result.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/eventcounter/storage/v2/mock", + visibility = ["//visibility:public"], + deps = [ + "//pkg/eventcounter/domain:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + ], +) diff --git a/pkg/eventcounter/storage/v2/mock/experiment_result.go b/pkg/eventcounter/storage/v2/mock/experiment_result.go new file mode 100644 index 000000000..1b8dc7f0a --- /dev/null +++ b/pkg/eventcounter/storage/v2/mock/experiment_result.go @@ -0,0 +1,52 @@ +// Code generated by MockGen. 
DO NOT EDIT. +// Source: experiment_result.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + domain "github.com/bucketeer-io/bucketeer/pkg/eventcounter/domain" +) + +// MockExperimentResultStorage is a mock of ExperimentResultStorage interface. +type MockExperimentResultStorage struct { + ctrl *gomock.Controller + recorder *MockExperimentResultStorageMockRecorder +} + +// MockExperimentResultStorageMockRecorder is the mock recorder for MockExperimentResultStorage. +type MockExperimentResultStorageMockRecorder struct { + mock *MockExperimentResultStorage +} + +// NewMockExperimentResultStorage creates a new mock instance. +func NewMockExperimentResultStorage(ctrl *gomock.Controller) *MockExperimentResultStorage { + mock := &MockExperimentResultStorage{ctrl: ctrl} + mock.recorder = &MockExperimentResultStorageMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockExperimentResultStorage) EXPECT() *MockExperimentResultStorageMockRecorder { + return m.recorder +} + +// GetExperimentResult mocks base method. +func (m *MockExperimentResultStorage) GetExperimentResult(ctx context.Context, id, environmentNamespace string) (*domain.ExperimentResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetExperimentResult", ctx, id, environmentNamespace) + ret0, _ := ret[0].(*domain.ExperimentResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetExperimentResult indicates an expected call of GetExperimentResult. 
+func (mr *MockExperimentResultStorageMockRecorder) GetExperimentResult(ctx, id, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetExperimentResult", reflect.TypeOf((*MockExperimentResultStorage)(nil).GetExperimentResult), ctx, id, environmentNamespace) +} diff --git a/pkg/eventpersister/cmd/server/BUILD.bazel b/pkg/eventpersister/cmd/server/BUILD.bazel new file mode 100644 index 000000000..873791dd8 --- /dev/null +++ b/pkg/eventpersister/cmd/server/BUILD.bazel @@ -0,0 +1,24 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["server.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/eventpersister/cmd/server", + visibility = ["//visibility:public"], + deps = [ + "//pkg/cli:go_default_library", + "//pkg/eventpersister/datastore:go_default_library", + "//pkg/eventpersister/persister:go_default_library", + "//pkg/feature/client:go_default_library", + "//pkg/health:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/pubsub:go_default_library", + "//pkg/pubsub/puller:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/rpc/client:go_default_library", + "//pkg/storage/kafka:go_default_library", + "//pkg/storage/v2/bigtable:go_default_library", + "@in_gopkg_alecthomas_kingpin_v2//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/eventpersister/cmd/server/server.go b/pkg/eventpersister/cmd/server/server.go new file mode 100644 index 000000000..557613964 --- /dev/null +++ b/pkg/eventpersister/cmd/server/server.go @@ -0,0 +1,303 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + "context" + "time" + + "go.uber.org/zap" + kingpin "gopkg.in/alecthomas/kingpin.v2" + + "github.com/bucketeer-io/bucketeer/pkg/cli" + "github.com/bucketeer-io/bucketeer/pkg/eventpersister/datastore" + "github.com/bucketeer-io/bucketeer/pkg/eventpersister/persister" + featureclient "github.com/bucketeer-io/bucketeer/pkg/feature/client" + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/pubsub" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + "github.com/bucketeer-io/bucketeer/pkg/storage/kafka" + bigtable "github.com/bucketeer-io/bucketeer/pkg/storage/v2/bigtable" +) + +const ( + command = "server" +) + +type server struct { + *kingpin.CmdClause + port *int + project *string + bigtableInstance *string + subscription *string + topic *string + maxMPS *int + numWorkers *int + kafkaURL *string + kafkaTopicPrefix *string + kafkaTopicDataType *string + kafkaUsername *string + kafkaPassword *string + numWriters *int + flushSize *int + flushInterval *time.Duration + flushTimeout *time.Duration + featureService *string + certPath *string + keyPath *string + serviceTokenPath *string + pullerNumGoroutines *int + pullerMaxOutstandingMessages *int + pullerMaxOutstandingBytes *int + alloyDBRegion *string + alloyDBClusterID *string + alloyDBInstanceID *string + alloyDBUser *string + alloyDBPass *string + alloyDBName *string +} + +func 
RegisterServerCommand(r cli.CommandRegistry, p cli.ParentCommand) cli.Command { + cmd := p.Command(command, "Start the server") + server := &server{ + CmdClause: cmd, + port: cmd.Flag("port", "Port to bind to.").Default("9090").Int(), + project: cmd.Flag("project", "Google Cloud project name.").String(), + bigtableInstance: cmd.Flag("bigtable-instance", "Instance name to use Bigtable.").Required().String(), + subscription: cmd.Flag("subscription", "Google PubSub subscription name.").String(), + topic: cmd.Flag("topic", "Google PubSub topic name.").String(), + maxMPS: cmd.Flag("max-mps", "Maximum messages should be handled in a second.").Default("1000").Int(), + numWorkers: cmd.Flag("num-workers", "Number of workers.").Default("2").Int(), + kafkaURL: cmd.Flag("kafka-url", "Kafka URL.").String(), + kafkaTopicPrefix: cmd.Flag("kafka-topic-prefix", "Kafka topic dataset section prefix.").String(), + kafkaTopicDataType: cmd.Flag("kafka-topic-data-type", "Kafka topic data type.").String(), + kafkaUsername: cmd.Flag("kafka-username", "Kafka username.").String(), + kafkaPassword: cmd.Flag("kafka-password", "Kafka password.").String(), + numWriters: cmd.Flag("num-writers", "Number of writers.").Default("2").Int(), + flushSize: cmd.Flag( + "flush-size", + "Maximum number of messages to batch before writing to datastore.", + ).Default("50").Int(), + flushInterval: cmd.Flag("flush-interval", "Maximum interval between two flushes.").Default("5s").Duration(), + flushTimeout: cmd.Flag("flush-timeout", "Maximum time for a flush to finish.").Default("20s").Duration(), + featureService: cmd.Flag("feature-service", "bucketeer-feature-service address.").Default("feature:9090").String(), + certPath: cmd.Flag("cert", "Path to TLS certificate.").Required().String(), + keyPath: cmd.Flag("key", "Path to TLS key.").Required().String(), + serviceTokenPath: cmd.Flag("service-token", "Path to service token.").Required().String(), + pullerNumGoroutines: cmd.Flag( + "puller-num-goroutines", + 
"Number of goroutines will be spawned to pull messages.", + ).Int(), + pullerMaxOutstandingMessages: cmd.Flag( + "puller-max-outstanding-messages", + "Maximum number of unprocessed messages.", + ).Int(), + pullerMaxOutstandingBytes: cmd.Flag( + "puller-max-outstanding-bytes", + "Maximum size of unprocessed messages.", + ).Int(), + alloyDBRegion: cmd.Flag("alloydb-region", "").Required().String(), + alloyDBClusterID: cmd.Flag("alloydb-cluster-id", "").Required().String(), + alloyDBInstanceID: cmd.Flag("alloydb-instance-id", "").Required().String(), + alloyDBUser: cmd.Flag("alloydb-user", "").Required().String(), + alloyDBPass: cmd.Flag("alloydb-pass", "").Required().String(), + alloyDBName: cmd.Flag("alloydb-name", "").Required().String(), + } + r.RegisterCommand(server) + return server +} + +func (s *server) Run(ctx context.Context, metrics metrics.Metrics, logger *zap.Logger) error { + registerer := metrics.DefaultRegisterer() + + puller, err := s.createPuller(ctx, logger) + if err != nil { + return err + } + + datastore, err := s.createWriters(ctx, registerer, logger) + if err != nil { + return err + } + defer datastore.Close() + + btClient, err := s.createBigtableClient(ctx, registerer, logger) + if err != nil { + return err + } + defer btClient.Close() + + creds, err := client.NewPerRPCCredentials(*s.serviceTokenPath) + if err != nil { + return err + } + + featureClient, err := featureclient.NewClient(*s.featureService, *s.certPath, + client.WithPerRPCCredentials(creds), + client.WithDialTimeout(30*time.Second), + client.WithBlock(), + client.WithMetrics(registerer), + client.WithLogger(logger), + ) + if err != nil { + return err + } + defer featureClient.Close() + + if err != nil { + return err + } + + // TODO: Return error after postgreSQL is stable. 
+ // postgresClient, _ := s.createPostgresqlClient(ctx, logger) + // if err != nil { + // return err + // } + // if postgresClient != nil { + // defer postgresClient.Close() + // } + + p := persister.NewPersister( + featureClient, + puller, + datastore, + btClient, + nil, // Disable PostgreSQL temporarily due to instability issues on the Google side. + persister.WithMaxMPS(*s.maxMPS), + persister.WithNumWorkers(*s.numWorkers), + persister.WithFlushSize(*s.flushSize), + persister.WithFlushInterval(*s.flushInterval), + persister.WithMetrics(registerer), + persister.WithLogger(logger), + ) + defer p.Stop() + go p.Run() // nolint:errcheck + + healthChecker := health.NewGrpcChecker( + health.WithTimeout(time.Second), + health.WithCheck("metrics", metrics.Check), + health.WithCheck("persister", p.Check), + ) + go healthChecker.Run(ctx) + + server := rpc.NewServer(healthChecker, *s.certPath, *s.keyPath, + rpc.WithPort(*s.port), + rpc.WithMetrics(registerer), + rpc.WithLogger(logger), + rpc.WithHandler("/health", healthChecker), + ) + defer server.Stop(10 * time.Second) + go server.Run() + + <-ctx.Done() + return nil +} + +func (s *server) createPuller(ctx context.Context, logger *zap.Logger) (puller.Puller, error) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + client, err := pubsub.NewClient(ctx, *s.project, pubsub.WithLogger(logger)) + if err != nil { + logger.Error("Failed to create PubSub client", zap.Error(err)) + return nil, err + } + return client.CreatePuller(*s.subscription, *s.topic, + pubsub.WithNumGoroutines(*s.pullerNumGoroutines), + pubsub.WithMaxOutstandingMessages(*s.pullerMaxOutstandingMessages), + pubsub.WithMaxOutstandingBytes(*s.pullerMaxOutstandingBytes), + ) +} + +func (s *server) createWriters( + ctx context.Context, + registerer metrics.Registerer, + logger *zap.Logger, +) (datastore.Writer, error) { + writers := make([]datastore.Writer, 0, *s.numWriters) + for i := 0; i < *s.numWriters; i++ { + writer, err := 
s.createKafkaWriter(ctx, registerer, logger) + if err != nil { + return nil, err + } + writers = append(writers, writer) + } + if len(writers) == 1 { + logger.Info("Created a single writer", zap.Int("numWriters", *s.numWriters)) + return writers[0], nil + } + logger.Info("Created a writer pool", zap.Int("numWriters", *s.numWriters), zap.Int("poolSize", len(writers))) + return datastore.NewWriterPool(writers), nil +} + +func (s *server) createKafkaWriter( + ctx context.Context, + registerer metrics.Registerer, + logger *zap.Logger, +) (datastore.Writer, error) { + logger.Debug("createKafkaWriter") + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + kafkaProducer, err := kafka.NewProducer( + ctx, + *s.project, + *s.kafkaURL, + *s.kafkaUsername, + *s.kafkaPassword) + if err != nil { + logger.Error("Failed to create Kafka producer", zap.Error(err)) + return nil, err + } + writer, err := datastore.NewKafkaWriter(kafkaProducer, + *s.kafkaTopicPrefix, + *s.kafkaTopicDataType, + datastore.WithMetrics(registerer), + datastore.WithLogger(logger), + ) + if err != nil { + logger.Error("Failed to create Kafka writer", zap.Error(err)) + return nil, err + } + return writer, nil +} + +func (s *server) createBigtableClient( + ctx context.Context, + registerer metrics.Registerer, + logger *zap.Logger, +) (bigtable.Client, error) { + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + return bigtable.NewBigtableClient(ctx, *s.project, *s.bigtableInstance, + bigtable.WithMetrics(registerer), + bigtable.WithLogger(logger), + ) +} + +// func (s *server) createPostgresqlClient( +// ctx context.Context, +// logger *zap.Logger, +// ) (postgres.Client, error) { +// ctx, cancel := context.WithTimeout(ctx, 10*time.Second) +// defer cancel() +// return postgres.NewClient( +// ctx, +// *s.project, *s.alloyDBRegion, *s.alloyDBClusterID, *s.alloyDBInstanceID, +// *s.alloyDBUser, *s.alloyDBPass, *s.alloyDBName, +// postgres.WithLogger(logger), +// ) 
+// } diff --git a/pkg/eventpersister/datastore/BUILD.bazel b/pkg/eventpersister/datastore/BUILD.bazel new file mode 100644 index 000000000..3bbf04365 --- /dev/null +++ b/pkg/eventpersister/datastore/BUILD.bazel @@ -0,0 +1,29 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "datastore.go", + "kafka.go", + "metrics.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/eventpersister/datastore", + visibility = ["//visibility:public"], + deps = [ + "//pkg/metrics:go_default_library", + "//pkg/storage/kafka:go_default_library", + "@com_github_prometheus_client_golang//prometheus:go_default_library", + "@com_github_shopify_sarama//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["datastore_test.go"], + embed = [":go_default_library"], + deps = [ + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + ], +) diff --git a/pkg/eventpersister/datastore/datastore.go b/pkg/eventpersister/datastore/datastore.go new file mode 100644 index 000000000..57ca8aa7d --- /dev/null +++ b/pkg/eventpersister/datastore/datastore.go @@ -0,0 +1,56 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package datastore + +import ( + "context" + "sync/atomic" +) + +type Writer interface { + Write( + ctx context.Context, + events map[string]string, + environmentNamespace string, + ) (map[string]bool, error) + Close() +} + +type writerPool struct { + writes uint64 + writers []Writer +} + +func NewWriterPool(writers []Writer) Writer { + return &writerPool{ + writers: writers, + } +} + +func (p *writerPool) Write( + ctx context.Context, + events map[string]string, + environmentNamespace string, +) (map[string]bool, error) { + writes := atomic.AddUint64(&p.writes, 1) + index := int(writes) % len(p.writers) + return p.writers[index].Write(ctx, events, environmentNamespace) +} + +func (p *writerPool) Close() { + for _, w := range p.writers { + w.Close() + } +} diff --git a/pkg/eventpersister/datastore/datastore_test.go b/pkg/eventpersister/datastore/datastore_test.go new file mode 100644 index 000000000..2739712d1 --- /dev/null +++ b/pkg/eventpersister/datastore/datastore_test.go @@ -0,0 +1,77 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package datastore + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type testWriter struct { + writes int +} + +func (w *testWriter) Write(ctx context.Context, events map[string]string, environmentNamespace string) (map[string]bool, error) { + w.writes++ + return nil, nil +} + +func (w *testWriter) Close() {} + +func newTestWriters(num int) []Writer { + writers := make([]Writer, 0, num) + for i := 0; i < num; i++ { + writers = append(writers, &testWriter{}) + } + return writers +} + +func TestWriterPool(t *testing.T) { + testcases := []struct { + writes uint64 + writers []Writer + environmentNamespace string + }{ + { + writes: 0, + writers: newTestWriters(5), + environmentNamespace: "ns0", + }, + { + writes: 1<<64 - 1, + writers: newTestWriters(2), + environmentNamespace: "ns0", + }, + } + for i, tc := range testcases { + des := fmt.Sprintf("Index %d", i) + pool := writerPool{ + writes: tc.writes, + writers: tc.writers, + } + for j := 0; j < len(tc.writers); j++ { + pool.Write(context.Background(), nil, tc.environmentNamespace) + } + for j := 0; j < len(tc.writers); j++ { + writer, ok := tc.writers[j].(*testWriter) + require.Equal(t, true, ok, des) + assert.Equal(t, 1, writer.writes, des) + } + } +} diff --git a/pkg/eventpersister/datastore/kafka.go b/pkg/eventpersister/datastore/kafka.go new file mode 100644 index 000000000..dc23a821b --- /dev/null +++ b/pkg/eventpersister/datastore/kafka.go @@ -0,0 +1,137 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "context" + "time" + + "github.com/Shopify/sarama" + "go.uber.org/zap" + + "github.com/bucketeer-io/bucketeer/pkg/metrics" + storagekafka "github.com/bucketeer-io/bucketeer/pkg/storage/kafka" +) + +type options struct { + metrics metrics.Registerer + logger *zap.Logger +} + +type Option func(*options) + +func WithMetrics(r metrics.Registerer) Option { + return func(opts *options) { + opts.metrics = r + } +} + +func WithLogger(l *zap.Logger) Option { + return func(opts *options) { + opts.logger = l + } +} + +type kafkaWriter struct { + producer *storagekafka.Producer + topicPrefix string + topicDataType string + logger *zap.Logger +} + +func NewKafkaWriter( + producer *storagekafka.Producer, + topicPrefix, + topicDataType string, + opts ...Option, +) (Writer, error) { + dopts := &options{ + logger: zap.NewNop(), + } + for _, opt := range opts { + opt(dopts) + } + if dopts.metrics != nil { + registerMetrics(dopts.metrics) + } + return &kafkaWriter{ + producer: producer, + topicPrefix: topicPrefix, + topicDataType: topicDataType, + logger: dopts.logger.Named("kafka"), + }, nil +} + +func (w *kafkaWriter) Close() { + if err := w.producer.Close(); err != nil { + w.logger.Error("Close failed", zap.Error(err)) + } +} + +func (w *kafkaWriter) Write( + ctx context.Context, + events map[string]string, + environmentNamespace string, +) (fails map[string]bool, err error) { + startTime := time.Now() + defer func() { + code := codeSuccess + if err != nil { + code = codeFail + } + writeCounter.WithLabelValues(writerKafka, 
code).Inc() + wroteHistogram.WithLabelValues(writerKafka, code).Observe(time.Since(startTime).Seconds()) + }() + messages := make([]*sarama.ProducerMessage, 0, len(events)) + for id, event := range events { + messages = append(messages, &sarama.ProducerMessage{ + Key: sarama.StringEncoder(id), + Topic: storagekafka.TopicName(w.topicPrefix, w.topicDataType), + Value: sarama.StringEncoder(event), + }) + } + err = w.producer.SendMessages(messages) + merr, ok := err.(sarama.ProducerErrors) + if !ok { + return nil, err + } + fails = make(map[string]bool, len(events)) + for _, e := range merr { + id := string(e.Msg.Key.(sarama.StringEncoder)) + if !w.isRepeatable(e.Err) { + fails[id] = false + w.logger.Error("MultiError NonRepeatable", + zap.Error(e), + zap.String("environmentNamespace", environmentNamespace), + zap.Any("msg", id), + ) + break + } + w.logger.Error("MultiError Repeatable", + zap.Error(e), + zap.String("environmentNamespace", environmentNamespace), + zap.Any("msg", id), + ) + } + return fails, nil +} + +func (w *kafkaWriter) isRepeatable(err error) bool { + switch err { + case sarama.ErrInvalidMessage: + return false + } + return true +} diff --git a/pkg/eventpersister/datastore/metrics.go b/pkg/eventpersister/datastore/metrics.go new file mode 100644 index 000000000..6a95f039d --- /dev/null +++ b/pkg/eventpersister/datastore/metrics.go @@ -0,0 +1,59 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package datastore + +import ( + "sync" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/bucketeer-io/bucketeer/pkg/metrics" +) + +const ( + writerKafka = "Kafka" + codeSuccess = "Success" + codeFail = "Fail" +) + +var ( + registerOnce sync.Once + + writeCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "event_persister", + Name: "write_total", + Help: "Total number of writes", + }, []string{"writer", "code"}) + + wroteHistogram = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "bucketeer", + Subsystem: "event_persister", + Name: "wrote_seconds", + Help: "Histogram of events handling duration (seconds)", + Buckets: prometheus.DefBuckets, + }, []string{"writer", "code"}) +) + +func registerMetrics(r metrics.Registerer) { + registerOnce.Do(func() { + r.MustRegister( + writeCounter, + wroteHistogram, + ) + }) +} diff --git a/pkg/eventpersister/persister/BUILD.bazel b/pkg/eventpersister/persister/BUILD.bazel new file mode 100644 index 000000000..8d46fb2ed --- /dev/null +++ b/pkg/eventpersister/persister/BUILD.bazel @@ -0,0 +1,52 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "metrics.go", + "persister.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/eventpersister/persister", + visibility = ["//visibility:public"], + deps = [ + "//pkg/errgroup:go_default_library", + "//pkg/eventpersister/datastore:go_default_library", + "//pkg/eventpersister/storage/v2:go_default_library", + "//pkg/feature/client:go_default_library", + "//pkg/feature/storage:go_default_library", + "//pkg/health:go_default_library", + "//pkg/log:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/pubsub/puller:go_default_library", + "//pkg/pubsub/puller/codes:go_default_library", + "//pkg/storage/v2/bigtable:go_default_library", + "//pkg/storage/v2/postgres:go_default_library", + 
"//proto/event/client:go_default_library", + "//proto/event/service:go_default_library", + "//proto/feature:go_default_library", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_golang_protobuf//ptypes:go_default_library_gen", + "@com_github_prometheus_client_golang//prometheus:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["persister_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/feature/client/mock:go_default_library", + "//pkg/feature/storage/mock:go_default_library", + "//pkg/pubsub/puller/mock:go_default_library", + "//pkg/storage/v2/bigtable:go_default_library", + "//proto/event/client:go_default_library", + "//proto/event/service:go_default_library", + "//proto/feature:go_default_library", + "//proto/user:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/eventpersister/persister/metrics.go b/pkg/eventpersister/persister/metrics.go new file mode 100644 index 000000000..d60ea4b87 --- /dev/null +++ b/pkg/eventpersister/persister/metrics.go @@ -0,0 +1,51 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package persister + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/bucketeer-io/bucketeer/pkg/metrics" +) + +var ( + receivedCounter = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "event_persister", + Name: "received_total", + Help: "Total number of received messages", + }) + + handledCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "event_persister", + Name: "handled_total", + Help: "Total number of handled messages", + }, []string{"code"}) + + cacheCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "event_persister", + Name: "cache_requests_total", + Help: "Total number of cache requests", + }, []string{"type", "code"}) +) + +func registerMetrics(r metrics.Registerer) { + r.MustRegister(receivedCounter, handledCounter, cacheCounter) +} diff --git a/pkg/eventpersister/persister/persister.go b/pkg/eventpersister/persister/persister.go new file mode 100644 index 000000000..5e5771f9e --- /dev/null +++ b/pkg/eventpersister/persister/persister.go @@ -0,0 +1,594 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package persister + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "strconv" + "time" + + "github.com/golang/protobuf/proto" // nolint:staticcheck + "github.com/golang/protobuf/ptypes" + "go.uber.org/zap" + + "github.com/bucketeer-io/bucketeer/pkg/errgroup" + "github.com/bucketeer-io/bucketeer/pkg/eventpersister/datastore" + v2ec "github.com/bucketeer-io/bucketeer/pkg/eventpersister/storage/v2" + featureclient "github.com/bucketeer-io/bucketeer/pkg/feature/client" + featurestorage "github.com/bucketeer-io/bucketeer/pkg/feature/storage" + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller/codes" + bigtable "github.com/bucketeer-io/bucketeer/pkg/storage/v2/bigtable" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/postgres" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/client" + esproto "github.com/bucketeer-io/bucketeer/proto/event/service" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +var ( + ErrUnexpectedMessageType = errors.New("eventpersister: unexpected message type") +) + +type eventMap map[string]proto.Message +type environmentEventMap map[string]eventMap + +type options struct { + maxMPS int + numWorkers int + flushSize int + flushInterval time.Duration + flushTimeout time.Duration + metrics metrics.Registerer + logger *zap.Logger +} + +type Option func(*options) + +func WithMaxMPS(mps int) Option { + return func(opts *options) { + opts.maxMPS = mps + } +} + +func WithNumWorkers(n int) Option { + return func(opts *options) { + opts.numWorkers = n + } +} + +func WithFlushSize(s int) Option { + return func(opts *options) { + opts.flushSize = s + } +} + +func WithFlushInterval(i time.Duration) Option { + return func(opts *options) { + opts.flushInterval = i + } +} + +func WithFlushTimeout(timeout 
time.Duration) Option { + return func(opts *options) { + opts.flushTimeout = timeout + } +} + +func WithMetrics(r metrics.Registerer) Option { + return func(opts *options) { + opts.metrics = r + } +} + +func WithLogger(l *zap.Logger) Option { + return func(opts *options) { + opts.logger = l + } +} + +type Persister struct { + featureClient featureclient.Client + puller puller.RateLimitedPuller + datastore datastore.Writer + userEvaluationStorage featurestorage.UserEvaluationsStorage + group errgroup.Group + opts *options + logger *zap.Logger + ctx context.Context + cancel func() + doneCh chan struct{} + postgresClient postgres.Client +} + +func NewPersister( + featureClient featureclient.Client, + p puller.Puller, + ds datastore.Writer, + bt bigtable.Client, + postgresClient postgres.Client, + opts ...Option, +) *Persister { + dopts := &options{ + maxMPS: 1000, + numWorkers: 1, + flushSize: 50, + flushInterval: 5 * time.Second, + flushTimeout: 20 * time.Second, + logger: zap.NewNop(), + } + for _, opt := range opts { + opt(dopts) + } + if dopts.metrics != nil { + registerMetrics(dopts.metrics) + } + ctx, cancel := context.WithCancel(context.Background()) + return &Persister{ + featureClient: featureClient, + puller: puller.NewRateLimitedPuller(p, dopts.maxMPS), + datastore: ds, + userEvaluationStorage: featurestorage.NewUserEvaluationsStorage(bt), + opts: dopts, + logger: dopts.logger.Named("persister"), + ctx: ctx, + cancel: cancel, + doneCh: make(chan struct{}), + postgresClient: postgresClient, + } +} + +func (p *Persister) Run() error { + defer close(p.doneCh) + p.group.Go(func() error { + return p.puller.Run(p.ctx) + }) + for i := 0; i < p.opts.numWorkers; i++ { + p.group.Go(p.batch) + } + return p.group.Wait() +} + +func (p *Persister) Stop() { + p.cancel() + <-p.doneCh +} + +func (p *Persister) Check(ctx context.Context) health.Status { + select { + case <-p.ctx.Done(): + p.logger.Error("Unhealthy due to context Done is closed", zap.Error(p.ctx.Err())) + 
return health.Unhealthy + default: + if p.group.FinishedCount() > 0 { + p.logger.Error("Unhealthy", zap.Int32("FinishedCount", p.group.FinishedCount())) + return health.Unhealthy + } + return health.Healthy + } +} + +func (p *Persister) batch() error { + batch := make(map[string]*puller.Message) + timer := time.NewTimer(p.opts.flushInterval) + defer timer.Stop() + for { + select { + case msg, ok := <-p.puller.MessageCh(): + if !ok { + return nil + } + receivedCounter.Inc() + id := msg.Attributes["id"] + if id == "" { + msg.Ack() + // TODO: better log format for msg data + handledCounter.WithLabelValues(codes.MissingID.String()).Inc() + continue + } + if previous, ok := batch[id]; ok { + previous.Ack() + p.logger.Warn("Message with duplicate id", zap.String("id", id)) + handledCounter.WithLabelValues(codes.DuplicateID.String()).Inc() + } + batch[id] = msg + if len(batch) < p.opts.flushSize { + continue + } + p.send(batch) + batch = make(map[string]*puller.Message) + timer.Reset(p.opts.flushInterval) + case <-timer.C: + if len(batch) > 0 { + p.send(batch) + batch = make(map[string]*puller.Message) + } + timer.Reset(p.opts.flushInterval) + case <-p.ctx.Done(): + return nil + } + } +} + +func (p *Persister) send(messages map[string]*puller.Message) { + ctx, cancel := context.WithTimeout(context.Background(), p.opts.flushTimeout) + defer cancel() + envEvents := p.extractEvents(messages) + if len(envEvents) == 0 { + p.logger.Error("all messages were bad") + return + } + fails := make(map[string]bool, len(messages)) + for environmentNamespace, events := range envEvents { + evs := make(map[string]string, len(events)) + for id, event := range events { + if p.postgresClient != nil { + if err := p.createEvent(event, id, environmentNamespace); err != nil { + p.logger.Error( + "failed to store an event", + zap.Error(err), + zap.String("id", id), + zap.String("environmentNamespace", environmentNamespace), + ) + } + } + eventJSON, repeatable, err := p.marshalEvent(event, 
environmentNamespace) + if err != nil { + if !repeatable { + p.logger.Error( + "failed to marshal an unrepeatable event", + zap.Error(err), + zap.String("id", id), + zap.String("environmentNamespace", environmentNamespace), + ) + } + fails[id] = repeatable + continue + } + evs[id] = eventJSON + } + if len(evs) > 0 { + fs, err := p.datastore.Write(ctx, evs, environmentNamespace) + if err != nil { + p.logger.Error( + "could not write to datastore", + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + ) + } + for id, f := range fs { + fails[id] = f + } + } + } + for id, m := range messages { + if repeatable, ok := fails[id]; ok { + if repeatable { + m.Nack() + handledCounter.WithLabelValues(codes.RepeatableError.String()).Inc() + } else { + m.Ack() + handledCounter.WithLabelValues(codes.NonRepeatableError.String()).Inc() + } + continue + } + m.Ack() + handledCounter.WithLabelValues(codes.OK.String()).Inc() + } +} + +func (p *Persister) extractEvents(messages map[string]*puller.Message) environmentEventMap { + envEvents := environmentEventMap{} + handleBadMessage := func(m *puller.Message, err error) { + m.Ack() + p.logger.Error("bad message", zap.Error(err), zap.Any("msg", m)) + handledCounter.WithLabelValues(codes.BadMessage.String()).Inc() + } + for _, m := range messages { + event := &eventproto.Event{} + if err := proto.Unmarshal(m.Data, event); err != nil { + handleBadMessage(m, err) + continue + } + var innerEvent ptypes.DynamicAny + if err := ptypes.UnmarshalAny(event.Event, &innerEvent); err != nil { + handleBadMessage(m, err) + continue + } + if innerEvents, ok := envEvents[event.EnvironmentNamespace]; ok { + innerEvents[event.Id] = innerEvent.Message + continue + } + envEvents[event.EnvironmentNamespace] = eventMap{event.Id: innerEvent.Message} + } + return envEvents +} + +func (p *Persister) marshalEvent(event interface{}, environmentNamespace string) (string, bool, error) { + switch event := event.(type) { + case 
*eventproto.EvaluationEvent: + return p.marshalEvaluationEvent(event, environmentNamespace) + case *eventproto.GoalEvent: + return p.marshalGoalEvent(event, environmentNamespace) + case *esproto.UserEvent: + return p.marshalUserEvent(event, environmentNamespace) + } + return "", false, ErrUnexpectedMessageType +} + +func (p *Persister) marshalEvaluationEvent( + e *eventproto.EvaluationEvent, + environmentNamespace string, +) (string, bool, error) { + m := map[string]string{} + m["environmentNamespace"] = environmentNamespace + m["sourceId"] = e.SourceId.String() + m["tag"] = e.Tag + m["timestamp"] = time.Unix(e.Timestamp, 0).Format(time.RFC3339) + m["featureId"] = e.FeatureId + m["featureVersion"] = strconv.FormatInt(int64(e.FeatureVersion), 10) + m["userId"] = e.UserId + m["metric.userId"] = e.UserId + m["variationId"] = e.VariationId + if e.Reason != nil { + m["reason"] = e.Reason.Type.String() + } + if e.User != nil { + for k, v := range e.User.Data { + m[userMetadataColumn(environmentNamespace, k)] = v + } + } + b, err := json.Marshal(m) + if err != nil { + return "", false, err + } + return string(b), false, nil +} + +func (p *Persister) marshalGoalEvent(e *eventproto.GoalEvent, environmentNamespace string) (string, bool, error) { + m := map[string]interface{}{} + m["environmentNamespace"] = environmentNamespace + m["sourceId"] = e.SourceId.String() + m["tag"] = e.Tag + m["timestamp"] = time.Unix(e.Timestamp, 0).Format(time.RFC3339) + m["goalId"] = e.GoalId + m["userId"] = e.UserId + m["metric.userId"] = e.UserId + if e.User != nil { + for k, v := range e.User.Data { + m[userMetadataColumn(environmentNamespace, k)] = v + } + } + m["value"] = strconv.FormatFloat(e.Value, 'f', -1, 64) + ue, retriable, err := p.getEvaluations(e, environmentNamespace) + if err != nil { + return "", retriable, err + } + evaluations := []string{} + for _, eval := range ue { + reason := "" + if eval.Reason != nil { + reason = eval.Reason.Type.String() + } + evaluations = append( + 
evaluations, + fmt.Sprintf("%s:%d:%s:%s", eval.FeatureId, eval.FeatureVersion, eval.VariationId, reason), + ) + } + if len(evaluations) == 0 { + p.logger.Warn( + "Goal event has no evaluations", + zap.String("environmentNamespace", environmentNamespace), + zap.String("sourceId", e.SourceId.String()), + zap.String("goalId", e.GoalId), + zap.String("userId", e.UserId), + zap.String("tag", e.Tag), + zap.String("timestamp", time.Unix(e.Timestamp, 0).Format(time.RFC3339)), + ) + } + m["evaluations"] = evaluations + b, err := json.Marshal(m) + if err != nil { + return "", false, err + } + return string(b), false, nil +} + +func (p *Persister) getEvaluations( + e *eventproto.GoalEvent, + environmentNamespace string, +) ([]*featureproto.Evaluation, bool, error) { + // Evaluations field in the GoalEvent is deprecated. + // The following conditions should be removed once all client SDKs are updated. + if e.SourceId == eventproto.SourceId_GOAL_BATCH { + // Because the Goal Batch Transformer includes events from the new and old SDKs + // we need to check both cases. 
+ // If both cases fail, it will save the event with no evaluations + var ue []*featureproto.Evaluation + ue, err := p.getCurrentUserEvaluations(environmentNamespace, e.UserId, e.Tag) + if err != nil { + if err == bigtable.ErrKeyNotFound { + // Old SDK + resp, err := p.featureClient.EvaluateFeatures(p.ctx, &featureproto.EvaluateFeaturesRequest{ + User: e.User, + EnvironmentNamespace: environmentNamespace, + Tag: e.Tag, + }) + if err != nil { + return nil, false, err + } + return resp.UserEvaluations.Evaluations, false, nil + } + // Retry + return nil, true, err + } + return ue, false, nil + } + // Old SDK implementation doesn't include the Tag, so we use the evaluations from the client + if e.Tag == "" { + return e.Evaluations, false, nil + } + // New SDK implementation + ue, err := p.getCurrentUserEvaluations(environmentNamespace, e.UserId, e.Tag) + if err != nil && err != bigtable.ErrKeyNotFound { + p.logger.Error( + "Failed to get user evaluations", + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + zap.String("sourceId", e.SourceId.String()), + zap.String("goalId", e.GoalId), + zap.String("userId", e.UserId), + zap.String("tag", e.Tag), + zap.String("timestamp", time.Unix(e.Timestamp, 0).Format(time.RFC3339)), + ) + return nil, true, err + } + return ue, false, nil +} + +func (p *Persister) getCurrentUserEvaluations( + environmentNamespace, + userID, + tag string, +) ([]*featureproto.Evaluation, error) { + evaluations, err := p.userEvaluationStorage.GetUserEvaluations( + p.ctx, + userID, + environmentNamespace, + tag, + ) + if err != nil { + return nil, err + } + return evaluations, nil +} + +func (p *Persister) marshalUserEvent(e *esproto.UserEvent, environmentNamespace string) (string, bool, error) { + m := map[string]interface{}{} + m["environmentNamespace"] = environmentNamespace + m["sourceId"] = e.SourceId.String() + m["tag"] = e.Tag + m["timestamp"] = time.Unix(e.LastSeen, 0).Format(time.RFC3339) + m["userId"] = e.UserId + b, 
err := json.Marshal(m) + if err != nil { + return "", false, err + } + return string(b), false, nil +} + +func userMetadataColumn(environmentNamespace string, key string) string { + if environmentNamespace == "" { + return fmt.Sprintf("user.data.%s", key) + } + return fmt.Sprintf("%s.user.data.%s", environmentNamespace, key) +} + +func (p *Persister) createEvent(event interface{}, id, environmentNamespace string) error { + switch event := event.(type) { + case *eventproto.EvaluationEvent: + return p.createEvaluationEvent(event, id, environmentNamespace) + case *eventproto.GoalEvent: + return p.createGoalEvent(event, id, environmentNamespace) + case *esproto.UserEvent: + return p.createUserEvent(event, id, environmentNamespace) + } + return ErrUnexpectedMessageType +} + +func (p *Persister) createEvaluationEvent( + event *eventproto.EvaluationEvent, + id, environmentNamespace string, +) error { + eventStorage := v2ec.NewEventCreationStorage(p.postgresClient) + if err := eventStorage.CreateEvaluationEvent(p.ctx, event, id, environmentNamespace); err != nil { + p.logger.Error( + "Failed to store evaluation event", + log.FieldsFromImcomingContext(p.ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return err + } + return nil +} + +func (p *Persister) createGoalEvent( + event *eventproto.GoalEvent, + id, environmentNamespace string, +) error { + ue, _, err := p.getEvaluations(event, environmentNamespace) + if err != nil { + return err + } + evaluations := []string{} + for _, eval := range ue { + reason := "" + if eval.Reason != nil { + reason = eval.Reason.Type.String() + } + evaluations = append( + evaluations, + fmt.Sprintf("%s:%d:%s:%s", eval.FeatureId, eval.FeatureVersion, eval.VariationId, reason), + ) + } + if len(evaluations) == 0 { + p.logger.Warn( + "Goal event has no evaluations", + zap.String("environmentNamespace", environmentNamespace), + zap.String("sourceId", event.SourceId.String()), + 
zap.String("goalId", event.GoalId), + zap.String("userId", event.UserId), + zap.String("tag", event.Tag), + zap.String("timestamp", time.Unix(event.Timestamp, 0).Format(time.RFC3339)), + ) + } + eventStorage := v2ec.NewEventCreationStorage(p.postgresClient) + if err := eventStorage.CreateGoalEvent(p.ctx, event, id, environmentNamespace, evaluations); err != nil { + p.logger.Error( + "Failed to store goal event", + log.FieldsFromImcomingContext(p.ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return err + } + return nil +} + +func (p *Persister) createUserEvent( + event *esproto.UserEvent, + id, environmentNamespace string, +) error { + eventStorage := v2ec.NewEventCreationStorage(p.postgresClient) + if err := eventStorage.CreateUserEvent(p.ctx, event, id, environmentNamespace); err != nil { + p.logger.Error( + "Failed to store user event", + log.FieldsFromImcomingContext(p.ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return err + } + return nil +} diff --git a/pkg/eventpersister/persister/persister_test.go b/pkg/eventpersister/persister/persister_test.go new file mode 100644 index 000000000..d5b58b06e --- /dev/null +++ b/pkg/eventpersister/persister/persister_test.go @@ -0,0 +1,481 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package persister + +import ( + "bytes" + "context" + "encoding/json" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + fcmock "github.com/bucketeer-io/bucketeer/pkg/feature/client/mock" + ftmock "github.com/bucketeer-io/bucketeer/pkg/feature/storage/mock" + pullermock "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller/mock" + btstorage "github.com/bucketeer-io/bucketeer/pkg/storage/v2/bigtable" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/client" + esproto "github.com/bucketeer-io/bucketeer/proto/event/service" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" + userproto "github.com/bucketeer-io/bucketeer/proto/user" +) + +var defaultOptions = options{ + logger: zap.NewNop(), +} + +func TestMarshaEvent(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + layout := "2006-01-02 15:04:05 -0700 MST" + t1, err := time.Parse(layout, "2014-01-17 23:02:03 +0000 UTC") + require.NoError(t, err) + patterns := map[string]struct { + setup func(context.Context, *Persister) + input interface{} + expected string + expectedErr error + expectedRepeatable bool + }{ + "success: user event": { + setup: nil, + input: &esproto.UserEvent{ + UserId: "uid", + SourceId: eventproto.SourceId_ANDROID, + Tag: "tag", + LastSeen: t1.Unix(), + }, + expected: `{ + "environmentNamespace": "ns", + "sourceId": "ANDROID", + "tag": "tag", + "timestamp": "2014-01-17T23:02:03Z", + "userId":"uid" + }`, + expectedErr: nil, + expectedRepeatable: false, + }, + "success evaluation event": { + setup: nil, + input: &eventproto.EvaluationEvent{ + Tag: "tag", + Timestamp: t1.Unix(), + FeatureId: "fid", + FeatureVersion: int32(1), + UserId: "uid", + VariationId: "vid", + Reason: &featureproto.Reason{Type: featureproto.Reason_CLIENT}, + User: &userproto.User{ + Id: "uid", + Data: map[string]string{"atr": "av"}, + }, + 
}, + expected: `{ + "environmentNamespace":"ns", + "featureId": "fid", + "featureVersion": "1", + "metric.userId": "uid", + "ns.user.data.atr":"av", + "reason":"CLIENT", + "sourceId":"UNKNOWN", + "tag":"tag", + "timestamp":"2014-01-17T23:02:03Z", + "userId":"uid", + "variationId":"vid" + }`, + expectedErr: nil, + expectedRepeatable: false, + }, + "err goal batch event: internal error from bigtable": { + setup: func(ctx context.Context, p *Persister) { + p.userEvaluationStorage.(*ftmock.MockUserEvaluationsStorage).EXPECT().GetUserEvaluations( + ctx, + "uid", + "ns", + "tag", + ).Return(nil, btstorage.ErrInternal).Times(1) + }, + input: &eventproto.GoalEvent{ + SourceId: eventproto.SourceId_GOAL_BATCH, + Timestamp: t1.Unix(), + GoalId: "gid", + UserId: "uid", + User: &userproto.User{ + Id: "uid", + Data: map[string]string{"atr": "av"}, + }, + Value: float64(1.2), + Evaluations: nil, + Tag: "tag", + }, + expected: "", + expectedErr: btstorage.ErrInternal, + expectedRepeatable: true, + }, + "success goal batch event: getting evaluations from bigtable": { + setup: func(ctx context.Context, p *Persister) { + p.userEvaluationStorage.(*ftmock.MockUserEvaluationsStorage).EXPECT().GetUserEvaluations( + ctx, + "uid", + "ns", + "tag", + ).Return([]*featureproto.Evaluation{ + { + FeatureId: "fid-0", + FeatureVersion: int32(0), + VariationId: "vid-0", + Reason: &featureproto.Reason{Type: featureproto.Reason_CLIENT}, + }, + { + FeatureId: "fid-1", + FeatureVersion: int32(1), + VariationId: "vid-1", + Reason: &featureproto.Reason{Type: featureproto.Reason_TARGET}, + }, + }, nil).Times(1) + }, + input: &eventproto.GoalEvent{ + SourceId: eventproto.SourceId_GOAL_BATCH, + Timestamp: t1.Unix(), + GoalId: "gid", + UserId: "uid", + User: &userproto.User{ + Id: "uid", + Data: map[string]string{"atr": "av"}, + }, + Value: float64(1.2), + Evaluations: nil, + Tag: "tag", + }, + expected: `{ + "environmentNamespace": "ns", + "evaluations": ["fid-0:0:vid-0:CLIENT","fid-1:1:vid-1:TARGET"], + 
"goalId": "gid", + "metric.userId": "uid", + "ns.user.data.atr":"av", + "sourceId":"GOAL_BATCH", + "tag": "tag", + "timestamp": "2014-01-17T23:02:03Z", + "userId":"uid", + "value": "1.2" + }`, + expectedErr: nil, + expectedRepeatable: false, + }, + "success goal batch event: getting evaluations from evaluate process with segment users": { + setup: func(ctx context.Context, p *Persister) { + p.userEvaluationStorage.(*ftmock.MockUserEvaluationsStorage).EXPECT().GetUserEvaluations( + ctx, + "uid", + "ns", + "tag", + ).Return(nil, btstorage.ErrKeyNotFound).Times(1) + p.featureClient.(*fcmock.MockClient).EXPECT().EvaluateFeatures( + ctx, + &featureproto.EvaluateFeaturesRequest{ + User: &userproto.User{ + Id: "uid", + Data: map[string]string{"atr": "av"}, + }, + EnvironmentNamespace: "ns", + Tag: "tag", + }, + ).Return( + &featureproto.EvaluateFeaturesResponse{ + UserEvaluations: &featureproto.UserEvaluations{ + Id: "uid", + Evaluations: []*featureproto.Evaluation{ + { + FeatureId: "fid", + FeatureVersion: int32(1), + VariationId: "vid-1", + Reason: &featureproto.Reason{Type: featureproto.Reason_RULE}, + }, + }, + }, + }, nil, + ).Times(1) + }, + input: &eventproto.GoalEvent{ + SourceId: eventproto.SourceId_GOAL_BATCH, + Timestamp: t1.Unix(), + GoalId: "gid", + UserId: "uid", + User: &userproto.User{ + Id: "uid", + Data: map[string]string{"atr": "av"}, + }, + Value: float64(1.2), + Evaluations: nil, + Tag: "tag", + }, + expected: `{ + "environmentNamespace": "ns", + "evaluations": ["fid:1:vid-1:RULE"], + "goalId": "gid", + "metric.userId": "uid", + "ns.user.data.atr":"av", + "sourceId":"GOAL_BATCH", + "tag": "tag", + "timestamp": "2014-01-17T23:02:03Z", + "userId":"uid", + "value": "1.2" + }`, + expectedErr: nil, + expectedRepeatable: false, + }, + "err goal batch event: internal error from feature api": { + setup: func(ctx context.Context, p *Persister) { + p.userEvaluationStorage.(*ftmock.MockUserEvaluationsStorage).EXPECT().GetUserEvaluations( + ctx, + "uid", + "ns", 
+ "tag", + ).Return(nil, btstorage.ErrKeyNotFound).Times(1) + p.featureClient.(*fcmock.MockClient).EXPECT().EvaluateFeatures( + ctx, + &featureproto.EvaluateFeaturesRequest{ + User: &userproto.User{ + Id: "uid", + Data: map[string]string{"atr": "av"}, + }, + EnvironmentNamespace: "ns", + Tag: "tag", + }, + ).Return( + nil, btstorage.ErrInternal, + ).Times(1) + }, + input: &eventproto.GoalEvent{ + SourceId: eventproto.SourceId_GOAL_BATCH, + Timestamp: t1.Unix(), + GoalId: "gid", + UserId: "uid", + User: &userproto.User{ + Id: "uid", + Data: map[string]string{"atr": "av"}, + }, + Value: float64(1.2), + Evaluations: nil, + Tag: "tag", + }, + expected: "", + expectedErr: btstorage.ErrInternal, + expectedRepeatable: false, + }, + "success goal event: no tag info": { + setup: nil, + input: &eventproto.GoalEvent{ + SourceId: eventproto.SourceId_ANDROID, + Timestamp: t1.Unix(), + GoalId: "gid", + UserId: "uid", + User: &userproto.User{ + Id: "uid", + Data: map[string]string{"atr": "av"}, + }, + Value: float64(1.2), + Evaluations: []*featureproto.Evaluation{ + { + FeatureId: "fid-0", + FeatureVersion: int32(0), + VariationId: "vid-0", + Reason: &featureproto.Reason{Type: featureproto.Reason_CLIENT}, + }, + { + FeatureId: "fid-1", + FeatureVersion: int32(1), + VariationId: "vid-1", + Reason: &featureproto.Reason{Type: featureproto.Reason_TARGET}, + }, + }, + Tag: "", + }, + expected: `{ + "environmentNamespace": "ns", + "evaluations": ["fid-0:0:vid-0:CLIENT","fid-1:1:vid-1:TARGET"], + "goalId": "gid", + "metric.userId": "uid", + "ns.user.data.atr":"av", + "sourceId":"ANDROID", + "tag": "", + "timestamp": "2014-01-17T23:02:03Z", + "userId":"uid", + "value": "1.2" + }`, + expectedErr: nil, + expectedRepeatable: false, + }, + "err goal event: internal": { + setup: func(ctx context.Context, p *Persister) { + p.userEvaluationStorage.(*ftmock.MockUserEvaluationsStorage).EXPECT().GetUserEvaluations( + ctx, + "uid", + "ns", + "tag", + ).Return(nil, btstorage.ErrInternal).Times(1) + 
}, + input: &eventproto.GoalEvent{ + SourceId: eventproto.SourceId_ANDROID, + Timestamp: t1.Unix(), + GoalId: "gid", + UserId: "uid", + User: &userproto.User{ + Id: "uid", + Data: map[string]string{"atr": "av"}, + }, + Value: float64(1.2), + Evaluations: nil, + Tag: "tag", + }, + expected: "", + expectedErr: btstorage.ErrInternal, + expectedRepeatable: true, + }, + "success goal event: key not found not in bigtable": { + setup: func(ctx context.Context, p *Persister) { + p.userEvaluationStorage.(*ftmock.MockUserEvaluationsStorage).EXPECT().GetUserEvaluations( + ctx, + "uid", + "ns", + "tag", + ).Return(nil, btstorage.ErrKeyNotFound).Times(1) + }, + input: &eventproto.GoalEvent{ + SourceId: eventproto.SourceId_ANDROID, + Timestamp: t1.Unix(), + GoalId: "gid", + UserId: "uid", + User: &userproto.User{ + Id: "uid", + Data: map[string]string{"atr": "av"}, + }, + Value: float64(1.2), + Evaluations: nil, + Tag: "tag", + }, + expected: `{ + "environmentNamespace": "ns", + "evaluations": [], + "goalId": "gid", + "metric.userId": "uid", + "ns.user.data.atr":"av", + "sourceId":"ANDROID", + "tag": "tag", + "timestamp": "2014-01-17T23:02:03Z", + "userId":"uid", + "value": "1.2" + }`, + expectedErr: nil, + expectedRepeatable: false, + }, + "success goal event: getting evaluations from bigtable": { + setup: func(ctx context.Context, p *Persister) { + p.userEvaluationStorage.(*ftmock.MockUserEvaluationsStorage).EXPECT().GetUserEvaluations( + ctx, + "uid", + "ns", + "tag", + ).Return([]*featureproto.Evaluation{ + { + FeatureId: "fid-0", + FeatureVersion: int32(0), + VariationId: "vid-0", + Reason: &featureproto.Reason{Type: featureproto.Reason_CLIENT}, + }, + { + FeatureId: "fid-1", + FeatureVersion: int32(1), + VariationId: "vid-1", + Reason: &featureproto.Reason{Type: featureproto.Reason_TARGET}, + }, + }, nil).Times(1) + }, + input: &eventproto.GoalEvent{ + SourceId: eventproto.SourceId_ANDROID, + Timestamp: t1.Unix(), + GoalId: "gid", + UserId: "uid", + User: &userproto.User{ 
+ Id: "uid", + Data: map[string]string{"atr": "av"}, + }, + Value: float64(1.2), + Evaluations: nil, + Tag: "tag", + }, + expected: `{ + "environmentNamespace": "ns", + "evaluations": ["fid-0:0:vid-0:CLIENT","fid-1:1:vid-1:TARGET"], + "goalId": "gid", + "metric.userId": "uid", + "ns.user.data.atr":"av", + "sourceId":"ANDROID", + "tag": "tag", + "timestamp": "2014-01-17T23:02:03Z", + "userId":"uid", + "value": "1.2" + }`, + expectedErr: nil, + expectedRepeatable: false, + }, + "err: ErrUnexpectedMessageType": { + input: "", + expected: "", + expectedErr: ErrUnexpectedMessageType, + expectedRepeatable: false, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + persister := newPersister(mockController) + if p.setup != nil { + p.setup(persister.ctx, persister) + } + actual, repeatable, err := persister.marshalEvent(p.input, "ns") + assert.Equal(t, p.expectedRepeatable, repeatable) + if err != nil { + assert.Equal(t, actual, "") + assert.Equal(t, p.expectedErr, err) + } else { + assert.Equal(t, p.expectedErr, err) + buf := new(bytes.Buffer) + err = json.Compact(buf, []byte(p.expected)) + require.NoError(t, err) + assert.Equal(t, buf.String(), actual) + } + }) + } +} + +func newPersister(c *gomock.Controller) *Persister { + ctx, cancel := context.WithCancel(context.Background()) + return &Persister{ + featureClient: fcmock.NewMockClient(c), + puller: pullermock.NewMockRateLimitedPuller(c), + datastore: nil, + userEvaluationStorage: ftmock.NewMockUserEvaluationsStorage(c), + opts: &defaultOptions, + logger: defaultOptions.logger, + ctx: ctx, + cancel: cancel, + doneCh: make(chan struct{}), + } +} diff --git a/pkg/eventpersister/storage/v2/BUILD.bazel b/pkg/eventpersister/storage/v2/BUILD.bazel new file mode 100644 index 000000000..6844b0d58 --- /dev/null +++ b/pkg/eventpersister/storage/v2/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["persister.go"], + 
importpath = "github.com/bucketeer-io/bucketeer/pkg/eventpersister/storage/v2", + visibility = ["//visibility:public"], + deps = [ + "//pkg/storage/v2/postgres:go_default_library", + "//proto/event/client:go_default_library", + "//proto/event/service:go_default_library", + ], +) diff --git a/pkg/eventpersister/storage/v2/persister.go b/pkg/eventpersister/storage/v2/persister.go new file mode 100644 index 000000000..db8489f3d --- /dev/null +++ b/pkg/eventpersister/storage/v2/persister.go @@ -0,0 +1,190 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package v2 + +import ( + "context" + "errors" + "strconv" + + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/postgres" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/client" + esproto "github.com/bucketeer-io/bucketeer/proto/event/service" +) + +var ( + ErrGoalEventAlreadyExists = errors.New("persister: goal event already exists") + ErrEvaluationEventAlreadyExists = errors.New("persister: evaluation event already exists") + ErrUserEventAlreadyExists = errors.New("persister: user event already exists") +) + +type EventCreationStorage interface { + CreateGoalEvent( + ctx context.Context, + event *eventproto.GoalEvent, + id, environmentNamespace string, + evaluations []string, + ) error + CreateEvaluationEvent(ctx context.Context, event *eventproto.EvaluationEvent, id, environmentNamespace string) error + CreateUserEvent(ctx context.Context, event *esproto.UserEvent, id, environmentNamespace string) error +} + +type eventStorage struct { + qe postgres.Execer +} + +func NewEventCreationStorage(qe postgres.Execer) EventCreationStorage { + return &eventStorage{qe: qe} +} + +func (s *eventStorage) CreateGoalEvent( + ctx context.Context, + event *eventproto.GoalEvent, + id, environmentNamespace string, + evaluations []string, +) error { + query := ` + INSERT INTO goal_event ( + id, + timestamp, + goal_id, + value, + user_id, + user_data, + tag, + source_id, + environment_namespace, + evaluations + ) VALUES ( + $1, $2, $3, $4, $5, $6, $7, $8, $9, $10 + ) ON CONFLICT DO NOTHING + ` + userData := map[string]string{} + if event.User != nil { + userData = event.User.Data + } + _, err := s.qe.ExecContext( + ctx, + query, + id, + event.Timestamp, + event.GoalId, + strconv.FormatFloat(event.Value, 'f', -1, 64), + event.UserId, + postgres.JSONObject{Val: userData}, + event.Tag, + event.SourceId.String(), + environmentNamespace, + postgres.JSONObject{Val: evaluations}, + ) + if err != 
nil { + if err == postgres.ErrDuplicateEntry { + return ErrGoalEventAlreadyExists + } + return err + } + return nil +} + +func (s *eventStorage) CreateEvaluationEvent( + ctx context.Context, + event *eventproto.EvaluationEvent, + id, environmentNamespace string, +) error { + query := ` + INSERT INTO evaluation_event ( + id, + timestamp, + feature_id, + feature_version, + variation_id, + user_id, + user_data, + reason, + tag, + source_id, + environment_namespace + ) VALUES ( + $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11 + ) ON CONFLICT DO NOTHING + ` + userData := map[string]string{} + if event.User != nil { + userData = event.User.Data + } + reason := "" + if event.Reason != nil { + reason = event.Reason.Type.String() + } + _, err := s.qe.ExecContext( + ctx, + query, + id, + event.Timestamp, + event.FeatureId, + event.FeatureVersion, + event.VariationId, + event.UserId, + postgres.JSONObject{Val: userData}, + reason, + event.Tag, + event.SourceId.String(), + environmentNamespace, + ) + if err != nil { + if err == postgres.ErrDuplicateEntry { + return ErrEvaluationEventAlreadyExists + } + return err + } + return nil +} + +func (s *eventStorage) CreateUserEvent( + ctx context.Context, + event *esproto.UserEvent, + id, environmentNamespace string, +) error { + query := ` + INSERT INTO user_event ( + id, + tag, + user_id, + timestamp, + source_id, + environment_namespace + ) VALUES ( + $1, $2, $3, $4, $5, $6 + ) ON CONFLICT DO NOTHING + ` + _, err := s.qe.ExecContext( + ctx, + query, + id, + event.Tag, + event.UserId, + event.LastSeen, + event.SourceId.String(), + environmentNamespace, + ) + if err != nil { + if err == postgres.ErrDuplicateEntry { + return ErrUserEventAlreadyExists + } + return err + } + return nil +} diff --git a/pkg/experiment/api/BUILD.bazel b/pkg/experiment/api/BUILD.bazel new file mode 100644 index 000000000..a412b58e7 --- /dev/null +++ b/pkg/experiment/api/BUILD.bazel @@ -0,0 +1,64 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", 
"go_test") + +go_library( + name = "go_default_library", + srcs = [ + "api.go", + "error.go", + "experiment.go", + "goal.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/experiment/api", + visibility = ["//visibility:public"], + deps = [ + "//pkg/account/client:go_default_library", + "//pkg/experiment/command:go_default_library", + "//pkg/experiment/domain:go_default_library", + "//pkg/experiment/storage/v2:go_default_library", + "//pkg/feature/client:go_default_library", + "//pkg/locale:go_default_library", + "//pkg/log:go_default_library", + "//pkg/pubsub/publisher:go_default_library", + "//pkg/role:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/rpc/status:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//proto/account:go_default_library", + "//proto/event/domain:go_default_library", + "//proto/experiment:go_default_library", + "//proto/feature:go_default_library", + "@go_googleapis//google/rpc:errdetails_go_proto", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "api_test.go", + "experiment_test.go", + "goal_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//pkg/account/client/mock:go_default_library", + "//pkg/experiment/storage/v2:go_default_library", + "//pkg/feature/client/mock:go_default_library", + "//pkg/pubsub/publisher/mock:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/storage:go_default_library", + "//pkg/storage/testing:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//pkg/storage/v2/mysql/mock:go_default_library", + "//pkg/token:go_default_library", + "//proto/account:go_default_library", + "//proto/experiment:go_default_library", + "//proto/feature:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + 
"@com_github_stretchr_testify//assert:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/experiment/api/api.go b/pkg/experiment/api/api.go new file mode 100644 index 000000000..c30f73718 --- /dev/null +++ b/pkg/experiment/api/api.go @@ -0,0 +1,129 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "context" + + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + accountclient "github.com/bucketeer-io/bucketeer/pkg/account/client" + featureclient "github.com/bucketeer-io/bucketeer/pkg/feature/client" + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + "github.com/bucketeer-io/bucketeer/pkg/role" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" + proto "github.com/bucketeer-io/bucketeer/proto/experiment" +) + +type options struct { + logger *zap.Logger +} + +type Option func(*options) + +func WithLogger(l *zap.Logger) Option { + return func(opts *options) { + opts.logger = l + } +} + +type experimentService struct { + featureClient featureclient.Client + accountClient accountclient.Client + mysqlClient 
mysql.Client + publisher publisher.Publisher + opts *options + logger *zap.Logger +} + +func NewExperimentService( + featureClient featureclient.Client, + accountClient accountclient.Client, + mysqlClient mysql.Client, + publisher publisher.Publisher, + opts ...Option, +) rpc.Service { + dopts := &options{ + logger: zap.NewNop(), + } + for _, opt := range opts { + opt(dopts) + } + return &experimentService{ + featureClient: featureClient, + accountClient: accountClient, + mysqlClient: mysqlClient, + publisher: publisher, + opts: dopts, + logger: dopts.logger.Named("api"), + } +} + +func (s *experimentService) Register(server *grpc.Server) { + proto.RegisterExperimentServiceServer(server, s) +} + +func (s *experimentService) checkRole( + ctx context.Context, + requiredRole accountproto.Account_Role, + environmentNamespace string, +) (*eventproto.Editor, error) { + editor, err := role.CheckRole(ctx, requiredRole, func(email string) (*accountproto.GetAccountResponse, error) { + return s.accountClient.GetAccount(ctx, &accountproto.GetAccountRequest{ + Email: email, + EnvironmentNamespace: environmentNamespace, + }) + }) + if err != nil { + switch status.Code(err) { + case codes.Unauthenticated: + s.logger.Info( + "Unauthenticated", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, localizedError(statusUnauthenticated, locale.JaJP) + case codes.PermissionDenied: + s.logger.Info( + "Permission denied", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, localizedError(statusPermissionDenied, locale.JaJP) + default: + s.logger.Error( + "Failed to check role", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + } + return 
editor, nil +} diff --git a/pkg/experiment/api/api_test.go b/pkg/experiment/api/api_test.go new file mode 100644 index 000000000..20adf11f6 --- /dev/null +++ b/pkg/experiment/api/api_test.go @@ -0,0 +1,115 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "context" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + + accountclientmock "github.com/bucketeer-io/bucketeer/pkg/account/client/mock" + featureclientmock "github.com/bucketeer-io/bucketeer/pkg/feature/client/mock" + publishermock "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher/mock" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/storage" + mysqlmock "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + "github.com/bucketeer-io/bucketeer/pkg/token" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +func TestNewExperimentService(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + featureClientMock := featureclientmock.NewMockClient(mockController) + accountClientMock := accountclientmock.NewMockClient(mockController) + mysqlClient := mysqlmock.NewMockClient(mockController) + p := publishermock.NewMockPublisher(mockController) + logger := zap.NewNop() + s := 
NewExperimentService( + featureClientMock, + accountClientMock, + mysqlClient, + p, + WithLogger(logger), + ) + assert.IsType(t, &experimentService{}, s) +} + +func createExperimentService(c *gomock.Controller, s storage.Client) *experimentService { + featureClientMock := featureclientmock.NewMockClient(c) + fr := &featureproto.GetFeatureResponse{ + Feature: &featureproto.Feature{ + Id: "fid", + Version: 1, + Variations: []*featureproto.Variation{}, + }, + } + featureClientMock.EXPECT().GetFeature(gomock.Any(), gomock.Any()).Return(fr, nil).AnyTimes() + fsr := &featureproto.GetFeaturesResponse{ + Features: []*featureproto.Feature{{ + Id: "fid", + Version: 1, + Variations: []*featureproto.Variation{}, + }}, + } + featureClientMock.EXPECT().GetFeatures(gomock.Any(), gomock.Any()).Return(fsr, nil).AnyTimes() + accountClientMock := accountclientmock.NewMockClient(c) + ar := &accountproto.GetAccountResponse{ + Account: &accountproto.Account{ + Email: "email", + Role: accountproto.Account_VIEWER, + }, + } + accountClientMock.EXPECT().GetAccount(gomock.Any(), gomock.Any()).Return(ar, nil).AnyTimes() + mysqlClient := mysqlmock.NewMockClient(c) + p := publishermock.NewMockPublisher(c) + p.EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + es := NewExperimentService(featureClientMock, accountClientMock, mysqlClient, p) + return es.(*experimentService) +} + +func createContextWithToken() context.Context { + token := &token.IDToken{ + Issuer: "issuer", + Subject: "sub", + Audience: "audience", + Expiry: time.Now().AddDate(100, 0, 0), + IssuedAt: time.Now(), + Email: "email", + AdminRole: accountproto.Account_OWNER, + } + ctx := context.TODO() + return context.WithValue(ctx, rpc.Key, token) +} + +func createContextWithTokenRoleUnassigned() context.Context { + token := &token.IDToken{ + Issuer: "issuer", + Subject: "sub", + Audience: "audience", + Expiry: time.Now().AddDate(100, 0, 0), + IssuedAt: time.Now(), + Email: "email", + AdminRole: 
accountproto.Account_UNASSIGNED, + } + ctx := context.TODO() + return context.WithValue(ctx, rpc.Key, token) +} diff --git a/pkg/experiment/api/error.go b/pkg/experiment/api/error.go new file mode 100644 index 000000000..410f4ca88 --- /dev/null +++ b/pkg/experiment/api/error.go @@ -0,0 +1,208 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "fmt" + + "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc/codes" + gstatus "google.golang.org/grpc/status" + + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/rpc/status" +) + +var ( + statusInternal = gstatus.New(codes.Internal, "experiment: internal") + statusInvalidCursor = gstatus.New(codes.InvalidArgument, "experiment: cursor is invalid") + statusNoCommand = gstatus.New(codes.InvalidArgument, "experiment: must contain at least one command") + statusUnknownCommand = gstatus.New(codes.InvalidArgument, "experiment: unknown command") + statusFeatureIDRequired = gstatus.New(codes.InvalidArgument, "experiment: feature id must be specified") + statusExperimentIDRequired = gstatus.New(codes.InvalidArgument, "experiment: experiment id must be specified") + statusGoalIDRequired = gstatus.New(codes.InvalidArgument, "experiment: goal id must be specified") + statusInvalidGoalID = gstatus.New(codes.InvalidArgument, "experiment: invalid goal id") + statusGoalNameRequired = 
gstatus.New(codes.InvalidArgument, "experiment: goal name must be specified") + statusPeriodTooLong = gstatus.New(codes.InvalidArgument, "experiment: period too long") + statusInvalidOrderBy = gstatus.New(codes.InvalidArgument, "experiment: order_by is invalid") + statusNotFound = gstatus.New(codes.NotFound, "experiment: not found") + statusGoalNotFound = gstatus.New(codes.NotFound, "experiment: goal not found") + statusFeatureNotFound = gstatus.New(codes.NotFound, "experiment: feature not found") + statusAlreadyExists = gstatus.New(codes.AlreadyExists, "experiment: already exists") + statusUnauthenticated = gstatus.New(codes.Unauthenticated, "experiment: unauthenticated") + statusPermissionDenied = gstatus.New(codes.PermissionDenied, "experiment: permission denied") + + errInternalJaJP = status.MustWithDetails( + statusInternal, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "内部エラーが発生しました", + }, + ) + errInvalidCursorJaJP = status.MustWithDetails( + statusInvalidCursor, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不正なcursorです", + }, + ) + errNoCommandJaJP = status.MustWithDetails( + statusNoCommand, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "commandは必須です", + }, + ) + errUnknownCommandJaJP = status.MustWithDetails( + statusUnknownCommand, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不明なcommandです", + }, + ) + errFeatureIDRequiredJaJP = status.MustWithDetails( + statusFeatureIDRequired, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "feature idは必須です", + }, + ) + errExperimentIDRequiredJaJP = status.MustWithDetails( + statusExperimentIDRequired, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "experiment idは必須です", + }, + ) + errGoalIDRequiredJaJP = status.MustWithDetails( + statusGoalIDRequired, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "goal idは必須です", + }, + ) + errInvalidGoalIDJaJP = status.MustWithDetails( + 
statusInvalidGoalID, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不正なgoal idです", + }, + ) + errGoalNameRequiredJaJP = status.MustWithDetails( + statusGoalNameRequired, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "goal nameは必須です", + }, + ) + errPeriodTooLongJaJP = status.MustWithDetails( + statusPeriodTooLong, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: fmt.Sprintf("experiment期間は%d日以内で設定してください", maxExperimentPeriodDays), + }, + ) + errInvalidOrderByJaJP = status.MustWithDetails( + statusInvalidOrderBy, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不正なソート順の指定です", + }, + ) + errNotFoundJaJP = status.MustWithDetails( + statusNotFound, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "データが存在しません", + }, + ) + errFeatureNotFoundJaJP = status.MustWithDetails( + statusFeatureNotFound, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "featureが存在しません", + }, + ) + errGoalNotFoundJaJP = status.MustWithDetails( + statusGoalNotFound, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "goalが存在しません", + }, + ) + errAlreadyExistsJaJP = status.MustWithDetails( + statusAlreadyExists, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "同じidのデータがすでに存在します", + }, + ) + errUnauthenticatedJaJP = status.MustWithDetails( + statusUnauthenticated, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "認証されていません", + }, + ) + errPermissionDeniedJaJP = status.MustWithDetails( + statusPermissionDenied, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "権限がありません", + }, + ) +) + +func localizedError(s *gstatus.Status, loc string) error { + // handle loc if multi-lang is necessary + switch s { + case statusInternal: + return errInternalJaJP + case statusInvalidCursor: + return errInvalidCursorJaJP + case statusNoCommand: + return errNoCommandJaJP + case statusUnknownCommand: + return errUnknownCommandJaJP + case 
statusFeatureIDRequired: + return errFeatureIDRequiredJaJP + case statusExperimentIDRequired: + return errExperimentIDRequiredJaJP + case statusGoalIDRequired: + return errGoalIDRequiredJaJP + case statusInvalidGoalID: + return errInvalidGoalIDJaJP + case statusGoalNameRequired: + return errGoalNameRequiredJaJP + case statusPeriodTooLong: + return errPeriodTooLongJaJP + case statusInvalidOrderBy: + return errInvalidOrderByJaJP + case statusNotFound: + return errNotFoundJaJP + case statusFeatureNotFound: + return errFeatureNotFoundJaJP + case statusGoalNotFound: + return errGoalNotFoundJaJP + case statusAlreadyExists: + return errAlreadyExistsJaJP + case statusUnauthenticated: + return errUnauthenticatedJaJP + case statusPermissionDenied: + return errPermissionDeniedJaJP + default: + return errInternalJaJP + } +} diff --git a/pkg/experiment/api/experiment.go b/pkg/experiment/api/experiment.go new file mode 100644 index 000000000..5b9feb4c9 --- /dev/null +++ b/pkg/experiment/api/experiment.go @@ -0,0 +1,611 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "context" + "strconv" + + "go.uber.org/zap" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/bucketeer-io/bucketeer/pkg/experiment/command" + "github.com/bucketeer-io/bucketeer/pkg/experiment/domain" + v2es "github.com/bucketeer-io/bucketeer/pkg/experiment/storage/v2" + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" + proto "github.com/bucketeer-io/bucketeer/proto/experiment" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +const ( + maxExperimentPeriodDays = 30 + maxExperimentPeriod = maxExperimentPeriodDays * 24 * 60 * 60 +) + +func (s *experimentService) GetExperiment( + ctx context.Context, + req *proto.GetExperimentRequest, +) (*proto.GetExperimentResponse, error) { + _, err := s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := validateGetExperimentRequest(req); err != nil { + return nil, err + } + experimentStorage := v2es.NewExperimentStorage(s.mysqlClient) + experiment, err := experimentStorage.GetExperiment(ctx, req.Id, req.EnvironmentNamespace) + if err != nil { + if err == v2es.ErrExperimentNotFound { + return nil, localizedError(statusNotFound, locale.JaJP) + } + return nil, localizedError(statusInternal, locale.JaJP) + } + return &proto.GetExperimentResponse{ + Experiment: experiment.Experiment, + }, nil +} + +func validateGetExperimentRequest(req *proto.GetExperimentRequest) error { + if req.Id == "" { + return localizedError(statusExperimentIDRequired, locale.JaJP) + } + return nil +} + +func (s *experimentService) ListExperiments( + ctx context.Context, + req *proto.ListExperimentsRequest, +) (*proto.ListExperimentsResponse, error) { + _, err := 
s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + whereParts := []mysql.WherePart{ + mysql.NewFilter("deleted", "=", false), + mysql.NewFilter("environment_namespace", "=", req.EnvironmentNamespace), + } + if req.Archived != nil { + whereParts = append(whereParts, mysql.NewFilter("archived", "=", req.Archived.Value)) + } + if req.FeatureId != "" { + whereParts = append(whereParts, mysql.NewFilter("feature_id", "=", req.FeatureId)) + } + if req.FeatureVersion != nil { + whereParts = append(whereParts, mysql.NewFilter("feature_version", "=", req.FeatureVersion.Value)) + } + if req.From != 0 { + whereParts = append(whereParts, mysql.NewFilter("stopped_at", ">=", req.From)) + } + if req.To != 0 { + whereParts = append(whereParts, mysql.NewFilter("start_at", "<=", req.To)) + } + if req.Status != nil { + whereParts = append(whereParts, mysql.NewFilter("status", "=", req.Status.Value)) + } else if len(req.Statuses) > 0 { + statuses := make([]interface{}, 0, len(req.Statuses)) + for _, sts := range req.Statuses { + statuses = append(statuses, sts) + } + whereParts = append(whereParts, mysql.NewInFilter("status", statuses)) + } + if req.Maintainer != "" { + whereParts = append(whereParts, mysql.NewFilter("maintainer", "=", req.Maintainer)) + } + if req.SearchKeyword != "" { + whereParts = append(whereParts, mysql.NewSearchQuery([]string{"name", "description"}, req.SearchKeyword)) + } + orders, err := s.newExperimentListOrders(req.OrderBy, req.OrderDirection) + if err != nil { + s.logger.Error( + "Invalid argument", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, err + } + limit := int(req.PageSize) + cursor := req.Cursor + if cursor == "" { + cursor = "0" + } + offset, err := strconv.Atoi(cursor) + if err != nil { + return nil, localizedError(statusInvalidCursor, locale.JaJP) + } + experimentStorage := v2es.NewExperimentStorage(s.mysqlClient) + experiments, nextCursor, 
totalCount, err := experimentStorage.ListExperiments( + ctx, + whereParts, + orders, + limit, + offset, + ) + if err != nil { + s.logger.Error( + "Failed to list experiments", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &proto.ListExperimentsResponse{ + Experiments: experiments, + Cursor: strconv.Itoa(nextCursor), + TotalCount: totalCount, + }, nil +} + +func (s *experimentService) newExperimentListOrders( + orderBy proto.ListExperimentsRequest_OrderBy, + orderDirection proto.ListExperimentsRequest_OrderDirection, +) ([]*mysql.Order, error) { + var column string + switch orderBy { + case proto.ListExperimentsRequest_DEFAULT, + proto.ListExperimentsRequest_NAME: + column = "name" + case proto.ListExperimentsRequest_CREATED_AT: + column = "created_at" + case proto.ListExperimentsRequest_UPDATED_AT: + column = "updated_at" + default: + return nil, localizedError(statusInvalidOrderBy, locale.JaJP) + } + direction := mysql.OrderDirectionAsc + if orderDirection == proto.ListExperimentsRequest_DESC { + direction = mysql.OrderDirectionDesc + } + return []*mysql.Order{mysql.NewOrder(column, direction)}, nil +} + +func (s *experimentService) CreateExperiment( + ctx context.Context, + req *proto.CreateExperimentRequest, +) (*proto.CreateExperimentResponse, error) { + editor, err := s.checkRole(ctx, accountproto.Account_EDITOR, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := validateCreateExperimentRequest(req); err != nil { + return nil, err + } + resp, err := s.featureClient.GetFeature(ctx, &featureproto.GetFeatureRequest{ + Id: req.Command.FeatureId, + EnvironmentNamespace: req.EnvironmentNamespace, + }) + if err != nil { + if code := status.Code(err); code == codes.NotFound { + return nil, localizedError(statusFeatureNotFound, locale.JaJP) + } + s.logger.Error( + "Failed to get 
feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + for _, gid := range req.Command.GoalIds { + _, err := s.getGoalMySQL(ctx, gid, req.EnvironmentNamespace) + if err != nil { + if err == v2es.ErrGoalNotFound { + return nil, localizedError(statusGoalNotFound, locale.JaJP) + } + return nil, localizedError(statusInternal, locale.JaJP) + } + } + experiment, err := domain.NewExperiment( + req.Command.FeatureId, + resp.Feature.Version, + resp.Feature.Variations, + req.Command.GoalIds, + req.Command.StartAt, + req.Command.StopAt, + req.Command.Name, + req.Command.Description, + req.Command.BaseVariationId, + editor.Email, + ) + if err != nil { + s.logger.Error( + "Failed to create a new experiment", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + experimentStorage := v2es.NewExperimentStorage(tx) + handler := command.NewExperimentCommandHandler( + editor, + experiment, + s.publisher, + req.EnvironmentNamespace, + ) + if err := handler.Handle(ctx, req.Command); err != nil { + return err + } + return experimentStorage.CreateExperiment(ctx, experiment, req.EnvironmentNamespace) + }) + if err != nil { + if err == v2es.ErrExperimentAlreadyExists { + return nil, localizedError(statusAlreadyExists, locale.JaJP) + } + s.logger.Error( + "Failed to create experiment", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + 
zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &proto.CreateExperimentResponse{ + Experiment: experiment.Experiment, + }, nil +} + +func validateCreateExperimentRequest(req *proto.CreateExperimentRequest) error { + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + if req.Command.FeatureId == "" { + return localizedError(statusFeatureIDRequired, locale.JaJP) + } + if len(req.Command.GoalIds) == 0 { + return localizedError(statusGoalIDRequired, locale.JaJP) + } + for _, gid := range req.Command.GoalIds { + if gid == "" { + return localizedError(statusGoalIDRequired, locale.JaJP) + } + } + if err := validateExperimentPeriod(req.Command.StartAt, req.Command.StopAt); err != nil { + return err + } + // TODO: validate name empty check + return nil +} + +func validateExperimentPeriod(startAt, stopAt int64) error { + period := stopAt - startAt + if period <= 0 || period > int64(maxExperimentPeriod) { + return localizedError(statusPeriodTooLong, locale.JaJP) + } + return nil +} + +func (s *experimentService) UpdateExperiment( + ctx context.Context, + req *proto.UpdateExperimentRequest, +) (*proto.UpdateExperimentResponse, error) { + editor, err := s.checkRole(ctx, accountproto.Account_EDITOR, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := validateUpdateExperimentRequest(req); err != nil { + return nil, err + } + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + experimentStorage := v2es.NewExperimentStorage(tx) + experiment, err := experimentStorage.GetExperiment(ctx, req.Id, req.EnvironmentNamespace) + if err != nil { + return err + } + handler := 
command.NewExperimentCommandHandler( + editor, + experiment, + s.publisher, + req.EnvironmentNamespace, + ) + if req.ChangeExperimentPeriodCommand != nil { + if err = handler.Handle(ctx, req.ChangeExperimentPeriodCommand); err != nil { + s.logger.Error( + "Failed to change period", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return err + } + return experimentStorage.UpdateExperiment(ctx, experiment, req.EnvironmentNamespace) + } + if req.ChangeNameCommand != nil { + if err = handler.Handle(ctx, req.ChangeNameCommand); err != nil { + s.logger.Error( + "Failed to change Name", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return err + } + } + if req.ChangeDescriptionCommand != nil { + if err = handler.Handle(ctx, req.ChangeDescriptionCommand); err != nil { + s.logger.Error( + "Failed to change Description", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return err + } + } + return experimentStorage.UpdateExperiment(ctx, experiment, req.EnvironmentNamespace) + }) + if err != nil { + if err == v2es.ErrExperimentNotFound || err == v2es.ErrExperimentUnexpectedAffectedRows { + return nil, localizedError(statusNotFound, locale.JaJP) + } + s.logger.Error( + "Failed to update experiment", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &proto.UpdateExperimentResponse{}, nil +} + +func validateUpdateExperimentRequest(req *proto.UpdateExperimentRequest) error { + if req.Id == "" { + return localizedError(statusExperimentIDRequired, locale.JaJP) + } + if req.ChangeExperimentPeriodCommand != nil { + if err := 
validateExperimentPeriod( + req.ChangeExperimentPeriodCommand.StartAt, + req.ChangeExperimentPeriodCommand.StopAt, + ); err != nil { + return err + } + } + return nil +} + +func (s *experimentService) StartExperiment( + ctx context.Context, + req *proto.StartExperimentRequest, +) (*proto.StartExperimentResponse, error) { + editor, err := s.checkRole(ctx, accountproto.Account_EDITOR, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := validateStartExperimentRequest(req); err != nil { + return nil, err + } + if err := s.updateExperiment(ctx, editor, req.Command, req.Id, req.EnvironmentNamespace); err != nil { + return nil, err + } + return &proto.StartExperimentResponse{}, nil +} + +func validateStartExperimentRequest(req *proto.StartExperimentRequest) error { + if req.Id == "" { + return localizedError(statusExperimentIDRequired, locale.JaJP) + } + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + return nil +} + +func (s *experimentService) FinishExperiment( + ctx context.Context, + req *proto.FinishExperimentRequest, +) (*proto.FinishExperimentResponse, error) { + editor, err := s.checkRole(ctx, accountproto.Account_EDITOR, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := validateFinishExperimentRequest(req); err != nil { + return nil, err + } + if err := s.updateExperiment(ctx, editor, req.Command, req.Id, req.EnvironmentNamespace); err != nil { + return nil, err + } + return &proto.FinishExperimentResponse{}, nil +} + +func validateFinishExperimentRequest(req *proto.FinishExperimentRequest) error { + if req.Id == "" { + return localizedError(statusExperimentIDRequired, locale.JaJP) + } + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + return nil +} + +func (s *experimentService) StopExperiment( + ctx context.Context, + req *proto.StopExperimentRequest, +) (*proto.StopExperimentResponse, error) { + editor, err := s.checkRole(ctx, 
accountproto.Account_EDITOR, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := validateStopExperimentRequest(req); err != nil { + return nil, err + } + if err := s.updateExperiment(ctx, editor, req.Command, req.Id, req.EnvironmentNamespace); err != nil { + return nil, err + } + return &proto.StopExperimentResponse{}, nil +} + +func validateStopExperimentRequest(req *proto.StopExperimentRequest) error { + if req.Id == "" { + return localizedError(statusExperimentIDRequired, locale.JaJP) + } + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + return nil +} + +func (s *experimentService) ArchiveExperiment( + ctx context.Context, + req *proto.ArchiveExperimentRequest, +) (*proto.ArchiveExperimentResponse, error) { + editor, err := s.checkRole(ctx, accountproto.Account_EDITOR, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if req.Id == "" { + return nil, localizedError(statusExperimentIDRequired, locale.JaJP) + } + if req.Command == nil { + return nil, localizedError(statusNoCommand, locale.JaJP) + } + err = s.updateExperiment( + ctx, + editor, + req.Command, + req.Id, + req.EnvironmentNamespace, + ) + if err != nil { + s.logger.Error( + "Failed to archive experiment", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, err + } + return &proto.ArchiveExperimentResponse{}, nil +} + +func (s *experimentService) DeleteExperiment( + ctx context.Context, + req *proto.DeleteExperimentRequest, +) (*proto.DeleteExperimentResponse, error) { + editor, err := s.checkRole(ctx, accountproto.Account_EDITOR, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := validateDeleteExperimentRequest(req); err != nil { + return nil, err + } + if err := s.updateExperiment(ctx, editor, req.Command, req.Id, req.EnvironmentNamespace); err != nil { + return nil, err + } + return 
&proto.DeleteExperimentResponse{}, nil +} + +func validateDeleteExperimentRequest(req *proto.DeleteExperimentRequest) error { + if req.Id == "" { + return localizedError(statusExperimentIDRequired, locale.JaJP) + } + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + return nil +} + +func (s *experimentService) updateExperiment( + ctx context.Context, + editor *eventproto.Editor, + cmd command.Command, + id, environmentNamespace string, +) error { + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return localizedError(statusInternal, locale.JaJP) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + experimentStorage := v2es.NewExperimentStorage(tx) + experiment, err := experimentStorage.GetExperiment(ctx, id, environmentNamespace) + if err != nil { + s.logger.Error( + "Failed to get experiment", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return err + } + handler := command.NewExperimentCommandHandler(editor, experiment, s.publisher, environmentNamespace) + if err := handler.Handle(ctx, cmd); err != nil { + s.logger.Error( + "Failed to handle command", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return err + } + return experimentStorage.UpdateExperiment(ctx, experiment, environmentNamespace) + }) + if err != nil { + if err == v2es.ErrExperimentNotFound || err == v2es.ErrExperimentUnexpectedAffectedRows { + return localizedError(statusNotFound, locale.JaJP) + } + s.logger.Error( + "Failed to update experiment", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return localizedError(statusInternal, 
locale.JaJP) + } + return nil +} diff --git a/pkg/experiment/api/experiment_test.go b/pkg/experiment/api/experiment_test.go new file mode 100644 index 000000000..1459ed6f2 --- /dev/null +++ b/pkg/experiment/api/experiment_test.go @@ -0,0 +1,699 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "context" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + v2es "github.com/bucketeer-io/bucketeer/pkg/experiment/storage/v2" + storagetesting "github.com/bucketeer-io/bucketeer/pkg/storage/testing" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + mysqlmock "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + experimentproto "github.com/bucketeer-io/bucketeer/proto/experiment" +) + +func TestGetExperimentMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := []struct { + setup func(*experimentService) + id string + environmentNamespace string + expectedErr error + }{ + { + setup: nil, + id: "", + environmentNamespace: "ns0", + expectedErr: errExperimentIDRequiredJaJP, + }, + { + setup: func(s *experimentService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + id: 
"id-0", + environmentNamespace: "ns0", + expectedErr: errNotFoundJaJP, + }, + { + setup: func(s *experimentService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + id: "id-1", + environmentNamespace: "ns0", + expectedErr: nil, + }, + } + for _, p := range patterns { + service := createExperimentService(mockController, nil) + if p.setup != nil { + p.setup(service) + } + req := &experimentproto.GetExperimentRequest{Id: p.id, EnvironmentNamespace: p.environmentNamespace} + _, err := service.GetExperiment(createContextWithTokenRoleUnassigned(), req) + assert.Equal(t, p.expectedErr, err) + } +} + +func TestListExperimentMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := []struct { + setup func(*experimentService) + req *experimentproto.ListExperimentsRequest + expectedErr error + }{ + { + setup: func(s *experimentService) { + rows := mysqlmock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + req: &experimentproto.ListExperimentsRequest{FeatureId: "id-0", EnvironmentNamespace: "ns0"}, + expectedErr: nil, + }, + } + for _, p := range patterns { + service := createExperimentService(mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.ListExperiments(createContextWithTokenRoleUnassigned(), p.req) + assert.Equal(t, p.expectedErr, err) + } +} + +func 
TestCreateExperimentMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := []struct { + setup func(s *experimentService) + input *experimentproto.CreateExperimentRequest + expectedErr error + }{ + { + setup: func(s *experimentService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + input: &experimentproto.CreateExperimentRequest{ + Command: &experimentproto.CreateExperimentCommand{ + FeatureId: "fid", + GoalIds: []string{"goalId"}, + StartAt: 1, + StopAt: 10, + }, + EnvironmentNamespace: "ns0", + }, + expectedErr: nil, + }, + } + for _, p := range patterns { + ctx := createContextWithToken() + service := createExperimentService(mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.CreateExperiment(ctx, p.input) + assert.Equal(t, p.expectedErr, err) + } +} + +func TestValidateCreateExperimentRequest(t *testing.T) { + t.Parallel() + patterns := []struct { + in *experimentproto.CreateExperimentRequest + expected error + }{ + { + in: &experimentproto.CreateExperimentRequest{ + Command: &experimentproto.CreateExperimentCommand{ + FeatureId: "fid", + GoalIds: []string{"gid"}, + StartAt: 1, + StopAt: 10, + }, + EnvironmentNamespace: "ns0", + }, + expected: nil, + }, + { + in: &experimentproto.CreateExperimentRequest{ + Command: &experimentproto.CreateExperimentCommand{ + FeatureId: "", + GoalIds: []string{"gid"}, + }, + EnvironmentNamespace: "ns0", + }, + expected: errFeatureIDRequiredJaJP, + }, + { + in: &experimentproto.CreateExperimentRequest{ + Command: 
&experimentproto.CreateExperimentCommand{ + FeatureId: "fid", + GoalIds: nil, + }, + EnvironmentNamespace: "ns0", + }, + expected: errGoalIDRequiredJaJP, + }, + { + in: &experimentproto.CreateExperimentRequest{ + Command: &experimentproto.CreateExperimentCommand{ + FeatureId: "fid", + GoalIds: []string{""}, + }, + EnvironmentNamespace: "ns0", + }, + expected: errGoalIDRequiredJaJP, + }, + { + in: &experimentproto.CreateExperimentRequest{ + Command: &experimentproto.CreateExperimentCommand{ + FeatureId: "fid", + GoalIds: []string{"gid", ""}, + }, + EnvironmentNamespace: "ns0", + }, + expected: errGoalIDRequiredJaJP, + }, + { + in: &experimentproto.CreateExperimentRequest{ + Command: &experimentproto.CreateExperimentCommand{ + FeatureId: "fid", + GoalIds: []string{"gid0", "gid1"}, + StartAt: 1, + StopAt: 30*24*60*60 + 2, + }, + EnvironmentNamespace: "ns0", + }, + expected: errPeriodTooLongJaJP, + }, + { + in: &experimentproto.CreateExperimentRequest{ + Command: &experimentproto.CreateExperimentCommand{ + FeatureId: "fid", + GoalIds: []string{"gid0", "gid1"}, + StartAt: 1, + StopAt: 10, + }, + EnvironmentNamespace: "ns0", + }, + expected: nil, + }, + } + for _, p := range patterns { + err := validateCreateExperimentRequest(p.in) + assert.Equal(t, p.expected, err) + } +} + +func TestUpdateExperimentMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := []struct { + setup func(*experimentService) + req *experimentproto.UpdateExperimentRequest + expectedErr error + }{ + { + setup: nil, + req: &experimentproto.UpdateExperimentRequest{ + EnvironmentNamespace: "ns0", + }, + expectedErr: errExperimentIDRequiredJaJP, + }, + { + setup: nil, + req: &experimentproto.UpdateExperimentRequest{ + Id: "id-1", + ChangeExperimentPeriodCommand: &experimentproto.ChangeExperimentPeriodCommand{ + StartAt: time.Now().Unix(), + StopAt: time.Now().AddDate(0, 0, 31).Unix(), + }, + EnvironmentNamespace: "ns0", + }, + 
expectedErr: errPeriodTooLongJaJP, + }, + { + setup: func(s *experimentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2es.ErrExperimentNotFound) + }, + req: &experimentproto.UpdateExperimentRequest{ + Id: "id-0", + EnvironmentNamespace: "ns0", + }, + expectedErr: errNotFoundJaJP, + }, + { + setup: func(s *experimentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + req: &experimentproto.UpdateExperimentRequest{ + Id: "id-1", + ChangeNameCommand: &experimentproto.ChangeExperimentNameCommand{Name: "test-name"}, + EnvironmentNamespace: "ns0", + }, + expectedErr: nil, + }, + } + for _, p := range patterns { + ctx := createContextWithToken() + service := createExperimentService(mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.UpdateExperiment(ctx, p.req) + assert.Equal(t, p.expectedErr, err) + } +} + +func TestStartExperimentMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*experimentService) + req *experimentproto.StartExperimentRequest + expectedErr error + }{ + "error id required": { + setup: nil, + req: &experimentproto.StartExperimentRequest{ + EnvironmentNamespace: "ns0", + }, + expectedErr: errExperimentIDRequiredJaJP, + }, + "error no command": { + setup: nil, + req: &experimentproto.StartExperimentRequest{ + Id: "eid", + EnvironmentNamespace: "ns0", + }, + expectedErr: errNoCommandJaJP, + }, + "error not found": { + setup: func(s *experimentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + 
s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2es.ErrExperimentNotFound) + }, + req: &experimentproto.StartExperimentRequest{ + Id: "noop", + Command: &experimentproto.StartExperimentCommand{}, + EnvironmentNamespace: "ns0", + }, + expectedErr: errNotFoundJaJP, + }, + "success": { + setup: func(s *experimentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + req: &experimentproto.StartExperimentRequest{ + Id: "eid", + Command: &experimentproto.StartExperimentCommand{}, + EnvironmentNamespace: "ns0", + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithToken() + service := createExperimentService(mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.StartExperiment(ctx, p.req) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestFinishExperimentMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*experimentService) + req *experimentproto.FinishExperimentRequest + expectedErr error + }{ + "error id required": { + setup: nil, + req: &experimentproto.FinishExperimentRequest{ + EnvironmentNamespace: "ns0", + }, + expectedErr: errExperimentIDRequiredJaJP, + }, + "error no command": { + setup: nil, + req: &experimentproto.FinishExperimentRequest{ + Id: "eid", + EnvironmentNamespace: "ns0", + }, + expectedErr: errNoCommandJaJP, + }, + "error not found": { + setup: func(s *experimentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + 
).Return(v2es.ErrExperimentNotFound) + }, + req: &experimentproto.FinishExperimentRequest{ + Id: "noop", + Command: &experimentproto.FinishExperimentCommand{}, + EnvironmentNamespace: "ns0", + }, + expectedErr: errNotFoundJaJP, + }, + "success": { + setup: func(s *experimentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + req: &experimentproto.FinishExperimentRequest{ + Id: "eid", + Command: &experimentproto.FinishExperimentCommand{}, + EnvironmentNamespace: "ns0", + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithToken() + service := createExperimentService(mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.FinishExperiment(ctx, p.req) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestStopExperimentMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := []struct { + setup func(*experimentService) + req *experimentproto.StopExperimentRequest + expectedErr error + }{ + { + setup: nil, + req: &experimentproto.StopExperimentRequest{ + EnvironmentNamespace: "ns0", + }, + expectedErr: errExperimentIDRequiredJaJP, + }, + { + setup: nil, + req: &experimentproto.StopExperimentRequest{ + Id: "id-0", + EnvironmentNamespace: "ns0", + }, + expectedErr: errNoCommandJaJP, + }, + { + setup: func(s *experimentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2es.ErrExperimentNotFound) + }, + req: &experimentproto.StopExperimentRequest{ + Id: "id-0", + Command: &experimentproto.StopExperimentCommand{}, + EnvironmentNamespace: "ns0", + }, + 
expectedErr: errNotFoundJaJP, + }, + { + setup: func(s *experimentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + req: &experimentproto.StopExperimentRequest{ + Id: "id-1", + Command: &experimentproto.StopExperimentCommand{}, + EnvironmentNamespace: "ns0", + }, + expectedErr: nil, + }, + } + for _, p := range patterns { + ctx := createContextWithToken() + service := createExperimentService(mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.StopExperiment(ctx, p.req) + assert.Equal(t, p.expectedErr, err) + } +} + +func TestArchiveExperimentMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := []struct { + setup func(*experimentService) + req *experimentproto.ArchiveExperimentRequest + expectedErr error + }{ + { + setup: nil, + req: &experimentproto.ArchiveExperimentRequest{ + EnvironmentNamespace: "ns0", + }, + expectedErr: errExperimentIDRequiredJaJP, + }, + { + setup: nil, + req: &experimentproto.ArchiveExperimentRequest{ + Id: "id-0", + EnvironmentNamespace: "ns0", + }, + expectedErr: errNoCommandJaJP, + }, + { + setup: func(s *experimentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2es.ErrExperimentNotFound) + }, + req: &experimentproto.ArchiveExperimentRequest{ + Id: "id-0", + Command: &experimentproto.ArchiveExperimentCommand{}, + EnvironmentNamespace: "ns0", + }, + expectedErr: errNotFoundJaJP, + }, + { + setup: func(s *experimentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + 
gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + req: &experimentproto.ArchiveExperimentRequest{ + Id: "id-1", + Command: &experimentproto.ArchiveExperimentCommand{}, + EnvironmentNamespace: "ns0", + }, + expectedErr: nil, + }, + } + for _, p := range patterns { + ctx := createContextWithToken() + service := createExperimentService(mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.ArchiveExperiment(ctx, p.req) + assert.Equal(t, p.expectedErr, err) + } +} + +func TestDeleteExperimentMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := []struct { + setup func(*experimentService) + req *experimentproto.DeleteExperimentRequest + expectedErr error + }{ + { + setup: nil, + req: &experimentproto.DeleteExperimentRequest{ + EnvironmentNamespace: "ns0", + }, + expectedErr: errExperimentIDRequiredJaJP, + }, + { + setup: nil, + req: &experimentproto.DeleteExperimentRequest{ + Id: "id-0", + EnvironmentNamespace: "ns0", + }, + expectedErr: errNoCommandJaJP, + }, + { + setup: func(s *experimentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2es.ErrExperimentNotFound) + }, + req: &experimentproto.DeleteExperimentRequest{ + Id: "id-0", + Command: &experimentproto.DeleteExperimentCommand{}, + EnvironmentNamespace: "ns0", + }, + expectedErr: errNotFoundJaJP, + }, + { + setup: func(s *experimentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + req: &experimentproto.DeleteExperimentRequest{ + Id: "id-1", + Command: &experimentproto.DeleteExperimentCommand{}, + EnvironmentNamespace: "ns0", + }, + expectedErr: 
nil, + }, + } + for _, p := range patterns { + ctx := createContextWithToken() + service := createExperimentService(mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.DeleteExperiment(ctx, p.req) + assert.Equal(t, p.expectedErr, err) + } +} + +func TestExperimentPermissionDenied(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + ctx := createContextWithTokenRoleUnassigned() + s := storagetesting.NewInMemoryStorage() + service := createExperimentService(mockController, s) + patterns := map[string]struct { + action func(context.Context, *experimentService) error + expected error + }{ + "CreateExperiment": { + action: func(ctx context.Context, es *experimentService) error { + _, err := es.CreateExperiment(ctx, &experimentproto.CreateExperimentRequest{}) + return err + }, + expected: errPermissionDeniedJaJP, + }, + "UpdateExperiment": { + action: func(ctx context.Context, es *experimentService) error { + _, err := es.UpdateExperiment(ctx, &experimentproto.UpdateExperimentRequest{}) + return err + }, + expected: errPermissionDeniedJaJP, + }, + "StopExperiment": { + action: func(ctx context.Context, es *experimentService) error { + _, err := es.StopExperiment(ctx, &experimentproto.StopExperimentRequest{}) + return err + }, + expected: errPermissionDeniedJaJP, + }, + "DeleteExperiment": { + action: func(ctx context.Context, es *experimentService) error { + _, err := es.DeleteExperiment(ctx, &experimentproto.DeleteExperimentRequest{}) + return err + }, + expected: errPermissionDeniedJaJP, + }, + } + for msg, p := range patterns { + actual := p.action(ctx, service) + assert.Equal(t, p.expected, actual, "%s", msg) + } +} diff --git a/pkg/experiment/api/goal.go b/pkg/experiment/api/goal.go new file mode 100644 index 000000000..ebe97c906 --- /dev/null +++ b/pkg/experiment/api/goal.go @@ -0,0 +1,381 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "context" + "regexp" + "strconv" + + "go.uber.org/zap" + + "github.com/bucketeer-io/bucketeer/pkg/experiment/command" + "github.com/bucketeer-io/bucketeer/pkg/experiment/domain" + v2es "github.com/bucketeer-io/bucketeer/pkg/experiment/storage/v2" + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" + proto "github.com/bucketeer-io/bucketeer/proto/experiment" +) + +var goalIDRegex = regexp.MustCompile("^[a-zA-Z0-9-]+$") + +func (s *experimentService) GetGoal(ctx context.Context, req *proto.GetGoalRequest) (*proto.GetGoalResponse, error) { + _, err := s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if req.Id == "" { + return nil, localizedError(statusGoalIDRequired, locale.JaJP) + } + goal, err := s.getGoalMySQL(ctx, req.Id, req.EnvironmentNamespace) + if err != nil { + if err == v2es.ErrGoalNotFound { + return nil, localizedError(statusNotFound, locale.JaJP) + } + return nil, localizedError(statusInternal, locale.JaJP) + } + return &proto.GetGoalResponse{Goal: goal.Goal}, nil +} + +func (s *experimentService) getGoalMySQL( + ctx context.Context, + goalID, environmentNamespace string, +) 
(*domain.Goal, error) { + goalStorage := v2es.NewGoalStorage(s.mysqlClient) + goal, err := goalStorage.GetGoal(ctx, goalID, environmentNamespace) + if err != nil { + s.logger.Error( + "Failed to get goal", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + zap.String("goalId", goalID), + )..., + ) + } + return goal, err +} + +func (s *experimentService) ListGoals( + ctx context.Context, + req *proto.ListGoalsRequest, +) (*proto.ListGoalsResponse, error) { + _, err := s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + whereParts := []mysql.WherePart{ + mysql.NewFilter("deleted", "=", false), + mysql.NewFilter("environment_namespace", "=", req.EnvironmentNamespace), + } + if req.Archived != nil { + whereParts = append(whereParts, mysql.NewFilter("archived", "=", req.Archived.Value)) + } + if req.SearchKeyword != "" { + whereParts = append(whereParts, mysql.NewSearchQuery([]string{"id", "name", "description"}, req.SearchKeyword)) + } + orders, err := s.newGoalListOrders(req.OrderBy, req.OrderDirection) + if err != nil { + s.logger.Error( + "Invalid argument", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, err + } + limit := int(req.PageSize) + cursor := req.Cursor + if cursor == "" { + cursor = "0" + } + offset, err := strconv.Atoi(cursor) + if err != nil { + return nil, localizedError(statusInvalidCursor, locale.JaJP) + } + var isInUseStatus *bool + if req.IsInUseStatus != nil { + isInUseStatus = &req.IsInUseStatus.Value + } + goalStorage := v2es.NewGoalStorage(s.mysqlClient) + goals, nextCursor, totalCount, err := goalStorage.ListGoals( + ctx, + whereParts, + orders, + limit, + offset, + isInUseStatus, + req.EnvironmentNamespace, + ) + if err != nil { + s.logger.Error( + "Failed to list goals", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + 
zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &proto.ListGoalsResponse{ + Goals: goals, + Cursor: strconv.Itoa(nextCursor), + TotalCount: totalCount, + }, nil +} + +func (s *experimentService) newGoalListOrders( + orderBy proto.ListGoalsRequest_OrderBy, + orderDirection proto.ListGoalsRequest_OrderDirection, +) ([]*mysql.Order, error) { + var column string + switch orderBy { + case proto.ListGoalsRequest_DEFAULT, + proto.ListGoalsRequest_NAME: + column = "name" + case proto.ListGoalsRequest_CREATED_AT: + column = "created_at" + case proto.ListGoalsRequest_UPDATED_AT: + column = "updated_at" + default: + return nil, localizedError(statusInvalidOrderBy, locale.JaJP) + } + direction := mysql.OrderDirectionAsc + if orderDirection == proto.ListGoalsRequest_DESC { + direction = mysql.OrderDirectionDesc + } + return []*mysql.Order{mysql.NewOrder(column, direction)}, nil +} + +func (s *experimentService) CreateGoal( + ctx context.Context, + req *proto.CreateGoalRequest, +) (*proto.CreateGoalResponse, error) { + editor, err := s.checkRole(ctx, accountproto.Account_EDITOR, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := validateCreateGoalRequest(req); err != nil { + return nil, err + } + goal, err := domain.NewGoal(req.Command.Id, req.Command.Name, req.Command.Description) + if err != nil { + s.logger.Error( + "Failed to create a new goal", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + 
goalStorage := v2es.NewGoalStorage(tx) + handler := command.NewGoalCommandHandler(editor, goal, s.publisher, req.EnvironmentNamespace) + if err := handler.Handle(ctx, req.Command); err != nil { + return err + } + return goalStorage.CreateGoal(ctx, goal, req.EnvironmentNamespace) + }) + if err != nil { + if err == v2es.ErrGoalAlreadyExists { + return nil, localizedError(statusAlreadyExists, locale.JaJP) + } + s.logger.Error( + "Failed to create goal", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &proto.CreateGoalResponse{}, nil +} + +func validateCreateGoalRequest(req *proto.CreateGoalRequest) error { + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + if req.Command.Id == "" { + return localizedError(statusGoalIDRequired, locale.JaJP) + } + if !goalIDRegex.MatchString(req.Command.Id) { + return localizedError(statusInvalidGoalID, locale.JaJP) + } + if req.Command.Name == "" { + return localizedError(statusGoalNameRequired, locale.JaJP) + } + return nil +} + +func (s *experimentService) UpdateGoal( + ctx context.Context, + req *proto.UpdateGoalRequest, +) (*proto.UpdateGoalResponse, error) { + editor, err := s.checkRole(ctx, accountproto.Account_EDITOR, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if req.Id == "" { + return nil, localizedError(statusGoalIDRequired, locale.JaJP) + } + commands := make([]command.Command, 0) + if req.RenameCommand != nil { + commands = append(commands, req.RenameCommand) + } + if req.ChangeDescriptionCommand != nil { + commands = append(commands, req.ChangeDescriptionCommand) + } + if len(commands) == 0 { + return nil, localizedError(statusNoCommand, locale.JaJP) + } + err = s.updateGoal( + ctx, + editor, + req.EnvironmentNamespace, + req.Id, + commands, + ) + if err != nil { + s.logger.Error( + "Failed to update 
goal", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, err + } + return &proto.UpdateGoalResponse{}, nil +} + +func (s *experimentService) ArchiveGoal( + ctx context.Context, + req *proto.ArchiveGoalRequest, +) (*proto.ArchiveGoalResponse, error) { + editor, err := s.checkRole(ctx, accountproto.Account_EDITOR, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if req.Id == "" { + return nil, localizedError(statusGoalIDRequired, locale.JaJP) + } + if req.Command == nil { + return nil, localizedError(statusNoCommand, locale.JaJP) + } + err = s.updateGoal( + ctx, + editor, + req.EnvironmentNamespace, + req.Id, + []command.Command{req.Command}, + ) + if err != nil { + s.logger.Error( + "Failed to archive goal", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, err + } + return &proto.ArchiveGoalResponse{}, nil +} + +func (s *experimentService) DeleteGoal( + ctx context.Context, + req *proto.DeleteGoalRequest, +) (*proto.DeleteGoalResponse, error) { + editor, err := s.checkRole(ctx, accountproto.Account_EDITOR, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if req.Id == "" { + return nil, localizedError(statusGoalIDRequired, locale.JaJP) + } + if req.Command == nil { + return nil, localizedError(statusNoCommand, locale.JaJP) + } + err = s.updateGoal( + ctx, + editor, + req.EnvironmentNamespace, + req.Id, + []command.Command{req.Command}, + ) + if err != nil { + s.logger.Error( + "Failed to delete goal", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, err + } + return &proto.DeleteGoalResponse{}, nil +} + +func (s *experimentService) updateGoal( + ctx context.Context, + editor *eventproto.Editor, + 
environmentNamespace, goalID string, + commands []command.Command, +) error { + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return localizedError(statusInternal, locale.JaJP) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + goalStorage := v2es.NewGoalStorage(tx) + goal, err := goalStorage.GetGoal(ctx, goalID, environmentNamespace) + if err != nil { + return err + } + handler := command.NewGoalCommandHandler(editor, goal, s.publisher, environmentNamespace) + for _, command := range commands { + if err := handler.Handle(ctx, command); err != nil { + return err + } + } + return goalStorage.UpdateGoal(ctx, goal, environmentNamespace) + }) + if err != nil { + if err == v2es.ErrGoalNotFound || err == v2es.ErrGoalUnexpectedAffectedRows { + return localizedError(statusNotFound, locale.JaJP) + } + return localizedError(statusInternal, locale.JaJP) + } + return nil +} diff --git a/pkg/experiment/api/goal_test.go b/pkg/experiment/api/goal_test.go new file mode 100644 index 000000000..3b25f79d9 --- /dev/null +++ b/pkg/experiment/api/goal_test.go @@ -0,0 +1,435 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "context" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + v2es "github.com/bucketeer-io/bucketeer/pkg/experiment/storage/v2" + storeclient "github.com/bucketeer-io/bucketeer/pkg/storage/testing" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + mysqlmock "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + experimentproto "github.com/bucketeer-io/bucketeer/proto/experiment" +) + +func TestGetGoalMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := []struct { + setup func(*experimentService) + id string + environmentNamespace string + expectedErr error + }{ + { + setup: nil, + id: "", + environmentNamespace: "ns0", + expectedErr: errGoalIDRequiredJaJP, + }, + { + setup: func(s *experimentService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + id: "id-0", + environmentNamespace: "ns0", + expectedErr: errNotFoundJaJP, + }, + { + setup: func(s *experimentService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + id: "id-1", + environmentNamespace: "ns0", + expectedErr: nil, + }, + } + for _, p := range patterns { + service := createExperimentService(mockController, nil) + if p.setup != nil { + p.setup(service) + } + req := &experimentproto.GetGoalRequest{Id: p.id, EnvironmentNamespace: p.environmentNamespace} + _, err := service.GetGoal(createContextWithTokenRoleUnassigned(), req) + assert.Equal(t, p.expectedErr, err) + } +} + +func TestListGoalMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer 
mockController.Finish() + + patterns := []struct { + setup func(*experimentService) + req *experimentproto.ListGoalsRequest + expectedErr error + }{ + { + setup: func(s *experimentService) { + rows := mysqlmock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + req: &experimentproto.ListGoalsRequest{EnvironmentNamespace: "ns0"}, + expectedErr: nil, + }, + } + for _, p := range patterns { + service := createExperimentService(mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.ListGoals(createContextWithTokenRoleUnassigned(), p.req) + assert.Equal(t, p.expectedErr, err) + } +} + +func TestCreateGoalMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := []struct { + setup func(s *experimentService) + req *experimentproto.CreateGoalRequest + expectedErr error + }{ + { + setup: nil, + req: &experimentproto.CreateGoalRequest{ + Command: nil, + EnvironmentNamespace: "ns0", + }, + expectedErr: errNoCommandJaJP, + }, + { + setup: nil, + req: &experimentproto.CreateGoalRequest{ + Command: &experimentproto.CreateGoalCommand{Id: ""}, + EnvironmentNamespace: "ns0", + }, + expectedErr: errGoalIDRequiredJaJP, + }, + { + setup: nil, + req: &experimentproto.CreateGoalRequest{ + Command: &experimentproto.CreateGoalCommand{Id: "bucketeer_goal_id?"}, + EnvironmentNamespace: "ns0", + }, + expectedErr: errInvalidGoalIDJaJP, + }, + { + setup: nil, + req: &experimentproto.CreateGoalRequest{ + Command: &experimentproto.CreateGoalCommand{Id: 
"Bucketeer-id-2019", Name: ""}, + EnvironmentNamespace: "ns0", + }, + expectedErr: errGoalNameRequiredJaJP, + }, + { + setup: func(s *experimentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2es.ErrGoalAlreadyExists) + }, + req: &experimentproto.CreateGoalRequest{ + Command: &experimentproto.CreateGoalCommand{Id: "Bucketeer-id-2019", Name: "name-0"}, + EnvironmentNamespace: "ns0", + }, + expectedErr: errAlreadyExistsJaJP, + }, + { + setup: func(s *experimentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + req: &experimentproto.CreateGoalRequest{ + Command: &experimentproto.CreateGoalCommand{Id: "Bucketeer-id-2020", Name: "name-1"}, + EnvironmentNamespace: "ns0", + }, + expectedErr: nil, + }, + } + for _, p := range patterns { + ctx := createContextWithToken() + service := createExperimentService(mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.CreateGoal(ctx, p.req) + assert.Equal(t, p.expectedErr, err) + } +} + +func TestUpdateGoalMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := []struct { + setup func(*experimentService) + req *experimentproto.UpdateGoalRequest + expectedErr error + }{ + { + setup: nil, + req: &experimentproto.UpdateGoalRequest{ + EnvironmentNamespace: "ns0", + }, + expectedErr: errGoalIDRequiredJaJP, + }, + { + setup: nil, + req: &experimentproto.UpdateGoalRequest{ + Id: "id-0", + EnvironmentNamespace: "ns0", + }, + expectedErr: errNoCommandJaJP, + }, + { + setup: func(s *experimentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) 
+ s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2es.ErrGoalNotFound) + }, + req: &experimentproto.UpdateGoalRequest{ + Id: "id-0", + RenameCommand: &experimentproto.RenameGoalCommand{Name: "name-0"}, + EnvironmentNamespace: "ns0", + }, + expectedErr: errNotFoundJaJP, + }, + { + setup: func(s *experimentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + req: &experimentproto.UpdateGoalRequest{ + Id: "id-1", + RenameCommand: &experimentproto.RenameGoalCommand{Name: "name-1"}, + EnvironmentNamespace: "ns0", + }, + expectedErr: nil, + }, + } + for _, p := range patterns { + ctx := createContextWithToken() + service := createExperimentService(mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.UpdateGoal(ctx, p.req) + assert.Equal(t, p.expectedErr, err) + } +} + +func TestArchiveGoalMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := []struct { + setup func(*experimentService) + req *experimentproto.ArchiveGoalRequest + expectedErr error + }{ + { + setup: nil, + req: &experimentproto.ArchiveGoalRequest{ + EnvironmentNamespace: "ns0", + }, + expectedErr: errGoalIDRequiredJaJP, + }, + { + setup: nil, + req: &experimentproto.ArchiveGoalRequest{ + Id: "id-0", + EnvironmentNamespace: "ns0", + }, + expectedErr: errNoCommandJaJP, + }, + { + setup: func(s *experimentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2es.ErrGoalNotFound) + }, + req: &experimentproto.ArchiveGoalRequest{ + Id: "id-0", + Command: &experimentproto.ArchiveGoalCommand{}, + 
EnvironmentNamespace: "ns0", + }, + expectedErr: errNotFoundJaJP, + }, + { + setup: func(s *experimentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + req: &experimentproto.ArchiveGoalRequest{ + Id: "id-1", + Command: &experimentproto.ArchiveGoalCommand{}, + EnvironmentNamespace: "ns0", + }, + expectedErr: nil, + }, + } + for _, p := range patterns { + ctx := createContextWithToken() + service := createExperimentService(mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.ArchiveGoal(ctx, p.req) + assert.Equal(t, p.expectedErr, err) + } +} + +func TestDeleteGoalMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := []struct { + setup func(*experimentService) + req *experimentproto.DeleteGoalRequest + expectedErr error + }{ + { + setup: nil, + req: &experimentproto.DeleteGoalRequest{ + EnvironmentNamespace: "ns0", + }, + expectedErr: errGoalIDRequiredJaJP, + }, + { + setup: nil, + req: &experimentproto.DeleteGoalRequest{ + Id: "id-0", + EnvironmentNamespace: "ns0", + }, + expectedErr: errNoCommandJaJP, + }, + { + setup: func(s *experimentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2es.ErrGoalNotFound) + }, + req: &experimentproto.DeleteGoalRequest{ + Id: "id-0", + Command: &experimentproto.DeleteGoalCommand{}, + EnvironmentNamespace: "ns0", + }, + expectedErr: errNotFoundJaJP, + }, + { + setup: func(s *experimentService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), 
gomock.Any(), + ).Return(nil) + }, + req: &experimentproto.DeleteGoalRequest{ + Id: "id-1", + Command: &experimentproto.DeleteGoalCommand{}, + EnvironmentNamespace: "ns0", + }, + expectedErr: nil, + }, + } + for _, p := range patterns { + ctx := createContextWithToken() + service := createExperimentService(mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.DeleteGoal(ctx, p.req) + assert.Equal(t, p.expectedErr, err) + } +} + +func TestGoalPermissionDenied(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + ctx := createContextWithTokenRoleUnassigned() + s := storeclient.NewInMemoryStorage() + service := createExperimentService(mockController, s) + patterns := map[string]struct { + action func(context.Context, *experimentService) error + expected error + }{ + "CreateGoal": { + action: func(ctx context.Context, es *experimentService) error { + _, err := es.CreateGoal(ctx, &experimentproto.CreateGoalRequest{}) + return err + }, + expected: errPermissionDeniedJaJP, + }, + "UpdateGoal": { + action: func(ctx context.Context, es *experimentService) error { + _, err := es.UpdateGoal(ctx, &experimentproto.UpdateGoalRequest{}) + return err + }, + expected: errPermissionDeniedJaJP, + }, + "DeleteGoal": { + action: func(ctx context.Context, es *experimentService) error { + _, err := es.DeleteGoal(ctx, &experimentproto.DeleteGoalRequest{}) + return err + }, + expected: errPermissionDeniedJaJP, + }, + } + for msg, p := range patterns { + actual := p.action(ctx, service) + assert.Equal(t, p.expected, actual, "%s", msg) + } +} diff --git a/pkg/experiment/batch/job/BUILD.bazel b/pkg/experiment/batch/job/BUILD.bazel new file mode 100644 index 000000000..b9c346928 --- /dev/null +++ b/pkg/experiment/batch/job/BUILD.bazel @@ -0,0 +1,35 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "experiment_status_updater.go", + 
"job.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/experiment/batch/job", + visibility = ["//visibility:public"], + deps = [ + "//pkg/environment/client:go_default_library", + "//pkg/experiment/client:go_default_library", + "//pkg/experiment/domain:go_default_library", + "//pkg/job:go_default_library", + "//pkg/metrics:go_default_library", + "//proto/environment:go_default_library", + "//proto/experiment:go_default_library", + "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["experiment_status_updater_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/experiment/client/mock:go_default_library", + "//proto/experiment:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/experiment/batch/job/experiment_status_updater.go b/pkg/experiment/batch/job/experiment_status_updater.go new file mode 100644 index 000000000..beddbefd4 --- /dev/null +++ b/pkg/experiment/batch/job/experiment_status_updater.go @@ -0,0 +1,221 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package job + +import ( + "context" + "time" + + "github.com/bucketeer-io/bucketeer/pkg/experiment/domain" + + wrappersproto "github.com/golang/protobuf/ptypes/wrappers" + "go.uber.org/zap" + + environmentclient "github.com/bucketeer-io/bucketeer/pkg/environment/client" + experimentclient "github.com/bucketeer-io/bucketeer/pkg/experiment/client" + "github.com/bucketeer-io/bucketeer/pkg/job" + environmentproto "github.com/bucketeer-io/bucketeer/proto/environment" + experimentproto "github.com/bucketeer-io/bucketeer/proto/experiment" +) + +const ( + listRequestSize = 500 +) + +type experimentStatusUpdater struct { + environmentClient environmentclient.Client + experimentClient experimentclient.Client + opts *options + logger *zap.Logger +} + +func NewExperimentStatusUpdater( + environmentClient environmentclient.Client, + experimentClient experimentclient.Client, + opts ...Option) job.Job { + + dopts := &options{ + timeout: 1 * time.Minute, + logger: zap.NewNop(), + } + for _, opt := range opts { + opt(dopts) + } + return &experimentStatusUpdater{ + environmentClient: environmentClient, + experimentClient: experimentClient, + opts: dopts, + logger: dopts.logger.Named("status-updater"), + } +} + +func (u *experimentStatusUpdater) Run(ctx context.Context) (lastErr error) { + ctx, cancel := context.WithTimeout(ctx, u.opts.timeout) + defer cancel() + environments, err := u.listEnvironments(ctx) + if err != nil { + u.logger.Error("Failed to list environments", zap.Error(err)) + lastErr = err + return + } + for _, env := range environments { + experiments := []*experimentproto.Experiment{} + statuses := []experimentproto.Experiment_Status{ + experimentproto.Experiment_WAITING, + experimentproto.Experiment_RUNNING, + } + for _, status := range statuses { + exps, err := u.listExperiments(ctx, env.Namespace, status) + if err != nil { + u.logger.Error("Failed to list experiments", zap.Error(err), + zap.String("environmentNamespace", env.Namespace), + zap.Int32("status", 
int32(status)), + ) + lastErr = err + continue + } + experiments = append(experiments, exps...) + } + for _, e := range experiments { + if err = u.updateStatus(ctx, env.Namespace, e); err != nil { + lastErr = err + } + } + } + return +} + +func (u *experimentStatusUpdater) updateStatus( + ctx context.Context, + environmentNamespace string, + experiment *experimentproto.Experiment, +) error { + if experiment.Status == experimentproto.Experiment_WAITING { + if err := u.updateToRunning(ctx, environmentNamespace, experiment); err != nil { + return err + } + return nil + } + if experiment.Status == experimentproto.Experiment_RUNNING { + if err := u.updateToStopped(ctx, environmentNamespace, experiment); err != nil { + return err + } + } + return nil +} + +func (u *experimentStatusUpdater) updateToRunning( + ctx context.Context, + environmentNamespace string, + experiment *experimentproto.Experiment, +) error { + de := domain.Experiment{Experiment: experiment} + if err := de.Start(); err != nil { + if err != domain.ErrExperimentBeforeStart { + u.logger.Error("Failed to start check if experiment running", zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + zap.String("id", experiment.Id)) + return err + } + return nil + } + _, err := u.experimentClient.StartExperiment(ctx, &experimentproto.StartExperimentRequest{ + EnvironmentNamespace: environmentNamespace, + Id: experiment.Id, + Command: &experimentproto.StartExperimentCommand{}, + }) + if err != nil { + u.logger.Error("Failed to update status to running", zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + zap.String("id", experiment.Id)) + return err + } + return nil +} + +func (u *experimentStatusUpdater) updateToStopped( + ctx context.Context, + environmentNamespace string, + experiment *experimentproto.Experiment, +) error { + de := domain.Experiment{Experiment: experiment} + if err := de.Finish(); err != nil { + if err != domain.ErrExperimentBeforeStop { + 
u.logger.Error("Failed to end check if experiment running", zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + zap.String("id", experiment.Id)) + return err + } + return nil + } + _, err := u.experimentClient.FinishExperiment(ctx, &experimentproto.FinishExperimentRequest{ + EnvironmentNamespace: environmentNamespace, + Id: experiment.Id, + Command: &experimentproto.FinishExperimentCommand{}, + }) + if err != nil { + u.logger.Error("Failed to update status to stopped", zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + zap.String("id", experiment.Id)) + return err + } + return nil +} + +func (u *experimentStatusUpdater) listExperiments( + ctx context.Context, + environmentNamespace string, + status experimentproto.Experiment_Status, +) ([]*experimentproto.Experiment, error) { + experiments := []*experimentproto.Experiment{} + cursor := "" + for { + resp, err := u.experimentClient.ListExperiments(ctx, &experimentproto.ListExperimentsRequest{ + PageSize: listRequestSize, + Cursor: cursor, + EnvironmentNamespace: environmentNamespace, + Status: &wrappersproto.Int32Value{Value: int32(status)}, + }) + if err != nil { + return nil, err + } + experiments = append(experiments, resp.Experiments...) + size := len(resp.Experiments) + if size == 0 || size < listRequestSize { + return experiments, nil + } + cursor = resp.Cursor + } +} + +func (u *experimentStatusUpdater) listEnvironments(ctx context.Context) ([]*environmentproto.Environment, error) { + environments := []*environmentproto.Environment{} + cursor := "" + for { + resp, err := u.environmentClient.ListEnvironments(ctx, &environmentproto.ListEnvironmentsRequest{ + PageSize: listRequestSize, + Cursor: cursor, + }) + if err != nil { + return nil, err + } + environments = append(environments, resp.Environments...) 
+ environmentSize := len(resp.Environments) + if environmentSize == 0 || environmentSize < listRequestSize { + return environments, nil + } + cursor = resp.Cursor + } +} diff --git a/pkg/experiment/batch/job/experiment_status_updater_test.go b/pkg/experiment/batch/job/experiment_status_updater_test.go new file mode 100644 index 000000000..4f4f44874 --- /dev/null +++ b/pkg/experiment/batch/job/experiment_status_updater_test.go @@ -0,0 +1,129 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package job + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + + "github.com/golang/mock/gomock" + + ecmock "github.com/bucketeer-io/bucketeer/pkg/experiment/client/mock" + experimentproto "github.com/bucketeer-io/bucketeer/proto/experiment" +) + +func TestUpdateStatus(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(t *testing.T, u *experimentStatusUpdater) + input *experimentproto.Experiment + expected error + }{ + "error: StartExperiment fails": { + setup: func(t *testing.T, u *experimentStatusUpdater) { + u.experimentClient.(*ecmock.MockClient).EXPECT().StartExperiment(gomock.Any(), gomock.Any()).Return( + nil, errors.New("test")) + }, + input: &experimentproto.Experiment{ + Id: "eid", + Status: experimentproto.Experiment_WAITING, + StartAt: time.Date(2019, 12, 25, 00, 00, 00, 0, time.UTC).Unix(), + }, + expected: errors.New("test"), + }, + "error: FinishExperiment fails": { + setup: func(t *testing.T, u *experimentStatusUpdater) { + u.experimentClient.(*ecmock.MockClient).EXPECT().FinishExperiment(gomock.Any(), gomock.Any()).Return( + nil, errors.New("test")) + }, + input: &experimentproto.Experiment{ + Id: "eid", + Status: experimentproto.Experiment_RUNNING, + StartAt: time.Date(2019, 12, 25, 00, 00, 00, 0, time.UTC).Unix(), + }, + expected: errors.New("test"), + }, + "success: no update waiting": { + input: &experimentproto.Experiment{ + Id: "eid", + Status: experimentproto.Experiment_WAITING, + StartAt: time.Date(2100, 12, 25, 00, 00, 00, 0, time.UTC).Unix(), + }, + expected: nil, + }, + "success: update waiting to running": { + setup: func(t *testing.T, u *experimentStatusUpdater) { + u.experimentClient.(*ecmock.MockClient).EXPECT().StartExperiment(gomock.Any(), gomock.Any()).Return( + &experimentproto.StartExperimentResponse{}, nil) + }, + input: &experimentproto.Experiment{ + Id: 
"eid", + Status: experimentproto.Experiment_WAITING, + StartAt: time.Date(2019, 12, 25, 00, 00, 00, 0, time.UTC).Unix(), + }, + expected: nil, + }, + "success: no update running": { + input: &experimentproto.Experiment{ + Id: "eid", + Status: experimentproto.Experiment_RUNNING, + StopAt: time.Date(2100, 12, 25, 00, 00, 00, 0, time.UTC).Unix(), + }, + expected: nil, + }, + "success: update running to stopped": { + setup: func(t *testing.T, u *experimentStatusUpdater) { + u.experimentClient.(*ecmock.MockClient).EXPECT().FinishExperiment(gomock.Any(), gomock.Any()).Return( + &experimentproto.FinishExperimentResponse{}, nil) + }, + input: &experimentproto.Experiment{ + Id: "eid", + Status: experimentproto.Experiment_RUNNING, + StartAt: time.Date(2019, 12, 25, 00, 00, 00, 0, time.UTC).Unix(), + StopAt: time.Date(2019, 12, 26, 00, 00, 00, 0, time.UTC).Unix(), + }, + expected: nil, + }, + } + + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + updater := newMockExperimentStatusUpdater(t, mockController) + if p.setup != nil { + p.setup(t, updater) + } + err := updater.updateStatus(context.Background(), "ns", p.input) + assert.Equal(t, p.expected, err) + }) + } +} + +func newMockExperimentStatusUpdater(t *testing.T, c *gomock.Controller) *experimentStatusUpdater { + return &experimentStatusUpdater{ + experimentClient: ecmock.NewMockClient(c), + opts: &options{ + timeout: 5 * time.Second, + }, + logger: zap.NewNop().Named("test-experiment-status-updater"), + } +} diff --git a/pkg/experiment/batch/job/job.go b/pkg/experiment/batch/job/job.go new file mode 100644 index 000000000..43e10650b --- /dev/null +++ b/pkg/experiment/batch/job/job.go @@ -0,0 +1,49 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package job + +import ( + "time" + + "go.uber.org/zap" + + "github.com/bucketeer-io/bucketeer/pkg/metrics" +) + +type options struct { + timeout time.Duration + metrics metrics.Registerer + logger *zap.Logger +} + +type Option func(*options) + +func WithTimeout(timeout time.Duration) Option { + return func(opts *options) { + opts.timeout = timeout + } +} + +func WithMetrics(r metrics.Registerer) Option { + return func(opts *options) { + opts.metrics = r + } +} + +func WithLogger(l *zap.Logger) Option { + return func(opts *options) { + opts.logger = l + } +} diff --git a/pkg/experiment/client/BUILD.bazel b/pkg/experiment/client/BUILD.bazel new file mode 100644 index 000000000..ca71942cf --- /dev/null +++ b/pkg/experiment/client/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["client.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/experiment/client", + visibility = ["//visibility:public"], + deps = [ + "//pkg/rpc/client:go_default_library", + "//proto/experiment:go_default_library", + "@org_golang_google_grpc//:go_default_library", + ], +) diff --git a/pkg/experiment/client/client.go b/pkg/experiment/client/client.go new file mode 100644 index 000000000..3fe7a7327 --- /dev/null +++ b/pkg/experiment/client/client.go @@ -0,0 +1,50 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package client + +import ( + "google.golang.org/grpc" + + rpcclient "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + proto "github.com/bucketeer-io/bucketeer/proto/experiment" +) + +type Client interface { + proto.ExperimentServiceClient + Close() +} + +type client struct { + proto.ExperimentServiceClient + address string + connection *grpc.ClientConn +} + +func NewClient(addr, certPath string, opts ...rpcclient.Option) (Client, error) { + conn, err := rpcclient.NewClientConn(addr, certPath, opts...) 
+ if err != nil { + return nil, err + } + return &client{ + ExperimentServiceClient: proto.NewExperimentServiceClient(conn), + address: addr, + connection: conn, + }, nil +} + +func (c *client) Close() { + c.connection.Close() +} diff --git a/pkg/experiment/client/mock/BUILD.bazel b/pkg/experiment/client/mock/BUILD.bazel new file mode 100644 index 000000000..5892582fa --- /dev/null +++ b/pkg/experiment/client/mock/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["client.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/experiment/client/mock", + visibility = ["//visibility:public"], + deps = [ + "//proto/experiment:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@org_golang_google_grpc//:go_default_library", + ], +) diff --git a/pkg/experiment/client/mock/client.go b/pkg/experiment/client/mock/client.go new file mode 100644 index 000000000..c43beb38d --- /dev/null +++ b/pkg/experiment/client/mock/client.go @@ -0,0 +1,350 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: client.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + grpc "google.golang.org/grpc" + + experiment "github.com/bucketeer-io/bucketeer/proto/experiment" +) + +// MockClient is a mock of Client interface. +type MockClient struct { + ctrl *gomock.Controller + recorder *MockClientMockRecorder +} + +// MockClientMockRecorder is the mock recorder for MockClient. +type MockClientMockRecorder struct { + mock *MockClient +} + +// NewMockClient creates a new mock instance. +func NewMockClient(ctrl *gomock.Controller) *MockClient { + mock := &MockClient{ctrl: ctrl} + mock.recorder = &MockClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. 
+func (m *MockClient) EXPECT() *MockClientMockRecorder { + return m.recorder +} + +// ArchiveExperiment mocks base method. +func (m *MockClient) ArchiveExperiment(ctx context.Context, in *experiment.ArchiveExperimentRequest, opts ...grpc.CallOption) (*experiment.ArchiveExperimentResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ArchiveExperiment", varargs...) + ret0, _ := ret[0].(*experiment.ArchiveExperimentResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ArchiveExperiment indicates an expected call of ArchiveExperiment. +func (mr *MockClientMockRecorder) ArchiveExperiment(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ArchiveExperiment", reflect.TypeOf((*MockClient)(nil).ArchiveExperiment), varargs...) +} + +// ArchiveGoal mocks base method. +func (m *MockClient) ArchiveGoal(ctx context.Context, in *experiment.ArchiveGoalRequest, opts ...grpc.CallOption) (*experiment.ArchiveGoalResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ArchiveGoal", varargs...) + ret0, _ := ret[0].(*experiment.ArchiveGoalResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ArchiveGoal indicates an expected call of ArchiveGoal. +func (mr *MockClientMockRecorder) ArchiveGoal(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ArchiveGoal", reflect.TypeOf((*MockClient)(nil).ArchiveGoal), varargs...) +} + +// Close mocks base method. 
+func (m *MockClient) Close() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Close") +} + +// Close indicates an expected call of Close. +func (mr *MockClientMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockClient)(nil).Close)) +} + +// CreateExperiment mocks base method. +func (m *MockClient) CreateExperiment(ctx context.Context, in *experiment.CreateExperimentRequest, opts ...grpc.CallOption) (*experiment.CreateExperimentResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateExperiment", varargs...) + ret0, _ := ret[0].(*experiment.CreateExperimentResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateExperiment indicates an expected call of CreateExperiment. +func (mr *MockClientMockRecorder) CreateExperiment(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateExperiment", reflect.TypeOf((*MockClient)(nil).CreateExperiment), varargs...) +} + +// CreateGoal mocks base method. +func (m *MockClient) CreateGoal(ctx context.Context, in *experiment.CreateGoalRequest, opts ...grpc.CallOption) (*experiment.CreateGoalResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateGoal", varargs...) + ret0, _ := ret[0].(*experiment.CreateGoalResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateGoal indicates an expected call of CreateGoal. +func (mr *MockClientMockRecorder) CreateGoal(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateGoal", reflect.TypeOf((*MockClient)(nil).CreateGoal), varargs...) +} + +// DeleteExperiment mocks base method. +func (m *MockClient) DeleteExperiment(ctx context.Context, in *experiment.DeleteExperimentRequest, opts ...grpc.CallOption) (*experiment.DeleteExperimentResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteExperiment", varargs...) + ret0, _ := ret[0].(*experiment.DeleteExperimentResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteExperiment indicates an expected call of DeleteExperiment. +func (mr *MockClientMockRecorder) DeleteExperiment(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteExperiment", reflect.TypeOf((*MockClient)(nil).DeleteExperiment), varargs...) +} + +// DeleteGoal mocks base method. +func (m *MockClient) DeleteGoal(ctx context.Context, in *experiment.DeleteGoalRequest, opts ...grpc.CallOption) (*experiment.DeleteGoalResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteGoal", varargs...) + ret0, _ := ret[0].(*experiment.DeleteGoalResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteGoal indicates an expected call of DeleteGoal. +func (mr *MockClientMockRecorder) DeleteGoal(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteGoal", reflect.TypeOf((*MockClient)(nil).DeleteGoal), varargs...) +} + +// FinishExperiment mocks base method. 
+func (m *MockClient) FinishExperiment(ctx context.Context, in *experiment.FinishExperimentRequest, opts ...grpc.CallOption) (*experiment.FinishExperimentResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "FinishExperiment", varargs...) + ret0, _ := ret[0].(*experiment.FinishExperimentResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FinishExperiment indicates an expected call of FinishExperiment. +func (mr *MockClientMockRecorder) FinishExperiment(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FinishExperiment", reflect.TypeOf((*MockClient)(nil).FinishExperiment), varargs...) +} + +// GetExperiment mocks base method. +func (m *MockClient) GetExperiment(ctx context.Context, in *experiment.GetExperimentRequest, opts ...grpc.CallOption) (*experiment.GetExperimentResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetExperiment", varargs...) + ret0, _ := ret[0].(*experiment.GetExperimentResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetExperiment indicates an expected call of GetExperiment. +func (mr *MockClientMockRecorder) GetExperiment(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetExperiment", reflect.TypeOf((*MockClient)(nil).GetExperiment), varargs...) +} + +// GetGoal mocks base method. 
// NOTE(review): the methods below follow the gomock (mockgen) generated-mock
// pattern (ctrl.Call / MockClientMockRecorder / reflect.TypeOf), so they appear
// to be machine-generated — prefer regenerating with mockgen over hand-editing.
func (m *MockClient) GetGoal(ctx context.Context, in *experiment.GetGoalRequest, opts ...grpc.CallOption) (*experiment.GetGoalResponse, error) {
	m.ctrl.T.Helper()
	varargs := []interface{}{ctx, in}
	for _, a := range opts {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "GetGoal", varargs...)
	ret0, _ := ret[0].(*experiment.GetGoalResponse)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GetGoal indicates an expected call of GetGoal.
func (mr *MockClientMockRecorder) GetGoal(ctx, in interface{}, opts ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	varargs := append([]interface{}{ctx, in}, opts...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGoal", reflect.TypeOf((*MockClient)(nil).GetGoal), varargs...)
}

// ListExperiments mocks base method.
func (m *MockClient) ListExperiments(ctx context.Context, in *experiment.ListExperimentsRequest, opts ...grpc.CallOption) (*experiment.ListExperimentsResponse, error) {
	m.ctrl.T.Helper()
	varargs := []interface{}{ctx, in}
	for _, a := range opts {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "ListExperiments", varargs...)
	ret0, _ := ret[0].(*experiment.ListExperimentsResponse)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ListExperiments indicates an expected call of ListExperiments.
func (mr *MockClientMockRecorder) ListExperiments(ctx, in interface{}, opts ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	varargs := append([]interface{}{ctx, in}, opts...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListExperiments", reflect.TypeOf((*MockClient)(nil).ListExperiments), varargs...)
}

// ListGoals mocks base method.
func (m *MockClient) ListGoals(ctx context.Context, in *experiment.ListGoalsRequest, opts ...grpc.CallOption) (*experiment.ListGoalsResponse, error) {
	m.ctrl.T.Helper()
	varargs := []interface{}{ctx, in}
	for _, a := range opts {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "ListGoals", varargs...)
	ret0, _ := ret[0].(*experiment.ListGoalsResponse)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ListGoals indicates an expected call of ListGoals.
func (mr *MockClientMockRecorder) ListGoals(ctx, in interface{}, opts ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	varargs := append([]interface{}{ctx, in}, opts...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListGoals", reflect.TypeOf((*MockClient)(nil).ListGoals), varargs...)
}

// StartExperiment mocks base method.
func (m *MockClient) StartExperiment(ctx context.Context, in *experiment.StartExperimentRequest, opts ...grpc.CallOption) (*experiment.StartExperimentResponse, error) {
	m.ctrl.T.Helper()
	varargs := []interface{}{ctx, in}
	for _, a := range opts {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "StartExperiment", varargs...)
	ret0, _ := ret[0].(*experiment.StartExperimentResponse)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StartExperiment indicates an expected call of StartExperiment.
func (mr *MockClientMockRecorder) StartExperiment(ctx, in interface{}, opts ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	varargs := append([]interface{}{ctx, in}, opts...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartExperiment", reflect.TypeOf((*MockClient)(nil).StartExperiment), varargs...)
}

// StopExperiment mocks base method.
func (m *MockClient) StopExperiment(ctx context.Context, in *experiment.StopExperimentRequest, opts ...grpc.CallOption) (*experiment.StopExperimentResponse, error) {
	m.ctrl.T.Helper()
	varargs := []interface{}{ctx, in}
	for _, a := range opts {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "StopExperiment", varargs...)
	ret0, _ := ret[0].(*experiment.StopExperimentResponse)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StopExperiment indicates an expected call of StopExperiment.
func (mr *MockClientMockRecorder) StopExperiment(ctx, in interface{}, opts ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	varargs := append([]interface{}{ctx, in}, opts...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopExperiment", reflect.TypeOf((*MockClient)(nil).StopExperiment), varargs...)
}

// UpdateExperiment mocks base method.
func (m *MockClient) UpdateExperiment(ctx context.Context, in *experiment.UpdateExperimentRequest, opts ...grpc.CallOption) (*experiment.UpdateExperimentResponse, error) {
	m.ctrl.T.Helper()
	varargs := []interface{}{ctx, in}
	for _, a := range opts {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "UpdateExperiment", varargs...)
	ret0, _ := ret[0].(*experiment.UpdateExperimentResponse)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// UpdateExperiment indicates an expected call of UpdateExperiment.
func (mr *MockClientMockRecorder) UpdateExperiment(ctx, in interface{}, opts ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	varargs := append([]interface{}{ctx, in}, opts...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateExperiment", reflect.TypeOf((*MockClient)(nil).UpdateExperiment), varargs...)
}

// UpdateGoal mocks base method.
func (m *MockClient) UpdateGoal(ctx context.Context, in *experiment.UpdateGoalRequest, opts ...grpc.CallOption) (*experiment.UpdateGoalResponse, error) {
	m.ctrl.T.Helper()
	varargs := []interface{}{ctx, in}
	for _, a := range opts {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "UpdateGoal", varargs...)
	ret0, _ := ret[0].(*experiment.UpdateGoalResponse)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// UpdateGoal indicates an expected call of UpdateGoal.
func (mr *MockClientMockRecorder) UpdateGoal(ctx, in interface{}, opts ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	varargs := append([]interface{}{ctx, in}, opts...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateGoal", reflect.TypeOf((*MockClient)(nil).UpdateGoal), varargs...)
}
diff --git a/pkg/experiment/cmd/batch/BUILD.bazel b/pkg/experiment/cmd/batch/BUILD.bazel
new file mode 100644
index 000000000..dc48bada6
--- /dev/null
+++ b/pkg/experiment/cmd/batch/BUILD.bazel
@@ -0,0 +1,21 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["batch.go"],
    importpath = "github.com/bucketeer-io/bucketeer/pkg/experiment/cmd/batch",
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/cli:go_default_library",
        "//pkg/environment/client:go_default_library",
        "//pkg/experiment/batch/job:go_default_library",
        "//pkg/experiment/client:go_default_library",
        "//pkg/health:go_default_library",
        "//pkg/job:go_default_library",
        "//pkg/metrics:go_default_library",
        "//pkg/rpc:go_default_library",
        "//pkg/rpc/client:go_default_library",
        "@in_gopkg_alecthomas_kingpin_v2//:go_default_library",
        "@org_uber_go_zap//:go_default_library",
    ],
)
diff --git a/pkg/experiment/cmd/batch/batch.go b/pkg/experiment/cmd/batch/batch.go
new file mode 100644
index 000000000..24cbdfa34
--- /dev/null
+++ b/pkg/experiment/cmd/batch/batch.go
@@ -0,0 +1,172 @@
// Copyright 2022 The Bucketeer Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package batch wires up and runs the experiment batch (cron-job) process.
package batch

import (
	"context"
	"os"
	"time"

	"go.uber.org/zap"
	kingpin "gopkg.in/alecthomas/kingpin.v2"

	"github.com/bucketeer-io/bucketeer/pkg/cli"
	environmentclient "github.com/bucketeer-io/bucketeer/pkg/environment/client"
	experimentjob "github.com/bucketeer-io/bucketeer/pkg/experiment/batch/job"
	experimentclient "github.com/bucketeer-io/bucketeer/pkg/experiment/client"
	"github.com/bucketeer-io/bucketeer/pkg/health"
	"github.com/bucketeer-io/bucketeer/pkg/job"
	"github.com/bucketeer-io/bucketeer/pkg/metrics"
	"github.com/bucketeer-io/bucketeer/pkg/rpc"
	"github.com/bucketeer-io/bucketeer/pkg/rpc/client"
)

const command = "batch"

// batch holds the parsed CLI flag values for the "batch" subcommand; the
// pointer fields are populated by kingpin when flags are parsed.
type batch struct {
	*kingpin.CmdClause
	port               *int
	project            *string
	environmentService *string
	experimentService  *string
	certPath           *string
	keyPath            *string
	serviceTokenPath   *string
}

// RegisterCommand declares the "batch" subcommand and its flags on the parent
// command and registers the resulting command with the registry.
func RegisterCommand(r cli.CommandRegistry, p cli.ParentCommand) cli.Command {
	cmd := p.Command(command, "Start batch layer")
	batch := &batch{
		CmdClause: cmd,
		port:      cmd.Flag("port", "Port to bind to.").Default("9090").Int(),
		project:   cmd.Flag("project", "Google Cloud project name.").String(),
		environmentService: cmd.Flag(
			"environment-service",
			"bucketeer-environment-service address.",
		).Default("environment:9090").String(),
		experimentService: cmd.Flag(
			"experiment-service",
			"bucketeer-experiment-service address.",
		).Default("experiment:9090").String(),
		certPath:         cmd.Flag("cert", "Path to TLS certificate.").Required().String(),
		keyPath:          cmd.Flag("key", "Path to TLS key.").Required().String(),
		serviceTokenPath: cmd.Flag("service-token", "Path to service token.").Required().String(),
	}
	r.RegisterCommand(batch)
	return batch
}

// Run starts the batch process: it dials the environment and experiment
// services, registers the cron jobs with the job manager, starts a
// health-check RPC server, and blocks until ctx is cancelled.
// Defers run LIFO, so on shutdown the RPC server stops first, then the job
// manager, then the gRPC clients are closed.
func (b *batch) Run(ctx context.Context, metrics metrics.Metrics, logger *zap.Logger) error {
	// Rewrite secret/cert paths when running under telepresence (see below).
	*b.serviceTokenPath = b.insertTelepresenceMountRoot(*b.serviceTokenPath)
	*b.keyPath = b.insertTelepresenceMountRoot(*b.keyPath)
	*b.certPath = b.insertTelepresenceMountRoot(*b.certPath)

	registerer := metrics.DefaultRegisterer()

	creds, err := client.NewPerRPCCredentials(*b.serviceTokenPath)
	if err != nil {
		return err
	}

	// Shared dial options for both backend clients.
	clientOptions := []client.Option{
		client.WithPerRPCCredentials(creds),
		client.WithDialTimeout(30 * time.Second),
		client.WithBlock(),
		client.WithMetrics(registerer),
		client.WithLogger(logger),
	}
	environmentClient, err := environmentclient.NewClient(*b.environmentService, *b.certPath, clientOptions...)
	if err != nil {
		return err
	}
	defer environmentClient.Close()

	experimentClient, err := experimentclient.NewClient(*b.experimentService, *b.certPath, clientOptions...)
	if err != nil {
		return err
	}
	defer experimentClient.Close()

	manager := job.NewManager(
		registerer,
		"experiment_batch",
		logger,
	)
	defer manager.Stop()
	err = b.registerJobs(manager, environmentClient, experimentClient, logger)
	if err != nil {
		return err
	}
	go manager.Run() // nolint:errcheck

	healthChecker := health.NewGrpcChecker(
		health.WithTimeout(time.Second),
		health.WithCheck("metrics", metrics.Check),
	)
	go healthChecker.Run(ctx)

	server := rpc.NewServer(healthChecker, *b.certPath, *b.keyPath,
		rpc.WithPort(*b.port),
		rpc.WithMetrics(registerer),
		rpc.WithLogger(logger),
		rpc.WithHandler("/health", healthChecker),
	)
	defer server.Stop(10 * time.Second)
	go server.Run()

	<-ctx.Done()
	return nil
}

// registerJobs adds the cron jobs to the manager; a failure to add any job
// aborts startup. The cron spec appears to be 6-field (seconds first), so
// "0 * * * * *" would fire once per minute — confirm against the job package.
func (b *batch) registerJobs(
	m *job.Manager,
	environmentClient environmentclient.Client,
	experimentClient experimentclient.Client,
	logger *zap.Logger) error {

	jobs := []struct {
		name string
		cron string
		job  job.Job
	}{
		{
			cron: "0 * * * * *",
			name: "experiment_status_updater",
			job: experimentjob.NewExperimentStatusUpdater(
				environmentClient,
				experimentClient,
				experimentjob.WithLogger(logger)),
		},
	}
	for i := range jobs {
		if err := m.AddCronJob(jobs[i].name, jobs[i].cron, jobs[i].job); err != nil {
			logger.Error("Failed to add cron job",
				zap.String("name", jobs[i].name),
				zap.String("cron", jobs[i].cron),
				zap.Error(err))
			return err
		}
	}
	return nil
}

// for telepresence --swap-deployment
// When TELEPRESENCE_ROOT is set, mounted volumes are exposed under that root,
// so file paths passed via flags must be prefixed with it.
func (b *batch) insertTelepresenceMountRoot(path string) string {
	volumeRoot := os.Getenv("TELEPRESENCE_ROOT")
	if volumeRoot == "" {
		return path
	}
	return volumeRoot + path
}
diff --git a/pkg/experiment/cmd/server/BUILD.bazel b/pkg/experiment/cmd/server/BUILD.bazel
new file mode 100644
index 000000000..ea8379f95
--- /dev/null
+++ b/pkg/experiment/cmd/server/BUILD.bazel
@@ -0,0 +1,24 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name =
"go_default_library", + srcs = ["server.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/experiment/cmd/server", + visibility = ["//visibility:public"], + deps = [ + "//pkg/account/client:go_default_library", + "//pkg/cli:go_default_library", + "//pkg/experiment/api:go_default_library", + "//pkg/feature/client:go_default_library", + "//pkg/health:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/pubsub:go_default_library", + "//pkg/pubsub/publisher:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/rpc/client:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//pkg/token:go_default_library", + "@in_gopkg_alecthomas_kingpin_v2//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/experiment/cmd/server/server.go b/pkg/experiment/cmd/server/server.go new file mode 100644 index 000000000..d478228d1 --- /dev/null +++ b/pkg/experiment/cmd/server/server.go @@ -0,0 +1,212 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package server + +import ( + "context" + "os" + "time" + + "go.uber.org/zap" + kingpin "gopkg.in/alecthomas/kingpin.v2" + + accountclient "github.com/bucketeer-io/bucketeer/pkg/account/client" + "github.com/bucketeer-io/bucketeer/pkg/cli" + "github.com/bucketeer-io/bucketeer/pkg/experiment/api" + featureclient "github.com/bucketeer-io/bucketeer/pkg/feature/client" + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/pubsub" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + "github.com/bucketeer-io/bucketeer/pkg/token" +) + +const command = "server" + +type server struct { + *kingpin.CmdClause + port *int + project *string + mysqlUser *string + mysqlPass *string + mysqlHost *string + mysqlPort *int + mysqlDBName *string + topic *string + featureService *string + accountService *string + certPath *string + keyPath *string + serviceTokenPath *string + + oauthKeyPath *string + oauthClientID *string + oauthIssuer *string +} + +func RegisterCommand(r cli.CommandRegistry, p cli.ParentCommand) cli.Command { + cmd := p.Command(command, "Start the server") + server := &server{ + CmdClause: cmd, + port: cmd.Flag("port", "Port to bind to.").Default("9090").Int(), + project: cmd.Flag("project", "Google Cloud project name.").String(), + mysqlUser: cmd.Flag("mysql-user", "MySQL user.").Required().String(), + mysqlPass: cmd.Flag("mysql-pass", "MySQL password.").Required().String(), + mysqlHost: cmd.Flag("mysql-host", "MySQL host.").Required().String(), + mysqlPort: cmd.Flag("mysql-port", "MySQL port.").Required().Int(), + mysqlDBName: cmd.Flag("mysql-db-name", "MySQL database name.").Required().String(), + topic: cmd.Flag("topic", "PubSub topic to publish domain events.").Required().String(), + featureService: 
cmd.Flag("feature-service", "bucketeer-feature-service address.").Default("feature:9090").String(), + accountService: cmd.Flag("account-service", "bucketeer-account-service address.").Default("account:9090").String(), + certPath: cmd.Flag("cert", "Path to TLS certificate.").Required().String(), + keyPath: cmd.Flag("key", "Path to TLS key.").Required().String(), + serviceTokenPath: cmd.Flag("service-token", "Path to service token.").Required().String(), + oauthKeyPath: cmd.Flag("oauth-key", "Path to public key used to verify oauth token.").Required().String(), + oauthClientID: cmd.Flag("oauth-client-id", "The oauth clientID registered at dex.").Required().String(), + oauthIssuer: cmd.Flag("oauth-issuer", "The url of dex issuer.").Required().String(), + } + r.RegisterCommand(server) + return server +} + +func (s *server) Run(ctx context.Context, metrics metrics.Metrics, logger *zap.Logger) error { + *s.serviceTokenPath = s.insertTelepresenceMoutRoot(*s.serviceTokenPath) + *s.oauthKeyPath = s.insertTelepresenceMoutRoot(*s.oauthKeyPath) + *s.keyPath = s.insertTelepresenceMoutRoot(*s.keyPath) + *s.certPath = s.insertTelepresenceMoutRoot(*s.certPath) + + registerer := metrics.DefaultRegisterer() + + mysqlClient, err := s.createMySQLClient(ctx, registerer, logger) + if err != nil { + return err + } + defer mysqlClient.Close() + + publisher, err := s.createPublisher(ctx, registerer, logger) + if err != nil { + return err + } + defer publisher.Stop() + + creds, err := client.NewPerRPCCredentials(*s.serviceTokenPath) + if err != nil { + return err + } + + featureClient, err := featureclient.NewClient(*s.featureService, *s.certPath, + client.WithPerRPCCredentials(creds), + client.WithDialTimeout(30*time.Second), + client.WithBlock(), + client.WithMetrics(registerer), + client.WithLogger(logger), + ) + if err != nil { + return err + } + defer featureClient.Close() + + accountClient, err := accountclient.NewClient(*s.accountService, *s.certPath, + 
client.WithPerRPCCredentials(creds), + client.WithDialTimeout(30*time.Second), + client.WithBlock(), + client.WithMetrics(registerer), + client.WithLogger(logger), + ) + if err != nil { + return err + } + defer accountClient.Close() + + service := api.NewExperimentService( + featureClient, + accountClient, + mysqlClient, + publisher, + api.WithLogger(logger), + ) + + verifier, err := token.NewVerifier(*s.oauthKeyPath, *s.oauthIssuer, *s.oauthClientID) + if err != nil { + return err + } + + healthChecker := health.NewGrpcChecker( + health.WithTimeout(time.Second), + health.WithCheck("metrics", metrics.Check), + ) + go healthChecker.Run(ctx) + + server := rpc.NewServer(service, *s.certPath, *s.keyPath, + rpc.WithPort(*s.port), + rpc.WithVerifier(verifier), + rpc.WithMetrics(registerer), + rpc.WithLogger(logger), + rpc.WithService(healthChecker), + rpc.WithHandler("/health", healthChecker), + ) + defer server.Stop(10 * time.Second) + go server.Run() + + <-ctx.Done() + return nil +} + +func (s *server) createMySQLClient( + ctx context.Context, + registerer metrics.Registerer, + logger *zap.Logger, +) (mysql.Client, error) { + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + return mysql.NewClient( + ctx, + *s.mysqlUser, *s.mysqlPass, *s.mysqlHost, + *s.mysqlPort, + *s.mysqlDBName, + mysql.WithLogger(logger), + mysql.WithMetrics(registerer), + ) +} + +func (s *server) createPublisher( + ctx context.Context, + registerer metrics.Registerer, + logger *zap.Logger, +) (publisher.Publisher, error) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + client, err := pubsub.NewClient( + ctx, + *s.project, + pubsub.WithMetrics(registerer), + pubsub.WithLogger(logger), + ) + if err != nil { + return nil, err + } + return client.CreatePublisher(*s.topic) +} + +func (s *server) insertTelepresenceMoutRoot(path string) string { + volumeRoot := os.Getenv("TELEPRESENCE_ROOT") + if volumeRoot == "" { + return path + } + return 
volumeRoot + path +} diff --git a/pkg/experiment/command/BUILD.bazel b/pkg/experiment/command/BUILD.bazel new file mode 100644 index 000000000..b3b886027 --- /dev/null +++ b/pkg/experiment/command/BUILD.bazel @@ -0,0 +1,40 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "command.go", + "experiment.go", + "goal.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/experiment/command", + visibility = ["//visibility:public"], + deps = [ + "//pkg/domainevent/domain:go_default_library", + "//pkg/experiment/domain:go_default_library", + "//pkg/pubsub/publisher:go_default_library", + "//proto/event/domain:go_default_library", + "//proto/experiment:go_default_library", + "@com_github_golang_protobuf//proto:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "experiment_test.go", + "goal_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//pkg/experiment/domain:go_default_library", + "//pkg/pubsub/publisher:go_default_library", + "//pkg/pubsub/publisher/mock:go_default_library", + "//proto/account:go_default_library", + "//proto/event/domain:go_default_library", + "//proto/experiment:go_default_library", + "//proto/feature:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + ], +) diff --git a/pkg/experiment/command/command.go b/pkg/experiment/command/command.go new file mode 100644 index 000000000..6fdbec614 --- /dev/null +++ b/pkg/experiment/command/command.go @@ -0,0 +1,30 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package command + +import ( + "context" + "errors" +) + +var ( + ErrUnknownCommand = errors.New("command: unknown command") +) + +type Command interface{} + +type Handler interface { + Handle(ctx context.Context, cmd Command) error +} diff --git a/pkg/experiment/command/experiment.go b/pkg/experiment/command/experiment.go new file mode 100644 index 000000000..a37711342 --- /dev/null +++ b/pkg/experiment/command/experiment.go @@ -0,0 +1,189 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 

package command

import (
	"context"

	pb "github.com/golang/protobuf/proto" // nolint:staticcheck

	domainevent "github.com/bucketeer-io/bucketeer/pkg/domainevent/domain"
	"github.com/bucketeer-io/bucketeer/pkg/experiment/domain"
	"github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher"
	eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain"
	proto "github.com/bucketeer-io/bucketeer/proto/experiment"
)

// experimentCommandHandler mutates a single domain.Experiment in response to
// experiment commands and publishes one domain event per handled command.
type experimentCommandHandler struct {
	editor               *eventproto.Editor
	experiment           *domain.Experiment
	publisher            publisher.Publisher
	environmentNamespace string
}

// NewExperimentCommandHandler returns a Handler bound to the given editor,
// experiment, publisher, and environment namespace.
func NewExperimentCommandHandler(
	editor *eventproto.Editor,
	experiment *domain.Experiment,
	p publisher.Publisher,
	environmentNamespace string,
) Handler {
	return &experimentCommandHandler{
		editor:               editor,
		experiment:           experiment,
		publisher:            p,
		environmentNamespace: environmentNamespace,
	}
}

// Handle dispatches on the concrete proto command type; unrecognized commands
// yield ErrUnknownCommand.
func (h *experimentCommandHandler) Handle(ctx context.Context, cmd Command) error {
	switch c := cmd.(type) {
	case *proto.CreateExperimentCommand:
		return h.create(ctx, c)
	case *proto.ChangeExperimentPeriodCommand:
		return h.changePeriod(ctx, c)
	case *proto.ChangeExperimentNameCommand:
		return h.changeName(ctx, c)
	case *proto.ChangeExperimentDescriptionCommand:
		return h.changeDescription(ctx, c)
	case *proto.StopExperimentCommand:
		return h.stop(ctx, c)
	case *proto.StartExperimentCommand:
		return h.start(ctx, c)
	case *proto.FinishExperimentCommand:
		return h.finish(ctx, c)
	case *proto.ArchiveExperimentCommand:
		return h.archive(ctx, c)
	case *proto.DeleteExperimentCommand:
		return h.delete(ctx, c)
	default:
		return ErrUnknownCommand
	}
}

// create publishes a CREATED event snapshotting the already-constructed
// experiment; it does not mutate the domain object.
func (h *experimentCommandHandler) create(ctx context.Context, cmd *proto.CreateExperimentCommand) error {
	return h.send(ctx, eventproto.Event_EXPERIMENT_CREATED, &eventproto.ExperimentCreatedEvent{
		Id:              h.experiment.Id,
		FeatureId:       h.experiment.FeatureId,
		FeatureVersion:  h.experiment.FeatureVersion,
		Variations:      h.experiment.Variations,
		GoalId:          h.experiment.GoalId,
		GoalIds:         h.experiment.GoalIds,
		StartAt:         h.experiment.StartAt,
		StopAt:          h.experiment.StopAt,
		Stopped:         h.experiment.Stopped,
		StoppedAt:       h.experiment.StoppedAt,
		CreatedAt:       h.experiment.CreatedAt,
		UpdatedAt:       h.experiment.UpdatedAt,
		Name:            h.experiment.Name,
		Description:     h.experiment.Description,
		BaseVariationId: h.experiment.BaseVariationId,
	})
}

func (h *experimentCommandHandler) changePeriod(ctx context.Context, cmd *proto.ChangeExperimentPeriodCommand) error {
	if err := h.experiment.ChangePeriod(cmd.StartAt, cmd.StopAt); err != nil {
		return err
	}
	return h.send(ctx, eventproto.Event_EXPERIMENT_PERIOD_CHANGED, &eventproto.ExperimentPeriodChangedEvent{
		Id:      h.experiment.Id,
		StartAt: cmd.StartAt,
		StopAt:  cmd.StopAt,
	})
}

func (h *experimentCommandHandler) changeName(ctx context.Context, cmd *proto.ChangeExperimentNameCommand) error {
	if err := h.experiment.ChangeName(cmd.Name); err != nil {
		return err
	}
	return h.send(ctx, eventproto.Event_EXPERIMENT_NAME_CHANGED, &eventproto.ExperimentNameChangedEvent{
		Id:   h.experiment.Id,
		Name: h.experiment.Name,
	})
}

func (h *experimentCommandHandler) changeDescription(
	ctx context.Context,
	cmd *proto.ChangeExperimentDescriptionCommand,
) error {
	if err := h.experiment.ChangeDescription(cmd.Description); err != nil {
		return err
	}
	return h.send(ctx, eventproto.Event_EXPERIMENT_DESCRIPTION_CHANGED, &eventproto.ExperimentDescriptionChangedEvent{
		Id:          h.experiment.Id,
		Description: h.experiment.Description,
	})
}

func (h *experimentCommandHandler) stop(ctx context.Context, cmd *proto.StopExperimentCommand) error {
	if err := h.experiment.Stop(); err != nil {
		return err
	}
	return h.send(ctx, eventproto.Event_EXPERIMENT_STOPPED, &eventproto.ExperimentStoppedEvent{
		Id:        h.experiment.Id,
		StoppedAt: h.experiment.StoppedAt,
	})
}

func (h *experimentCommandHandler) archive(ctx context.Context, cmd *proto.ArchiveExperimentCommand) error {
	if err := h.experiment.SetArchived(); err != nil {
		return err
	}
	return h.send(ctx, eventproto.Event_EXPERIMENT_ARCHIVED, &eventproto.ExperimentArchivedEvent{
		Id: h.experiment.Id,
	})
}

func (h *experimentCommandHandler) delete(ctx context.Context, cmd *proto.DeleteExperimentCommand) error {
	if err := h.experiment.SetDeleted(); err != nil {
		return err
	}
	return h.send(ctx, eventproto.Event_EXPERIMENT_DELETED, &eventproto.ExperimentDeletedEvent{
		Id: h.experiment.Id,
	})
}

// send wraps the payload in a domain event (typed EXPERIMENT, keyed by the
// experiment id) and publishes it.
func (h *experimentCommandHandler) send(ctx context.Context, eventType eventproto.Event_Type, event pb.Message) error {
	e, err := domainevent.NewEvent(
		h.editor,
		eventproto.Event_EXPERIMENT,
		h.experiment.Id,
		eventType,
		event,
		h.environmentNamespace,
	)
	if err != nil {
		return err
	}
	// TODO: more reliable
	// TODO: add metrics
	if err := h.publisher.Publish(ctx, e); err != nil {
		return err
	}
	return nil
}

// NOTE(review): unlike the other events above, the Started/Finished payloads
// below are published empty (no Id field set) — presumably those proto
// messages carry no fields; confirm against proto/event/domain.
func (h *experimentCommandHandler) start(ctx context.Context, cmd *proto.StartExperimentCommand) error {
	if err := h.experiment.Start(); err != nil {
		return err
	}
	return h.send(ctx, eventproto.Event_EXPERIMENT_STARTED, &eventproto.ExperimentStartedEvent{})
}

func (h *experimentCommandHandler) finish(ctx context.Context, cmd *proto.FinishExperimentCommand) error {
	if err := h.experiment.Finish(); err != nil {
		return err
	}
	return h.send(ctx, eventproto.Event_EXPERIMENT_FINISHED, &eventproto.ExperimentFinishedEvent{})
}
diff --git a/pkg/experiment/command/experiment_test.go b/pkg/experiment/command/experiment_test.go
new file mode 100644
index 000000000..b455be8b7
--- /dev/null
+++ b/pkg/experiment/command/experiment_test.go
@@ -0,0 +1,178 @@
// Copyright 2022 The Bucketeer Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package command

import (
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/assert"

	experimentdomain "github.com/bucketeer-io/bucketeer/pkg/experiment/domain"
	"github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher"
	publishermock "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher/mock"
	accountproto "github.com/bucketeer-io/bucketeer/proto/account"
	eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain"
	experimentproto "github.com/bucketeer-io/bucketeer/proto/experiment"
	featureproto "github.com/bucketeer-io/bucketeer/proto/feature"
)

// TestChangePeriod drives one shared experiment through a table of period
// changes (success plus two validation failures).
// NOTE(review): the single experiment `e` is reused across all table cases, so
// later cases see earlier mutations — intentional here since only the first
// case succeeds; also this test has no t.Parallel() unlike its siblings.
func TestChangePeriod(t *testing.T) {
	now := time.Now()
	startAt := now.Unix()
	// .Local() is a no-op for Unix(), which is timezone-independent.
	stopAt := now.Local().Add(time.Hour * 1).Unix()

	mockController := gomock.NewController(t)
	defer mockController.Finish()
	m := publishermock.NewMockPublisher(mockController)
	e := newExperiment(startAt, stopAt)
	h := newExperimentCommandHandler(t, m, e)
	patterns := []*struct {
		startAt     int64
		stopAt      int64
		expectedErr error
	}{
		{
			startAt:     startAt + 10,
			stopAt:      stopAt + 10,
			expectedErr: nil,
		},
		{
			startAt:     stopAt + 10,
			stopAt:      startAt + 10,
			expectedErr: experimentdomain.ErrExperimentStartIsAfterStop,
		},
		{
			startAt:     startAt - 100,
			stopAt:      startAt - 10,
			expectedErr: experimentdomain.ErrExperimentStopIsBeforeNow,
		},
	}
	for i, p := range patterns {
		// A publish is only expected when the domain mutation succeeds.
		if p.expectedErr == nil {
			m.EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil)
		}
		cmd := &experimentproto.ChangeExperimentPeriodCommand{StartAt: p.startAt, StopAt: p.stopAt}
		err := h.Handle(context.Background(), cmd)
		des := fmt.Sprintf("index: %d", i)
		assert.Equal(t, p.expectedErr, err, des)
		if err == nil {
			assert.Equal(t, p.startAt, e.Experiment.StartAt, des)
			assert.Equal(t, p.stopAt, e.Experiment.StopAt, des)
		}
	}
}

func TestHandleRenameChangeCommand(t *testing.T) {
	t.Parallel()
	mockController := gomock.NewController(t)
	defer mockController.Finish()
	publisher := publishermock.NewMockPublisher(mockController)
	e := newExperiment(0, 0)
	h := newExperimentCommandHandler(t, publisher, e)
	publisher.EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil)
	newName := "newGName"
	cmd := &experimentproto.ChangeExperimentNameCommand{Name: newName}
	err := h.Handle(context.Background(), cmd)
	assert.NoError(t, err)
	assert.Equal(t, newName, e.Name)
}

func TestHandleChangeDescriptionExperimentCommand(t *testing.T) {
	t.Parallel()
	mockController := gomock.NewController(t)
	defer mockController.Finish()
	publisher := publishermock.NewMockPublisher(mockController)
	e := newExperiment(0, 0)
	h := newExperimentCommandHandler(t, publisher, e)
	publisher.EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil)
	newDesc := "newGDesc"
	cmd := &experimentproto.ChangeExperimentDescriptionCommand{Description: newDesc}
	err := h.Handle(context.Background(), cmd)
	assert.NoError(t, err)
	assert.Equal(t, newDesc, e.Description)
}

func TestHandleArchiveExperimentCommand(t *testing.T) {
	t.Parallel()
	mockController := gomock.NewController(t)
	defer mockController.Finish()
	publisher := publishermock.NewMockPublisher(mockController)
	e := newExperiment(0, 0)
	h := newExperimentCommandHandler(t, publisher, e)
	publisher.EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil)
	cmd := &experimentproto.ArchiveExperimentCommand{}
	err := h.Handle(context.Background(), cmd)
	assert.NoError(t, err)
	assert.True(t, e.Archived)
}

func TestHandleDeleteExperimentCommand(t *testing.T) {
	t.Parallel()
	mockController := gomock.NewController(t)
	defer mockController.Finish()
	publisher := publishermock.NewMockPublisher(mockController)
	e := newExperiment(0, 0)
	h := newExperimentCommandHandler(t, publisher, e)
	publisher.EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil)
	cmd := &experimentproto.DeleteExperimentCommand{}
	err := h.Handle(context.Background(), cmd)
	assert.NoError(t, err)
	assert.True(t, e.Deleted)
}

// newExperiment builds a two-variation test experiment with the given period.
func newExperiment(startAt int64, stopAt int64) *experimentdomain.Experiment {
	return &experimentdomain.Experiment{
		Experiment: &experimentproto.Experiment{
			Id:             "experiment-id",
			GoalId:         "goal-id",
			FeatureId:      "feature-id",
			FeatureVersion: 1,
			Variations: []*featureproto.Variation{
				{
					Id:          "variation-A",
					Value:       "A",
					Name:        "Variation A",
					Description: "Thing does A",
				},
				{
					Id:          "variation-B",
					Value:       "B",
					Name:        "Variation B",
					Description: "Thing does B",
				},
			},
			StartAt:   startAt,
			StopAt:    stopAt,
			CreatedAt: time.Now().Unix(),
		},
	}
}

// newExperimentCommandHandler wraps the experiment in a handler with a fixed
// EDITOR account and the "ns0" environment namespace.
func newExperimentCommandHandler(t *testing.T, publisher publisher.Publisher, experiment *experimentdomain.Experiment) Handler {
	t.Helper()
	return NewExperimentCommandHandler(
		&eventproto.Editor{
			Email: "email",
			Role:  accountproto.Account_EDITOR,
		},
		experiment,
		publisher,
		"ns0",
	)
}
diff --git a/pkg/experiment/command/goal.go b/pkg/experiment/command/goal.go
new file mode 100644
index 000000000..4b3be1249
--- /dev/null
+++ b/pkg/experiment/command/goal.go
@@ -0,0 +1,127 @@
// Copyright 2022 The Bucketeer Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package command

import (
	"context"

	pb "github.com/golang/protobuf/proto" // nolint:staticcheck

	domainevent "github.com/bucketeer-io/bucketeer/pkg/domainevent/domain"
	"github.com/bucketeer-io/bucketeer/pkg/experiment/domain"
	"github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher"
	eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain"
	proto "github.com/bucketeer-io/bucketeer/proto/experiment"
)

// goalCommandHandler mutates a single domain.Goal in response to goal
// commands and publishes one domain event per handled command; it mirrors
// experimentCommandHandler in experiment.go.
type goalCommandHandler struct {
	editor               *eventproto.Editor
	goal                 *domain.Goal
	publisher            publisher.Publisher
	environmentNamespace string
}

// NewGoalCommandHandler returns a Handler bound to the given editor, goal,
// publisher, and environment namespace.
func NewGoalCommandHandler(
	editor *eventproto.Editor,
	goal *domain.Goal,
	p publisher.Publisher,
	environmentNamespace string,
) Handler {
	return &goalCommandHandler{
		editor:               editor,
		goal:                 goal,
		publisher:            p,
		environmentNamespace: environmentNamespace,
	}
}

// Handle dispatches on the concrete proto command type; unrecognized commands
// yield ErrUnknownCommand.
func (h *goalCommandHandler) Handle(ctx context.Context, cmd Command) error {
	switch c := cmd.(type) {
	case *proto.CreateGoalCommand:
		return h.create(ctx, c)
	case *proto.RenameGoalCommand:
		return h.rename(ctx, c)
	case *proto.ChangeDescriptionGoalCommand:
		return h.changeDescription(ctx, c)
	case *proto.ArchiveGoalCommand:
		return h.archive(ctx, c)
	case *proto.DeleteGoalCommand:
		return h.delete(ctx, c)
	default:
		return ErrUnknownCommand
	}
}

// create publishes a CREATED event snapshotting the already-constructed goal;
// it does not mutate the domain object.
func (h *goalCommandHandler) create(ctx context.Context, cmd *proto.CreateGoalCommand) error {
	return h.send(ctx, eventproto.Event_GOAL_CREATED, &eventproto.GoalCreatedEvent{
		Id:          h.goal.Id,
		Name:        h.goal.Name,
		Description: h.goal.Description,
		Deleted:     h.goal.Deleted,
		CreatedAt:   h.goal.CreatedAt,
		UpdatedAt:   h.goal.UpdatedAt,
	})
}

func (h *goalCommandHandler) rename(ctx context.Context, cmd *proto.RenameGoalCommand) error {
	if err := h.goal.Rename(cmd.Name); err != nil {
		return err
	}
	return h.send(ctx, eventproto.Event_GOAL_RENAMED, &eventproto.GoalRenamedEvent{
		Id:   h.goal.Id,
		Name: cmd.Name,
	})
}

func (h *goalCommandHandler) changeDescription(ctx context.Context, cmd *proto.ChangeDescriptionGoalCommand) error {
	if err := h.goal.ChangeDescription(cmd.Description); err != nil {
		return err
	}
	return h.send(ctx, eventproto.Event_GOAL_DESCRIPTION_CHANGED, &eventproto.GoalDescriptionChangedEvent{
		Id:          h.goal.Id,
		Description: cmd.Description,
	})
}

func (h *goalCommandHandler) archive(ctx context.Context, cmd *proto.ArchiveGoalCommand) error {
	if err := h.goal.SetArchived(); err != nil {
		return err
	}
	return h.send(ctx, eventproto.Event_GOAL_ARCHIVED, &eventproto.GoalArchivedEvent{
		Id: h.goal.Id,
	})
}

func (h *goalCommandHandler) delete(ctx context.Context, cmd *proto.DeleteGoalCommand) error {
	if err := h.goal.SetDeleted(); err != nil {
		return err
	}
	return h.send(ctx, eventproto.Event_GOAL_DELETED, &eventproto.GoalDeletedEvent{
		Id: h.goal.Id,
	})
}

// send wraps the payload in a domain event (typed GOAL, keyed by the goal id)
// and publishes it.
func (h *goalCommandHandler) send(ctx context.Context, eventType eventproto.Event_Type, event pb.Message) error {
	e, err := domainevent.NewEvent(h.editor, eventproto.Event_GOAL, h.goal.Id, eventType, event, h.environmentNamespace)
	if err != nil {
		return err
	}
	// TODO: more reliable
	// TODO: add metrics
	if err := h.publisher.Publish(ctx, e); err != nil {
		return err
	}
	return nil
}
diff --git a/pkg/experiment/command/goal_test.go b/pkg/experiment/command/goal_test.go
new file mode 100644
index 000000000..d0ce7521f
--- /dev/null
+++ b/pkg/experiment/command/goal_test.go
@@ -0,0 +1,109 @@
// Copyright 2022 The Bucketeer Authors.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package command + +import ( + "context" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/experiment/domain" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + publishermock "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher/mock" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" + experimentproto "github.com/bucketeer-io/bucketeer/proto/experiment" +) + +func TestHandleRenameGoalCommand(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + publisher := publishermock.NewMockPublisher(mockController) + g, err := domain.NewGoal("gId", "gName", "gDesc") + assert.NoError(t, err) + + h := newGoalCommandHandler(t, publisher, g) + publisher.EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil) + newName := "newGName" + cmd := &experimentproto.RenameGoalCommand{Name: newName} + err = h.Handle(context.Background(), cmd) + assert.NoError(t, err) + assert.Equal(t, newName, g.Name) +} + +func TestHandleChangeDescriptionGoalCommand(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + publisher := publishermock.NewMockPublisher(mockController) + g, err := domain.NewGoal("gId", "gName", "gDesc") + assert.NoError(t, err) + + h := 
newGoalCommandHandler(t, publisher, g) + publisher.EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil) + newDesc := "newGDesc" + cmd := &experimentproto.ChangeDescriptionGoalCommand{Description: newDesc} + err = h.Handle(context.Background(), cmd) + assert.NoError(t, err) + assert.Equal(t, newDesc, g.Description) +} + +func TestHandleArchiveGoalCommand(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + publisher := publishermock.NewMockPublisher(mockController) + g, err := domain.NewGoal("gId", "gName", "gDesc") + assert.NoError(t, err) + + h := newGoalCommandHandler(t, publisher, g) + publisher.EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil) + cmd := &experimentproto.ArchiveGoalCommand{} + err = h.Handle(context.Background(), cmd) + assert.NoError(t, err) + assert.True(t, g.Archived) +} + +func TestHandleDeleteGoalCommand(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + publisher := publishermock.NewMockPublisher(mockController) + g, err := domain.NewGoal("gId", "gName", "gDesc") + assert.NoError(t, err) + + h := newGoalCommandHandler(t, publisher, g) + publisher.EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil) + cmd := &experimentproto.DeleteGoalCommand{} + err = h.Handle(context.Background(), cmd) + assert.NoError(t, err) + assert.True(t, g.Deleted) +} + +func newGoalCommandHandler(t *testing.T, publisher publisher.Publisher, goal *domain.Goal) Handler { + t.Helper() + return NewGoalCommandHandler( + &eventproto.Editor{ + Email: "email", + Role: accountproto.Account_EDITOR, + }, + goal, + publisher, + "ns0", + ) +} diff --git a/pkg/experiment/domain/BUILD.bazel b/pkg/experiment/domain/BUILD.bazel new file mode 100644 index 000000000..357e3f6e7 --- /dev/null +++ b/pkg/experiment/domain/BUILD.bazel @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + 
srcs = [ + "experiment.go", + "goal.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/experiment/domain", + visibility = ["//visibility:public"], + deps = [ + "//pkg/uuid:go_default_library", + "//proto/experiment:go_default_library", + "//proto/feature:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "experiment_test.go", + "goal_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//proto/experiment:go_default_library", + "//proto/feature:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + ], +) diff --git a/pkg/experiment/domain/experiment.go b/pkg/experiment/domain/experiment.go new file mode 100644 index 000000000..aae8a165a --- /dev/null +++ b/pkg/experiment/domain/experiment.go @@ -0,0 +1,236 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package domain + +import ( + "errors" + "math" + "time" + + "github.com/bucketeer-io/bucketeer/pkg/uuid" + experimentproto "github.com/bucketeer-io/bucketeer/proto/experiment" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +var ( + ErrExperimentBeforeStart = errors.New("experiment: start timestamp is greater than now") + ErrExperimentBeforeStop = errors.New("experiment: stop timestamp is greater than now") + ErrExperimentStatusInvalid = errors.New("experiment: experiment status is invalid") + ErrExperimentAlreadyStopped = errors.New("experiment: experiment is already stopped") + ErrExperimentStartIsAfterStop = errors.New("experiment: start is after stop timestamp") + ErrExperimentStopIsBeforeStart = errors.New("experiment: stop is before start timestamp") + ErrExperimentStopIsBeforeNow = errors.New("experiment: stop is same or older than now timestamp") +) + +type Experiment struct { + *experimentproto.Experiment +} + +func NewExperiment( + featureID string, + featureVersion int32, + variations []*featureproto.Variation, + goalIDs []string, + startAt int64, + stopAt int64, + name string, + description string, + baseVariationID string, + maintainer string) (*Experiment, error) { + + id, err := uuid.NewUUID() + if err != nil { + return nil, err + } + goalIDs = removeDuplicated(goalIDs) + now := time.Now().Unix() + return &Experiment{ + &experimentproto.Experiment{ + Id: id.String(), + FeatureId: featureID, + FeatureVersion: featureVersion, + Variations: variations, + GoalIds: goalIDs, + StartAt: startAt, + StopAt: stopAt, + StoppedAt: math.MaxInt64, + CreatedAt: now, + UpdatedAt: now, + Name: name, + Description: description, + BaseVariationId: baseVariationID, + Status: experimentproto.Experiment_WAITING, + Maintainer: maintainer, + }, + }, nil +} + +func removeDuplicated(args []string) []string { + results := make([]string, 0, len(args)) + encountered := map[string]bool{} + for _, v := range args { + if _, duplicated := encountered[v]; 
!duplicated { + results = append(results, v) + encountered[v] = true + } + } + return results +} + +func (e *Experiment) Start() error { + if e.Status != experimentproto.Experiment_WAITING { + return ErrExperimentStatusInvalid + } + now := time.Now().Unix() + if e.StartAt > now { + return ErrExperimentBeforeStart + } + e.Experiment.Status = experimentproto.Experiment_RUNNING + e.Experiment.UpdatedAt = now + return nil +} + +func (e *Experiment) Finish() error { + if e.Status != experimentproto.Experiment_WAITING && e.Status != experimentproto.Experiment_RUNNING { + return ErrExperimentStatusInvalid + } + now := time.Now().Unix() + if e.StopAt > now { + return ErrExperimentBeforeStop + } + e.Experiment.Status = experimentproto.Experiment_STOPPED + e.Experiment.UpdatedAt = now + return nil +} + +func (e *Experiment) Stop() error { + if e.Stopped { + return ErrExperimentAlreadyStopped + } + now := time.Now().Unix() + e.Experiment.Stopped = true + e.Experiment.Status = experimentproto.Experiment_FORCE_STOPPED + e.Experiment.StoppedAt = now + e.Experiment.UpdatedAt = now + return nil +} + +func (e *Experiment) ChangePeriod(startAt, stopAt int64) error { + if err := e.validatePeriod(startAt, stopAt); err != nil { + return err + } + e.Experiment.StartAt = startAt + e.Experiment.StopAt = stopAt + e.Experiment.UpdatedAt = time.Now().Unix() + return nil +} + +func (e *Experiment) validatePeriod(startAt, stopAt int64) error { + if startAt >= stopAt { + return ErrExperimentStartIsAfterStop + } + if stopAt <= time.Now().Unix() { + return ErrExperimentStopIsBeforeNow + } + return nil +} + +func (e *Experiment) ChangeStartAt(startAt int64) error { + if err := e.validateStartAt(startAt); err != nil { + return err + } + e.Experiment.StartAt = startAt + e.Experiment.UpdatedAt = time.Now().Unix() + return nil +} + +func (e *Experiment) validateStartAt(startAt int64) error { + if startAt >= e.Experiment.StopAt { + return ErrExperimentStartIsAfterStop + } + return nil +} + +func (e 
*Experiment) ChangeStopAt(stopAt int64) error { + if err := e.validateStopAt(stopAt); err != nil { + return err + } + e.Experiment.StopAt = stopAt + e.Experiment.UpdatedAt = time.Now().Unix() + return nil +} + +func (e *Experiment) ChangeName(name string) error { + e.Experiment.Name = name + e.Experiment.UpdatedAt = time.Now().Unix() + return nil +} + +func (e *Experiment) ChangeDescription(description string) error { + e.Experiment.Description = description + e.Experiment.UpdatedAt = time.Now().Unix() + return nil +} + +func (e *Experiment) validateStopAt(stopAt int64) error { + if stopAt <= e.Experiment.StartAt { + return ErrExperimentStopIsBeforeStart + } + if stopAt <= time.Now().Unix() { + return ErrExperimentStopIsBeforeNow + } + return nil +} + +func (e *Experiment) SetArchived() error { + e.Experiment.Archived = true + e.Experiment.UpdatedAt = time.Now().Unix() + return nil +} + +func (e *Experiment) SetDeleted() error { + e.Experiment.Deleted = true + e.Experiment.UpdatedAt = time.Now().Unix() + return nil +} + +// SyncGoalIDs syncs goalID and goalIDs. +// FIXME: This function is needed until admin UI implements multiple goals. +func SyncGoalIDs(goalID string, goalIDs []string) (string, []string) { + if goalID == "" && len(goalIDs) == 0 { + return "", nil + } + if goalID == "" { + return goalIDs[0], goalIDs + } + if len(goalIDs) == 0 { + return goalID, []string{goalID} + } + return goalID, goalIDs +} + +// IsNotFinished returns true if the status is either waiting or running. 
+func (e *Experiment) IsNotFinished(t time.Time) bool { + if e.Experiment.Deleted { + return false + } + if e.Experiment.StopAt <= t.Unix() { + return false + } + if e.Experiment.StoppedAt <= t.Unix() { + return false + } + return true +} diff --git a/pkg/experiment/domain/experiment_test.go b/pkg/experiment/domain/experiment_test.go new file mode 100644 index 000000000..d4e5b2cd4 --- /dev/null +++ b/pkg/experiment/domain/experiment_test.go @@ -0,0 +1,515 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package domain + +import ( + "math" + "reflect" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + experimentproto "github.com/bucketeer-io/bucketeer/proto/experiment" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +func TestNewExperiment(t *testing.T) { + featureID := "id" + featureVersion := int32(1) + variations := []*featureproto.Variation{ + { + Value: "A", + Name: "Variation A", + Description: "Thing does A", + }, + { + Value: "B", + Name: "Variation B", + Description: "Thing does B", + }, + { + Value: "C", + Name: "Variation C", + Description: "Thing does C", + }, + } + goalIDs := []string{"id-1", "id-2"} + startAt := int64(10) + stopAt := int64(20) + name := "name" + description := "description" + baseVariationId := "baseVariationId" + maintainer := "bucketeer@example.com" + + e, err := NewExperiment( + featureID, + featureVersion, + variations, + goalIDs, + startAt, + stopAt, + name, + description, + baseVariationId, + maintainer, + ) + + assert.NoError(t, err) + assert.Equal(t, featureID, e.FeatureId) + assert.Equal(t, featureVersion, e.FeatureVersion) + if !reflect.DeepEqual(variations, e.Variations) { + t.Fatal("Variations not equal") + } + if !reflect.DeepEqual(goalIDs, e.GoalIds) { + t.Fatal("GoalIDs not equal") + } + assert.Equal(t, startAt, e.StartAt) + assert.Equal(t, stopAt, e.StopAt) + assert.Equal(t, name, e.Name) + assert.Equal(t, description, e.Description) + assert.Equal(t, baseVariationId, e.BaseVariationId) + assert.Equal(t, maintainer, e.Maintainer) +} + +func TestRenameExperiment(t *testing.T) { + t.Parallel() + e := newExperiment(t) + newName := "newGName" + err := e.ChangeName(newName) + assert.NoError(t, err) + assert.Equal(t, newName, e.Name) +} + +func TestChangeDescriptionExperiment(t *testing.T) { + t.Parallel() + e := newExperiment(t) + newDesc := "newGDesc" + err := e.ChangeDescription(newDesc) + assert.NoError(t, err) + assert.Equal(t, newDesc, 
e.Description) +} + +func TestSetArchivedExperiment(t *testing.T) { + t.Parallel() + e := newExperiment(t) + err := e.SetArchived() + assert.NoError(t, err) + assert.True(t, e.Archived) +} + +func TestSetDeletedExperiment(t *testing.T) { + t.Parallel() + e := newExperiment(t) + err := e.SetDeleted() + assert.NoError(t, err) + assert.True(t, e.Deleted) +} + +func TestChangeStartAt(t *testing.T) { + t.Parallel() + stopAt := time.Now().Unix() + e := &Experiment{ + &experimentproto.Experiment{StopAt: stopAt}, + } + patterns := []*struct { + startAt int64 + expected error + }{ + { + startAt: stopAt + 1, + expected: ErrExperimentStartIsAfterStop, + }, + { + startAt: stopAt - 1, + expected: nil, + }, + } + for i, p := range patterns { + actual := e.ChangeStartAt(p.startAt) + assert.Equal(t, p.expected, actual, "i=%s", i) + } +} + +func TestChangeStopAt(t *testing.T) { + t.Parallel() + now := time.Now().Unix() + startAt := now - 10 + stopAt := now + 10 + e := &Experiment{ + &experimentproto.Experiment{StartAt: startAt}, + } + patterns := []*struct { + stopAt int64 + expected error + }{ + { + stopAt: startAt - 10, + expected: ErrExperimentStopIsBeforeStart, + }, + { + stopAt: now - 5, + expected: ErrExperimentStopIsBeforeNow, + }, + { + stopAt: stopAt, + expected: nil, + }, + } + for i, p := range patterns { + actual := e.ChangeStopAt(p.stopAt) + assert.Equal(t, p.expected, actual, "i=%s", i) + } +} + +func TestChangePeriod(t *testing.T) { + t.Parallel() + now := time.Now().Unix() + startAt := now - 10 + stopAt := now + 10 + e := &Experiment{ + &experimentproto.Experiment{StartAt: startAt, StopAt: stopAt}, + } + patterns := []*struct { + startAt int64 + stopAt int64 + expected error + }{ + { + startAt: startAt - 10, + stopAt: startAt - 10, + expected: ErrExperimentStartIsAfterStop, + }, + { + startAt: startAt - 10, + stopAt: startAt - 11, + expected: ErrExperimentStartIsAfterStop, + }, + { + startAt: startAt - 10, + stopAt: startAt - 9, + expected: 
ErrExperimentStopIsBeforeNow, + }, + { + startAt: startAt + 10, + stopAt: stopAt + 10, + expected: nil, + }, + } + for i, p := range patterns { + actual := e.ChangePeriod(p.startAt, p.stopAt) + assert.Equal(t, p.expected, actual, "i=%s", i) + } +} + +func TestRemoveDuplicated(t *testing.T) { + t.Parallel() + patterns := []*struct { + input []string + expected []string + }{ + { + input: []string{"gid"}, + expected: []string{"gid"}, + }, + { + input: []string{"gid", "gid"}, + expected: []string{"gid"}, + }, + { + input: []string{}, + expected: []string{}, + }, + } + for i, p := range patterns { + actual := removeDuplicated(p.input) + assert.Equal(t, p.expected, actual, "i=%s", i) + } +} + +func TestStartExperiment(t *testing.T) { + t.Parallel() + patterns := map[string]*struct { + input *Experiment + expectedErr error + }{ + "error not waiting": { + input: &Experiment{&experimentproto.Experiment{ + Id: "eID", + Status: experimentproto.Experiment_RUNNING, + }}, + expectedErr: ErrExperimentStatusInvalid, + }, + "error before start at": { + input: &Experiment{&experimentproto.Experiment{ + Id: "eID", + Status: experimentproto.Experiment_WAITING, + StartAt: time.Now().AddDate(0, 0, 1).Unix(), + }}, + expectedErr: ErrExperimentBeforeStart, + }, + "success": { + input: &Experiment{&experimentproto.Experiment{ + Id: "eID", + Status: experimentproto.Experiment_WAITING, + StartAt: time.Now().Unix(), + }}, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + err := p.input.Start() + if p.expectedErr != nil { + assert.Equal(t, p.expectedErr, err) + } else { + assert.NoError(t, err) + assert.Equal(t, p.input.Experiment.Status, experimentproto.Experiment_RUNNING) + } + }) + } +} + +func TestFinishExperiment(t *testing.T) { + t.Parallel() + patterns := map[string]*struct { + input *Experiment + expectedErr error + }{ + "error invalid status": { + input: &Experiment{&experimentproto.Experiment{ + Id: "eID", + Status: 
experimentproto.Experiment_STOPPED, + }}, + expectedErr: ErrExperimentStatusInvalid, + }, + "error before stop at": { + input: &Experiment{&experimentproto.Experiment{ + Id: "eID", + Status: experimentproto.Experiment_RUNNING, + StopAt: time.Now().AddDate(0, 0, 1).Unix(), + }}, + expectedErr: ErrExperimentBeforeStop, + }, + "success: waiting": { + input: &Experiment{&experimentproto.Experiment{ + Id: "eID", + Status: experimentproto.Experiment_WAITING, + StopAt: time.Now().Unix(), + }}, + expectedErr: nil, + }, + "success: running": { + input: &Experiment{&experimentproto.Experiment{ + Id: "eID", + Status: experimentproto.Experiment_RUNNING, + StopAt: time.Now().Unix(), + }}, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + err := p.input.Finish() + if p.expectedErr != nil { + assert.Equal(t, p.expectedErr, err) + } else { + assert.NoError(t, err) + assert.Equal(t, p.input.Experiment.Status, experimentproto.Experiment_STOPPED) + } + }) + } +} + +func TestStopExperiment(t *testing.T) { + t.Parallel() + patterns := []*struct { + input *Experiment + expectedErr error + }{ + { + input: &Experiment{&experimentproto.Experiment{Id: "eID", Stopped: true, StopAt: time.Now().Unix()}}, + expectedErr: ErrExperimentAlreadyStopped, + }, + { + input: &Experiment{&experimentproto.Experiment{Id: "eID"}}, + expectedErr: nil, + }, + } + for i, p := range patterns { + oldStoppedAt := p.input.StoppedAt + err := p.input.Stop() + if p.expectedErr != nil { + assert.Equal(t, p.expectedErr, err, "i=%s", i) + } else { + assert.NoError(t, err, "i=%s", i) + assert.True(t, p.input.Stopped, "i=%s", i) + assert.NotEqual(t, oldStoppedAt, p.input.StoppedAt, "i=%s", i) + assert.Equal(t, p.input.Experiment.Status, experimentproto.Experiment_FORCE_STOPPED) + } + } +} + +func TestSyncGoalIDs(t *testing.T) { + t.Parallel() + patterns := []*struct { + goalID string + goalIDs []string + exGoalID string + exGoalIDs []string + }{ + { + goalID: "gid", + 
exGoalID: "gid", + exGoalIDs: []string{"gid"}, + }, + { + goalIDs: []string{"gid"}, + exGoalID: "gid", + exGoalIDs: []string{"gid"}, + }, + { + goalID: "", + goalIDs: []string{"gid-0", "gid-1"}, + exGoalID: "gid-0", + exGoalIDs: []string{"gid-0", "gid-1"}, + }, + } + for i, p := range patterns { + gid, gids := SyncGoalIDs(p.goalID, p.goalIDs) + assert.Equal(t, p.exGoalID, gid, "i=%s", i) + assert.Equal(t, p.exGoalIDs, gids, "i=%s", i) + } +} + +func TestIsNotFinished(t *testing.T) { + t.Parallel() + layout := "2006-01-02 15:04:05 -0700 MST" + t1, err := time.Parse(layout, "2014-01-17 23:02:03 +0000 UTC") + require.NoError(t, err) + t2, err := time.Parse(layout, "2014-01-18 23:02:03 +0000 UTC") + require.NoError(t, err) + t3, err := time.Parse(layout, "2014-01-19 23:02:03 +0000 UTC") + require.NoError(t, err) + t4, err := time.Parse(layout, "2014-01-20 23:02:03 +0000 UTC") + require.NoError(t, err) + patterns := map[string]struct { + experiment *Experiment + input time.Time + expected bool + }{ + "before StartAt": { + experiment: &Experiment{Experiment: &experimentproto.Experiment{ + Deleted: false, + StartAt: t2.Unix(), + StopAt: t3.Unix(), + StoppedAt: math.MaxInt64, + }}, + input: t1, + expected: true, + }, + "running": { + experiment: &Experiment{Experiment: &experimentproto.Experiment{ + Deleted: false, + StartAt: t1.Unix(), + StopAt: t3.Unix(), + StoppedAt: math.MaxInt64, + }}, + input: t2, + expected: true, + }, + "after StoppedAt, before StoppAt": { + experiment: &Experiment{Experiment: &experimentproto.Experiment{ + Deleted: false, + StartAt: t1.Unix(), + StopAt: t4.Unix(), + StoppedAt: t2.Unix(), + }}, + input: t3, + expected: false, + }, + "after StopAt and StoppedAt": { + experiment: &Experiment{Experiment: &experimentproto.Experiment{ + Deleted: false, + StartAt: t1.Unix(), + StopAt: t3.Unix(), + StoppedAt: t2.Unix(), + }}, + input: t4, + expected: false, + }, + "Deleted": { + experiment: &Experiment{Experiment: &experimentproto.Experiment{ + Deleted: 
true, + StartAt: t1.Unix(), + StopAt: t3.Unix(), + StoppedAt: math.MaxInt64, + }}, + input: t2, + expected: false, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + assert.Equal(t, p.expected, p.experiment.IsNotFinished(p.input)) + }) + } +} + +func newExperiment(t *testing.T) *Experiment { + t.Helper() + featureID := "id" + featureVersion := int32(1) + variations := []*featureproto.Variation{ + { + Value: "A", + Name: "Variation A", + Description: "Thing does A", + }, + { + Value: "B", + Name: "Variation B", + Description: "Thing does B", + }, + { + Value: "C", + Name: "Variation C", + Description: "Thing does C", + }, + } + goalIDs := []string{"id-1", "id-2"} + startAt := int64(10) + stopAt := int64(20) + name := "name" + description := "description" + baseVariationId := "baseVariationId" + maintainer := "bucketeer@example.com" + + e, err := NewExperiment( + featureID, + featureVersion, + variations, + goalIDs, + startAt, + stopAt, + name, + description, + baseVariationId, + maintainer, + ) + assert.NoError(t, err) + return e +} diff --git a/pkg/experiment/domain/goal.go b/pkg/experiment/domain/goal.go new file mode 100644 index 000000000..b1ade7069 --- /dev/null +++ b/pkg/experiment/domain/goal.go @@ -0,0 +1,60 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package domain
+
+import (
+	"time"
+
+	proto "github.com/bucketeer-io/bucketeer/proto/experiment"
+)
+
+// Goal is the domain wrapper around the experiment Goal protobuf message.
+// It embeds the proto so callers can read fields directly; every mutating
+// method below also refreshes UpdatedAt with the current Unix time.
+type Goal struct {
+	*proto.Goal
+}
+
+// NewGoal constructs a Goal with the given id, name, and description,
+// stamping CreatedAt and UpdatedAt with the current Unix time.
+// NOTE: it currently never returns a non-nil error; the error result
+// keeps the signature parallel with other domain constructors
+// (e.g. NewExperiment, which can fail on UUID generation).
+func NewGoal(id, name, description string) (*Goal, error) {
+	now := time.Now().Unix()
+	return &Goal{&proto.Goal{
+		Id:          id,
+		Name:        name,
+		Description: description,
+		CreatedAt:   now,
+		UpdatedAt:   now,
+	}}, nil
+}
+
+// Rename replaces the goal's name and bumps UpdatedAt. Always returns nil.
+func (g *Goal) Rename(name string) error {
+	g.Goal.Name = name
+	g.Goal.UpdatedAt = time.Now().Unix()
+	return nil
+}
+
+// ChangeDescription replaces the goal's description and bumps UpdatedAt.
+// Always returns nil.
+func (g *Goal) ChangeDescription(description string) error {
+	g.Goal.Description = description
+	g.Goal.UpdatedAt = time.Now().Unix()
+	return nil
+}
+
+// SetArchived marks the goal as archived and bumps UpdatedAt.
+// It is not idempotent-guarded: archiving an already-archived goal
+// simply refreshes UpdatedAt. Always returns nil.
+func (g *Goal) SetArchived() error {
+	g.Goal.Archived = true
+	g.Goal.UpdatedAt = time.Now().Unix()
+	return nil
+}
+
+// SetDeleted soft-deletes the goal (sets the Deleted flag) and bumps
+// UpdatedAt. Always returns nil.
+func (g *Goal) SetDeleted() error {
+	g.Goal.Deleted = true
+	g.Goal.UpdatedAt = time.Now().Unix()
+	return nil
+}
diff --git a/pkg/experiment/domain/goal_test.go b/pkg/experiment/domain/goal_test.go
new file mode 100644
index 000000000..991f93b81
--- /dev/null
+++ b/pkg/experiment/domain/goal_test.go
@@ -0,0 +1,64 @@
+// Copyright 2022 The Bucketeer Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package domain + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/stretchr/testify/assert" +) + +func TestRenameGoal(t *testing.T) { + t.Parallel() + g := newGoal(t) + newName := "newGName" + err := g.Rename(newName) + assert.NoError(t, err) + assert.Equal(t, newName, g.Name) +} + +func TestChangeDescriptionGoal(t *testing.T) { + t.Parallel() + g := newGoal(t) + newDesc := "newGDesc" + err := g.ChangeDescription(newDesc) + assert.NoError(t, err) + assert.Equal(t, newDesc, g.Description) +} + +func TestSetArchivedGoal(t *testing.T) { + t.Parallel() + g := newGoal(t) + err := g.SetArchived() + assert.NoError(t, err) + assert.True(t, g.Archived) +} + +func TestSetDeletedGoal(t *testing.T) { + t.Parallel() + g := newGoal(t) + err := g.SetDeleted() + assert.NoError(t, err) + assert.True(t, g.Deleted) +} + +func newGoal(t *testing.T) *Goal { + t.Helper() + g, err := NewGoal("gID", "gName", "gDesc") + require.NoError(t, err) + return g +} diff --git a/pkg/experiment/storage/v2/BUILD.bazel b/pkg/experiment/storage/v2/BUILD.bazel new file mode 100644 index 000000000..cebf92667 --- /dev/null +++ b/pkg/experiment/storage/v2/BUILD.bazel @@ -0,0 +1,33 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "experiment.go", + "goal.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/experiment/storage/v2", + visibility = ["//visibility:public"], + deps = [ + "//pkg/experiment/domain:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//proto/experiment:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "experiment_test.go", + "goal_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//pkg/experiment/domain:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//pkg/storage/v2/mysql/mock:go_default_library", + "//proto/experiment:go_default_library", + 
"@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + ], +) diff --git a/pkg/experiment/storage/v2/experiment.go b/pkg/experiment/storage/v2/experiment.go new file mode 100644 index 000000000..0aa17415b --- /dev/null +++ b/pkg/experiment/storage/v2/experiment.go @@ -0,0 +1,345 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package v2 + +import ( + "context" + "errors" + "fmt" + + "github.com/bucketeer-io/bucketeer/pkg/experiment/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + proto "github.com/bucketeer-io/bucketeer/proto/experiment" +) + +var ( + ErrExperimentAlreadyExists = errors.New("experiment: already exists") + ErrExperimentNotFound = errors.New("experiment: not found") + ErrExperimentUnexpectedAffectedRows = errors.New("experiment: unexpected affected rows") +) + +type ExperimentStorage interface { + CreateExperiment(ctx context.Context, e *domain.Experiment, environmentNamespace string) error + UpdateExperiment(ctx context.Context, e *domain.Experiment, environmentNamespace string) error + GetExperiment(ctx context.Context, id, environmentNamespace string) (*domain.Experiment, error) + ListExperiments( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, + ) ([]*proto.Experiment, int, int64, 
error) +} + +type experimentStorage struct { + qe mysql.QueryExecer +} + +func NewExperimentStorage(qe mysql.QueryExecer) ExperimentStorage { + return &experimentStorage{qe: qe} +} + +func (s *experimentStorage) CreateExperiment( + ctx context.Context, + e *domain.Experiment, + environmentNamespace string, +) error { + query := ` + INSERT INTO experiment ( + id, + goal_id, + feature_id, + feature_version, + variations, + start_at, + stop_at, + stopped, + stopped_at, + created_at, + updated_at, + archived, + deleted, + goal_ids, + name, + description, + base_variation_id, + status, + maintainer, + environment_namespace + ) VALUES ( + ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, + ?, ?, ?, ?, ?, ?, ?, ?, ?, ? + ) + ` + _, err := s.qe.ExecContext( + ctx, + query, + e.Id, + e.GoalId, + e.FeatureId, + e.FeatureVersion, + mysql.JSONObject{Val: e.Variations}, + e.StartAt, + e.StopAt, + e.Stopped, + e.StoppedAt, + e.CreatedAt, + e.UpdatedAt, + e.Archived, + e.Deleted, + mysql.JSONObject{Val: e.GoalIds}, + e.Name, + e.Description, + e.BaseVariationId, + int32(e.Status), + e.Maintainer, + environmentNamespace, + ) + if err != nil { + if err == mysql.ErrDuplicateEntry { + return ErrExperimentAlreadyExists + } + return err + } + return nil +} + +func (s *experimentStorage) UpdateExperiment( + ctx context.Context, + e *domain.Experiment, + environmentNamespace string, +) error { + query := ` + UPDATE + experiment + SET + goal_id = ?, + feature_id = ?, + feature_version = ?, + variations = ?, + start_at = ?, + stop_at = ?, + stopped = ?, + stopped_at = ?, + created_at = ?, + updated_at = ?, + archived = ?, + deleted = ?, + goal_ids = ?, + name = ?, + description = ?, + base_variation_id = ?, + maintainer = ?, + status = ? + WHERE + id = ? AND + environment_namespace = ? 
+ ` + result, err := s.qe.ExecContext( + ctx, + query, + e.GoalId, + e.FeatureId, + e.FeatureVersion, + mysql.JSONObject{Val: e.Variations}, + e.StartAt, + e.StopAt, + e.Stopped, + e.StoppedAt, + e.CreatedAt, + e.UpdatedAt, + e.Archived, + e.Deleted, + mysql.JSONObject{Val: e.GoalIds}, + e.Name, + e.Description, + e.BaseVariationId, + e.Maintainer, + int32(e.Status), + e.Id, + environmentNamespace, + ) + if err != nil { + return err + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return err + } + if rowsAffected != 1 { + return ErrExperimentUnexpectedAffectedRows + } + return nil +} + +func (s *experimentStorage) GetExperiment( + ctx context.Context, + id, environmentNamespace string, +) (*domain.Experiment, error) { + experiment := proto.Experiment{} + var status int32 + query := ` + SELECT + id, + goal_id, + feature_id, + feature_version, + variations, + start_at, + stop_at, + stopped, + stopped_at, + created_at, + updated_at, + archived, + deleted, + goal_ids, + name, + description, + base_variation_id, + maintainer, + status + FROM + experiment + WHERE + id = ? AND + environment_namespace = ? 
+ ` + err := s.qe.QueryRowContext( + ctx, + query, + id, + environmentNamespace, + ).Scan( + &experiment.Id, + &experiment.GoalId, + &experiment.FeatureId, + &experiment.FeatureVersion, + &mysql.JSONObject{Val: &experiment.Variations}, + &experiment.StartAt, + &experiment.StopAt, + &experiment.Stopped, + &experiment.StoppedAt, + &experiment.CreatedAt, + &experiment.UpdatedAt, + &experiment.Archived, + &experiment.Deleted, + &mysql.JSONObject{Val: &experiment.GoalIds}, + &experiment.Name, + &experiment.Description, + &experiment.BaseVariationId, + &experiment.Maintainer, + &status, + ) + if err != nil { + if err == mysql.ErrNoRows { + return nil, ErrExperimentNotFound + } + return nil, err + } + experiment.Status = proto.Experiment_Status(status) + return &domain.Experiment{Experiment: &experiment}, nil +} + +func (s *experimentStorage) ListExperiments( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, +) ([]*proto.Experiment, int, int64, error) { + whereSQL, whereArgs := mysql.ConstructWhereSQLString(whereParts) + orderBySQL := mysql.ConstructOrderBySQLString(orders) + limitOffsetSQL := mysql.ConstructLimitOffsetSQLString(limit, offset) + query := fmt.Sprintf(` + SELECT + id, + goal_id, + feature_id, + feature_version, + variations, + start_at, + stop_at, + stopped, + stopped_at, + created_at, + updated_at, + archived, + deleted, + goal_ids, + name, + description, + base_variation_id, + maintainer, + status + FROM + experiment + %s %s %s + `, whereSQL, orderBySQL, limitOffsetSQL, + ) + rows, err := s.qe.QueryContext(ctx, query, whereArgs...) 
+ if err != nil { + return nil, 0, 0, err + } + defer rows.Close() + experiments := make([]*proto.Experiment, 0, limit) + for rows.Next() { + experiment := proto.Experiment{} + var status int32 + err := rows.Scan( + &experiment.Id, + &experiment.GoalId, + &experiment.FeatureId, + &experiment.FeatureVersion, + &mysql.JSONObject{Val: &experiment.Variations}, + &experiment.StartAt, + &experiment.StopAt, + &experiment.Stopped, + &experiment.StoppedAt, + &experiment.CreatedAt, + &experiment.UpdatedAt, + &experiment.Archived, + &experiment.Deleted, + &mysql.JSONObject{Val: &experiment.GoalIds}, + &experiment.Name, + &experiment.Description, + &experiment.BaseVariationId, + &experiment.Maintainer, + &status, + ) + if err != nil { + return nil, 0, 0, err + } + experiment.Status = proto.Experiment_Status(status) + experiments = append(experiments, &experiment) + } + if rows.Err() != nil { + return nil, 0, 0, err + } + nextOffset := offset + len(experiments) + var totalCount int64 + countQuery := fmt.Sprintf(` + SELECT + COUNT(1) + FROM + experiment + %s %s + `, whereSQL, orderBySQL, + ) + err = s.qe.QueryRowContext(ctx, countQuery, whereArgs...).Scan(&totalCount) + if err != nil { + return nil, 0, 0, err + } + return experiments, nextOffset, totalCount, nil +} diff --git a/pkg/experiment/storage/v2/experiment_test.go b/pkg/experiment/storage/v2/experiment_test.go new file mode 100644 index 000000000..bb7f085de --- /dev/null +++ b/pkg/experiment/storage/v2/experiment_test.go @@ -0,0 +1,266 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v2 + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/experiment/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + proto "github.com/bucketeer-io/bucketeer/proto/experiment" +) + +func TestNewExperimentStorage(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + db := NewExperimentStorage(mock.NewMockQueryExecer(mockController)) + assert.IsType(t, &experimentStorage{}, db) +} + +func TestCreateExperiment(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := []struct { + setup func(*experimentStorage) + input *domain.Experiment + environmentNamespace string + expectedErr error + }{ + { + setup: func(s *experimentStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, mysql.ErrDuplicateEntry) + }, + input: &domain.Experiment{ + Experiment: &proto.Experiment{Id: "id-0"}, + }, + environmentNamespace: "ns0", + expectedErr: ErrExperimentAlreadyExists, + }, + { + setup: func(s *experimentStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, nil) + }, + input: &domain.Experiment{ + Experiment: &proto.Experiment{Id: "id-1"}, + }, + environmentNamespace: "ns0", + expectedErr: nil, + }, + 
} + for _, p := range patterns { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + db := newExperimentStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(db) + } + err := db.CreateExperiment(ctx, p.input, p.environmentNamespace) + assert.Equal(t, p.expectedErr, err) + } +} + +func TestUpdateExperiment(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := []struct { + setup func(*experimentStorage) + input *domain.Experiment + environmentNamespace string + expectedErr error + }{ + { + setup: func(s *experimentStorage) { + result := mock.NewMockResult(mockController) + result.EXPECT().RowsAffected().Return(int64(0), nil) + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(result, nil) + }, + input: &domain.Experiment{ + Experiment: &proto.Experiment{Id: "id-0"}, + }, + environmentNamespace: "ns", + expectedErr: ErrExperimentUnexpectedAffectedRows, + }, + { + setup: func(s *experimentStorage) { + result := mock.NewMockResult(mockController) + result.EXPECT().RowsAffected().Return(int64(1), nil) + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(result, nil) + }, + input: &domain.Experiment{ + Experiment: &proto.Experiment{Id: "id-0"}, + }, + environmentNamespace: "ns", + expectedErr: nil, + }, + } + for _, p := range patterns { + storage := newExperimentStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + err := storage.UpdateExperiment(context.Background(), p.input, p.environmentNamespace) + assert.Equal(t, p.expectedErr, err) + } +} + +func TestGetExperiment(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := []struct { + setup func(*experimentStorage) + input string + environmentNamespace string + expected *domain.Experiment + 
expectedErr error + }{ + { + setup: func(s *experimentStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + input: "", + environmentNamespace: "ns0", + expected: nil, + expectedErr: ErrExperimentNotFound, + }, + { + setup: func(s *experimentStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + input: "id-0", + environmentNamespace: "ns0", + expected: &domain.Experiment{ + Experiment: &proto.Experiment{Id: "id-0"}, + }, + expectedErr: nil, + }, + } + for _, p := range patterns { + storage := newExperimentStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + _, err := storage.GetExperiment(context.Background(), p.input, p.environmentNamespace) + assert.Equal(t, p.expectedErr, err) + } +} + +func TestListExperiments(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := []struct { + setup func(*experimentStorage) + whereParts []mysql.WherePart + orders []*mysql.Order + limit int + offset int + expected []*proto.Experiment + expectedCursor int + expectedErr error + }{ + { + setup: func(s *experimentStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + }, + whereParts: nil, + orders: nil, + limit: 0, + offset: 0, + expected: nil, + expectedCursor: 0, + expectedErr: errors.New("error"), + }, + { + setup: func(s *experimentStorage) { + rows := mock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryContext( + 
gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + whereParts: []mysql.WherePart{ + mysql.NewFilter("num", ">=", 5), + }, + orders: []*mysql.Order{ + mysql.NewOrder("id", mysql.OrderDirectionAsc), + }, + limit: 10, + offset: 5, + expected: []*proto.Experiment{}, + expectedCursor: 5, + expectedErr: nil, + }, + } + for _, p := range patterns { + storage := newExperimentStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + experiments, cursor, _, err := storage.ListExperiments( + context.Background(), + p.whereParts, + p.orders, + p.limit, + p.offset, + ) + assert.Equal(t, p.expected, experiments) + assert.Equal(t, p.expectedCursor, cursor) + assert.Equal(t, p.expectedErr, err) + } +} + +func newExperimentStorageWithMock(t *testing.T, mockController *gomock.Controller) *experimentStorage { + t.Helper() + return &experimentStorage{mock.NewMockQueryExecer(mockController)} +} diff --git a/pkg/experiment/storage/v2/goal.go b/pkg/experiment/storage/v2/goal.go new file mode 100644 index 000000000..596d5b611 --- /dev/null +++ b/pkg/experiment/storage/v2/goal.go @@ -0,0 +1,295 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package v2 + +import ( + "context" + "errors" + "fmt" + + "github.com/bucketeer-io/bucketeer/pkg/experiment/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + proto "github.com/bucketeer-io/bucketeer/proto/experiment" +) + +var ( + ErrGoalAlreadyExists = errors.New("goal: already exists") + ErrGoalNotFound = errors.New("goal: not found") + ErrGoalUnexpectedAffectedRows = errors.New("goal: unexpected affected rows") +) + +type GoalStorage interface { + CreateGoal(ctx context.Context, g *domain.Goal, environmentNamespace string) error + UpdateGoal(ctx context.Context, g *domain.Goal, environmentNamespace string) error + GetGoal(ctx context.Context, id, environmentNamespace string) (*domain.Goal, error) + ListGoals( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, + isInUseStatus *bool, + environmentNamespace string, + ) ([]*proto.Goal, int, int64, error) +} + +type goalStorage struct { + qe mysql.QueryExecer +} + +func NewGoalStorage(qe mysql.QueryExecer) GoalStorage { + return &goalStorage{qe: qe} +} + +func (s *goalStorage) CreateGoal(ctx context.Context, g *domain.Goal, environmentNamespace string) error { + query := ` + INSERT INTO goal ( + id, + name, + description, + archived, + deleted, + created_at, + updated_at, + environment_namespace + ) VALUES ( + ?, ?, ?, ?, ?, ?, ?, ? + ) + ` + _, err := s.qe.ExecContext( + ctx, + query, + g.Id, + g.Name, + g.Description, + g.Archived, + g.Deleted, + g.CreatedAt, + g.UpdatedAt, + environmentNamespace, + ) + if err != nil { + if err == mysql.ErrDuplicateEntry { + return ErrGoalAlreadyExists + } + return err + } + return nil +} + +func (s *goalStorage) UpdateGoal(ctx context.Context, g *domain.Goal, environmentNamespace string) error { + query := ` + UPDATE + goal + SET + name = ?, + description = ?, + archived = ?, + deleted = ?, + created_at = ?, + updated_at = ? 
+ WHERE + id = ? AND + environment_namespace = ? + ` + result, err := s.qe.ExecContext( + ctx, + query, + g.Name, + g.Description, + g.Archived, + g.Deleted, + g.CreatedAt, + g.UpdatedAt, + g.Id, + environmentNamespace, + ) + if err != nil { + return err + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return err + } + if rowsAffected != 1 { + return ErrGoalUnexpectedAffectedRows + } + return nil +} + +func (s *goalStorage) GetGoal(ctx context.Context, id, environmentNamespace string) (*domain.Goal, error) { + goal := proto.Goal{} + query := ` + SELECT + id, + name, + description, + archived, + deleted, + created_at, + updated_at, + CASE + WHEN ( + SELECT + COUNT(1) + FROM + experiment + WHERE + environment_namespace = ? AND + goal_ids LIKE concat("%", goal.id, "%") + ) > 0 THEN TRUE + ELSE FALSE + END AS is_in_use_status + FROM + goal + WHERE + id = ? AND + environment_namespace = ? + ` + err := s.qe.QueryRowContext( + ctx, + query, + environmentNamespace, + id, + environmentNamespace, + ).Scan( + &goal.Id, + &goal.Name, + &goal.Description, + &goal.Archived, + &goal.Deleted, + &goal.CreatedAt, + &goal.UpdatedAt, + &goal.IsInUseStatus, + ) + if err != nil { + if err == mysql.ErrNoRows { + return nil, ErrGoalNotFound + } + return nil, err + } + return &domain.Goal{Goal: &goal}, nil +} + +func (s *goalStorage) ListGoals( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, + isInUseStatus *bool, + environmentNamespace string, +) ([]*proto.Goal, int, int64, error) { + whereSQL, whereArgs := mysql.ConstructWhereSQLString(whereParts) + prepareArgs := make([]interface{}, 0, len(whereArgs)+1) + prepareArgs = append(prepareArgs, environmentNamespace) + prepareArgs = append(prepareArgs, whereArgs...) 
+ orderBySQL := mysql.ConstructOrderBySQLString(orders) + limitOffsetSQL := mysql.ConstructLimitOffsetSQLString(limit, offset) + var isInUseStatusSQL string + if isInUseStatus != nil { + if *isInUseStatus { + isInUseStatusSQL = "HAVING is_in_use_status = TRUE" + } else { + isInUseStatusSQL = "HAVING is_in_use_status = FALSE" + } + } + query := fmt.Sprintf(` + SELECT + id, + name, + description, + archived, + deleted, + created_at, + updated_at, + CASE + WHEN ( + SELECT + COUNT(1) + FROM + experiment + WHERE + environment_namespace = ? AND + goal_ids LIKE concat("%%", goal.id, "%%") + ) > 0 THEN TRUE + ELSE FALSE + END AS is_in_use_status + FROM + goal + %s %s %s %s + `, whereSQL, isInUseStatusSQL, orderBySQL, limitOffsetSQL, + ) + rows, err := s.qe.QueryContext(ctx, query, prepareArgs...) + if err != nil { + return nil, 0, 0, err + } + defer rows.Close() + goals := make([]*proto.Goal, 0, limit) + for rows.Next() { + goal := proto.Goal{} + err := rows.Scan( + &goal.Id, + &goal.Name, + &goal.Description, + &goal.Archived, + &goal.Deleted, + &goal.CreatedAt, + &goal.UpdatedAt, + &goal.IsInUseStatus, + ) + if err != nil { + return nil, 0, 0, err + } + goals = append(goals, &goal) + } + if rows.Err() != nil { + return nil, 0, 0, err + } + nextOffset := offset + len(goals) + var totalCount int64 + countConditionSQL := "> 0 THEN 1 ELSE 1" + if isInUseStatus != nil { + if *isInUseStatus { + countConditionSQL = "> 0 THEN 1 ELSE NULL" + } else { + countConditionSQL = "> 0 THEN NULL ELSE 1" + } + } + countQuery := fmt.Sprintf(` + SELECT + COUNT( + CASE + WHEN ( + SELECT + COUNT(1) + FROM + experiment + WHERE + environment_namespace = ? 
AND + goal_ids LIKE concat("%%", goal.id, "%%") + ) %s + END + ) + FROM + goal + %s %s + `, countConditionSQL, whereSQL, orderBySQL, + ) + err = s.qe.QueryRowContext(ctx, countQuery, prepareArgs...).Scan(&totalCount) + if err != nil { + return nil, 0, 0, err + } + return goals, nextOffset, totalCount, nil +} diff --git a/pkg/experiment/storage/v2/goal_test.go b/pkg/experiment/storage/v2/goal_test.go new file mode 100644 index 000000000..676a7a563 --- /dev/null +++ b/pkg/experiment/storage/v2/goal_test.go @@ -0,0 +1,274 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v2 + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/experiment/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + proto "github.com/bucketeer-io/bucketeer/proto/experiment" +) + +func TestNewGoalStorage(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + db := NewGoalStorage(mock.NewMockQueryExecer(mockController)) + assert.IsType(t, &goalStorage{}, db) +} + +func TestCreateGoal(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := []struct { + setup func(*goalStorage) + input *domain.Goal + environmentNamespace string + expectedErr error + }{ + { + setup: func(s *goalStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, mysql.ErrDuplicateEntry) + }, + input: &domain.Goal{ + Goal: &proto.Goal{Id: "id-0"}, + }, + environmentNamespace: "ns0", + expectedErr: ErrGoalAlreadyExists, + }, + { + setup: func(s *goalStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, nil) + }, + input: &domain.Goal{ + Goal: &proto.Goal{Id: "id-1"}, + }, + environmentNamespace: "ns0", + expectedErr: nil, + }, + } + for _, p := range patterns { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + db := newGoalStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(db) + } + err := db.CreateGoal(ctx, p.input, p.environmentNamespace) + assert.Equal(t, p.expectedErr, err) + } +} + +func TestUpdateGoal(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := []struct { + setup func(*goalStorage) + input 
*domain.Goal + environmentNamespace string + expectedErr error + }{ + { + setup: func(s *goalStorage) { + result := mock.NewMockResult(mockController) + result.EXPECT().RowsAffected().Return(int64(0), nil) + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(result, nil) + }, + input: &domain.Goal{ + Goal: &proto.Goal{Id: "id-0"}, + }, + environmentNamespace: "ns", + expectedErr: ErrGoalUnexpectedAffectedRows, + }, + { + setup: func(s *goalStorage) { + result := mock.NewMockResult(mockController) + result.EXPECT().RowsAffected().Return(int64(1), nil) + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(result, nil) + }, + input: &domain.Goal{ + Goal: &proto.Goal{Id: "id-0"}, + }, + environmentNamespace: "ns", + expectedErr: nil, + }, + } + for _, p := range patterns { + storage := newGoalStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + err := storage.UpdateGoal(context.Background(), p.input, p.environmentNamespace) + assert.Equal(t, p.expectedErr, err) + } +} + +func TestGetGoal(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := []struct { + setup func(*goalStorage) + input string + environmentNamespace string + expected *domain.Goal + expectedErr error + }{ + { + setup: func(s *goalStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + input: "", + environmentNamespace: "ns0", + expected: nil, + expectedErr: ErrGoalNotFound, + }, + { + setup: func(s *goalStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + 
input: "id-0", + environmentNamespace: "ns0", + expected: &domain.Goal{ + Goal: &proto.Goal{Id: "id-0"}, + }, + expectedErr: nil, + }, + } + for _, p := range patterns { + storage := newGoalStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + _, err := storage.GetGoal(context.Background(), p.input, p.environmentNamespace) + assert.Equal(t, p.expectedErr, err) + } +} + +func TestListGoals(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := []struct { + setup func(*goalStorage) + whereParts []mysql.WherePart + orders []*mysql.Order + limit int + offset int + isInUseStatus *bool + environmentNamespace string + expected []*proto.Goal + expectedCursor int + expectedErr error + }{ + { + setup: func(s *goalStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + }, + whereParts: nil, + orders: nil, + limit: 0, + offset: 0, + isInUseStatus: nil, + environmentNamespace: "", + expected: nil, + expectedCursor: 0, + expectedErr: errors.New("error"), + }, + { + setup: func(s *goalStorage) { + rows := mock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + whereParts: []mysql.WherePart{ + mysql.NewFilter("num", ">=", 5), + }, + orders: []*mysql.Order{ + mysql.NewOrder("id", mysql.OrderDirectionAsc), + }, + limit: 10, + offset: 5, + isInUseStatus: nil, + environmentNamespace: "ns0", + expected: []*proto.Goal{}, + expectedCursor: 5, + expectedErr: nil, + }, + } + for _, p := range 
patterns { + storage := newGoalStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + goals, cursor, _, err := storage.ListGoals( + context.Background(), + p.whereParts, + p.orders, + p.limit, + p.offset, + p.isInUseStatus, + p.environmentNamespace, + ) + assert.Equal(t, p.expected, goals) + assert.Equal(t, p.expectedCursor, cursor) + assert.Equal(t, p.expectedErr, err) + } +} + +func newGoalStorageWithMock(t *testing.T, mockController *gomock.Controller) *goalStorage { + t.Helper() + return &goalStorage{mock.NewMockQueryExecer(mockController)} +} diff --git a/pkg/experiment/storage/v2/mock/BUILD.bazel b/pkg/experiment/storage/v2/mock/BUILD.bazel new file mode 100644 index 000000000..0905ffe23 --- /dev/null +++ b/pkg/experiment/storage/v2/mock/BUILD.bazel @@ -0,0 +1,17 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "experiment.go", + "goal.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/experiment/storage/v2/mock", + visibility = ["//visibility:public"], + deps = [ + "//pkg/experiment/domain:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//proto/experiment:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + ], +) diff --git a/pkg/experiment/storage/v2/mock/experiment.go b/pkg/experiment/storage/v2/mock/experiment.go new file mode 100644 index 000000000..60203e80e --- /dev/null +++ b/pkg/experiment/storage/v2/mock/experiment.go @@ -0,0 +1,99 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: experiment.go + +// Package mock is a generated GoMock package. 
+package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + domain "github.com/bucketeer-io/bucketeer/pkg/experiment/domain" + mysql "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + experiment "github.com/bucketeer-io/bucketeer/proto/experiment" +) + +// MockExperimentStorage is a mock of ExperimentStorage interface. +type MockExperimentStorage struct { + ctrl *gomock.Controller + recorder *MockExperimentStorageMockRecorder +} + +// MockExperimentStorageMockRecorder is the mock recorder for MockExperimentStorage. +type MockExperimentStorageMockRecorder struct { + mock *MockExperimentStorage +} + +// NewMockExperimentStorage creates a new mock instance. +func NewMockExperimentStorage(ctrl *gomock.Controller) *MockExperimentStorage { + mock := &MockExperimentStorage{ctrl: ctrl} + mock.recorder = &MockExperimentStorageMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockExperimentStorage) EXPECT() *MockExperimentStorageMockRecorder { + return m.recorder +} + +// CreateExperiment mocks base method. +func (m *MockExperimentStorage) CreateExperiment(ctx context.Context, e *domain.Experiment, environmentNamespace string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateExperiment", ctx, e, environmentNamespace) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateExperiment indicates an expected call of CreateExperiment. +func (mr *MockExperimentStorageMockRecorder) CreateExperiment(ctx, e, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateExperiment", reflect.TypeOf((*MockExperimentStorage)(nil).CreateExperiment), ctx, e, environmentNamespace) +} + +// GetExperiment mocks base method. 
+func (m *MockExperimentStorage) GetExperiment(ctx context.Context, id, environmentNamespace string) (*domain.Experiment, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetExperiment", ctx, id, environmentNamespace) + ret0, _ := ret[0].(*domain.Experiment) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetExperiment indicates an expected call of GetExperiment. +func (mr *MockExperimentStorageMockRecorder) GetExperiment(ctx, id, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetExperiment", reflect.TypeOf((*MockExperimentStorage)(nil).GetExperiment), ctx, id, environmentNamespace) +} + +// ListExperiments mocks base method. +func (m *MockExperimentStorage) ListExperiments(ctx context.Context, whereParts []mysql.WherePart, orders []*mysql.Order, limit, offset int) ([]*experiment.Experiment, int, int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListExperiments", ctx, whereParts, orders, limit, offset) + ret0, _ := ret[0].([]*experiment.Experiment) + ret1, _ := ret[1].(int) + ret2, _ := ret[2].(int64) + ret3, _ := ret[3].(error) + return ret0, ret1, ret2, ret3 +} + +// ListExperiments indicates an expected call of ListExperiments. +func (mr *MockExperimentStorageMockRecorder) ListExperiments(ctx, whereParts, orders, limit, offset interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListExperiments", reflect.TypeOf((*MockExperimentStorage)(nil).ListExperiments), ctx, whereParts, orders, limit, offset) +} + +// UpdateExperiment mocks base method. +func (m *MockExperimentStorage) UpdateExperiment(ctx context.Context, e *domain.Experiment, environmentNamespace string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateExperiment", ctx, e, environmentNamespace) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateExperiment indicates an expected call of UpdateExperiment. 
+func (mr *MockExperimentStorageMockRecorder) UpdateExperiment(ctx, e, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateExperiment", reflect.TypeOf((*MockExperimentStorage)(nil).UpdateExperiment), ctx, e, environmentNamespace) +} diff --git a/pkg/experiment/storage/v2/mock/goal.go b/pkg/experiment/storage/v2/mock/goal.go new file mode 100644 index 000000000..7699bcb91 --- /dev/null +++ b/pkg/experiment/storage/v2/mock/goal.go @@ -0,0 +1,99 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: goal.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + domain "github.com/bucketeer-io/bucketeer/pkg/experiment/domain" + mysql "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + experiment "github.com/bucketeer-io/bucketeer/proto/experiment" +) + +// MockGoalStorage is a mock of GoalStorage interface. +type MockGoalStorage struct { + ctrl *gomock.Controller + recorder *MockGoalStorageMockRecorder +} + +// MockGoalStorageMockRecorder is the mock recorder for MockGoalStorage. +type MockGoalStorageMockRecorder struct { + mock *MockGoalStorage +} + +// NewMockGoalStorage creates a new mock instance. +func NewMockGoalStorage(ctrl *gomock.Controller) *MockGoalStorage { + mock := &MockGoalStorage{ctrl: ctrl} + mock.recorder = &MockGoalStorageMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockGoalStorage) EXPECT() *MockGoalStorageMockRecorder { + return m.recorder +} + +// CreateGoal mocks base method. 
+func (m *MockGoalStorage) CreateGoal(ctx context.Context, g *domain.Goal, environmentNamespace string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateGoal", ctx, g, environmentNamespace) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateGoal indicates an expected call of CreateGoal. +func (mr *MockGoalStorageMockRecorder) CreateGoal(ctx, g, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateGoal", reflect.TypeOf((*MockGoalStorage)(nil).CreateGoal), ctx, g, environmentNamespace) +} + +// GetGoal mocks base method. +func (m *MockGoalStorage) GetGoal(ctx context.Context, id, environmentNamespace string) (*domain.Goal, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetGoal", ctx, id, environmentNamespace) + ret0, _ := ret[0].(*domain.Goal) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetGoal indicates an expected call of GetGoal. +func (mr *MockGoalStorageMockRecorder) GetGoal(ctx, id, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGoal", reflect.TypeOf((*MockGoalStorage)(nil).GetGoal), ctx, id, environmentNamespace) +} + +// ListGoals mocks base method. +func (m *MockGoalStorage) ListGoals(ctx context.Context, whereParts []mysql.WherePart, orders []*mysql.Order, limit, offset int, isInUseStatus *bool, environmentNamespace string) ([]*experiment.Goal, int, int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListGoals", ctx, whereParts, orders, limit, offset, isInUseStatus, environmentNamespace) + ret0, _ := ret[0].([]*experiment.Goal) + ret1, _ := ret[1].(int) + ret2, _ := ret[2].(int64) + ret3, _ := ret[3].(error) + return ret0, ret1, ret2, ret3 +} + +// ListGoals indicates an expected call of ListGoals. 
+func (mr *MockGoalStorageMockRecorder) ListGoals(ctx, whereParts, orders, limit, offset, isInUseStatus, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListGoals", reflect.TypeOf((*MockGoalStorage)(nil).ListGoals), ctx, whereParts, orders, limit, offset, isInUseStatus, environmentNamespace) +} + +// UpdateGoal mocks base method. +func (m *MockGoalStorage) UpdateGoal(ctx context.Context, g *domain.Goal, environmentNamespace string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateGoal", ctx, g, environmentNamespace) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateGoal indicates an expected call of UpdateGoal. +func (mr *MockGoalStorageMockRecorder) UpdateGoal(ctx, g, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateGoal", reflect.TypeOf((*MockGoalStorage)(nil).UpdateGoal), ctx, g, environmentNamespace) +} diff --git a/pkg/feature/api/BUILD.bazel b/pkg/feature/api/BUILD.bazel new file mode 100644 index 000000000..c5daf7613 --- /dev/null +++ b/pkg/feature/api/BUILD.bazel @@ -0,0 +1,96 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "api.go", + "error.go", + "feature.go", + "segment.go", + "segment_user.go", + "tag.go", + "user_evaluations.go", + "validation.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/feature/api", + visibility = ["//visibility:public"], + deps = [ + "//pkg/account/client:go_default_library", + "//pkg/cache:go_default_library", + "//pkg/cache/v3:go_default_library", + "//pkg/experiment/client:go_default_library", + "//pkg/experiment/domain:go_default_library", + "//pkg/feature/command:go_default_library", + "//pkg/feature/domain:go_default_library", + "//pkg/feature/storage:go_default_library", + "//pkg/feature/storage/v2:go_default_library", + 
"//pkg/locale:go_default_library", + "//pkg/log:go_default_library", + "//pkg/pubsub/publisher:go_default_library", + "//pkg/role:go_default_library", + "//pkg/rpc/status:go_default_library", + "//pkg/storage:go_default_library", + "//pkg/storage/v2/bigtable:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//pkg/uuid:go_default_library", + "//proto/account:go_default_library", + "//proto/event/domain:go_default_library", + "//proto/event/service:go_default_library", + "//proto/experiment:go_default_library", + "//proto/feature:go_default_library", + "//proto/user:go_default_library", + "@go_googleapis//google/rpc:errdetails_go_proto", + "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + "@org_golang_x_sync//singleflight:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "api_test.go", + "feature_test.go", + "segment_test.go", + "segment_user_test.go", + "tag_test.go", + "user_evaluations_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//pkg/account/client/mock:go_default_library", + "//pkg/autoops/command:go_default_library", + "//pkg/cache/v3/mock:go_default_library", + "//pkg/experiment/client/mock:go_default_library", + "//pkg/feature/domain:go_default_library", + "//pkg/feature/storage:go_default_library", + "//pkg/feature/storage/mock:go_default_library", + "//pkg/feature/storage/v2:go_default_library", + "//pkg/locale:go_default_library", + "//pkg/pubsub/publisher:go_default_library", + "//pkg/pubsub/publisher/mock:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/storage:go_default_library", + "//pkg/storage/v2/bigtable:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//pkg/storage/v2/mysql/mock:go_default_library", + "//pkg/token:go_default_library", + 
"//pkg/uuid:go_default_library", + "//proto/account:go_default_library", + "//proto/experiment:go_default_library", + "//proto/feature:go_default_library", + "//proto/user:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_golang_protobuf//ptypes:go_default_library_gen", + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + "@go_googleapis//google/rpc:errdetails_go_proto", + "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", + "@org_golang_google_grpc//status:go_default_library", + "@org_golang_x_sync//singleflight:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/feature/api/api.go b/pkg/feature/api/api.go new file mode 100644 index 000000000..bf4081012 --- /dev/null +++ b/pkg/feature/api/api.go @@ -0,0 +1,169 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "context" + + "go.uber.org/zap" + "golang.org/x/sync/singleflight" + "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + accountclient "github.com/bucketeer-io/bucketeer/pkg/account/client" + "github.com/bucketeer-io/bucketeer/pkg/cache" + cachev3 "github.com/bucketeer-io/bucketeer/pkg/cache/v3" + experimentclient "github.com/bucketeer-io/bucketeer/pkg/experiment/client" + featurestorage "github.com/bucketeer-io/bucketeer/pkg/feature/storage" + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + "github.com/bucketeer-io/bucketeer/pkg/role" + bigtable "github.com/bucketeer-io/bucketeer/pkg/storage/v2/bigtable" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +type options struct { + logger *zap.Logger +} + +type Option func(*options) + +func WithLogger(l *zap.Logger) Option { + return func(opts *options) { + opts.logger = l + } +} + +type FeatureService struct { + mysqlClient mysql.Client + userEvaluationStorage featurestorage.UserEvaluationsStorage + accountClient accountclient.Client + experimentClient experimentclient.Client + featuresCache cachev3.FeaturesCache + segmentUsersCache cachev3.SegmentUsersCache + segmentUsersPublisher publisher.Publisher + domainPublisher publisher.Publisher + flightgroup singleflight.Group + opts *options + logger *zap.Logger +} + +func NewFeatureService( + mysqlClient mysql.Client, + btClient bigtable.Client, + accountClient accountclient.Client, + experimentClient experimentclient.Client, + v3Cache cache.MultiGetCache, + segmentUsersPublisher publisher.Publisher, + domainPublisher 
publisher.Publisher, + opts ...Option, +) *FeatureService { + dopts := &options{ + logger: zap.NewNop(), + } + for _, opt := range opts { + opt(dopts) + } + return &FeatureService{ + mysqlClient: mysqlClient, + userEvaluationStorage: featurestorage.NewUserEvaluationsStorage(btClient), + accountClient: accountClient, + experimentClient: experimentClient, + featuresCache: cachev3.NewFeaturesCache(v3Cache), + segmentUsersCache: cachev3.NewSegmentUsersCache(v3Cache), + segmentUsersPublisher: segmentUsersPublisher, + domainPublisher: domainPublisher, + opts: dopts, + logger: dopts.logger.Named("api"), + } +} + +func (s *FeatureService) Register(server *grpc.Server) { + featureproto.RegisterFeatureServiceServer(server, s) +} + +func (s *FeatureService) checkRole( + ctx context.Context, + requiredRole accountproto.Account_Role, + environmentNamespace string, +) (*eventproto.Editor, error) { + editor, err := role.CheckRole(ctx, requiredRole, func(email string) (*accountproto.GetAccountResponse, error) { + return s.accountClient.GetAccount(ctx, &accountproto.GetAccountRequest{ + Email: email, + EnvironmentNamespace: environmentNamespace, + }) + }) + if err != nil { + switch status.Code(err) { + case codes.Unauthenticated: + s.logger.Info( + "Unauthenticated", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, localizedError(statusUnauthenticated, locale.JaJP) + case codes.PermissionDenied: + s.logger.Info( + "Permission denied", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, localizedError(statusPermissionDenied, locale.JaJP) + default: + s.logger.Error( + "Failed to check role", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) 
+ } + } + return editor, nil +} + +func (s *FeatureService) reportInternalServerError( + ctx context.Context, + err error, + environmentNamespace string, + localizer locale.Localizer, +) error { + s.logger.Error( + "Internal server error", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + dt, err := statusInternal.WithDetails(&errdetails.LocalizedMessage{ + Locale: localizer.GetLocale(), + Message: localizer.MustLocalize(locale.InternalServerError), + }) + if err != nil { + return statusInternal.Err() + } + return dt.Err() +} diff --git a/pkg/feature/api/api_test.go b/pkg/feature/api/api_test.go new file mode 100644 index 000000000..3babaca90 --- /dev/null +++ b/pkg/feature/api/api_test.go @@ -0,0 +1,181 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "context" + "fmt" + "time" + + "github.com/golang/mock/gomock" + "go.uber.org/zap" + "golang.org/x/sync/singleflight" + + accountclientmock "github.com/bucketeer-io/bucketeer/pkg/account/client/mock" + cachev3mock "github.com/bucketeer-io/bucketeer/pkg/cache/v3/mock" + experimentclientmock "github.com/bucketeer-io/bucketeer/pkg/experiment/client/mock" + featurestoragemock "github.com/bucketeer-io/bucketeer/pkg/feature/storage/mock" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + publishermock "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher/mock" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + mysqlmock "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + "github.com/bucketeer-io/bucketeer/pkg/token" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + experimentproto "github.com/bucketeer-io/bucketeer/proto/experiment" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +const ( + environmentNamespace = "test" + tag = "tag" + userID = "user-id" +) + +var ( + defaultOptions = options{ + logger: zap.NewNop(), + } + evaluation = &featureproto.Evaluation{ + FeatureId: "feature-id", + FeatureVersion: 1, + UserId: "user-id", + VariationId: "variation-id", + VariationValue: "variation-value", + } +) + +func createContextWithToken() context.Context { + token := &token.IDToken{ + Issuer: "issuer", + Subject: "sub", + Audience: "audience", + Expiry: time.Now().AddDate(100, 0, 0), + IssuedAt: time.Now(), + Email: "email", + AdminRole: accountproto.Account_OWNER, + } + ctx := context.TODO() + return context.WithValue(ctx, rpc.Key, token) +} + +func createContextWithTokenRoleUnassigned() context.Context { + token := &token.IDToken{ + Issuer: "issuer", + Subject: "sub", + Audience: "audience", + Expiry: time.Now().AddDate(100, 0, 0), + IssuedAt: time.Now(), + Email: "email", + AdminRole: accountproto.Account_UNASSIGNED, + } + ctx := context.TODO() + return context.WithValue(ctx, rpc.Key, 
token) +} + +// FIXME: Deprecated. Do not use for a new test. Replace this with createFeatureServiceNew. +func createFeatureService(c *gomock.Controller) *FeatureService { + p := publishermock.NewMockPublisher(c) + p.EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + p.EXPECT().PublishMulti(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + a := accountclientmock.NewMockClient(c) + ar := &accountproto.GetAccountResponse{ + Account: &accountproto.Account{ + Email: "email", + Role: accountproto.Account_VIEWER, + }, + } + a.EXPECT().GetAccount(gomock.Any(), gomock.Any()).Return(ar, nil).AnyTimes() + e := experimentclientmock.NewMockClient(c) + e.EXPECT().ListExperiments(gomock.Any(), gomock.Any()).Return(&experimentproto.ListExperimentsResponse{}, nil).AnyTimes() + return &FeatureService{ + mysqlmock.NewMockClient(c), + nil, + a, + e, + cachev3mock.NewMockFeaturesCache(c), + cachev3mock.NewMockSegmentUsersCache(c), + p, + p, + singleflight.Group{}, + &defaultOptions, + defaultOptions.logger, + } +} + +func createFeatureServiceNew(c *gomock.Controller) *FeatureService { + segmentUsersPublisher := publishermock.NewMockPublisher(c) + domainPublisher := publishermock.NewMockPublisher(c) + a := accountclientmock.NewMockClient(c) + ar := &accountproto.GetAccountResponse{ + Account: &accountproto.Account{ + Email: "email", + Role: accountproto.Account_VIEWER, + }, + } + a.EXPECT().GetAccount(gomock.Any(), gomock.Any()).Return(ar, nil).AnyTimes() + return &FeatureService{ + mysqlClient: mysqlmock.NewMockClient(c), + userEvaluationStorage: featurestoragemock.NewMockUserEvaluationsStorage(c), + accountClient: a, + experimentClient: experimentclientmock.NewMockClient(c), + featuresCache: cachev3mock.NewMockFeaturesCache(c), + segmentUsersPublisher: segmentUsersPublisher, + domainPublisher: domainPublisher, + opts: &defaultOptions, + logger: defaultOptions.logger, + } +} + +func createFeatureVariations() []*featureproto.Variation { + return 
[]*featureproto.Variation{ + { + Value: "variation_value_1", + Name: "variation_name_1", + Description: "variation_description_1", + }, + { + Value: "variation_value_2", + Name: "variation_name_2", + Description: "variation_description_2", + }, + } +} + +func createFeatureTags() []string { + return []string{"feature-tag-1", "feature-tag-2", "feature-tag-3"} +} + +func contains(needle string, haystack []string) bool { + for i := range haystack { + if haystack[i] == needle { + return true + } + } + return false +} + +type msgLengthMatcher struct{ length int } + +func newMsgLengthMatcher(length int) gomock.Matcher { + return &msgLengthMatcher{length: length} +} + +func (m *msgLengthMatcher) Matches(x interface{}) bool { + return len(x.([]publisher.Message)) == m.length +} + +func (m *msgLengthMatcher) String() string { + return fmt.Sprintf("length: %d", m.length) +} diff --git a/pkg/feature/api/error.go b/pkg/feature/api/error.go new file mode 100644 index 000000000..575fac0e7 --- /dev/null +++ b/pkg/feature/api/error.go @@ -0,0 +1,664 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "fmt" + + "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc/codes" + gstatus "google.golang.org/grpc/status" + + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/rpc/status" +) + +var ( + statusInternal = gstatus.New(codes.Internal, "feature: internal") + statusMissingID = gstatus.New(codes.InvalidArgument, "feature: missing id") + statusMissingIDs = gstatus.New(codes.InvalidArgument, "feature: missing ids") + statusInvalidID = gstatus.New(codes.InvalidArgument, "feature: invalid id") + statusMissingKeyword = gstatus.New(codes.InvalidArgument, "feature: missing keyword") + statusMissingUser = gstatus.New(codes.InvalidArgument, "feature: missing user") + statusMissingUserID = gstatus.New(codes.InvalidArgument, "feature: missing user id") + statusMissingUserIDs = gstatus.New(codes.InvalidArgument, "feature: missing user ids") + statusMissingCommand = gstatus.New(codes.InvalidArgument, "feature: missing command") + statusMissingDefaultOnVariation = gstatus.New(codes.InvalidArgument, "feature: missing default on variation") + statusMissingDefaultOffVariation = gstatus.New(codes.InvalidArgument, "feature: missing default off variation") + statusInvalidDefaultOnVariation = gstatus.New(codes.InvalidArgument, "feature: invalid default on variation") + statusInvalidDefaultOffVariation = gstatus.New(codes.InvalidArgument, "feature: invalid default off variation") + statusMissingVariationID = gstatus.New(codes.InvalidArgument, "feature: missing variation id") + statusInvalidVariationID = gstatus.New(codes.InvalidArgument, "feature: invalid variation id") + statusDifferentVariationsSize = gstatus.New( + codes.InvalidArgument, + "feature: feature variations and rollout variations must have the same size", + ) + statusExceededMaxVariationWeight = gstatus.New( + codes.InvalidArgument, + fmt.Sprintf("feature: the sum of all weights value is %d", totalVariationWeight), + ) + 
statusIncorrectVariationWeight = gstatus.New( + codes.InvalidArgument, + fmt.Sprintf("command: variation weight must be between 0 and %d", totalVariationWeight), + ) + statusInvalidCursor = gstatus.New(codes.InvalidArgument, "feature: cursor is invalid") + statusInvalidOrderBy = gstatus.New(codes.InvalidArgument, "feature: order_by is invalid") + statusMissingName = gstatus.New(codes.InvalidArgument, "feature: missing name") + statusMissingFeatureVariations = gstatus.New( + codes.InvalidArgument, + "feature: feature must contain more than one variation", + ) + statusMissingFeatureTags = gstatus.New( + codes.InvalidArgument, + "feature: feature must contain one or more tags", + ) + statusMissingFeatureTag = gstatus.New(codes.InvalidArgument, "feature: missing feature tag") + statusMissingEvaluation = gstatus.New(codes.InvalidArgument, "feature: missing evaluation") + statusUnknownCommand = gstatus.New(codes.InvalidArgument, "feature: unknown command") + statusMissingRule = gstatus.New(codes.InvalidArgument, "feature: missing rule") + statusMissingRuleID = gstatus.New(codes.InvalidArgument, "feature: missing rule id") + statusMissingRuleClause = gstatus.New(codes.InvalidArgument, "feature: missing rule clause") + statusMissingClauseID = gstatus.New(codes.InvalidArgument, "feature: missing clause id") + statusMissingClauseAttribute = gstatus.New(codes.InvalidArgument, "feature: missing clause attribute") + statusMissingClauseValues = gstatus.New(codes.InvalidArgument, "feature: missing clause values") + statusMissingClauseValue = gstatus.New(codes.InvalidArgument, "feature: missing clause value") + statusMissingSegmentID = gstatus.New(codes.InvalidArgument, "feature: missing segment id") + statusMissingSegmentUsersData = gstatus.New(codes.InvalidArgument, "feature: missing segment users data") + statusMissingRuleStrategy = gstatus.New(codes.InvalidArgument, "feature: missing rule strategy") + statusUnknownStrategy = gstatus.New(codes.InvalidArgument, "feature: unknown 
strategy") + statusMissingFixedStrategy = gstatus.New(codes.InvalidArgument, "feature: missing fixed strategy") + statusMissingRolloutStrategy = gstatus.New(codes.InvalidArgument, "feature: missing rollout strategy") + statusExceededMaxSegmentUsersDataSize = gstatus.New( + codes.InvalidArgument, + fmt.Sprintf("feature: max segment users data size allowed is %d bytes", maxSegmentUsersDataSize), + ) + statusUnknownSegmentUserState = gstatus.New(codes.InvalidArgument, "feature: unknown segment user state") + statusIncorrectUUIDFormat = gstatus.New( + codes.InvalidArgument, + "feature: uuid format must be an uuid version 4", + ) + statusExceededMaxUserIDsLength = gstatus.New( + codes.InvalidArgument, + fmt.Sprintf("feature: max user ids length allowed is %d", maxUserIDsLength), + ) + statusIncorrectDestinationEnvironment = gstatus.New( + codes.InvalidArgument, + "feature: destination environment is the same as origin one", + ) + statusExceededMaxPageSizePerRequest = gstatus.New( + codes.InvalidArgument, + fmt.Sprintf("feature: max page size allowed is %d", maxPageSizePerRequest), + ) + statusNotFound = gstatus.New(codes.NotFound, "feature: not found") + statusSegmentNotFound = gstatus.New(codes.NotFound, "feature: segment not found") + statusAlreadyExists = gstatus.New(codes.AlreadyExists, "feature: already exists") + statusNothingChange = gstatus.New(codes.FailedPrecondition, "feature: no change") + statusSegmentUsersAlreadyUploading = gstatus.New( + codes.FailedPrecondition, + "feature: segment users already uploading", + ) + statusSegmentStatusNotSuceeded = gstatus.New( + codes.FailedPrecondition, + "feature: segment status is not suceeded", + ) + statusSegmentInUse = gstatus.New(codes.FailedPrecondition, "feature: segment is in use") + statusUnauthenticated = gstatus.New(codes.Unauthenticated, "feature: unauthenticated") + statusPermissionDenied = gstatus.New(codes.PermissionDenied, "feature: permission denied") + statusWaitingOrRunningExperimentExists = 
gstatus.New( + codes.FailedPrecondition, + "feature: experiment in waiting or running status exists", + ) + statusCycleExists = gstatus.New(codes.FailedPrecondition, "feature: circular dependency detected") + statusInvalidArchive = gstatus.New( + codes.FailedPrecondition, + "feature: cant't archive because this feature is used as a prerequsite", + ) + statusInvalidChangingVariation = gstatus.New( + codes.FailedPrecondition, + "feature: can't change or remove this variation because it is used as a prerequsite", + ) + statusInvalidPrerequisite = gstatus.New(codes.FailedPrecondition, "feature: invalid prerequisite") + + errInternalJaJP = status.MustWithDetails( + statusInternal, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "内部エラーが発生しました", + }, + ) + errMissingIDJaJP = status.MustWithDetails( + statusMissingID, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "idは必須です", + }, + ) + errMissingIDsJaJP = status.MustWithDetails( + statusMissingIDs, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "idsは必須です", + }, + ) + errInvalidIDJaJP = status.MustWithDetails( + statusInvalidID, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不正なidです", + }, + ) + errMissingKeywordJaJP = status.MustWithDetails( + statusMissingKeyword, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "keywordは必須です", + }, + ) + errMissingUserJaJP = status.MustWithDetails( + statusMissingUser, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "userは必須です", + }, + ) + errMissingUserIDJaJP = status.MustWithDetails( + statusMissingUserID, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "user idは必須です", + }, + ) + errMissingUserIDsJaJP = status.MustWithDetails( + statusMissingUserIDs, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "user idのリストは必須です", + }, + ) + errMissingCommandJaJP = status.MustWithDetails( + statusMissingCommand, + &errdetails.LocalizedMessage{ + Locale: 
locale.JaJP, + Message: "commandは必須です", + }, + ) + errMissingDefaultOnVariationJaJP = status.MustWithDetails( + statusMissingDefaultOnVariation, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "default variationは必須です", + }, + ) + errMissingDefaultOffVariationJaJP = status.MustWithDetails( + statusMissingDefaultOffVariation, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "off variationは必須です", + }, + ) + errInvalidDefaultOnVariationJaJP = status.MustWithDetails( + statusInvalidDefaultOnVariation, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不正なdefault variationです", + }, + ) + errInvalidDefaultOffVariationJaJP = status.MustWithDetails( + statusInvalidDefaultOffVariation, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不正なoff variationです", + }, + ) + errMissingVariationIDJaJP = status.MustWithDetails( + statusMissingVariationID, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "variation idは必須です", + }, + ) + errInvalidVariationIDJaJP = status.MustWithDetails( + statusInvalidVariationID, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不正なvariation idです", + }, + ) + errDifferentVariationsSizeJaJP = status.MustWithDetails( + statusDifferentVariationsSize, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "featureのvariationsとrolloutのvariationsの数が異なります", + }, + ) + errExceededMaxVariationWeightJaJP = status.MustWithDetails( + statusExceededMaxVariationWeight, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: fmt.Sprintf("全てのweightの合計の最大サイズ (%d) を超えています", totalVariationWeight), + }, + ) + errIncorrectVariationWeightJaJP = status.MustWithDetails( + statusIncorrectVariationWeight, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: fmt.Sprintf("weightは0から%dの間である必要があります", totalVariationWeight), + }, + ) + errInvalidCursorJaJP = status.MustWithDetails( + statusInvalidCursor, + &errdetails.LocalizedMessage{ + Locale: 
locale.JaJP, + Message: "不正なcursorです", + }, + ) + errInvalidOrderByJaJP = status.MustWithDetails( + statusInvalidOrderBy, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不正なソート順の指定です", + }, + ) + errMissingNameJaJP = status.MustWithDetails( + statusMissingName, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "nameは必須です", + }, + ) + errMissingFeatureVariationsJaJP = status.MustWithDetails( + statusMissingFeatureVariations, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "featureのvariationsは必須です", + }, + ) + errMissingFeatureTagsJaJP = status.MustWithDetails( + statusMissingFeatureTags, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "featureのtagsは必須です", + }, + ) + errMissingFeatureTagJaJP = status.MustWithDetails( + statusMissingFeatureTag, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "feature tagは必須です", + }, + ) + errMissingEvaluationJaJP = status.MustWithDetails( + statusMissingEvaluation, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "evaluationは必須です", + }, + ) + errUnknownCommandJaJP = status.MustWithDetails( + statusUnknownCommand, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不明なcommandです", + }, + ) + errMissingRuleJaJP = status.MustWithDetails( + statusMissingRule, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "ruleは必須です", + }, + ) + errMissingRuleIDJaJP = status.MustWithDetails( + statusMissingRuleID, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "rule idは必須です", + }, + ) + errMissingRuleClauseJaJP = status.MustWithDetails( + statusMissingRuleClause, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "ruleの条件は必須です", + }, + ) + errMissingClauseIDJaJP = status.MustWithDetails( + statusMissingClauseID, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "条件のidは必須です", + }, + ) + errMissingClauseAttributeJaJP = status.MustWithDetails( + 
statusMissingClauseAttribute, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "条件のattributeは必須です", + }, + ) + errMissingClauseValuesJaJP = status.MustWithDetails( + statusMissingClauseValues, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "条件の値のリストは必須です", + }, + ) + errMissingClauseValueJaJP = status.MustWithDetails( + statusMissingClauseValue, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "条件の値は必須です", + }, + ) + errMissingSegmentIDJaJP = status.MustWithDetails( + statusMissingSegmentID, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "segment idは必須です", + }, + ) + errMissingSegmentUsersDataJaJP = status.MustWithDetails( + statusMissingSegmentUsersData, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "segment userリストのデータは必須です", + }, + ) + errMissingRuleStrategy = status.MustWithDetails( + statusMissingRuleStrategy, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "rule strategyは必須です", + }, + ) + errUnknownStrategy = status.MustWithDetails( + statusUnknownStrategy, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不明なstrategyです", + }, + ) + errMissingFixedStrategy = status.MustWithDetails( + statusMissingFixedStrategy, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "fixed stategyは必須です", + }, + ) + errMissingRolloutStrategy = status.MustWithDetails( + statusMissingRolloutStrategy, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "rollout strategyは必須です", + }, + ) + errExceededMaxSegmentUsersDataSizeJaJP = status.MustWithDetails( + statusExceededMaxSegmentUsersDataSize, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: fmt.Sprintf("segment userリストの最大データサイズ (%d bytes) を超えています", maxSegmentUsersDataSize), + }, + ) + errUnknownSegmentUserStateJaJP = status.MustWithDetails( + statusUnknownSegmentUserState, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不明なsegment userのstateです", + 
}, + ) + errIncorrectUUIDFormatJaJP = status.MustWithDetails( + statusIncorrectUUIDFormat, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不正なUUIDのフォーマットです", + }, + ) + errExceededMaxUserIDsLengthJaJP = status.MustWithDetails( + statusExceededMaxUserIDsLength, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: fmt.Sprintf("user idリストの最大数 (%d) を超えています", maxUserIDsLength), + }, + ) + errIncorrectDestinationEnvironmentJaJP = status.MustWithDetails( + statusIncorrectDestinationEnvironment, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "クローン元とクローン先のenvironmentが同じです", + }, + ) + errExceededMaxPageSizePerRequestJaJP = status.MustWithDetails( + statusExceededMaxPageSizePerRequest, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: fmt.Sprintf("page sizeの最大値 (%d) を超えています", maxPageSizePerRequest), + }, + ) + errNotFoundJaJP = status.MustWithDetails( + statusNotFound, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "データが存在しません", + }, + ) + errSegmentNotFoundJaJP = status.MustWithDetails( + statusSegmentNotFound, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "segmentが存在しません", + }, + ) + errAlreadyExistsJaJP = status.MustWithDetails( + statusAlreadyExists, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "同じidのデータがすでに存在します", + }, + ) + errNothingChangeJaJP = status.MustWithDetails( + statusNothingChange, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "変更点がありません", + }, + ) + errSegmentUsersAlreadyUploadingJaJP = status.MustWithDetails( + statusSegmentUsersAlreadyUploading, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "segment userのリストはすでにアップロード中です", + }, + ) + errSegmentStatusNotSuceededJaJP = status.MustWithDetails( + statusSegmentStatusNotSuceeded, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "segmentのstatusがsuceededではありません", + }, + ) + errSegmentInUseJaJP = status.MustWithDetails( + 
statusSegmentInUse, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "segmentがfeature flagで使用されているため、削除できません", + }, + ) + errUnauthenticatedJaJP = status.MustWithDetails( + statusUnauthenticated, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "認証されていません", + }, + ) + errPermissionDeniedJaJP = status.MustWithDetails( + statusPermissionDenied, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "権限がありません", + }, + ) + errWaitingOrRunningExperimentExistsJaJP = status.MustWithDetails( + statusWaitingOrRunningExperimentExists, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "開始予定、もしくは実行中のExperimentが存在します。更新する場合はExperimentを停止してください。", + }, + ) + errInvalidArchiveJaJP = status.MustWithDetails( + statusInvalidArchive, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "前提条件のフラグとして登録されているフラグをアーカイブすることはできません", + }, + ) + errInvalidChangingVariationJaJP = status.MustWithDetails( + statusInvalidChangingVariation, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "前提条件のフラグとして登録されているフラグのバリエーションを変更または削除することはできません", + }, + ) + errInvalidPrerequisiteJaJP = status.MustWithDetails( + statusInvalidPrerequisite, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不正なprerequisiteです", + }, + ) +) + +func localizedError(s *gstatus.Status, loc string) error { + // handle loc if multi-lang is necessary + switch s { + case statusInternal: + return errInternalJaJP + case statusMissingID: + return errMissingIDJaJP + case statusMissingIDs: + return errMissingIDsJaJP + case statusInvalidID: + return errInvalidIDJaJP + case statusMissingKeyword: + return errMissingKeywordJaJP + case statusMissingUser: + return errMissingUserJaJP + case statusMissingUserID: + return errMissingUserIDJaJP + case statusMissingUserIDs: + return errMissingUserIDsJaJP + case statusMissingCommand: + return errMissingCommandJaJP + case statusMissingDefaultOnVariation: + return errMissingDefaultOnVariationJaJP + 
case statusMissingDefaultOffVariation: + return errMissingDefaultOffVariationJaJP + case statusInvalidDefaultOnVariation: + return errInvalidDefaultOnVariationJaJP + case statusInvalidDefaultOffVariation: + return errInvalidDefaultOffVariationJaJP + case statusMissingVariationID: + return errMissingVariationIDJaJP + case statusInvalidVariationID: + return errInvalidVariationIDJaJP + case statusDifferentVariationsSize: + return errDifferentVariationsSizeJaJP + case statusExceededMaxVariationWeight: + return errExceededMaxVariationWeightJaJP + case statusIncorrectVariationWeight: + return errIncorrectVariationWeightJaJP + case statusInvalidCursor: + return errInvalidCursorJaJP + case statusInvalidOrderBy: + return errInvalidOrderByJaJP + case statusMissingName: + return errMissingNameJaJP + case statusMissingFeatureVariations: + return errMissingFeatureVariationsJaJP + case statusMissingFeatureTags: + return errMissingFeatureTagsJaJP + case statusMissingFeatureTag: + return errMissingFeatureTagJaJP + case statusMissingEvaluation: + return errMissingEvaluationJaJP + case statusUnknownCommand: + return errUnknownCommandJaJP + case statusMissingRule: + return errMissingRuleJaJP + case statusMissingRuleID: + return errMissingRuleIDJaJP + case statusMissingRuleClause: + return errMissingRuleClauseJaJP + case statusMissingClauseID: + return errMissingClauseIDJaJP + case statusMissingClauseAttribute: + return errMissingClauseAttributeJaJP + case statusMissingClauseValues: + return errMissingClauseValuesJaJP + case statusMissingClauseValue: + return errMissingClauseValueJaJP + case statusMissingSegmentID: + return errMissingSegmentIDJaJP + case statusMissingSegmentUsersData: + return errMissingSegmentUsersDataJaJP + case statusMissingRuleStrategy: + return errMissingRuleStrategy + case statusUnknownStrategy: + return errUnknownStrategy + case statusMissingFixedStrategy: + return errMissingFixedStrategy + case statusMissingRolloutStrategy: + return errMissingRolloutStrategy + 
case statusExceededMaxSegmentUsersDataSize: + return errExceededMaxSegmentUsersDataSizeJaJP + case statusUnknownSegmentUserState: + return errUnknownSegmentUserStateJaJP + case statusIncorrectUUIDFormat: + return errIncorrectUUIDFormatJaJP + case statusExceededMaxUserIDsLength: + return errExceededMaxUserIDsLengthJaJP + case statusIncorrectDestinationEnvironment: + return errIncorrectDestinationEnvironmentJaJP + case statusExceededMaxPageSizePerRequest: + return errExceededMaxPageSizePerRequestJaJP + case statusNotFound: + return errNotFoundJaJP + case statusSegmentNotFound: + return errSegmentNotFoundJaJP + case statusAlreadyExists: + return errAlreadyExistsJaJP + case statusNothingChange: + return errNothingChangeJaJP + case statusSegmentUsersAlreadyUploading: + return errSegmentUsersAlreadyUploadingJaJP + case statusSegmentStatusNotSuceeded: + return errSegmentStatusNotSuceededJaJP + case statusSegmentInUse: + return errSegmentInUseJaJP + case statusUnauthenticated: + return errUnauthenticatedJaJP + case statusPermissionDenied: + return errPermissionDeniedJaJP + case statusWaitingOrRunningExperimentExists: + return errWaitingOrRunningExperimentExistsJaJP + case statusInvalidArchive: + return errInvalidArchiveJaJP + case statusInvalidChangingVariation: + return errInvalidChangingVariationJaJP + case statusInvalidPrerequisite: + return errInvalidPrerequisiteJaJP + default: + return errInternalJaJP + } +} diff --git a/pkg/feature/api/feature.go b/pkg/feature/api/feature.go new file mode 100644 index 000000000..ec697139e --- /dev/null +++ b/pkg/feature/api/feature.go @@ -0,0 +1,1639 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "context" + "fmt" + "strconv" + "time" + + "github.com/golang/protobuf/ptypes/wrappers" + "go.uber.org/zap" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + experimentdomain "github.com/bucketeer-io/bucketeer/pkg/experiment/domain" + "github.com/bucketeer-io/bucketeer/pkg/feature/command" + "github.com/bucketeer-io/bucketeer/pkg/feature/domain" + v2fs "github.com/bucketeer-io/bucketeer/pkg/feature/storage/v2" + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + "github.com/bucketeer-io/bucketeer/pkg/storage" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" + experimentproto "github.com/bucketeer-io/bucketeer/proto/experiment" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" + userproto "github.com/bucketeer-io/bucketeer/proto/user" +) + +const getMultiChunkSize = 1000 +const listRequestSize = 500 + +func (s *FeatureService) GetFeature( + ctx context.Context, + req *featureproto.GetFeatureRequest, +) (*featureproto.GetFeatureResponse, error) { + _, err := s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := validateGetFeatureRequest(req); err != nil { + return nil, err + } + featureStorage := v2fs.NewFeatureStorage(s.mysqlClient) + feature, err := 
featureStorage.GetFeature(ctx, req.Id, req.EnvironmentNamespace) + if err != nil { + if err == v2fs.ErrFeatureNotFound { + return nil, localizedError(statusNotFound, locale.JaJP) + } + s.logger.Error( + "Failed to get feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("id", req.Id), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + if err := s.setLastUsedInfosToFeatureByChunk( + ctx, + []*featureproto.Feature{feature.Feature}, + req.EnvironmentNamespace, + ); err != nil { + return nil, err + } + return &featureproto.GetFeatureResponse{Feature: feature.Feature}, nil +} + +func (s *FeatureService) GetFeatures( + ctx context.Context, + req *featureproto.GetFeaturesRequest, +) (*featureproto.GetFeaturesResponse, error) { + _, err := s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := validateGetFeaturesRequest(req); err != nil { + return nil, err + } + whereParts := []mysql.WherePart{ + mysql.NewFilter("environment_namespace", "=", req.EnvironmentNamespace), + } + ids := make([]interface{}, 0, len(req.Ids)) + for _, id := range req.Ids { + ids = append(ids, id) + } + if len(ids) > 0 { + whereParts = append(whereParts, mysql.NewInFilter("id", ids)) + } + featureStorage := v2fs.NewFeatureStorage(s.mysqlClient) + features, _, _, err := featureStorage.ListFeatures( + ctx, + whereParts, + nil, + mysql.QueryNoLimit, + mysql.QueryNoOffset, + ) + if err != nil { + s.logger.Error( + "Failed to get feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &featureproto.GetFeaturesResponse{Features: features}, nil +} + +func (s *FeatureService) ListFeatures( + ctx context.Context, + req *featureproto.ListFeaturesRequest, 
+) (*featureproto.ListFeaturesResponse, error) { + _, err := s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + var features []*featureproto.Feature + var cursor string + var totalCount int64 + if req.HasExperiment == nil { + features, cursor, totalCount, err = s.listFeatures( + ctx, + req.PageSize, + req.Cursor, + req.Tags, + req.Maintainer, + req.Enabled, + req.Archived, + req.SearchKeyword, + req.OrderBy, + req.OrderDirection, + req.EnvironmentNamespace, + ) + } else { + features, cursor, totalCount, err = s.listFeaturesFilteredByExperiment( + ctx, + req.PageSize, + req.Cursor, + req.Tags, + req.Maintainer, + req.Enabled, + req.Archived, + req.SearchKeyword, + req.OrderBy, + req.OrderDirection, + req.HasExperiment.Value, + req.EnvironmentNamespace, + ) + } + if err != nil { + return nil, err + } + return &featureproto.ListFeaturesResponse{ + Features: features, + Cursor: cursor, + TotalCount: totalCount, + }, nil +} + +func (s *FeatureService) listFeatures( + ctx context.Context, + pageSize int64, + cursor string, + tags []string, + maintainer string, + enabled *wrappers.BoolValue, + archived *wrappers.BoolValue, + searchKeyword string, + orderBy featureproto.ListFeaturesRequest_OrderBy, + orderDirection featureproto.ListFeaturesRequest_OrderDirection, + environmentNamespace string, +) ([]*featureproto.Feature, string, int64, error) { + whereParts := []mysql.WherePart{ + mysql.NewFilter("deleted", "=", false), + mysql.NewFilter("environment_namespace", "=", environmentNamespace), + } + tagValues := make([]interface{}, 0, len(tags)) + for _, tag := range tags { + tagValues = append(tagValues, tag) + } + if len(tagValues) > 0 { + whereParts = append( + whereParts, + mysql.NewJSONFilter("tags", mysql.JSONContainsString, tagValues), + ) + } + if maintainer != "" { + whereParts = append(whereParts, mysql.NewFilter("maintainer", "=", maintainer)) + } + if enabled != nil { + whereParts = append(whereParts, 
mysql.NewFilter("enabled", "=", enabled.Value)) + } + if archived != nil { + whereParts = append(whereParts, mysql.NewFilter("archived", "=", archived.Value)) + } + if searchKeyword != "" { + whereParts = append(whereParts, mysql.NewSearchQuery([]string{"id", "name", "description"}, searchKeyword)) + } + orders, err := s.newListFeaturesOrdersMySQL(orderBy, orderDirection) + if err != nil { + s.logger.Error( + "Invalid argument", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, "", 0, err + } + limit := int(pageSize) + if cursor == "" { + cursor = "0" + } + offset, err := strconv.Atoi(cursor) + if err != nil { + return nil, "", 0, localizedError(statusInvalidCursor, locale.JaJP) + } + featureStorage := v2fs.NewFeatureStorage(s.mysqlClient) + features, nextCursor, totalCount, err := featureStorage.ListFeatures( + ctx, + whereParts, + orders, + limit, + offset, + ) + if err != nil { + s.logger.Error( + "Failed to list features", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, "", 0, err + } + if err = s.setLastUsedInfosToFeatureByChunk(ctx, features, environmentNamespace); err != nil { + return nil, "", 0, err + } + return features, strconv.Itoa(nextCursor), totalCount, nil +} + +func (s *FeatureService) listFeaturesFilteredByExperiment( + ctx context.Context, + pageSize int64, + cursor string, + tags []string, + maintainer string, + enabled *wrappers.BoolValue, + archived *wrappers.BoolValue, + searchKeyword string, + orderBy featureproto.ListFeaturesRequest_OrderBy, + orderDirection featureproto.ListFeaturesRequest_OrderDirection, + hasExperiment bool, + environmentNamespace string, +) ([]*featureproto.Feature, string, int64, error) { + whereParts := []mysql.WherePart{ + mysql.NewFilter("feature.deleted", "=", false), + mysql.NewFilter("experiment.deleted", "=", 
false), + mysql.NewFilter("feature.environment_namespace", "=", environmentNamespace), + mysql.NewNullFilter("experiment.id", !hasExperiment), + } + tagValues := make([]interface{}, 0, len(tags)) + for _, tag := range tags { + tagValues = append(tagValues, tag) + } + if len(tagValues) > 0 { + whereParts = append( + whereParts, + mysql.NewJSONFilter("feature.tags", mysql.JSONContainsString, tagValues), + ) + } + if maintainer != "" { + whereParts = append(whereParts, mysql.NewFilter("feature.maintainer", "=", maintainer)) + } + if enabled != nil { + whereParts = append(whereParts, mysql.NewFilter("feature.enabled", "=", enabled.Value)) + } + if archived != nil { + whereParts = append(whereParts, mysql.NewFilter("feature.archived", "=", archived.Value)) + } + if searchKeyword != "" { + whereParts = append(whereParts, mysql.NewSearchQuery([]string{"id", "name", "description"}, searchKeyword)) + } + orders, err := s.newListFeaturesOrdersMySQL(orderBy, orderDirection) + if err != nil { + s.logger.Error( + "Invalid argument", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, "", 0, err + } + limit := int(pageSize) + if cursor == "" { + cursor = "0" + } + offset, err := strconv.Atoi(cursor) + if err != nil { + return nil, "", 0, localizedError(statusInvalidCursor, locale.JaJP) + } + featureStorage := v2fs.NewFeatureStorage(s.mysqlClient) + features, nextCursor, totalCount, err := featureStorage.ListFeaturesFilteredByExperiment( + ctx, + whereParts, + orders, + limit, + offset, + ) + if err != nil { + s.logger.Error( + "Failed to list features filtered by experiment", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, "", 0, err + } + if err = s.setLastUsedInfosToFeatureByChunk(ctx, features, environmentNamespace); err != nil { + return nil, "", 0, err + } + return 
features, strconv.Itoa(nextCursor), totalCount, nil +} + +func (s *FeatureService) newListFeaturesOrdersMySQL( + orderBy featureproto.ListFeaturesRequest_OrderBy, + orderDirection featureproto.ListFeaturesRequest_OrderDirection, +) ([]*mysql.Order, error) { + var column string + switch orderBy { + case featureproto.ListFeaturesRequest_DEFAULT, + featureproto.ListFeaturesRequest_NAME: + column = "feature.name" + case featureproto.ListFeaturesRequest_CREATED_AT: + column = "feature.created_at" + case featureproto.ListFeaturesRequest_UPDATED_AT: + column = "feature.updated_at" + case featureproto.ListFeaturesRequest_TAGS: + column = "feature.tags" + case featureproto.ListFeaturesRequest_ENABLED: + column = "feature.enabled" + default: + return nil, localizedError(statusInvalidOrderBy, locale.JaJP) + } + direction := mysql.OrderDirectionAsc + if orderDirection == featureproto.ListFeaturesRequest_DESC { + direction = mysql.OrderDirectionDesc + } + return []*mysql.Order{mysql.NewOrder(column, direction)}, nil +} + +func (s *FeatureService) ListEnabledFeatures( + ctx context.Context, + req *featureproto.ListEnabledFeaturesRequest, +) (*featureproto.ListEnabledFeaturesResponse, error) { + _, err := s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + whereParts := []mysql.WherePart{ + mysql.NewFilter("archived", "=", false), + mysql.NewFilter("enabled", "=", true), + mysql.NewFilter("deleted", "=", false), + mysql.NewFilter("environment_namespace", "=", req.EnvironmentNamespace), + } + tagValues := make([]interface{}, 0, len(req.Tags)) + for _, tag := range req.Tags { + tagValues = append(tagValues, tag) + } + if len(tagValues) > 0 { + whereParts = append( + whereParts, + mysql.NewJSONFilter("tags", mysql.JSONContainsString, tagValues), + ) + } + limit := int(req.PageSize) + cursor := req.Cursor + if cursor == "" { + cursor = "0" + } + offset, err := strconv.Atoi(cursor) + if err != nil { + return nil, 
localizedError(statusInvalidCursor, locale.JaJP) + } + featureStorage := v2fs.NewFeatureStorage(s.mysqlClient) + features, nextCursor, _, err := featureStorage.ListFeatures( + ctx, + whereParts, + nil, + limit, + offset, + ) + if err != nil { + s.logger.Error( + "Failed to list enabled features", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, err + } + if err = s.setLastUsedInfosToFeatureByChunk(ctx, features, req.EnvironmentNamespace); err != nil { + return nil, err + } + return &featureproto.ListEnabledFeaturesResponse{ + Features: features, + Cursor: strconv.Itoa(nextCursor), + }, nil +} + +func (s *FeatureService) CreateFeature( + ctx context.Context, + req *featureproto.CreateFeatureRequest, +) (*featureproto.CreateFeatureResponse, error) { + editor, err := s.checkRole(ctx, accountproto.Account_EDITOR, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err = validateCreateFeatureRequest(req.Command); err != nil { + return nil, err + } + feature, err := domain.NewFeature( + req.Command.Id, + req.Command.Name, + req.Command.Description, + req.Command.VariationType, + req.Command.Variations, + req.Command.Tags, + int(req.Command.DefaultOnVariationIndex.Value), + int(req.Command.DefaultOffVariationIndex.Value), + editor.Email, + ) + if err != nil { + return nil, err + } + var handler *command.FeatureCommandHandler = command.NewEmptyFeatureCommandHandler() + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + if err := s.upsertTags(ctx, tx, req.Command.Tags, req.EnvironmentNamespace); err != nil { + return err + } + + featureStorage := v2fs.NewFeatureStorage(tx) + if err := 
featureStorage.CreateFeature(ctx, feature, req.EnvironmentNamespace); err != nil { + s.logger.Error( + "Failed to store feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return err + } + handler = command.NewFeatureCommandHandler(editor, feature, req.EnvironmentNamespace, "") + if err := handler.Handle(ctx, req.Command); err != nil { + s.logger.Error( + "Failed to create feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return err + } + return nil + }) + if err != nil { + if err == v2fs.ErrFeatureAlreadyExists { + return nil, localizedError(statusAlreadyExists, locale.JaJP) + } + s.logger.Error( + "Failed to create feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + if errs := s.publishDomainEvents(ctx, handler.Events); len(errs) > 0 { + s.logger.Error( + "Failed to publish events", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Any("errors", errs), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &featureproto.CreateFeatureResponse{}, nil +} + +func (s *FeatureService) UpdateFeatureDetails( + ctx context.Context, + req *featureproto.UpdateFeatureDetailsRequest, +) (*featureproto.UpdateFeatureDetailsResponse, error) { + editor, err := s.checkRole(ctx, accountproto.Account_EDITOR, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if req.Id == "" { + return nil, localizedError(statusMissingID, locale.JaJP) + } + runningExperimentExists, err := s.existsRunningExperiment(ctx, req.Id, req.EnvironmentNamespace) + if err != nil { + return nil, localizedError(statusInternal, locale.JaJP) 
+ } + if runningExperimentExists { + return nil, localizedError(statusWaitingOrRunningExperimentExists, locale.JaJP) + } + var handler *command.FeatureCommandHandler = command.NewEmptyFeatureCommandHandler() + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + featureStorage := v2fs.NewFeatureStorage(tx) + feature, err := featureStorage.GetFeature(ctx, req.Id, req.EnvironmentNamespace) + if err != nil { + s.logger.Error( + "Failed to get feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return err + } + handler = command.NewFeatureCommandHandler(editor, feature, req.EnvironmentNamespace, req.Comment) + err = handler.Handle(ctx, &featureproto.IncrementFeatureVersionCommand{}) + if err != nil { + s.logger.Error( + "Failed to increment feature version", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return err + } + if req.RenameFeatureCommand != nil { + err = handler.Handle(ctx, req.RenameFeatureCommand) + if err != nil { + s.logger.Error( + "Failed to rename feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return err + } + } + if req.ChangeDescriptionCommand != nil { + err = handler.Handle(ctx, req.ChangeDescriptionCommand) + if err != nil { + s.logger.Error( + "Failed to change feature description", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return err + } + } + if req.AddTagCommands != nil { + for i := range 
req.AddTagCommands { + err = handler.Handle(ctx, req.AddTagCommands[i]) + if err != nil { + s.logger.Error( + "Failed to add tag to feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return err + } + } + tags := []string{} + for _, c := range req.AddTagCommands { + tags = append(tags, c.Tag) + } + if err := s.upsertTags(ctx, tx, tags, req.EnvironmentNamespace); err != nil { + return err + } + } + if req.RemoveTagCommands != nil { + for i := range req.RemoveTagCommands { + err = handler.Handle(ctx, req.RemoveTagCommands[i]) + if err != nil { + s.logger.Error( + "Failed to remove tag from feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return err + } + } + } + err = featureStorage.UpdateFeature(ctx, feature, req.EnvironmentNamespace) + if err != nil { + s.logger.Error( + "Failed to update feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return err + } + return nil + }) + if err != nil { + return nil, err + } + if errs := s.publishDomainEvents(ctx, handler.Events); len(errs) > 0 { + s.logger.Error( + "Failed to publish events", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Any("errors", errs), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &featureproto.UpdateFeatureDetailsResponse{}, nil +} + +func (s *FeatureService) existsRunningExperiment( + ctx context.Context, + featureID, environmentNamespace string, +) (bool, error) { + experiments, err := s.listExperiments(ctx, environmentNamespace, featureID) + if err != nil { + s.logger.Error( + "Failed to list experiments", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + 
zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return false, err + } + return containsRunningExperiment(experiments), nil +} + +func containsRunningExperiment(experiments []*experimentproto.Experiment) bool { + now := time.Now() + for _, e := range experiments { + de := &experimentdomain.Experiment{Experiment: e} + if de.IsNotFinished(now) { + return true + } + } + return false +} + +// FIXME: remove this API after the new console is released +// Deprecated +func (s *FeatureService) EnableFeature( + ctx context.Context, + req *featureproto.EnableFeatureRequest, +) (*featureproto.EnableFeatureResponse, error) { + if err := validateEnableFeatureRequest(req); err != nil { + return nil, err + } + if err := s.updateFeature(ctx, req.Command, req.Id, req.EnvironmentNamespace, req.Comment); err != nil { + if status.Code(err) == codes.Internal { + s.logger.Error( + "Failed to enable feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + } + return nil, err + } + return &featureproto.EnableFeatureResponse{}, nil +} + +// FIXME: remove this API after the new console is released +// Deprecated +func (s *FeatureService) DisableFeature( + ctx context.Context, + req *featureproto.DisableFeatureRequest, +) (*featureproto.DisableFeatureResponse, error) { + if err := validateDisableFeatureRequest(req); err != nil { + return nil, err + } + if err := s.updateFeature(ctx, req.Command, req.Id, req.EnvironmentNamespace, req.Comment); err != nil { + if status.Code(err) == codes.Internal { + s.logger.Error( + "Failed to disable feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + } + return nil, err + } + return &featureproto.DisableFeatureResponse{}, nil +} + +func (s *FeatureService) ArchiveFeature( + ctx context.Context, + req *featureproto.ArchiveFeatureRequest, +) 
(*featureproto.ArchiveFeatureResponse, error) { + whereParts := []mysql.WherePart{ + mysql.NewFilter("archived", "=", false), + mysql.NewFilter("deleted", "=", false), + mysql.NewFilter("environment_namespace", "=", req.EnvironmentNamespace), + } + featureStorage := v2fs.NewFeatureStorage(s.mysqlClient) + features, _, _, err := featureStorage.ListFeatures( + ctx, + whereParts, + nil, + mysql.QueryNoLimit, + mysql.QueryNoOffset, + ) + if err != nil { + s.logger.Error( + "Failed to list feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, err + } + if err := validateArchiveFeatureRequest(req, features); err != nil { + return nil, err + } + if err := s.updateFeature(ctx, req.Command, req.Id, req.EnvironmentNamespace, req.Comment); err != nil { + if status.Code(err) == codes.Internal { + s.logger.Error( + "Failed to archive feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + } + return nil, err + } + return &featureproto.ArchiveFeatureResponse{}, nil +} + +func (s *FeatureService) UnarchiveFeature( + ctx context.Context, + req *featureproto.UnarchiveFeatureRequest, +) (*featureproto.UnarchiveFeatureResponse, error) { + if err := validateUnarchiveFeatureRequest(req); err != nil { + return nil, err + } + if err := s.updateFeature(ctx, req.Command, req.Id, req.EnvironmentNamespace, req.Comment); err != nil { + if status.Code(err) == codes.Internal { + s.logger.Error( + "Failed to unarchive feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + } + return nil, err + } + return &featureproto.UnarchiveFeatureResponse{}, nil +} + +func (s *FeatureService) DeleteFeature( + ctx context.Context, + req *featureproto.DeleteFeatureRequest, +) 
(*featureproto.DeleteFeatureResponse, error) { + if err := validateDeleteFeatureRequest(req); err != nil { + return nil, err + } + if err := s.updateFeature(ctx, req.Command, req.Id, req.EnvironmentNamespace, req.Comment); err != nil { + if status.Code(err) == codes.Internal { + s.logger.Error( + "Failed to delete feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + } + return nil, err + } + return &featureproto.DeleteFeatureResponse{}, nil +} + +func (s *FeatureService) updateFeature( + ctx context.Context, + cmd command.Command, + id, environmentNamespace, comment string, +) error { + editor, err := s.checkRole(ctx, accountproto.Account_EDITOR, environmentNamespace) + if err != nil { + return err + } + if id == "" { + return localizedError(statusMissingID, locale.JaJP) + } + if cmd == nil { + return localizedError(statusMissingCommand, locale.JaJP) + } + runningExperimentExists, err := s.existsRunningExperiment(ctx, id, environmentNamespace) + if err != nil { + return localizedError(statusInternal, locale.JaJP) + } + if runningExperimentExists { + return localizedError(statusWaitingOrRunningExperimentExists, locale.JaJP) + } + var handler *command.FeatureCommandHandler = command.NewEmptyFeatureCommandHandler() + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return localizedError(statusInternal, locale.JaJP) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + featureStorage := v2fs.NewFeatureStorage(tx) + feature, err := featureStorage.GetFeature(ctx, id, environmentNamespace) + if err != nil { + s.logger.Error( + "Failed to get feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return err + } + handler = 
command.NewFeatureCommandHandler(editor, feature, environmentNamespace, comment) + err = handler.Handle(ctx, &featureproto.IncrementFeatureVersionCommand{}) + if err != nil { + s.logger.Error( + "Failed to increment feature version", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return err + } + if err := handler.Handle(ctx, cmd); err != nil { + s.logger.Error( + "Failed to handle command", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return err + } + if err := featureStorage.UpdateFeature(ctx, feature, environmentNamespace); err != nil { + s.logger.Error( + "Failed to update feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return err + } + return nil + }) + if err != nil { + return s.convUpdateFeatureError(err) + } + if errs := s.publishDomainEvents(ctx, handler.Events); len(errs) > 0 { + s.logger.Error( + "Failed to publish events", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Any("errors", errs), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return localizedError(statusInternal, locale.JaJP) + } + return nil +} + +func (s *FeatureService) convUpdateFeatureError(err error) error { + switch err { + case v2fs.ErrFeatureNotFound, + v2fs.ErrFeatureUnexpectedAffectedRows, + storage.ErrKeyNotFound: + return localizedError(statusNotFound, locale.JaJP) + case domain.ErrAlreadyDisabled: + return localizedError(statusNothingChange, locale.JaJP) + case domain.ErrAlreadyEnabled: + return localizedError(statusNothingChange, locale.JaJP) + default: + return localizedError(statusInternal, locale.JaJP) + } +} + +func (s *FeatureService) UpdateFeatureVariations( + ctx context.Context, + req *featureproto.UpdateFeatureVariationsRequest, +) 
(*featureproto.UpdateFeatureVariationsResponse, error) { + editor, err := s.checkRole(ctx, accountproto.Account_EDITOR, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if req.Id == "" { + return nil, localizedError(statusMissingID, locale.JaJP) + } + runningExperimentExists, err := s.existsRunningExperiment(ctx, req.Id, req.EnvironmentNamespace) + if err != nil { + return nil, localizedError(statusInternal, locale.JaJP) + } + if runningExperimentExists { + return nil, localizedError(statusWaitingOrRunningExperimentExists, locale.JaJP) + } + commands := make([]command.Command, 0, len(req.Commands)) + for _, c := range req.Commands { + cmd, err := command.UnmarshalCommand(c) + if err != nil { + s.logger.Error( + "Failed to unmarshal command", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, err + } + commands = append(commands, cmd) + } + var handler *command.FeatureCommandHandler = command.NewEmptyFeatureCommandHandler() + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + whereParts := []mysql.WherePart{ + mysql.NewFilter("archived", "=", false), + mysql.NewFilter("deleted", "=", false), + mysql.NewFilter("environment_namespace", "=", req.EnvironmentNamespace), + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + featureStorage := v2fs.NewFeatureStorage(tx) + features, _, _, err := featureStorage.ListFeatures( + ctx, + whereParts, + nil, + mysql.QueryNoLimit, + mysql.QueryNoOffset, + ) + if err != nil { + s.logger.Error( + "Failed to list feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return err + } + for _, cmd := range 
commands { + if err := validateFeatureVariationsCommand(features, cmd); err != nil { + s.logger.Info( + "Invalid argument", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return err + } + } + f, err := findFeature(features, req.Id) + if err != nil { + s.logger.Error( + "Failed to find feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return err + } + feature := &domain.Feature{Feature: f} + handler = command.NewFeatureCommandHandler(editor, feature, req.EnvironmentNamespace, req.Comment) + err = handler.Handle(ctx, &featureproto.IncrementFeatureVersionCommand{}) + if err != nil { + s.logger.Error( + "Failed to increment feature version", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return err + } + for _, cmd := range commands { + err = handler.Handle(ctx, cmd) + if err != nil { + // TODO: make this error log more specific. 
+ s.logger.Error( + "Failed to handle command", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return err + } + } + err = featureStorage.UpdateFeature(ctx, feature, req.EnvironmentNamespace) + if err != nil { + s.logger.Error( + "Failed to update feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return err + } + return nil + }) + if err != nil { + return nil, err + } + if errs := s.publishDomainEvents(ctx, handler.Events); len(errs) > 0 { + s.logger.Error( + "Failed to publish events", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Any("errors", errs), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &featureproto.UpdateFeatureVariationsResponse{}, nil +} + +func (s *FeatureService) publishDomainEvents(ctx context.Context, events []*eventproto.Event) map[string]error { + messages := make([]publisher.Message, 0, len(events)) + for _, event := range events { + messages = append(messages, event) + } + return s.domainPublisher.PublishMulti(ctx, messages) +} + +func (s *FeatureService) UpdateFeatureTargeting( + ctx context.Context, + req *featureproto.UpdateFeatureTargetingRequest, +) (*featureproto.UpdateFeatureTargetingResponse, error) { + editor, err := s.checkRole(ctx, accountproto.Account_EDITOR, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if req.Id == "" { + return nil, localizedError(statusMissingID, locale.JaJP) + } + commands := make([]command.Command, 0, len(req.Commands)) + for _, c := range req.Commands { + cmd, err := command.UnmarshalCommand(c) + if err != nil { + s.logger.Error( + "Failed to unmarshal command", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", 
req.EnvironmentNamespace), + )..., + ) + return nil, err + } + commands = append(commands, cmd) + } + runningExperimentExists, err := s.existsRunningExperiment(ctx, req.Id, req.EnvironmentNamespace) + if err != nil { + return nil, localizedError(statusInternal, locale.JaJP) + } + if runningExperimentExists { + return nil, localizedError(statusWaitingOrRunningExperimentExists, locale.JaJP) + } + // TODO: clean this up. + // Problem: Changes in the UI should be atomic meaning either all or no changes will be made. + // This means a transaction spanning all changes is needed. + // Also: + // Normally each command should be usable alone (load the feature from the repository change it and save it). + // Also here because many commands are run sequentially they all expect the same version of the feature. + var handler *command.FeatureCommandHandler = command.NewEmptyFeatureCommandHandler() + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + whereParts := []mysql.WherePart{ + mysql.NewFilter("archived", "=", false), + mysql.NewFilter("deleted", "=", false), + mysql.NewFilter("environment_namespace", "=", req.EnvironmentNamespace), + } + featureStorage := v2fs.NewFeatureStorage(tx) + features, _, _, err := featureStorage.ListFeatures( + ctx, + whereParts, + nil, + mysql.QueryNoLimit, + mysql.QueryNoOffset, + ) + if err != nil { + s.logger.Error( + "Failed to list feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return err + } + f, err := findFeature(features, req.Id) + if err != nil { + s.logger.Error( + "Failed to find feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + 
zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return err + } + for _, cmd := range commands { + if err := validateFeatureTargetingCommand(features, f, cmd); err != nil { + s.logger.Info( + "Invalid argument", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return err + } + } + feature := &domain.Feature{Feature: f} + handler = command.NewFeatureCommandHandler( + editor, + feature, + req.EnvironmentNamespace, + req.Comment, + ) + err = handler.Handle(ctx, &featureproto.IncrementFeatureVersionCommand{}) + if err != nil { + s.logger.Error( + "Failed to increment feature version", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return err + } + for _, cmd := range commands { + err = handler.Handle(ctx, cmd) + if err != nil { + // TODO: same as above. Make it more specific. 
+ s.logger.Error( + "Failed to handle command", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return err + } + } + err = featureStorage.UpdateFeature(ctx, feature, req.EnvironmentNamespace) + if err != nil { + s.logger.Error( + "Failed to update feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return err + } + return nil + }) + if err != nil { + return nil, err + } + if errs := s.publishDomainEvents(ctx, handler.Events); len(errs) > 0 { + s.logger.Error( + "Failed to publish events", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Any("errors", errs), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &featureproto.UpdateFeatureTargetingResponse{}, nil +} + +func findFeature(fs []*featureproto.Feature, id string) (*featureproto.Feature, error) { + for _, f := range fs { + if f.Id == id { + return f, nil + } + } + return nil, localizedError(statusInternal, locale.JaJP) +} + +func (s *FeatureService) evaluateFeatures( + ctx context.Context, + user *userproto.User, + environmentNamespace string, + tag string, +) (*featureproto.UserEvaluations, error) { + fs, err, _ := s.flightgroup.Do( + environmentNamespace, + func() (interface{}, error) { + return s.getFeatures(ctx, environmentNamespace) + }, + ) + if err != nil { + s.logger.Error( + "Failed to list features", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + features := fs.([]*featureproto.Feature) + mapIDs := make(map[string]struct{}) + for _, f := range features { + feature := &domain.Feature{Feature: f} + for _, id := range feature.ListSegmentIDs() { + mapIDs[id] 
= struct{}{} + } + } + mapSegmentUsers, err := s.listSegmentUsers(ctx, mapIDs, environmentNamespace) + if err != nil { + s.logger.Error( + "Failed to list segments", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, err + } + userEvaluations, err := domain.EvaluateFeatures(features, user, mapSegmentUsers, tag) + if err != nil { + s.logger.Error( + "Failed to evaluate", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + } + return userEvaluations, nil +} + +func (s *FeatureService) getFeatures( + ctx context.Context, + environmentNamespace string, +) ([]*featureproto.Feature, error) { + features, err := s.featuresCache.Get(environmentNamespace) + if err == nil { + return features.Features, nil + } + s.logger.Info( + "No cached data for Features", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + fs, _, _, err := s.listFeatures( + ctx, + mysql.QueryNoLimit, + "", + nil, + "", + nil, + nil, + "", + featureproto.ListFeaturesRequest_DEFAULT, + featureproto.ListFeaturesRequest_ASC, + environmentNamespace, + ) + if err != nil { + s.logger.Error( + "Failed to retrive features from storage", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, err + } + return fs, nil +} + +func (s *FeatureService) listSegmentUsers( + ctx context.Context, + mapSegmentIDs map[string]struct{}, + environmentNamespace string, +) (map[string][]*featureproto.SegmentUser, error) { + if len(mapSegmentIDs) == 0 { + return nil, nil + } + users := make(map[string][]*featureproto.SegmentUser) + for segmentID := range mapSegmentIDs { + s, err, _ := s.flightgroup.Do( + s.segmentFlightID(environmentNamespace, segmentID), + func() 
(interface{}, error) { + return s.getSegmentUsers(ctx, segmentID, environmentNamespace) + }, + ) + if err != nil { + return nil, err + } + listUsers := s.([]*featureproto.SegmentUser) + users[segmentID] = listUsers + } + return users, nil +} + +func (s *FeatureService) segmentFlightID(environmentNamespace, segmentID string) string { + return fmt.Sprintf("%s:%s", environmentNamespace, segmentID) +} + +func (s *FeatureService) getSegmentUsers( + ctx context.Context, + segmentID, environmentNamespace string, +) ([]*featureproto.SegmentUser, error) { + segmentUsers, err := s.segmentUsersCache.Get(segmentID, environmentNamespace) + if err == nil { + return segmentUsers.Users, nil + } + s.logger.Info( + "No cached data for SegmentUsers", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + zap.String("segmentId", segmentID), + )..., + ) + req := &featureproto.ListSegmentUsersRequest{ + SegmentId: segmentID, + EnvironmentNamespace: environmentNamespace, + } + res, err := s.ListSegmentUsers(ctx, req) + if err != nil { + s.logger.Error( + "Failed to retrieve segment users from storage", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + zap.String("segmentId", segmentID), + )..., + ) + return nil, err + } + su := &featureproto.SegmentUsers{ + SegmentId: segmentID, + Users: res.Users, + } + if err := s.segmentUsersCache.Put(su, environmentNamespace); err != nil { + s.logger.Error( + "Failed to cache segment users", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + zap.String("segmentId", segmentID), + )..., + ) + } + return res.Users, nil +} + +func (s *FeatureService) setLastUsedInfosToFeatureByChunk( + ctx context.Context, + features []*featureproto.Feature, + environmentNamespace string, +) error { + for i := 0; i < len(features); i += 
getMultiChunkSize { + end := i + getMultiChunkSize + if end > len(features) { + end = len(features) + } + if err := s.setLastUsedInfosToFeature(ctx, features[i:end], environmentNamespace); err != nil { + return err + } + } + return nil +} + +func (s *FeatureService) setLastUsedInfosToFeature( + ctx context.Context, + features []*featureproto.Feature, + environmentNamespace string, +) error { + ids := make([]string, 0, len(features)) + for _, f := range features { + ids = append(ids, domain.FeatureLastUsedInfoID(f.Id, f.Version)) + } + storage := v2fs.NewFeatureLastUsedInfoStorage(s.mysqlClient) + fluiList, err := storage.GetFeatureLastUsedInfos(ctx, ids, environmentNamespace) + if err != nil { + s.logger.Error( + "Failed to get feature last used infos", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return localizedError(statusInternal, locale.JaJP) + } + for _, f := range fluiList { + for _, feature := range features { + if feature.Id == f.FeatureLastUsedInfo.FeatureId { + feature.LastUsedInfo = f.FeatureLastUsedInfo + break + } + } + } + return nil +} + +func (s *FeatureService) EvaluateFeatures( + ctx context.Context, + req *featureproto.EvaluateFeaturesRequest, +) (*featureproto.EvaluateFeaturesResponse, error) { + _, err := s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := validateEvaluateFeatures(req); err != nil { + s.logger.Info( + "Invalid argument", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, err + } + userEvaluations, err := s.evaluateFeatures(ctx, req.User, req.EnvironmentNamespace, req.Tag) + if err != nil { + s.logger.Error( + "Failed to evaluate features", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", 
req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &featureproto.EvaluateFeaturesResponse{UserEvaluations: userEvaluations}, nil +} + +func (s *FeatureService) listExperiments( + ctx context.Context, + environmentNamespace, featureID string, +) ([]*experimentproto.Experiment, error) { + experiments := []*experimentproto.Experiment{} + cursor := "" + for { + resp, err := s.experimentClient.ListExperiments(ctx, &experimentproto.ListExperimentsRequest{ + FeatureId: featureID, + PageSize: listRequestSize, + Cursor: cursor, + EnvironmentNamespace: environmentNamespace, + }) + if err != nil { + return nil, err + } + experiments = append(experiments, resp.Experiments...) + featureSize := len(resp.Experiments) + if featureSize == 0 || featureSize < listRequestSize { + return experiments, nil + } + cursor = resp.Cursor + } +} + +func (s *FeatureService) CloneFeature( + ctx context.Context, + req *featureproto.CloneFeatureRequest, +) (*featureproto.CloneFeatureResponse, error) { + if err := validateCloneFeatureRequest(req); err != nil { + return nil, err + } + editor, err := s.checkRole(ctx, accountproto.Account_EDITOR, req.Command.EnvironmentNamespace) + if err != nil { + return nil, err + } + featureStorage := v2fs.NewFeatureStorage(s.mysqlClient) + f, err := featureStorage.GetFeature(ctx, req.Id, req.EnvironmentNamespace) + if err != nil { + if err == v2fs.ErrFeatureNotFound { + return nil, localizedError(statusNotFound, locale.JaJP) + } + s.logger.Error( + "Failed to get feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("id", req.Id), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + domainFeature := &domain.Feature{ + Feature: f.Feature, + } + feature, err := domainFeature.Clone(editor.Email) + if err != nil { + return nil, err + } + var handler *command.FeatureCommandHandler = 
command.NewEmptyFeatureCommandHandler() + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + if err := featureStorage.CreateFeature(ctx, feature, req.Command.EnvironmentNamespace); err != nil { + s.logger.Error( + "Failed to store feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.Command.EnvironmentNamespace), + )..., + ) + return err + } + handler = command.NewFeatureCommandHandler(editor, feature, req.Command.EnvironmentNamespace, "") + if err := handler.Handle(ctx, req.Command); err != nil { + s.logger.Error( + "Failed to clone feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNameSpace", req.Command.EnvironmentNamespace), + )..., + ) + return err + } + return nil + }) + if err != nil { + if err == v2fs.ErrFeatureAlreadyExists { + return nil, localizedError(statusAlreadyExists, locale.JaJP) + } + s.logger.Error( + "Failed to clone feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.Command.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + if errs := s.publishDomainEvents(ctx, handler.Events); len(errs) > 0 { + s.logger.Error( + "Failed to publish events", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Any("errors", errs), + zap.String("environmentNameSpace", req.Command.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &featureproto.CloneFeatureResponse{}, nil +} diff --git a/pkg/feature/api/feature_test.go b/pkg/feature/api/feature_test.go new file mode 100644 index 000000000..64a1cdc50 --- /dev/null +++ 
b/pkg/feature/api/feature_test.go @@ -0,0 +1,2901 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/golang/protobuf/ptypes/wrappers" + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/autoops/command" + cachev3mock "github.com/bucketeer-io/bucketeer/pkg/cache/v3/mock" + "github.com/bucketeer-io/bucketeer/pkg/feature/domain" + v2fs "github.com/bucketeer-io/bucketeer/pkg/feature/storage/v2" + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/storage" + mysqlmock "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + "github.com/bucketeer-io/bucketeer/pkg/uuid" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" + userproto "github.com/bucketeer-io/bucketeer/proto/user" +) + +func TestGetFeatureMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*FeatureService) + input string + expected error + }{ + "error: id is empty": { + input: "", + expected: errMissingIDJaJP, + }, + "success": { + setup: func(s *FeatureService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), 
gomock.Any(), gomock.Any(), + ).Return(row) + rows := mysqlmock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + }, + input: "fid", + expected: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithToken() + fs := createFeatureServiceNew(mockController) + if p.setup != nil { + p.setup(fs) + } + req := &featureproto.GetFeatureRequest{ + EnvironmentNamespace: "ns0", + Id: p.input, + } + _, err := fs.GetFeature(ctx, req) + assert.Equal(t, p.expected, err) + }) + } +} + +func TestGetFeaturesMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*FeatureService) + input []string + expected error + }{ + "error: id is nil": { + input: nil, + expected: errMissingIDsJaJP, + }, + "error: contains empty id": { + input: []string{"id", ""}, + expected: errMissingIDsJaJP, + }, + "success": { + setup: func(s *FeatureService) { + rows := mysqlmock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + input: []string{"fid"}, + expected: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithToken() + fs := createFeatureServiceNew(mockController) + if p.setup != nil { + p.setup(fs) + } + req := &featureproto.GetFeaturesRequest{ + 
EnvironmentNamespace: "ns0", + Ids: p.input, + } + _, err := fs.GetFeatures(ctx, req) + assert.Equal(t, p.expected, err) + }) + } +} + +func TestListFeaturesMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := []struct { + setup func(*FeatureService) + orderBy featureproto.ListFeaturesRequest_OrderBy + hasExperiment bool + environmentNamespace string + expected error + }{ + { + setup: nil, + orderBy: featureproto.ListFeaturesRequest_OrderBy(999), + hasExperiment: false, + environmentNamespace: "ns0", + expected: errInvalidOrderByJaJP, + }, + { + setup: func(s *FeatureService) { + rows := mysqlmock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + orderBy: featureproto.ListFeaturesRequest_DEFAULT, + hasExperiment: false, + environmentNamespace: "ns0", + expected: nil, + }, + { + setup: func(s *FeatureService) { + rows := mysqlmock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + orderBy: featureproto.ListFeaturesRequest_DEFAULT, + hasExperiment: true, + environmentNamespace: "ns0", + expected: nil, + }, + } + for _, p := range 
patterns { + ctx := createContextWithToken() + service := createFeatureService(mockController) + if p.setup != nil { + p.setup(service) + } + req := &featureproto.ListFeaturesRequest{ + OrderBy: p.orderBy, + EnvironmentNamespace: "ns0", + } + _, err := service.ListFeatures(ctx, req) + assert.Equal(t, p.expected, err) + } +} + +func TestCreateFeatureMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + variations := createFeatureVariations() + tags := createFeatureTags() + patterns := []struct { + setup func(*FeatureService) + id, name, description string + variations []*featureproto.Variation + tags []string + defaultOnVariationIndex, defaultOffVariationIndex *wrappers.Int32Value + environmentNamespace string + expected error + }{ + { + setup: nil, + id: "", + name: "name", + description: "description", + variations: nil, + tags: nil, + defaultOnVariationIndex: nil, + defaultOffVariationIndex: nil, + environmentNamespace: "ns0", + expected: errMissingIDJaJP, + }, + { + setup: nil, + id: "bucketeer_id", + name: "name", + description: "description", + variations: nil, + tags: nil, + defaultOnVariationIndex: nil, + defaultOffVariationIndex: nil, + environmentNamespace: "ns0", + expected: errInvalidIDJaJP, + }, + { + setup: nil, + id: "Bucketeer-id-2019", + name: "", + description: "description", + variations: nil, + tags: nil, + defaultOnVariationIndex: nil, + defaultOffVariationIndex: nil, + environmentNamespace: "ns0", + expected: errMissingNameJaJP, + }, + { + setup: nil, + id: "Bucketeer-id-2019", + name: "name", + description: "description", + variations: nil, + tags: nil, + defaultOnVariationIndex: nil, + defaultOffVariationIndex: nil, + environmentNamespace: "ns0", + expected: errMissingFeatureVariationsJaJP, + }, + { + setup: nil, + id: "Bucketeer-id-2019", + name: "name", + description: "description", + variations: variations, + tags: nil, + environmentNamespace: "ns0", + expected: 
errMissingFeatureTagsJaJP, + }, + { + setup: nil, + id: "Bucketeer-id-2019", + name: "name", + description: "description", + variations: variations, + tags: tags, + defaultOnVariationIndex: nil, + defaultOffVariationIndex: nil, + environmentNamespace: "ns0", + expected: errMissingDefaultOnVariationJaJP, + }, + { + setup: nil, + id: "Bucketeer-id-2019", + name: "name", + description: "description", + variations: variations, + tags: tags, + defaultOnVariationIndex: &wrappers.Int32Value{Value: int32(0)}, + defaultOffVariationIndex: nil, + environmentNamespace: "ns0", + expected: errMissingDefaultOffVariationJaJP, + }, + { + setup: func(s *FeatureService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2fs.ErrFeatureAlreadyExists) + }, + id: "Bucketeer-id-2019", + name: "name", + description: "description", + variations: variations, + tags: tags, + defaultOnVariationIndex: &wrappers.Int32Value{Value: int32(0)}, + defaultOffVariationIndex: &wrappers.Int32Value{Value: int32(1)}, + environmentNamespace: "ns0", + expected: errAlreadyExistsJaJP, + }, + { + setup: func(s *FeatureService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + id: "Bucketeer-id-2019", + name: "name", + description: "description", + variations: variations, + tags: tags, + defaultOnVariationIndex: &wrappers.Int32Value{Value: int32(0)}, + defaultOffVariationIndex: &wrappers.Int32Value{Value: int32(1)}, + environmentNamespace: "ns0", + expected: nil, + }, + } + for _, p := range patterns { + ctx := createContextWithToken() + service := createFeatureService(mockController) + if p.setup != nil { + p.setup(service) + } + req := &featureproto.CreateFeatureRequest{ + 
Command: &featureproto.CreateFeatureCommand{ + Id: p.id, + Name: p.name, + Description: p.description, + Variations: p.variations, + Tags: p.tags, + DefaultOnVariationIndex: p.defaultOnVariationIndex, + DefaultOffVariationIndex: p.defaultOffVariationIndex, + }, + EnvironmentNamespace: p.environmentNamespace, + } + _, err := service.CreateFeature(ctx, req) + assert.Equal(t, p.expected, err) + } +} + +func TestSetFeatureToLastUsedInfosByChunk(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := []struct { + setup func(*FeatureService) + input []*featureproto.Feature + environmentNamespace string + expected error + }{ + { + setup: func(s *FeatureService) { + rows := mysqlmock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + }, + input: []*featureproto.Feature{ + { + Id: "feature-id-0", + Version: 1, + }, + }, + environmentNamespace: "ns0", + expected: nil, + }, + } + for _, p := range patterns { + fs := createFeatureServiceNew(mockController) + p.setup(fs) + err := fs.setLastUsedInfosToFeatureByChunk(context.Background(), p.input, p.environmentNamespace) + assert.Equal(t, p.expected, err) + } +} + +func TestConvUpdateFeatureError(t *testing.T) { + t.Parallel() + patterns := []struct { + input error + expectedErr error + }{ + { + input: v2fs.ErrFeatureNotFound, + expectedErr: errNotFoundJaJP, + }, + { + input: v2fs.ErrFeatureUnexpectedAffectedRows, + expectedErr: errNotFoundJaJP, + }, + { + input: storage.ErrKeyNotFound, + expectedErr: errNotFoundJaJP, + }, + { + input: domain.ErrAlreadyDisabled, + expectedErr: errNothingChangeJaJP, + }, + { + input: domain.ErrAlreadyEnabled, + expectedErr: errNothingChangeJaJP, + }, + { + input: errors.New("test"), + expectedErr: errInternalJaJP, + }, + } 
+ for _, p := range patterns { + fs := &FeatureService{} + err := fs.convUpdateFeatureError(p.input) + assert.Equal(t, p.expectedErr, err) + } +} + +func TestEvaluateFeatures(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + vID1 := newUUID(t) + vID2 := newUUID(t) + vID3 := newUUID(t) + vID4 := newUUID(t) + + patterns := map[string]struct { + setup func(*FeatureService) + input *featureproto.EvaluateFeaturesRequest + expected *featureproto.EvaluateFeaturesResponse + expectedErr error + }{ + "fail: ErrMissingUser": { + setup: nil, + input: &featureproto.EvaluateFeaturesRequest{}, + expected: nil, + expectedErr: errMissingUserJaJP, + }, + "fail: ErrMissingUserID": { + setup: nil, + input: &featureproto.EvaluateFeaturesRequest{User: &userproto.User{}}, + expected: nil, + expectedErr: errMissingUserIDJaJP, + }, + "fail: ErrMissingFeatureTag": { + setup: nil, + input: &featureproto.EvaluateFeaturesRequest{User: &userproto.User{Id: "test-id"}, EnvironmentNamespace: "ns0"}, + expected: nil, + expectedErr: errMissingFeatureTagJaJP, + }, + "fail: return errInternal when getting features": { + setup: func(s *FeatureService) { + s.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + nil, errors.New("error")) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + }, + input: &featureproto.EvaluateFeaturesRequest{User: &userproto.User{Id: "test-id"}, EnvironmentNamespace: "ns0", Tag: "android"}, + expected: nil, + expectedErr: localizedError(statusInternal, locale.JaJP), + }, + "success: get from cache": { + setup: func(s *FeatureService) { + s.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: []*featureproto.Feature{ + { + Id: newUUID(t), + Variations: []*featureproto.Variation{ + { + Id: vID1, + Value: "true", + }, + { + Id: 
vID2, + Value: "false", + }, + }, + Rules: []*featureproto.Rule{ + { + Id: "rule-1", + Strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: vID2, + }, + }, + Clauses: []*featureproto.Clause{ + { + Id: "clause-1", + Attribute: "name", + Operator: featureproto.Clause_SEGMENT, + Values: []string{ + "segment-id", + }, + }, + }, + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: vID1, + }, + }, + Tags: []string{"android"}, + }, + { + Id: newUUID(t), + Variations: []*featureproto.Variation{ + { + Id: vID3, + Value: "true", + }, + { + Id: vID4, + Value: "false", + }, + }, + Rules: []*featureproto.Rule{ + { + Id: "rule-1", + Strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: vID4, + }, + }, + Clauses: []*featureproto.Clause{ + { + Id: "clause-1", + Attribute: "name", + Operator: featureproto.Clause_SEGMENT, + Values: []string{ + "segment-id", + }, + }, + }, + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: vID3, + }, + }, + Tags: []string{"ios"}, + }, + }, + }, nil) + s.segmentUsersCache.(*cachev3mock.MockSegmentUsersCache).EXPECT().Get(gomock.Any(), gomock.Any()).Return( + &featureproto.SegmentUsers{ + SegmentId: "segment-id", + Users: []*featureproto.SegmentUser{ + { + SegmentId: "segment-id", + UserId: "user-id-1", + State: featureproto.SegmentUser_INCLUDED, + Deleted: false, + }, + { + SegmentId: "segment-id", + UserId: "user-id-2", + State: featureproto.SegmentUser_INCLUDED, + Deleted: false, + }, + }, + }, nil) + }, + input: &featureproto.EvaluateFeaturesRequest{User: &userproto.User{Id: "user-id-1"}, EnvironmentNamespace: "ns0", Tag: "ios"}, + expected: &featureproto.EvaluateFeaturesResponse{ + UserEvaluations: 
&featureproto.UserEvaluations{ + Evaluations: []*featureproto.Evaluation{ + { + VariationId: vID4, + Reason: &featureproto.Reason{ + Type: featureproto.Reason_RULE, + RuleId: "rule-1", + }, + }, + }, + }, + }, + expectedErr: nil, + }, + "success: get from cache and filter by tag: return empty": { + setup: func(s *FeatureService) { + s.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: []*featureproto.Feature{ + { + Id: newUUID(t), + Variations: []*featureproto.Variation{ + { + Id: vID1, + Value: "true", + }, + { + Id: vID2, + Value: "false", + }, + }, + Rules: []*featureproto.Rule{ + { + Id: "rule-1", + Strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: vID2, + }, + }, + Clauses: []*featureproto.Clause{ + { + Id: "clause-1", + Attribute: "name", + Operator: featureproto.Clause_SEGMENT, + Values: []string{ + "segment-id", + }, + }, + }, + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: vID1, + }, + }, + Tags: []string{"android"}, + }, + { + Variations: []*featureproto.Variation{ + { + Id: vID1, + Value: "true", + }, + { + Id: vID2, + Value: "false", + }, + }, + Rules: []*featureproto.Rule{ + { + Id: "rule-1", + Strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: vID2, + }, + }, + Clauses: []*featureproto.Clause{ + { + Id: "clause-1", + Attribute: "name", + Operator: featureproto.Clause_SEGMENT, + Values: []string{ + "segment-id", + }, + }, + }, + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: vID1, + }, + }, + Tags: []string{"ios"}, + }, + }, + }, nil) + s.segmentUsersCache.(*cachev3mock.MockSegmentUsersCache).EXPECT().Get(gomock.Any(), 
gomock.Any()).Return( + &featureproto.SegmentUsers{ + SegmentId: "segment-id", + Users: []*featureproto.SegmentUser{ + { + SegmentId: "segment-id", + UserId: "user-id-1", + State: featureproto.SegmentUser_INCLUDED, + Deleted: false, + }, + { + SegmentId: "segment-id", + UserId: "user-id-2", + State: featureproto.SegmentUser_INCLUDED, + Deleted: false, + }, + }, + }, nil) + }, + input: &featureproto.EvaluateFeaturesRequest{User: &userproto.User{Id: "user-id-1"}, EnvironmentNamespace: "ns0", Tag: "web"}, + expected: &featureproto.EvaluateFeaturesResponse{ + UserEvaluations: &featureproto.UserEvaluations{ + Evaluations: []*featureproto.Evaluation{}, + }, + }, + expectedErr: nil, + }, + "success: get features from storage": { + setup: func(s *FeatureService) { + s.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + nil, errors.New("error")) + rows := mysqlmock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + input: &featureproto.EvaluateFeaturesRequest{User: &userproto.User{Id: "test-id"}, EnvironmentNamespace: "ns0", Tag: "android"}, + expected: &featureproto.EvaluateFeaturesResponse{ + UserEvaluations: &featureproto.UserEvaluations{ + Evaluations: []*featureproto.Evaluation{}, + }, + }, + expectedErr: nil, + }, + "fail: return errInternal when getting segment users": { + setup: func(s *FeatureService) { + s.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: []*featureproto.Feature{ + { + Variations: []*featureproto.Variation{ + { + Id: 
vID1, + Value: "true", + }, + { + Id: vID2, + Value: "false", + }, + }, + Rules: []*featureproto.Rule{ + { + Id: "rule-1", + Strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: vID2, + }, + }, + Clauses: []*featureproto.Clause{ + { + Id: "clause-1", + Attribute: "name", + Operator: featureproto.Clause_SEGMENT, + Values: []string{ + "id-0", + }, + }, + }, + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: vID1, + }, + }, + Tags: []string{"android"}, + }, + }}, nil) + s.segmentUsersCache.(*cachev3mock.MockSegmentUsersCache).EXPECT().Get(gomock.Any(), gomock.Any()).Return( + nil, errors.New("random error")) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + }, + input: &featureproto.EvaluateFeaturesRequest{User: &userproto.User{Id: "test-id"}, EnvironmentNamespace: "ns0", Tag: "android"}, + expected: nil, + expectedErr: localizedError(statusInternal, locale.JaJP), + }, + "success: get users from storage": { + setup: func(s *FeatureService) { + s.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: []*featureproto.Feature{ + { + Variations: []*featureproto.Variation{ + { + Id: vID1, + Value: "true", + }, + { + Id: vID2, + Value: "false", + }, + }, + Rules: []*featureproto.Rule{ + { + Id: "rule-1", + Strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: vID2, + }, + }, + Clauses: []*featureproto.Clause{ + { + Id: "clause-1", + Attribute: "name", + Operator: featureproto.Clause_SEGMENT, + Values: []string{ + "id-0", + }, + }, + }, + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ 
+ Variation: vID2, + }, + }, + Tags: []string{"android"}, + }, + }}, nil) + s.segmentUsersCache.(*cachev3mock.MockSegmentUsersCache).EXPECT().Get(gomock.Any(), gomock.Any()).Return( + nil, errors.New("random error")) + rows := mysqlmock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + s.segmentUsersCache.(*cachev3mock.MockSegmentUsersCache).EXPECT().Put(gomock.Any(), gomock.Any()).Return(nil) + }, + input: &featureproto.EvaluateFeaturesRequest{User: &userproto.User{Id: "test-id"}, EnvironmentNamespace: "ns0", Tag: "android"}, + expected: &featureproto.EvaluateFeaturesResponse{ + UserEvaluations: &featureproto.UserEvaluations{ + Evaluations: []*featureproto.Evaluation{ + { + VariationId: vID2, + Reason: &featureproto.Reason{ + Type: featureproto.Reason_DEFAULT, + }, + }, + }, + }, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + ctx := createContextWithToken() + service := createFeatureService(mockController) + if p.setup != nil { + p.setup(service) + } + resp, err := service.EvaluateFeatures(ctx, p.input) + if err == nil { + if len(resp.UserEvaluations.Evaluations) > 0 { + assert.Equal(t, p.expected.UserEvaluations.Evaluations[0].VariationId, resp.UserEvaluations.Evaluations[0].VariationId, msg) + assert.Equal(t, p.expected.UserEvaluations.Evaluations[0].Reason, resp.UserEvaluations.Evaluations[0].Reason) + } else { + assert.Equal(t, p.expected.UserEvaluations.Evaluations, resp.UserEvaluations.Evaluations, msg) + } + } else { + assert.Equal(t, p.expected, resp, msg) + } + assert.Equal(t, p.expectedErr, err, msg) + } +} + +func TestUnauthenticated(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + 
service := createFeatureService(mockController) + patterns := map[string]struct { + action func(context.Context, *FeatureService) error + expected error + }{ + "GetFeature": { + action: func(ctx context.Context, fs *FeatureService) error { + _, err := fs.GetFeature(ctx, &featureproto.GetFeatureRequest{}) + return err + }, + expected: errUnauthenticatedJaJP, + }, + "GetFeatures": { + action: func(ctx context.Context, fs *FeatureService) error { + _, err := fs.GetFeatures(ctx, &featureproto.GetFeaturesRequest{}) + return err + }, + expected: errUnauthenticatedJaJP, + }, + "ListFeatures": { + action: func(ctx context.Context, fs *FeatureService) error { + _, err := fs.ListFeatures(ctx, &featureproto.ListFeaturesRequest{}) + return err + }, + expected: errUnauthenticatedJaJP, + }, + "ListFeaturesEnabled": { + action: func(ctx context.Context, fs *FeatureService) error { + _, err := fs.ListEnabledFeatures(ctx, &featureproto.ListEnabledFeaturesRequest{}) + return err + }, + expected: errUnauthenticatedJaJP, + }, + "EvaluateFeatures": { + action: func(ctx context.Context, fs *FeatureService) error { + _, err := fs.EvaluateFeatures(ctx, &featureproto.EvaluateFeaturesRequest{}) + return err + }, + expected: errUnauthenticatedJaJP, + }, + } + for msg, p := range patterns { + actual := p.action(ctx, service) + assert.Equal(t, p.expected, actual, "%s", msg) + } +} + +func TestPermissionDenied(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + ctx := createContextWithTokenRoleUnassigned() + service := createFeatureService(mockController) + patterns := map[string]struct { + action func(context.Context, *FeatureService) error + expected error + }{ + "CreateFeature": { + action: func(ctx context.Context, fs *FeatureService) error { + _, err := fs.CreateFeature(ctx, &featureproto.CreateFeatureRequest{}) + return err + }, + expected: errPermissionDeniedJaJP, + }, + "EnableFeature": { + action: func(ctx context.Context, fs 
*FeatureService) error { + _, err := fs.EnableFeature(ctx, &featureproto.EnableFeatureRequest{ + Id: "id", + Command: &featureproto.EnableFeatureCommand{}, + }) + return err + }, + expected: errPermissionDeniedJaJP, + }, + "DisableFeature": { + action: func(ctx context.Context, fs *FeatureService) error { + _, err := fs.DisableFeature(ctx, &featureproto.DisableFeatureRequest{ + Id: "id", + Command: &featureproto.DisableFeatureCommand{}, + }) + return err + }, + expected: errPermissionDeniedJaJP, + }, + "UnarchiveFeature": { + action: func(ctx context.Context, fs *FeatureService) error { + _, err := fs.UnarchiveFeature(ctx, &featureproto.UnarchiveFeatureRequest{ + Id: "id", + Command: &featureproto.UnarchiveFeatureCommand{}, + }) + return err + }, + expected: errPermissionDeniedJaJP, + }, + "DeleteFeature": { + action: func(ctx context.Context, fs *FeatureService) error { + _, err := fs.DeleteFeature(ctx, &featureproto.DeleteFeatureRequest{ + Id: "id", + Command: &featureproto.DeleteFeatureCommand{}, + }) + return err + }, + expected: errPermissionDeniedJaJP, + }, + "UpdateFeatureVariations": { + action: func(ctx context.Context, fs *FeatureService) error { + _, err := fs.UpdateFeatureVariations(ctx, &featureproto.UpdateFeatureVariationsRequest{}) + return err + }, + expected: errPermissionDeniedJaJP, + }, + "UpdateFeatureTargeting": { + action: func(ctx context.Context, fs *FeatureService) error { + _, err := fs.UpdateFeatureTargeting(ctx, &featureproto.UpdateFeatureTargetingRequest{}) + return err + }, + expected: errPermissionDeniedJaJP, + }, + "CloneFeature": { + action: func(ctx context.Context, fs *FeatureService) error { + _, err := fs.CloneFeature(ctx, &featureproto.CloneFeatureRequest{ + Id: "id", + Command: &featureproto.CloneFeatureCommand{ + EnvironmentNamespace: "ns1", + }, + EnvironmentNamespace: "ns0", + }) + return err + }, + expected: errPermissionDeniedJaJP, + }, + } + for msg, p := range patterns { + actual := p.action(ctx, service) + 
assert.Equal(t, p.expected, actual, "%s", msg) + } +} + +func TestEnableFeatureMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := []struct { + setup func(*FeatureService) + req *featureproto.EnableFeatureRequest + expectedErr error + }{ + { + setup: nil, + req: &featureproto.EnableFeatureRequest{ + EnvironmentNamespace: "ns0", + }, + expectedErr: errMissingIDJaJP, + }, + { + setup: nil, + req: &featureproto.EnableFeatureRequest{ + Id: "id-0", + EnvironmentNamespace: "ns0", + }, + expectedErr: errMissingCommandJaJP, + }, + { + setup: func(s *FeatureService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2fs.ErrFeatureNotFound) + }, + req: &featureproto.EnableFeatureRequest{ + Id: "id-0", + Command: &featureproto.EnableFeatureCommand{}, + EnvironmentNamespace: "ns0", + }, + expectedErr: errNotFoundJaJP, + }, + { + setup: func(s *FeatureService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + req: &featureproto.EnableFeatureRequest{ + Id: "id-1", + Command: &featureproto.EnableFeatureCommand{}, + EnvironmentNamespace: "ns0", + }, + expectedErr: nil, + }, + } + for _, p := range patterns { + ctx := createContextWithToken() + service := createFeatureService(mockController) + if p.setup != nil { + p.setup(service) + } + _, err := service.EnableFeature(ctx, p.req) + assert.Equal(t, p.expectedErr, err) + } +} + +func TestDisableFeatureMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := []struct { + setup func(*FeatureService) + req *featureproto.DisableFeatureRequest + expectedErr 
error + }{ + { + setup: nil, + req: &featureproto.DisableFeatureRequest{ + EnvironmentNamespace: "ns0", + }, + expectedErr: errMissingIDJaJP, + }, + { + setup: nil, + req: &featureproto.DisableFeatureRequest{ + Id: "id-0", + EnvironmentNamespace: "ns0", + }, + expectedErr: errMissingCommandJaJP, + }, + { + setup: func(s *FeatureService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2fs.ErrFeatureNotFound) + }, + req: &featureproto.DisableFeatureRequest{ + Id: "id-0", + Command: &featureproto.DisableFeatureCommand{}, + EnvironmentNamespace: "ns0", + }, + expectedErr: errNotFoundJaJP, + }, + { + setup: func(s *FeatureService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + req: &featureproto.DisableFeatureRequest{ + Id: "id-1", + Command: &featureproto.DisableFeatureCommand{}, + EnvironmentNamespace: "ns0", + }, + expectedErr: nil, + }, + } + for _, p := range patterns { + ctx := createContextWithToken() + service := createFeatureService(mockController) + if p.setup != nil { + p.setup(service) + } + _, err := service.DisableFeature(ctx, p.req) + assert.Equal(t, p.expectedErr, err) + } +} + +func TestValidateArchiveFeature(t *testing.T) { + t.Parallel() + f0 := makeFeature("fID-0") + f1 := makeFeature("fID-1") + f2 := makeFeature("fID-2") + f3 := makeFeature("fID-3") + f4 := makeFeature("fID-4") + f5 := makeFeature("fID-5") + + patterns := []struct { + req *featureproto.ArchiveFeatureRequest + fs []*featureproto.Feature + expectedErr error + }{ + { + req: &featureproto.ArchiveFeatureRequest{ + EnvironmentNamespace: "ns0", + }, + fs: nil, + expectedErr: errMissingIDJaJP, + }, + { + req: &featureproto.ArchiveFeatureRequest{ + Id: 
"id-0", + EnvironmentNamespace: "ns0", + }, + fs: nil, + expectedErr: errMissingCommandJaJP, + }, + { + req: &featureproto.ArchiveFeatureRequest{ + Id: "fID-0", + EnvironmentNamespace: "ns0", + Command: &featureproto.ArchiveFeatureCommand{}, + }, + fs: []*featureproto.Feature{ + { + Id: f0.Id, + }, + { + Id: f1.Id, + }, + { + Id: f2.Id, + }, + { + Id: f3.Id, + Prerequisites: []*featureproto.Prerequisite{ + { + FeatureId: f4.Id, + }, + { + FeatureId: f5.Id, + }, + }, + }, + { + Id: f4.Id, + Prerequisites: []*featureproto.Prerequisite{ + { + FeatureId: f0.Id, + }, + { + FeatureId: f2.Id, + }, + }, + }, + { + Id: f5.Id, + }, + }, + expectedErr: localizedError(statusInvalidArchive, locale.JaJP), + }, + { + req: &featureproto.ArchiveFeatureRequest{ + Id: "fID-0", + EnvironmentNamespace: "ns0", + Command: &featureproto.ArchiveFeatureCommand{}, + }, + fs: []*featureproto.Feature{ + { + Id: f0.Id, + }, + { + Id: f1.Id, + }, + { + Id: f2.Id, + }, + { + Id: f3.Id, + Prerequisites: []*featureproto.Prerequisite{ + { + FeatureId: f2.Id, + }, + { + FeatureId: f1.Id, + }, + }, + }, + { + Id: f4.Id, + Prerequisites: []*featureproto.Prerequisite{ + { + FeatureId: f5.Id, + }, + }, + }, + { + Id: f5.Id, + }, + }, + expectedErr: nil, + }, + } + for _, p := range patterns { + err := validateArchiveFeatureRequest(p.req, p.fs) + assert.Equal(t, p.expectedErr, err) + } +} + +func TestUnarchiveFeatureMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := []struct { + setup func(*FeatureService) + req *featureproto.UnarchiveFeatureRequest + expectedErr error + }{ + { + setup: nil, + req: &featureproto.UnarchiveFeatureRequest{ + EnvironmentNamespace: "ns0", + }, + expectedErr: errMissingIDJaJP, + }, + { + setup: nil, + req: &featureproto.UnarchiveFeatureRequest{ + Id: "id-0", + EnvironmentNamespace: "ns0", + }, + expectedErr: errMissingCommandJaJP, + }, + { + setup: func(s *FeatureService) { + 
s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2fs.ErrFeatureNotFound) + }, + req: &featureproto.UnarchiveFeatureRequest{ + Id: "id-0", + Command: &featureproto.UnarchiveFeatureCommand{}, + EnvironmentNamespace: "ns0", + }, + expectedErr: errNotFoundJaJP, + }, + { + setup: func(s *FeatureService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + req: &featureproto.UnarchiveFeatureRequest{ + Id: "id-1", + Command: &featureproto.UnarchiveFeatureCommand{}, + EnvironmentNamespace: "ns0", + }, + expectedErr: nil, + }, + } + for _, p := range patterns { + ctx := createContextWithToken() + service := createFeatureService(mockController) + if p.setup != nil { + p.setup(service) + } + _, err := service.UnarchiveFeature(ctx, p.req) + assert.Equal(t, p.expectedErr, err) + } +} + +func TestDeleteFeatureMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := []struct { + setup func(*FeatureService) + req *featureproto.DeleteFeatureRequest + expectedErr error + }{ + { + setup: nil, + req: &featureproto.DeleteFeatureRequest{ + EnvironmentNamespace: "ns0", + }, + expectedErr: errMissingIDJaJP, + }, + { + setup: nil, + req: &featureproto.DeleteFeatureRequest{ + Id: "id-0", + EnvironmentNamespace: "ns0", + }, + expectedErr: errMissingCommandJaJP, + }, + { + setup: func(s *FeatureService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2fs.ErrFeatureNotFound) + }, + req: &featureproto.DeleteFeatureRequest{ + Id: "id-0", 
+ Command: &featureproto.DeleteFeatureCommand{}, + EnvironmentNamespace: "ns0", + }, + expectedErr: errNotFoundJaJP, + }, + { + setup: func(s *FeatureService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + req: &featureproto.DeleteFeatureRequest{ + Id: "id-1", + Command: &featureproto.DeleteFeatureCommand{}, + EnvironmentNamespace: "ns0", + }, + expectedErr: nil, + }, + } + for _, p := range patterns { + ctx := createContextWithToken() + service := createFeatureService(mockController) + if p.setup != nil { + p.setup(service) + } + _, err := service.DeleteFeature(ctx, p.req) + assert.Equal(t, p.expectedErr, err) + } +} + +func TestCloneFeatureMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := []struct { + setup func(*FeatureService) + req *featureproto.CloneFeatureRequest + expectedErr error + }{ + { + setup: nil, + req: &featureproto.CloneFeatureRequest{ + Id: "", + }, + expectedErr: errMissingIDJaJP, + }, + { + setup: nil, + req: &featureproto.CloneFeatureRequest{ + Id: "id-0", + EnvironmentNamespace: "ns0", + }, + expectedErr: errMissingCommandJaJP, + }, + { + setup: nil, + req: &featureproto.CloneFeatureRequest{ + Id: "id-0", + Command: &featureproto.CloneFeatureCommand{ + EnvironmentNamespace: "ns0", + }, + EnvironmentNamespace: "ns0", + }, + expectedErr: errIncorrectDestinationEnvironmentJaJP, + }, + { + setup: func(s *FeatureService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + 
gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2fs.ErrFeatureAlreadyExists) + }, + req: &featureproto.CloneFeatureRequest{ + Id: "id-0", + Command: &featureproto.CloneFeatureCommand{ + EnvironmentNamespace: "ns1", + }, + EnvironmentNamespace: "ns0", + }, + expectedErr: errAlreadyExistsJaJP, + }, + { + setup: func(s *FeatureService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + req: &featureproto.CloneFeatureRequest{ + Id: "id-0", + Command: &featureproto.CloneFeatureCommand{ + EnvironmentNamespace: "ns1", + }, + EnvironmentNamespace: "ns0", + }, + expectedErr: nil, + }, + } + + for _, p := range patterns { + ctx := createContextWithToken() + service := createFeatureService(mockController) + if p.setup != nil { + p.setup(service) + } + _, err := service.CloneFeature(ctx, p.req) + assert.Equal(t, p.expectedErr, err) + } +} + +func TestAddFixedStrategyRule(t *testing.T) { + t.Parallel() + f := makeFeature("feature-id") + rID := newUUID(t) + vID := f.Variations[0].Id + expected := &featureproto.Rule{ + Id: rID, + Strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{Variation: vID}, + }, + } + + patterns := []struct { + rule *featureproto.Rule + expected error + }{ + { + rule: &featureproto.Rule{ + Id: "", + Strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{Variation: vID}, + }, + }, + expected: localizedError(statusMissingRuleID, locale.JaJP), + }, + { + rule: &featureproto.Rule{ + Id: "rule-id", + Strategy: &featureproto.Strategy{ + Type: 
featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{Variation: vID}, + }, + }, + expected: localizedError(statusIncorrectUUIDFormat, locale.JaJP), + }, + { + rule: &featureproto.Rule{ + Id: rID, + Strategy: nil, + }, + expected: localizedError(statusMissingRuleStrategy, locale.JaJP), + }, + { + rule: &featureproto.Rule{ + Id: rID, + Strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{}, + }, + }, + expected: localizedError(statusMissingVariationID, locale.JaJP), + }, + { + rule: expected, + expected: nil, + }, + } + for _, p := range patterns { + err := validateRule(f.Variations, p.rule) + assert.Equal(t, p.expected, err) + } +} + +func TestAddRolloutStrategyRule(t *testing.T) { + t.Parallel() + f := makeFeature("feature-id") + rID := newUUID(t) + vID1 := f.Variations[0].Id + vID2 := f.Variations[1].Id + expected := &featureproto.Rule{ + Id: rID, + Strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_ROLLOUT, + RolloutStrategy: &featureproto.RolloutStrategy{ + Variations: []*featureproto.RolloutStrategy_Variation{ + { + Variation: vID1, + Weight: 30000, + }, + { + Variation: vID2, + Weight: 70000, + }, + }, + }, + }, + } + patterns := []*struct { + rule *featureproto.Rule + expected error + }{ + { + rule: &featureproto.Rule{ + Id: "", + Strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_ROLLOUT, + RolloutStrategy: &featureproto.RolloutStrategy{ + Variations: []*featureproto.RolloutStrategy_Variation{ + { + Variation: vID1, + Weight: 30000, + }, + { + Variation: vID2, + Weight: 70000, + }, + }, + }, + }, + }, + expected: localizedError(statusMissingRuleID, locale.JaJP), + }, + { + rule: &featureproto.Rule{ + Id: "rule-id", + Strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_ROLLOUT, + RolloutStrategy: &featureproto.RolloutStrategy{ + Variations: []*featureproto.RolloutStrategy_Variation{ + { + Variation: vID1, + Weight: 30000, + }, + { + 
Variation: vID2, + Weight: 70000, + }, + }, + }, + }, + }, + expected: localizedError(statusIncorrectUUIDFormat, locale.JaJP), + }, + { + rule: &featureproto.Rule{ + Id: rID, + Strategy: nil, + }, + expected: localizedError(statusMissingRuleStrategy, locale.JaJP), + }, + { + rule: &featureproto.Rule{ + Id: rID, + Strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_ROLLOUT, + RolloutStrategy: &featureproto.RolloutStrategy{ + Variations: []*featureproto.RolloutStrategy_Variation{ + { + Variation: vID1, + Weight: 30000, + }, + }, + }, + }, + }, + expected: localizedError(statusDifferentVariationsSize, locale.JaJP), + }, + { + rule: &featureproto.Rule{ + Id: rID, + Strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_ROLLOUT, + RolloutStrategy: &featureproto.RolloutStrategy{ + Variations: []*featureproto.RolloutStrategy_Variation{ + { + Variation: "", + Weight: 30000, + }, + { + Variation: vID2, + Weight: 70000, + }, + }, + }, + }, + }, + expected: localizedError(statusMissingVariationID, locale.JaJP), + }, + { + rule: &featureproto.Rule{ + Id: rID, + Strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_ROLLOUT, + RolloutStrategy: &featureproto.RolloutStrategy{ + Variations: []*featureproto.RolloutStrategy_Variation{ + { + Variation: vID1, + Weight: 30000, + }, + { + Variation: "", + Weight: 70000, + }, + }, + }, + }, + }, + expected: localizedError(statusMissingVariationID, locale.JaJP), + }, + { + rule: &featureproto.Rule{ + Id: rID, + Strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_ROLLOUT, + RolloutStrategy: &featureproto.RolloutStrategy{ + Variations: []*featureproto.RolloutStrategy_Variation{ + { + Variation: vID1, + Weight: -1, + }, + { + Variation: vID2, + Weight: 70000, + }, + }, + }, + }, + }, + expected: localizedError(statusIncorrectVariationWeight, locale.JaJP), + }, + { + rule: &featureproto.Rule{ + Id: rID, + Strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_ROLLOUT, + RolloutStrategy: 
&featureproto.RolloutStrategy{ + Variations: []*featureproto.RolloutStrategy_Variation{ + { + Variation: vID1, + Weight: 30000, + }, + { + Variation: vID2, + Weight: -1, + }, + }, + }, + }, + }, + expected: localizedError(statusIncorrectVariationWeight, locale.JaJP), + }, + { + rule: &featureproto.Rule{ + Id: rID, + Strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_ROLLOUT, + RolloutStrategy: &featureproto.RolloutStrategy{ + Variations: []*featureproto.RolloutStrategy_Variation{ + { + Variation: vID1, + Weight: 30000, + }, + { + Variation: vID2, + Weight: 71000, + }, + }, + }, + }, + }, + expected: localizedError(statusExceededMaxVariationWeight, locale.JaJP), + }, + { + rule: expected, + expected: nil, + }, + } + for _, p := range patterns { + err := validateRule(f.Variations, p.rule) + assert.Equal(t, p.expected, err) + } +} + +func TestChangeRuleToFixedStrategy(t *testing.T) { + t.Parallel() + f := makeFeature("feature-id") + r := f.Rules[0] + rID := r.Id + vID := f.Variations[0].Id + expected := &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{Variation: vID}, + } + patterns := []*struct { + ruleID string + strategy *featureproto.Strategy + expected error + }{ + { + ruleID: "", + strategy: expected, + expected: localizedError(statusMissingRuleID, locale.JaJP), + }, + { + ruleID: rID, + strategy: nil, + expected: localizedError(statusMissingRuleStrategy, locale.JaJP), + }, + { + ruleID: rID, + strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + }, + expected: localizedError(statusMissingFixedStrategy, locale.JaJP), + }, + { + ruleID: rID, + strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{}, + }, + expected: localizedError(statusMissingVariationID, locale.JaJP), + }, + { + ruleID: "", + strategy: nil, + expected: localizedError(statusMissingRuleID, locale.JaJP), + }, + { + ruleID: rID, + strategy: 
expected, + expected: nil, + }, + } + for _, p := range patterns { + cmd := &featureproto.ChangeRuleStrategyCommand{ + RuleId: p.ruleID, + Strategy: p.strategy, + } + err := validateChangeRuleStrategy(f.Variations, cmd) + assert.Equal(t, p.expected, err) + } +} + +func TestChangeRuleToRolloutStrategy(t *testing.T) { + t.Parallel() + f := makeFeature("feature-id") + r := f.Rules[0] + rID := r.Id + vID1 := f.Variations[0].Id + vID2 := f.Variations[1].Id + expected := &featureproto.Strategy{ + Type: featureproto.Strategy_ROLLOUT, + RolloutStrategy: &featureproto.RolloutStrategy{ + Variations: []*featureproto.RolloutStrategy_Variation{ + { + Variation: vID1, + Weight: 30000, + }, + { + Variation: vID2, + Weight: 70000, + }, + }, + }, + } + patterns := map[string]*struct { + ruleID string + strategy *featureproto.Strategy + expected error + }{ + "fail: errMissingRuleID": { + ruleID: "", + strategy: expected, + expected: localizedError(statusMissingRuleID, locale.JaJP), + }, + "fail: errMissingRuleStrategy": { + ruleID: rID, + strategy: nil, + expected: localizedError(statusMissingRuleStrategy, locale.JaJP), + }, + "fail: errDifferentVariationsSizeJaJP": { + ruleID: rID, + strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_ROLLOUT, + RolloutStrategy: &featureproto.RolloutStrategy{ + Variations: []*featureproto.RolloutStrategy_Variation{ + { + Variation: vID1, + Weight: 30000, + }, + }, + }, + }, + expected: localizedError(statusDifferentVariationsSize, locale.JaJP), + }, + "fail: errMissingVariationIDJaJP: idx-0": { + ruleID: rID, + strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_ROLLOUT, + RolloutStrategy: &featureproto.RolloutStrategy{ + Variations: []*featureproto.RolloutStrategy_Variation{ + { + Variation: "", + Weight: 30000, + }, + { + Variation: vID2, + Weight: 70000, + }, + }, + }, + }, + expected: localizedError(statusMissingVariationID, locale.JaJP), + }, + "fail: errMissingVariationIDJaJP: idx-1": { + ruleID: rID, + strategy: 
&featureproto.Strategy{ + Type: featureproto.Strategy_ROLLOUT, + RolloutStrategy: &featureproto.RolloutStrategy{ + Variations: []*featureproto.RolloutStrategy_Variation{ + { + Variation: vID1, + Weight: 30000, + }, + { + Variation: "", + Weight: 70000, + }, + }, + }, + }, + expected: localizedError(statusMissingVariationID, locale.JaJP), + }, + "fail: errIncorrectVariationWeightJaJP: idx-0": { + ruleID: rID, + strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_ROLLOUT, + RolloutStrategy: &featureproto.RolloutStrategy{ + Variations: []*featureproto.RolloutStrategy_Variation{ + { + Variation: vID1, + Weight: -1, + }, + { + Variation: vID2, + Weight: 70000, + }, + }, + }, + }, + expected: localizedError(statusIncorrectVariationWeight, locale.JaJP), + }, + "fail: errIncorrectVariationWeightJaJP: idx-1": { + ruleID: rID, + strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_ROLLOUT, + RolloutStrategy: &featureproto.RolloutStrategy{ + Variations: []*featureproto.RolloutStrategy_Variation{ + { + Variation: vID1, + Weight: 30000, + }, + { + Variation: vID2, + Weight: -1, + }, + }, + }, + }, + expected: localizedError(statusIncorrectVariationWeight, locale.JaJP), + }, + "fail: errIncorrectVariationWeightJaJP: more than total weight": { + ruleID: rID, + strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_ROLLOUT, + RolloutStrategy: &featureproto.RolloutStrategy{ + Variations: []*featureproto.RolloutStrategy_Variation{ + { + Variation: vID1, + Weight: 30000, + }, + { + Variation: vID2, + Weight: 70001, + }, + }, + }, + }, + expected: localizedError(statusExceededMaxVariationWeight, locale.JaJP), + }, + "fail: errIncorrectVariationWeightJaJP: less than total weight": { + ruleID: rID, + strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_ROLLOUT, + RolloutStrategy: &featureproto.RolloutStrategy{ + Variations: []*featureproto.RolloutStrategy_Variation{ + { + Variation: vID1, + Weight: 29999, + }, + { + Variation: vID2, + Weight: 
70000, + }, + }, + }, + }, + expected: localizedError(statusExceededMaxVariationWeight, locale.JaJP), + }, + "success": { + ruleID: rID, + strategy: expected, + expected: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + cmd := &featureproto.ChangeRuleStrategyCommand{ + RuleId: p.ruleID, + Strategy: p.strategy, + } + err := validateChangeRuleStrategy(f.Variations, cmd) + assert.Equal(t, p.expected, err) + }) + } +} + +func TestChangeFixedStrategy(t *testing.T) { + t.Parallel() + f := makeFeature("feature-id") + r := f.Rules[0] + rID := r.Id + vID := f.Variations[0].Id + patterns := []*struct { + ruleID string + strategy *featureproto.FixedStrategy + expected error + }{ + { + ruleID: "", + strategy: &featureproto.FixedStrategy{Variation: vID}, + expected: localizedError(statusMissingRuleID, locale.JaJP), + }, + { + ruleID: rID, + strategy: nil, + expected: localizedError(statusMissingFixedStrategy, locale.JaJP), + }, + { + ruleID: rID, + strategy: &featureproto.FixedStrategy{}, + expected: localizedError(statusMissingVariationID, locale.JaJP), + }, + { + ruleID: "", + strategy: nil, + expected: localizedError(statusMissingRuleID, locale.JaJP), + }, + { + ruleID: rID, + strategy: &featureproto.FixedStrategy{Variation: vID}, + expected: nil, + }, + } + for _, p := range patterns { + cmd := &featureproto.ChangeFixedStrategyCommand{ + RuleId: p.ruleID, + Strategy: p.strategy, + } + err := validateChangeFixedStrategy(cmd) + assert.Equal(t, p.expected, err) + } +} + +func TestChangeRolloutStrategy(t *testing.T) { + t.Parallel() + f := makeFeature("feature-id") + r := f.Rules[0] + rID := r.Id + vID1 := f.Variations[0].Id + vID2 := f.Variations[1].Id + expected := &featureproto.RolloutStrategy{Variations: []*featureproto.RolloutStrategy_Variation{ + { + Variation: vID1, + Weight: 70000, + }, + { + Variation: vID2, + Weight: 30000, + }, + }} + patterns := []*struct { + ruleID string + strategy *featureproto.RolloutStrategy + expected error + 
}{ + { + ruleID: "", + strategy: &featureproto.RolloutStrategy{}, + expected: localizedError(statusMissingRuleID, locale.JaJP), + }, + { + ruleID: rID, + strategy: nil, + expected: localizedError(statusMissingRolloutStrategy, locale.JaJP), + }, + { + ruleID: rID, + strategy: &featureproto.RolloutStrategy{Variations: []*featureproto.RolloutStrategy_Variation{ + { + Variation: vID1, + Weight: 70000, + }, + }}, + expected: localizedError(statusDifferentVariationsSize, locale.JaJP), + }, + { + ruleID: rID, + strategy: &featureproto.RolloutStrategy{Variations: []*featureproto.RolloutStrategy_Variation{ + { + Variation: "", + Weight: 70000, + }, + { + Variation: vID2, + Weight: 30000, + }, + }}, + expected: localizedError(statusMissingVariationID, locale.JaJP), + }, + { + ruleID: rID, + strategy: &featureproto.RolloutStrategy{Variations: []*featureproto.RolloutStrategy_Variation{ + { + Variation: vID1, + Weight: 70000, + }, + { + Variation: "", + Weight: 30000, + }, + }}, + expected: localizedError(statusMissingVariationID, locale.JaJP), + }, + { + ruleID: rID, + strategy: &featureproto.RolloutStrategy{Variations: []*featureproto.RolloutStrategy_Variation{ + { + Variation: vID1, + Weight: -1, + }, + { + Variation: vID2, + Weight: 30000, + }, + }}, + expected: localizedError(statusIncorrectVariationWeight, locale.JaJP), + }, + { + ruleID: rID, + strategy: &featureproto.RolloutStrategy{Variations: []*featureproto.RolloutStrategy_Variation{ + { + Variation: vID1, + Weight: 70000, + }, + { + Variation: vID2, + Weight: -1, + }, + }}, + expected: localizedError(statusIncorrectVariationWeight, locale.JaJP), + }, + { + ruleID: rID, + strategy: &featureproto.RolloutStrategy{Variations: []*featureproto.RolloutStrategy_Variation{ + { + Variation: vID1, + Weight: 62000, + }, + { + Variation: vID2, + Weight: 59000, + }, + }}, + expected: localizedError(statusExceededMaxVariationWeight, locale.JaJP), + }, + { + ruleID: "", + strategy: nil, + expected: 
localizedError(statusMissingRuleID, locale.JaJP), + }, + { + ruleID: rID, + strategy: expected, + expected: nil, + }, + } + for _, p := range patterns { + cmd := &featureproto.ChangeRolloutStrategyCommand{ + RuleId: p.ruleID, + Strategy: p.strategy, + } + err := validateChangeRolloutStrategy(f.Variations, cmd) + assert.Equal(t, p.expected, err) + } +} + +func TestChangeDefaultStrategy(t *testing.T) { + t.Parallel() + f := makeFeature("feature-id") + patterns := map[string]*struct { + strategy *featureproto.Strategy + expectedErr error + }{ + "fail: errMissingRuleStrategy": { + strategy: nil, + expectedErr: localizedError(statusMissingRuleStrategy, locale.JaJP), + }, + "fail: errIncorrectVariationWeightJaJP: more than total weight": { + strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_ROLLOUT, + RolloutStrategy: &featureproto.RolloutStrategy{ + Variations: []*featureproto.RolloutStrategy_Variation{ + { + Variation: "variation-A", + Weight: 30000, + }, + { + Variation: "variation-B", + Weight: 70001, + }, + }, + }, + }, + expectedErr: localizedError(statusExceededMaxVariationWeight, locale.JaJP), + }, + "fail: errIncorrectVariationWeightJaJP: less than total weight": { + strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_ROLLOUT, + RolloutStrategy: &featureproto.RolloutStrategy{ + Variations: []*featureproto.RolloutStrategy_Variation{ + { + Variation: "variation-A", + Weight: 29999, + }, + { + Variation: "variation-B", + Weight: 70000, + }, + }, + }, + }, + expectedErr: localizedError(statusExceededMaxVariationWeight, locale.JaJP), + }, + "success": { + strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_ROLLOUT, + RolloutStrategy: &featureproto.RolloutStrategy{ + Variations: []*featureproto.RolloutStrategy_Variation{ + { + Variation: "variation-A", + Weight: 30000, + }, + { + Variation: "variation-B", + Weight: 70000, + }, + }, + }, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t 
*testing.T) { + cmd := &featureproto.ChangeDefaultStrategyCommand{ + Strategy: p.strategy, + } + err := validateChangeDefaultStrategy(f.Variations, cmd) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestValidateFeatureVariationsCommand(t *testing.T) { + t.Parallel() + fID0 := "fID-0" + fID1 := "fID-1" + fID2 := "fID-2" + fID3 := "fID-3" + fID4 := "fID-4" + fID5 := "fID-5" + pattens := []*struct { + cmd command.Command + fs []*featureproto.Feature + expectedErr error + }{ + { + cmd: &featureproto.CreateFeatureCommand{}, + fs: []*featureproto.Feature{ + { + Id: fID0, + }, + }, + expectedErr: nil, + }, + { + cmd: &featureproto.RemoveVariationCommand{ + Id: "variation-A", + }, + fs: []*featureproto.Feature{ + { + Id: fID0, + }, + { + Id: fID1, + Prerequisites: []*featureproto.Prerequisite{ + { + FeatureId: fID0, + VariationId: "variation-A", + }, + { + FeatureId: fID2, + }, + }, + }, + { + Id: fID2, + }, + { + Id: fID3, + Prerequisites: []*featureproto.Prerequisite{ + { + FeatureId: fID4, + }, + { + FeatureId: fID5, + }, + }, + }, + { + Id: fID4, + Prerequisites: []*featureproto.Prerequisite{ + { + FeatureId: fID0, + }, + { + FeatureId: fID2, + }, + }, + }, + { + Id: fID5, + }, + }, + expectedErr: localizedError(statusInvalidChangingVariation, locale.JaJP), + }, + { + cmd: &featureproto.RemoveVariationCommand{ + Id: "variation-A", + }, + fs: []*featureproto.Feature{ + { + Id: fID0, + }, + { + Id: fID1, + Prerequisites: []*featureproto.Prerequisite{ + { + FeatureId: fID2, + }, + }, + }, + { + Id: fID2, + }, + { + Id: fID3, + Prerequisites: []*featureproto.Prerequisite{ + { + FeatureId: fID4, + }, + { + FeatureId: fID5, + }, + }, + }, + { + Id: fID4, + Prerequisites: []*featureproto.Prerequisite{ + { + FeatureId: fID2, + }, + }, + }, + { + Id: fID5, + }, + }, + expectedErr: nil, + }, + } + for _, p := range pattens { + err := validateFeatureVariationsCommand(p.fs, p.cmd) + assert.Equal(t, p.expectedErr, err) + } +} + +func TestValidateAddPrerequisite(t 
*testing.T) { + t.Parallel() + fID0 := "fID-0" + fID1 := "fID-1" + fID2 := "fID-2" + fID3 := "fID-3" + fID4 := "fID-4" + fID5 := "fID-5" + pattens := []*struct { + prerequisite *featureproto.Prerequisite + fs []*featureproto.Feature + expectedErr error + }{ + { + prerequisite: &featureproto.Prerequisite{ + FeatureId: fID1, + VariationId: "variation-A", + }, + fs: []*featureproto.Feature{ + { + Id: fID0, + Prerequisites: []*featureproto.Prerequisite{}, + }, + { + Id: fID1, + Prerequisites: []*featureproto.Prerequisite{ + { + FeatureId: fID0, + }, + { + FeatureId: fID2, + }, + }, + Variations: []*featureproto.Variation{ + { + Id: "variation-A", + }, + }, + }, + { + Id: fID2, + Prerequisites: []*featureproto.Prerequisite{}, + }, + { + Id: fID3, + Prerequisites: []*featureproto.Prerequisite{ + { + FeatureId: fID4, + }, + { + FeatureId: fID5, + }, + }, + }, + { + Id: fID4, + Prerequisites: []*featureproto.Prerequisite{ + { + FeatureId: fID0, + }, + { + FeatureId: fID2, + }, + }, + }, + { + Id: fID5, + Prerequisites: []*featureproto.Prerequisite{}, + }, + }, + expectedErr: localizedError(statusCycleExists, locale.JaJP), + }, + { + prerequisite: &featureproto.Prerequisite{ + FeatureId: fID1, + VariationId: "variation-A", + }, + fs: []*featureproto.Feature{ + { + Id: fID0, + }, + { + Id: fID1, + Prerequisites: []*featureproto.Prerequisite{ + { + FeatureId: fID2, + }, + }, + Variations: []*featureproto.Variation{ + { + Id: "variation-A", + }, + }, + }, + { + Id: fID2, + }, + { + Id: fID3, + Prerequisites: []*featureproto.Prerequisite{ + { + FeatureId: fID4, + }, + { + FeatureId: fID5, + }, + }, + }, + { + Id: fID4, + Prerequisites: []*featureproto.Prerequisite{ + { + FeatureId: fID2, + }, + }, + }, + { + Id: fID5, + }, + }, + expectedErr: nil, + }, + { + prerequisite: &featureproto.Prerequisite{ + FeatureId: fID0, + VariationId: "variation-A", + }, + fs: []*featureproto.Feature{ + { + Id: fID0, + Variations: []*featureproto.Variation{ + { + Id: "variation-A", + }, + }, + }, 
+ { + Id: fID1, + Prerequisites: []*featureproto.Prerequisite{ + { + FeatureId: fID2, + }, + }, + }, + { + Id: fID2, + }, + { + Id: fID3, + Prerequisites: []*featureproto.Prerequisite{ + { + FeatureId: fID4, + }, + { + FeatureId: fID5, + }, + }, + }, + { + Id: fID4, + Prerequisites: []*featureproto.Prerequisite{ + { + FeatureId: fID2, + }, + }, + }, + { + Id: fID5, + }, + }, + expectedErr: localizedError(statusInvalidPrerequisite, locale.JaJP), + }, + { + prerequisite: &featureproto.Prerequisite{ + FeatureId: fID1, + VariationId: "variation-A", + }, + fs: []*featureproto.Feature{ + { + Id: fID0, + Prerequisites: []*featureproto.Prerequisite{ + { + FeatureId: fID1, + VariationId: "variation-B", + }, + }, + }, + { + Id: fID1, + Prerequisites: []*featureproto.Prerequisite{ + { + FeatureId: fID2, + }, + }, + Variations: []*featureproto.Variation{ + { + Id: "variation-A", + }, + }, + }, + { + Id: fID2, + }, + { + Id: fID3, + Prerequisites: []*featureproto.Prerequisite{ + { + FeatureId: fID4, + }, + { + FeatureId: fID5, + }, + }, + }, + { + Id: fID4, + Prerequisites: []*featureproto.Prerequisite{ + { + FeatureId: fID2, + }, + }, + }, + { + Id: fID5, + }, + }, + expectedErr: localizedError(statusInvalidPrerequisite, locale.JaJP), + }, + { + prerequisite: &featureproto.Prerequisite{ + FeatureId: fID1, + VariationId: "variation-A", + }, + fs: []*featureproto.Feature{ + { + Id: fID0, + }, + { + Id: fID1, + Prerequisites: []*featureproto.Prerequisite{ + { + FeatureId: fID2, + }, + }, + Variations: []*featureproto.Variation{ + { + Id: "variation-B", + }, + }, + }, + { + Id: fID2, + }, + { + Id: fID3, + Prerequisites: []*featureproto.Prerequisite{ + { + FeatureId: fID4, + }, + { + FeatureId: fID5, + }, + }, + }, + { + Id: fID4, + Prerequisites: []*featureproto.Prerequisite{ + { + FeatureId: fID2, + }, + }, + }, + { + Id: fID5, + }, + }, + expectedErr: localizedError(statusInvalidVariationID, locale.JaJP), + }, + } + for _, p := range pattens { + err := 
validateAddPrerequisite(p.fs, p.fs[0], p.prerequisite) + assert.Equal(t, p.expectedErr, err) + } +} + +func TestValidateChangePrerequisiteVariation(t *testing.T) { + t.Parallel() + fID0 := "fID-0" + fID1 := "fID-1" + fID2 := "fID-2" + fID3 := "fID-3" + fID4 := "fID-4" + fID5 := "fID-5" + pattens := []*struct { + prerequisite *featureproto.Prerequisite + fs []*featureproto.Feature + expectedErr error + }{ + { + prerequisite: &featureproto.Prerequisite{ + FeatureId: fID1, + VariationId: "variation-A", + }, + fs: []*featureproto.Feature{ + { + Id: fID0, + }, + { + Id: fID1, + Prerequisites: []*featureproto.Prerequisite{ + { + FeatureId: fID2, + }, + }, + Variations: []*featureproto.Variation{ + { + Id: "variation-A", + }, + }, + }, + { + Id: fID2, + }, + { + Id: fID3, + Prerequisites: []*featureproto.Prerequisite{ + { + FeatureId: fID4, + }, + { + FeatureId: fID5, + }, + }, + }, + { + Id: fID4, + Prerequisites: []*featureproto.Prerequisite{ + { + FeatureId: fID2, + }, + }, + }, + { + Id: fID5, + }, + }, + expectedErr: nil, + }, + { + prerequisite: &featureproto.Prerequisite{ + FeatureId: fID1, + VariationId: "variation-A", + }, + fs: []*featureproto.Feature{ + { + Id: fID0, + }, + { + Id: fID1, + Prerequisites: []*featureproto.Prerequisite{ + { + FeatureId: fID2, + }, + }, + Variations: []*featureproto.Variation{ + { + Id: "variation-B", + }, + }, + }, + { + Id: fID2, + }, + { + Id: fID3, + Prerequisites: []*featureproto.Prerequisite{ + { + FeatureId: fID4, + }, + { + FeatureId: fID5, + }, + }, + }, + { + Id: fID4, + Prerequisites: []*featureproto.Prerequisite{ + { + FeatureId: fID2, + }, + }, + }, + { + Id: fID5, + }, + }, + expectedErr: localizedError(statusInvalidVariationID, locale.JaJP), + }, + } + for _, p := range pattens { + err := validateChangePrerequisiteVariation(p.fs, p.prerequisite) + assert.Equal(t, p.expectedErr, err) + } +} + +func makeFeature(id string) *domain.Feature { + return &domain.Feature{ + Feature: &featureproto.Feature{ + Id: id, + Name: 
"test feature", + Version: 1, + CreatedAt: time.Now().Unix(), + Variations: []*featureproto.Variation{ + { + Id: "variation-A", + Value: "A", + Name: "Variation A", + Description: "Thing does A", + }, + { + Id: "variation-B", + Value: "B", + Name: "Variation B", + Description: "Thing does B", + }, + }, + Targets: []*featureproto.Target{ + { + Variation: "variation-B", + Users: []string{ + "user1", + }, + }, + }, + Rules: []*featureproto.Rule{ + { + Id: "rule-1", + Strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-A", + }, + }, + Clauses: []*featureproto.Clause{ + { + Id: "clause-1", + Attribute: "name", + Operator: featureproto.Clause_EQUALS, + Values: []string{ + "user1", + "user2", + }, + }, + }, + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-B", + }, + }, + }, + } +} + +func newUUID(t *testing.T) string { + t.Helper() + id, err := uuid.NewUUID() + if err != nil { + t.Fatal(err) + } + return id.String() +} diff --git a/pkg/feature/api/segment.go b/pkg/feature/api/segment.go new file mode 100644 index 000000000..d963e1549 --- /dev/null +++ b/pkg/feature/api/segment.go @@ -0,0 +1,462 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "context" + "strconv" + + "go.uber.org/zap" + + "github.com/bucketeer-io/bucketeer/pkg/feature/command" + "github.com/bucketeer-io/bucketeer/pkg/feature/domain" + v2fs "github.com/bucketeer-io/bucketeer/pkg/feature/storage/v2" + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +func (s *FeatureService) CreateSegment( + ctx context.Context, + req *featureproto.CreateSegmentRequest, +) (*featureproto.CreateSegmentResponse, error) { + editor, err := s.checkRole(ctx, accountproto.Account_EDITOR, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err = validateCreateSegmentRequest(req.Command); err != nil { + s.logger.Info( + "Invalid argument", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, err + } + segment, err := domain.NewSegment(req.Command.Name, req.Command.Description) + if err != nil { + s.logger.Error( + "Failed to create segment", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + segmentStorage := v2fs.NewSegmentStorage(tx) + if err := segmentStorage.CreateSegment(ctx, segment, req.EnvironmentNamespace); err != nil { + s.logger.Error( + "Failed 
to store segment", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return err + } + handler := command.NewSegmentCommandHandler( + editor, + segment, + s.domainPublisher, + req.EnvironmentNamespace, + ) + if err := handler.Handle(ctx, req.Command); err != nil { + s.logger.Error( + "Failed to handle command", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return err + } + return nil + }) + if err != nil { + if err == v2fs.ErrSegmentAlreadyExists { + return nil, localizedError(statusAlreadyExists, locale.JaJP) + } + s.logger.Error( + "Failed to create segment", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &featureproto.CreateSegmentResponse{ + Segment: segment.Segment, + }, nil +} + +func (s *FeatureService) DeleteSegment( + ctx context.Context, + req *featureproto.DeleteSegmentRequest, +) (*featureproto.DeleteSegmentResponse, error) { + editor, err := s.checkRole(ctx, accountproto.Account_EDITOR, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := validateDeleteSegmentRequest(req); err != nil { + s.logger.Info( + "Invalid argument", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, err + } + if err := s.checkSegmentInUse(ctx, req.Id, req.EnvironmentNamespace); err != nil { + return nil, err + } + if err := s.updateSegment( + ctx, + editor, + []command.Command{req.Command}, + req.Id, + req.EnvironmentNamespace, + ); err != nil { + return nil, err + } + return &featureproto.DeleteSegmentResponse{}, nil +} + +func (s *FeatureService) checkSegmentInUse(ctx context.Context, segmentID, 
environmentNamespace string) error { + features := []*featureproto.Feature{} + var cursor string + for { + f, cursor, _, err := s.listFeatures( + ctx, + mysql.QueryNoLimit, + cursor, + nil, + "", + nil, + nil, + "", + featureproto.ListFeaturesRequest_DEFAULT, + featureproto.ListFeaturesRequest_ASC, + environmentNamespace, + ) + if err != nil { + s.logger.Error( + "Failed to list features", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return localizedError(statusInternal, locale.JaJP) + } + features = append(features, f...) + size := len(f) + if cursor == "" || size == 0 || size < listRequestSize { + break + } + } + if s.containsInRules(segmentID, features) { + s.logger.Warn( + "Segment User in use", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.String("segmentId", segmentID), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return localizedError(statusSegmentInUse, locale.JaJP) + } + return nil +} + +func (s *FeatureService) containsInRules(segmentID string, features []*featureproto.Feature) bool { + for _, f := range features { + for _, r := range f.Rules { + for _, c := range r.Clauses { + if c.Operator == featureproto.Clause_SEGMENT { + for _, id := range c.Values { + if segmentID == id { + return true + } + } + } + } + } + } + return false +} + +func (s *FeatureService) UpdateSegment( + ctx context.Context, + req *featureproto.UpdateSegmentRequest, +) (*featureproto.UpdateSegmentResponse, error) { + editor, err := s.checkRole(ctx, accountproto.Account_EDITOR, req.EnvironmentNamespace) + if err != nil { + s.logger.Info( + "Permission denied", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, err + } + commands := make([]command.Command, 0, len(req.Commands)) + for _, c := range req.Commands { + cmd, err := 
command.UnmarshalCommand(c) + if err != nil { + s.logger.Error( + "Failed to unmarshal command", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + commands = append(commands, cmd) + } + if err := validateUpdateSegment(req.Id, commands); err != nil { + s.logger.Info( + "Invalid argument", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, err + } + if err := s.updateSegment(ctx, editor, commands, req.Id, req.EnvironmentNamespace); err != nil { + return nil, err + } + return &featureproto.UpdateSegmentResponse{}, nil +} + +func (s *FeatureService) updateSegment( + ctx context.Context, + editor *eventproto.Editor, + commands []command.Command, + segmentID, environmentNamespace string, +) error { + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return localizedError(statusInternal, locale.JaJP) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + segmentStorage := v2fs.NewSegmentStorage(tx) + segment, err := segmentStorage.GetSegment(ctx, segmentID, environmentNamespace) + if err != nil { + s.logger.Error( + "Failed to get segment", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return err + } + handler := command.NewSegmentCommandHandler( + editor, + segment, + s.domainPublisher, + environmentNamespace, + ) + for _, cmd := range commands { + if err := handler.Handle(ctx, cmd); err != nil { + s.logger.Error( + "Failed to handle command", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., 
+ ) + return err + } + } + return segmentStorage.UpdateSegment(ctx, segment, environmentNamespace) + }) + if err != nil { + if err == v2fs.ErrSegmentNotFound || err == v2fs.ErrSegmentUnexpectedAffectedRows { + return localizedError(statusNotFound, locale.JaJP) + } + s.logger.Error( + "Failed to update segment", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return localizedError(statusInternal, locale.JaJP) + } + return nil +} + +func (s *FeatureService) GetSegment( + ctx context.Context, + req *featureproto.GetSegmentRequest, +) (*featureproto.GetSegmentResponse, error) { + _, err := s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := validateGetSegmentRequest(req); err != nil { + s.logger.Info( + "Invalid argument", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, err + } + segmentStorage := v2fs.NewSegmentStorage(s.mysqlClient) + segment, err := segmentStorage.GetSegment(ctx, req.Id, req.EnvironmentNamespace) + if err != nil { + if err == v2fs.ErrSegmentNotFound { + return nil, localizedError(statusNotFound, locale.JaJP) + } + s.logger.Error( + "Failed to get segment", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &featureproto.GetSegmentResponse{Segment: segment.Segment}, nil + +} + +func (s *FeatureService) ListSegments( + ctx context.Context, + req *featureproto.ListSegmentsRequest, +) (*featureproto.ListSegmentsResponse, error) { + _, err := s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := validateListSegmentsRequest(req); err != nil { + s.logger.Info( + 
"Invalid argument", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, err + } + whereParts := []mysql.WherePart{ + mysql.NewFilter("deleted", "=", false), + mysql.NewFilter("environment_namespace", "=", req.EnvironmentNamespace), + } + if req.Status != nil { + whereParts = append(whereParts, mysql.NewFilter("status", "=", req.Status.Value)) + } + if req.SearchKeyword != "" { + whereParts = append(whereParts, mysql.NewSearchQuery([]string{"name", "description"}, req.SearchKeyword)) + } + orders, err := s.newSegmentListOrders(req.OrderBy, req.OrderDirection) + if err != nil { + s.logger.Error( + "Invalid argument", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, err + } + limit := int(req.PageSize) + cursor := req.Cursor + if cursor == "" { + cursor = "0" + } + offset, err := strconv.Atoi(cursor) + if err != nil { + return nil, localizedError(statusInvalidCursor, locale.JaJP) + } + var isInUseStatus *bool + if req.IsInUseStatus != nil { + isInUseStatus = &req.IsInUseStatus.Value + } + segmentStorage := v2fs.NewSegmentStorage(s.mysqlClient) + segments, nextCursor, totalCount, err := segmentStorage.ListSegments( + ctx, + whereParts, + orders, + limit, + offset, + isInUseStatus, + req.EnvironmentNamespace, + ) + if err != nil { + s.logger.Error( + "Failed to list segments", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &featureproto.ListSegmentsResponse{ + Segments: segments, + Cursor: strconv.Itoa(nextCursor), + TotalCount: totalCount, + }, nil +} + +func (s *FeatureService) newSegmentListOrders( + orderBy featureproto.ListSegmentsRequest_OrderBy, + orderDirection featureproto.ListSegmentsRequest_OrderDirection, +) ([]*mysql.Order, error) { + var column string + 
switch orderBy { + case featureproto.ListSegmentsRequest_DEFAULT, + featureproto.ListSegmentsRequest_NAME: + column = "name" + case featureproto.ListSegmentsRequest_CREATED_AT: + column = "created_at" + case featureproto.ListSegmentsRequest_UPDATED_AT: + column = "updated_at" + default: + return nil, localizedError(statusInvalidOrderBy, locale.JaJP) + } + direction := mysql.OrderDirectionAsc + if orderDirection == featureproto.ListSegmentsRequest_DESC { + direction = mysql.OrderDirectionDesc + } + return []*mysql.Order{mysql.NewOrder(column, direction)}, nil +} diff --git a/pkg/feature/api/segment_test.go b/pkg/feature/api/segment_test.go new file mode 100644 index 000000000..3bfe13d00 --- /dev/null +++ b/pkg/feature/api/segment_test.go @@ -0,0 +1,376 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "context" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/golang/protobuf/ptypes" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + v2fs "github.com/bucketeer-io/bucketeer/pkg/feature/storage/v2" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + mysqlmock "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + "github.com/bucketeer-io/bucketeer/pkg/token" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +func TestCreateSegmentMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + testcases := []struct { + setup func(*FeatureService) + role accountproto.Account_Role + cmd *featureproto.CreateSegmentCommand + environmentNamespace string + expected error + }{ + { + setup: nil, + role: accountproto.Account_OWNER, + cmd: nil, + environmentNamespace: "ns0", + expected: errMissingCommandJaJP, + }, + { + setup: nil, + role: accountproto.Account_OWNER, + cmd: &featureproto.CreateSegmentCommand{ + Name: "", + Description: "description", + }, + environmentNamespace: "ns0", + expected: errMissingNameJaJP, + }, + { + setup: func(s *FeatureService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + role: accountproto.Account_OWNER, + cmd: &featureproto.CreateSegmentCommand{ + Name: "name", + Description: "description", + }, + environmentNamespace: "ns0", + expected: nil, + }, + } + for _, tc := range testcases { + service := createFeatureService(mockController) + if tc.setup != nil { + tc.setup(service) + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ctx = setToken(ctx, tc.role) + req 
:= &featureproto.CreateSegmentRequest{Command: tc.cmd, EnvironmentNamespace: tc.environmentNamespace} + _, err := service.CreateSegment(ctx, req) + assert.Equal(t, tc.expected, err) + } +} + +func TestDeleteSegmentMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + testcases := []struct { + setup func(*FeatureService) + role accountproto.Account_Role + id string + cmd *featureproto.DeleteSegmentCommand + environmentNamespace string + expected error + }{ + { + setup: nil, + role: accountproto.Account_OWNER, + id: "", + cmd: nil, + environmentNamespace: "ns0", + expected: errMissingIDJaJP, + }, + { + setup: nil, + role: accountproto.Account_OWNER, + id: "id", + cmd: nil, + environmentNamespace: "ns0", + expected: errMissingCommandJaJP, + }, + { + setup: func(s *FeatureService) { + rows := mysqlmock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2fs.ErrSegmentNotFound) + }, + role: accountproto.Account_OWNER, + id: "id", + cmd: &featureproto.DeleteSegmentCommand{}, + environmentNamespace: "ns0", + expected: errNotFoundJaJP, + }, + { + setup: func(s *FeatureService) { + rows := mysqlmock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + 
s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + role: accountproto.Account_OWNER, + id: "id", + cmd: &featureproto.DeleteSegmentCommand{}, + environmentNamespace: "ns0", + expected: nil, + }, + } + for _, tc := range testcases { + service := createFeatureService(mockController) + if tc.setup != nil { + tc.setup(service) + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ctx = setToken(ctx, tc.role) + req := &featureproto.DeleteSegmentRequest{ + Id: tc.id, + Command: tc.cmd, + EnvironmentNamespace: tc.environmentNamespace, + } + _, err := service.DeleteSegment(ctx, req) + assert.Equal(t, tc.expected, err) + } +} + +func TestUpdateSegmentMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + changeSegmentNameCmd, err := ptypes.MarshalAny(&featureproto.ChangeSegmentNameCommand{Name: "name"}) + require.NoError(t, err) + testcases := []struct { + setup func(*FeatureService) + role accountproto.Account_Role + id string + cmds []*featureproto.Command + environmentNamespace string + expected error + }{ + { + setup: nil, + role: accountproto.Account_OWNER, + id: "", + cmds: nil, + environmentNamespace: "ns0", + expected: errMissingIDJaJP, + }, + { + setup: nil, + role: accountproto.Account_OWNER, + id: "id", + cmds: nil, + environmentNamespace: "ns0", + expected: errMissingCommandJaJP, + }, + { + setup: func(s *FeatureService) { + 
s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + role: accountproto.Account_OWNER, + id: "id", + cmds: []*featureproto.Command{ + {Command: changeSegmentNameCmd}, + }, + environmentNamespace: "ns0", + expected: nil, + }, + } + for _, tc := range testcases { + service := createFeatureService(mockController) + if tc.setup != nil { + tc.setup(service) + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ctx = setToken(ctx, tc.role) + req := &featureproto.UpdateSegmentRequest{ + Id: tc.id, + Commands: tc.cmds, + EnvironmentNamespace: tc.environmentNamespace, + } + _, err := service.UpdateSegment(ctx, req) + assert.Equal(t, tc.expected, err) + } +} + +func TestGetSegmentMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + testcases := []struct { + setup func(*FeatureService) + id string + environmentNamespace string + expected error + }{ + { + setup: nil, + id: "", + environmentNamespace: "ns0", + expected: errMissingIDJaJP, + }, + { + setup: func(s *FeatureService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + id: "id", + environmentNamespace: "ns0", + expected: errNotFoundJaJP, + }, + { + setup: func(s *FeatureService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + id: "id", + environmentNamespace: "ns0", + expected: nil, + }, + } + for _, tc := range testcases { + service := createFeatureService(mockController) + if tc.setup != nil { + 
tc.setup(service) + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ctx = setToken(ctx, accountproto.Account_UNASSIGNED) + req := &featureproto.GetSegmentRequest{Id: tc.id, EnvironmentNamespace: tc.environmentNamespace} + _, err := service.GetSegment(ctx, req) + assert.Equal(t, tc.expected, err) + } +} + +func TestListSegmentsMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + testcases := []struct { + setup func(*FeatureService) + pageSize int64 + environmentNamespace string + expected error + }{ + { + setup: nil, + pageSize: int64(maxPageSizePerRequest + 1), + environmentNamespace: "ns0", + expected: errExceededMaxPageSizePerRequestJaJP, + }, + { + setup: func(s *FeatureService) { + rows := mysqlmock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + pageSize: int64(maxPageSizePerRequest), + environmentNamespace: "ns0", + expected: nil, + }, + } + for _, tc := range testcases { + service := createFeatureService(mockController) + if tc.setup != nil { + tc.setup(service) + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ctx = setToken(ctx, accountproto.Account_UNASSIGNED) + req := &featureproto.ListSegmentsRequest{PageSize: tc.pageSize, EnvironmentNamespace: tc.environmentNamespace} + _, err := service.ListSegments(ctx, req) + assert.Equal(t, tc.expected, err) + } +} + +func setToken(ctx context.Context, role accountproto.Account_Role) context.Context { + t := &token.IDToken{ + Issuer: "issuer", + Subject: 
"sub", + Audience: "audience", + Expiry: time.Now().AddDate(100, 0, 0), + IssuedAt: time.Now(), + Email: "email", + AdminRole: role, + } + return context.WithValue(ctx, rpc.Key, t) +} diff --git a/pkg/feature/api/segment_user.go b/pkg/feature/api/segment_user.go new file mode 100644 index 000000000..ecee15d3b --- /dev/null +++ b/pkg/feature/api/segment_user.go @@ -0,0 +1,501 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "bytes" + "context" + "strconv" + "strings" + + "go.uber.org/zap" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/bucketeer-io/bucketeer/pkg/feature/command" + "github.com/bucketeer-io/bucketeer/pkg/feature/domain" + v2fs "github.com/bucketeer-io/bucketeer/pkg/feature/storage/v2" + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + "github.com/bucketeer-io/bucketeer/pkg/uuid" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" + serviceeventproto "github.com/bucketeer-io/bucketeer/proto/event/service" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +func (s *FeatureService) AddSegmentUser( + ctx context.Context, + req *featureproto.AddSegmentUserRequest, +) (*featureproto.AddSegmentUserResponse, error) { + editor, err := 
s.checkRole(ctx, accountproto.Account_EDITOR, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := validateAddSegmentUserRequest(req); err != nil { + s.logger.Info( + "Invalid argument", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, err + } + if err := validateAddSegmentUserCommand(req.Command); err != nil { + s.logger.Info( + "Invalid argument", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, err + } + if err := s.updateSegmentUser( + ctx, + editor, + req.Id, + req.Command.UserIds, + req.Command.State, + false, + req.Command, + req.EnvironmentNamespace, + ); err != nil { + return nil, err + } + return &featureproto.AddSegmentUserResponse{}, nil +} + +func (s *FeatureService) DeleteSegmentUser( + ctx context.Context, + req *featureproto.DeleteSegmentUserRequest, +) (*featureproto.DeleteSegmentUserResponse, error) { + editor, err := s.checkRole(ctx, accountproto.Account_EDITOR, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := validateDeleteSegmentUserRequest(req); err != nil { + s.logger.Info( + "Invalid argument", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, err + } + if err := validateDeleteSegmentUserCommand(req.Command); err != nil { + s.logger.Info( + "Invalid argument", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, err + } + if err := s.updateSegmentUser( + ctx, + editor, + req.Id, + req.Command.UserIds, + req.Command.State, + true, + req.Command, + req.EnvironmentNamespace, + ); err != nil { + return nil, err + } + return &featureproto.DeleteSegmentUserResponse{}, nil +} + 
+func (s *FeatureService) updateSegmentUser( + ctx context.Context, + editor *eventproto.Editor, + segmentID string, + userIDs []string, + state featureproto.SegmentUser_State, + deleted bool, + cmd command.Command, + environmentNamespace string, +) error { + segmentUsers := make([]*featureproto.SegmentUser, 0, len(userIDs)) + for _, userID := range userIDs { + userID = strings.TrimSpace(userID) + user := domain.NewSegmentUser(segmentID, userID, state, deleted) + segmentUsers = append(segmentUsers, user.SegmentUser) + } + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return localizedError(statusInternal, locale.JaJP) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + segmentStorage := v2fs.NewSegmentStorage(tx) + segment, err := segmentStorage.GetSegment(ctx, segmentID, environmentNamespace) + if err != nil { + s.logger.Error( + "Failed to get segment", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return err + } + segmentUserStorage := v2fs.NewSegmentUserStorage(tx) + if err := segmentUserStorage.UpsertSegmentUsers(ctx, segmentUsers, environmentNamespace); err != nil { + s.logger.Error( + "Failed to store segment user", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return err + } + handler := command.NewSegmentCommandHandler( + editor, + segment, + s.domainPublisher, + environmentNamespace, + ) + if err := handler.Handle(ctx, cmd); err != nil { + s.logger.Error( + "Failed to handle command", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return err + } + if err := segmentStorage.UpdateSegment(ctx, segment, environmentNamespace); err 
!= nil { + return err + } + return nil + }) + if err != nil { + if err == v2fs.ErrSegmentNotFound || err == v2fs.ErrSegmentUnexpectedAffectedRows { + return localizedError(statusSegmentNotFound, locale.JaJP) + } + s.logger.Error( + "Failed to upsert segment user", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return localizedError(statusInternal, locale.JaJP) + } + return nil +} + +func (s *FeatureService) GetSegmentUser( + ctx context.Context, + req *featureproto.GetSegmentUserRequest, +) (*featureproto.GetSegmentUserResponse, error) { + _, err := s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := validateGetSegmentUserRequest(req); err != nil { + s.logger.Info( + "Invalid argument", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, err + } + segmentUserStorage := v2fs.NewSegmentUserStorage(s.mysqlClient) + id := domain.SegmentUserID(req.SegmentId, req.UserId, req.State) + user, err := segmentUserStorage.GetSegmentUser(ctx, id, req.EnvironmentNamespace) + if err != nil { + if err == v2fs.ErrSegmentUserNotFound { + return nil, localizedError(statusNotFound, locale.JaJP) + } + s.logger.Error( + "Failed to get segment user", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &featureproto.GetSegmentUserResponse{ + User: user.SegmentUser, + }, nil +} + +func (s *FeatureService) ListSegmentUsers( + ctx context.Context, + req *featureproto.ListSegmentUsersRequest, +) (*featureproto.ListSegmentUsersResponse, error) { + _, err := s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if 
err := validateListSegmentUsersRequest(req); err != nil { + s.logger.Info( + "Invalid argument", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, err + } + whereParts := []mysql.WherePart{ + mysql.NewFilter("segment_id", "=", req.SegmentId), + mysql.NewFilter("deleted", "=", false), + mysql.NewFilter("environment_namespace", "=", req.EnvironmentNamespace), + } + if req.State != nil { + whereParts = append(whereParts, mysql.NewFilter("state", "=", req.State.GetValue())) + } + if req.UserId != "" { + whereParts = append(whereParts, mysql.NewFilter("user_id", "=", req.UserId)) + } + limit := int(req.PageSize) + cursor := req.Cursor + if cursor == "" { + cursor = "0" + } + offset, err := strconv.Atoi(cursor) + if err != nil { + return nil, localizedError(statusInvalidCursor, locale.JaJP) + } + segmentUserStorage := v2fs.NewSegmentUserStorage(s.mysqlClient) + users, nextCursor, err := segmentUserStorage.ListSegmentUsers( + ctx, + whereParts, + nil, + limit, + offset, + ) + if err != nil { + s.logger.Error( + "Failed to list segment users", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &featureproto.ListSegmentUsersResponse{ + Users: users, + Cursor: strconv.Itoa(nextCursor), + }, nil +} + +func (s *FeatureService) BulkUploadSegmentUsers( + ctx context.Context, + req *featureproto.BulkUploadSegmentUsersRequest, +) (*featureproto.BulkUploadSegmentUsersResponse, error) { + editor, err := s.checkRole(ctx, accountproto.Account_EDITOR, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := validateBulkUploadSegmentUsersRequest(req); err != nil { + s.logger.Info( + "Invalid argument", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", 
req.EnvironmentNamespace), + )..., + ) + return nil, err + } + if err := validateBulkUploadSegmentUsersCommand(req.Command); err != nil { + s.logger.Info( + "Invalid argument", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, err + } + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + segmentStorage := v2fs.NewSegmentStorage(tx) + segment, err := segmentStorage.GetSegment(ctx, req.SegmentId, req.EnvironmentNamespace) + if err != nil { + return err + } + if segment.IsInUseStatus { + return localizedError(statusSegmentInUse, locale.JaJP) + } + if segment.Status == featureproto.Segment_UPLOADING { + return localizedError(statusSegmentUsersAlreadyUploading, locale.JaJP) + } + handler := command.NewSegmentCommandHandler( + editor, + segment, + s.domainPublisher, + req.EnvironmentNamespace, + ) + if err := handler.Handle(ctx, req.Command); err != nil { + s.logger.Error( + "Failed to handle command", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return err + } + if err := segmentStorage.UpdateSegment(ctx, segment, req.EnvironmentNamespace); err != nil { + return err + } + return s.publishBulkSegmentUsersReceivedEvent( + ctx, + editor, + req.EnvironmentNamespace, + req.SegmentId, + req.Command.Data, + req.Command.State, + ) + }) + if err != nil { + if err == v2fs.ErrSegmentNotFound || err == v2fs.ErrFeatureUnexpectedAffectedRows { + return nil, localizedError(statusSegmentNotFound, locale.JaJP) + } + if status.Code(err) == codes.FailedPrecondition { + return nil, err + } + s.logger.Error( + "Failed to bulk upload 
segment users", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &featureproto.BulkUploadSegmentUsersResponse{}, nil +} + +func (s *FeatureService) publishBulkSegmentUsersReceivedEvent( + ctx context.Context, + editor *eventproto.Editor, + environmentNamespace string, + segmentID string, + data []byte, + state featureproto.SegmentUser_State, +) error { + id, err := uuid.NewUUID() + if err != nil { + return err + } + e := &serviceeventproto.BulkSegmentUsersReceivedEvent{ + Id: id.String(), + EnvironmentNamespace: environmentNamespace, + SegmentId: segmentID, + Data: data, + State: state, + Editor: editor, + } + return s.segmentUsersPublisher.Publish(ctx, e) +} + +func (s *FeatureService) BulkDownloadSegmentUsers( + ctx context.Context, + req *featureproto.BulkDownloadSegmentUsersRequest, +) (*featureproto.BulkDownloadSegmentUsersResponse, error) { + _, err := s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := validateBulkDownloadSegmentUsersRequest(req); err != nil { + s.logger.Info( + "Invalid argument", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, err + } + segmentStorage := v2fs.NewSegmentStorage(s.mysqlClient) + segment, err := segmentStorage.GetSegment(ctx, req.SegmentId, req.EnvironmentNamespace) + if err != nil { + if err == v2fs.ErrSegmentNotFound { + return nil, localizedError(statusSegmentNotFound, locale.JaJP) + } + s.logger.Error( + "Failed to get segment", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + if segment.Status != featureproto.Segment_SUCEEDED { + 
return nil, localizedError(statusSegmentStatusNotSuceeded, locale.JaJP) + } + whereParts := []mysql.WherePart{ + mysql.NewFilter("segment_id", "=", req.SegmentId), + mysql.NewFilter("state", "=", int32(req.State)), + mysql.NewFilter("deleted", "=", false), + mysql.NewFilter("environment_namespace", "=", req.EnvironmentNamespace), + } + segmentUserStorage := v2fs.NewSegmentUserStorage(s.mysqlClient) + users, _, err := segmentUserStorage.ListSegmentUsers( + ctx, + whereParts, + nil, + mysql.QueryNoLimit, + mysql.QueryNoOffset, + ) + if err != nil { + s.logger.Error( + "Failed to list segment users", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + var buf bytes.Buffer + for _, user := range users { + buf.WriteString(user.UserId + "\n") + } + return &featureproto.BulkDownloadSegmentUsersResponse{ + Data: buf.Bytes(), + }, nil +} diff --git a/pkg/feature/api/segment_user_test.go b/pkg/feature/api/segment_user_test.go new file mode 100644 index 000000000..ef9116d2d --- /dev/null +++ b/pkg/feature/api/segment_user_test.go @@ -0,0 +1,230 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "context" + "strings" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + v2fs "github.com/bucketeer-io/bucketeer/pkg/feature/storage/v2" + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + mysqlmock "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +func TestBulkUploadSegmentUsersMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + testcases := map[string]struct { + setup func(*FeatureService) + environmentNamespace string + role accountproto.Account_Role + segmentID string + cmd *featureproto.BulkUploadSegmentUsersCommand + expectedErr error + }{ + "ErrMissingSegmentID": { + setup: nil, + environmentNamespace: "ns0", + role: accountproto.Account_OWNER, + segmentID: "", + cmd: nil, + expectedErr: errMissingSegmentIDJaJP, + }, + "ErrMissingCommand": { + setup: nil, + environmentNamespace: "ns0", + role: accountproto.Account_OWNER, + segmentID: "id", + cmd: nil, + expectedErr: errMissingCommandJaJP, + }, + "ErrMissingSegmentUsersData": { + setup: nil, + environmentNamespace: "ns0", + role: accountproto.Account_OWNER, + segmentID: "id", + cmd: &featureproto.BulkUploadSegmentUsersCommand{}, + expectedErr: errMissingSegmentUsersDataJaJP, + }, + "ErrExceededMaxSegmentUsersDataSize": { + setup: nil, + environmentNamespace: "ns0", + role: accountproto.Account_OWNER, + segmentID: "id", + cmd: &featureproto.BulkUploadSegmentUsersCommand{ + Data: []byte(strings.Repeat("a", maxSegmentUsersDataSize+1)), + }, + expectedErr: errExceededMaxSegmentUsersDataSizeJaJP, + }, + "ErrUnknownSegmentUserState": { + setup: nil, + environmentNamespace: "ns0", + role: accountproto.Account_OWNER, + segmentID: "id", + cmd: 
&featureproto.BulkUploadSegmentUsersCommand{ + Data: []byte("data"), + State: featureproto.SegmentUser_State(99), + }, + expectedErr: errUnknownSegmentUserStateJaJP, + }, + "ErrSegmentNotFound": { + setup: func(s *FeatureService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2fs.ErrSegmentNotFound) + }, + environmentNamespace: "ns0", + role: accountproto.Account_OWNER, + segmentID: "not_found_id", + cmd: &featureproto.BulkUploadSegmentUsersCommand{ + Data: []byte("data"), + State: featureproto.SegmentUser_INCLUDED, + }, + expectedErr: errSegmentNotFoundJaJP, + }, + "ErrSegmentUsersAlreadyUploading": { + setup: func(s *FeatureService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(localizedError(statusSegmentUsersAlreadyUploading, locale.JaJP)) + }, + environmentNamespace: "ns0", + role: accountproto.Account_OWNER, + segmentID: "id", + cmd: &featureproto.BulkUploadSegmentUsersCommand{ + Data: []byte("data"), + State: featureproto.SegmentUser_INCLUDED, + }, + expectedErr: errSegmentUsersAlreadyUploadingJaJP, + }, + "Success": { + setup: func(s *FeatureService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + environmentNamespace: "ns0", + role: accountproto.Account_OWNER, + segmentID: "id", + cmd: &featureproto.BulkUploadSegmentUsersCommand{ + Data: []byte("data"), + State: featureproto.SegmentUser_INCLUDED, + }, + expectedErr: nil, + }, + } + + for msg, tc := range testcases { + t.Run(msg, func(t *testing.T) { + service := createFeatureService(mockController) + if 
tc.setup != nil { + tc.setup(service) + } + ctx := setToken(context.Background(), tc.role) + req := &featureproto.BulkUploadSegmentUsersRequest{ + EnvironmentNamespace: tc.environmentNamespace, + SegmentId: tc.segmentID, + Command: tc.cmd, + } + _, err := service.BulkUploadSegmentUsers(ctx, req) + assert.Equal(t, tc.expectedErr, err) + }) + } +} + +func TestBulkDownloadSegmentUsersMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + testcases := map[string]struct { + setup func(*FeatureService) + environmentNamespace string + segmentID string + state featureproto.SegmentUser_State + expectedErr error + }{ + "ErrMissingSegmentID": { + setup: nil, + environmentNamespace: "ns0", + segmentID: "", + state: featureproto.SegmentUser_INCLUDED, + expectedErr: errMissingSegmentIDJaJP, + }, + "ErrUnknownSegmentUserState": { + setup: nil, + environmentNamespace: "ns0", + segmentID: "id", + state: featureproto.SegmentUser_State(99), + expectedErr: errUnknownSegmentUserStateJaJP, + }, + "ErrSegmentNotFound": { + setup: func(s *FeatureService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + environmentNamespace: "ns0", + segmentID: "id", + state: featureproto.SegmentUser_INCLUDED, + expectedErr: errSegmentNotFoundJaJP, + }, + "ErrSegmentStatusNotSuceeded": { + setup: func(s *FeatureService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + environmentNamespace: "ns0", + segmentID: "id", + state: featureproto.SegmentUser_INCLUDED, + expectedErr: errSegmentStatusNotSuceededJaJP, + }, + } + for msg, tc := range testcases { + t.Run(msg, func(t *testing.T) { + 
service := createFeatureService(mockController) + if tc.setup != nil { + tc.setup(service) + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ctx = setToken(ctx, accountproto.Account_UNASSIGNED) + req := &featureproto.BulkDownloadSegmentUsersRequest{ + EnvironmentNamespace: tc.environmentNamespace, + SegmentId: tc.segmentID, + State: tc.state, + } + _, err := service.BulkDownloadSegmentUsers(ctx, req) + assert.Equal(t, tc.expectedErr, err) + }) + } +} diff --git a/pkg/feature/api/tag.go b/pkg/feature/api/tag.go new file mode 100644 index 000000000..ec35ec3f4 --- /dev/null +++ b/pkg/feature/api/tag.go @@ -0,0 +1,156 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "context" + "strconv" + + "go.uber.org/zap" + "google.golang.org/genproto/googleapis/rpc/errdetails" + + "github.com/bucketeer-io/bucketeer/pkg/feature/domain" + v2fs "github.com/bucketeer-io/bucketeer/pkg/feature/storage/v2" + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +func (s *FeatureService) ListTags( + ctx context.Context, + req *featureproto.ListTagsRequest, +) (*featureproto.ListTagsResponse, error) { + _, err := s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + localizer := locale.NewLocalizer(locale.NewLocale(locale.JaJP)) + whereParts := []mysql.WherePart{ + mysql.NewFilter("environment_namespace", "=", req.EnvironmentNamespace), + } + if req.SearchKeyword != "" { + whereParts = append(whereParts, mysql.NewSearchQuery([]string{"id"}, req.SearchKeyword)) + } + orders, err := s.newListTagsOrdersMySQL(req.OrderBy, req.OrderDirection, localizer) + if err != nil { + s.logger.Error( + "Invalid argument", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, err + } + limit := int(req.PageSize) + cursor := req.Cursor + if cursor == "" { + cursor = "0" + } + offset, err := strconv.Atoi(cursor) + if err != nil { + dt, err := statusInvalidCursor.WithDetails(&errdetails.LocalizedMessage{ + Locale: localizer.GetLocale(), + Message: localizer.MustLocalizeWithTemplate(locale.InvalidArgumentError, "cursor"), + }) + if err != nil { + return nil, statusInternal.Err() + } + return nil, dt.Err() + } + tagStorage := v2fs.NewTagStorage(s.mysqlClient) + tags, nextCursor, totalCount, err := tagStorage.ListTags( + ctx, + whereParts, + orders, + 
limit, + offset, + ) + if err != nil { + s.logger.Error( + "Failed to list tags", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, s.reportInternalServerError(ctx, err, req.EnvironmentNamespace, localizer) + } + return &featureproto.ListTagsResponse{ + Tags: tags, + Cursor: strconv.Itoa(nextCursor), + TotalCount: totalCount, + }, nil +} + +func (s *FeatureService) newListTagsOrdersMySQL( + orderBy featureproto.ListTagsRequest_OrderBy, + orderDirection featureproto.ListTagsRequest_OrderDirection, + localizer locale.Localizer, +) ([]*mysql.Order, error) { + var column string + switch orderBy { + case featureproto.ListTagsRequest_DEFAULT, + featureproto.ListTagsRequest_ID: + column = "tag.id" + case featureproto.ListTagsRequest_CREATED_AT: + column = "tag.created_at" + case featureproto.ListTagsRequest_UPDATED_AT: + column = "tag.updated_at" + default: + dt, err := statusInvalidOrderBy.WithDetails(&errdetails.LocalizedMessage{ + Locale: localizer.GetLocale(), + Message: localizer.MustLocalizeWithTemplate(locale.InvalidArgumentError, "order_by"), + }) + if err != nil { + return nil, statusInternal.Err() + } + return nil, dt.Err() + } + direction := mysql.OrderDirectionAsc + if orderDirection == featureproto.ListTagsRequest_DESC { + direction = mysql.OrderDirectionDesc + } + return []*mysql.Order{mysql.NewOrder(column, direction)}, nil +} + +func (s *FeatureService) upsertTags( + ctx context.Context, + tx mysql.Transaction, + tags []string, + environmentNamespace string, +) error { + tagStorage := v2fs.NewTagStorage(tx) + for _, tag := range tags { + if tag == "" { + continue + } + t := domain.NewTag(tag) + if err := tagStorage.UpsertTag(ctx, t, environmentNamespace); err != nil { + s.logger.Error( + "Failed to store tag", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + 
zap.String("tagID", tag), + )..., + ) + return err + } + } + return nil +} diff --git a/pkg/feature/api/tag_test.go b/pkg/feature/api/tag_test.go new file mode 100644 index 000000000..087563154 --- /dev/null +++ b/pkg/feature/api/tag_test.go @@ -0,0 +1,144 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "errors" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc/status" + + "github.com/bucketeer-io/bucketeer/pkg/locale" + mysqlmock "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +func TestListTagsMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + ctx := createContextWithToken() + service := createFeatureServiceNew(mockController) + localizer := locale.NewLocalizer(locale.NewLocale(locale.JaJP)) + createError := func(msg string, status *status.Status) error { + status, err := status.WithDetails(&errdetails.LocalizedMessage{ + Locale: localizer.GetLocale(), + Message: msg, + }) + require.NoError(t, err) + return status.Err() + } + + patterns := map[string]struct { + setup func(*FeatureService) + input *featureproto.ListTagsRequest + expected *featureproto.ListTagsResponse + expectedErr 
error + }{ + "errInvalidCursor": { + setup: nil, + input: &featureproto.ListTagsRequest{EnvironmentNamespace: environmentNamespace, Cursor: "foo"}, + expected: nil, + expectedErr: createError(localizer.MustLocalizeWithTemplate(locale.InvalidArgumentError, "cursor"), statusInvalidCursor), + }, + "errInternal": { + setup: func(fs *FeatureService) { + fs.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("test")) + }, + input: &featureproto.ListTagsRequest{EnvironmentNamespace: environmentNamespace}, + expected: nil, + expectedErr: createError(localizer.MustLocalize(locale.InternalServerError), statusInternal), + }, + "success": { + setup: func(fs *FeatureService) { + rows := mysqlmock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + fs.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + fs.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + input: &featureproto.ListTagsRequest{ + PageSize: 2, + Cursor: "", + EnvironmentNamespace: environmentNamespace, + }, + expected: &featureproto.ListTagsResponse{Tags: []*featureproto.Tag{}, Cursor: "0"}, + expectedErr: nil, + }, + } + + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + if p.setup != nil { + p.setup(service) + } + actual, err := service.ListTags(ctx, p.input) + assert.Equal(t, p.expected, actual, msg) + assert.Equal(t, p.expectedErr, err, msg) + }) + } +} + +func TestUpsertTags(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + service := createFeatureServiceNew(mockController) + ctx := createContextWithToken() + transaction := 
mysqlmock.NewMockTransaction(mockController) + internalErr := errors.New("test") + patterns := map[string]struct { + setup func(*mysqlmock.MockTransaction) + expectedErr error + }{ + "error: internal error when creating tag": { + setup: func(mt *mysqlmock.MockTransaction) { + mt.EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, internalErr) + }, + expectedErr: internalErr, + }, + "success: create new tag": { + setup: func(mt *mysqlmock.MockTransaction) { + mt.EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, nil) + }, + expectedErr: nil, + }, + } + + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + p.setup(transaction) + err := service.upsertTags(ctx, transaction, []string{"tag"}, environmentNamespace) + assert.Equal(t, p.expectedErr, err, msg) + }) + } +} diff --git a/pkg/feature/api/user_evaluations.go b/pkg/feature/api/user_evaluations.go new file mode 100644 index 000000000..4576eee0f --- /dev/null +++ b/pkg/feature/api/user_evaluations.go @@ -0,0 +1,91 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "context" + + "go.uber.org/zap" + + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/log" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +func (s *FeatureService) GetUserEvaluations( + ctx context.Context, + req *featureproto.GetUserEvaluationsRequest, +) (*featureproto.GetUserEvaluationsResponse, error) { + _, err := s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := validateGetUserEvaluationsRequest(req); err != nil { + return nil, err + } + evaluations, err := s.userEvaluationStorage.GetUserEvaluations( + ctx, + req.UserId, + req.EnvironmentNamespace, + req.Tag, + ) + if err != nil { + s.logger.Error( + "Failed to get user evaluations", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + zap.String("userId", req.UserId), + zap.String("tag", req.Tag), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &featureproto.GetUserEvaluationsResponse{ + Evaluations: evaluations, + }, nil +} + +func (s *FeatureService) UpsertUserEvaluation( + ctx context.Context, + req *featureproto.UpsertUserEvaluationRequest, +) (*featureproto.UpsertUserEvaluationResponse, error) { + _, err := s.checkRole(ctx, accountproto.Account_EDITOR, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := validateUpsertUserEvaluationRequest(req); err != nil { + return nil, err + } + if err := s.userEvaluationStorage.UpsertUserEvaluation( + ctx, + req.Evaluation, + req.EnvironmentNamespace, + req.Tag, + ); err != nil { + s.logger.Error( + "Failed to upsert user evaluation", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + zap.String("tag", req.Tag), + 
zap.Any("evaluation", req.Evaluation), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &featureproto.UpsertUserEvaluationResponse{}, nil +} diff --git a/pkg/feature/api/user_evaluations_test.go b/pkg/feature/api/user_evaluations_test.go new file mode 100644 index 000000000..d7a73bd95 --- /dev/null +++ b/pkg/feature/api/user_evaluations_test.go @@ -0,0 +1,228 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "context" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + ftstorage "github.com/bucketeer-io/bucketeer/pkg/feature/storage" + ftmock "github.com/bucketeer-io/bucketeer/pkg/feature/storage/mock" + "github.com/bucketeer-io/bucketeer/pkg/locale" + bigtable "github.com/bucketeer-io/bucketeer/pkg/storage/v2/bigtable" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +func TestGetUserEvaluations(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := []struct { + desc string + setup func(context.Context, ftstorage.UserEvaluationsStorage) + role accountproto.Account_Role + req *featureproto.GetUserEvaluationsRequest + expected *featureproto.GetUserEvaluationsResponse + expectedErr error + }{ + { + desc: "ErrMissingFeatureTag", + setup: nil, + role: accountproto.Account_EDITOR, + req: &featureproto.GetUserEvaluationsRequest{ + EnvironmentNamespace: environmentNamespace, + Tag: "", + UserId: userID, + }, + expected: nil, + expectedErr: localizedError(statusMissingFeatureTag, locale.JaJP), + }, + { + desc: "ErrMissingUserID", + setup: nil, + role: accountproto.Account_EDITOR, + req: &featureproto.GetUserEvaluationsRequest{ + EnvironmentNamespace: environmentNamespace, + Tag: tag, + UserId: "", + }, + expected: nil, + expectedErr: localizedError(statusMissingUserID, locale.JaJP), + }, + { + desc: "ErrInternal", + setup: func(ctx context.Context, s ftstorage.UserEvaluationsStorage) { + s.(*ftmock.MockUserEvaluationsStorage).EXPECT().GetUserEvaluations( + ctx, + userID, + environmentNamespace, + tag, + ).Return(nil, bigtable.ErrInternal).Times(1) + }, + role: accountproto.Account_EDITOR, + req: &featureproto.GetUserEvaluationsRequest{ + EnvironmentNamespace: environmentNamespace, + Tag: tag, + UserId: userID, + }, + expected: nil, + expectedErr: 
localizedError(statusInternal, locale.JaJP), + }, + { + desc: "Success", + setup: func(ctx context.Context, s ftstorage.UserEvaluationsStorage) { + s.(*ftmock.MockUserEvaluationsStorage).EXPECT().GetUserEvaluations( + ctx, + userID, + environmentNamespace, + tag, + ).Return([]*featureproto.Evaluation{}, nil).Times(1) + }, + role: accountproto.Account_EDITOR, + req: &featureproto.GetUserEvaluationsRequest{ + EnvironmentNamespace: environmentNamespace, + Tag: tag, + UserId: userID, + }, + expected: &featureproto.GetUserEvaluationsResponse{ + Evaluations: []*featureproto.Evaluation{}, + }, + expectedErr: nil, + }, + } + for _, p := range patterns { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + service := createFeatureServiceNew(mockController) + ctx = setToken(ctx, p.role) + if p.setup != nil { + p.setup(ctx, service.userEvaluationStorage) + } + resp, err := service.GetUserEvaluations( + ctx, + p.req, + ) + assert.Equal(t, p.expected, resp, p.desc) + assert.Equal(t, p.expectedErr, err, p.desc) + } +} + +func TestUpsertUserEvaluation(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := []struct { + desc string + setup func(context.Context, ftstorage.UserEvaluationsStorage) + role accountproto.Account_Role + req *featureproto.UpsertUserEvaluationRequest + expected *featureproto.UpsertUserEvaluationResponse + expectedErr error + }{ + { + desc: "ErrPermissionDenied", + setup: nil, + role: accountproto.Account_UNASSIGNED, + req: &featureproto.UpsertUserEvaluationRequest{ + EnvironmentNamespace: environmentNamespace, + Evaluation: evaluation, + Tag: tag, + }, + expected: nil, + expectedErr: localizedError(statusPermissionDenied, locale.JaJP), + }, + { + desc: "ErrMissingFeatureTag", + setup: nil, + role: accountproto.Account_EDITOR, + req: &featureproto.UpsertUserEvaluationRequest{ + EnvironmentNamespace: environmentNamespace, + Tag: "", + Evaluation: evaluation, + }, + 
expected: nil, + expectedErr: localizedError(statusMissingFeatureTag, locale.JaJP), + }, + { + desc: "ErrMissingEvaluation", + setup: nil, + role: accountproto.Account_EDITOR, + req: &featureproto.UpsertUserEvaluationRequest{ + EnvironmentNamespace: environmentNamespace, + Tag: tag, + Evaluation: nil, + }, + expected: nil, + expectedErr: localizedError(statusMissingEvaluation, locale.JaJP), + }, + { + desc: "ErrInternal", + setup: func(ctx context.Context, s ftstorage.UserEvaluationsStorage) { + s.(*ftmock.MockUserEvaluationsStorage).EXPECT().UpsertUserEvaluation( + ctx, + evaluation, + environmentNamespace, + tag, + ).Return(bigtable.ErrInternal).Times(1) + }, + role: accountproto.Account_EDITOR, + req: &featureproto.UpsertUserEvaluationRequest{ + EnvironmentNamespace: environmentNamespace, + Evaluation: evaluation, + Tag: tag, + }, + expected: nil, + expectedErr: localizedError(statusInternal, locale.JaJP), + }, + { + desc: "Success", + setup: func(ctx context.Context, s ftstorage.UserEvaluationsStorage) { + s.(*ftmock.MockUserEvaluationsStorage).EXPECT().UpsertUserEvaluation( + ctx, + evaluation, + environmentNamespace, + tag, + ).Return(nil).Times(1) + }, + role: accountproto.Account_EDITOR, + req: &featureproto.UpsertUserEvaluationRequest{ + EnvironmentNamespace: environmentNamespace, + Evaluation: evaluation, + Tag: tag, + }, + expected: &featureproto.UpsertUserEvaluationResponse{}, + expectedErr: nil, + }, + } + for _, p := range patterns { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + service := createFeatureServiceNew(mockController) + ctx = setToken(ctx, p.role) + if p.setup != nil { + p.setup(ctx, service.userEvaluationStorage) + } + resp, err := service.UpsertUserEvaluation( + ctx, + p.req, + ) + assert.Equal(t, p.expected, resp, p.desc) + assert.Equal(t, p.expectedErr, err, p.desc) + } +} diff --git a/pkg/feature/api/validation.go b/pkg/feature/api/validation.go new file mode 100644 index 000000000..0490c134c --- 
/dev/null +++ b/pkg/feature/api/validation.go @@ -0,0 +1,659 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "regexp" + + "github.com/bucketeer-io/bucketeer/pkg/feature/command" + "github.com/bucketeer-io/bucketeer/pkg/feature/domain" + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/uuid" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +const ( + maxPageSizePerRequest = 100 + maxUserIDsLength = 100000 + maxSegmentUsersDataSize = 2000000 // 2MB + totalVariationWeight = int32(100000) +) + +var featureIDRegex = regexp.MustCompile("^[a-zA-Z0-9-]+$") + +func validateCreateFeatureRequest(cmd *featureproto.CreateFeatureCommand) error { + if cmd == nil { + return localizedError(statusMissingCommand, locale.JaJP) + } + if cmd.Id == "" { + return localizedError(statusMissingID, locale.JaJP) + } + if !featureIDRegex.MatchString(cmd.Id) { + return localizedError(statusInvalidID, locale.JaJP) + } + if cmd.Name == "" { + return localizedError(statusMissingName, locale.JaJP) + } + variationSize := len(cmd.Variations) + if variationSize < 2 { + return localizedError(statusMissingFeatureVariations, locale.JaJP) + } + if len(cmd.Tags) == 0 { + return localizedError(statusMissingFeatureTags, locale.JaJP) + } + if cmd.DefaultOnVariationIndex == nil { + return localizedError(statusMissingDefaultOnVariation, locale.JaJP) + } + if 
int(cmd.DefaultOnVariationIndex.Value) >= variationSize { + return localizedError(statusInvalidDefaultOnVariation, locale.JaJP) + } + if cmd.DefaultOffVariationIndex == nil { + return localizedError(statusMissingDefaultOffVariation, locale.JaJP) + } + if int(cmd.DefaultOffVariationIndex.Value) >= variationSize { + return localizedError(statusInvalidDefaultOffVariation, locale.JaJP) + } + return nil +} + +func validateCreateSegmentRequest(cmd *featureproto.CreateSegmentCommand) error { + if cmd == nil { + return localizedError(statusMissingCommand, locale.JaJP) + } + if cmd.Name == "" { + return localizedError(statusMissingName, locale.JaJP) + } + return nil +} + +func validateGetSegmentRequest(req *featureproto.GetSegmentRequest) error { + if req.Id == "" { + return localizedError(statusMissingID, locale.JaJP) + } + return nil +} + +func validateListSegmentsRequest(req *featureproto.ListSegmentsRequest) error { + if req.PageSize > maxPageSizePerRequest { + return localizedError(statusExceededMaxPageSizePerRequest, locale.JaJP) + } + return nil +} + +func validateDeleteSegmentRequest(req *featureproto.DeleteSegmentRequest) error { + if req.Id == "" { + return localizedError(statusMissingID, locale.JaJP) + } + if req.Command == nil { + return localizedError(statusMissingCommand, locale.JaJP) + } + return nil +} + +func validateUpdateSegment(segmentID string, commands []command.Command) error { + if segmentID == "" { + return localizedError(statusMissingID, locale.JaJP) + } + return validateUpdateSegmentCommands(commands) +} + +func validateUpdateSegmentCommands(commands []command.Command) error { + for _, cmd := range commands { + switch c := cmd.(type) { + case *featureproto.ChangeSegmentNameCommand: + return validateChangeSegmentName(c) + case *featureproto.ChangeSegmentDescriptionCommand: + return nil + case *featureproto.AddRuleCommand: + return validateAddSegmentRule(c) + case *featureproto.DeleteRuleCommand: + return validateDeleteSegmentRule(c) + case 
*featureproto.AddClauseCommand: + return validateAddSegmentClauseCommand(c) + case *featureproto.DeleteClauseCommand: + return validateDeleteSegmentClauseCommand(c) + case *featureproto.ChangeClauseAttributeCommand: + return validateChangeClauseAttributeCommand(c) + case *featureproto.ChangeClauseOperatorCommand: + return validateChangeClauseOperatorCommand(c) + case *featureproto.AddClauseValueCommand: + return validateAddClauseValueCommand(c) + case *featureproto.RemoveClauseValueCommand: + return validateRemoveClauseValueCommand(c) + default: + return localizedError(statusUnknownCommand, locale.JaJP) + } + } + return localizedError(statusMissingCommand, locale.JaJP) +} + +func validateChangeSegmentName(cmd *featureproto.ChangeSegmentNameCommand) error { + if cmd.Name == "" { + return localizedError(statusMissingName, locale.JaJP) + } + return nil +} + +func validateAddSegmentRule(cmd *featureproto.AddRuleCommand) error { + if cmd.Rule == nil { + return localizedError(statusMissingRule, locale.JaJP) + } + if cmd.Rule.Id == "" { + return localizedError(statusMissingRuleID, locale.JaJP) + } + if err := uuid.ValidateUUID(cmd.Rule.Id); err != nil { + return localizedError(statusIncorrectUUIDFormat, locale.JaJP) + } + if len(cmd.Rule.Clauses) == 0 { + return localizedError(statusMissingRuleClause, locale.JaJP) + } + return validateClauses(cmd.Rule.Clauses) +} + +func validateClauses(clauses []*featureproto.Clause) error { + for _, clause := range clauses { + if clause.Attribute == "" { + return localizedError(statusMissingClauseAttribute, locale.JaJP) + } + if len(clause.Values) == 0 { + return localizedError(statusMissingClauseValues, locale.JaJP) + } + } + return nil +} + +func validateDeleteSegmentRule(cmd *featureproto.DeleteRuleCommand) error { + if cmd == nil { + return localizedError(statusMissingCommand, locale.JaJP) + } + if cmd.Id == "" { + return localizedError(statusMissingRuleID, locale.JaJP) + } + return nil +} + +func validateAddSegmentClauseCommand(cmd 
*featureproto.AddClauseCommand) error { + if cmd.RuleId == "" { + return localizedError(statusMissingRuleID, locale.JaJP) + } + if cmd.Clause == nil { + return localizedError(statusMissingRuleClause, locale.JaJP) + } + return validateClauses([]*featureproto.Clause{cmd.Clause}) +} + +func validateDeleteSegmentClauseCommand(cmd *featureproto.DeleteClauseCommand) error { + if cmd.Id == "" { + return localizedError(statusMissingClauseID, locale.JaJP) + } + if cmd.RuleId == "" { + return localizedError(statusMissingRuleID, locale.JaJP) + } + return nil +} + +func validateChangeClauseAttributeCommand(cmd *featureproto.ChangeClauseAttributeCommand) error { + if cmd.Id == "" { + return localizedError(statusMissingClauseID, locale.JaJP) + } + if cmd.RuleId == "" { + return localizedError(statusMissingRuleID, locale.JaJP) + } + if cmd.Attribute == "" { + return localizedError(statusMissingClauseAttribute, locale.JaJP) + } + return nil +} + +func validateChangeClauseOperatorCommand(cmd *featureproto.ChangeClauseOperatorCommand) error { + if cmd.Id == "" { + return localizedError(statusMissingClauseID, locale.JaJP) + } + if cmd.RuleId == "" { + return localizedError(statusMissingRuleID, locale.JaJP) + } + return nil +} + +func validateAddClauseValueCommand(cmd *featureproto.AddClauseValueCommand) error { + return validateClauseValueCommand(cmd.Id, cmd.RuleId, cmd.Value) +} + +func validateRemoveClauseValueCommand(cmd *featureproto.RemoveClauseValueCommand) error { + return validateClauseValueCommand(cmd.Id, cmd.RuleId, cmd.Value) +} + +func validateClauseValueCommand(clauseID string, ruleID string, value string) error { + if clauseID == "" { + return localizedError(statusMissingClauseID, locale.JaJP) + } + if ruleID == "" { + return localizedError(statusMissingRuleID, locale.JaJP) + } + if value == "" { + return localizedError(statusMissingClauseValue, locale.JaJP) + } + return nil +} + +func validateAddSegmentUserRequest(req *featureproto.AddSegmentUserRequest) error { + if 
req.Id == "" { + return localizedError(statusMissingID, locale.JaJP) + } + if req.Command == nil { + return localizedError(statusMissingCommand, locale.JaJP) + } + return validateSegmentUserState(req.Command.State) +} + +func validateAddSegmentUserCommand(cmd *featureproto.AddSegmentUserCommand) error { + return validateUserIDs(cmd.UserIds) +} + +func validateDeleteSegmentUserRequest(req *featureproto.DeleteSegmentUserRequest) error { + if req.Id == "" { + return localizedError(statusMissingID, locale.JaJP) + } + if req.Command == nil { + return localizedError(statusMissingCommand, locale.JaJP) + } + return validateSegmentUserState(req.Command.State) +} + +func validateSegmentUserState(state featureproto.SegmentUser_State) error { + switch state { + case featureproto.SegmentUser_INCLUDED: + return nil + default: + return localizedError(statusUnknownSegmentUserState, locale.JaJP) + } +} + +func validateDeleteSegmentUserCommand(cmd *featureproto.DeleteSegmentUserCommand) error { + return validateUserIDs(cmd.UserIds) +} + +func validateUserIDs(userIDs []string) error { + size := len(userIDs) + if size == 0 { + return localizedError(statusMissingUserIDs, locale.JaJP) + } + if size > maxUserIDsLength { + return localizedError(statusExceededMaxUserIDsLength, locale.JaJP) + } + for _, id := range userIDs { + if id == "" { + return localizedError(statusMissingUserID, locale.JaJP) + } + } + return nil +} + +func validateGetSegmentUserRequest(req *featureproto.GetSegmentUserRequest) error { + if req.SegmentId == "" { + return localizedError(statusMissingSegmentID, locale.JaJP) + } + if req.UserId == "" { + return localizedError(statusMissingUserID, locale.JaJP) + } + return nil +} + +func validateListSegmentUsersRequest(req *featureproto.ListSegmentUsersRequest) error { + if req.SegmentId == "" { + return localizedError(statusMissingSegmentID, locale.JaJP) + } + if req.PageSize > maxPageSizePerRequest { + return localizedError(statusExceededMaxPageSizePerRequest, 
locale.JaJP) + } + return nil +} + +func validateBulkUploadSegmentUsersRequest(req *featureproto.BulkUploadSegmentUsersRequest) error { + if req.SegmentId == "" { + return localizedError(statusMissingSegmentID, locale.JaJP) + } + if req.Command == nil { + return localizedError(statusMissingCommand, locale.JaJP) + } + return nil +} + +func validateBulkUploadSegmentUsersCommand(cmd *featureproto.BulkUploadSegmentUsersCommand) error { + if len(cmd.Data) == 0 { + return localizedError(statusMissingSegmentUsersData, locale.JaJP) + } + if len(cmd.Data) > maxSegmentUsersDataSize { + return localizedError(statusExceededMaxSegmentUsersDataSize, locale.JaJP) + } + return validateSegmentUserState(cmd.State) +} + +func validateBulkDownloadSegmentUsersRequest(req *featureproto.BulkDownloadSegmentUsersRequest) error { + if req.SegmentId == "" { + return localizedError(statusMissingSegmentID, locale.JaJP) + } + return validateSegmentUserState(req.State) +} + +func validateEvaluateFeatures(req *featureproto.EvaluateFeaturesRequest) error { + if req.User == nil { + return localizedError(statusMissingUser, locale.JaJP) + } + if req.User.Id == "" { + return localizedError(statusMissingUserID, locale.JaJP) + } + if req.Tag == "" { + return localizedError(statusMissingFeatureTag, locale.JaJP) + } + return nil +} + +func validateUpsertUserEvaluationRequest(req *featureproto.UpsertUserEvaluationRequest) error { + if req.Tag == "" { + return localizedError(statusMissingFeatureTag, locale.JaJP) + } + if req.Evaluation == nil { + return localizedError(statusMissingEvaluation, locale.JaJP) + } + return nil +} + +func validateGetUserEvaluationsRequest(req *featureproto.GetUserEvaluationsRequest) error { + if req.Tag == "" { + return localizedError(statusMissingFeatureTag, locale.JaJP) + } + if req.UserId == "" { + return localizedError(statusMissingUserID, locale.JaJP) + } + return nil +} + +func validateGetFeatureRequest(req *featureproto.GetFeatureRequest) error { + if req.Id == "" { + 
return localizedError(statusMissingID, locale.JaJP) + } + return nil +} + +func validateGetFeaturesRequest(req *featureproto.GetFeaturesRequest) error { + if len(req.Ids) == 0 { + return localizedError(statusMissingIDs, locale.JaJP) + } + for _, id := range req.Ids { + if id == "" { + return localizedError(statusMissingIDs, locale.JaJP) + } + } + return nil +} + +func validateEnableFeatureRequest(req *featureproto.EnableFeatureRequest) error { + if req.Id == "" { + return localizedError(statusMissingID, locale.JaJP) + } + if req.Command == nil { + return localizedError(statusMissingCommand, locale.JaJP) + } + return nil +} + +func validateDisableFeatureRequest(req *featureproto.DisableFeatureRequest) error { + if req.Id == "" { + return localizedError(statusMissingID, locale.JaJP) + } + if req.Command == nil { + return localizedError(statusMissingCommand, locale.JaJP) + } + return nil +} + +func validateDeleteFeatureRequest(req *featureproto.DeleteFeatureRequest) error { + if req.Id == "" { + return localizedError(statusMissingID, locale.JaJP) + } + if req.Command == nil { + return localizedError(statusMissingCommand, locale.JaJP) + } + return nil +} + +func validateFeatureVariationsCommand( + fs []*featureproto.Feature, + cmd command.Command, +) error { + switch c := cmd.(type) { + case *featureproto.RemoveVariationCommand: + return validateVariationCommand(fs, c.Id) + case *featureproto.ChangeVariationValueCommand: + return validateVariationCommand(fs, c.Id) + default: + return nil + } +} + +func validateArchiveFeatureRequest(req *featureproto.ArchiveFeatureRequest, fs []*featureproto.Feature) error { + if req.Id == "" { + return localizedError(statusMissingID, locale.JaJP) + } + if req.Command == nil { + return localizedError(statusMissingCommand, locale.JaJP) + } + for _, f := range fs { + for _, p := range f.Prerequisites { + if p.FeatureId == req.Id { + return localizedError(statusInvalidArchive, locale.JaJP) + } + } + } + return nil +} + +func 
validateVariationCommand(fs []*featureproto.Feature, vID string) error { + for _, f := range fs { + for _, p := range f.Prerequisites { + if p.VariationId == vID { + return localizedError(statusInvalidChangingVariation, locale.JaJP) + } + } + } + return nil +} + +func validateUnarchiveFeatureRequest(req *featureproto.UnarchiveFeatureRequest) error { + if req.Id == "" { + return localizedError(statusMissingID, locale.JaJP) + } + if req.Command == nil { + return localizedError(statusMissingCommand, locale.JaJP) + } + return nil +} + +func validateCloneFeatureRequest(req *featureproto.CloneFeatureRequest) error { + if req.Id == "" { + return localizedError(statusMissingID, locale.JaJP) + } + if req.Command == nil { + return localizedError(statusMissingCommand, locale.JaJP) + } + if req.Command.EnvironmentNamespace == req.EnvironmentNamespace { + return localizedError(statusIncorrectDestinationEnvironment, locale.JaJP) + } + return nil +} + +func validateFeatureTargetingCommand( + fs []*featureproto.Feature, + tarF *featureproto.Feature, + cmd command.Command, +) error { + switch c := cmd.(type) { + case *featureproto.AddRuleCommand: + return validateRule(tarF.Variations, c.Rule) + case *featureproto.ChangeRuleStrategyCommand: + return validateChangeRuleStrategy(tarF.Variations, c) + case *featureproto.ChangeDefaultStrategyCommand: + return validateChangeDefaultStrategy(tarF.Variations, c) + case *featureproto.ChangeFixedStrategyCommand: + return validateChangeFixedStrategy(c) + case *featureproto.ChangeRolloutStrategyCommand: + return validateChangeRolloutStrategy(tarF.Variations, c) + case *featureproto.AddPrerequisiteCommand: + return validateAddPrerequisite(fs, tarF, c.Prerequisite) + case *featureproto.ChangePrerequisiteVariationCommand: + return validateChangePrerequisiteVariation(fs, c.Prerequisite) + default: + return nil + } +} + +func validateRule(variations []*featureproto.Variation, rule *featureproto.Rule) error { + if rule.Id == "" { + return 
localizedError(statusMissingRuleID, locale.JaJP) + } + if err := uuid.ValidateUUID(rule.Id); err != nil { + return localizedError(statusIncorrectUUIDFormat, locale.JaJP) + } + return validateStrategy(variations, rule.Strategy) +} + +func validateChangeRuleStrategy(variations []*featureproto.Variation, cmd *featureproto.ChangeRuleStrategyCommand, +) error { + if cmd.RuleId == "" { + return localizedError(statusMissingRuleID, locale.JaJP) + } + return validateStrategy(variations, cmd.Strategy) +} + +func validateChangeDefaultStrategy( + variations []*featureproto.Variation, + cmd *featureproto.ChangeDefaultStrategyCommand, +) error { + if cmd.Strategy == nil { + return localizedError(statusMissingRuleStrategy, locale.JaJP) + } + return validateStrategy(variations, cmd.Strategy) +} + +func validateStrategy(variations []*featureproto.Variation, strategy *featureproto.Strategy) error { + if strategy == nil { + return localizedError(statusMissingRuleStrategy, locale.JaJP) + } + if strategy.Type == featureproto.Strategy_FIXED { + return validateFixedStrategy(strategy.FixedStrategy) + } + if strategy.Type == featureproto.Strategy_ROLLOUT { + return validateRolloutStrategy(variations, strategy.RolloutStrategy) + } + return localizedError(statusUnknownStrategy, locale.JaJP) +} + +func validateChangeFixedStrategy(cmd *featureproto.ChangeFixedStrategyCommand) error { + if cmd.RuleId == "" { + return localizedError(statusMissingRuleID, locale.JaJP) + } + return validateFixedStrategy(cmd.Strategy) +} + +func validateChangeRolloutStrategy( + variations []*featureproto.Variation, + cmd *featureproto.ChangeRolloutStrategyCommand, +) error { + if cmd.RuleId == "" { + return localizedError(statusMissingRuleID, locale.JaJP) + } + return validateRolloutStrategy(variations, cmd.Strategy) +} + +func validateFixedStrategy(strategy *featureproto.FixedStrategy) error { + if strategy == nil { + return localizedError(statusMissingFixedStrategy, locale.JaJP) + } + if strategy.Variation == "" { 
+ return localizedError(statusMissingVariationID, locale.JaJP) + } + return nil +} + +func validateRolloutStrategy(variations []*featureproto.Variation, strategy *featureproto.RolloutStrategy) error { + if strategy == nil { + return localizedError(statusMissingRolloutStrategy, locale.JaJP) + } + if len(variations) != len(strategy.Variations) { + return localizedError(statusDifferentVariationsSize, locale.JaJP) + } + sum := int32(0) + for _, v := range strategy.Variations { + if v.Variation == "" { + return localizedError(statusMissingVariationID, locale.JaJP) + } + if v.Weight < 0 { + return localizedError(statusIncorrectVariationWeight, locale.JaJP) + } + sum += v.Weight + } + if sum != totalVariationWeight { + return localizedError(statusExceededMaxVariationWeight, locale.JaJP) + } + return nil +} + +func validateAddPrerequisite( + fs []*featureproto.Feature, + tarF *featureproto.Feature, + p *featureproto.Prerequisite, +) error { + if tarF.Id == p.FeatureId { + return localizedError(statusInvalidPrerequisite, locale.JaJP) + } + for _, pf := range tarF.Prerequisites { + if pf.FeatureId == p.FeatureId { + return localizedError(statusInvalidPrerequisite, locale.JaJP) + } + } + if err := validateVariationID(fs, p); err != nil { + return err + } + tarF.Prerequisites = append(tarF.Prerequisites, p) + _, err := domain.TopologicalSort(fs) + if err != nil { + if err == domain.ErrCycleExists { + return localizedError(statusCycleExists, locale.JaJP) + } + return localizedError(statusInternal, locale.JaJP) + } + return nil +} + +func validateChangePrerequisiteVariation(fs []*featureproto.Feature, p *featureproto.Prerequisite) error { + if err := validateVariationID(fs, p); err != nil { + return err + } + return nil +} + +func validateVariationID(fs []*featureproto.Feature, p *featureproto.Prerequisite) error { + f, err := findFeature(fs, p.FeatureId) + if err != nil { + return err + } + for _, v := range f.Variations { + if v.Id == p.VariationId { + return nil + } + } + 
return localizedError(statusInvalidVariationID, locale.JaJP) +} diff --git a/pkg/feature/cacher/BUILD.bazel b/pkg/feature/cacher/BUILD.bazel new file mode 100644 index 000000000..507ec0440 --- /dev/null +++ b/pkg/feature/cacher/BUILD.bazel @@ -0,0 +1,33 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "cacher.go", + "metrics.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/feature/cacher", + visibility = ["//visibility:public"], + deps = [ + "//pkg/cache:go_default_library", + "//pkg/cache/v3:go_default_library", + "//pkg/errgroup:go_default_library", + "//pkg/feature/client:go_default_library", + "//pkg/health:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/pubsub/puller:go_default_library", + "//pkg/pubsub/puller/codes:go_default_library", + "//proto/event/domain:go_default_library", + "//proto/feature:go_default_library", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_prometheus_client_golang//prometheus:go_default_library", + "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["cacher_test.go"], + embed = [":go_default_library"], +) diff --git a/pkg/feature/cacher/cacher.go b/pkg/feature/cacher/cacher.go new file mode 100644 index 000000000..de04f8710 --- /dev/null +++ b/pkg/feature/cacher/cacher.go @@ -0,0 +1,309 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package cacher + +import ( + "context" + "fmt" + "time" + + "github.com/golang/protobuf/proto" // nolint:staticcheck + "github.com/golang/protobuf/ptypes/wrappers" + "go.uber.org/zap" + + "github.com/bucketeer-io/bucketeer/pkg/cache" + cachev3 "github.com/bucketeer-io/bucketeer/pkg/cache/v3" + "github.com/bucketeer-io/bucketeer/pkg/errgroup" + featureservice "github.com/bucketeer-io/bucketeer/pkg/feature/client" + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller/codes" + domainevent "github.com/bucketeer-io/bucketeer/proto/event/domain" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +const ( + listRequestSize = 500 +) + +type options struct { + maxMPS int + numWorkers int + flushSize int + flushInterval time.Duration + metrics metrics.Registerer + logger *zap.Logger +} + +type Option func(*options) + +func WithMaxMPS(mps int) Option { + return func(opts *options) { + opts.maxMPS = mps + } +} + +func WithNumWorkers(n int) Option { + return func(opts *options) { + opts.numWorkers = n + } +} + +func WithFlushSize(size int) Option { + return func(opts *options) { + opts.flushSize = size + } +} + +func WithFlushInterval(interval time.Duration) Option { + return func(opts *options) { + opts.flushInterval = interval + } +} + +func WithMetrics(r metrics.Registerer) Option { + return func(opts *options) { + opts.metrics = r + } +} + +func WithLogger(l *zap.Logger) Option { + return func(opts *options) { + opts.logger = l + } +} + +type FeatureCacher struct { + puller puller.RateLimitedPuller + featuresCache cachev3.FeaturesCache + featureClient featureservice.Client + group errgroup.Group + opts *options + logger *zap.Logger + ctx context.Context + cancel func() + doneCh chan struct{} +} 
+ +func NewFeatureCacher( + p puller.Puller, + client featureservice.Client, + v3Cache cache.MultiGetCache, + opts ...Option, +) *FeatureCacher { + ctx, cancel := context.WithCancel(context.Background()) + dopts := &options{ + maxMPS: 1000, + numWorkers: 1, + flushSize: 100, + flushInterval: time.Minute, + logger: zap.NewNop(), + } + for _, opt := range opts { + opt(dopts) + } + if dopts.metrics != nil { + registerMetrics(dopts.metrics) + } + return &FeatureCacher{ + puller: puller.NewRateLimitedPuller(p, dopts.maxMPS), + featuresCache: cachev3.NewFeaturesCache(v3Cache), + featureClient: client, + opts: dopts, + logger: dopts.logger.Named("cacher"), + ctx: ctx, + cancel: cancel, + doneCh: make(chan struct{}), + } +} + +func (c *FeatureCacher) Run() error { + defer close(c.doneCh) + c.group.Go(func() error { + return c.puller.Run(c.ctx) + }) + for i := 0; i < c.opts.numWorkers; i++ { + c.group.Go(c.batch) + } + return c.group.Wait() +} + +func (c *FeatureCacher) Stop() { + c.cancel() + <-c.doneCh +} + +func (c *FeatureCacher) Check(ctx context.Context) health.Status { + select { + case <-c.ctx.Done(): + c.logger.Error("Unhealthy due to context Done is closed", zap.Error(c.ctx.Err())) + return health.Unhealthy + default: + if c.group.FinishedCount() > 0 { + c.logger.Error("Unhealthy", zap.Int32("FinishedCount", c.group.FinishedCount())) + return health.Unhealthy + } + return health.Healthy + } +} + +func (c *FeatureCacher) batch() error { + chunk := make(map[string]*puller.Message, c.opts.flushSize) + timer := time.NewTimer(c.opts.flushInterval) + defer timer.Stop() + for { + select { + case msg, ok := <-c.puller.MessageCh(): + if !ok { + return nil + } + receivedCounter.Inc() + id := msg.Attributes["id"] + if id == "" { + msg.Ack() + handledCounter.WithLabelValues(codes.MissingID.String()).Inc() + continue + } + if _, ok := chunk[id]; ok { + c.logger.Warn("Message with duplicate id", zap.String("id", id)) + 
handledCounter.WithLabelValues(codes.DuplicateID.String()).Inc() + } + chunk[id] = msg + if len(chunk) >= c.opts.flushSize { + c.handleChunk(chunk) + chunk = make(map[string]*puller.Message, c.opts.flushSize) + timer.Reset(c.opts.flushInterval) + } + case <-timer.C: + if len(chunk) > 0 { + c.handleChunk(chunk) + chunk = make(map[string]*puller.Message, c.opts.flushSize) + } + timer.Reset(c.opts.flushInterval) + case <-c.ctx.Done(): + return nil + } + } +} + +func (c *FeatureCacher) handleChunk(chunk map[string]*puller.Message) { + handledFeatures := make(map[string]struct{}, len(chunk)) + for _, msg := range chunk { + event, err := c.unmarshalMessage(msg) + if err != nil { + msg.Ack() + handledCounter.WithLabelValues(codes.BadMessage.String()).Inc() + continue + } + featureID, isTarget := c.extractFeatureID(event) + if !isTarget { + msg.Ack() + handledCounter.WithLabelValues(codes.OK.String()).Inc() + continue + } + if featureID == "" { + msg.Ack() + handledCounter.WithLabelValues(codes.BadMessage.String()).Inc() + c.logger.Warn("Message contains an empty FeatureID", zap.Any("event", event)) + continue + } + if _, ok := handledFeatures[c.handledFeatureKey(featureID, event.EnvironmentNamespace)]; ok { + msg.Ack() + handledCounter.WithLabelValues(codes.OK.String()).Inc() + continue + } + if ok := c.refresh(event.EnvironmentNamespace); ok { + msg.Ack() + handledFeatures[c.handledFeatureKey(featureID, event.EnvironmentNamespace)] = struct{}{} + handledCounter.WithLabelValues(codes.OK.String()).Inc() + } else { + msg.Nack() + handledCounter.WithLabelValues(codes.RepeatableError.String()).Inc() + } + } +} + +func (c *FeatureCacher) handledFeatureKey(featureID, environmentNamespace string) string { + if environmentNamespace == "" { + return featureID + } + return fmt.Sprintf("%s:%s", environmentNamespace, featureID) +} + +func (c *FeatureCacher) refresh(environmentNamespace string) bool { + features, err := c.listFeatures(environmentNamespace) + if err != nil { + 
c.logger.Error("Failed to retrieve features", zap.Error(err), + zap.String("environmentNamespace", environmentNamespace)) + return false + } + err = c.featuresCache.Put(&featureproto.Features{ + Features: features, + }, environmentNamespace) + if err != nil { + c.logger.Error( + "Failed to cache Features", + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + ) + return false + } + return true +} + +func (c *FeatureCacher) listFeatures(environmentNamespace string) ([]*featureproto.Feature, error) { + features := []*featureproto.Feature{} + cursor := "" + for { + resp, err := c.featureClient.ListFeatures(c.ctx, &featureproto.ListFeaturesRequest{ + PageSize: listRequestSize, + Cursor: cursor, + EnvironmentNamespace: environmentNamespace, + Archived: &wrappers.BoolValue{Value: false}, + }) + if err != nil { + return nil, err + } + for _, f := range resp.Features { + if !f.Enabled && f.OffVariation == "" { + continue + } + features = append(features, f) + } + featureSize := len(resp.Features) + if featureSize == 0 || featureSize < listRequestSize { + return features, nil + } + cursor = resp.Cursor + } +} + +func (c *FeatureCacher) unmarshalMessage(msg *puller.Message) (*domainevent.Event, error) { + event := &domainevent.Event{} + err := proto.Unmarshal(msg.Data, event) + if err != nil { + c.logger.Error("Failed to unmarshal message", zap.Error(err), zap.String("msgID", msg.ID)) + return nil, err + } + return event, nil +} + +func (c *FeatureCacher) extractFeatureID(event *domainevent.Event) (string, bool) { + if event.EntityType != domainevent.Event_FEATURE { + return "", false + } + return event.EntityId, true +} diff --git a/pkg/feature/cacher/cacher_test.go b/pkg/feature/cacher/cacher_test.go new file mode 100644 index 000000000..89296e472 --- /dev/null +++ b/pkg/feature/cacher/cacher_test.go @@ -0,0 +1,30 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cacher + +import ( + "testing" +) + +func TestExtractFeatureID(t *testing.T) { + // testcases := []struct { + // event *domainevent.Event + // id string + // isTarget bool + // }{} + // for i, tc := range testcases { + // des := fmt.Sprintf("index: %d", i) + // } +} diff --git a/pkg/feature/cacher/metrics.go b/pkg/feature/cacher/metrics.go new file mode 100644 index 000000000..ee070b548 --- /dev/null +++ b/pkg/feature/cacher/metrics.go @@ -0,0 +1,43 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cacher + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/bucketeer-io/bucketeer/pkg/metrics" +) + +var ( + receivedCounter = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "feature", + Name: "cacher_received_total", + Help: "Total number of received messages", + }) + + handledCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "feature", + Name: "cacher_handled_total", + Help: "Total number of handled messages", + }, []string{"code"}) +) + +func registerMetrics(r metrics.Registerer) { + r.MustRegister(receivedCounter, handledCounter) +} diff --git a/pkg/feature/client/BUILD.bazel b/pkg/feature/client/BUILD.bazel new file mode 100644 index 000000000..2f0596996 --- /dev/null +++ b/pkg/feature/client/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["client.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/feature/client", + visibility = ["//visibility:public"], + deps = [ + "//pkg/rpc/client:go_default_library", + "//proto/feature:go_default_library", + "@org_golang_google_grpc//:go_default_library", + ], +) diff --git a/pkg/feature/client/client.go b/pkg/feature/client/client.go new file mode 100644 index 000000000..9211e6851 --- /dev/null +++ b/pkg/feature/client/client.go @@ -0,0 +1,50 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package client + +import ( + "google.golang.org/grpc" + + rpcclient "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + proto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +type Client interface { + proto.FeatureServiceClient + Close() +} + +type client struct { + proto.FeatureServiceClient + address string + connection *grpc.ClientConn +} + +func NewClient(addr, certPath string, opts ...rpcclient.Option) (Client, error) { + conn, err := rpcclient.NewClientConn(addr, certPath, opts...) + if err != nil { + return nil, err + } + return &client{ + FeatureServiceClient: proto.NewFeatureServiceClient(conn), + address: addr, + connection: conn, + }, nil +} + +func (c *client) Close() { + c.connection.Close() +} diff --git a/pkg/feature/client/mock/BUILD.bazel b/pkg/feature/client/mock/BUILD.bazel new file mode 100644 index 000000000..a5c0faeae --- /dev/null +++ b/pkg/feature/client/mock/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["client.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/feature/client/mock", + visibility = ["//visibility:public"], + deps = [ + "//proto/feature:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@org_golang_google_grpc//:go_default_library", + ], +) diff --git a/pkg/feature/client/mock/client.go b/pkg/feature/client/mock/client.go new file mode 100644 index 000000000..ab3ccd780 --- /dev/null +++ b/pkg/feature/client/mock/client.go @@ -0,0 +1,630 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: client.go + +// Package mock is a generated GoMock package. 
+package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + grpc "google.golang.org/grpc" + + feature "github.com/bucketeer-io/bucketeer/proto/feature" +) + +// MockClient is a mock of Client interface. +type MockClient struct { + ctrl *gomock.Controller + recorder *MockClientMockRecorder +} + +// MockClientMockRecorder is the mock recorder for MockClient. +type MockClientMockRecorder struct { + mock *MockClient +} + +// NewMockClient creates a new mock instance. +func NewMockClient(ctrl *gomock.Controller) *MockClient { + mock := &MockClient{ctrl: ctrl} + mock.recorder = &MockClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockClient) EXPECT() *MockClientMockRecorder { + return m.recorder +} + +// AddSegmentUser mocks base method. +func (m *MockClient) AddSegmentUser(ctx context.Context, in *feature.AddSegmentUserRequest, opts ...grpc.CallOption) (*feature.AddSegmentUserResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "AddSegmentUser", varargs...) + ret0, _ := ret[0].(*feature.AddSegmentUserResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AddSegmentUser indicates an expected call of AddSegmentUser. +func (mr *MockClientMockRecorder) AddSegmentUser(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddSegmentUser", reflect.TypeOf((*MockClient)(nil).AddSegmentUser), varargs...) +} + +// ArchiveFeature mocks base method. 
+func (m *MockClient) ArchiveFeature(ctx context.Context, in *feature.ArchiveFeatureRequest, opts ...grpc.CallOption) (*feature.ArchiveFeatureResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ArchiveFeature", varargs...) + ret0, _ := ret[0].(*feature.ArchiveFeatureResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ArchiveFeature indicates an expected call of ArchiveFeature. +func (mr *MockClientMockRecorder) ArchiveFeature(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ArchiveFeature", reflect.TypeOf((*MockClient)(nil).ArchiveFeature), varargs...) +} + +// BulkDownloadSegmentUsers mocks base method. +func (m *MockClient) BulkDownloadSegmentUsers(ctx context.Context, in *feature.BulkDownloadSegmentUsersRequest, opts ...grpc.CallOption) (*feature.BulkDownloadSegmentUsersResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "BulkDownloadSegmentUsers", varargs...) + ret0, _ := ret[0].(*feature.BulkDownloadSegmentUsersResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BulkDownloadSegmentUsers indicates an expected call of BulkDownloadSegmentUsers. +func (mr *MockClientMockRecorder) BulkDownloadSegmentUsers(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BulkDownloadSegmentUsers", reflect.TypeOf((*MockClient)(nil).BulkDownloadSegmentUsers), varargs...) +} + +// BulkUploadSegmentUsers mocks base method. 
+func (m *MockClient) BulkUploadSegmentUsers(ctx context.Context, in *feature.BulkUploadSegmentUsersRequest, opts ...grpc.CallOption) (*feature.BulkUploadSegmentUsersResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "BulkUploadSegmentUsers", varargs...) + ret0, _ := ret[0].(*feature.BulkUploadSegmentUsersResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BulkUploadSegmentUsers indicates an expected call of BulkUploadSegmentUsers. +func (mr *MockClientMockRecorder) BulkUploadSegmentUsers(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BulkUploadSegmentUsers", reflect.TypeOf((*MockClient)(nil).BulkUploadSegmentUsers), varargs...) +} + +// CloneFeature mocks base method. +func (m *MockClient) CloneFeature(ctx context.Context, in *feature.CloneFeatureRequest, opts ...grpc.CallOption) (*feature.CloneFeatureResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CloneFeature", varargs...) + ret0, _ := ret[0].(*feature.CloneFeatureResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CloneFeature indicates an expected call of CloneFeature. +func (mr *MockClientMockRecorder) CloneFeature(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloneFeature", reflect.TypeOf((*MockClient)(nil).CloneFeature), varargs...) +} + +// Close mocks base method. +func (m *MockClient) Close() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Close") +} + +// Close indicates an expected call of Close. 
+func (mr *MockClientMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockClient)(nil).Close)) +} + +// CreateFeature mocks base method. +func (m *MockClient) CreateFeature(ctx context.Context, in *feature.CreateFeatureRequest, opts ...grpc.CallOption) (*feature.CreateFeatureResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateFeature", varargs...) + ret0, _ := ret[0].(*feature.CreateFeatureResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateFeature indicates an expected call of CreateFeature. +func (mr *MockClientMockRecorder) CreateFeature(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateFeature", reflect.TypeOf((*MockClient)(nil).CreateFeature), varargs...) +} + +// CreateSegment mocks base method. +func (m *MockClient) CreateSegment(ctx context.Context, in *feature.CreateSegmentRequest, opts ...grpc.CallOption) (*feature.CreateSegmentResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateSegment", varargs...) + ret0, _ := ret[0].(*feature.CreateSegmentResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateSegment indicates an expected call of CreateSegment. +func (mr *MockClientMockRecorder) CreateSegment(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSegment", reflect.TypeOf((*MockClient)(nil).CreateSegment), varargs...) +} + +// DeleteFeature mocks base method. 
+// NOTE(review): the methods below are MockGen-generated stubs for the feature
+// service client — regenerate with mockgen instead of hand-editing. Each stub
+// records the invocation on the gomock controller and comma-ok-asserts the
+// configured return values, so a missing/mismatched stub yields zero values
+// rather than a panic at the assertion itself.
+func (m *MockClient) DeleteFeature(ctx context.Context, in *feature.DeleteFeatureRequest, opts ...grpc.CallOption) (*feature.DeleteFeatureResponse, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "DeleteFeature", varargs...)
+	ret0, _ := ret[0].(*feature.DeleteFeatureResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// DeleteFeature indicates an expected call of DeleteFeature.
+func (mr *MockClientMockRecorder) DeleteFeature(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteFeature", reflect.TypeOf((*MockClient)(nil).DeleteFeature), varargs...)
+}
+
+// DeleteSegment mocks base method.
+func (m *MockClient) DeleteSegment(ctx context.Context, in *feature.DeleteSegmentRequest, opts ...grpc.CallOption) (*feature.DeleteSegmentResponse, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "DeleteSegment", varargs...)
+	ret0, _ := ret[0].(*feature.DeleteSegmentResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// DeleteSegment indicates an expected call of DeleteSegment.
+func (mr *MockClientMockRecorder) DeleteSegment(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSegment", reflect.TypeOf((*MockClient)(nil).DeleteSegment), varargs...)
+}
+
+// DeleteSegmentUser mocks base method.
+func (m *MockClient) DeleteSegmentUser(ctx context.Context, in *feature.DeleteSegmentUserRequest, opts ...grpc.CallOption) (*feature.DeleteSegmentUserResponse, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "DeleteSegmentUser", varargs...)
+	ret0, _ := ret[0].(*feature.DeleteSegmentUserResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// DeleteSegmentUser indicates an expected call of DeleteSegmentUser.
+func (mr *MockClientMockRecorder) DeleteSegmentUser(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSegmentUser", reflect.TypeOf((*MockClient)(nil).DeleteSegmentUser), varargs...)
+}
+
+// DisableFeature mocks base method.
+func (m *MockClient) DisableFeature(ctx context.Context, in *feature.DisableFeatureRequest, opts ...grpc.CallOption) (*feature.DisableFeatureResponse, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "DisableFeature", varargs...)
+	ret0, _ := ret[0].(*feature.DisableFeatureResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// DisableFeature indicates an expected call of DisableFeature.
+func (mr *MockClientMockRecorder) DisableFeature(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisableFeature", reflect.TypeOf((*MockClient)(nil).DisableFeature), varargs...)
+}
+
+// EnableFeature mocks base method.
+func (m *MockClient) EnableFeature(ctx context.Context, in *feature.EnableFeatureRequest, opts ...grpc.CallOption) (*feature.EnableFeatureResponse, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "EnableFeature", varargs...)
+	ret0, _ := ret[0].(*feature.EnableFeatureResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// EnableFeature indicates an expected call of EnableFeature.
+func (mr *MockClientMockRecorder) EnableFeature(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableFeature", reflect.TypeOf((*MockClient)(nil).EnableFeature), varargs...)
+}
+
+// EvaluateFeatures mocks base method.
+func (m *MockClient) EvaluateFeatures(ctx context.Context, in *feature.EvaluateFeaturesRequest, opts ...grpc.CallOption) (*feature.EvaluateFeaturesResponse, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "EvaluateFeatures", varargs...)
+	ret0, _ := ret[0].(*feature.EvaluateFeaturesResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// EvaluateFeatures indicates an expected call of EvaluateFeatures.
+func (mr *MockClientMockRecorder) EvaluateFeatures(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EvaluateFeatures", reflect.TypeOf((*MockClient)(nil).EvaluateFeatures), varargs...)
+}
+
+// GetFeature mocks base method.
+func (m *MockClient) GetFeature(ctx context.Context, in *feature.GetFeatureRequest, opts ...grpc.CallOption) (*feature.GetFeatureResponse, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "GetFeature", varargs...)
+	ret0, _ := ret[0].(*feature.GetFeatureResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetFeature indicates an expected call of GetFeature.
+func (mr *MockClientMockRecorder) GetFeature(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFeature", reflect.TypeOf((*MockClient)(nil).GetFeature), varargs...)
+}
+
+// GetFeatures mocks base method.
+func (m *MockClient) GetFeatures(ctx context.Context, in *feature.GetFeaturesRequest, opts ...grpc.CallOption) (*feature.GetFeaturesResponse, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "GetFeatures", varargs...)
+	ret0, _ := ret[0].(*feature.GetFeaturesResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetFeatures indicates an expected call of GetFeatures.
+func (mr *MockClientMockRecorder) GetFeatures(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFeatures", reflect.TypeOf((*MockClient)(nil).GetFeatures), varargs...)
+}
+
+// GetSegment mocks base method.
+func (m *MockClient) GetSegment(ctx context.Context, in *feature.GetSegmentRequest, opts ...grpc.CallOption) (*feature.GetSegmentResponse, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "GetSegment", varargs...)
+	ret0, _ := ret[0].(*feature.GetSegmentResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetSegment indicates an expected call of GetSegment.
+func (mr *MockClientMockRecorder) GetSegment(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSegment", reflect.TypeOf((*MockClient)(nil).GetSegment), varargs...)
+}
+
+// GetSegmentUser mocks base method.
+func (m *MockClient) GetSegmentUser(ctx context.Context, in *feature.GetSegmentUserRequest, opts ...grpc.CallOption) (*feature.GetSegmentUserResponse, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "GetSegmentUser", varargs...)
+	ret0, _ := ret[0].(*feature.GetSegmentUserResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetSegmentUser indicates an expected call of GetSegmentUser.
+func (mr *MockClientMockRecorder) GetSegmentUser(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSegmentUser", reflect.TypeOf((*MockClient)(nil).GetSegmentUser), varargs...)
+}
+
+// GetUserEvaluations mocks base method.
+func (m *MockClient) GetUserEvaluations(ctx context.Context, in *feature.GetUserEvaluationsRequest, opts ...grpc.CallOption) (*feature.GetUserEvaluationsResponse, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "GetUserEvaluations", varargs...)
+	ret0, _ := ret[0].(*feature.GetUserEvaluationsResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetUserEvaluations indicates an expected call of GetUserEvaluations.
+func (mr *MockClientMockRecorder) GetUserEvaluations(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUserEvaluations", reflect.TypeOf((*MockClient)(nil).GetUserEvaluations), varargs...)
+}
+
+// NOTE(review): MockGen-generated stubs continue below — regenerate with
+// mockgen rather than hand-editing this file.
+// ListEnabledFeatures mocks base method.
+func (m *MockClient) ListEnabledFeatures(ctx context.Context, in *feature.ListEnabledFeaturesRequest, opts ...grpc.CallOption) (*feature.ListEnabledFeaturesResponse, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "ListEnabledFeatures", varargs...)
+	ret0, _ := ret[0].(*feature.ListEnabledFeaturesResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ListEnabledFeatures indicates an expected call of ListEnabledFeatures.
+func (mr *MockClientMockRecorder) ListEnabledFeatures(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListEnabledFeatures", reflect.TypeOf((*MockClient)(nil).ListEnabledFeatures), varargs...)
+}
+
+// ListFeatures mocks base method.
+func (m *MockClient) ListFeatures(ctx context.Context, in *feature.ListFeaturesRequest, opts ...grpc.CallOption) (*feature.ListFeaturesResponse, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "ListFeatures", varargs...)
+	ret0, _ := ret[0].(*feature.ListFeaturesResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ListFeatures indicates an expected call of ListFeatures.
+func (mr *MockClientMockRecorder) ListFeatures(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListFeatures", reflect.TypeOf((*MockClient)(nil).ListFeatures), varargs...)
+}
+
+// ListSegmentUsers mocks base method.
+func (m *MockClient) ListSegmentUsers(ctx context.Context, in *feature.ListSegmentUsersRequest, opts ...grpc.CallOption) (*feature.ListSegmentUsersResponse, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "ListSegmentUsers", varargs...)
+	ret0, _ := ret[0].(*feature.ListSegmentUsersResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ListSegmentUsers indicates an expected call of ListSegmentUsers.
+func (mr *MockClientMockRecorder) ListSegmentUsers(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSegmentUsers", reflect.TypeOf((*MockClient)(nil).ListSegmentUsers), varargs...)
+}
+
+// ListSegments mocks base method.
+func (m *MockClient) ListSegments(ctx context.Context, in *feature.ListSegmentsRequest, opts ...grpc.CallOption) (*feature.ListSegmentsResponse, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "ListSegments", varargs...)
+	ret0, _ := ret[0].(*feature.ListSegmentsResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ListSegments indicates an expected call of ListSegments.
+func (mr *MockClientMockRecorder) ListSegments(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSegments", reflect.TypeOf((*MockClient)(nil).ListSegments), varargs...)
+}
+
+// ListTags mocks base method.
+func (m *MockClient) ListTags(ctx context.Context, in *feature.ListTagsRequest, opts ...grpc.CallOption) (*feature.ListTagsResponse, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "ListTags", varargs...)
+	ret0, _ := ret[0].(*feature.ListTagsResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ListTags indicates an expected call of ListTags.
+func (mr *MockClientMockRecorder) ListTags(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListTags", reflect.TypeOf((*MockClient)(nil).ListTags), varargs...)
+}
+
+// UnarchiveFeature mocks base method.
+func (m *MockClient) UnarchiveFeature(ctx context.Context, in *feature.UnarchiveFeatureRequest, opts ...grpc.CallOption) (*feature.UnarchiveFeatureResponse, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "UnarchiveFeature", varargs...)
+	ret0, _ := ret[0].(*feature.UnarchiveFeatureResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// UnarchiveFeature indicates an expected call of UnarchiveFeature.
+func (mr *MockClientMockRecorder) UnarchiveFeature(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnarchiveFeature", reflect.TypeOf((*MockClient)(nil).UnarchiveFeature), varargs...)
+}
+
+// UpdateFeatureDetails mocks base method.
+func (m *MockClient) UpdateFeatureDetails(ctx context.Context, in *feature.UpdateFeatureDetailsRequest, opts ...grpc.CallOption) (*feature.UpdateFeatureDetailsResponse, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "UpdateFeatureDetails", varargs...)
+	ret0, _ := ret[0].(*feature.UpdateFeatureDetailsResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// UpdateFeatureDetails indicates an expected call of UpdateFeatureDetails.
+func (mr *MockClientMockRecorder) UpdateFeatureDetails(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateFeatureDetails", reflect.TypeOf((*MockClient)(nil).UpdateFeatureDetails), varargs...)
+}
+
+// UpdateFeatureTargeting mocks base method.
+func (m *MockClient) UpdateFeatureTargeting(ctx context.Context, in *feature.UpdateFeatureTargetingRequest, opts ...grpc.CallOption) (*feature.UpdateFeatureTargetingResponse, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "UpdateFeatureTargeting", varargs...)
+	ret0, _ := ret[0].(*feature.UpdateFeatureTargetingResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// UpdateFeatureTargeting indicates an expected call of UpdateFeatureTargeting.
+func (mr *MockClientMockRecorder) UpdateFeatureTargeting(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateFeatureTargeting", reflect.TypeOf((*MockClient)(nil).UpdateFeatureTargeting), varargs...)
+}
+
+// UpdateFeatureVariations mocks base method.
+func (m *MockClient) UpdateFeatureVariations(ctx context.Context, in *feature.UpdateFeatureVariationsRequest, opts ...grpc.CallOption) (*feature.UpdateFeatureVariationsResponse, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "UpdateFeatureVariations", varargs...)
+	ret0, _ := ret[0].(*feature.UpdateFeatureVariationsResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// UpdateFeatureVariations indicates an expected call of UpdateFeatureVariations.
+func (mr *MockClientMockRecorder) UpdateFeatureVariations(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateFeatureVariations", reflect.TypeOf((*MockClient)(nil).UpdateFeatureVariations), varargs...)
+}
+
+// UpdateSegment mocks base method.
+func (m *MockClient) UpdateSegment(ctx context.Context, in *feature.UpdateSegmentRequest, opts ...grpc.CallOption) (*feature.UpdateSegmentResponse, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "UpdateSegment", varargs...)
+	ret0, _ := ret[0].(*feature.UpdateSegmentResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// UpdateSegment indicates an expected call of UpdateSegment.
+func (mr *MockClientMockRecorder) UpdateSegment(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateSegment", reflect.TypeOf((*MockClient)(nil).UpdateSegment), varargs...)
+}
+
+// UpsertUserEvaluation mocks base method.
+func (m *MockClient) UpsertUserEvaluation(ctx context.Context, in *feature.UpsertUserEvaluationRequest, opts ...grpc.CallOption) (*feature.UpsertUserEvaluationResponse, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{ctx, in}
+	for _, a := range opts {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "UpsertUserEvaluation", varargs...)
+	ret0, _ := ret[0].(*feature.UpsertUserEvaluationResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// UpsertUserEvaluation indicates an expected call of UpsertUserEvaluation.
+func (mr *MockClientMockRecorder) UpsertUserEvaluation(ctx, in interface{}, opts ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{ctx, in}, opts...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpsertUserEvaluation", reflect.TypeOf((*MockClient)(nil).UpsertUserEvaluation), varargs...)
+}
diff --git a/pkg/feature/cmd/cacher/BUILD.bazel b/pkg/feature/cmd/cacher/BUILD.bazel
new file mode 100644
index 000000000..bdf458a70
--- /dev/null
+++ b/pkg/feature/cmd/cacher/BUILD.bazel
@@ -0,0 +1,23 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "go_default_library",
+    srcs = ["cacher.go"],
+    importpath = "github.com/bucketeer-io/bucketeer/pkg/feature/cmd/cacher",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//pkg/cache/v3:go_default_library",
+        "//pkg/cli:go_default_library",
+        "//pkg/feature/cacher:go_default_library",
+        "//pkg/feature/client:go_default_library",
+        "//pkg/health:go_default_library",
+        "//pkg/metrics:go_default_library",
+        "//pkg/pubsub:go_default_library",
+        "//pkg/pubsub/puller:go_default_library",
+        "//pkg/redis/v3:go_default_library",
+        "//pkg/rpc:go_default_library",
+        "//pkg/rpc/client:go_default_library",
+        "@in_gopkg_alecthomas_kingpin_v2//:go_default_library",
+        "@org_uber_go_zap//:go_default_library",
+    ],
+)
diff --git a/pkg/feature/cmd/cacher/cacher.go b/pkg/feature/cmd/cacher/cacher.go
new file mode 100644
index 000000000..0fb5a1430
--- /dev/null
+++ b/pkg/feature/cmd/cacher/cacher.go
@@ -0,0 +1,172 @@
+// Copyright 2022 The Bucketeer Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cacher
+
+import (
+	"context"
+	"time"
+
+	"go.uber.org/zap"
+	kingpin "gopkg.in/alecthomas/kingpin.v2"
+
+	cachev3 "github.com/bucketeer-io/bucketeer/pkg/cache/v3"
+	"github.com/bucketeer-io/bucketeer/pkg/cli"
+	tc "github.com/bucketeer-io/bucketeer/pkg/feature/cacher"
+	featureclient "github.com/bucketeer-io/bucketeer/pkg/feature/client"
+	"github.com/bucketeer-io/bucketeer/pkg/health"
+	"github.com/bucketeer-io/bucketeer/pkg/metrics"
+	"github.com/bucketeer-io/bucketeer/pkg/pubsub"
+	"github.com/bucketeer-io/bucketeer/pkg/pubsub/puller"
+	redisv3 "github.com/bucketeer-io/bucketeer/pkg/redis/v3"
+	"github.com/bucketeer-io/bucketeer/pkg/rpc"
+	"github.com/bucketeer-io/bucketeer/pkg/rpc/client"
+)
+
+// NOTE(review): the subcommand is registered as "tag-cacher" although this
+// package wires the feature cacher (tc.NewFeatureCacher below) — looks like a
+// copy-paste leftover. Renaming would break any manifest that invokes the
+// subcommand by name, so confirm with deploy configs before changing.
+const command = "tag-cacher"
+
+// featureCacher carries the kingpin subcommand and its flag bindings.
+type featureCacher struct {
+	*kingpin.CmdClause
+	port                         *int
+	project                      *string
+	subscription                 *string
+	topic                        *string
+	maxMPS                       *int
+	numWorkers                   *int
+	flushSize                    *int
+	flushInterval                *time.Duration
+	featureService               *string
+	redisServerName              *string
+	redisAddr                    *string
+	certPath                     *string
+	keyPath                      *string
+	serviceTokenPath             *string
+	pullerNumGoroutines          *int
+	pullerMaxOutstandingMessages *int
+	pullerMaxOutstandingBytes    *int
+}
+
+// RegisterCommand registers the "tag-cacher" subcommand and its flags on the
+// parent command, then returns the runnable command.
+func RegisterCommand(r cli.CommandRegistry, p cli.ParentCommand) cli.Command {
+	cmd := p.Command(command, "Start tag cacher")
+	c := &featureCacher{
+		CmdClause:        cmd,
+		port:             cmd.Flag("port", "Port to bind to.").Default("9090").Int(),
+		project:          cmd.Flag("project", "Google Cloud project name.").String(),
+		subscription:     cmd.Flag("subscription", "Google PubSub subscription name.").String(),
+		topic:            cmd.Flag("topic", "Google PubSub topic name.").String(),
+		maxMPS:           cmd.Flag("max-mps", "Maximum messages should be handled in a second.").Default("5000").Int(),
+		numWorkers:       cmd.Flag("num-workers", "Number of workers.").Default("2").Int(),
+		flushSize:        cmd.Flag("flush-size", "Maximum number of messages in one flush.").Default("100").Int(),
+		flushInterval:    cmd.Flag("flush-interval", "Maximum interval between two flushes.").Default("1m").Duration(),
+		featureService:   cmd.Flag("feature-service", "bucketeer-feature-service address.").Default("feature:9090").String(),
+		redisServerName:  cmd.Flag("redis-server-name", "Name of the redis.").Required().String(),
+		redisAddr:        cmd.Flag("redis-addr", "Address of the redis.").Required().String(),
+		certPath:         cmd.Flag("cert", "Path to TLS certificate.").Required().String(),
+		keyPath:          cmd.Flag("key", "Path to TLS key.").Required().String(),
+		serviceTokenPath: cmd.Flag("service-token", "Path to service token.").Required().String(),
+		pullerNumGoroutines: cmd.Flag(
+			"puller-num-goroutines",
+			"Number of goroutines will be spawned to pull messages.",
+		).Int(),
+		pullerMaxOutstandingMessages: cmd.Flag(
+			"puller-max-outstanding-messages",
+			"Maximum number of unprocessed messages.",
+		).Int(),
+		pullerMaxOutstandingBytes: cmd.Flag("puller-max-outstanding-bytes", "Maximum size of unprocessed messages.").Int(),
+	}
+	r.RegisterCommand(c)
+	return c
+}
+
+// Run wires puller -> feature client -> redis cache into the cacher, starts the
+// health checker and RPC server, then blocks until ctx is cancelled. Cleanup is
+// deferred in reverse creation order.
+func (c *featureCacher) Run(ctx context.Context, metrics metrics.Metrics, logger *zap.Logger) error {
+	registerer := metrics.DefaultRegisterer()
+
+	puller, err := c.createPuller(ctx, logger)
+	if err != nil {
+		return err
+	}
+
+	creds, err := client.NewPerRPCCredentials(*c.serviceTokenPath)
+	if err != nil {
+		return err
+	}
+
+	featureClient, err := featureclient.NewClient(*c.featureService, *c.certPath,
+		client.WithPerRPCCredentials(creds),
+		client.WithDialTimeout(30*time.Second),
+		client.WithBlock(),
+		client.WithMetrics(registerer),
+		client.WithLogger(logger),
+	)
+	if err != nil {
+		return err
+	}
+	defer featureClient.Close()
+
+	redisV3Client, err := redisv3.NewClient(
+		*c.redisAddr,
+		redisv3.WithServerName(*c.redisServerName),
+		redisv3.WithMetrics(registerer),
+		redisv3.WithLogger(logger),
+	)
+	if err != nil {
+		return err
+	}
+	defer redisV3Client.Close()
+	redisV3Cache := cachev3.NewRedisCache(redisV3Client)
+
+	cacher := tc.NewFeatureCacher(puller, featureClient, redisV3Cache,
+		tc.WithMaxMPS(*c.maxMPS),
+		tc.WithNumWorkers(*c.numWorkers),
+		tc.WithFlushSize(*c.flushSize),
+		tc.WithFlushInterval(*c.flushInterval),
+		tc.WithMetrics(registerer),
+		tc.WithLogger(logger),
+	)
+	defer cacher.Stop()
+	// Errors from the background cacher loop are intentionally dropped;
+	// liveness is surfaced via the "cacher" health check below.
+	go cacher.Run() // nolint:errcheck
+
+	healthChecker := health.NewGrpcChecker(
+		health.WithTimeout(time.Second),
+		health.WithCheck("metrics", metrics.Check),
+		health.WithCheck("cacher", cacher.Check),
+	)
+	go healthChecker.Run(ctx)
+
+	server := rpc.NewServer(healthChecker, *c.certPath, *c.keyPath,
+		rpc.WithPort(*c.port),
+		rpc.WithMetrics(registerer),
+		rpc.WithLogger(logger),
+		rpc.WithHandler("/health", healthChecker),
+	)
+	defer server.Stop(10 * time.Second)
+	go server.Run()
+
+	<-ctx.Done()
+	return nil
+}
+
+// createPuller creates a PubSub client (bounded by a 5s timeout) and returns a
+// puller on the configured subscription/topic.
+func (c *featureCacher) createPuller(ctx context.Context, logger *zap.Logger) (puller.Puller, error) {
+	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
+	defer cancel()
+	client, err := pubsub.NewClient(ctx, *c.project, pubsub.WithLogger(logger))
+	if err != nil {
+		return nil, err
+	}
+	return client.CreatePuller(*c.subscription, *c.topic,
+		pubsub.WithNumGoroutines(*c.pullerNumGoroutines),
+		pubsub.WithMaxOutstandingMessages(*c.pullerMaxOutstandingMessages),
+		pubsub.WithMaxOutstandingBytes(*c.pullerMaxOutstandingBytes),
+	)
+}
diff --git a/pkg/feature/cmd/recorder/BUILD.bazel b/pkg/feature/cmd/recorder/BUILD.bazel
new file mode 100644
index 000000000..f4be718c9
--- /dev/null
+++ b/pkg/feature/cmd/recorder/BUILD.bazel
@@ -0,0 +1,20 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "go_default_library",
+    srcs = ["recorder.go"],
+    importpath = "github.com/bucketeer-io/bucketeer/pkg/feature/cmd/recorder",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//pkg/cli:go_default_library",
+        "//pkg/feature/recorder:go_default_library",
+        "//pkg/health:go_default_library",
+        "//pkg/metrics:go_default_library",
+        "//pkg/pubsub:go_default_library",
+        "//pkg/pubsub/puller:go_default_library",
+        "//pkg/rpc:go_default_library",
+        "//pkg/storage/v2/mysql:go_default_library",
+        "@in_gopkg_alecthomas_kingpin_v2//:go_default_library",
+        "@org_uber_go_zap//:go_default_library",
+    ],
+)
diff --git a/pkg/feature/cmd/recorder/recorder.go b/pkg/feature/cmd/recorder/recorder.go
new file mode 100644
index 000000000..aef770f86
--- /dev/null
+++ b/pkg/feature/cmd/recorder/recorder.go
@@ -0,0 +1,172 @@
+// Copyright 2022 The Bucketeer Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package recorder
+
+import (
+	"context"
+	"os"
+	"time"
+
+	"go.uber.org/zap"
+	"gopkg.in/alecthomas/kingpin.v2"
+
+	"github.com/bucketeer-io/bucketeer/pkg/cli"
+	featurerecorder "github.com/bucketeer-io/bucketeer/pkg/feature/recorder"
+	"github.com/bucketeer-io/bucketeer/pkg/health"
+	"github.com/bucketeer-io/bucketeer/pkg/metrics"
+	"github.com/bucketeer-io/bucketeer/pkg/pubsub"
+	"github.com/bucketeer-io/bucketeer/pkg/pubsub/puller"
+	"github.com/bucketeer-io/bucketeer/pkg/rpc"
+	"github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql"
+)
+
+const command = "recorder"
+
+// recorder carries the kingpin subcommand and its flag bindings for the
+// feature-recorder service.
+type recorder struct {
+	*kingpin.CmdClause
+	port                         *int
+	project                      *string
+	mysqlUser                    *string
+	mysqlPass                    *string
+	mysqlHost                    *string
+	mysqlPort                    *int
+	mysqlDBName                  *string
+	subscription                 *string
+	topic                        *string
+	maxMPS                       *int
+	flushInterval                *time.Duration
+	startupInterval              *time.Duration
+	certPath                     *string
+	keyPath                      *string
+	pullerNumGoroutines          *int
+	pullerMaxOutstandingMessages *int
+	pullerMaxOutstandingBytes    *int
+}
+
+// RegisterCommand registers the "recorder" subcommand and its flags on the
+// parent command, then returns the runnable command.
+func RegisterCommand(r cli.CommandRegistry, p cli.ParentCommand) cli.Command {
+	cmd := p.Command(command, "Start feature recorder")
+	recorder := &recorder{
+		CmdClause:    cmd,
+		port:         cmd.Flag("port", "Port to bind to.").Default("9090").Int(),
+		project:      cmd.Flag("project", "Google Cloud project name.").String(),
+		mysqlUser:    cmd.Flag("mysql-user", "MySQL user.").Required().String(),
+		mysqlPass:    cmd.Flag("mysql-pass", "MySQL password.").Required().String(),
+		mysqlHost:    cmd.Flag("mysql-host", "MySQL host.").Required().String(),
+		mysqlPort:    cmd.Flag("mysql-port", "MySQL port.").Required().Int(),
+		mysqlDBName:  cmd.Flag("mysql-db-name", "MySQL database name.").Required().String(),
+		subscription: cmd.Flag("subscription", "Google PubSub subscription name.").String(),
+		topic:        cmd.Flag("topic", "Google PubSub topic name.").String(),
+		maxMPS: cmd.Flag(
+			"max-mps",
+			"Maximum messages should be handled in a second.",
+		).Default("5000").Int(),
+		flushInterval:   cmd.Flag("flush-interval", "Interval between two flushes.").Default("1m").Duration(),
+		startupInterval: cmd.Flag("startup-interval", "Interval to start workers").Default("1s").Duration(),
+		certPath:        cmd.Flag("cert", "Path to TLS certificate.").Required().String(),
+		keyPath:         cmd.Flag("key", "Path to TLS key.").Required().String(),
+		pullerNumGoroutines: cmd.Flag(
+			"puller-num-goroutines",
+			"Number of goroutines will be spawned to pull messages.",
+		).Int(),
+		pullerMaxOutstandingMessages: cmd.Flag(
+			"puller-max-outstanding-messages",
+			"Maximum number of unprocessed messages.",
+		).Int(),
+		pullerMaxOutstandingBytes: cmd.Flag(
+			"puller-max-outstanding-bytes",
+			"Maximum size of unprocessed messages.",
+		).Int(),
+	}
+	r.RegisterCommand(recorder)
+	return recorder
+}
+
+// Run starts the feature recorder: rewrites the TLS paths for telepresence
+// (see insertTelepresenceMountRoot), wires the PubSub puller and MySQL client
+// into the recorder, then serves health checks and RPC until ctx is cancelled.
+func (r *recorder) Run(ctx context.Context, metrics metrics.Metrics, logger *zap.Logger) error {
+	// NOTE(review): mutates the flag-backed strings in place; harmless while
+	// Run is the only consumer, but surprising if the flags are read again.
+	*r.keyPath = r.insertTelepresenceMountRoot(*r.keyPath)
+	*r.certPath = r.insertTelepresenceMountRoot(*r.certPath)
+
+	puller, err := r.createPuller(ctx, logger)
+	if err != nil {
+		return err
+	}
+
+	registerer := metrics.DefaultRegisterer()
+	mysqlClient, err := mysql.NewClient(
+		ctx,
+		*r.mysqlUser,
+		*r.mysqlPass,
+		*r.mysqlHost,
+		*r.mysqlPort,
+		*r.mysqlDBName,
+		mysql.WithLogger(logger),
+	)
+	if err != nil {
+		logger.Error("Failed to create mysql client", zap.Error(err))
+		return err
+	}
+	defer mysqlClient.Close()
+
+	recorder := featurerecorder.NewRecorder(puller, mysqlClient,
+		featurerecorder.WithMaxMPS(*r.maxMPS),
+		featurerecorder.WithLogger(logger),
+		featurerecorder.WithMetrics(registerer),
+		featurerecorder.WithFlushInterval(*r.flushInterval),
+		featurerecorder.WithStartupInterval(*r.startupInterval),
+	)
+	defer recorder.Stop()
+	// Errors from the background recorder loop are intentionally dropped;
+	// liveness is surfaced via the "recorder" health check below.
+	go recorder.Run() // nolint:errcheck
+
+	healthChecker := health.NewGrpcChecker(
+		health.WithTimeout(time.Second),
+		health.WithCheck("metrics", metrics.Check),
+		health.WithCheck("recorder", recorder.Check),
+	)
+	go healthChecker.Run(ctx)
+
+	server := rpc.NewServer(healthChecker, *r.certPath, *r.keyPath,
+		rpc.WithPort(*r.port),
+		rpc.WithMetrics(registerer),
+		rpc.WithLogger(logger),
+		rpc.WithHandler("/health", healthChecker),
+	)
+	defer server.Stop(10 * time.Second)
+	go server.Run()
+
+	<-ctx.Done()
+	return nil
+}
+
+// createPuller creates a PubSub client (bounded by a 5s timeout) and returns a
+// puller on the configured subscription/topic.
+func (r *recorder) createPuller(ctx context.Context, logger *zap.Logger) (puller.Puller, error) {
+	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
+	defer cancel()
+	client, err := pubsub.NewClient(ctx, *r.project, pubsub.WithLogger(logger))
+	if err != nil {
+		return nil, err
+	}
+	return client.CreatePuller(*r.subscription, *r.topic,
+		pubsub.WithNumGoroutines(*r.pullerNumGoroutines),
+		pubsub.WithMaxOutstandingMessages(*r.pullerMaxOutstandingMessages),
+		pubsub.WithMaxOutstandingBytes(*r.pullerMaxOutstandingBytes),
+	)
+}
+
+// for telepresence --swap-deployment
+// insertTelepresenceMountRoot prefixes path with $TELEPRESENCE_ROOT when that
+// variable is set, so secrets mounted into a swapped deployment resolve
+// locally; it is a no-op otherwise.
+func (r *recorder) insertTelepresenceMountRoot(path string) string {
+	volumeRoot := os.Getenv("TELEPRESENCE_ROOT")
+	if volumeRoot == "" {
+		return path
+	}
+	return volumeRoot + path
+}
diff --git a/pkg/feature/cmd/segmentpersister/BUILD.bazel b/pkg/feature/cmd/segmentpersister/BUILD.bazel
new file mode 100644
index 000000000..5a3def909
--- /dev/null
+++ b/pkg/feature/cmd/segmentpersister/BUILD.bazel
@@ -0,0 +1,21 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "go_default_library",
+    srcs = ["persister.go"],
+    importpath = "github.com/bucketeer-io/bucketeer/pkg/feature/cmd/segmentpersister",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//pkg/cache/v3:go_default_library",
+        "//pkg/cli:go_default_library",
+        "//pkg/feature/segmentpersister:go_default_library",
+        "//pkg/health:go_default_library",
+        "//pkg/metrics:go_default_library",
+        "//pkg/pubsub:go_default_library",
+        "//pkg/redis/v3:go_default_library",
+        "//pkg/rpc:go_default_library",
+        "//pkg/storage/v2/mysql:go_default_library",
+        "@in_gopkg_alecthomas_kingpin_v2//:go_default_library",
+        
"@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/feature/cmd/segmentpersister/persister.go b/pkg/feature/cmd/segmentpersister/persister.go new file mode 100644 index 000000000..8761d5f28 --- /dev/null +++ b/pkg/feature/cmd/segmentpersister/persister.go @@ -0,0 +1,221 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package segmentpersister + +import ( + "context" + "time" + + "go.uber.org/zap" + kingpin "gopkg.in/alecthomas/kingpin.v2" + + cachev3 "github.com/bucketeer-io/bucketeer/pkg/cache/v3" + "github.com/bucketeer-io/bucketeer/pkg/cli" + fsp "github.com/bucketeer-io/bucketeer/pkg/feature/segmentpersister" + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/pubsub" + redisv3 "github.com/bucketeer-io/bucketeer/pkg/redis/v3" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" +) + +const command = "segment-persister" + +type Persister interface { + Run(context.Context, metrics.Metrics, *zap.Logger) error +} + +type persister struct { + *kingpin.CmdClause + port *int + project *string + mysqlUser *string + mysqlPass *string + mysqlHost *string + mysqlPort *int + mysqlDBName *string + domainEventTopic *string + bulkSegmentUsersReceivedEventTopic *string + bulkSegmentUsersReceivedEventSubscription *string + maxMPS *int + numWorkers *int + flushSize *int + 
flushInterval *time.Duration + pullerNumGoroutines *int + pullerMaxOutstandingMessages *int + pullerMaxOutstandingBytes *int + redisServerName *string + redisAddr *string + redisPoolMaxIdle *int + redisPoolMaxActive *int + certPath *string + keyPath *string +} + +func RegisterCommand(r cli.CommandRegistry, p cli.ParentCommand) cli.Command { + cmd := p.Command(command, "Start segment persister") + persister := &persister{ + CmdClause: cmd, + port: cmd.Flag("port", "Port to bind to.").Default("9090").Int(), + project: cmd.Flag("project", "Google Cloud project name.").String(), + mysqlUser: cmd.Flag("mysql-user", "MySQL user.").Required().String(), + mysqlPass: cmd.Flag("mysql-pass", "MySQL password.").Required().String(), + mysqlHost: cmd.Flag("mysql-host", "MySQL host.").Required().String(), + mysqlPort: cmd.Flag("mysql-port", "MySQL port.").Required().Int(), + mysqlDBName: cmd.Flag("mysql-db-name", "MySQL database name.").Required().String(), + domainEventTopic: cmd.Flag("domain-event-topic", "PubSub topic to publish domain events.").Required().String(), + bulkSegmentUsersReceivedEventTopic: cmd.Flag( + "bulk-segment-users-received-event-topic", + "PubSub topic to subscribe bulk segment users received events.", + ).Required().String(), + bulkSegmentUsersReceivedEventSubscription: cmd.Flag( + "bulk-segment-users-received-event-subscription", + "PubSub subscription to subscribe bulk segment users received events.", + ).Required().String(), + maxMPS: cmd.Flag("max-mps", "Maximum messages should be handled in a second.").Default("100").Int(), + numWorkers: cmd.Flag("num-workers", "Number of workers.").Default("2").Int(), + flushSize: cmd.Flag("flush-size", "Maximum number of messages in one flush.").Default("2").Int(), + flushInterval: cmd.Flag("flush-interval", "Maximum interval between two flushes.").Default("10s").Duration(), + pullerNumGoroutines: cmd.Flag( + "puller-num-goroutines", + "Number of goroutines will be spawned to pull messages.", + ).Int(), + 
pullerMaxOutstandingMessages: cmd.Flag( + "puller-max-outstanding-messages", + "Maximum number of unprocessed messages.", + ).Int(), + pullerMaxOutstandingBytes: cmd.Flag("puller-max-outstanding-bytes", "Maximum size of unprocessed messages.").Int(), + redisServerName: cmd.Flag("redis-server-name", "Name of the redis.").Required().String(), + redisAddr: cmd.Flag("redis-addr", "Address of the redis.").Required().String(), + redisPoolMaxIdle: cmd.Flag( + "redis-pool-max-idle", + "Maximum number of idle connections in the pool.", + ).Default("5").Int(), + redisPoolMaxActive: cmd.Flag( + "redis-pool-max-active", + "Maximum number of connections allocated by the pool at a given time.", + ).Default("10").Int(), + certPath: cmd.Flag("cert", "Path to TLS certificate.").Required().String(), + keyPath: cmd.Flag("key", "Path to TLS key.").Required().String(), + } + r.RegisterCommand(persister) + return persister +} + +func (p *persister) Run(ctx context.Context, metrics metrics.Metrics, logger *zap.Logger) error { + registerer := metrics.DefaultRegisterer() + + mysqlClient, err := p.createMySQLClient(ctx, registerer, logger) + if err != nil { + return err + } + defer mysqlClient.Close() + + redisV3Client, err := redisv3.NewClient( + *p.redisAddr, + redisv3.WithPoolSize(*p.redisPoolMaxActive), + redisv3.WithMinIdleConns(*p.redisPoolMaxIdle), + redisv3.WithServerName(*p.redisServerName), + redisv3.WithMetrics(registerer), + redisv3.WithLogger(logger), + ) + if err != nil { + return err + } + defer redisV3Client.Close() + redisV3Cache := cachev3.NewRedisCache(redisV3Client) + + pubsubClient, err := p.createPubsubClient(ctx, logger) + if err != nil { + return err + } + segmentUsersPuller, err := pubsubClient.CreatePuller( + *p.bulkSegmentUsersReceivedEventSubscription, + *p.bulkSegmentUsersReceivedEventTopic, + pubsub.WithNumGoroutines(*p.pullerNumGoroutines), + pubsub.WithMaxOutstandingMessages(*p.pullerMaxOutstandingMessages), + 
pubsub.WithMaxOutstandingBytes(*p.pullerMaxOutstandingBytes), + ) + if err != nil { + return err + } + + domainPublisher, err := pubsubClient.CreatePublisher(*p.domainEventTopic) + if err != nil { + return err + } + defer domainPublisher.Stop() + + persister := fsp.NewPersister( + segmentUsersPuller, + domainPublisher, + mysqlClient, + redisV3Cache, + fsp.WithMaxMPS(*p.maxMPS), + fsp.WithNumWorkers(*p.numWorkers), + fsp.WithFlushSize(*p.flushSize), + fsp.WithFlushInterval(*p.flushInterval), + fsp.WithMetrics(registerer), + fsp.WithLogger(logger), + ) + defer persister.Stop() + go persister.Run() // nolint:errcheck + + healthChecker := health.NewGrpcChecker( + health.WithTimeout(time.Second), + health.WithCheck("metrics", metrics.Check), + health.WithCheck("segment-persister", persister.Check), + ) + go healthChecker.Run(ctx) + + server := rpc.NewServer(healthChecker, *p.certPath, *p.keyPath, + rpc.WithPort(*p.port), + rpc.WithMetrics(registerer), + rpc.WithLogger(logger), + rpc.WithHandler("/health", healthChecker), + ) + defer server.Stop(10 * time.Second) + go server.Run() + + <-ctx.Done() + return nil +} + +func (p *persister) createMySQLClient( + ctx context.Context, + registerer metrics.Registerer, + logger *zap.Logger, +) (mysql.Client, error) { + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + return mysql.NewClient( + ctx, + *p.mysqlUser, *p.mysqlPass, *p.mysqlHost, + *p.mysqlPort, + *p.mysqlDBName, + mysql.WithLogger(logger), + mysql.WithMetrics(registerer), + ) +} +func (p *persister) createPubsubClient(ctx context.Context, logger *zap.Logger) (*pubsub.Client, error) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + client, err := pubsub.NewClient(ctx, *p.project, pubsub.WithLogger(logger)) + if err != nil { + return nil, err + } + return client, nil +} diff --git a/pkg/feature/cmd/server/BUILD.bazel b/pkg/feature/cmd/server/BUILD.bazel new file mode 100644 index 000000000..ecec29b76 --- /dev/null +++ 
b/pkg/feature/cmd/server/BUILD.bazel @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["server.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/feature/cmd/server", + visibility = ["//visibility:public"], + deps = [ + "//pkg/account/client:go_default_library", + "//pkg/cache/v3:go_default_library", + "//pkg/cli:go_default_library", + "//pkg/experiment/client:go_default_library", + "//pkg/feature/api:go_default_library", + "//pkg/health:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/pubsub:go_default_library", + "//pkg/redis/v3:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/rpc/client:go_default_library", + "//pkg/storage/v2/bigtable:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//pkg/token:go_default_library", + "@in_gopkg_alecthomas_kingpin_v2//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/feature/cmd/server/server.go b/pkg/feature/cmd/server/server.go new file mode 100644 index 000000000..a39b2798e --- /dev/null +++ b/pkg/feature/cmd/server/server.go @@ -0,0 +1,272 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package server + +import ( + "context" + "time" + + "go.uber.org/zap" + kingpin "gopkg.in/alecthomas/kingpin.v2" + + accountclient "github.com/bucketeer-io/bucketeer/pkg/account/client" + cachev3 "github.com/bucketeer-io/bucketeer/pkg/cache/v3" + "github.com/bucketeer-io/bucketeer/pkg/cli" + experimentclient "github.com/bucketeer-io/bucketeer/pkg/experiment/client" + "github.com/bucketeer-io/bucketeer/pkg/feature/api" + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/pubsub" + redisv3 "github.com/bucketeer-io/bucketeer/pkg/redis/v3" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + bigtable "github.com/bucketeer-io/bucketeer/pkg/storage/v2/bigtable" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + "github.com/bucketeer-io/bucketeer/pkg/token" +) + +const command = "server" + +type server struct { + *kingpin.CmdClause + port *int + project *string + mysqlUser *string + mysqlPass *string + mysqlHost *string + mysqlPort *int + mysqlDBName *string + bigtableInstance *string + accountService *string + experimentService *string + redisServerName *string + redisAddr *string + redisPoolMaxIdle *int + redisPoolMaxActive *int + bulkSegmentUsersReceivedEventTopic *string + domainEventTopic *string + certPath *string + keyPath *string + serviceTokenPath *string + + oauthKeyPath *string + oauthClientID *string + oauthIssuer *string +} + +func RegisterServerCommand(r cli.CommandRegistry, p cli.ParentCommand) cli.Command { + cmd := p.Command(command, "Start the gRPC server") + server := &server{ + CmdClause: cmd, + port: cmd.Flag("port", "Port to bind to.").Default("9090").Int(), + project: cmd.Flag("project", "Google Cloud project name.").Required().String(), + mysqlUser: cmd.Flag("mysql-user", "MySQL user.").Required().String(), + mysqlPass: cmd.Flag("mysql-pass", "MySQL password.").Required().String(), + mysqlHost: 
cmd.Flag("mysql-host", "MySQL host.").Required().String(), + mysqlPort: cmd.Flag("mysql-port", "MySQL port.").Required().Int(), + mysqlDBName: cmd.Flag("mysql-db-name", "MySQL database name.").Required().String(), + bigtableInstance: cmd.Flag("bigtable-instance", "Instance name to use Bigtable.").Required().String(), + accountService: cmd.Flag( + "account-service", + "bucketeer-account-service address.", + ).Default("account:9090").String(), + experimentService: cmd.Flag( + "experiment-service", + "bucketeer-experiment-service address.", + ).Default("experiment:9090").String(), + redisServerName: cmd.Flag("redis-server-name", "Name of the redis.").Required().String(), + redisAddr: cmd.Flag("redis-addr", "Address of the redis.").Required().String(), + redisPoolMaxIdle: cmd.Flag( + "redis-pool-max-idle", + "Maximum number of idle connections in the pool.", + ).Default("5").Int(), + redisPoolMaxActive: cmd.Flag( + "redis-pool-max-active", + "Maximum number of connections allocated by the pool at a given time.", + ).Default("10").Int(), + bulkSegmentUsersReceivedEventTopic: cmd.Flag( + "bulk-segment-users-received-event-topic", + "PubSub topic to publish bulk segment users received events.", + ).Required().String(), + domainEventTopic: cmd.Flag("domain-event-topic", "PubSub topic to publish domain events.").Required().String(), + certPath: cmd.Flag("cert", "Path to TLS certificate.").Required().String(), + keyPath: cmd.Flag("key", "Path to TLS key.").Required().String(), + serviceTokenPath: cmd.Flag("service-token", "Path to service token.").Required().String(), + oauthKeyPath: cmd.Flag("oauth-key", "Path to public key used to verify oauth token.").Required().String(), + oauthClientID: cmd.Flag("oauth-client-id", "The oauth clientID registered at dex.").Required().String(), + oauthIssuer: cmd.Flag("oauth-issuer", "The url of dex issuer.").Required().String(), + } + r.RegisterCommand(server) + return server +} + +func (s *server) Run(ctx context.Context, metrics 
metrics.Metrics, logger *zap.Logger) error { + registerer := metrics.DefaultRegisterer() + + mysqlClient, err := s.createMySQLClient(ctx, registerer, logger) + if err != nil { + return err + } + defer mysqlClient.Close() + + btClient, err := s.createBigtableClient(ctx, registerer, logger) + if err != nil { + return err + } + defer btClient.Close() + + creds, err := client.NewPerRPCCredentials(*s.serviceTokenPath) + if err != nil { + return err + } + + accountClient, err := accountclient.NewClient(*s.accountService, *s.certPath, + client.WithPerRPCCredentials(creds), + client.WithDialTimeout(30*time.Second), + client.WithBlock(), + client.WithMetrics(registerer), + client.WithLogger(logger), + ) + if err != nil { + return err + } + defer accountClient.Close() + + experimentClient, err := experimentclient.NewClient(*s.experimentService, *s.certPath, + client.WithPerRPCCredentials(creds), + client.WithDialTimeout(30*time.Second), + client.WithBlock(), + client.WithMetrics(registerer), + client.WithLogger(logger), + ) + if err != nil { + return err + } + defer experimentClient.Close() + + redisV3Client, err := redisv3.NewClient( + *s.redisAddr, + redisv3.WithPoolSize(*s.redisPoolMaxActive), + redisv3.WithMinIdleConns(*s.redisPoolMaxIdle), + redisv3.WithServerName(*s.redisServerName), + redisv3.WithMetrics(registerer), + redisv3.WithLogger(logger), + ) + if err != nil { + return err + } + defer redisV3Client.Close() + redisV3Cache := cachev3.NewRedisCache(redisV3Client) + + pubsubClient, err := s.createPubsubClient(ctx, registerer, logger) + if err != nil { + return err + } + segmentUsersPublisher, err := pubsubClient.CreatePublisher(*s.bulkSegmentUsersReceivedEventTopic) + if err != nil { + return err + } + defer segmentUsersPublisher.Stop() + domainPublisher, err := pubsubClient.CreatePublisher(*s.domainEventTopic) + if err != nil { + return err + } + defer domainPublisher.Stop() + + service := api.NewFeatureService( + mysqlClient, + btClient, + accountClient, + 
experimentClient, + redisV3Cache, + segmentUsersPublisher, + domainPublisher, + api.WithLogger(logger), + ) + + verifier, err := token.NewVerifier(*s.oauthKeyPath, *s.oauthIssuer, *s.oauthClientID) + if err != nil { + return err + } + + healthChecker := health.NewGrpcChecker( + health.WithTimeout(time.Second), + health.WithCheck("metrics", metrics.Check), + ) + go healthChecker.Run(ctx) + + server := rpc.NewServer(service, *s.certPath, *s.keyPath, + rpc.WithPort(*s.port), + rpc.WithVerifier(verifier), + rpc.WithMetrics(registerer), + rpc.WithLogger(logger), + rpc.WithService(healthChecker), + rpc.WithHandler("/health", healthChecker), + ) + defer server.Stop(10 * time.Second) + go server.Run() + + <-ctx.Done() + return nil +} + +func (s *server) createMySQLClient( + ctx context.Context, + registerer metrics.Registerer, + logger *zap.Logger, +) (mysql.Client, error) { + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + return mysql.NewClient( + ctx, + *s.mysqlUser, *s.mysqlPass, *s.mysqlHost, + *s.mysqlPort, + *s.mysqlDBName, + mysql.WithLogger(logger), + mysql.WithMetrics(registerer), + ) +} + +func (s *server) createPubsubClient( + ctx context.Context, + registerer metrics.Registerer, + logger *zap.Logger, +) (*pubsub.Client, error) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + client, err := pubsub.NewClient( + ctx, + *s.project, + pubsub.WithMetrics(registerer), + pubsub.WithLogger(logger), + ) + if err != nil { + return nil, err + } + return client, nil +} + +func (s *server) createBigtableClient( + ctx context.Context, + registerer metrics.Registerer, + logger *zap.Logger, +) (bigtable.Client, error) { + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + return bigtable.NewBigtableClient(ctx, *s.project, *s.bigtableInstance, + bigtable.WithMetrics(registerer), + bigtable.WithLogger(logger), + ) +} diff --git a/pkg/feature/command/BUILD.bazel b/pkg/feature/command/BUILD.bazel new 
file mode 100644 index 000000000..bdd9db290 --- /dev/null +++ b/pkg/feature/command/BUILD.bazel @@ -0,0 +1,43 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "command.go", + "detail.go", + "eventfactory.go", + "feature.go", + "segment.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/feature/command", + visibility = ["//visibility:public"], + deps = [ + "//pkg/domainevent/domain:go_default_library", + "//pkg/feature/domain:go_default_library", + "//pkg/pubsub/publisher:go_default_library", + "//pkg/uuid:go_default_library", + "//proto/event/domain:go_default_library", + "//proto/feature:go_default_library", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_golang_protobuf//ptypes:go_default_library_gen", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "feature_test.go", + "segment_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//pkg/feature/domain:go_default_library", + "//pkg/pubsub/publisher/mock:go_default_library", + "//pkg/uuid:go_default_library", + "//proto/account:go_default_library", + "//proto/event/domain:go_default_library", + "//proto/feature:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + ], +) diff --git a/pkg/feature/command/command.go b/pkg/feature/command/command.go new file mode 100644 index 000000000..5537c9fa6 --- /dev/null +++ b/pkg/feature/command/command.go @@ -0,0 +1,44 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package command + +import ( + "context" + "errors" + + "github.com/golang/protobuf/ptypes" + + proto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +var ( + errBadCommand = errors.New("command: cannot handle command") +) + +type Command interface{} + +type Handler interface { + Handle(ctx context.Context, cmd Command) error +} + +func UnmarshalCommand(cmd *proto.Command) (Command, error) { + var x ptypes.DynamicAny + if err := ptypes.UnmarshalAny(cmd.Command, &x); err != nil { + return nil, err + } + return x.Message, nil +} + +// TODO: write test to unmarshal any and get correct type diff --git a/pkg/feature/command/detail.go b/pkg/feature/command/detail.go new file mode 100644 index 000000000..0967da254 --- /dev/null +++ b/pkg/feature/command/detail.go @@ -0,0 +1,89 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package command + +import ( + "context" + + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" + proto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +func (h *FeatureCommandHandler) RenameFeature(ctx context.Context, cmd *proto.RenameFeatureCommand) error { + err := h.feature.Rename(cmd.Name) + if err != nil { + return err + } + event, err := h.eventFactory.CreateEvent(eventproto.Event_FEATURE_RENAMED, &eventproto.FeatureRenamedEvent{ + Id: h.feature.Id, + Name: cmd.Name, + }) + if err != nil { + return err + } + h.Events = append(h.Events, event) + return nil +} + +func (h *FeatureCommandHandler) ChangeDescription(ctx context.Context, cmd *proto.ChangeDescriptionCommand) error { + err := h.feature.ChangeDescription(cmd.Description) + if err != nil { + return err + } + event, err := h.eventFactory.CreateEvent( + eventproto.Event_FEATURE_DESCRIPTION_CHANGED, + &eventproto.FeatureDescriptionChangedEvent{ + Id: h.feature.Id, + Description: cmd.Description, + }, + ) + if err != nil { + return err + } + h.Events = append(h.Events, event) + return nil +} + +func (h *FeatureCommandHandler) AddTag(ctx context.Context, cmd *proto.AddTagCommand) error { + err := h.feature.AddTag(cmd.Tag) + if err != nil { + return err + } + event, err := h.eventFactory.CreateEvent(eventproto.Event_FEATURE_TAG_ADDED, &eventproto.FeatureTagAddedEvent{ + Id: h.feature.Id, + Tag: cmd.Tag, + }) + if err != nil { + return err + } + h.Events = append(h.Events, event) + return nil +} + +func (h *FeatureCommandHandler) RemoveTag(ctx context.Context, cmd *proto.RemoveTagCommand) error { + err := h.feature.RemoveTag(cmd.Tag) + if err != nil { + return err + } + event, err := h.eventFactory.CreateEvent(eventproto.Event_FEATURE_TAG_REMOVED, &eventproto.FeatureTagRemovedEvent{ + Id: h.feature.Id, + Tag: cmd.Tag, + }) + if err != nil { + return err + } + h.Events = append(h.Events, event) + return nil +} diff --git a/pkg/feature/command/eventfactory.go 
b/pkg/feature/command/eventfactory.go new file mode 100644 index 000000000..4c11c29e0 --- /dev/null +++ b/pkg/feature/command/eventfactory.go @@ -0,0 +1,47 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package command + +import ( + "github.com/golang/protobuf/proto" // nolint:staticcheck + + domainevent "github.com/bucketeer-io/bucketeer/pkg/domainevent/domain" + "github.com/bucketeer-io/bucketeer/pkg/feature/domain" + domainproto "github.com/bucketeer-io/bucketeer/proto/event/domain" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +type FeatureEventFactory struct { + editor *eventproto.Editor + feature *domain.Feature + environmentNamespace string + comment string +} + +func (s *FeatureEventFactory) CreateEvent( + eventType eventproto.Event_Type, + event proto.Message, +) (*domainproto.Event, error) { + return domainevent.NewEvent( + s.editor, + eventproto.Event_FEATURE, + s.feature.Id, + eventType, + event, + s.environmentNamespace, + domainevent.WithComment(s.comment), + domainevent.WithNewVersion(s.feature.Version), + ) +} diff --git a/pkg/feature/command/feature.go b/pkg/feature/command/feature.go new file mode 100644 index 000000000..32cd6d8bb --- /dev/null +++ b/pkg/feature/command/feature.go @@ -0,0 +1,758 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package command + +import ( + "context" + "strings" + + "github.com/bucketeer-io/bucketeer/pkg/feature/domain" + "github.com/bucketeer-io/bucketeer/pkg/uuid" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" + proto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +type FeatureCommandHandler struct { + feature *domain.Feature + eventFactory *FeatureEventFactory + Events []*eventproto.Event +} + +func NewFeatureCommandHandler( + editor *eventproto.Editor, + feature *domain.Feature, + environmentNamespace string, + comment string, +) *FeatureCommandHandler { + return &FeatureCommandHandler{ + feature: feature, + eventFactory: &FeatureEventFactory{ + editor: editor, + feature: feature, + environmentNamespace: environmentNamespace, + comment: comment, + }, + Events: []*eventproto.Event{}, + } +} + +// for unit test +func NewEmptyFeatureCommandHandler() *FeatureCommandHandler { + return &FeatureCommandHandler{} +} + +func (h *FeatureCommandHandler) Handle(ctx context.Context, cmd Command) error { + switch c := cmd.(type) { + case *proto.CreateFeatureCommand: + return h.CreateFeature(ctx, c) + case *proto.EnableFeatureCommand: + return h.EnableFeature(ctx, c) + case *proto.DisableFeatureCommand: + return h.DisableFeature(ctx, c) + case *proto.ArchiveFeatureCommand: + return h.ArchiveFeature(ctx, c) + case *proto.UnarchiveFeatureCommand: + return h.UnarchiveFeature(ctx, c) + case 
*proto.DeleteFeatureCommand: + return h.DeleteFeature(ctx, c) + case *proto.AddUserToVariationCommand: + return h.AddUserToVariation(ctx, c) + case *proto.RemoveUserFromVariationCommand: + return h.RemoveUserFromVariation(ctx, c) + case *proto.AddRuleCommand: + return h.AddRule(ctx, c) + case *proto.ChangeRuleStrategyCommand: + return h.ChangeRuleStrategy(ctx, c) + case *proto.DeleteRuleCommand: + return h.DeleteRule(ctx, c) + case *proto.AddClauseCommand: + return h.AddClause(ctx, c) + case *proto.DeleteClauseCommand: + return h.DeleteClause(ctx, c) + case *proto.ChangeClauseAttributeCommand: + return h.ChangeClauseAttribute(ctx, c) + case *proto.ChangeClauseOperatorCommand: + return h.ChangeClauseOperator(ctx, c) + case *proto.AddClauseValueCommand: + return h.AddClauseValue(ctx, c) + case *proto.RemoveClauseValueCommand: + return h.RemoveClauseValue(ctx, c) + case *proto.ChangeDefaultStrategyCommand: + return h.ChangeDefaultStrategy(ctx, c) + case *proto.ChangeOffVariationCommand: + return h.ChangeOffVariation(ctx, c) + case *proto.ChangeFixedStrategyCommand: + return h.ChangeFixedStrategy(ctx, c) + case *proto.ChangeRolloutStrategyCommand: + return h.ChangeRolloutStrategy(ctx, c) + case *proto.AddVariationCommand: + return h.AddVariation(ctx, c) + case *proto.RemoveVariationCommand: + return h.RemoveVariation(ctx, c) + case *proto.ChangeVariationValueCommand: + return h.ChangeVariationValue(ctx, c) + case *proto.ChangeVariationNameCommand: + return h.ChangeVariationName(ctx, c) + case *proto.ChangeVariationDescriptionCommand: + return h.ChangeVariationDescription(ctx, c) + case *proto.RenameFeatureCommand: + return h.RenameFeature(ctx, c) + case *proto.ChangeDescriptionCommand: + return h.ChangeDescription(ctx, c) + case *proto.AddTagCommand: + return h.AddTag(ctx, c) + case *proto.RemoveTagCommand: + return h.RemoveTag(ctx, c) + case *proto.IncrementFeatureVersionCommand: + return h.IncrementFeatureVersion(ctx, c) + case *proto.CloneFeatureCommand: + return 
h.CloneFeature(ctx, c) + case *proto.ResetSamplingSeedCommand: + return h.ResetSamplingSeed(ctx, c) + case *proto.AddPrerequisiteCommand: + return h.AddPrerequisite(ctx, c) + case *proto.ChangePrerequisiteVariationCommand: + return h.ChangePrerequisiteVariation(ctx, c) + case *proto.RemovePrerequisiteCommand: + return h.RemovePrerequisite(ctx, c) + default: + return errBadCommand + } +} + +func (h *FeatureCommandHandler) CreateFeature(ctx context.Context, cmd *proto.CreateFeatureCommand) error { + event, err := h.eventFactory.CreateEvent(eventproto.Event_FEATURE_CREATED, &eventproto.FeatureCreatedEvent{ + Id: h.feature.Id, + Name: h.feature.Name, + Description: h.feature.Description, + User: "default", + Variations: h.feature.Variations, + DefaultOnVariationIndex: cmd.DefaultOnVariationIndex, + DefaultOffVariationIndex: cmd.DefaultOffVariationIndex, + VariationType: cmd.VariationType, + }) + if err != nil { + return err + } + h.Events = append(h.Events, event) + return nil +} + +func (h *FeatureCommandHandler) EnableFeature(ctx context.Context, cmd *proto.EnableFeatureCommand) error { + if err := h.feature.Enable(); err != nil { + return err + } + event, err := h.eventFactory.CreateEvent(eventproto.Event_FEATURE_ENABLED, &eventproto.FeatureEnabledEvent{ + Id: h.feature.Id, + }) + if err != nil { + return err + } + h.Events = append(h.Events, event) + return nil +} + +func (h *FeatureCommandHandler) DisableFeature(ctx context.Context, cmd *proto.DisableFeatureCommand) error { + if err := h.feature.Disable(); err != nil { + return err + } + event, err := h.eventFactory.CreateEvent(eventproto.Event_FEATURE_DISABLED, &eventproto.FeatureDisabledEvent{ + Id: h.feature.Id, + }) + if err != nil { + return err + } + h.Events = append(h.Events, event) + return nil +} + +func (h *FeatureCommandHandler) ArchiveFeature(ctx context.Context, cmd *proto.ArchiveFeatureCommand) error { + if err := h.feature.Archive(); err != nil { + return err + } + event, err := 
h.eventFactory.CreateEvent(eventproto.Event_FEATURE_ARCHIVED, &eventproto.FeatureArchivedEvent{ + Id: h.feature.Id, + }) + if err != nil { + return err + } + h.Events = append(h.Events, event) + return nil +} + +func (h *FeatureCommandHandler) UnarchiveFeature(ctx context.Context, cmd *proto.UnarchiveFeatureCommand) error { + if err := h.feature.Unarchive(); err != nil { + return err + } + event, err := h.eventFactory.CreateEvent(eventproto.Event_FEATURE_UNARCHIVED, &eventproto.FeatureUnarchivedEvent{ + Id: h.feature.Id, + }) + if err != nil { + return err + } + h.Events = append(h.Events, event) + return nil +} + +func (h *FeatureCommandHandler) DeleteFeature(ctx context.Context, cmd *proto.DeleteFeatureCommand) error { + if err := h.feature.Delete(); err != nil { + return err + } + event, err := h.eventFactory.CreateEvent(eventproto.Event_FEATURE_DELETED, &eventproto.FeatureDeletedEvent{ + Id: h.feature.Id, + }) + if err != nil { + return err + } + h.Events = append(h.Events, event) + return nil +} + +func (h *FeatureCommandHandler) IncrementFeatureVersion( + ctx context.Context, + cmd *proto.IncrementFeatureVersionCommand, +) error { + err := h.feature.IncrementVersion() + if err != nil { + return err + } + event, err := h.eventFactory.CreateEvent( + eventproto.Event_FEATURE_VERSION_INCREMENTED, + &eventproto.FeatureVersionIncrementedEvent{ + Id: h.feature.Id, + Version: h.feature.Version, + }, + ) + if err != nil { + return err + } + h.Events = append(h.Events, event) + return nil +} + +func (h *FeatureCommandHandler) CloneFeature(ctx context.Context, cmd *proto.CloneFeatureCommand) error { + event, err := h.eventFactory.CreateEvent(eventproto.Event_FEATURE_CLONED, &eventproto.FeatureClonedEvent{ + Id: h.feature.Id, + Name: h.feature.Name, + Description: h.feature.Description, + Variations: h.feature.Variations, + Targets: h.feature.Targets, + Rules: h.feature.Rules, + DefaultStrategy: h.feature.DefaultStrategy, + OffVariation: h.feature.OffVariation, + Tags: 
h.feature.Tags, + Maintainer: h.feature.Maintainer, + VariationType: h.feature.VariationType, + }) + if err != nil { + return err + } + h.Events = append(h.Events, event) + return nil +} + +func (h *FeatureCommandHandler) ResetSamplingSeed(ctx context.Context, cmd *proto.ResetSamplingSeedCommand) error { + if err := h.feature.ResetSamplingSeed(); err != nil { + return err + } + event, err := h.eventFactory.CreateEvent( + eventproto.Event_SAMPLING_SEED_RESET, + &eventproto.FeatureSamplingSeedResetEvent{ + SamplingSeed: h.feature.SamplingSeed, + }, + ) + if err != nil { + return err + } + h.Events = append(h.Events, event) + return nil +} + +func (h *FeatureCommandHandler) AddPrerequisite(ctx context.Context, cmd *proto.AddPrerequisiteCommand) error { + if err := h.feature.AddPrerequisite(cmd.Prerequisite.FeatureId, cmd.Prerequisite.VariationId); err != nil { + return err + } + event, err := h.eventFactory.CreateEvent( + eventproto.Event_PREREQUISITE_ADDED, + &eventproto.PrerequisiteAddedEvent{ + Prerequisite: cmd.Prerequisite, + }, + ) + if err != nil { + return err + } + h.Events = append(h.Events, event) + return nil +} + +func (h *FeatureCommandHandler) ChangePrerequisiteVariation( + ctx context.Context, + cmd *proto.ChangePrerequisiteVariationCommand, +) error { + if err := h.feature.ChangePrerequisiteVariation(cmd.Prerequisite.FeatureId, cmd.Prerequisite.VariationId); err != nil { + return err + } + event, err := h.eventFactory.CreateEvent( + eventproto.Event_PREREQUISITE_VARIATION_CHANGED, + &eventproto.PrerequisiteVariationChangedEvent{ + Prerequisite: cmd.Prerequisite, + }, + ) + if err != nil { + return err + } + h.Events = append(h.Events, event) + return nil +} + +func (h *FeatureCommandHandler) RemovePrerequisite(ctx context.Context, cmd *proto.RemovePrerequisiteCommand) error { + if err := h.feature.RemovePrerequisite(cmd.FeatureId); err != nil { + return err + } + event, err := h.eventFactory.CreateEvent( + eventproto.Event_PREREQUISITE_REMOVED, + 
&eventproto.PrerequisiteRemovedEvent{ + FeatureId: cmd.FeatureId, + }, + ) + if err != nil { + return err + } + h.Events = append(h.Events, event) + return nil +} + +func (h *FeatureCommandHandler) AddUserToVariation(ctx context.Context, cmd *proto.AddUserToVariationCommand) error { + userID := strings.TrimSpace(cmd.User) + err := h.feature.AddUserToVariation(cmd.Id, userID) + if err != nil { + return err + } + event, err := h.eventFactory.CreateEvent(eventproto.Event_VARIATION_USER_ADDED, &eventproto.VariationUserAddedEvent{ + FeatureId: h.feature.Id, + Id: cmd.Id, + User: userID, + }) + if err != nil { + return err + } + h.Events = append(h.Events, event) + return nil +} + +func (h *FeatureCommandHandler) RemoveUserFromVariation( + ctx context.Context, + cmd *proto.RemoveUserFromVariationCommand, +) error { + userID := strings.TrimSpace(cmd.User) + err := h.feature.RemoveUserFromVariation(cmd.Id, userID) + if err != nil { + return err + } + event, err := h.eventFactory.CreateEvent( + eventproto.Event_VARIATION_USER_REMOVED, + &eventproto.VariationUserRemovedEvent{ + FeatureId: h.feature.Id, + Id: cmd.Id, + User: userID, + }, + ) + if err != nil { + return err + } + h.Events = append(h.Events, event) + return nil +} + +func (h *FeatureCommandHandler) AddRule(ctx context.Context, cmd *proto.AddRuleCommand) error { + for _, clause := range cmd.Rule.Clauses { + id, err := uuid.NewUUID() + if err != nil { + return err + } + clause.Id = id.String() + } + err := h.feature.AddRule(cmd.Rule) + if err != nil { + return err + } + event, err := h.eventFactory.CreateEvent(eventproto.Event_FEATURE_RULE_ADDED, &eventproto.FeatureRuleAddedEvent{ + Id: h.feature.Id, + Rule: cmd.Rule, + }) + if err != nil { + return err + } + h.Events = append(h.Events, event) + return nil +} + +func (h *FeatureCommandHandler) ChangeRuleStrategy(ctx context.Context, cmd *proto.ChangeRuleStrategyCommand) error { + if err := h.feature.ChangeRuleStrategy(cmd.RuleId, cmd.Strategy); err != nil { + 
return err + } + event, err := h.eventFactory.CreateEvent( + eventproto.Event_FEATURE_RULE_STRATEGY_CHANGED, + &eventproto.FeatureChangeRuleStrategyEvent{ + FeatureId: h.feature.Id, + RuleId: cmd.RuleId, + Strategy: cmd.Strategy, + }, + ) + if err != nil { + return err + } + h.Events = append(h.Events, event) + return nil +} + +func (h *FeatureCommandHandler) DeleteRule(ctx context.Context, cmd *proto.DeleteRuleCommand) error { + err := h.feature.DeleteRule(cmd.Id) + if err != nil { + return err + } + event, err := h.eventFactory.CreateEvent(eventproto.Event_FEATURE_RULE_DELETED, &eventproto.FeatureRuleDeletedEvent{ + Id: h.feature.Id, + RuleId: cmd.Id, + }) + if err != nil { + return err + } + h.Events = append(h.Events, event) + return nil +} + +func (h *FeatureCommandHandler) AddClause(ctx context.Context, cmd *proto.AddClauseCommand) error { + id, err := uuid.NewUUID() + if err != nil { + return err + } + cmd.Clause.Id = id.String() + err = h.feature.AddClause(cmd.RuleId, cmd.Clause) + if err != nil { + return err + } + event, err := h.eventFactory.CreateEvent(eventproto.Event_RULE_CLAUSE_ADDED, &eventproto.RuleClauseAddedEvent{ + FeatureId: h.feature.Id, + RuleId: cmd.RuleId, + Clause: cmd.Clause, + }) + if err != nil { + return err + } + h.Events = append(h.Events, event) + return nil +} + +func (h *FeatureCommandHandler) DeleteClause(ctx context.Context, cmd *proto.DeleteClauseCommand) error { + err := h.feature.DeleteClause(cmd.RuleId, cmd.Id) + if err != nil { + return err + } + event, err := h.eventFactory.CreateEvent(eventproto.Event_RULE_CLAUSE_DELETED, &eventproto.RuleClauseDeletedEvent{ + FeatureId: h.feature.Id, + RuleId: cmd.RuleId, + Id: cmd.Id, + }) + if err != nil { + return err + } + h.Events = append(h.Events, event) + return nil +} + +func (h *FeatureCommandHandler) ChangeClauseAttribute( + ctx context.Context, + cmd *proto.ChangeClauseAttributeCommand, +) error { + err := h.feature.ChangeClauseAttribute(cmd.RuleId, cmd.Id, cmd.Attribute) + if 
err != nil { + return err + } + event, err := h.eventFactory.CreateEvent( + eventproto.Event_CLAUSE_ATTRIBUTE_CHANGED, + &eventproto.ClauseAttributeChangedEvent{ + FeatureId: h.feature.Id, + RuleId: cmd.RuleId, + Id: cmd.Id, + Attribute: cmd.Attribute, + }, + ) + if err != nil { + return err + } + h.Events = append(h.Events, event) + return nil +} + +func (h *FeatureCommandHandler) ChangeClauseOperator( + ctx context.Context, + cmd *proto.ChangeClauseOperatorCommand, +) error { + err := h.feature.ChangeClauseOperator(cmd.RuleId, cmd.Id, cmd.Operator) + if err != nil { + return err + } + event, err := h.eventFactory.CreateEvent( + eventproto.Event_CLAUSE_OPERATOR_CHANGED, + &eventproto.ClauseOperatorChangedEvent{ + FeatureId: h.feature.Id, + RuleId: cmd.RuleId, + Id: cmd.Id, + Operator: cmd.Operator, + }, + ) + if err != nil { + return err + } + h.Events = append(h.Events, event) + return nil +} + +func (h *FeatureCommandHandler) AddClauseValue(ctx context.Context, cmd *proto.AddClauseValueCommand) error { + err := h.feature.AddClauseValue(cmd.RuleId, cmd.Id, cmd.Value) + if err != nil { + return err + } + event, err := h.eventFactory.CreateEvent(eventproto.Event_CLAUSE_VALUE_ADDED, &eventproto.ClauseValueAddedEvent{ + FeatureId: h.feature.Id, + RuleId: cmd.RuleId, + Id: cmd.Id, + Value: cmd.Value, + }) + if err != nil { + return err + } + h.Events = append(h.Events, event) + return nil +} + +func (h *FeatureCommandHandler) RemoveClauseValue(ctx context.Context, cmd *proto.RemoveClauseValueCommand) error { + err := h.feature.RemoveClauseValue(cmd.RuleId, cmd.Id, cmd.Value) + if err != nil { + return err + } + event, err := h.eventFactory.CreateEvent(eventproto.Event_CLAUSE_VALUE_REMOVED, &eventproto.ClauseValueRemovedEvent{ + FeatureId: h.feature.Id, + RuleId: cmd.RuleId, + Id: cmd.Id, + Value: cmd.Value, + }) + if err != nil { + return err + } + h.Events = append(h.Events, event) + return nil +} + +func (h *FeatureCommandHandler) ChangeDefaultStrategy( + ctx 
context.Context, + cmd *proto.ChangeDefaultStrategyCommand, +) error { + err := h.feature.ChangeDefaultStrategy(cmd.Strategy) + if err != nil { + return err + } + event, err := h.eventFactory.CreateEvent( + eventproto.Event_FEATURE_DEFAULT_STRATEGY_CHANGED, + &eventproto.FeatureDefaultStrategyChangedEvent{ + Id: h.feature.Id, + Strategy: cmd.Strategy, + }, + ) + if err != nil { + return err + } + h.Events = append(h.Events, event) + return nil +} + +func (h *FeatureCommandHandler) ChangeOffVariation(ctx context.Context, cmd *proto.ChangeOffVariationCommand) error { + err := h.feature.ChangeOffVariation(cmd.Id) + if err != nil { + return err + } + event, err := h.eventFactory.CreateEvent( + eventproto.Event_FEATURE_OFF_VARIATION_CHANGED, + &eventproto.FeatureOffVariationChangedEvent{ + Id: h.feature.Id, + OffVariation: cmd.Id, + }, + ) + if err != nil { + return err + } + h.Events = append(h.Events, event) + return nil +} + +func (h *FeatureCommandHandler) ChangeFixedStrategy( + ctx context.Context, + cmd *proto.ChangeFixedStrategyCommand, +) error { + if err := h.feature.ChangeFixedStrategy(cmd.RuleId, cmd.Strategy); err != nil { + return err + } + event, err := h.eventFactory.CreateEvent( + eventproto.Event_RULE_FIXED_STRATEGY_CHANGED, + &eventproto.FeatureFixedStrategyChangedEvent{ + FeatureId: h.feature.Id, + RuleId: cmd.RuleId, + Strategy: cmd.Strategy, + }, + ) + if err != nil { + return err + } + h.Events = append(h.Events, event) + return nil +} + +func (h *FeatureCommandHandler) ChangeRolloutStrategy( + ctx context.Context, + cmd *proto.ChangeRolloutStrategyCommand, +) error { + if err := h.feature.ChangeRolloutStrategy(cmd.RuleId, cmd.Strategy); err != nil { + return err + } + event, err := h.eventFactory.CreateEvent( + eventproto.Event_RULE_ROLLOUT_STRATEGY_CHANGED, + &eventproto.FeatureRolloutStrategyChangedEvent{ + FeatureId: h.feature.Id, + RuleId: cmd.RuleId, + Strategy: cmd.Strategy, + }, + ) + if err != nil { + return err + } + h.Events = 
append(h.Events, event) + return nil +} + +func (h *FeatureCommandHandler) AddVariation(ctx context.Context, cmd *proto.AddVariationCommand) error { + id, err := uuid.NewUUID() + if err != nil { + return err + } + if err = h.feature.AddVariation(id.String(), cmd.Value, cmd.Name, cmd.Description); err != nil { + return err + } + event, err := h.eventFactory.CreateEvent( + eventproto.Event_FEATURE_VARIATION_ADDED, + &eventproto.FeatureVariationAddedEvent{ + Id: h.feature.Id, + Variation: &proto.Variation{ + Id: id.String(), + Value: cmd.Value, + Name: cmd.Name, + Description: cmd.Description, + }, + }, + ) + if err != nil { + return err + } + h.Events = append(h.Events, event) + return nil +} + +func (h *FeatureCommandHandler) RemoveVariation(ctx context.Context, cmd *proto.RemoveVariationCommand) error { + err := h.feature.RemoveVariation(cmd.Id) + if err != nil { + return err + } + event, err := h.eventFactory.CreateEvent( + eventproto.Event_FEATURE_VARIATION_REMOVED, + &eventproto.FeatureVariationRemovedEvent{ + Id: h.feature.Id, + VariationId: cmd.Id, + }, + ) + if err != nil { + return err + } + h.Events = append(h.Events, event) + return nil +} + +func (h *FeatureCommandHandler) ChangeVariationValue( + ctx context.Context, + cmd *proto.ChangeVariationValueCommand, +) error { + err := h.feature.ChangeVariationValue(cmd.Id, cmd.Value) + if err != nil { + return err + } + event, err := h.eventFactory.CreateEvent( + eventproto.Event_VARIATION_VALUE_CHANGED, + &eventproto.VariationValueChangedEvent{ + FeatureId: h.feature.Id, + Id: cmd.Id, + Value: cmd.Value, + }, + ) + if err != nil { + return err + } + h.Events = append(h.Events, event) + return nil +} + +func (h *FeatureCommandHandler) ChangeVariationName( + ctx context.Context, + cmd *proto.ChangeVariationNameCommand, +) error { + err := h.feature.ChangeVariationName(cmd.Id, cmd.Name) + if err != nil { + return err + } + event, err := h.eventFactory.CreateEvent( + eventproto.Event_VARIATION_NAME_CHANGED, + 
&eventproto.VariationNameChangedEvent{ + FeatureId: h.feature.Id, + Id: cmd.Id, + Name: cmd.Name, + }, + ) + if err != nil { + return err + } + h.Events = append(h.Events, event) + return nil +} + +func (h *FeatureCommandHandler) ChangeVariationDescription( + ctx context.Context, + cmd *proto.ChangeVariationDescriptionCommand, +) error { + err := h.feature.ChangeVariationDescription(cmd.Id, cmd.Description) + if err != nil { + return err + } + event, err := h.eventFactory.CreateEvent( + eventproto.Event_VARIATION_DESCRIPTION_CHANGED, + &eventproto.VariationDescriptionChangedEvent{ + FeatureId: h.feature.Id, + Id: cmd.Id, + Description: cmd.Description, + }, + ) + if err != nil { + return err + } + h.Events = append(h.Events, event) + return nil +} diff --git a/pkg/feature/command/feature_test.go b/pkg/feature/command/feature_test.go new file mode 100644 index 000000000..0b89fbaf4 --- /dev/null +++ b/pkg/feature/command/feature_test.go @@ -0,0 +1,587 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package command + +import ( + "context" + "fmt" + "reflect" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/feature/domain" + "github.com/bucketeer-io/bucketeer/pkg/uuid" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" + proto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +func TestAddFixedStrategyRule(t *testing.T) { + f := makeFeature("feature-id") + id, _ := uuid.NewUUID() + rID := id.String() + vID := f.Variations[0].Id + expected := &proto.Rule{ + Id: rID, + Strategy: &proto.Strategy{ + Type: proto.Strategy_FIXED, + FixedStrategy: &proto.FixedStrategy{Variation: vID}, + }, + } + patterns := []*struct { + rule *proto.Rule + expected error + }{ + { + rule: expected, + expected: nil, + }, + } + targetingCmd := &FeatureCommandHandler{ + feature: f, + eventFactory: makeEventFactory(f), + } + for i, p := range patterns { + cmd := &proto.AddRuleCommand{Rule: p.rule} + err := targetingCmd.Handle(context.Background(), cmd) + des := fmt.Sprintf("index: %d", i) + assert.Equal(t, p.expected, err, des) + } + if !reflect.DeepEqual(expected, f.Rules[1]) { + t.Fatalf("Rule is not equal. 
Expected: %v, actual: %v", expected, f.Rules[1]) + } +} + +func TestAddRolloutStrategyRule(t *testing.T) { + f := makeFeature("feature-id") + id, _ := uuid.NewUUID() + rID := id.String() + vID1 := f.Variations[0].Id + vID2 := f.Variations[1].Id + expected := &proto.Rule{ + Id: rID, + Strategy: &proto.Strategy{ + Type: proto.Strategy_ROLLOUT, + RolloutStrategy: &proto.RolloutStrategy{ + Variations: []*proto.RolloutStrategy_Variation{ + { + Variation: vID1, + Weight: 30000, + }, + { + Variation: vID2, + Weight: 70000, + }, + }, + }, + }, + } + patterns := []*struct { + rule *proto.Rule + expected error + }{ + { + rule: expected, + expected: nil, + }, + } + targetingCmd := &FeatureCommandHandler{ + feature: f, + eventFactory: makeEventFactory(f), + } + for i, p := range patterns { + cmd := &proto.AddRuleCommand{Rule: p.rule} + err := targetingCmd.Handle(context.Background(), cmd) + des := fmt.Sprintf("index: %d", i) + assert.Equal(t, p.expected, err, des) + } + if !reflect.DeepEqual(expected, f.Rules[1]) { + t.Fatalf("Rule is not equal. Expected: %v, actual: %v", expected, f.Rules[1]) + } +} + +func TestChangeRuleToFixedStrategy(t *testing.T) { + f := makeFeature("feature-id") + r := f.Rules[0] + rID := r.Id + vID := f.Variations[0].Id + expected := &proto.Strategy{ + Type: proto.Strategy_FIXED, + FixedStrategy: &proto.FixedStrategy{Variation: vID}, + } + patterns := []*struct { + ruleID string + strategy *proto.Strategy + expected error + }{ + { + ruleID: rID, + strategy: expected, + expected: nil, + }, + } + targetingCmd := &FeatureCommandHandler{ + feature: f, + eventFactory: makeEventFactory(f), + } + for _, p := range patterns { + cmd := &proto.ChangeRuleStrategyCommand{ + RuleId: p.ruleID, + Strategy: p.strategy, + } + err := targetingCmd.Handle(context.Background(), cmd) + assert.Equal(t, p.expected, err) + } + if !reflect.DeepEqual(expected, r.Strategy) { + t.Fatalf("Strategy is not equal. 
Expected: %v, actual: %v", expected, r.Strategy) + } +} + +func TestChangeRuleToRolloutStrategy(t *testing.T) { + f := makeFeature("feature-id") + r := f.Rules[0] + rID := r.Id + vID1 := f.Variations[0].Id + vID2 := f.Variations[1].Id + expected := &proto.Strategy{ + Type: proto.Strategy_ROLLOUT, + RolloutStrategy: &proto.RolloutStrategy{ + Variations: []*proto.RolloutStrategy_Variation{ + { + Variation: vID1, + Weight: 30000, + }, + { + Variation: vID2, + Weight: 70000, + }, + }, + }, + } + patterns := map[string]*struct { + ruleID string + strategy *proto.Strategy + expected error + }{ + "success": { + ruleID: rID, + strategy: expected, + expected: nil, + }, + } + targetingCmd := &FeatureCommandHandler{ + feature: f, + eventFactory: makeEventFactory(f), + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + cmd := &proto.ChangeRuleStrategyCommand{ + RuleId: p.ruleID, + Strategy: p.strategy, + } + err := targetingCmd.Handle(context.Background(), cmd) + assert.Equal(t, p.expected, err) + }) + } + if !reflect.DeepEqual(expected, r.Strategy) { + t.Fatalf("Strategy is not equal. Expected: %v, actual: %v", expected, r.Strategy) + } +} + +func TestChangeFixedStrategy(t *testing.T) { + f := makeFeature("feature-id") + r := f.Rules[0] + rID := r.Id + vID := f.Variations[0].Id + patterns := []*struct { + ruleID string + strategy *proto.FixedStrategy + expected error + }{ + { + ruleID: rID, + strategy: &proto.FixedStrategy{Variation: vID}, + expected: nil, + }, + } + targetingCmd := &FeatureCommandHandler{ + feature: f, + eventFactory: makeEventFactory(f), + } + for _, p := range patterns { + cmd := &proto.ChangeFixedStrategyCommand{ + RuleId: p.ruleID, + Strategy: p.strategy, + } + err := targetingCmd.Handle(context.Background(), cmd) + assert.Equal(t, p.expected, err) + } + if r.Strategy.FixedStrategy.Variation != vID { + t.Fatalf("Wrong variation id has been saved. 
Expected: %s, actual: %s", vID, r.Strategy.FixedStrategy.Variation) + } +} + +func TestChangeRolloutStrategy(t *testing.T) { + f := makeFeature("feature-id") + r := f.Rules[0] + rID := r.Id + vID1 := f.Variations[0].Id + vID2 := f.Variations[1].Id + expected := &proto.RolloutStrategy{Variations: []*proto.RolloutStrategy_Variation{ + { + Variation: vID1, + Weight: 70000, + }, + { + Variation: vID2, + Weight: 30000, + }, + }} + patterns := []*struct { + ruleID string + strategy *proto.RolloutStrategy + expected error + }{ + { + ruleID: rID, + strategy: expected, + expected: nil, + }, + } + targetingCmd := &FeatureCommandHandler{ + feature: f, + eventFactory: makeEventFactory(f), + } + for _, p := range patterns { + cmd := &proto.ChangeRolloutStrategyCommand{ + RuleId: p.ruleID, + Strategy: p.strategy, + } + err := targetingCmd.Handle(context.Background(), cmd) + assert.Equal(t, p.expected, err) + } + if !reflect.DeepEqual(expected, r.Strategy.RolloutStrategy) { + t.Fatalf("Different rollout strategies. 
Expected: %v, actual: %v", expected, r.Strategy.RolloutStrategy) + } +} + +func TestChangeDefaultStrategy(t *testing.T) { + patterns := map[string]*struct { + strategy *proto.Strategy + expectedErr error + }{ + "success": { + strategy: &proto.Strategy{ + Type: proto.Strategy_ROLLOUT, + RolloutStrategy: &proto.RolloutStrategy{ + Variations: []*proto.RolloutStrategy_Variation{ + { + Variation: "variation-A", + Weight: 30000, + }, + { + Variation: "variation-B", + Weight: 70000, + }, + }, + }, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + f := makeFeature("feature-id") + targetingCmd := &FeatureCommandHandler{ + feature: f, + eventFactory: makeEventFactory(f), + } + cmd := &proto.ChangeDefaultStrategyCommand{ + Strategy: p.strategy, + } + err := targetingCmd.Handle(context.Background(), cmd) + assert.Equal(t, p.expectedErr, err) + if p.expectedErr != nil { + return + } + assert.Equal(t, p.strategy, f.DefaultStrategy) + }) + } +} + +func TestEnableFeature(t *testing.T) { + patterns := []struct { + desc string + cmd *proto.EnableFeatureCommand + expected error + }{ + { + desc: "success", + cmd: &proto.EnableFeatureCommand{}, + expected: nil, + }, + } + for _, p := range patterns { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + f := makeFeature("feature-id") + cmd := &FeatureCommandHandler{ + feature: f, + eventFactory: makeEventFactory(f), + } + err := cmd.Handle(ctx, p.cmd) + assert.Equal(t, p.expected, err, p.desc) + assert.True(t, f.Feature.Enabled, p.desc) + } +} + +func TestDisableFeature(t *testing.T) { + patterns := []struct { + desc string + cmd *proto.DisableFeatureCommand + expected error + }{ + { + desc: "success", + cmd: &proto.DisableFeatureCommand{}, + expected: nil, + }, + } + for _, p := range patterns { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + f := makeFeature("feature-id") + f.Feature.Enabled = true + cmd := &FeatureCommandHandler{ 
+ feature: f, + eventFactory: makeEventFactory(f), + } + err := cmd.Handle(ctx, p.cmd) + assert.Equal(t, p.expected, err, p.desc) + assert.False(t, f.Feature.Enabled, p.desc) + } +} + +func TestResetSamplingSeed(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + patterns := map[string]struct { + cmd *proto.ResetSamplingSeedCommand + expected error + }{ + "success": { + cmd: &proto.ResetSamplingSeedCommand{}, + expected: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + f := makeFeature("fid") + assert.Empty(t, f.Feature.SamplingSeed) + cmd := &FeatureCommandHandler{ + feature: f, + eventFactory: makeEventFactory(f), + } + err := cmd.Handle(ctx, p.cmd) + assert.Equal(t, p.expected, err) + assert.NotEmpty(t, f.Feature.SamplingSeed) + }) + } +} + +func TestAddPrerequisite(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + patterns := map[string]struct { + cmd *proto.AddPrerequisiteCommand + expected error + }{ + "success": { + cmd: &proto.AddPrerequisiteCommand{ + Prerequisite: &proto.Prerequisite{ + FeatureId: "test-feature2", + VariationId: "variation D", + }, + }, + expected: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + f := makeFeature("fid") + assert.Empty(t, f.Feature.Prerequisites) + cmd := &FeatureCommandHandler{ + feature: f, + eventFactory: makeEventFactory(f), + } + err := cmd.Handle(ctx, p.cmd) + assert.Equal(t, p.expected, err) + assert.NotEmpty(t, f.Feature.Prerequisites) + }) + } +} + +func TestRemovePrerequisite(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + patterns := map[string]struct { + cmd *proto.RemovePrerequisiteCommand + prerequisite []*proto.Prerequisite + expected error + }{ + "success": { + cmd: &proto.RemovePrerequisiteCommand{ + FeatureId: "test-feature2", + }, + prerequisite: []*proto.Prerequisite{ + { + FeatureId: "test-feature2", + 
VariationId: "variation D", + }, + }, + expected: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + f := makeFeature("fid") + f.Prerequisites = p.prerequisite + assert.NotEmpty(t, f.Feature.Prerequisites) + cmd := &FeatureCommandHandler{ + feature: f, + eventFactory: makeEventFactory(f), + } + err := cmd.Handle(ctx, p.cmd) + assert.Equal(t, p.expected, err) + assert.Empty(t, f.Feature.Prerequisites) + }) + } +} + +func TestChangePrerequisiteVariation(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + patterns := map[string]struct { + cmd *proto.ChangePrerequisiteVariationCommand + prerequisite []*proto.Prerequisite + expectedErr error + expectedVariation string + }{ + "success": { + cmd: &proto.ChangePrerequisiteVariationCommand{ + Prerequisite: &proto.Prerequisite{ + FeatureId: "test-feature2", + VariationId: "variation A", + }, + }, + prerequisite: []*proto.Prerequisite{ + { + FeatureId: "test-feature2", + VariationId: "variation D", + }, + }, + expectedErr: nil, + expectedVariation: "variation A", + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + f := makeFeature("fid") + f.Prerequisites = p.prerequisite + assert.NotEmpty(t, f.Feature.Prerequisites) + cmd := &FeatureCommandHandler{ + feature: f, + eventFactory: makeEventFactory(f), + } + err := cmd.Handle(ctx, p.cmd) + assert.Equal(t, p.expectedErr, err) + assert.Equal(t, p.expectedVariation, f.Prerequisites[0].VariationId) + }) + } +} + +func makeFeature(id string) *domain.Feature { + return &domain.Feature{ + Feature: &proto.Feature{ + Id: id, + Name: "test feature", + Version: 1, + CreatedAt: time.Now().Unix(), + Variations: []*proto.Variation{ + { + Id: "variation-A", + Value: "A", + Name: "Variation A", + Description: "Thing does A", + }, + { + Id: "variation-B", + Value: "B", + Name: "Variation B", + Description: "Thing does B", + }, + }, + Targets: []*proto.Target{ + { + Variation: "variation-B", + 
Users: []string{ + "user1", + }, + }, + }, + Rules: []*proto.Rule{ + { + Id: "rule-1", + Strategy: &proto.Strategy{ + Type: proto.Strategy_FIXED, + FixedStrategy: &proto.FixedStrategy{ + Variation: "variation-A", + }, + }, + Clauses: []*proto.Clause{ + { + Id: "clause-1", + Attribute: "name", + Operator: proto.Clause_EQUALS, + Values: []string{ + "user1", + "user2", + }, + }, + }, + }, + }, + DefaultStrategy: &proto.Strategy{ + Type: proto.Strategy_FIXED, + FixedStrategy: &proto.FixedStrategy{ + Variation: "variation-B", + }, + }, + }, + } +} + +func makeEventFactory(feature *domain.Feature) *FeatureEventFactory { + return &FeatureEventFactory{ + editor: &eventproto.Editor{ + Email: "email", + Role: accountproto.Account_EDITOR, + }, + feature: feature, + environmentNamespace: "ns0", + } +} diff --git a/pkg/feature/command/segment.go b/pkg/feature/command/segment.go new file mode 100644 index 000000000..90b10fc4a --- /dev/null +++ b/pkg/feature/command/segment.go @@ -0,0 +1,319 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package command + +import ( + "context" + + "github.com/golang/protobuf/proto" // nolint:staticcheck + + domainevent "github.com/bucketeer-io/bucketeer/pkg/domainevent/domain" + "github.com/bucketeer-io/bucketeer/pkg/feature/domain" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + "github.com/bucketeer-io/bucketeer/pkg/uuid" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +type segmentCommandHandler struct { + editor *eventproto.Editor + segment *domain.Segment + publisher publisher.Publisher + environmentNamespace string +} + +func NewSegmentCommandHandler( + editor *eventproto.Editor, + segment *domain.Segment, + publisher publisher.Publisher, + environmentNamespace string, +) Handler { + return &segmentCommandHandler{ + editor: editor, + segment: segment, + publisher: publisher, + environmentNamespace: environmentNamespace, + } +} + +func (h *segmentCommandHandler) Handle(ctx context.Context, cmd Command) error { + switch c := cmd.(type) { + case *featureproto.CreateSegmentCommand: + return h.CreateSegment(ctx, c) + case *featureproto.DeleteSegmentCommand: + return h.DeleteSegment(ctx) + case *featureproto.ChangeSegmentNameCommand: + return h.ChangeName(ctx, c) + case *featureproto.ChangeSegmentDescriptionCommand: + return h.ChangeDescription(ctx, c) + case *featureproto.AddRuleCommand: + return h.AddRule(ctx, c) + case *featureproto.DeleteRuleCommand: + return h.DeleteRule(ctx, c) + case *featureproto.AddClauseCommand: + return h.AddClause(ctx, c) + case *featureproto.DeleteClauseCommand: + return h.DeleteClause(ctx, c) + case *featureproto.ChangeClauseAttributeCommand: + return h.ChangeClauseAttribute(ctx, c) + case *featureproto.ChangeClauseOperatorCommand: + return h.ChangeClauseOperator(ctx, c) + case *featureproto.AddClauseValueCommand: + return h.AddClauseValue(ctx, c) + case *featureproto.RemoveClauseValueCommand: + return h.RemoveClauseValue(ctx, c) 
+ case *featureproto.AddSegmentUserCommand: + return h.AddSegmentUser(ctx, c) + case *featureproto.DeleteSegmentUserCommand: + return h.DeleteSegmentUser(ctx, c) + case *featureproto.BulkUploadSegmentUsersCommand: + return h.BulkUploadSegmentUsers(ctx, c) + case *featureproto.ChangeBulkUploadSegmentUsersStatusCommand: + return h.ChangeBulkUploadSegmentUsersStatus(ctx, c) + default: + return errBadCommand + } +} + +func (h *segmentCommandHandler) CreateSegment(ctx context.Context, cmd *featureproto.CreateSegmentCommand) error { + return h.send(ctx, eventproto.Event_SEGMENT_CREATED, &eventproto.SegmentCreatedEvent{ + Id: h.segment.Id, + Name: h.segment.Name, + Description: h.segment.Description, + }) +} + +func (h *segmentCommandHandler) DeleteSegment(ctx context.Context) error { + if err := h.segment.SetDeleted(); err != nil { + return err + } + return h.send(ctx, eventproto.Event_SEGMENT_DELETED, &eventproto.SegmentDeletedEvent{ + Id: h.segment.Id, + }) +} + +func (h *segmentCommandHandler) ChangeName(ctx context.Context, cmd *featureproto.ChangeSegmentNameCommand) error { + if err := h.segment.ChangeName(cmd.Name); err != nil { + return err + } + return h.send(ctx, eventproto.Event_SEGMENT_NAME_CHANGED, &eventproto.SegmentNameChangedEvent{ + Id: h.segment.Id, + Name: h.segment.Name, + }) +} + +func (h *segmentCommandHandler) ChangeDescription( + ctx context.Context, + cmd *featureproto.ChangeSegmentDescriptionCommand, +) error { + if err := h.segment.ChangeDescription(cmd.Description); err != nil { + return err + } + return h.send(ctx, eventproto.Event_SEGMENT_DESCRIPTION_CHANGED, &eventproto.SegmentDescriptionChangedEvent{ + Id: h.segment.Id, + Description: h.segment.Description, + }) +} + +func (h *segmentCommandHandler) AddRule(ctx context.Context, cmd *featureproto.AddRuleCommand) error { + for _, clause := range cmd.Rule.Clauses { + id, err := uuid.NewUUID() + if err != nil { + return err + } + clause.Id = id.String() + } + if err := 
h.segment.AddRule(cmd.Rule); err != nil { + return err + } + return h.send(ctx, eventproto.Event_SEGMENT_RULE_ADDED, &eventproto.SegmentRuleAddedEvent{ + Id: h.segment.Id, + Rule: cmd.Rule, + }) +} + +func (h *segmentCommandHandler) DeleteRule(ctx context.Context, cmd *featureproto.DeleteRuleCommand) error { + if err := h.segment.DeleteRule(cmd.Id); err != nil { + return err + } + return h.send(ctx, eventproto.Event_SEGMENT_RULE_DELETED, &eventproto.SegmentRuleDeletedEvent{ + Id: h.segment.Id, + RuleId: cmd.Id, + }) +} + +func (h *segmentCommandHandler) AddClause(ctx context.Context, cmd *featureproto.AddClauseCommand) error { + id, err := uuid.NewUUID() + if err != nil { + return err + } + cmd.Clause.Id = id.String() + if err := h.segment.AddClause(cmd.RuleId, cmd.Clause); err != nil { + return err + } + return h.send(ctx, eventproto.Event_SEGMENT_RULE_CLAUSE_ADDED, &eventproto.SegmentRuleClauseAddedEvent{ + SegmentId: h.segment.Id, + RuleId: cmd.RuleId, + Clause: cmd.Clause, + }) +} + +func (h *segmentCommandHandler) DeleteClause(ctx context.Context, cmd *featureproto.DeleteClauseCommand) error { + if err := h.segment.DeleteClause(cmd.RuleId, cmd.Id); err != nil { + return err + } + return h.send(ctx, eventproto.Event_SEGMENT_RULE_CLAUSE_DELETED, &eventproto.SegmentRuleClauseDeletedEvent{ + SegmentId: h.segment.Id, + RuleId: cmd.RuleId, + ClauseId: cmd.Id, + }) +} + +func (h *segmentCommandHandler) ChangeClauseAttribute( + ctx context.Context, + cmd *featureproto.ChangeClauseAttributeCommand, +) error { + if err := h.segment.ChangeClauseAttribute(cmd.RuleId, cmd.Id, cmd.Attribute); err != nil { + return err + } + return h.send(ctx, eventproto.Event_SEGMENT_CLAUSE_ATTRIBUTE_CHANGED, &eventproto.SegmentClauseAttributeChangedEvent{ + SegmentId: h.segment.Id, + RuleId: cmd.RuleId, + ClauseId: cmd.Id, + Attribute: cmd.Attribute, + }) +} + +func (h *segmentCommandHandler) ChangeClauseOperator( + ctx context.Context, + cmd *featureproto.ChangeClauseOperatorCommand, +) 
error { + if err := h.segment.ChangeClauseOperator(cmd.RuleId, cmd.Id, cmd.Operator); err != nil { + return err + } + return h.send(ctx, eventproto.Event_SEGMENT_CLAUSE_OPERATOR_CHANGED, &eventproto.SegmentClauseOperatorChangedEvent{ + SegmentId: h.segment.Id, + RuleId: cmd.RuleId, + ClauseId: cmd.Id, + Operator: cmd.Operator, + }) +} + +func (h *segmentCommandHandler) AddClauseValue(ctx context.Context, cmd *featureproto.AddClauseValueCommand) error { + if err := h.segment.AddClauseValue(cmd.RuleId, cmd.Id, cmd.Value); err != nil { + return err + } + return h.send(ctx, eventproto.Event_SEGMENT_CLAUSE_VALUE_ADDED, &eventproto.SegmentClauseValueAddedEvent{ + SegmentId: h.segment.Id, + RuleId: cmd.RuleId, + ClauseId: cmd.Id, + Value: cmd.Value, + }) +} + +func (h *segmentCommandHandler) RemoveClauseValue( + ctx context.Context, + cmd *featureproto.RemoveClauseValueCommand, +) error { + if err := h.segment.RemoveClauseValue(cmd.RuleId, cmd.Id, cmd.Value); err != nil { + return err + } + return h.send(ctx, eventproto.Event_SEGMENT_CLAUSE_VALUE_REMOVED, &eventproto.SegmentClauseValueRemovedEvent{ + SegmentId: h.segment.Id, + RuleId: cmd.RuleId, + ClauseId: cmd.Id, + Value: cmd.Value, + }) +} + +func (h *segmentCommandHandler) AddSegmentUser(ctx context.Context, cmd *featureproto.AddSegmentUserCommand) error { + count := int64(len(cmd.UserIds)) + switch cmd.State { + case featureproto.SegmentUser_INCLUDED: + h.segment.AddIncludedUserCount(count) + } + return h.send(ctx, eventproto.Event_SEGMENT_USER_ADDED, &eventproto.SegmentUserAddedEvent{ + SegmentId: h.segment.Id, + UserIds: cmd.UserIds, + State: cmd.State, + }) +} + +func (h *segmentCommandHandler) DeleteSegmentUser( + ctx context.Context, + cmd *featureproto.DeleteSegmentUserCommand, +) error { + count := int64(len(cmd.UserIds)) + switch cmd.State { + case featureproto.SegmentUser_INCLUDED: + h.segment.RemoveIncludedUserCount(count) + } + return h.send(ctx, eventproto.Event_SEGMENT_USER_DELETED, 
&eventproto.SegmentUserDeletedEvent{ + SegmentId: h.segment.Id, + UserIds: cmd.UserIds, + State: cmd.State, + }) +} + +func (h *segmentCommandHandler) BulkUploadSegmentUsers( + ctx context.Context, + cmd *featureproto.BulkUploadSegmentUsersCommand, +) error { + h.segment.SetStatus(featureproto.Segment_UPLOADING) + return h.send(ctx, eventproto.Event_SEGMENT_BULK_UPLOAD_USERS, &eventproto.SegmentBulkUploadUsersEvent{ + SegmentId: h.segment.Id, + Status: featureproto.Segment_UPLOADING, + State: cmd.State, + }) +} + +func (h *segmentCommandHandler) ChangeBulkUploadSegmentUsersStatus( + ctx context.Context, + cmd *featureproto.ChangeBulkUploadSegmentUsersStatusCommand, +) error { + h.segment.SetStatus(cmd.Status) + switch cmd.State { + case featureproto.SegmentUser_INCLUDED: + h.segment.SetIncludedUserCount(cmd.Count) + } + return h.send( + ctx, + eventproto.Event_SEGMENT_BULK_UPLOAD_USERS_STATUS_CHANGED, + &eventproto.SegmentBulkUploadUsersStatusChangedEvent{ + SegmentId: h.segment.Id, + Status: cmd.Status, + State: cmd.State, + Count: cmd.Count, + }, + ) +} + +func (h *segmentCommandHandler) send(ctx context.Context, eventType eventproto.Event_Type, event proto.Message) error { + e, err := domainevent.NewEvent( + h.editor, + eventproto.Event_SEGMENT, + h.segment.Id, + eventType, + event, + h.environmentNamespace, + ) + if err != nil { + return err + } + if err := h.publisher.Publish(ctx, e); err != nil { + return err + } + return nil +} diff --git a/pkg/feature/command/segment_test.go b/pkg/feature/command/segment_test.go new file mode 100644 index 000000000..5aa9324ad --- /dev/null +++ b/pkg/feature/command/segment_test.go @@ -0,0 +1,85 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package command + +import ( + "context" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/feature/domain" + publishermock "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher/mock" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +func TestChangeBulkUploadSegmentUsersStatus(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + patterns := map[string]struct { + setup func(*segmentCommandHandler) + cmd *featureproto.ChangeBulkUploadSegmentUsersStatusCommand + expectedErr error + }{ + "succeeded included": { + setup: func(s *segmentCommandHandler) { + s.publisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil) + }, + cmd: &featureproto.ChangeBulkUploadSegmentUsersStatusCommand{ + Status: featureproto.Segment_SUCEEDED, + State: featureproto.SegmentUser_INCLUDED, + Count: 1, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + segment, err := domain.NewSegment("test-name", "test-description") + assert.NoError(t, err) + handler := newMockSegmentCommandHandler(t, mockController, segment) + p.setup(handler) + err = handler.Handle(ctx, p.cmd) + assert.Equal(t, p.expectedErr, err, msg) + assert.Equal(t, 
segment.Status, p.cmd.Status) + switch p.cmd.State { + case featureproto.SegmentUser_INCLUDED: + assert.Equal(t, segment.IncludedUserCount, p.cmd.Count) + default: + t.Fatal("unknown segment user state") + } + }) + } +} + +func newMockSegmentCommandHandler(t *testing.T, mockController *gomock.Controller, segment *domain.Segment) *segmentCommandHandler { + t.Helper() + return &segmentCommandHandler{ + &eventproto.Editor{ + Email: "email", + Role: accountproto.Account_OWNER, + }, + segment, + publishermock.NewMockPublisher(mockController), + "bucketeer-environment-space", + } +} diff --git a/pkg/feature/domain/BUILD.bazel b/pkg/feature/domain/BUILD.bazel new file mode 100644 index 000000000..ec26a7d20 --- /dev/null +++ b/pkg/feature/domain/BUILD.bazel @@ -0,0 +1,48 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "clause_evaluator.go", + "evaluation.go", + "feature.go", + "feature_last_used_info.go", + "rule_evaluator.go", + "segment.go", + "segment_evaluator.go", + "segment_user.go", + "strategy_evaluator.go", + "tag.go", + "user_evaluations.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/feature/domain", + visibility = ["//visibility:public"], + deps = [ + "//pkg/uuid:go_default_library", + "//proto/feature:go_default_library", + "//proto/user:go_default_library", + "@com_github_blang_semver//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "clause_evaluator_test.go", + "evaluation_test.go", + "feature_last_used_info_test.go", + "feature_test.go", + "rule_evaluator_test.go", + "segment_test.go", + "user_evaluations_test.go", + ], + embed = [":go_default_library"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/feature/domain", + deps = [ + "//proto/feature:go_default_library", + "//proto/user:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + 
"@com_github_stretchr_testify//require:go_default_library", + "@org_golang_google_protobuf//proto:go_default_library", + ], +) diff --git a/pkg/feature/domain/clause_evaluator.go b/pkg/feature/domain/clause_evaluator.go new file mode 100644 index 000000000..2d05368f2 --- /dev/null +++ b/pkg/feature/domain/clause_evaluator.go @@ -0,0 +1,279 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package domain + +import ( + "strconv" + "strings" + + "github.com/blang/semver" + + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +type clauseEvaluator struct { + segmentEvaluator +} + +func (c *clauseEvaluator) Evaluate( + targetValue string, + clause *featureproto.Clause, + userID string, + segmentUsers []*featureproto.SegmentUser, +) bool { + switch clause.Operator { + case featureproto.Clause_EQUALS: + // TODO: this should only be one value or equals makes no sense. 
+ return c.equals(targetValue, clause.Values) + case featureproto.Clause_IN: + return c.in(targetValue, clause.Values) + case featureproto.Clause_STARTS_WITH: + return c.startsWith(targetValue, clause.Values) + case featureproto.Clause_ENDS_WITH: + return c.endsWith(targetValue, clause.Values) + case featureproto.Clause_SEGMENT: + return c.segmentEvaluator.Evaluate(clause.Values, userID, segmentUsers) + case featureproto.Clause_GREATER: + return c.greater(targetValue, clause.Values) + case featureproto.Clause_GREATER_OR_EQUAL: + return c.greaterOrEqual(targetValue, clause.Values) + case featureproto.Clause_LESS: + return c.less(targetValue, clause.Values) + case featureproto.Clause_LESS_OR_EQUAL: + return c.lessOrEqual(targetValue, clause.Values) + case featureproto.Clause_BEFORE: + return c.before(targetValue, clause.Values) + case featureproto.Clause_AFTER: + return c.after(targetValue, clause.Values) + } + return false +} + +func (c *clauseEvaluator) equals(targetValue string, values []string) bool { + for i := range values { + if targetValue == values[i] { + return true + } + } + return false +} + +func (c *clauseEvaluator) in(targetValue string, values []string) bool { + for i := range values { + if targetValue == values[i] { + return true + } + } + return false +} + +func (c *clauseEvaluator) startsWith(targetValue string, values []string) bool { + for i := range values { + if strings.HasPrefix(targetValue, values[i]) { + return true + } + } + return false +} + +func (c *clauseEvaluator) endsWith(targetValue string, values []string) bool { + for i := range values { + if strings.HasSuffix(targetValue, values[i]) { + return true + } + } + return false +} + +func (c *clauseEvaluator) greater(targetValue string, values []string) bool { + floatTarget, floatValues, err := parseFloat(targetValue, values) + if err == nil { + for _, value := range floatValues { + if floatTarget > value { + return true + } + } + return false + } + semverTarget, semverValues, err := 
parseSemver(targetValue, values) + if err == nil { + for _, value := range semverValues { + if semverTarget.GT(value) { + return true + } + } + return false + } + for _, value := range values { + if targetValue > value { + return true + } + } + return false +} + +func (c *clauseEvaluator) greaterOrEqual(targetValue string, values []string) bool { + floatTarget, floatValues, err := parseFloat(targetValue, values) + if err == nil { + for _, value := range floatValues { + if floatTarget >= value { + return true + } + } + return false + } + semverTarget, semverValues, err := parseSemver(targetValue, values) + if err == nil { + for _, value := range semverValues { + if semverTarget.GTE(value) { + return true + } + } + return false + } + for _, value := range values { + if targetValue >= value { + return true + } + } + return false +} + +func (c *clauseEvaluator) less(targetValue string, values []string) bool { + floatTarget, floatValues, err := parseFloat(targetValue, values) + if err == nil { + for _, value := range floatValues { + if floatTarget < value { + return true + } + } + return false + } + semverTarget, semverValues, err := parseSemver(targetValue, values) + if err == nil { + for _, value := range semverValues { + if semverTarget.LT(value) { + return true + } + } + return false + } + for _, value := range values { + if targetValue < value { + return true + } + } + return false +} + +func (c *clauseEvaluator) lessOrEqual(targetValue string, values []string) bool { + floatTarget, floatValues, err := parseFloat(targetValue, values) + if err == nil { + for _, value := range floatValues { + if floatTarget <= value { + return true + } + } + return false + } + semverTarget, semverValues, err := parseSemver(targetValue, values) + if err == nil { + for _, value := range semverValues { + if semverTarget.LTE(value) { + return true + } + } + return false + } + for _, value := range values { + if targetValue <= value { + return true + } + } + return false +} + +func (c 
*clauseEvaluator) before(targetValue string, values []string) bool { + intTarget, intValues, err := parseInt(targetValue, values) + if err == nil { + for _, value := range intValues { + if intTarget < value { + return true + } + } + } + return false +} + +func (c *clauseEvaluator) after(targetValue string, values []string) bool { + intTarget, intValues, err := parseInt(targetValue, values) + if err == nil { + for _, value := range intValues { + if intTarget > value { + return true + } + } + } + return false +} + +func parseInt(targetValue string, values []string) (int64, []int64, error) { + intTarget, err := strconv.ParseInt(targetValue, 10, 64) + if err != nil { + return -1, nil, err + } + intValues := make([]int64, 0, len(values)) + for _, value := range values { + v, err := strconv.ParseInt(value, 10, 64) + if err == nil { + intValues = append(intValues, v) + } + } + return intTarget, intValues, nil +} + +func parseFloat(targetValue string, values []string) (float64, []float64, error) { + floatTarget, err := strconv.ParseFloat(targetValue, 64) + if err != nil { + return -1, nil, err + } + floatValues := make([]float64, 0, len(values)) + for _, value := range values { + v, err := strconv.ParseFloat(value, 64) + if err == nil { + floatValues = append(floatValues, v) + } + + } + return floatTarget, floatValues, nil +} + +func parseSemver(targetValue string, values []string) (semver.Version, []semver.Version, error) { + versionTarget, err := semver.Parse(targetValue) + if err != nil { + return semver.Version{}, nil, err + } + versionValues := make([]semver.Version, 0, len(values)) + for _, value := range values { + v, err := semver.Parse(value) + if err == nil { + versionValues = append(versionValues, v) + } + } + if err != nil { + return semver.Version{}, nil, err + } + return versionTarget, versionValues, nil +} diff --git a/pkg/feature/domain/clause_evaluator_test.go b/pkg/feature/domain/clause_evaluator_test.go new file mode 100644 index 000000000..752b441de --- 
/dev/null +++ b/pkg/feature/domain/clause_evaluator_test.go @@ -0,0 +1,1150 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package domain + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +func TestGreaterFloat(t *testing.T) { + t.Parallel() + testcases := []struct { + targetValue string + values []string + expected bool + }{ + // Int + { + targetValue: "1", + values: []string{"1"}, + expected: false, + }, + { + targetValue: "1", + values: []string{"1", "2", "3"}, + expected: false, + }, + { + targetValue: "1", + values: []string{"a", "1", "2.0"}, + expected: false, + }, + { + targetValue: "1", + values: []string{"a", "b", "c"}, + expected: false, + }, + { + targetValue: "1", + values: []string{"0a", "1a"}, + expected: false, + }, + { + targetValue: "1", + values: []string{"0"}, + expected: true, + }, + { + targetValue: "1", + values: []string{"0.0", "1.0", "2.0"}, + expected: true, + }, + { + targetValue: "1", + values: []string{"0.9", "1.0", "2.0"}, + expected: true, + }, + { + targetValue: "1", + values: []string{"0", "1", "2"}, + expected: true, + }, + { + targetValue: "1", + values: []string{"a", "0", "1.0"}, + expected: true, + }, + { + targetValue: "1", + values: []string{"a", "0", "1"}, + expected: true, + }, + { + targetValue: "1", + values: []string{"0a", "0"}, + expected: true, + }, + // Float + { + 
targetValue: "1.0", + values: []string{"1.0", "2.0", "3.0"}, + expected: false, + }, + { + targetValue: "1.0", + values: []string{"1", "2", "3"}, + expected: false, + }, + { + targetValue: "1.0", + values: []string{"a", "1", "2.0"}, + expected: false, + }, + { + targetValue: "1.0", + values: []string{"a", "b", "c"}, + expected: false, + }, + { + targetValue: "1.0", + values: []string{"0", "1.0", "2.0"}, + expected: true, + }, + { + targetValue: "1.0", + values: []string{"a", "0.0", "1.0"}, + expected: true, + }, + { + targetValue: "1.2", + values: []string{"a", "1.1", "2.0"}, + expected: true, + }, + } + clauseEvaluator := &clauseEvaluator{} + for i, tc := range testcases { + clause := &featureproto.Clause{ + Operator: featureproto.Clause_GREATER, + Values: tc.values, + } + des := fmt.Sprintf("index: %d", i) + res := clauseEvaluator.Evaluate(tc.targetValue, clause, "userId", nil) + assert.Equal(t, tc.expected, res, des) + } +} + +func TestGreaterSemver(t *testing.T) { + t.Parallel() + testcases := []struct { + targetValue string + values []string + expected bool + }{ + { + targetValue: "1.0.0", + values: []string{"1.0.0", "0.0", "1.0.1"}, + expected: false, + }, + { + targetValue: "1.0.0", + values: []string{"1.0.0", "1.0.1", "v0.0.7"}, + expected: false, + }, + { + targetValue: "0.0.8", + values: []string{"1.0.0", "0.0.9", "1.0.1"}, + expected: false, + }, + { + targetValue: "1.1.0", + values: []string{"1.1.0", "v1.0.9", "1.1.1"}, + expected: false, + }, + { + targetValue: "2.1.0", + values: []string{"2.1.0", "v2.0.9", "2.1.1"}, + expected: false, + }, + { + targetValue: "1.0.1", + values: []string{"1.0.1", "1.0.0", "v0.0.7"}, + expected: true, + }, + { + targetValue: "1.1.1", + values: []string{"1.1.1", "v1.0.9", "1.1.0"}, + expected: true, + }, + { + targetValue: "2.1.1", + values: []string{"2.1.1", "v2.0.9", "2.1.0"}, + expected: true, + }, + } + clauseEvaluator := &clauseEvaluator{} + for i, tc := range testcases { + clause := &featureproto.Clause{ + Operator: 
featureproto.Clause_GREATER, + Values: tc.values, + } + des := fmt.Sprintf("index: %d", i) + res := clauseEvaluator.Evaluate(tc.targetValue, clause, "userId", nil) + assert.Equal(t, tc.expected, res, des) + } +} + +func TestGreaterString(t *testing.T) { + t.Parallel() + testcases := []struct { + targetValue string + values []string + expected bool + }{ + { + targetValue: "b", + values: []string{"c", "d", "e"}, + expected: false, + }, + { + targetValue: "v1.0.0", + values: []string{"v2.0.0", "v1.0.9", "v1.0.8"}, + expected: false, + }, + { + targetValue: "b", + values: []string{"1", "a", "2.0"}, + expected: true, + }, + { + targetValue: "b", + values: []string{"c", "d", "a"}, + expected: true, + }, + { + targetValue: "v1.0.0", + values: []string{"v1.0.0", "v1.0.9", "v0.0.9"}, + expected: true, + }, + } + clauseEvaluator := &clauseEvaluator{} + for i, tc := range testcases { + clause := &featureproto.Clause{ + Operator: featureproto.Clause_GREATER, + Values: tc.values, + } + des := fmt.Sprintf("index: %d", i) + res := clauseEvaluator.Evaluate(tc.targetValue, clause, "userId", nil) + assert.Equal(t, tc.expected, res, des) + } +} + +func TestGreaterOrEqualFloat(t *testing.T) { + t.Parallel() + testcases := []struct { + targetValue string + values []string + expected bool + }{ + // Int + { + targetValue: "1", + values: []string{"2"}, + expected: false, + }, + { + targetValue: "1", + values: []string{"2", "3", "4"}, + expected: false, + }, + { + targetValue: "1", + values: []string{"2.0", "3.0", "4.0"}, + expected: false, + }, + { + targetValue: "1", + values: []string{"a", "2", "3.0"}, + expected: false, + }, + { + targetValue: "1", + values: []string{"a", "b", "c"}, + expected: false, + }, + { + targetValue: "1", + values: []string{"0a", "1a"}, + expected: false, + }, + { + targetValue: "1", + values: []string{"1"}, + expected: true, + }, + { + targetValue: "1", + values: []string{"0", "1", "2"}, + expected: true, + }, + { + targetValue: "1", + values: []string{"0.0", 
"1.0", "2.0"}, + expected: true, + }, + { + targetValue: "1", + values: []string{"1.0", "2.0", "3.0"}, + expected: true, + }, + { + targetValue: "1", + values: []string{"1", "2", "3"}, + expected: true, + }, + { + targetValue: "1", + values: []string{"a", "0", "1.0"}, + expected: true, + }, + { + targetValue: "1", + values: []string{"a", "0", "1"}, + expected: true, + }, + { + targetValue: "1", + values: []string{"a", "1", "2.0"}, + expected: true, + }, + { + targetValue: "1", + values: []string{"a", "1.0", "2"}, + expected: true, + }, + { + targetValue: "1", + values: []string{"0a", "0"}, + expected: true, + }, + // Float + { + targetValue: "1.0", + values: []string{"2.0", "3.0", "4.0"}, + expected: false, + }, + { + targetValue: "1.0", + values: []string{"2", "3", "4"}, + expected: false, + }, + { + targetValue: "1.0", + values: []string{"a", "1.1", "2.0"}, + expected: false, + }, + { + targetValue: "1.0", + values: []string{"a", "b", "c"}, + expected: false, + }, + { + targetValue: "1.0", + values: []string{"0.9", "2.0", "3.0"}, + expected: true, + }, + { + targetValue: "1.0", + values: []string{"a", "0", "2.0"}, + expected: true, + }, + { + targetValue: "1.1", + values: []string{"1", "2.0", "3.0"}, + expected: true, + }, + { + targetValue: "1.1", + values: []string{"1.1", "2.0", "3.0"}, + expected: true, + }, + { + targetValue: "1.1", + values: []string{"a", "1.0", "2.0"}, + expected: true, + }, + } + clauseEvaluator := &clauseEvaluator{} + for i, tc := range testcases { + clause := &featureproto.Clause{ + Operator: featureproto.Clause_GREATER_OR_EQUAL, + Values: tc.values, + } + des := fmt.Sprintf("index: %d", i) + res := clauseEvaluator.Evaluate(tc.targetValue, clause, "userId", nil) + assert.Equal(t, tc.expected, res, des) + } +} + +func TestGreaterOrEqualSemver(t *testing.T) { + t.Parallel() + testcases := []struct { + targetValue string + values []string + expected bool + }{ + { + targetValue: "1.0.0", + values: []string{"1.0.1", "0.0", "1.0.2"}, + 
expected: false, + }, + { + targetValue: "1.0.0", + values: []string{"1.0.1", "1.0.2", "v0.0.7"}, + expected: false, + }, + { + targetValue: "0.0.8", + values: []string{"1.0.0", "0.0.9", "1.0.1"}, + expected: false, + }, + { + targetValue: "1.1.0", + values: []string{"1.1.1", "v1.0.9", "1.1.2"}, + expected: false, + }, + { + targetValue: "2.1.0", + values: []string{"2.1.1", "v2.0.9", "2.1.2"}, + expected: false, + }, + { + targetValue: "1.0.0", + values: []string{"1.0.1", "1.0.0", "v0.0.7"}, + expected: true, + }, + { + targetValue: "1.1.1", + values: []string{"1.1.2", "v1.0.9", "1.1.1"}, + expected: true, + }, + { + targetValue: "2.1.1", + values: []string{"2.1.2", "v2.0.9", "2.1.1"}, + expected: true, + }, + { + targetValue: "1.0.1", + values: []string{"1.0.2", "1.0.1", "v0.0.7"}, + expected: true, + }, + { + targetValue: "1.1.1", + values: []string{"1.1.2", "v1.0.9", "1.1.0"}, + expected: true, + }, + { + targetValue: "2.1.1", + values: []string{"2.1.2", "v2.0.9", "2.1.0"}, + expected: true, + }, + } + clauseEvaluator := &clauseEvaluator{} + for i, tc := range testcases { + clause := &featureproto.Clause{ + Operator: featureproto.Clause_GREATER_OR_EQUAL, + Values: tc.values, + } + des := fmt.Sprintf("index: %d", i) + res := clauseEvaluator.Evaluate(tc.targetValue, clause, "userId", nil) + assert.Equal(t, tc.expected, res, des) + } +} + +func TestGreaterOrEqualString(t *testing.T) { + t.Parallel() + testcases := []struct { + targetValue string + values []string + expected bool + }{ + { + targetValue: "b", + values: []string{"c", "d", "e"}, + expected: false, + }, + { + targetValue: "v1.0.0", + values: []string{"v2.0.0", "v1.0.9", "v1.0.8"}, + expected: false, + }, + { + targetValue: "b", + values: []string{"1", "a", "2.0"}, + expected: true, + }, + { + targetValue: "b", + values: []string{"d", "c", "b"}, + expected: true, + }, + { + targetValue: "b", + values: []string{"c", "d", "a"}, + expected: true, + }, + { + targetValue: "b", + values: []string{"d", "c", 
"b"}, + expected: true, + }, + { + targetValue: "v1.0.0", + values: []string{"v1.0.8", "v1.0.9", "v1.0.0"}, + expected: true, + }, + { + targetValue: "v1.0.0", + values: []string{"v1.0.8", "v1.0.9", "v0.0.9"}, + expected: true, + }, + } + clauseEvaluator := &clauseEvaluator{} + for i, tc := range testcases { + clause := &featureproto.Clause{ + Operator: featureproto.Clause_GREATER_OR_EQUAL, + Values: tc.values, + } + des := fmt.Sprintf("index: %d", i) + res := clauseEvaluator.Evaluate(tc.targetValue, clause, "userId", nil) + assert.Equal(t, tc.expected, res, des) + } +} + +func TestLessThanSemver(t *testing.T) { + t.Parallel() + testcases := []struct { + targetValue string + values []string + expected bool + }{ + { + targetValue: "1.0.0", + values: []string{"1.0.0", "0.0", "0.0.9"}, + expected: false, + }, + { + targetValue: "1.0.0", + values: []string{"1.0.0", "v0.0.8", "0.0.7"}, + expected: false, + }, + { + targetValue: "0.0.8", + values: []string{"0.0.8", "0.0.7", "v0.0.9"}, + expected: false, + }, + { + targetValue: "1.1.0", + values: []string{"1.1.0", "v1.0.9", "1.0.8"}, + expected: false, + }, + { + targetValue: "2.1.0", + values: []string{"2.1.0", "v2.0.9", "2.0.9"}, + expected: false, + }, + { + targetValue: "1.0.1", + values: []string{"1.0.1", "v0.0.7", "1.0.2"}, + expected: true, + }, + { + targetValue: "1.1.1", + values: []string{"1.1.1", "v1.0.9", "1.1.2"}, + expected: true, + }, + { + targetValue: "2.1.1", + values: []string{"2.1.1", "v2.0.9", "2.1.2"}, + expected: true, + }, + } + clauseEvaluator := &clauseEvaluator{} + for i, tc := range testcases { + clause := &featureproto.Clause{ + Operator: featureproto.Clause_LESS, + Values: tc.values, + } + des := fmt.Sprintf("index: %d", i) + res := clauseEvaluator.Evaluate(tc.targetValue, clause, "userId", nil) + assert.Equal(t, tc.expected, res, des) + } +} + +func TestLessFloat(t *testing.T) { + t.Parallel() + testcases := []struct { + targetValue string + values []string + expected bool + }{ + // Int + { 
+ targetValue: "3", + values: []string{"3"}, + expected: false, + }, + { + targetValue: "3", + values: []string{"1", "2", "3"}, + expected: false, + }, + { + targetValue: "3", + values: []string{"a", "1", "2.0"}, + expected: false, + }, + { + targetValue: "3", + values: []string{"a", "b", "c"}, + expected: false, + }, + { + targetValue: "3", + values: []string{"0a", "1a"}, + expected: false, + }, + { + targetValue: "3", + values: []string{"4"}, + expected: true, + }, + { + targetValue: "3", + values: []string{"2.0", "3.0", "4.0"}, + expected: true, + }, + { + targetValue: "3", + values: []string{"1.0", "2.0", "3.1"}, + expected: true, + }, + { + targetValue: "3", + values: []string{"2", "3", "4"}, + expected: true, + }, + { + targetValue: "3", + values: []string{"d", "3", "3.5"}, + expected: true, + }, + { + targetValue: "3", + values: []string{"a", "0", "4"}, + expected: true, + }, + { + targetValue: "3", + values: []string{"4a", "4"}, + expected: true, + }, + // Float + { + targetValue: "3.0", + values: []string{"1.0", "2.0", "3.0"}, + expected: false, + }, + { + targetValue: "3.0", + values: []string{"1", "2", "3"}, + expected: false, + }, + { + targetValue: "3.0", + values: []string{"a", "1", "2.0"}, + expected: false, + }, + { + targetValue: "3.0", + values: []string{"a", "b", "c"}, + expected: false, + }, + { + targetValue: "3.0", + values: []string{"2", "3.0", "3.1"}, + expected: true, + }, + { + targetValue: "3.0", + values: []string{"a", "0.0", "3.9"}, + expected: true, + }, + { + targetValue: "3.2", + values: []string{"a", "1.1", "3.5"}, + expected: true, + }, + } + clauseEvaluator := &clauseEvaluator{} + for i, tc := range testcases { + clause := &featureproto.Clause{ + Operator: featureproto.Clause_LESS, + Values: tc.values, + } + des := fmt.Sprintf("index: %d", i) + res := clauseEvaluator.Evaluate(tc.targetValue, clause, "userId", nil) + assert.Equal(t, tc.expected, res, des) + } +} + +func TestLessString(t *testing.T) { + t.Parallel() + testcases := 
[]struct { + targetValue string + values []string + expected bool + }{ + { + targetValue: "c", + values: []string{"c", "b", "a"}, + expected: false, + }, + { + targetValue: "c", + values: []string{"1", "a", "2.0"}, + expected: false, + }, + { + targetValue: "v2.0.0", + values: []string{"v2.0.0", "v1.0.9", "v1.0.8"}, + expected: false, + }, + { + targetValue: "c", + values: []string{"b", "c", "d"}, + expected: true, + }, + { + targetValue: "c", + values: []string{"3", "1.0", "d"}, + expected: true, + }, + { + targetValue: "v2.0.0", + values: []string{"v1.0.0", "v1.0.9", "v2.1.0"}, + expected: true, + }, + } + clauseEvaluator := &clauseEvaluator{} + for i, tc := range testcases { + clause := &featureproto.Clause{ + Operator: featureproto.Clause_LESS, + Values: tc.values, + } + des := fmt.Sprintf("index: %d", i) + res := clauseEvaluator.Evaluate(tc.targetValue, clause, "userId", nil) + assert.Equal(t, tc.expected, res, des) + } +} + +func TestLessOrEqualFloat(t *testing.T) { + t.Parallel() + testcases := []struct { + targetValue string + values []string + expected bool + }{ + // Int + { + targetValue: "3", + values: []string{"2"}, + expected: false, + }, + { + targetValue: "3", + values: []string{"0", "1", "2"}, + expected: false, + }, + { + targetValue: "3", + values: []string{"0", "1.0", "2.0"}, + expected: false, + }, + { + targetValue: "3", + values: []string{"a", "1", "2.0"}, + expected: false, + }, + { + targetValue: "3", + values: []string{"a", "b", "c"}, + expected: false, + }, + { + targetValue: "3", + values: []string{"3a", "4a"}, + expected: false, + }, + { + targetValue: "3", + values: []string{"3"}, + expected: true, + }, + { + targetValue: "3", + values: []string{"2", "3", "4"}, + expected: true, + }, + { + targetValue: "3", + values: []string{"1.0", "2.0", "3.0"}, + expected: true, + }, + { + targetValue: "3", + values: []string{"1.0", "2.0", "3.1"}, + expected: true, + }, + { + targetValue: "3", + values: []string{"1", "2", "4"}, + expected: true, + }, 
+ { + targetValue: "3", + values: []string{"a", "0", "3.0"}, + expected: true, + }, + { + targetValue: "3", + values: []string{"a", "1.0", "4"}, + expected: true, + }, + { + targetValue: "3", + values: []string{"a", "1", "3.5"}, + expected: true, + }, + { + targetValue: "3", + values: []string{"3a", "3"}, + expected: true, + }, + // Float + { + targetValue: "3.0", + values: []string{"0", "1.0", "2.0"}, + expected: false, + }, + { + targetValue: "3.0", + values: []string{"0", "1", "2"}, + expected: false, + }, + { + targetValue: "3.0", + values: []string{"a", "1.1", "2.0"}, + expected: false, + }, + { + targetValue: "3.0", + values: []string{"a", "b", "c"}, + expected: false, + }, + { + targetValue: "3.0", + values: []string{"0.9", "2.0", "3.0"}, + expected: true, + }, + { + targetValue: "3.0", + values: []string{"a", "0", "3.1"}, + expected: true, + }, + { + targetValue: "3.1", + values: []string{"1", "2.0", "3.9"}, + expected: true, + }, + { + targetValue: "3.1", + values: []string{"1.1", "2.0", "4"}, + expected: true, + }, + { + targetValue: "3.1", + values: []string{"a", "1.0", "3.1"}, + expected: true, + }, + } + clauseEvaluator := &clauseEvaluator{} + for i, tc := range testcases { + clause := &featureproto.Clause{ + Operator: featureproto.Clause_LESS_OR_EQUAL, + Values: tc.values, + } + des := fmt.Sprintf("index: %d", i) + res := clauseEvaluator.Evaluate(tc.targetValue, clause, "userId", nil) + assert.Equal(t, tc.expected, res, des) + } +} + +func TestLessThanOrEqualSemver(t *testing.T) { + t.Parallel() + testcases := []struct { + targetValue string + values []string + expected bool + }{ + { + targetValue: "1.0.1", + values: []string{"1.0.0", "0.0", "0.0.9"}, + expected: false, + }, + { + targetValue: "1.0.1", + values: []string{"1.0.0", "v0.0.8", "0.0.7"}, + expected: false, + }, + { + targetValue: "0.0.9", + values: []string{"0.0.8", "0.0.7", "v0.0.9"}, + expected: false, + }, + { + targetValue: "1.1.1", + values: []string{"1.1.0", "v1.0.9", "1.0.8"}, + 
expected: false, + }, + { + targetValue: "2.1.1", + values: []string{"2.1.0", "v2.0.9", "2.0.9"}, + expected: false, + }, + { + targetValue: "1.0.1", + values: []string{"1.0.1", "v0.0.7", "1.0.0"}, + expected: true, + }, + { + targetValue: "1.1.1", + values: []string{"1.1.1", "v1.0.9", "1.1.0"}, + expected: true, + }, + { + targetValue: "2.1.1", + values: []string{"2.1.1", "v2.0.9", "2.1.0"}, + expected: true, + }, + { + targetValue: "1.0.1", + values: []string{"1.0.0", "v0.0.7", "1.0.2"}, + expected: true, + }, + { + targetValue: "1.1.1", + values: []string{"1.1.0", "v1.0.9", "1.1.2"}, + expected: true, + }, + { + targetValue: "2.1.1", + values: []string{"2.1.0", "v2.0.9", "2.1.2"}, + expected: true, + }, + } + clauseEvaluator := &clauseEvaluator{} + for i, tc := range testcases { + clause := &featureproto.Clause{ + Operator: featureproto.Clause_LESS_OR_EQUAL, + Values: tc.values, + } + des := fmt.Sprintf("index: %d", i) + res := clauseEvaluator.Evaluate(tc.targetValue, clause, "userId", nil) + assert.Equal(t, tc.expected, res, des) + } +} + +func TestLessOrEqualString(t *testing.T) { + t.Parallel() + testcases := []struct { + targetValue string + values []string + expected bool + }{ + { + targetValue: "d", + values: []string{"a", "b", "c"}, + expected: false, + }, + { + targetValue: "c", + values: []string{"1", "a", "2.0"}, + expected: false, + }, + { + targetValue: "v2.0.0", + values: []string{"v1.0.0", "v1.0.9", "v1.0.8"}, + expected: false, + }, + { + targetValue: "c", + values: []string{"3.0", "c", "b"}, + expected: true, + }, + { + targetValue: "c", + values: []string{"c", "b", "a"}, + expected: true, + }, + { + targetValue: "c", + values: []string{"a", "b", "d"}, + expected: true, + }, + { + targetValue: "v2.0.0", + values: []string{"v1.0.0", "v1.0.9", "v2.0.0"}, + expected: true, + }, + { + targetValue: "v2.0.0", + values: []string{"v1.0.0", "v1.0.9", "v2.0.1"}, + expected: true, + }, + } + clauseEvaluator := &clauseEvaluator{} + for i, tc := range 
testcases { + clause := &featureproto.Clause{ + Operator: featureproto.Clause_LESS_OR_EQUAL, + Values: tc.values, + } + des := fmt.Sprintf("index: %d", i) + res := clauseEvaluator.Evaluate(tc.targetValue, clause, "userId", nil) + assert.Equal(t, tc.expected, res, des) + } +} + +func TestBeforeInt(t *testing.T) { + t.Parallel() + testcases := []struct { + targetValue string + values []string + expected bool + }{ + // Int + { + targetValue: "1519223320", + values: []string{"1419223320"}, + expected: false, + }, + { + targetValue: "1519223320", + values: []string{"1619223320"}, + expected: true, + }, + { + targetValue: "1519223320", + values: []string{"1519223320", "1519200000"}, + expected: false, + }, + // Strings + { + targetValue: "15192XXX23320", + values: []string{"1519223330", "1519223311", "1519223300"}, + expected: false, + }, + { + targetValue: "1519223320", + values: []string{"1519223320", "1519200000", "15192XXX23300"}, + expected: false, + }, + // Float + { + targetValue: "15192233.30", + values: []string{"1519223330", "1519223311", "1519223300"}, + expected: false, + }, + { + targetValue: "1519223320", + values: []string{"1519223320", "1519200000", "15192233.00"}, + expected: false, + }, + } + + clauseEvaluator := &clauseEvaluator{} + for i, tc := range testcases { + clause := &featureproto.Clause{ + Operator: featureproto.Clause_BEFORE, + Values: tc.values, + } + des := fmt.Sprintf("index: %d", i) + res := clauseEvaluator.Evaluate(tc.targetValue, clause, "userId", nil) + assert.Equal(t, tc.expected, res, des) + } +} + +func TestAfterInt(t *testing.T) { + t.Parallel() + testcases := []struct { + targetValue string + values []string + expected bool + }{ + // Int + { + targetValue: "1519223320", + values: []string{"1419223320"}, + expected: true, + }, + { + targetValue: "1519223320", + values: []string{"1619223320"}, + expected: false, + }, + { + targetValue: "1519223320", + values: []string{"1519223320", "1519223319"}, + expected: true, + }, + // Strings 
+ { + targetValue: "15192XXX23320", + values: []string{"1519223330", "1519223311", "1519223300"}, + expected: false, + }, + { + targetValue: "1519223320", + values: []string{"1519223320", "1519200000", "15192XXX23300"}, + expected: true, + }, + // Float + { + targetValue: "15192233.30", + values: []string{"1519223330", "1519223311", "1519223300"}, + expected: false, + }, + { + targetValue: "1519223320", + values: []string{"1519223320", "1519200000", "15192233.00"}, + expected: true, + }, + } + + clauseEvaluator := &clauseEvaluator{} + for i, tc := range testcases { + clause := &featureproto.Clause{ + Operator: featureproto.Clause_AFTER, + Values: tc.values, + } + des := fmt.Sprintf("index: %d", i) + res := clauseEvaluator.Evaluate(tc.targetValue, clause, "userId", nil) + assert.Equal(t, tc.expected, res, des) + } +} diff --git a/pkg/feature/domain/evaluation.go b/pkg/feature/domain/evaluation.go new file mode 100644 index 000000000..347245ea5 --- /dev/null +++ b/pkg/feature/domain/evaluation.go @@ -0,0 +1,139 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package domain + +import ( + "fmt" + + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" + userproto "github.com/bucketeer-io/bucketeer/proto/user" +) + +type Mark int + +const ( + unvisited Mark = iota + temporary + permanently +) + +func EvaluationID(featureID string, featureVersion int32, userID string) string { + return fmt.Sprintf("%s:%d:%s", featureID, featureVersion, userID) +} + +func EvaluateFeatures( + fs []*featureproto.Feature, + user *userproto.User, + mapSegmentUsers map[string][]*featureproto.SegmentUser, + targetTag string, +) (*featureproto.UserEvaluations, error) { + flagVariations := map[string]string{} + // fs need to be sorted in order from upstream to downstream. + sortedFs, err := TopologicalSort(fs) + if err != nil { + return nil, err + } + evaluations := make([]*featureproto.Evaluation, 0, len(fs)) + for _, f := range sortedFs { + feature := &Feature{Feature: f} + segmentUsers := []*featureproto.SegmentUser{} + for _, id := range feature.ListSegmentIDs() { + segmentUsers = append(segmentUsers, mapSegmentUsers[id]...) + } + reason, variation, err := feature.assignUser(user, segmentUsers, flagVariations) + if err != nil { + return nil, err + } + // VariationId is used to check if prerequisite flag's result is what user expects it to be. + flagVariations[f.Id] = variation.Id + + // We need to filter evaluations because we fetch all features in the environment namespace. 
+ if exist := tagExist(f.Tags, targetTag); !exist { + continue + } + // FIXME: Remove the next two lines when the Variation + // no longer is being used + // For security reasons, it removes the variation name and description + variation.Name = "" + variation.Description = "" + evaluationID := EvaluationID(f.Id, f.Version, user.Id) + evaluation := &featureproto.Evaluation{ + Id: evaluationID, + FeatureId: f.Id, + FeatureVersion: f.Version, + UserId: user.Id, + VariationId: variation.Id, + VariationValue: variation.Value, + Variation: variation, // deprecated + Reason: reason, + } + evaluations = append(evaluations, evaluation) + } + id := UserEvaluationsID(user.Id, user.Data, fs) + userEvaluations := NewUserEvaluations(id, evaluations) + return userEvaluations.UserEvaluations, nil +} + +func tagExist(tags []string, target string) bool { + for _, tag := range tags { + if tag == target { + return true + } + } + return false +} + +// This logic is based on https://en.wikipedia.org/wiki/Topological_sorting. +// Note: This algorithm is not an exact topological sort because the order is reversed (=from upstream to downstream). 
+func TopologicalSort(features []*featureproto.Feature) ([]*featureproto.Feature, error) { + marks := map[string]Mark{} + mapFeatures := map[string]*featureproto.Feature{} + for _, f := range features { + marks[f.Id] = unvisited + mapFeatures[f.Id] = f + } + var sortedFeatures []*featureproto.Feature + var sort func(f *featureproto.Feature) error + sort = func(f *featureproto.Feature) error { + if marks[f.Id] == permanently { + return nil + } + if marks[f.Id] == temporary { + return ErrCycleExists + } + marks[f.Id] = temporary + for _, p := range f.Prerequisites { + pf, ok := mapFeatures[p.FeatureId] + if !ok { + return errFeatureNotFound + } + if err := sort(pf); err != nil { + return err + } + } + marks[f.Id] = permanently + sortedFeatures = append(sortedFeatures, f) + return nil + } + for _, f := range features { + if marks[f.Id] != unvisited { + continue + } + if err := sort(f); err != nil { + return nil, err + } + } + return sortedFeatures, nil +} diff --git a/pkg/feature/domain/evaluation_test.go b/pkg/feature/domain/evaluation_test.go new file mode 100644 index 000000000..fb8a9ba90 --- /dev/null +++ b/pkg/feature/domain/evaluation_test.go @@ -0,0 +1,367 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package domain + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "google.golang.org/protobuf/proto" + + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" + userproto "github.com/bucketeer-io/bucketeer/proto/user" +) + +func TestEvaluateFeature(t *testing.T) { + t.Parallel() + f := makeFeature("fID-0") + f.Tags = append(f.Tags, "tag-1") + f1 := makeFeature("fID-1") + f1.Tags = append(f1.Tags, "tag-1") + f1.Enabled = false + f1.OffVariation = f1.Variations[0].Id + f2 := makeFeature("fID-2") + f2.Tags = append(f2.Tags, "tag-1") + patterns := []struct { + enabled bool + offVariation string + userID string + prerequisite []*featureproto.Prerequisite + expected *featureproto.Evaluation + expectedError error + }{ + { + enabled: false, + offVariation: "not-found", + userID: "uID-0", + prerequisite: []*featureproto.Prerequisite{}, + expected: nil, + expectedError: errVariationNotFound, + }, + { + enabled: false, + offVariation: "variation-A", + userID: "uID-0", + prerequisite: []*featureproto.Prerequisite{}, + expected: &featureproto.Evaluation{ + Id: EvaluationID(f.Id, f.Version, "uID-0"), + FeatureId: "fID-0", + FeatureVersion: 1, + UserId: "uID-0", + VariationId: "variation-A", + VariationValue: "A", + Variation: &featureproto.Variation{ + Id: "variation-A", + Value: "A", + }, + Reason: &featureproto.Reason{Type: featureproto.Reason_OFF_VARIATION}, + }, + expectedError: nil, + }, + { + enabled: false, + offVariation: "", + userID: "uID-0", + prerequisite: []*featureproto.Prerequisite{}, + expected: &featureproto.Evaluation{ + Id: EvaluationID(f.Id, f.Version, "uID-0"), + FeatureId: "fID-0", + FeatureVersion: 1, + UserId: "uID-0", + VariationId: "variation-B", + VariationValue: "B", + Variation: &featureproto.Variation{ + Id: "variation-B", + Value: "B", + }, + Reason: &featureproto.Reason{Type: featureproto.Reason_DEFAULT}, + }, + expectedError: nil, + }, + { + enabled: true, + offVariation: "", + userID: "uID-2", + prerequisite: 
[]*featureproto.Prerequisite{}, + expected: &featureproto.Evaluation{ + Id: EvaluationID(f.Id, f.Version, "uID-2"), + FeatureId: "fID-0", + FeatureVersion: 1, + UserId: "uID-2", + VariationId: "variation-B", + VariationValue: "B", + Variation: &featureproto.Variation{ + Id: "variation-B", + Value: "B", + }, + Reason: &featureproto.Reason{Type: featureproto.Reason_DEFAULT}, + }, + expectedError: nil, + }, + { + enabled: true, + offVariation: "v1", + userID: "uID-2", + prerequisite: []*featureproto.Prerequisite{}, + expected: &featureproto.Evaluation{ + Id: EvaluationID(f.Id, f.Version, "uID-2"), + FeatureId: "fID-0", + FeatureVersion: 1, + UserId: "uID-2", + VariationId: "variation-B", + VariationValue: "B", + Variation: &featureproto.Variation{ + Id: "variation-B", + Value: "B", + }, + Reason: &featureproto.Reason{Type: featureproto.Reason_DEFAULT}, + }, + expectedError: nil, + }, + { + enabled: true, + offVariation: "variation-A", + userID: "uID-2", + prerequisite: []*featureproto.Prerequisite{ + { + FeatureId: f1.Id, + VariationId: f1.Variations[1].Id, + }, + }, + expected: &featureproto.Evaluation{ + Id: EvaluationID(f.Id, f.Version, "uID-2"), + FeatureId: "fID-0", + FeatureVersion: 1, + UserId: "uID-2", + VariationId: "variation-A", + VariationValue: "A", + Variation: &featureproto.Variation{ + Id: "variation-A", + Value: "A", + }, + Reason: &featureproto.Reason{Type: featureproto.Reason_PREREQUISITE}, + }, + expectedError: nil, + }, + { + enabled: true, + offVariation: "", + userID: "uID-2", + prerequisite: []*featureproto.Prerequisite{ + { + FeatureId: f2.Id, + VariationId: f2.Variations[0].Id, + }, + }, + expected: &featureproto.Evaluation{ + Id: EvaluationID(f.Id, f.Version, "uID-2"), + FeatureId: "fID-0", + FeatureVersion: 1, + UserId: "uID-2", + VariationId: "variation-B", + VariationValue: "B", + Variation: &featureproto.Variation{ + Id: "variation-B", + Value: "B", + }, + Reason: &featureproto.Reason{Type: featureproto.Reason_DEFAULT}, + }, + 
expectedError: nil, + }, + } + + for _, p := range patterns { + user := &userproto.User{Id: p.userID} + f.Enabled = p.enabled + f.OffVariation = p.offVariation + f.Prerequisites = p.prerequisite + segmentUser := map[string][]*featureproto.SegmentUser{} + evaluation, err := EvaluateFeatures([]*featureproto.Feature{f.Feature, f1.Feature, f2.Feature}, user, segmentUser, "tag-1") + assert.Equal(t, p.expectedError, err) + if evaluation != nil { + actual, err := findEvaluation(evaluation.Evaluations, f.Id) + assert.NoError(t, err) + assert.True(t, proto.Equal(p.expected, actual)) + } + } +} + +func findEvaluation(es []*featureproto.Evaluation, fId string) (*featureproto.Evaluation, error) { + for _, e := range es { + if fId == e.FeatureId { + return e, nil + } + } + return nil, fmt.Errorf("%s was not found", fId) +} + +func TestTopologicalSort(t *testing.T) { + t.Parallel() + f0 := makeFeature("fID-0") + f1 := makeFeature("fID-1") + f2 := makeFeature("fID-2") + f3 := makeFeature("fID-3") + f4 := makeFeature("fID-4") + f5 := makeFeature("fID-5") + patterns := []struct { + f0Prerequisite []*featureproto.Prerequisite + f1Prerequisite []*featureproto.Prerequisite + f2Prerequisite []*featureproto.Prerequisite + f3Prerequisite []*featureproto.Prerequisite + f4Prerequisite []*featureproto.Prerequisite + f5Prerequisite []*featureproto.Prerequisite + expected []*featureproto.Feature + expectedError error + }{ + { + f0Prerequisite: []*featureproto.Prerequisite{}, + f1Prerequisite: []*featureproto.Prerequisite{ + { + FeatureId: f0.Id, + }, + }, + f2Prerequisite: []*featureproto.Prerequisite{ + { + FeatureId: f1.Id, + }, + }, + f3Prerequisite: []*featureproto.Prerequisite{ + { + FeatureId: f1.Id, + }, + { + FeatureId: f2.Id, + }, + }, + f4Prerequisite: []*featureproto.Prerequisite{ + { + FeatureId: f0.Id, + }, + { + FeatureId: f3.Id, + }, + }, + f5Prerequisite: []*featureproto.Prerequisite{ + { + FeatureId: f4.Id, + }, + { + FeatureId: f3.Id, + }, + }, + expected: 
[]*featureproto.Feature{ + f0.Feature, f1.Feature, f2.Feature, f3.Feature, f4.Feature, f5.Feature, + }, + expectedError: nil, + }, + { + f0Prerequisite: []*featureproto.Prerequisite{}, + f1Prerequisite: []*featureproto.Prerequisite{ + { + FeatureId: f0.Id, + }, + }, + f2Prerequisite: []*featureproto.Prerequisite{ + { + FeatureId: f1.Id, + }, + }, + f3Prerequisite: []*featureproto.Prerequisite{ + { + FeatureId: f1.Id, + }, + { + FeatureId: f2.Id, + }, + }, + f4Prerequisite: []*featureproto.Prerequisite{ + { + FeatureId: f0.Id, + }, + { + FeatureId: f3.Id, + }, + }, + f5Prerequisite: []*featureproto.Prerequisite{}, + expected: []*featureproto.Feature{ + f0.Feature, f1.Feature, f2.Feature, f5.Feature, f3.Feature, f4.Feature, + }, + expectedError: nil, + }, + { + f0Prerequisite: []*featureproto.Prerequisite{}, + f1Prerequisite: []*featureproto.Prerequisite{ + { + FeatureId: f0.Id, + }, + }, + f2Prerequisite: []*featureproto.Prerequisite{ + { + FeatureId: f3.Id, + }, + }, + f3Prerequisite: []*featureproto.Prerequisite{ + { + FeatureId: f2.Id, + }, + }, + f4Prerequisite: []*featureproto.Prerequisite{ + { + FeatureId: f0.Id, + }, + { + FeatureId: f3.Id, + }, + }, + f5Prerequisite: []*featureproto.Prerequisite{ + { + FeatureId: f4.Id, + }, + { + FeatureId: f3.Id, + }, + }, + expected: nil, + expectedError: ErrCycleExists, + }, + { + f0Prerequisite: []*featureproto.Prerequisite{}, + f1Prerequisite: []*featureproto.Prerequisite{}, + f2Prerequisite: []*featureproto.Prerequisite{}, + f3Prerequisite: []*featureproto.Prerequisite{}, + f4Prerequisite: []*featureproto.Prerequisite{}, + f5Prerequisite: []*featureproto.Prerequisite{}, + expected: []*featureproto.Feature{ + f2.Feature, f0.Feature, f5.Feature, f3.Feature, f1.Feature, f4.Feature, + }, + expectedError: nil, + }, + } + for _, p := range patterns { + f0.Prerequisites = p.f0Prerequisite + f1.Prerequisites = p.f1Prerequisite + f2.Prerequisites = p.f2Prerequisite + f3.Prerequisites = p.f3Prerequisite + f4.Prerequisites = 
p.f4Prerequisite + f5.Prerequisites = p.f5Prerequisite + fs := []*featureproto.Feature{ + f2.Feature, f0.Feature, f5.Feature, f3.Feature, f1.Feature, f4.Feature, + } + actual, err := TopologicalSort(fs) + assert.Equal(t, p.expectedError, err) + assert.Equal(t, p.expected, actual) + } +} diff --git a/pkg/feature/domain/feature.go b/pkg/feature/domain/feature.go new file mode 100644 index 000000000..be4d3fdfa --- /dev/null +++ b/pkg/feature/domain/feature.go @@ -0,0 +1,873 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package domain + +import ( + "encoding/json" + "errors" + "strconv" + "time" + + "github.com/bucketeer-io/bucketeer/pkg/uuid" + "github.com/bucketeer-io/bucketeer/proto/feature" + userproto "github.com/bucketeer-io/bucketeer/proto/user" +) + +const ( + SecondsToStale = 90 * 24 * 60 * 60 // 90 days +) + +var ( + errClauseNotFound = errors.New("feature: clause not found") + errDefaultStrategyNotFound = errors.New("feature: default strategy not found") + errClauseAlreadyExists = errors.New("feature: clause already exists") + errRuleMustHaveAtLeastOneClause = errors.New("feature: rule must have at least one clause") + errClauseMustHaveAtLeastOneValue = errors.New("feature: clause must have at least one value") + errRuleAlreadyExists = errors.New("feature: rule already exists") + errRuleNotFound = errors.New("feature: rule not found") + errTargetNotFound = errors.New("feature: target not found") + errValueNotFound = errors.New("feature: value not found") + errVariationInUse = errors.New("feature: variation in use") + errVariationNotFound = errors.New("feature: variation not found") + errVariationTypeUnmatched = errors.New("feature: variation value and type are unmatched") + errTagsMustHaveAtLeastOneTag = errors.New("feature: tags must have at least one tag set") + errUnsupportedStrategy = errors.New("feature: unsupported strategy") + errFeatureNotFound = errors.New("feature: feature not found") + errPrerequisiteVariationNotFound = errors.New("feature: prerequisite variation not found") + ErrCycleExists = errors.New("feature: cycle exists in features") + errPrerequisiteNotFound = errors.New("feature: prerequisite not found") + ErrAlreadyEnabled = errors.New("feature: already enabled") + ErrAlreadyDisabled = errors.New("feature: already disabled") + ErrLastUsedInfoNotFound = errors.New("feature: last used info not found") +) + +// TODO: think about splitting out ruleset / variation + +type Feature struct { + *feature.Feature + ruleEvaluator + strategyEvaluator +} + 
+func NewFeature( + id, name, description string, + variationType feature.Feature_VariationType, + variations []*feature.Variation, + tags []string, + defaultOnVariationIndex, defaultOffVariationIndex int, + maintainer string, +) (*Feature, error) { + f := &Feature{Feature: &feature.Feature{ + Id: id, + Name: name, + Description: description, + Version: 1, + VariationType: variationType, + CreatedAt: time.Now().Unix(), + Maintainer: maintainer, + }} + for i := range variations { + id, err := uuid.NewUUID() + if err != nil { + return nil, err + } + if err = f.AddVariation(id.String(), variations[i].Value, variations[i].Name, variations[i].Description); err != nil { + return nil, err + } + } + f.Tags = append(f.Tags, tags...) + if err := f.ChangeDefaultStrategy(&feature.Strategy{ + Type: feature.Strategy_FIXED, + FixedStrategy: &feature.FixedStrategy{ + Variation: f.Variations[defaultOnVariationIndex].Id, + }, + }); err != nil { + return nil, err + } + if err := f.ChangeOffVariation(f.Variations[defaultOffVariationIndex].Id); err != nil { + return nil, err + } + return f, nil +} + +func (f *Feature) assignUser( + user *userproto.User, + segmentUsers []*feature.SegmentUser, + flagVariations map[string]string, +) (*feature.Reason, *feature.Variation, error) { + for _, pf := range f.Prerequisites { + variation, ok := flagVariations[pf.FeatureId] + if !ok { + return nil, nil, errPrerequisiteVariationNotFound + } + if pf.VariationId != variation { + if f.OffVariation != "" { + variation, err := findVariation(f.OffVariation, f.Variations) + return &feature.Reason{Type: feature.Reason_PREREQUISITE}, variation, err + } + } + } + // It doesn't assign the user in case of the feature is disabled and OffVariation is not set + if !f.Enabled && f.OffVariation != "" { + variation, err := findVariation(f.OffVariation, f.Variations) + return &feature.Reason{Type: feature.Reason_OFF_VARIATION}, variation, err + } + // evaluate from top to bottom, return if one rule matches + // 
evaluate targeting rules + for i := range f.Targets { + if contains(user.Id, f.Targets[i].Users) { + variation, err := findVariation(f.Targets[i].Variation, f.Variations) + return &feature.Reason{Type: feature.Reason_TARGET}, variation, err + } + } + // evaluate ruleset + rule := f.ruleEvaluator.Evaluate(f.Rules, user, segmentUsers) + if rule != nil { + variation, err := f.strategyEvaluator.Evaluate( + rule.Strategy, + user.Id, + f.Variations, + f.Feature.Id, + f.Feature.SamplingSeed, + ) + return &feature.Reason{ + Type: feature.Reason_RULE, + RuleId: rule.Id, + }, variation, err + } + // use default strategy + if f.DefaultStrategy == nil { + return nil, nil, errDefaultStrategyNotFound + } + variation, err := f.strategyEvaluator.Evaluate( + f.DefaultStrategy, + user.Id, + f.Variations, + f.Feature.Id, + f.Feature.SamplingSeed, + ) + if err != nil { + return nil, nil, err + } + return &feature.Reason{Type: feature.Reason_DEFAULT}, variation, nil +} + +func findVariation(v string, vs []*feature.Variation) (*feature.Variation, error) { + for i := range vs { + if vs[i].Id == v { + return vs[i], nil + } + } + return nil, errVariationNotFound +} + +func (f *Feature) Rename(name string) error { + f.Name = name + f.UpdatedAt = time.Now().Unix() + return nil +} + +func (f *Feature) ChangeDescription(description string) error { + f.Description = description + f.UpdatedAt = time.Now().Unix() + return nil +} + +func (f *Feature) ChangeOffVariation(id string) error { + _, err := findVariation(id, f.Variations) + if err != nil { + return err + } + f.OffVariation = id + f.UpdatedAt = time.Now().Unix() + return nil +} + +func (f *Feature) AddTag(tag string) error { + if contains(tag, f.Tags) { + // output info log + return nil + } + f.Tags = append(f.Tags, tag) + f.UpdatedAt = time.Now().Unix() + return nil +} + +func (f *Feature) RemoveTag(tag string) error { + if len(f.Tags) <= 1 { + return errTagsMustHaveAtLeastOneTag + } + idx, err := index(tag, f.Tags) + if err != nil { + 
return err + } + f.Tags = append(f.Tags[:idx], f.Tags[idx+1:]...) + f.UpdatedAt = time.Now().Unix() + return nil +} + +func (f *Feature) Enable() error { + if f.Enabled { + return ErrAlreadyEnabled + } + f.Enabled = true + f.UpdatedAt = time.Now().Unix() + return nil +} + +func (f *Feature) Disable() error { + if !f.Enabled { + return ErrAlreadyDisabled + } + f.Enabled = false + f.UpdatedAt = time.Now().Unix() + return nil +} + +func (f *Feature) Archive() error { + f.Archived = true + f.UpdatedAt = time.Now().Unix() + return nil +} + +func (f *Feature) Unarchive() error { + f.Archived = false + f.UpdatedAt = time.Now().Unix() + return nil +} + +func (f *Feature) Delete() error { + f.Deleted = true + f.UpdatedAt = time.Now().Unix() + return nil +} + +func (f *Feature) AddUserToVariation(variation string, user string) error { + idx, err := f.findTarget(variation) + if err != nil { + return err + } + if contains(user, f.Targets[idx].Users) { + return nil + } + f.Targets[idx].Users = append(f.Targets[idx].Users, user) + f.UpdatedAt = time.Now().Unix() + return nil +} + +func (f *Feature) RemoveUserFromVariation(variation string, user string) error { + idx, err := f.findTarget(variation) + if err != nil { + return err + } + uidx, err := index(user, f.Targets[idx].Users) + if err != nil { + return err + } + f.Targets[idx].Users = append(f.Targets[idx].Users[:uidx], f.Targets[idx].Users[uidx+1:]...) + f.UpdatedAt = time.Now().Unix() + return nil +} + +func (f *Feature) AddRule(rule *feature.Rule) error { + if _, err := f.findRule(rule.Id); err == nil { + return errRuleAlreadyExists + } + if err := validateStrategy(rule.Strategy, f.Variations); err != nil { + return err + } + // TODO: rule validation needed? 
+ // - maybe check if 2 rules are the same (not id but logic) + // - check if two rules are the same but have different targets + f.Rules = append(f.Rules, rule) + f.UpdatedAt = time.Now().Unix() + return nil +} + +func (f *Feature) ChangeRuleStrategy(ruleID string, strategy *feature.Strategy) error { + idx, err := f.findRule(ruleID) + if err != nil { + return errRuleNotFound + } + if err := validateStrategy(strategy, f.Variations); err != nil { + return err + } + f.Rules[idx].Strategy = strategy + f.UpdatedAt = time.Now().Unix() + return nil +} + +func validateStrategy(strategy *feature.Strategy, variations []*feature.Variation) error { + switch strategy.Type { + case feature.Strategy_FIXED: + return validateFixedStrategy(strategy.FixedStrategy, variations) + case feature.Strategy_ROLLOUT: + return validateRolloutStrategy(strategy.RolloutStrategy, variations) + default: + return errUnsupportedStrategy + } +} + +func validateRolloutStrategy(strategy *feature.RolloutStrategy, variations []*feature.Variation) error { + for _, v := range strategy.Variations { + if _, err := findVariation(v.Variation, variations); err != nil { + return errVariationNotFound + } + } + return nil +} + +func validateFixedStrategy(strategy *feature.FixedStrategy, variations []*feature.Variation) error { + if _, err := findVariation(strategy.Variation, variations); err != nil { + return errVariationNotFound + } + return nil +} + +func (f *Feature) DeleteRule(rule string) error { + idx, err := f.findRule(rule) + if err != nil { + return err + } + f.Rules = append(f.Rules[:idx], f.Rules[idx+1:]...) + f.UpdatedAt = time.Now().Unix() + return nil +} + +func (f *Feature) AddClause(rule string, clause *feature.Clause) error { + // TODO: do same validation as in addrule? 
+ idx, err := f.findRule(rule) + if err != nil { + return err + } + f.Rules[idx].Clauses = append(f.Rules[idx].Clauses, clause) + f.UpdatedAt = time.Now().Unix() + return nil +} + +func (f *Feature) DeleteClause(rule string, clause string) error { + ruleIdx, err := f.findRule(rule) + if err != nil { + return err + } + idx, err := f.findClause(clause, f.Rules[ruleIdx].Clauses) + if err != nil { + return err + } + f.Rules[ruleIdx].Clauses = append(f.Rules[ruleIdx].Clauses[:idx], f.Rules[ruleIdx].Clauses[idx+1:]...) + f.UpdatedAt = time.Now().Unix() + return nil +} + +func (f *Feature) ChangeClauseAttribute(rule string, clause string, attribute string) error { + ruleIdx, err := f.findRule(rule) + if err != nil { + return err + } + idx, err := f.findClause(clause, f.Rules[ruleIdx].Clauses) + if err != nil { + return err + } + if f.Rules[ruleIdx].Clauses[idx].Attribute == attribute { + // TODO: should something be returned so no event is created? + return nil + } + f.Rules[ruleIdx].Clauses[idx].Attribute = attribute + f.UpdatedAt = time.Now().Unix() + return nil +} + +func (f *Feature) ChangeClauseOperator(rule string, clause string, operator feature.Clause_Operator) error { + ruleIdx, err := f.findRule(rule) + if err != nil { + return err + } + idx, err := f.findClause(clause, f.Rules[ruleIdx].Clauses) + if err != nil { + return err + } + if f.Rules[ruleIdx].Clauses[idx].Operator == operator { + // TODO: same as attribute. maybe stop event from being generated + return nil + } + f.Rules[ruleIdx].Clauses[idx].Operator = operator + f.UpdatedAt = time.Now().Unix() + return nil +} + +func (f *Feature) AddClauseValue(rule string, clause string, value string) error { + ruleIdx, err := f.findRule(rule) + if err != nil { + return err + } + idx, err := f.findClause(clause, f.Rules[ruleIdx].Clauses) + if err != nil { + return err + } + _, err = index(value, f.Rules[ruleIdx].Clauses[idx].Values) + if err == nil { + // TODO: same as attribute. 
maybe stop event from being generated + return nil + } + f.Rules[ruleIdx].Clauses[idx].Values = append(f.Rules[ruleIdx].Clauses[idx].Values, value) + f.UpdatedAt = time.Now().Unix() + return nil +} + +func (f *Feature) RemoveClauseValue(rule string, clause string, value string) error { + ruleIdx, err := f.findRule(rule) + if err != nil { + return err + } + clauseIdx, err := f.findClause(clause, f.Rules[ruleIdx].Clauses) + if err != nil { + return err + } + idx, err := index(value, f.Rules[ruleIdx].Clauses[clauseIdx].Values) + if err != nil { + // TODO: same as attribute. maybe stop event from being generated + return nil + } + f.Rules[ruleIdx].Clauses[clauseIdx].Values = append( + f.Rules[ruleIdx].Clauses[clauseIdx].Values[:idx], + f.Rules[ruleIdx].Clauses[clauseIdx].Values[idx+1:]..., + ) + f.UpdatedAt = time.Now().Unix() + return nil +} + +func (f *Feature) AddVariation(id string, value string, name string, description string) error { + if err := validateVariation(f.VariationType, value); err != nil { + return err + } + variation := &feature.Variation{ + Id: id, + Value: value, + Name: name, + Description: description, + } + f.Variations = append(f.Variations, variation) + f.addTarget(id) + f.addVariationToRules(id) + f.addVariationToDefaultStrategy(id) + f.UpdatedAt = time.Now().Unix() + return nil +} + +func validateVariation(variationType feature.Feature_VariationType, value string) error { + switch variationType { + case feature.Feature_BOOLEAN: + if value != "true" && value != "false" { + return errVariationTypeUnmatched + } + case feature.Feature_NUMBER: + if _, err := strconv.ParseFloat(value, 64); err != nil { + return errVariationTypeUnmatched + } + case feature.Feature_JSON: + var js map[string]interface{} + if json.Unmarshal([]byte(value), &js) != nil { + return errVariationTypeUnmatched + } + } + return nil +} + +func (f *Feature) addTarget(variationID string) { + target := &feature.Target{ + Variation: variationID, + } + f.Targets = append(f.Targets, 
target) +} + +func (f *Feature) addVariationToRules(variationID string) { + for _, rule := range f.Rules { + if rule.Strategy.Type == feature.Strategy_ROLLOUT { + f.addVariationToRolloutStrategy(rule.Strategy.RolloutStrategy, variationID) + } + } +} + +func (f *Feature) addVariationToDefaultStrategy(variationID string) { + if f.DefaultStrategy != nil && f.DefaultStrategy.Type == feature.Strategy_ROLLOUT { + f.addVariationToRolloutStrategy(f.DefaultStrategy.RolloutStrategy, variationID) + } +} + +func (f *Feature) addVariationToRolloutStrategy(strategy *feature.RolloutStrategy, variationID string) { + strategy.Variations = append(strategy.Variations, &feature.RolloutStrategy_Variation{ + Variation: variationID, + Weight: 0, + }) +} + +func (f *Feature) RemoveVariation(id string) error { + idx, err := f.findVariationIndex(id) + if err != nil { + return err + } + if err = f.validateRemoveVariation(id); err != nil { + return err + } + if err = f.removeTarget(id); err != nil { + return err + } + f.removeVariationFromRules(id) + f.removeVariationFromDefaultStrategy(id) + f.Variations = append(f.Variations[:idx], f.Variations[idx+1:]...) 
+ f.UpdatedAt = time.Now().Unix() + return nil +} + +func (f *Feature) validateRemoveVariation(id string) error { + if strategyContainsVariation(id, f.Feature.DefaultStrategy) { + return errVariationInUse + } + if f.rulesContainsVariation(id) { + return errVariationInUse + } + if f.OffVariation == id { + return errVariationInUse + } + return nil +} + +func (f *Feature) rulesContainsVariation(id string) bool { + for _, r := range f.Feature.Rules { + if ok := strategyContainsVariation(id, r.Strategy); ok { + return true + } + } + return false +} + +func strategyContainsVariation(id string, strategy *feature.Strategy) bool { + if strategy.Type == feature.Strategy_FIXED { + if strategy.FixedStrategy.Variation == id { + return true + } + } else if strategy.Type == feature.Strategy_ROLLOUT { + for _, v := range strategy.RolloutStrategy.Variations { + if v.Variation == id && v.Weight > 0 { + return true + } + } + } + return false +} + +func (f *Feature) removeTarget(variationID string) error { + idx, err := f.findTarget(variationID) + if err != nil { + return err + } + f.Targets = append(f.Targets[:idx], f.Targets[idx+1:]...) + return nil +} + +func (f *Feature) removeVariationFromRules(variationID string) { + for _, rule := range f.Rules { + if rule.Strategy.Type == feature.Strategy_ROLLOUT { + f.removeVariationFromRolloutStrategy(rule.Strategy.RolloutStrategy, variationID) + return + } + } +} + +func (f *Feature) removeVariationFromDefaultStrategy(variationID string) { + if f.DefaultStrategy != nil && f.DefaultStrategy.Type == feature.Strategy_ROLLOUT { + f.removeVariationFromRolloutStrategy(f.DefaultStrategy.RolloutStrategy, variationID) + } +} + +func (f *Feature) removeVariationFromRolloutStrategy(strategy *feature.RolloutStrategy, variationID string) { + for i, v := range strategy.Variations { + if v.Variation == variationID { + strategy.Variations = append(strategy.Variations[:i], strategy.Variations[i+1:]...) 
+ return + } + } +} + +func (f *Feature) ChangeVariationValue(id string, value string) error { + idx, err := f.findVariationIndex(id) + if err != nil { + return err + } + f.Variations[idx].Value = value + f.UpdatedAt = time.Now().Unix() + return nil +} + +func (f *Feature) ChangeVariationName(id string, name string) error { + idx, err := f.findVariationIndex(id) + if err != nil { + return err + } + f.Variations[idx].Name = name + f.UpdatedAt = time.Now().Unix() + return nil +} + +func (f *Feature) ChangeVariationDescription(id string, description string) error { + idx, err := f.findVariationIndex(id) + if err != nil { + return err + } + f.Variations[idx].Description = description + f.UpdatedAt = time.Now().Unix() + return nil +} + +func (f *Feature) ChangeDefaultStrategy(s *feature.Strategy) error { + f.DefaultStrategy = s + f.UpdatedAt = time.Now().Unix() + return nil +} + +func (f *Feature) ChangeFixedStrategy(ruleID string, strategy *feature.FixedStrategy) error { + ruleIdx, err := f.findRule(ruleID) + if err != nil { + return err + } + if _, err := findVariation(strategy.Variation, f.Variations); err != nil { + return err + } + f.Rules[ruleIdx].Strategy.FixedStrategy = strategy + f.UpdatedAt = time.Now().Unix() + return nil +} + +func (f *Feature) ChangeRolloutStrategy(ruleID string, strategy *feature.RolloutStrategy) error { + ruleIdx, err := f.findRule(ruleID) + if err != nil { + return err + } + for _, v := range strategy.Variations { + if _, err := findVariation(v.Variation, f.Variations); err != nil { + return err + } + } + f.Rules[ruleIdx].Strategy.RolloutStrategy = strategy + f.UpdatedAt = time.Now().Unix() + return nil +} + +func (f *Feature) ListSegmentIDs() []string { + mapIDs := make(map[string]struct{}) + for _, r := range f.Rules { + for _, c := range r.Clauses { + if c.Operator == feature.Clause_SEGMENT { + for _, v := range c.Values { + mapIDs[v] = struct{}{} + } + } + } + } + ids := make([]string, 0, len(mapIDs)) + for id := range mapIDs { + ids 
= append(ids, id) + } + return ids +} + +func (f *Feature) IncrementVersion() error { + f.Version++ + f.UpdatedAt = time.Now().Unix() + return nil +} + +func (f *Feature) ResetSamplingSeed() error { + id, err := uuid.NewUUID() + if err != nil { + return err + } + f.SamplingSeed = id.String() + f.UpdatedAt = time.Now().Unix() + return nil +} + +func (f *Feature) AddPrerequisite(fID, variationID string) error { + p := &feature.Prerequisite{FeatureId: fID, VariationId: variationID} + f.Prerequisites = append(f.Prerequisites, p) + f.UpdatedAt = time.Now().Unix() + return nil +} + +func (f *Feature) ChangePrerequisiteVariation(fID, variationID string) error { + idx, err := f.findPrerequisite(fID) + if err != nil { + return err + } + f.Prerequisites[idx].VariationId = variationID + f.UpdatedAt = time.Now().Unix() + return nil +} + +func (f *Feature) RemovePrerequisite(fID string) error { + idx, err := f.findPrerequisite(fID) + if err != nil { + return err + } + f.Prerequisites = append(f.Prerequisites[:idx], f.Prerequisites[idx+1:]...) 
+ f.UpdatedAt = time.Now().Unix() + return nil +} + +func (f *Feature) findPrerequisite(fID string) (int, error) { + for i := range f.Prerequisites { + if f.Prerequisites[i].FeatureId == fID { + return i, nil + } + } + return -1, errPrerequisiteNotFound +} + +func (f *Feature) IsStale(t time.Time) bool { + if f.LastUsedInfo == nil { + return false + } + if (t.Unix() - f.LastUsedInfo.LastUsedAt) < SecondsToStale { + return false + } + return true +} + +func (f *Feature) findTarget(id string) (int, error) { + for i := range f.Targets { + if f.Targets[i].Variation == id { + return i, nil + } + } + return -1, errTargetNotFound +} + +func (f *Feature) findVariationIndex(id string) (int, error) { + for i := range f.Variations { + if f.Variations[i].Id == id { + return i, nil + } + } + return -1, errVariationNotFound +} + +func (f *Feature) findRule(id string) (int, error) { + for i := range f.Rules { + if f.Rules[i].Id == id { + return i, nil + } + } + return -1, errRuleNotFound +} + +// TODO: this should be on Rule.. should wrap Rule.. +// or maybe just find clause directly without finding the rule first. +func (f *Feature) findClause(id string, clauses []*feature.Clause) (int, error) { + for i := range clauses { + if clauses[i].Id == id { + return i, nil + } + } + return -1, nil +} + +// TODO: this should be on Clause.. should wrap Clause.. do you see a pattern here? 
+func index(needle string, haystack []string) (int, error) { + for i := range haystack { + if haystack[i] == needle { + return i, nil + } + } + return -1, errValueNotFound +} + +func contains(needle string, haystack []string) bool { + for i := range haystack { + if haystack[i] == needle { + return true + } + } + return false +} + +func (f *Feature) Clone( + maintainer string, +) (*Feature, error) { + now := time.Now().Unix() + newFeature := &Feature{Feature: &feature.Feature{ + Id: f.Id, + Name: f.Name, + Description: f.Description, + Enabled: false, + Deleted: false, + Version: 1, + CreatedAt: now, + UpdatedAt: now, + Variations: f.Variations, + Targets: f.Targets, + Rules: f.Rules, + DefaultStrategy: f.DefaultStrategy, + OffVariation: f.OffVariation, + Tags: f.Tags, + Maintainer: maintainer, + VariationType: f.VariationType, + Archived: false, + }} + for i := range newFeature.Variations { + id, err := uuid.NewUUID() + if err != nil { + return nil, err + } + if newFeature.Variations[i].Id == newFeature.OffVariation { + newFeature.OffVariation = id.String() + } + for idx := range newFeature.Targets { + if newFeature.Targets[idx].Variation == newFeature.Variations[i].Id { + newFeature.Targets[idx].Variation = id.String() + break + } + } + if err = updateStrategyVariationID(newFeature.Variations[i].Id, id.String(), newFeature.DefaultStrategy); err != nil { + return nil, err + } + for idx := range newFeature.Rules { + err = updateStrategyVariationID(newFeature.Variations[i].Id, id.String(), newFeature.Rules[idx].Strategy) + if err != nil { + return nil, err + } + } + newFeature.Variations[i].Id = id.String() + } + return newFeature, nil +} + +func updateStrategyVariationID(varID, uID string, s *feature.Strategy) error { + switch s.Type { + case feature.Strategy_FIXED: + if varID == s.FixedStrategy.Variation { + s.FixedStrategy.Variation = uID + } + case feature.Strategy_ROLLOUT: + for i := range s.RolloutStrategy.Variations { + if 
s.RolloutStrategy.Variations[i].Variation == varID { + s.RolloutStrategy.Variations[i].Variation = uID + break + } + } + default: + return errUnsupportedStrategy + } + return nil +} diff --git a/pkg/feature/domain/feature_last_used_info.go b/pkg/feature/domain/feature_last_used_info.go new file mode 100644 index 000000000..a2d7e1b5a --- /dev/null +++ b/pkg/feature/domain/feature_last_used_info.go @@ -0,0 +1,117 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package domain + +import ( + "fmt" + + "github.com/blang/semver" + + "github.com/bucketeer-io/bucketeer/proto/feature" +) + +type FeatureLastUsedInfo struct { + *feature.FeatureLastUsedInfo +} + +func NewFeatureLastUsedInfo( + featureID string, + version int32, + lastUsedAt int64, + clientVersion string, +) *FeatureLastUsedInfo { + info := &FeatureLastUsedInfo{FeatureLastUsedInfo: &feature.FeatureLastUsedInfo{ + FeatureId: featureID, + Version: version, + LastUsedAt: lastUsedAt, + CreatedAt: lastUsedAt, + }} + info.SetClientVersion(clientVersion) // nolint:errcheck + return info +} + +func (f *FeatureLastUsedInfo) ID() string { + return FeatureLastUsedInfoID(f.FeatureId, f.Version) +} + +func (f *FeatureLastUsedInfo) UsedAt(v int64) { + if f.LastUsedAt < v { + f.LastUsedAt = v + } +} + +func (f *FeatureLastUsedInfo) SetClientVersion(version string) error { + if err := f.setOldestClientVersion(version); err != nil { + return err + } + if err := f.setLatestClientVersion(version); err != nil { + return err + } + return nil +} + +func (f *FeatureLastUsedInfo) setOldestClientVersion(version string) error { + clientSemVersion, err := f.parseSemver(version) + if err != nil { + // Because the client version is optional and + // it could not be a semantic version, it ignores parse errors + return nil + } + if f.ClientOldestVersion == "" { + f.ClientOldestVersion = clientSemVersion.String() + return nil + } + currentSemVersion, err := f.parseSemver(f.ClientOldestVersion) + if err != nil { + return err + } + if currentSemVersion.GT(clientSemVersion) { + f.ClientOldestVersion = clientSemVersion.String() + } + return nil +} + +func (f *FeatureLastUsedInfo) setLatestClientVersion(version string) error { + clientSemVersion, err := f.parseSemver(version) + if err != nil { + // Because the client version is optional and + // it could not be a semantic version, it ignores parse errors + return nil + } + if f.ClientLatestVersion == "" { + f.ClientLatestVersion = 
clientSemVersion.String() + return nil + } + currentSemVersion, err := f.parseSemver(f.ClientLatestVersion) + if err != nil { + return err + } + if currentSemVersion.LT(clientSemVersion) { + f.ClientLatestVersion = clientSemVersion.String() + } + return nil +} + +func (f *FeatureLastUsedInfo) parseSemver(value string) (semver.Version, error) { + version, err := semver.Parse(value) + if err != nil { + return semver.Version{}, err + } + return version, nil +} + +func FeatureLastUsedInfoID(featureID string, version int32) string { + return fmt.Sprintf("%s:%d", featureID, version) +} diff --git a/pkg/feature/domain/feature_last_used_info_test.go b/pkg/feature/domain/feature_last_used_info_test.go new file mode 100644 index 000000000..883ab17da --- /dev/null +++ b/pkg/feature/domain/feature_last_used_info_test.go @@ -0,0 +1,254 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
package domain

import (
	"testing"

	"github.com/stretchr/testify/assert"

	proto "github.com/bucketeer-io/bucketeer/proto/feature"
)

// TestFeatureLastUsedInfoID verifies the "<featureID>:<version>" key format.
func TestFeatureLastUsedInfoID(t *testing.T) {
	patterns := []struct {
		featureID, expect string
		version           int32
	}{
		{
			featureID: "feature-id",
			version:   10,
			expect:    "feature-id:10",
		},
	}

	for _, p := range patterns {
		assert.Equal(t, p.expect, FeatureLastUsedInfoID(p.featureID, p.version))
	}
}

// TestID verifies that the method form produces the same key as the
// package-level helper.
func TestID(t *testing.T) {
	patterns := []struct {
		featureID, expect string
		version           int32
	}{
		{
			featureID: "feature-id",
			version:   10,
			expect:    "feature-id:10",
		},
	}

	for _, p := range patterns {
		f := FeatureLastUsedInfo{FeatureLastUsedInfo: &proto.FeatureLastUsedInfo{
			FeatureId: p.featureID,
			Version:   p.version,
		}}
		assert.Equal(t, p.expect, f.ID())
	}
}

// TestNewFeatureLastUsedInfo checks that the constructor records lastUsedAt
// as both LastUsedAt and CreatedAt and applies the client version.
func TestNewFeatureLastUsedInfo(t *testing.T) {
	patterns := []struct {
		featureID     string
		version       int32
		lastUsedAt    int64
		createdAt     int64
		clientVersion string
		expect        *FeatureLastUsedInfo
	}{
		{
			featureID:     "feature-id",
			version:       10,
			createdAt:     123445566,
			lastUsedAt:    123445566,
			clientVersion: "1.0.0",
		},
	}

	for _, p := range patterns {
		expect := &FeatureLastUsedInfo{FeatureLastUsedInfo: &proto.FeatureLastUsedInfo{
			FeatureId:  p.featureID,
			Version:    p.version,
			LastUsedAt: p.lastUsedAt,
			CreatedAt:  p.createdAt,
		}}
		expect.SetClientVersion(p.clientVersion)
		assert.Equal(t, expect, NewFeatureLastUsedInfo(p.featureID, p.version, p.lastUsedAt, p.clientVersion))
	}
}

// TestUsedAt checks that UsedAt only moves LastUsedAt forward.
// The desc field is declared but unset in these cases.
func TestUsedAt(t *testing.T) {
	patterns := []struct {
		v, expect int64
		desc      string
	}{
		{
			v:      0,
			expect: 1,
		},
		{
			v:      1,
			expect: 1,
		},
		{
			v:      2,
			expect: 2,
		},
	}
	for _, p := range patterns {
		featureLastUsed := NewFeatureLastUsedInfo("id", 10, 1, "1.0.0")
		featureLastUsed.UsedAt(p.v)
		assert.Equal(t, p.expect, featureLastUsed.LastUsedAt, p.desc)
	}
}

// TestSetOldestClientVersion covers empty/invalid versions and the
// lower-bound comparison.
func TestSetOldestClientVersion(t *testing.T) {
	patterns := []struct {
		currentVersion, clientVersion, expect, desc string
		expectError                                 bool
	}{
		{
			currentVersion: "",
			clientVersion:  "1.0.0",
			expect:         "1.0.0",
			expectError:    false,
			desc:           "empty_current_version",
		},
		{
			currentVersion: "1.0.0",
			clientVersion:  "",
			expect:         "1.0.0",
			expectError:    false,
			desc:           "empty_client_version",
		},
		{
			currentVersion: "10",
			clientVersion:  "1.0.0",
			expect:         "10",
			expectError:    true,
			desc:           "invalid_current_version",
		},
		{
			currentVersion: "1.0.0",
			clientVersion:  "10",
			expect:         "1.0.0",
			expectError:    false,
			desc:           "invalid_client_version",
		},
		{
			currentVersion: "1.0.0",
			clientVersion:  "1.0.1",
			expect:         "1.0.0",
			expectError:    false,
			desc:           "client_version_greater_than_current_version",
		},
		{
			currentVersion: "1.0.0",
			clientVersion:  "1.0.0",
			expect:         "1.0.0",
			expectError:    false,
			desc:           "client_version_equal_current_version",
		},
		{
			currentVersion: "1.0.9",
			clientVersion:  "1.0.1",
			expect:         "1.0.1",
			expectError:    false,
			desc:           "client_version_less_than_current_version",
		},
	}
	for _, p := range patterns {
		info := &FeatureLastUsedInfo{FeatureLastUsedInfo: &proto.FeatureLastUsedInfo{
			FeatureId:           "id",
			Version:             10,
			LastUsedAt:          1,
			CreatedAt:           1,
			ClientOldestVersion: p.currentVersion,
		}}
		err := info.setOldestClientVersion(p.clientVersion)
		assert.Equal(t, p.expectError, err != nil, p.desc)
		assert.Equal(t, p.expect, info.ClientOldestVersion, p.desc)
	}
}

// TestSetLatestClientVersion covers empty/invalid versions and the
// upper-bound comparison (mirror of the oldest-version test).
func TestSetLatestClientVersion(t *testing.T) {
	patterns := []struct {
		currentVersion, clientVersion, expect, desc string
		expectError                                 bool
	}{
		{
			currentVersion: "",
			clientVersion:  "1.0.0",
			expect:         "1.0.0",
			expectError:    false,
			desc:           "empty_current_version",
		},
		{
			currentVersion: "1.0.0",
			clientVersion:  "",
			expect:         "1.0.0",
			expectError:    false,
			desc:           "empty_client_version",
		},
		{
			currentVersion: "10",
			clientVersion:  "1.0.0",
			expect:         "10",
			expectError:    true,
			desc:           "invalid_current_version",
		},
		{
			currentVersion: "1.0.0",
			clientVersion:  "10",
			expect:         "1.0.0",
			expectError:    false,
			desc:           "invalid_client_version",
		},
		{
			currentVersion: "1.0.0",
			clientVersion:  "1.0.1",
			expect:         "1.0.1",
			expectError:    false,
			desc:           "client_version_greater_than_current_version",
		},
		{
			currentVersion: "1.0.0",
			clientVersion:  "1.0.0",
			expect:         "1.0.0",
			expectError:    false,
			desc:           "client_version_equal_current_version",
		},
		{
			currentVersion: "1.0.9",
			clientVersion:  "1.0.1",
			expect:         "1.0.9",
			expectError:    false,
			desc:           "client_version_less_than_current_version",
		},
	}
	for _, p := range patterns {
		info := &FeatureLastUsedInfo{FeatureLastUsedInfo: &proto.FeatureLastUsedInfo{
			FeatureId:           "id",
			Version:             10,
			LastUsedAt:          1,
			CreatedAt:           1,
			ClientLatestVersion: p.currentVersion,
		}}
		err := info.setLatestClientVersion(p.clientVersion)
		assert.Equal(t, p.expectError, err != nil, p.desc)
		assert.Equal(t, p.expect, info.ClientLatestVersion, p.desc)
	}
}
package domain

import (
	"fmt"
	"reflect"
	"sort"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/bucketeer-io/bucketeer/proto/feature"
	proto "github.com/bucketeer-io/bucketeer/proto/feature"
	userproto "github.com/bucketeer-io/bucketeer/proto/user"
)

// makeFeature returns a three-variation STRING flag with one target per
// variation, two fixed-strategy rules, and a fixed default strategy serving
// variation-B. Shared fixture for most tests below.
func makeFeature(id string) *Feature {
	return &Feature{
		Feature: &proto.Feature{
			Id:            id,
			Name:          "test feature",
			Version:       1,
			Enabled:       true,
			CreatedAt:     time.Now().Unix(),
			VariationType: feature.Feature_STRING,
			Variations: []*proto.Variation{
				{
					Id:          "variation-A",
					Value:       "A",
					Name:        "Variation A",
					Description: "Thing does A",
				},
				{
					Id:          "variation-B",
					Value:       "B",
					Name:        "Variation B",
					Description: "Thing does B",
				},
				{
					Id:          "variation-C",
					Value:       "C",
					Name:        "Variation C",
					Description: "Thing does C",
				},
			},
			Targets: []*proto.Target{
				{
					Variation: "variation-A",
					Users: []string{
						"user1",
					},
				},
				{
					Variation: "variation-B",
					Users: []string{
						"user2",
					},
				},
				{
					Variation: "variation-C",
					Users: []string{
						"user3",
					},
				},
			},
			Rules: []*proto.Rule{
				{
					Id: "rule-1",
					Strategy: &proto.Strategy{
						Type: proto.Strategy_FIXED,
						FixedStrategy: &proto.FixedStrategy{
							Variation: "variation-A",
						},
					},
					Clauses: []*proto.Clause{
						{
							Id:        "clause-1",
							Attribute: "name",
							Operator:  proto.Clause_EQUALS,
							Values: []string{
								"user1",
								"user2",
							},
						},
					},
				},
				{
					Id: "rule-2",
					Strategy: &proto.Strategy{
						Type: proto.Strategy_FIXED,
						FixedStrategy: &proto.FixedStrategy{
							Variation: "variation-B",
						},
					},
					Clauses: []*proto.Clause{
						{
							Id:        "clause-2",
							Attribute: "name",
							Operator:  proto.Clause_EQUALS,
							Values: []string{
								"user3",
								"user4",
							},
						},
					},
				},
			},
			DefaultStrategy: &proto.Strategy{
				Type: proto.Strategy_FIXED,
				FixedStrategy: &proto.FixedStrategy{
					Variation: "variation-B",
				},
			},
		},
	}
}

/*
func TestUserAssignment(t *testing.T) {
	f := feature("test-feature")

	fmt.Println(f.assignUser("user1"))
	fmt.Println(f.assignUser("user2"))
	fmt.Println(f.assignUser("user3"))

	user1hash := f.Hash("user1")
	user2hash := f.Hash("user2")
	fmt.Println(hex.EncodeToString(user1hash[:8]))
	fmt.Println(hex.EncodeToString(user2hash[:8]))

	fmt.Println(f.Bucket("user1"))
	fmt.Println(f.Bucket("user2"))
func TestProportions(t *testing.T) {
	f := feature("test-feature")
	bucketA := 0
	bucketB := 0
	for i := 10000; i < 20000; i++ {
		user := fmt.Sprintf("user-%d", i)
		bucket := f.Bucket(user)
		if bucket < 0.2 {
			bucketA++
		} else {
			bucketB++
		}
	}
	a := float64(bucketA) / 10000.0 // should be close to 0.2
	b := float64(bucketB) / 10000.0 // should be close to 0.8
	assert.InEpsilon(t, 0.2, a, 0.05)
	assert.InEpsilon(t, 0.8, b, 0.05)
}

func TestCorrelation(t *testing.T) {
	// create some hundred tests
	// assign all people in each test
	// compute mutual information between each test pair
	// should be high with itself and else low
}
*/

// TestNewFeature verifies that the constructor copies names/descriptions,
// picks the on/off variation ids, and builds the fixed default strategy.
func TestNewFeature(t *testing.T) {
	id := "id"
	name := "name"
	description := "description"
	variations := []*proto.Variation{
		{
			Value:       "A",
			Name:        "Variation A",
			Description: "Thing does A",
		},
		{
			Value:       "B",
			Name:        "Variation B",
			Description: "Thing does B",
		},
		{
			Value:       "C",
			Name:        "Variation C",
			Description: "Thing does C",
		},
	}
	variationType := feature.Feature_STRING
	tags := []string{"android", "ios", "web"}
	defaultOnVariationIndex := 0
	defaultOffVariationIndex := 2
	maintainer := "bucketeer@example.com"
	f, err := NewFeature(
		id,
		name,
		description,
		variationType,
		variations,
		tags,
		defaultOnVariationIndex,
		defaultOffVariationIndex,
		maintainer,
	)
	strategy := &feature.Strategy{
		Type:          feature.Strategy_FIXED,
		FixedStrategy: &feature.FixedStrategy{Variation: f.Variations[defaultOnVariationIndex].Id},
	}
	assert.NoError(t, err)
	assert.Equal(t, id, f.Id)
	assert.Equal(t, name, f.Name)
	assert.Equal(t, description, f.Description)
	for i := range variations {
		assert.Equal(t, variations[i].Name, f.Variations[i].Name)
		assert.Equal(t, variations[i].Description, f.Variations[i].Description)
	}
	assert.Equal(t, tags, f.Tags)
	// NOTE(review): duplicated assertion — the next line repeats the previous
	// one; possibly another field was meant to be checked here.
	assert.Equal(t, tags, f.Tags)
	assert.Equal(t, f.Variations[defaultOffVariationIndex].Id, f.OffVariation)
	assert.Equal(t, strategy, f.DefaultStrategy)
	assert.Equal(t, maintainer, f.Maintainer)
}

// TestAddVariation checks that a new variation is appended to targets and to
// every rollout strategy (rule and default).
func TestAddVariation(t *testing.T) {
	createFeature := func() *Feature {
		f := makeFeature("test-feature")
		f.Rules = []*proto.Rule{
			{
				Id: "rule-0",
				Strategy: &proto.Strategy{
					Type: proto.Strategy_ROLLOUT,
					RolloutStrategy: &proto.RolloutStrategy{Variations: []*proto.RolloutStrategy_Variation{
						{Variation: "A", Weight: 70000},
						{Variation: "B", Weight: 30000},
						{Variation: "C", Weight: 0},
					},
					},
				},
				Clauses: []*proto.Clause{
					{
						Id:        "clause-0",
						Attribute: "name",
						Operator:  proto.Clause_EQUALS,
						Values: []string{
							"user1",
						},
					},
				},
			},
		}
		f.DefaultStrategy = &proto.Strategy{
			Type: proto.Strategy_ROLLOUT,
			RolloutStrategy: &feature.RolloutStrategy{Variations: []*proto.RolloutStrategy_Variation{
				{Variation: "A", Weight: 70000},
				{Variation: "B", Weight: 30000},
				{Variation: "C", Weight: 0},
			}},
		}
		return f
	}
	patterns := map[string]struct {
		input string
	}{
		"Add D": {
			input: "variation-D",
		},
	}
	for _, p := range patterns {
		f := createFeature()
		f.AddVariation(p.input, p.input, "", "")
		assert.Equal(t, p.input, f.Targets[3].Variation)
		assert.Equal(t, p.input, f.Rules[0].Strategy.RolloutStrategy.Variations[3].Variation)
		assert.Equal(t, p.input, f.DefaultStrategy.RolloutStrategy.Variations[3].Variation)
	}
}

// TestAssignUserOffVariation covers the disabled/off-variation path and the
// prerequisite evaluation paths of assignUser.
func TestAssignUserOffVariation(t *testing.T) {
	t.Parallel()
	f := makeFeature("test-feature")
	patterns := []struct {
		enabled           bool
		offVariation      string
		userID            string
		Flagvariations    map[string]string
		prerequisite      []*proto.Prerequisite
		expectedReason    *proto.Reason
		expectedVariation *proto.Variation
		expectedError     error
	}{
		{
			enabled:           false,
			offVariation:      "variation-C",
			userID:            "user5",
			Flagvariations:    map[string]string{},
			prerequisite:      []*proto.Prerequisite{},
			expectedReason:    &proto.Reason{Type: proto.Reason_OFF_VARIATION},
			expectedVariation: f.Variations[2],
			expectedError:     nil,
		},
		{
			enabled:           false,
			offVariation:      "",
			userID:            "user5",
			Flagvariations:    map[string]string{},
			prerequisite:      []*proto.Prerequisite{},
			expectedReason:    &proto.Reason{Type: proto.Reason_DEFAULT},
			expectedVariation: f.Variations[1],
			expectedError:     nil,
		},
		{
			enabled:           false,
			offVariation:      "variation-E",
			userID:            "user5",
			Flagvariations:    map[string]string{},
			prerequisite:      []*proto.Prerequisite{},
			expectedReason:    &proto.Reason{Type: proto.Reason_OFF_VARIATION},
			expectedVariation: nil,
			expectedError:     errVariationNotFound,
		},
		{
			enabled:           true,
			offVariation:      "",
			userID:            "user4",
			Flagvariations:    map[string]string{},
			prerequisite:      []*proto.Prerequisite{},
			expectedReason:    &proto.Reason{Type: proto.Reason_DEFAULT},
			expectedVariation: f.Variations[1],
			expectedError:     nil,
		},
		{
			enabled:           true,
			offVariation:      "variation-C",
			userID:            "user4",
			Flagvariations:    map[string]string{},
			prerequisite:      []*proto.Prerequisite{},
			expectedReason:    &proto.Reason{Type: proto.Reason_DEFAULT},
			expectedVariation: f.Variations[1],
			expectedError:     nil,
		},
		{
			enabled:      true,
			offVariation: "variation-C",
			userID:       "user4",
			Flagvariations: map[string]string{
				"test-feature2": "variation A", // not matched with expected prerequisites variations
			},
			prerequisite: []*proto.Prerequisite{
				{
					FeatureId:   "test-feature2",
					VariationId: "variation D",
				},
			},
			expectedReason:    &proto.Reason{Type: proto.Reason_PREREQUISITE},
			expectedVariation: f.Variations[2],
			expectedError:     nil,
		},
		{
			enabled:      true,
			offVariation: "variation-C",
			userID:       "user4",
			Flagvariations: map[string]string{
				"test-feature2": "variation D", // matched with expected prerequisites variations
			},
			prerequisite: []*proto.Prerequisite{
				{
					FeatureId:   "test-feature2",
					VariationId: "variation D",
				},
			},
			expectedReason:    &proto.Reason{Type: proto.Reason_DEFAULT},
			expectedVariation: f.Variations[1],
			expectedError:     nil,
		},
		{
			enabled:        true,
			offVariation:   "variation-C",
			userID:         "user4",
			Flagvariations: map[string]string{}, // not found prerequisite variation
			prerequisite: []*proto.Prerequisite{
				{
					FeatureId:   "test-feature2",
					VariationId: "variation D",
				},
			},
			expectedReason:    nil,
			expectedVariation: nil,
			expectedError:     errPrerequisiteVariationNotFound,
		},
	}
	for _, p := range patterns {
		user := &userproto.User{Id: p.userID}
		f.Enabled = p.enabled
		f.OffVariation = p.offVariation
		f.Prerequisites = p.prerequisite
		reason, variation, err := f.assignUser(user, nil, p.Flagvariations)
		assert.Equal(t, p.expectedReason, reason)
		assert.Equal(t, p.expectedVariation, variation)
		assert.Equal(t, p.expectedError, err)
	}
}

// TestAssignUserTarget checks individual targeting: users in a target list
// get that variation; others fall through to the default strategy.
func TestAssignUserTarget(t *testing.T) {
	f := makeFeature("test-feature")
	patterns := []struct {
		userID              string
		expectedReason      proto.Reason_Type
		expectedVariationID string
	}{
		{
			userID:              "user1",
			expectedReason:      proto.Reason_TARGET,
			expectedVariationID: "variation-A",
		},
		{
			userID:              "user2",
			expectedReason:      proto.Reason_TARGET,
			expectedVariationID: "variation-B",
		},
		{
			userID:              "user3",
			expectedReason:      proto.Reason_TARGET,
			expectedVariationID: "variation-C",
		},
		{
			userID:              "user4",
			expectedReason:      proto.Reason_DEFAULT,
			expectedVariationID: "variation-B",
		},
	}
	for _, p := range patterns {
		user := &userproto.User{Id: p.userID}
		reason, variation, err := f.assignUser(user, nil, nil)
		assert.Equal(t, p.expectedReason, reason.Type)
		assert.Equal(t, p.expectedVariationID, variation.Id)
		assert.NoError(t, err)
	}
}

// TestAssignUserRuleSet checks rule matching against user attributes
// (rule-2 matches on the "name" attribute).
func TestAssignUserRuleSet(t *testing.T) {
	user := &userproto.User{
		Id:   "user-id",
		Data: map[string]string{"name": "user3"},
	}
	f := makeFeature("test-feature")
	reason, variation, err := f.assignUser(user, nil, nil)
	if err != nil {
		t.Fatalf("Failed to assign user. Error: %v", err)
	}
	if reason.RuleId != "rule-2" {
		t.Fatalf("Failed to assign user. Reason id does not match. ID: %s", reason.RuleId)
	}
	if variation.Id != "variation-B" {
		t.Fatalf("Failed to assign user. Variation id does not match. ID: %s", variation.Id)
	}
}

// TestAssignUserWithNoDefaultStrategy checks the error path when no rule
// matches and the default strategy is missing.
func TestAssignUserWithNoDefaultStrategy(t *testing.T) {
	user := &userproto.User{
		Id:   "user-id1",
		Data: map[string]string{"name3": "user3"},
	}
	f := makeFeature("test-feature")
	f.DefaultStrategy = nil

	reason, variation, err := f.assignUser(user, nil, nil)
	if reason != nil {
		t.Fatalf("Failed to assign user. Reason should be nil: %v", reason)
	}
	if variation != nil {
		t.Fatalf("Failed to assign user. Variation should be nil: %v", variation)
	}
	if err != errDefaultStrategyNotFound {
		t.Fatalf("Failed to assign user. Error: %v", err)
	}
}

// TestAssignUserDefaultStrategy checks the fall-through to the default
// strategy when neither targets nor rules match.
func TestAssignUserDefaultStrategy(t *testing.T) {
	user := &userproto.User{
		Id:   "user-id1",
		Data: map[string]string{"name3": "user3"},
	}
	f := makeFeature("test-feature")
	reason, variation, err := f.assignUser(user, nil, nil)
	if err != nil {
		t.Fatalf("Failed to assign user. Error: %v", err)
	}
	if reason.Type != proto.Reason_DEFAULT {
		t.Fatalf("Failed to assign user. Reason type does not match. Current: %s, target: %v", reason.Type, proto.Reason_DEFAULT)
	}
	targetVariationID := "variation-B"
	if variation.Id != targetVariationID {
		t.Fatalf("Failed to assign user. Variation id does not match. Current: %s, target: %s", variation.Id, targetVariationID)
	}
}

// TestAssignUserSamplingSeed checks that changing SamplingSeed re-buckets a
// user within a rollout default strategy.
func TestAssignUserSamplingSeed(t *testing.T) {
	user := &userproto.User{
		Id:   "uid",
		Data: map[string]string{},
	}
	f := makeFeature("fid")
	f.DefaultStrategy = &proto.Strategy{
		Type: proto.Strategy_ROLLOUT,
		RolloutStrategy: &proto.RolloutStrategy{
			Variations: []*proto.RolloutStrategy_Variation{
				{
					Variation: f.Variations[0].Id,
					Weight:    30000,
				},
				{
					Variation: f.Variations[1].Id,
					Weight:    40000,
				},
				{
					Variation: f.Variations[2].Id,
					Weight:    30000,
				},
			},
		},
	}
	reason, variation, err := f.assignUser(user, nil, nil)
	if err != nil {
		t.Fatalf("Failed to assign user. Error: %v", err)
	}
	if reason.Type != proto.Reason_DEFAULT {
		t.Fatalf("Failed to assign user. Reason type does not match. Current: %s, target: %v", reason.Type, proto.Reason_DEFAULT)
	}
	if variation.Id != f.DefaultStrategy.RolloutStrategy.Variations[1].Variation {
		t.Fatalf("Failed to assign user. Variation id does not match. Current: %s, target: %s", variation.Id, f.DefaultStrategy.RolloutStrategy.Variations[1].Variation)
	}
	// Change sampling seed to change assigned variation.
	f.SamplingSeed = "test"
	reason, variation, err = f.assignUser(user, nil, nil)
	if err != nil {
		t.Fatalf("Failed to assign user. Error: %v", err)
	}
	if reason.Type != proto.Reason_DEFAULT {
		t.Fatalf("Failed to assign user. Reason type does not match. Current: %s, target: %v", reason.Type, proto.Reason_DEFAULT)
	}
	if variation.Id != f.DefaultStrategy.RolloutStrategy.Variations[0].Variation {
		t.Fatalf("Failed to assign user. Variation id does not match. Current: %s, target: %s", variation.Id, f.DefaultStrategy.RolloutStrategy.Variations[0].Variation)
	}
}

// TestRename checks the Name setter.
func TestRename(t *testing.T) {
	f := makeFeature("test-feature")
	name := "new name"
	f.Rename(name)
	assert.Equal(t, name, f.Name)
}

// TestChangeDescription checks the Description setter.
func TestChangeDescription(t *testing.T) {
	f := makeFeature("test-feature")
	desc := "new desc"
	f.ChangeDescription(desc)
	assert.Equal(t, desc, f.Description)
}

// TestAddTag checks adding a tag to a feature with no tags.
func TestAddTag(t *testing.T) {
	tag := "test-tag"
	f := makeFeature("test-feature")
	if len(f.Tags) > 0 {
		t.Fatalf("Failed to add tag. It should be empty before add a tag: %v", f.Tags)
	}
	f.AddTag(tag)
	if len(f.Tags) == 0 {
		t.Fatal("Failed to add tag. Tags is empty.")
	}
	if len(f.Tags) != 1 {
		t.Fatalf("Failed to add tag. Tags has more than one element: %v", f.Tags)
	}
	if f.Tags[0] != tag {
		t.Fatalf("Failed to add tag. Tag does not match, current: %s, target: %s", f.Tags[0], tag)
	}
}

// TestRemoveTag checks tag removal and the at-least-one-tag invariant.
func TestRemoveTag(t *testing.T) {
	tag1 := "test-tag1"
	tag2 := "test-tag2"
	f := makeFeature("test-feature")
	f.AddTag(tag1)
	f.AddTag(tag2)
	f.RemoveTag(tag1)
	if f.Tags[0] == tag1 {
		t.Fatalf("Failed to remove tag %s. Tags: %v", tag1, f.Tags)
	}
	if len(f.Tags) != 1 {
		t.Fatalf("Failed to remove tag. It should remove only 1: %v", f.Tags)
	}
	if err := f.RemoveTag(tag2); err != errTagsMustHaveAtLeastOneTag {
		t.Fatalf("Failed to remove tag. It must keep at least 1 tag %v", f.Tags)
	}
}

// TestEnable checks Enable is idempotent in state but errors when already on.
func TestEnable(t *testing.T) {
	t.Parallel()
	patterns := []struct {
		origin      bool
		expectedErr error
	}{
		{
			origin:      true,
			expectedErr: ErrAlreadyEnabled,
		},
		{
			origin:      false,
			expectedErr: nil,
		},
	}
	for _, p := range patterns {
		f := makeFeature("test-feature")
		f.Enabled = p.origin
		err := f.Enable()
		assert.Equal(t, p.expectedErr, err)
		assert.True(t, f.Enabled)
	}
}

// TestDisable checks Disable is idempotent in state but errors when already off.
func TestDisable(t *testing.T) {
	t.Parallel()
	patterns := []struct {
		origin      bool
		expectedErr error
	}{
		{
			origin:      false,
			expectedErr: ErrAlreadyDisabled,
		},
		{
			origin:      true,
			expectedErr: nil,
		},
	}
	for _, p := range patterns {
		f := makeFeature("test-feature")
		f.Enabled = p.origin
		err := f.Disable()
		assert.Equal(t, p.expectedErr, err)
		assert.False(t, f.Enabled)
	}
}

// TestArchive checks the Archived flag setter.
func TestArchive(t *testing.T) {
	f := makeFeature("test-feature")
	f.Archived = false
	f.Archive()
	assert.True(t, f.Archived)
}

// TestUnarchive checks that Unarchive clears the Archived flag.
func TestUnarchive(t *testing.T) {
	t.Parallel()
	f := makeFeature("test-feature")
	f.Archive()
	assert.True(t, f.Archived)
	f.Unarchive()
	assert.False(t, f.Archived)
}

// TestDelete checks the Deleted flag setter.
func TestDelete(t *testing.T) {
	f := makeFeature("test-feature")
	f.Deleted = false
	f.Delete()
	assert.True(t, f.Deleted)
}

// TestAddUserToVariation covers missing target, duplicate user (no growth)
// and successful addition.
func TestAddUserToVariation(t *testing.T) {
	f := makeFeature("test-feature")
	patterns := []struct {
		variation   string
		user        string
		idx         int
		expectedLen int
		expectedErr error
	}{
		{
			variation:   "",
			user:        "",
			idx:         -1,
			expectedLen: -1,
			expectedErr: errTargetNotFound,
		},
		{
			variation:   "variation-A",
			user:        "user1",
			idx:         0,
			expectedLen: 1,
			expectedErr: nil,
		},
		{
			variation:   "variation-A",
			user:        "newUser1",
			idx:         0,
			expectedLen: 2,
			expectedErr: nil,
		},
	}

	for _, p := range patterns {
		err := f.AddUserToVariation(p.variation, p.user)
		if err != nil {
			assert.Equal(t, p.expectedErr, err)
		} else {
			assert.Equal(t, p.expectedLen, len(f.Targets[p.idx].Users))
		}
	}
}

// TestRemoveUserFromVariation covers missing target, missing user, and
// successful removal.
func TestRemoveUserFromVariation(t *testing.T) {
	f := makeFeature("test-feature")
	patterns := []struct {
		variation   string
		user        string
		idx         int
		expectedLen int
		expectedErr error
	}{
		{
			variation:   "",
			user:        "",
			idx:         -1,
			expectedLen: -1,
			expectedErr: errTargetNotFound,
		},
		{
			variation:   "variation-A",
			user:        "newUser1",
			idx:         -1,
			expectedLen: -1,
			expectedErr: errValueNotFound,
		},
		{
			variation:   "variation-A",
			user:        "user1",
			idx:         0,
			expectedLen: 0,
			expectedErr: nil,
		},
	}

	for _, p := range patterns {
		err := f.RemoveUserFromVariation(p.variation, p.user)
		if err != nil {
			assert.Equal(t, p.expectedErr, err)
		} else {
			assert.Equal(t, p.expectedLen, len(f.Targets[p.idx].Users))
		}
	}
}

// TestAddFixedStrategyRule covers duplicate rule id, unknown variation, and
// successful addition of a fixed-strategy rule.
func TestAddFixedStrategyRule(t *testing.T) {
	f := makeFeature("test-feature")
	varitions := f.Variations
	patterns := []struct {
		id       string
		strategy *proto.Strategy
		expected error
	}{
		{
			id:       "rule-2",
			strategy: nil,
			expected: errRuleAlreadyExists,
		},
		{
			id: "rule-3",
			strategy: &proto.Strategy{
				Type:          proto.Strategy_FIXED,
				FixedStrategy: &proto.FixedStrategy{Variation: ""},
			},
			expected: errVariationNotFound,
		},
		{
			id: "rule-3",
			strategy: &proto.Strategy{
				Type:          proto.Strategy_FIXED,
				FixedStrategy: &proto.FixedStrategy{Variation: varitions[0].Id},
			},
			expected: nil,
		},
	}
	for _, p := range patterns {
		rule := &proto.Rule{
			Id:       p.id,
			Strategy: p.strategy,
		}
		err := f.AddRule(rule)
		assert.Equal(t, p.expected, err)
	}
	rule := &proto.Rule{
		Id:       patterns[2].id,
		Strategy: patterns[2].strategy,
	}
	assert.Equal(t, rule, f.Rules[2])
}

// TestAddRolloutStrategyRule covers duplicate rule id and rollout-strategy
// rule addition. (Definition continues beyond this chunk.)
func TestAddRolloutStrategyRule(t *testing.T) {
	f := makeFeature("test-feature")
	varitions := f.Variations
	patterns := []struct {
		id       string
		strategy *proto.Strategy
		expected error
	}{
		{
			id:       "rule-2",
			strategy: nil,
			expected: errRuleAlreadyExists,
		},
		{
id: "rule-3", + strategy: &proto.Strategy{ + Type: proto.Strategy_ROLLOUT, + RolloutStrategy: &proto.RolloutStrategy{ + Variations: []*proto.RolloutStrategy_Variation{ + { + Variation: varitions[0].Id, + Weight: 30000, + }, + { + Variation: "", + Weight: 70000, + }, + }, + }, + }, + expected: errVariationNotFound, + }, + { + id: "rule-3", + strategy: &proto.Strategy{ + Type: proto.Strategy_ROLLOUT, + RolloutStrategy: &proto.RolloutStrategy{ + Variations: []*proto.RolloutStrategy_Variation{ + { + Variation: varitions[0].Id, + Weight: 30000, + }, + { + Variation: varitions[1].Id, + Weight: 70000, + }, + }, + }, + }, + expected: nil, + }, + } + for _, p := range patterns { + rule := &proto.Rule{ + Id: p.id, + Strategy: p.strategy, + } + err := f.AddRule(rule) + assert.Equal(t, p.expected, err) + } + rule := &proto.Rule{ + Id: patterns[2].id, + Strategy: patterns[2].strategy, + } + assert.Equal(t, rule, f.Rules[2]) +} + +func TestChangeRuleStrategyToFixed(t *testing.T) { + f := makeFeature("test-feature") + r := f.Rules[0] + rID := r.Id + vID := f.Variations[1].Id + expected := &proto.Strategy{ + Type: proto.Strategy_FIXED, + FixedStrategy: &proto.FixedStrategy{Variation: vID}, + } + patterns := []*struct { + ruleID string + strategy *proto.Strategy + expected error + }{ + { + ruleID: "", + strategy: expected, + expected: errRuleNotFound, + }, + { + ruleID: rID, + strategy: &proto.Strategy{ + Type: proto.Strategy_FIXED, + FixedStrategy: &proto.FixedStrategy{Variation: ""}, + }, + expected: errVariationNotFound, + }, + { + ruleID: rID, + strategy: &proto.Strategy{ + Type: proto.Strategy_FIXED, + FixedStrategy: &proto.FixedStrategy{Variation: "variation-D"}, + }, + expected: errVariationNotFound, + }, + { + ruleID: "", + expected: errRuleNotFound, + }, + { + ruleID: rID, + strategy: expected, + expected: nil, + }, + } + for _, p := range patterns { + err := f.ChangeRuleStrategy(p.ruleID, p.strategy) + assert.Equal(t, p.expected, err) + } + if !reflect.DeepEqual(expected, 
r.Strategy) { + t.Fatalf("Strategy is not equal. Expected: %s, actual: %s", expected, r.Strategy) + } +} + +func TestChangeRuleToRolloutStrategy(t *testing.T) { + f := makeFeature("test-feature") + r := f.Rules[0] + rID := r.Id + vID1 := f.Variations[0].Id + vID2 := f.Variations[1].Id + expected := &proto.Strategy{ + Type: proto.Strategy_ROLLOUT, + RolloutStrategy: &proto.RolloutStrategy{Variations: []*proto.RolloutStrategy_Variation{ + { + Variation: vID1, + Weight: 30, + }, + { + Variation: vID2, + Weight: 70, + }, + }}, + } + patterns := []*struct { + ruleID string + strategy *proto.Strategy + expected error + }{ + { + ruleID: "", + strategy: expected, + expected: errRuleNotFound, + }, + { + ruleID: rID, + strategy: &proto.Strategy{ + Type: proto.Strategy_ROLLOUT, + RolloutStrategy: &proto.RolloutStrategy{Variations: []*proto.RolloutStrategy_Variation{ + { + Variation: "", + Weight: 30000, + }, + { + Variation: vID2, + Weight: 70000, + }, + }}, + }, + expected: errVariationNotFound, + }, + { + ruleID: rID, + strategy: &proto.Strategy{ + Type: proto.Strategy_ROLLOUT, + RolloutStrategy: &proto.RolloutStrategy{Variations: []*proto.RolloutStrategy_Variation{ + { + Variation: vID1, + Weight: 30000, + }, + { + Variation: "", + Weight: 70000, + }, + }}, + }, + expected: errVariationNotFound, + }, + { + ruleID: rID, + strategy: &proto.Strategy{ + Type: proto.Strategy_ROLLOUT, + RolloutStrategy: &proto.RolloutStrategy{Variations: []*proto.RolloutStrategy_Variation{ + { + Variation: vID1, + Weight: 30000, + }, + { + Variation: "variation-D", + Weight: 70000, + }, + }}, + }, + expected: errVariationNotFound, + }, + { + ruleID: "", + strategy: nil, + expected: errRuleNotFound, + }, + { + ruleID: rID, + strategy: expected, + expected: nil, + }, + } + for _, p := range patterns { + err := f.ChangeRuleStrategy(p.ruleID, p.strategy) + assert.Equal(t, p.expected, err) + } + if !reflect.DeepEqual(expected, r.Strategy) { + t.Fatalf("Strategy is not equal. 
Expected: %v, actual: %v", expected, r.Strategy) + } +} + +func TestDeleteRule(t *testing.T) { + f := makeFeature("test-feature") + patterns := []struct { + rule string + expectedLen int + expectedErr error + }{ + { + rule: "", + expectedLen: -1, + expectedErr: errRuleNotFound, + }, + { + rule: "rule-1", + expectedLen: 1, + expectedErr: nil, + }, + } + + for _, p := range patterns { + err := f.DeleteRule(p.rule) + if err != nil { + assert.Equal(t, p.expectedErr, err) + } else { + assert.Equal(t, p.expectedLen, len(f.Rules)) + } + } +} + +func TestDeleteClause(t *testing.T) { + f := makeFeature("test-feature") + patterns := []struct { + rule string + clause string + ruleIdx int + expectedLen int + expectedErr error + }{ + { + rule: "", + clause: "", + ruleIdx: -1, + expectedLen: -1, + expectedErr: errRuleNotFound, + }, + { + rule: "rule-1", + clause: "clause-1", + ruleIdx: 0, + expectedLen: 0, + expectedErr: nil, + }, + } + + for _, p := range patterns { + err := f.DeleteClause(p.rule, p.clause) + if err != nil { + assert.Equal(t, p.expectedErr, err) + } else { + assert.Equal(t, p.expectedLen, len(f.Rules[p.ruleIdx].Clauses)) + } + } +} + +func TestChangeClauseAttribute(t *testing.T) { + f := makeFeature("test-feature") + patterns := []struct { + rule string + clause string + attribute string + ruleIdx int + idx int + expectedErr error + }{ + { + rule: "rule-1", + clause: "clause-1", + attribute: "newAttribute", + ruleIdx: 0, + idx: 0, + expectedErr: nil, + }, + } + + for _, p := range patterns { + err := f.ChangeClauseAttribute(p.rule, p.clause, p.attribute) + if err != nil { + assert.Equal(t, p.expectedErr, err) + } else { + assert.Equal(t, p.attribute, f.Rules[p.ruleIdx].Clauses[p.idx].Attribute) + } + } +} + +func TestChangeClauseOperator(t *testing.T) { + f := makeFeature("test-feature") + patterns := []struct { + rule string + clause string + operator proto.Clause_Operator + ruleIdx int + idx int + expectedErr error + }{ + { + rule: "rule-1", + clause: 
"clause-1", + operator: proto.Clause_IN, + ruleIdx: 0, + idx: 0, + expectedErr: nil, + }, + } + + for _, p := range patterns { + err := f.ChangeClauseOperator(p.rule, p.clause, p.operator) + if err != nil { + assert.Equal(t, p.expectedErr, err) + } else { + assert.Equal(t, p.operator, f.Rules[p.ruleIdx].Clauses[p.idx].Operator) + } + } +} + +func TestAddClauseValueToFeature(t *testing.T) { + f := makeFeature("test-feature") + patterns := []struct { + rule string + clause string + value string + ruleIdx int + idx int + expectedLen int + expectedErr error + }{ + { + rule: "rule-1", + clause: "clause-1", + value: "newUser1", + ruleIdx: 0, + idx: 0, + expectedLen: 3, + expectedErr: nil, + }, + } + + for _, p := range patterns { + err := f.AddClauseValue(p.rule, p.clause, p.value) + if err != nil { + assert.Equal(t, p.expectedErr, err) + } else { + assert.Equal(t, p.expectedLen, len(f.Rules[p.ruleIdx].Clauses[p.idx].Values)) + } + } +} + +func TestRemoveClauseValueFromFeature(t *testing.T) { + f := makeFeature("test-feature") + patterns := []struct { + rule string + clause string + value string + ruleIdx int + idx int + expectedLen int + expectedErr error + }{ + { + rule: "rule-1", + clause: "clause-1", + value: "user1", + ruleIdx: 0, + idx: 0, + expectedLen: 1, + expectedErr: nil, + }, + } + + for _, p := range patterns { + err := f.RemoveClauseValue(p.rule, p.clause, p.value) + if err != nil { + assert.Equal(t, p.expectedErr, err) + } else { + assert.Equal(t, p.expectedLen, len(f.Rules[p.ruleIdx].Clauses[p.idx].Values)) + } + } +} + +func TestChangeVariationValue(t *testing.T) { + f := makeFeature("test-feature") + patterns := []struct { + id string + value string + idx int + expectedErr error + }{ + { + id: "variation-A", + value: "newValue", + idx: 0, + expectedErr: nil, + }, + } + + for _, p := range patterns { + err := f.ChangeVariationValue(p.id, p.value) + if err != nil { + assert.Equal(t, p.expectedErr, err) + } else { + assert.Equal(t, p.value, 
f.Variations[p.idx].Value) + } + } +} + +func TestChangeVariationName(t *testing.T) { + f := makeFeature("test-feature") + patterns := []struct { + id string + name string + idx int + expectedErr error + }{ + { + id: "variation-A", + name: "newName", + idx: 0, + expectedErr: nil, + }, + } + + for _, p := range patterns { + err := f.ChangeVariationName(p.id, p.name) + if err != nil { + assert.Equal(t, p.expectedErr, err) + } else { + assert.Equal(t, p.name, f.Variations[p.idx].Name) + } + } +} + +func TestChangeVariationDescription(t *testing.T) { + f := makeFeature("test-feature") + patterns := []struct { + id string + desc string + idx int + expectedErr error + }{ + { + id: "variation-A", + desc: "newDesc", + idx: 0, + expectedErr: nil, + }, + } + + for _, p := range patterns { + err := f.ChangeVariationDescription(p.id, p.desc) + if err != nil { + assert.Equal(t, p.expectedErr, err) + } else { + assert.Equal(t, p.desc, f.Variations[p.idx].Description) + } + } +} + +func TestListSegmentIDs(t *testing.T) { + f := makeFeature("test-feature") + expected := []string{"newUser1", "newUser2"} + newRule := &proto.Rule{ + Clauses: []*proto.Clause{ + {Operator: proto.Clause_SEGMENT, Values: expected}, + }, + } + f.Rules = append(f.Rules, newRule) + actual := f.ListSegmentIDs() + sort.Strings(actual) + assert.Equal(t, expected, actual) +} + +func TestRemoveVariationUsingFixedStrategy(t *testing.T) { + f := makeFeature("test-feature") + expected := "variation-C" + patterns := []*struct { + id string + expected error + }{ + { + id: "variation-A", + expected: errVariationInUse, + }, + { + id: "variation-B", + expected: errVariationInUse, + }, + { + id: expected, + expected: nil, + }, + } + for i, p := range patterns { + err := f.RemoveVariation(p.id) + des := fmt.Sprintf("index: %d", i) + assert.Equal(t, p.expected, err, des) + } + if _, err := f.findVariationIndex(expected); err == nil { + t.Fatalf("Variation not deleted. 
Actual: %v", f.Variations) + } + actualSize := len(f.Variations) + expectedSize := 2 + if expectedSize != actualSize { + t.Fatalf("Different sizes. Expected: %d, actual: %d", expectedSize, actualSize) + } +} + +func TestRemoveVariationUsingRolloutStrategy(t *testing.T) { + f := makeFeature("test-feature") + f.ChangeDefaultStrategy(&proto.Strategy{ + Type: proto.Strategy_ROLLOUT, + RolloutStrategy: &proto.RolloutStrategy{ + Variations: []*proto.RolloutStrategy_Variation{ + { + Variation: "variation-A", + Weight: 100000, + }, + { + Variation: "variation-B", + Weight: 70000, + }, + { + Variation: "variation-C", + Weight: 0, + }, + }, + }, + }) + expected := "variation-C" + patterns := []*struct { + id string + expected error + }{ + { + id: "variation-A", + expected: errVariationInUse, + }, + { + id: "variation-B", + expected: errVariationInUse, + }, + { + id: expected, + expected: nil, + }, + } + for i, p := range patterns { + err := f.RemoveVariation(p.id) + des := fmt.Sprintf("index: %d", i) + assert.Equal(t, p.expected, err, des) + } + if _, err := f.findVariationIndex(expected); err == nil { + t.Fatalf("Variation not deleted. Actual: %v", f.Variations) + } + actualSize := len(f.Variations) + expectedSize := 2 + if expectedSize != actualSize { + t.Fatalf("Different sizes. 
Expected: %d, actual: %d", expectedSize, actualSize) + } +} + +func TestRemoveVariationUsingOffVariation(t *testing.T) { + f := makeFeature("test-feature") + err := f.ChangeOffVariation("variation-C") + assert.NoError(t, err) + expected := "variation-D" + f.AddVariation( + expected, + "value", + "name", + "description", + ) + patterns := []*struct { + des, id string + expected error + }{ + { + des: "in use", + id: "variation-C", + expected: errVariationInUse, + }, + { + des: "success", + id: expected, + expected: nil, + }, + } + for _, p := range patterns { + err := f.RemoveVariation(p.id) + assert.Equal(t, p.expected, err, p.des) + } + if _, err := f.findVariationIndex(expected); err == nil { + t.Fatalf("Variation not deleted. Actual: %v", f.Variations) + } + actualSize := len(f.Variations) + expectedSize := 3 + if expectedSize != actualSize { + t.Fatalf("Different sizes. Expected: %d, actual: %d", expectedSize, actualSize) + } +} + +func TestChangeFixedStrategy(t *testing.T) { + f := makeFeature("test-feature") + r := f.Rules[0] + rID := r.Id + vID := f.Variations[1].Id + patterns := []*struct { + ruleID, variationID string + expected error + }{ + { + ruleID: "", + variationID: vID, + expected: errRuleNotFound, + }, + { + ruleID: rID, + variationID: "", + expected: errVariationNotFound, + }, + { + ruleID: "", + variationID: "", + expected: errRuleNotFound, + }, + { + ruleID: rID, + variationID: vID, + expected: nil, + }, + } + for _, p := range patterns { + err := f.ChangeFixedStrategy(p.ruleID, &proto.FixedStrategy{Variation: p.variationID}) + assert.Equal(t, p.expected, err) + } + if r.Strategy.FixedStrategy.Variation != vID { + t.Fatalf("Wrong variation id has been saved. 
Expected: %s, actual: %s", vID, r.Strategy.FixedStrategy.Variation) + } +} + +func TestChangeRolloutStrategy(t *testing.T) { + f := makeFeature("test-feature") + r := f.Rules[0] + rID := r.Id + vID1 := f.Variations[0].Id + vID2 := f.Variations[1].Id + expected := &proto.RolloutStrategy{Variations: []*proto.RolloutStrategy_Variation{ + { + Variation: vID1, + Weight: 30, + }, + { + Variation: vID2, + Weight: 70, + }, + }} + patterns := []*struct { + ruleID string + strategy *proto.RolloutStrategy + expected error + }{ + { + ruleID: "", + strategy: &proto.RolloutStrategy{}, + expected: errRuleNotFound, + }, + { + ruleID: rID, + strategy: &proto.RolloutStrategy{Variations: []*proto.RolloutStrategy_Variation{ + { + Variation: "", + Weight: 30, + }, + { + Variation: vID2, + Weight: 70, + }, + }}, + expected: errVariationNotFound, + }, + { + ruleID: rID, + strategy: &proto.RolloutStrategy{Variations: []*proto.RolloutStrategy_Variation{ + { + Variation: vID1, + Weight: 30, + }, + { + Variation: "", + Weight: 70, + }, + }}, + expected: errVariationNotFound, + }, + { + ruleID: "", + strategy: nil, + expected: errRuleNotFound, + }, + { + ruleID: rID, + strategy: expected, + expected: nil, + }, + } + for _, p := range patterns { + err := f.ChangeRolloutStrategy(p.ruleID, p.strategy) + assert.Equal(t, p.expected, err) + } + if !reflect.DeepEqual(expected, r.Strategy.RolloutStrategy) { + t.Fatalf("Different rollout strategies. 
Expected: %v, actual: %v", expected, r.Strategy.RolloutStrategy) + } +} + +func TestIsStale(t *testing.T) { + t.Parallel() + layout := "2006-01-02 15:04:05 -0700 MST" + t1, err := time.Parse(layout, "2014-01-01 0:00:00 +0000 UTC") + require.NoError(t, err) + t2, err := time.Parse(layout, "2014-03-31 23:59:59 +0000 UTC") + require.NoError(t, err) + t3, err := time.Parse(layout, "2014-04-01 0:00:00 +0000 UTC") + require.NoError(t, err) + patterns := map[string]struct { + feature *Feature + input time.Time + expected bool + }{ + "false": { + feature: &Feature{Feature: &proto.Feature{ + LastUsedInfo: &proto.FeatureLastUsedInfo{ + LastUsedAt: t1.Unix(), + }, + }}, + input: t2, + expected: false, + }, + "true": { + feature: &Feature{Feature: &proto.Feature{ + LastUsedInfo: &proto.FeatureLastUsedInfo{ + LastUsedAt: t1.Unix(), + }, + }}, + input: t3, + expected: true, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + assert.Equal(t, p.expected, p.feature.IsStale(p.input)) + }) + } +} + +func TestValidateVariation(t *testing.T) { + t.Parallel() + patterns := map[string]struct { + variationType feature.Feature_VariationType + value string + expected error + }{ + "invalid bool": { + variationType: feature.Feature_BOOLEAN, + value: "hoge", + expected: errVariationTypeUnmatched, + }, + "invalid number": { + variationType: feature.Feature_NUMBER, + value: `{"foo":"foo","fee":20,"hoo": [1, "lee", null], "boo": true}`, + expected: errVariationTypeUnmatched, + }, + "invalid json": { + variationType: feature.Feature_JSON, + value: "true", + expected: errVariationTypeUnmatched, + }, + "valid bool": { + variationType: feature.Feature_BOOLEAN, + value: "true", + expected: nil, + }, + "valid number float": { + variationType: feature.Feature_NUMBER, + value: "1.23", + expected: nil, + }, + "valid number int": { + variationType: feature.Feature_NUMBER, + value: "123", + expected: nil, + }, + "valid json": { + variationType: feature.Feature_JSON, + value: 
`{"foo":"foo","fee":20,"hoo": [1, "lee", null], "boo": true}`, + expected: nil, + }, + "valid string": { + variationType: feature.Feature_STRING, + value: `{"foo":"foo","fee":20,"hoo": [1, "lee", null], "boo": true}`, + expected: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + assert.Equal(t, p.expected, validateVariation(p.variationType, p.value)) + }) + } +} + +func TestNewClonedFeature(t *testing.T) { + t.Parallel() + pattenrs := []struct { + maintainer string + offVariationIndex int + expectedEnabled bool + expectedVersion int32 + defaultStrategy *feature.Strategy + rules []*feature.Rule + }{ + { + maintainer: "sample@example.com", + offVariationIndex: 2, + expectedEnabled: false, + expectedVersion: int32(1), + defaultStrategy: &proto.Strategy{ + Type: proto.Strategy_FIXED, + FixedStrategy: &proto.FixedStrategy{ + Variation: "variation-B", + }, + }, + rules: []*proto.Rule{ + { + Id: "rule-1", + Strategy: &proto.Strategy{ + Type: proto.Strategy_FIXED, + FixedStrategy: &proto.FixedStrategy{ + Variation: "variation-A", + }, + }, + Clauses: []*proto.Clause{ + { + Id: "clause-1", + Attribute: "name", + Operator: proto.Clause_EQUALS, + Values: []string{ + "user1", + "user2", + }, + }, + }, + }, + { + Id: "rule-2", + Strategy: &proto.Strategy{ + Type: proto.Strategy_FIXED, + FixedStrategy: &proto.FixedStrategy{ + Variation: "variation-B", + }, + }, + Clauses: []*proto.Clause{ + { + Id: "clause-2", + Attribute: "name", + Operator: proto.Clause_EQUALS, + Values: []string{ + "user3", + "user4", + }, + }, + }, + }, + }, + }, + { + maintainer: "sample@example.com", + offVariationIndex: 2, + expectedEnabled: false, + expectedVersion: int32(1), + defaultStrategy: &proto.Strategy{ + Type: proto.Strategy_ROLLOUT, + RolloutStrategy: &proto.RolloutStrategy{ + Variations: []*proto.RolloutStrategy_Variation{ + { + Variation: "variation-A", + Weight: 100000, + }, + { + Variation: "variation-B", + Weight: 70000, + }, + { + Variation: "variation-C", 
+ Weight: 0, + }, + }, + }, + }, + rules: []*proto.Rule{ + { + Id: "rule-1", + Strategy: &proto.Strategy{ + Type: proto.Strategy_ROLLOUT, + RolloutStrategy: &proto.RolloutStrategy{ + Variations: []*proto.RolloutStrategy_Variation{ + { + Variation: "variation-A", + Weight: 100000, + }, + { + Variation: "variation-B", + Weight: 70000, + }, + { + Variation: "variation-C", + Weight: 0, + }, + }, + }, + }, + }, + { + Id: "rule-2", + Strategy: &proto.Strategy{ + Type: proto.Strategy_ROLLOUT, + RolloutStrategy: &proto.RolloutStrategy{ + Variations: []*proto.RolloutStrategy_Variation{ + { + Variation: "variation-A", + Weight: 100, + }, + { + Variation: "variation-B", + Weight: 500, + }, + { + Variation: "variation-C", + Weight: 300, + }, + }, + }, + }, + }, + }, + }, + } + for _, p := range pattenrs { + f := makeFeature("test-feature") + f.Maintainer = "bucketeer@example.com" + f.OffVariation = f.Variations[p.offVariationIndex].Id + f.DefaultStrategy = p.defaultStrategy + f.Rules = p.rules + actual, err := f.Clone(p.maintainer) + assert.NoError(t, err) + assert.Equal(t, p.maintainer, actual.Maintainer) + assert.Equal(t, p.expectedEnabled, actual.Enabled) + assert.Equal(t, p.expectedVersion, actual.Version) + assert.Equal(t, actual.OffVariation, actual.Variations[p.offVariationIndex].Id) + for i := range actual.Variations { + assert.Equal(t, actual.Variations[i].Id, actual.Targets[i].Variation) + } + if actual.DefaultStrategy.Type == feature.Strategy_FIXED { + assert.Equal(t, actual.Variations[1].Id, actual.DefaultStrategy.FixedStrategy.Variation) + } else { + for i := range actual.Variations { + assert.Equal(t, actual.Variations[i].Id, actual.DefaultStrategy.RolloutStrategy.Variations[i].Variation) + } + } + for i := range actual.Rules { + if actual.Rules[i].Strategy.Type == feature.Strategy_FIXED { + assert.Equal(t, actual.Rules[i].Strategy.FixedStrategy.Variation, actual.Variations[i].Id) + } else { + for idx := range actual.Variations { + assert.Equal(t, 
actual.Rules[i].Strategy.RolloutStrategy.Variations[idx].Variation, actual.Variations[idx].Id) + } + } + } + } +} + +func TestResetSamplingSeed(t *testing.T) { + f := makeFeature("test-feature") + assert.Empty(t, f.SamplingSeed) + err := f.ResetSamplingSeed() + assert.NoError(t, err) + assert.NotEmpty(t, f.SamplingSeed) +} diff --git a/pkg/feature/domain/rule_evaluator.go b/pkg/feature/domain/rule_evaluator.go new file mode 100644 index 000000000..47ed16006 --- /dev/null +++ b/pkg/feature/domain/rule_evaluator.go @@ -0,0 +1,64 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package domain + +import ( + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" + userproto "github.com/bucketeer-io/bucketeer/proto/user" +) + +type ruleEvaluator struct { + clauseEvaluator +} + +func (e *ruleEvaluator) Evaluate( + rules []*featureproto.Rule, + user *userproto.User, + segmentUsers []*featureproto.SegmentUser, +) *featureproto.Rule { + for _, rule := range rules { + if e.evaluateRule(rule, user, segmentUsers) { + return rule + } + } + return nil +} + +func (e *ruleEvaluator) evaluateRule( + rule *featureproto.Rule, + user *userproto.User, + segmentUsers []*featureproto.SegmentUser, +) bool { + for _, clause := range rule.Clauses { + if !e.evaluateClause(clause, user, segmentUsers) { + return false + } + } + return true +} + +func (e *ruleEvaluator) evaluateClause( + clause *featureproto.Clause, + user *userproto.User, + segmentUsers []*featureproto.SegmentUser, +) bool { + var targetAttr string + if clause.Attribute == "id" { + targetAttr = user.Id + } else { + targetAttr = user.Data[clause.Attribute] + } + return e.clauseEvaluator.Evaluate(targetAttr, clause, user.Id, segmentUsers) +} diff --git a/pkg/feature/domain/rule_evaluator_test.go b/pkg/feature/domain/rule_evaluator_test.go new file mode 100644 index 000000000..9cfeeb856 --- /dev/null +++ b/pkg/feature/domain/rule_evaluator_test.go @@ -0,0 +1,252 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package domain + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" + userproto "github.com/bucketeer-io/bucketeer/proto/user" +) + +func TestRuleEvaluator(t *testing.T) { + f := newFeature() + testcases := []struct { + user *userproto.User + expected *featureproto.Rule + }{ + { + user: &userproto.User{ + Id: "user-id-1", + Data: map[string]string{"full-name": "bucketeer project"}, + }, + expected: f.Rules[0], + }, + { + user: &userproto.User{ + Id: "user-id-1", + Data: map[string]string{"first-name": "bucketeer"}, + }, + expected: f.Rules[1], + }, + { + user: &userproto.User{ + Id: "user-id-1", + Data: map[string]string{"last-name": "project"}, + }, + expected: f.Rules[2], + }, + { + user: &userproto.User{ + Id: "user-id-3", + Data: map[string]string{"email": "bucketeer@gmail.com"}, + }, + expected: f.Rules[4], + }, + { + user: &userproto.User{ + Id: "user-id-1", + Data: nil, + }, + expected: f.Rules[3], + }, + { + user: &userproto.User{ + Id: "user-id-2", + Data: nil, + }, + expected: f.Rules[3], + }, + { + user: &userproto.User{ + Id: "user-id-3", + Data: nil, + }, + expected: nil, + }, + { + user: &userproto.User{ + Id: "user-id-4", + Data: nil, + }, + expected: nil, + }, + } + values := newSegmentUserIDs() + ruleEvaluator := &ruleEvaluator{} + for i, tc := range testcases { + des := fmt.Sprintf("index: %d", i) + assert.Equal(t, tc.expected, ruleEvaluator.Evaluate(f.Rules, tc.user, values), des) + } +} + +func newFeature() *Feature { + return &Feature{ + Feature: &featureproto.Feature{ + Id: "feature-id", + Name: "test feature", + Version: 1, + CreatedAt: time.Now().Unix(), + Variations: []*featureproto.Variation{ + { + Id: "variation-A", + Value: "A", + Name: "Variation A", + Description: "Thing does A", + }, + { + Id: "variation-B", + Value: "B", + Name: "Variation B", + Description: "Thing does B", + }, + }, + Rules: []*featureproto.Rule{ + { + Id: "rule-id-1", 
+ Strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-A", + }, + }, + Clauses: []*featureproto.Clause{ + { + Id: "clause-id-1", + Attribute: "full-name", + Operator: featureproto.Clause_EQUALS, + Values: []string{"bucketeer project"}, + }, + }, + }, + { + Id: "rule-id-2", + Strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-A", + }, + }, + Clauses: []*featureproto.Clause{ + { + Id: "clause-id-2", + Attribute: "first-name", + Operator: featureproto.Clause_STARTS_WITH, + Values: []string{"buck"}, + }, + }, + }, + { + Id: "rule-id-3", + Strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-A", + }, + }, + Clauses: []*featureproto.Clause{ + { + Id: "clause-id-3", + Attribute: "last-name", + Operator: featureproto.Clause_ENDS_WITH, + Values: []string{"ject"}, + }, + }, + }, + { + Id: "rule-id-4", + Strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-B", + }, + }, + Clauses: []*featureproto.Clause{ + { + Id: "clause-id-4", + Attribute: "", + Operator: featureproto.Clause_SEGMENT, + Values: []string{ + "segment-id-1", + "segment-id-2", + }, + }, + }, + }, + { + Id: "rule-id-5", + Strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-B", + }, + }, + Clauses: []*featureproto.Clause{ + { + Id: "clause-id-5", + Attribute: "email", + Operator: featureproto.Clause_IN, + Values: []string{"bucketeer@gmail.com"}, + }, + }, + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-B", + }, + }, + }, + } +} + +func newSegmentUserIDs() (values 
[]*featureproto.SegmentUser) { + values = append(values, &featureproto.SegmentUser{ + UserId: "user-id-1", + SegmentId: "segment-id-1", + State: featureproto.SegmentUser_INCLUDED, + }) + values = append(values, &featureproto.SegmentUser{ + UserId: "user-id-1", + SegmentId: "segment-id-2", + State: featureproto.SegmentUser_INCLUDED, + }) + values = append(values, &featureproto.SegmentUser{ + UserId: "user-id-2", + SegmentId: "segment-id-1", + State: featureproto.SegmentUser_INCLUDED, + }) + values = append(values, &featureproto.SegmentUser{ + UserId: "user-id-2", + SegmentId: "segment-id-2", + State: featureproto.SegmentUser_INCLUDED, + }) + values = append(values, &featureproto.SegmentUser{ + UserId: "user-id-3", + SegmentId: "segment-id-1", + State: featureproto.SegmentUser_INCLUDED, + }) + values = append(values, &featureproto.SegmentUser{ + UserId: "user-id-4", + SegmentId: "segment-id-2", + State: featureproto.SegmentUser_INCLUDED, + }) + return values +} diff --git a/pkg/feature/domain/segment.go b/pkg/feature/domain/segment.go new file mode 100644 index 000000000..a396f268a --- /dev/null +++ b/pkg/feature/domain/segment.go @@ -0,0 +1,209 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package domain + +import ( + "time" + + "github.com/bucketeer-io/bucketeer/pkg/uuid" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +type Segment struct { + *featureproto.Segment +} + +func NewSegment(name string, description string) (*Segment, error) { + id, err := uuid.NewUUID() + if err != nil { + return nil, err + } + return &Segment{ + Segment: &featureproto.Segment{ + Id: id.String(), + Name: name, + Description: description, + Version: 1, + CreatedAt: time.Now().Unix(), + }, + }, nil +} + +func (s *Segment) SetDeleted() error { + s.Segment.Deleted = true + s.Segment.UpdatedAt = time.Now().Unix() + return nil +} + +func (s *Segment) ChangeName(name string) error { + s.Name = name + s.UpdatedAt = time.Now().Unix() + return nil +} + +func (s *Segment) ChangeDescription(description string) error { + s.Description = description + s.UpdatedAt = time.Now().Unix() + return nil +} + +func (s *Segment) AddRule(rule *featureproto.Rule) error { + if _, err := s.findRuleIndex(rule.Id); err == nil { + return errRuleAlreadyExists + } + s.Rules = append(s.Rules, rule) + s.UpdatedAt = time.Now().Unix() + return nil +} + +func (s *Segment) DeleteRule(rule string) error { + idx, err := s.findRuleIndex(rule) + if err != nil { + return err + } + s.Rules = append(s.Rules[:idx], s.Rules[idx+1:]...) 
+ s.UpdatedAt = time.Now().Unix() + return nil +} + +func (s *Segment) AddClause(ruleID string, clause *featureproto.Clause) error { + idx, err := s.findRuleIndex(ruleID) + if err != nil { + return err + } + rule := s.Rules[idx] + if _, err := s.findClauseIndex(clause.Id, rule.Clauses); err == nil { + return errClauseAlreadyExists + } + rule.Clauses = append(rule.Clauses, clause) + s.UpdatedAt = time.Now().Unix() + return nil +} + +func (s *Segment) DeleteClause(ruleID string, clauseID string) error { + ruleIdx, err := s.findRuleIndex(ruleID) + if err != nil { + return err + } + rule := s.Rules[ruleIdx] + if len(rule.Clauses) <= 1 { + return errRuleMustHaveAtLeastOneClause + } + clauseIdx, err := s.findClauseIndex(clauseID, rule.Clauses) + if err != nil { + return err + } + rule.Clauses = append(rule.Clauses[:clauseIdx], rule.Clauses[clauseIdx+1:]...) + s.UpdatedAt = time.Now().Unix() + return nil +} + +func (s *Segment) ChangeClauseAttribute(ruleID string, clauseID string, attribute string) error { + clause, err := s.findClause(ruleID, clauseID) + if err != nil { + return err + } + clause.Attribute = attribute + s.UpdatedAt = time.Now().Unix() + return nil +} + +func (s *Segment) ChangeClauseOperator(ruleID string, clauseID string, operator featureproto.Clause_Operator) error { + clause, err := s.findClause(ruleID, clauseID) + if err != nil { + return err + } + clause.Operator = operator + s.UpdatedAt = time.Now().Unix() + return nil +} + +func (s *Segment) AddClauseValue(ruleID string, clauseID string, value string) error { + clause, err := s.findClause(ruleID, clauseID) + if err != nil { + return err + } + clause.Values = append(clause.Values, value) + s.UpdatedAt = time.Now().Unix() + return nil +} + +func (s *Segment) RemoveClauseValue(ruleID string, clauseID string, value string) error { + clause, err := s.findClause(ruleID, clauseID) + if err != nil { + return err + } + if len(clause.Values) <= 1 { + return errClauseMustHaveAtLeastOneValue + } + idx, err := 
index(value, clause.Values) + if err != nil { + return err + } + clause.Values = append(clause.Values[:idx], clause.Values[idx+1:]...) + s.UpdatedAt = time.Now().Unix() + return nil +} + +func (s *Segment) AddIncludedUserCount(count int64) { + s.IncludedUserCount += count + s.UpdatedAt = time.Now().Unix() +} + +func (s *Segment) RemoveIncludedUserCount(count int64) { + s.IncludedUserCount -= count + s.UpdatedAt = time.Now().Unix() +} + +func (s *Segment) SetIncludedUserCount(count int64) { + s.IncludedUserCount = count + s.UpdatedAt = time.Now().Unix() +} + +func (s *Segment) findRuleIndex(id string) (int, error) { + for i, r := range s.Rules { + if r.Id == id { + return i, nil + } + } + return -1, errRuleNotFound +} + +func (s *Segment) findClauseIndex(clauseID string, clauses []*featureproto.Clause) (int, error) { + for i, c := range clauses { + if c.Id == clauseID { + return i, nil + } + } + return -1, errClauseNotFound +} + +func (s *Segment) findClause(ruleID string, clauseID string) (*featureproto.Clause, error) { + ruleIdx, err := s.findRuleIndex(ruleID) + if err != nil { + return nil, err + } + rule := s.Rules[ruleIdx] + clauseIdx, err := s.findClauseIndex(clauseID, rule.Clauses) + if err != nil { + return nil, errClauseNotFound + } + return rule.Clauses[clauseIdx], nil +} + +func (s *Segment) SetStatus(status featureproto.Segment_Status) { + s.Status = status + s.UpdatedAt = time.Now().Unix() +} diff --git a/pkg/feature/domain/segment_evaluator.go b/pkg/feature/domain/segment_evaluator.go new file mode 100644 index 000000000..0ff21fc16 --- /dev/null +++ b/pkg/feature/domain/segment_evaluator.go @@ -0,0 +1,60 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package domain + +import ( + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +type segmentEvaluator struct { +} + +func (e *segmentEvaluator) Evaluate(segmentIDs []string, userID string, segmentUsers []*featureproto.SegmentUser) bool { + return e.findSegmentUser(segmentIDs, userID, featureproto.SegmentUser_INCLUDED, segmentUsers) +} + +func (e *segmentEvaluator) findSegmentUser( + segmentIDs []string, + userID string, + state featureproto.SegmentUser_State, + segmentUsers []*featureproto.SegmentUser, +) bool { + for _, segmentID := range segmentIDs { + if !e.containsSegmentUser(segmentID, userID, state, segmentUsers) { + return false + } + } + return true +} + +func (e *segmentEvaluator) containsSegmentUser( + segmentID, userID string, + state featureproto.SegmentUser_State, + segmentUsers []*featureproto.SegmentUser, +) bool { + for _, user := range segmentUsers { + if user.SegmentId != segmentID { + continue + } + if user.UserId != userID { + continue + } + if user.State != state { + continue + } + return true + } + return false +} diff --git a/pkg/feature/domain/segment_test.go b/pkg/feature/domain/segment_test.go new file mode 100644 index 000000000..95de201c0 --- /dev/null +++ b/pkg/feature/domain/segment_test.go @@ -0,0 +1,265 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package domain

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	featureproto "github.com/bucketeer-io/bucketeer/proto/feature"
)

// TestAddClauseValueToSegment drives AddClauseValue through the
// rule-not-found, clause-not-found and success paths.
// NOTE: the cases are order-dependent — the success case mutates the
// shared segment s, and `size` asserts the cumulative value count.
func TestAddClauseValueToSegment(t *testing.T) {
	testcases := []struct {
		ruleID, clauseID string
		size             int
		expected         error
	}{
		{
			ruleID:   "rule-id-2",
			clauseID: "clause-id-2",
			size:     2,
			expected: errRuleNotFound,
		},
		{
			ruleID:   "rule-id-1",
			clauseID: "clause-id-2",
			size:     2,
			expected: errClauseNotFound,
		},
		{
			ruleID:   "rule-id-1",
			clauseID: "clause-id-1",
			size:     3,
			expected: nil,
		},
	}
	s := newSegment(t)
	rule := &featureproto.Rule{
		Id: "rule-id-1",
		Clauses: []*featureproto.Clause{
			{
				Id:     "clause-id-1",
				Values: []string{"value-1", "value-2"},
			},
		},
	}
	err := s.AddRule(rule)
	require.NoError(t, err)
	for i, tc := range testcases {
		des := fmt.Sprintf("index: %d", i)
		err := s.AddClauseValue(tc.ruleID, tc.clauseID, "value-3")
		assert.Equal(t, tc.expected, err, des)
		assert.Equal(t, tc.size, len(s.Rules[0].Clauses[0].Values), des)
	}
	// `index` is the package-level value-lookup helper (defined in a
	// sibling file); a found index and a nil error must coincide.
	clause, err := s.findClause(rule.Id, rule.Clauses[0].Id)
	require.NoError(t, err)
	idx, err := index("value-3", clause.Values)
	assert.Equal(t, idx != -1, err == nil)
}

// TestRemoveClauseValueFromSegment drives RemoveClauseValue through every
// failure path and the success path.
// NOTE: the cases are order-dependent — the successful removal shrinks
// the shared clause, which is what makes the final
// errClauseMustHaveAtLeastOneValue case reachable.
func TestRemoveClauseValueFromSegment(t *testing.T) {
	testcases := []struct {
		ruleID, clauseID, value string
		size                    int
		expected                error
	}{
		{
			ruleID:   "rule-id-2",
			clauseID: "clause-id-2",
			value:    "value-3",
			size:     2,
			expected: errRuleNotFound,
		},
		{
			ruleID:   "rule-id-1",
			clauseID: "clause-id-2",
			value:    "value-3",
			size:     2,
			expected: errClauseNotFound,
		},
		{
			ruleID:   "rule-id-1",
			clauseID: "clause-id-1",
			value:    "value-3",
			size:     2,
			expected: errValueNotFound,
		},
		{
			ruleID:   "rule-id-1",
			clauseID: "clause-id-1",
			value:    "value-2",
			size:     1,
			expected: nil,
		},
		{
			ruleID:   "rule-id-1",
			clauseID: "clause-id-1",
			value:    "value-1",
			size:     1,
			expected: errClauseMustHaveAtLeastOneValue,
		},
	}
	s := newSegment(t)
	rule := &featureproto.Rule{
		Id: "rule-id-1",
		Clauses: []*featureproto.Clause{
			{
				Id:     "clause-id-1",
				Values: []string{"value-1", "value-2"},
			},
		},
	}
	err := s.AddRule(rule)
	require.NoError(t, err)
	for i, tc := range testcases {
		des := fmt.Sprintf("index: %d", i)
		err := s.RemoveClauseValue(tc.ruleID, tc.clauseID, tc.value)
		assert.Equal(t, tc.expected, err, des)
		assert.Equal(t, tc.size, len(s.Rules[0].Clauses[0].Values), des)
	}
	// The removed value must no longer be findable via `index`.
	clause, err := s.findClause(rule.Id, rule.Clauses[0].Id)
	require.NoError(t, err)
	idx, err := index("value-2", clause.Values)
	assert.Equal(t, idx == -1, err == errValueNotFound)
}

// TestFindRuleIndex checks both the missing-rule and found-rule paths.
func TestFindRuleIndex(t *testing.T) {
	testcases := []struct {
		ruleID   string
		index    int
		expected error
	}{
		{
			ruleID:   "rule-id-2",
			index:    -1,
			expected: errRuleNotFound,
		},
		{
			ruleID:   "rule-id-1",
			index:    0,
			expected: nil,
		},
	}
	s := newSegment(t)
	rule := &featureproto.Rule{Id: "rule-id-1"}
	err := s.AddRule(rule)
	require.NoError(t, err)
	for i, tc := range testcases {
		des := fmt.Sprintf("index: %d", i)
		index, err := s.findRuleIndex(tc.ruleID)
		assert.Equal(t, tc.expected, err, des)
		assert.Equal(t, tc.index, index, des)
	}
}

// TestFindClauseIndex checks lookup against a fixed two-clause slice;
// the receiver segment carries no state relevant to this method.
func TestFindClauseIndex(t *testing.T) {
	testcases := []struct {
		clauseID string
		index    int
		expected error
	}{
		{
			clauseID: "clause-id-3",
			index:    -1,
			expected: errClauseNotFound,
		},
		{
			clauseID: "clause-id-2",
			index:    1,
			expected: nil,
		},
		{
			clauseID: "clause-id-1",
			index:    0,
			expected: nil,
		},
	}
	clauses := []*featureproto.Clause{
		{Id: "clause-id-1"},
		{Id: "clause-id-2"},
	}
	s := newSegment(t)
	for i, tc := range testcases {
		des := fmt.Sprintf("index: %d", i)
		index, err := s.findClauseIndex(tc.clauseID, clauses)
		assert.Equal(t, tc.expected, err, des)
		assert.Equal(t, tc.index, index, des)
	}
}

// TestFindClause resolves (ruleID, clauseID) pairs to the clause pointer
// itself, covering rule-missing, clause-missing and both found clauses.
func TestFindClause(t *testing.T) {
	clause1 := &featureproto.Clause{
		Id: "clause-id-1",
	}
	clause2 := &featureproto.Clause{
		Id: "clause-id-2",
	}
	testcases := []struct {
		ruleID, clauseID string
		clause           *featureproto.Clause
		expected         error
	}{
		{
			ruleID:   "rule-id-2",
			clauseID: "clause-id-1",
			clause:   nil,
			expected: errRuleNotFound,
		},
		{
			ruleID:   "rule-id-1",
			clauseID: "clause-id-3",
			clause:   nil,
			expected: errClauseNotFound,
		},
		{
			ruleID:   "rule-id-1",
			clauseID: "clause-id-2",
			clause:   clause2,
			expected: nil,
		},
		{
			ruleID:   "rule-id-1",
			clauseID: "clause-id-1",
			clause:   clause1,
			expected: nil,
		},
	}
	s := newSegment(t)
	rule := &featureproto.Rule{
		Id: "rule-id-1",
		Clauses: []*featureproto.Clause{
			clause1,
			clause2,
		},
	}
	err := s.AddRule(rule)
	require.NoError(t, err)
	for i, tc := range testcases {
		des := fmt.Sprintf("index: %d", i)
		clause, err := s.findClause(tc.ruleID, tc.clauseID)
		assert.Equal(t, tc.expected, err, des)
		assert.Equal(t, tc.clause, clause, des)
	}
}

// newSegment is the shared fixture: a fresh segment with fixed
// name/description, failing the test on constructor error.
func newSegment(t *testing.T) *Segment {
	s, err := NewSegment("name", "description")
	require.NoError(t, err)
	return s
}
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package domain + +import ( + "fmt" + + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +type SegmentUser struct { + *featureproto.SegmentUser +} + +func NewSegmentUser(segmentID string, userID string, state featureproto.SegmentUser_State, deleted bool) *SegmentUser { + id := SegmentUserID(segmentID, userID, state) + return &SegmentUser{ + SegmentUser: &featureproto.SegmentUser{ + Id: id, + SegmentId: segmentID, + UserId: userID, + State: state, + Deleted: deleted, + }, + } +} + +func SegmentUserID(segmentID string, userID string, state featureproto.SegmentUser_State) string { + return fmt.Sprintf("%s:%s:%v", segmentID, userID, state) +} diff --git a/pkg/feature/domain/strategy_evaluator.go b/pkg/feature/domain/strategy_evaluator.go new file mode 100644 index 000000000..f1bf4f508 --- /dev/null +++ b/pkg/feature/domain/strategy_evaluator.go @@ -0,0 +1,85 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package domain + +import ( + "crypto/md5" + "encoding/hex" + "fmt" + "strconv" + + "github.com/bucketeer-io/bucketeer/proto/feature" +) + +const max = float64(0xffffffffffffffff) + +type strategyEvaluator struct { +} + +func (e *strategyEvaluator) Evaluate( + strategy *feature.Strategy, + userID string, + variations []*feature.Variation, + featureID string, + samplingSeed string, +) (*feature.Variation, error) { + switch strategy.Type { + case feature.Strategy_FIXED: + return findVariation(strategy.FixedStrategy.Variation, variations) + case feature.Strategy_ROLLOUT: + variationID, err := e.rollout(strategy.RolloutStrategy, userID, featureID, samplingSeed) + if err != nil { + return nil, err + } + return findVariation(variationID, variations) + } + return nil, errUnsupportedStrategy +} + +func (e *strategyEvaluator) rollout( + strategy *feature.RolloutStrategy, + userID, featureID, samplingSeed string, +) (string, error) { + bucket, err := e.bucket(userID, featureID, samplingSeed) + if err != nil { + return "", err + } + sum := 0.0 + for i := range strategy.Variations { + sum += float64(strategy.Variations[i].Weight) / 100000.0 + if bucket < sum { + return strategy.Variations[i].Variation, nil + } + } + return "", errVariationNotFound +} + +func (e *strategyEvaluator) bucket(userID string, featureID string, samplingSeed string) (float64, error) { + hash := e.hash(userID, featureID, samplingSeed) + // use first 16 characters (hex string) / first 8 bytes (byte array) + intVal, err := strconv.ParseUint(hex.EncodeToString(hash[:])[:16], 16, 64) + if err != nil { + return 0.0, err + } + return float64(intVal) / max, nil +} + +func (e *strategyEvaluator) hash(userID string, featureID string, samplingSeed string) [16]byte { + // concat feature test id and user id + // TODO: explain why this makes sense? Why does it make sense to add 'prerequisit' key here? 
+ concat := fmt.Sprintf("%s-%s%s", featureID, userID, samplingSeed) + // returns 16 bytes which, if shown as hex string, has 32 characters + return md5.Sum([]byte(concat)) +} diff --git a/pkg/feature/domain/tag.go b/pkg/feature/domain/tag.go new file mode 100644 index 000000000..533b6b093 --- /dev/null +++ b/pkg/feature/domain/tag.go @@ -0,0 +1,36 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package domain + +import ( + "time" + + "github.com/bucketeer-io/bucketeer/proto/feature" +) + +type Tag struct { + *feature.Tag +} + +func NewTag(id string) *Tag { + now := time.Now().Unix() + return &Tag{ + Tag: &feature.Tag{ + Id: id, + CreatedAt: now, + UpdatedAt: now, + }, + } +} diff --git a/pkg/feature/domain/user_evaluations.go b/pkg/feature/domain/user_evaluations.go new file mode 100644 index 000000000..9ad427705 --- /dev/null +++ b/pkg/feature/domain/user_evaluations.go @@ -0,0 +1,64 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package domain + +import ( + "fmt" + "hash/fnv" + "sort" + "strconv" + "time" + + "github.com/bucketeer-io/bucketeer/proto/feature" +) + +type UserEvaluations struct { + *feature.UserEvaluations +} + +func NewUserEvaluations(id string, evaluations []*feature.Evaluation) *UserEvaluations { + now := time.Now().Unix() + return &UserEvaluations{&feature.UserEvaluations{ + Id: id, + Evaluations: evaluations, + CreatedAt: now, + }} +} + +func UserEvaluationsID(userID string, userMetadata map[string]string, features []*feature.Feature) string { + sort.SliceStable(features, func(i, j int) bool { + return features[i].Id < features[j].Id + }) + // TODO: consider about a better hash algorithm? + h := fnv.New64a() + h.Write([]byte(userID)) // nolint:errcheck + keys := sortMapKeys(userMetadata) + for _, key := range keys { + fmt.Fprintf(h, "%s:%s", key, userMetadata[key]) + } + for _, feature := range features { + fmt.Fprintf(h, "%s:%d", feature.Id, feature.Version) + } + return strconv.FormatUint(h.Sum64(), 10) +} + +func sortMapKeys(data map[string]string) []string { + keys := make([]string, 0, len(data)) + for key := range data { + keys = append(keys, key) + } + sort.Strings(keys) + return keys +} diff --git a/pkg/feature/domain/user_evaluations_test.go b/pkg/feature/domain/user_evaluations_test.go new file mode 100644 index 000000000..6ddbcac70 --- /dev/null +++ b/pkg/feature/domain/user_evaluations_test.go @@ -0,0 +1,75 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package domain + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + proto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +func TestNewUserEvaluations(t *testing.T) { + patterns := []struct { + id string + evaluations []*proto.Evaluation + expected *proto.UserEvaluations + }{ + { + id: "1234", + evaluations: []*proto.Evaluation{{Id: "test-id"}}, + expected: &proto.UserEvaluations{ + Id: "1234", + Evaluations: []*proto.Evaluation{{Id: "test-id"}}, + }, + }, + } + + for _, p := range patterns { + actual := NewUserEvaluations(p.id, p.evaluations) + assert.Equal(t, p.expected.Id, actual.Id) + assert.Equal(t, p.expected.Evaluations, actual.Evaluations) + assert.NotZero(t, actual.CreatedAt) + } +} + +func TestSortMapKeys(t *testing.T) { + patterns := []struct { + input map[string]string + expected []string + desc string + }{ + { + input: nil, + expected: []string{}, + desc: "nil", + }, + { + input: map[string]string{}, + expected: []string{}, + desc: "empty", + }, + { + input: map[string]string{"b": "value-b", "c": "value-c", "a": "value-a", "d": "value-d"}, + expected: []string{"a", "b", "c", "d"}, + desc: "success", + }, + } + for _, p := range patterns { + keys := sortMapKeys(p.input) + assert.Equal(t, p.expected, keys, p.desc) + } +} diff --git a/pkg/feature/recorder/BUILD.bazel b/pkg/feature/recorder/BUILD.bazel new file mode 100644 index 000000000..dd8a67258 --- /dev/null +++ b/pkg/feature/recorder/BUILD.bazel @@ -0,0 +1,44 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = 
"go_default_library", + srcs = [ + "metrics.go", + "recorder.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/feature/recorder", + visibility = ["//visibility:public"], + deps = [ + "//pkg/errgroup:go_default_library", + "//pkg/feature/domain:go_default_library", + "//pkg/feature/storage/v2:go_default_library", + "//pkg/health:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/pubsub/puller:go_default_library", + "//pkg/pubsub/puller/codes:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//proto/event/client:go_default_library", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_golang_protobuf//ptypes:go_default_library_gen", + "@com_github_prometheus_client_golang//prometheus:go_default_library", + "@io_bazel_rules_go//proto/wkt:any_go_proto", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["recorder_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/feature/domain:go_default_library", + "//pkg/pubsub/puller:go_default_library", + "//pkg/storage/v2/mysql/mock:go_default_library", + "//proto/event/client:go_default_library", + "//proto/user:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_golang_protobuf//ptypes:go_default_library_gen", + "@com_github_stretchr_testify//assert:go_default_library", + ], +) diff --git a/pkg/feature/recorder/metrics.go b/pkg/feature/recorder/metrics.go new file mode 100644 index 000000000..3364a2daa --- /dev/null +++ b/pkg/feature/recorder/metrics.go @@ -0,0 +1,46 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package recorder + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/bucketeer-io/bucketeer/pkg/metrics" +) + +var ( + receivedCounter = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "feature", + Name: "recorder_received_total", + Help: "Total number of received messages", + }, + ) + handledCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "feature", + Name: "recorder_handled_total", + Help: "Total number of handled messages", + }, []string{"code"}) +) + +func registerMetrics(r metrics.Registerer) { + r.MustRegister( + receivedCounter, + handledCounter, + ) +} diff --git a/pkg/feature/recorder/recorder.go b/pkg/feature/recorder/recorder.go new file mode 100644 index 000000000..4398f0495 --- /dev/null +++ b/pkg/feature/recorder/recorder.go @@ -0,0 +1,329 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package recorder + +import ( + "context" + "time" + + "github.com/golang/protobuf/proto" // nolint:staticcheck + "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/ptypes/any" + "go.uber.org/zap" + + "github.com/bucketeer-io/bucketeer/pkg/errgroup" + "github.com/bucketeer-io/bucketeer/pkg/feature/domain" + ftstorage "github.com/bucketeer-io/bucketeer/pkg/feature/storage/v2" + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller/codes" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + + "github.com/bucketeer-io/bucketeer/proto/event/client" +) + +const ( + userDataAppVersion = "app_version" +) + +type options struct { + maxMPS int + numWorkers int + flushInterval time.Duration + startupInterval time.Duration + metrics metrics.Registerer + logger *zap.Logger +} + +type Option func(*options) + +func WithMaxMPS(mps int) Option { + return func(opts *options) { + opts.maxMPS = mps + } +} + +func WithFlushInterval(interval time.Duration) Option { + return func(opts *options) { + opts.flushInterval = interval + } +} + +func WithStartupInterval(interval time.Duration) Option { + return func(opts *options) { + opts.startupInterval = interval + } +} + +func WithMetrics(r metrics.Registerer) Option { + return func(opts *options) { + opts.metrics = r + } +} + +func WithLogger(l *zap.Logger) Option { + return func(opts *options) { + opts.logger = l + } +} + +type Recorder interface { + Check(context.Context) health.Status + Run() error + Stop() +} + +type recorder struct { + puller puller.RateLimitedPuller + storageClient mysql.Client + opts *options + group errgroup.Group + logger *zap.Logger + ctx context.Context + cancel func() + doneCh chan struct{} +} + +type lastUsedInfoCache map[string]*domain.FeatureLastUsedInfo +type environmentLastUsedInfoCache map[string]lastUsedInfoCache + +func 
NewRecorder(p puller.Puller, sc mysql.Client, opts ...Option) Recorder { + ctx, cancel := context.WithCancel(context.Background()) + dopts := &options{ + maxMPS: 1000, + numWorkers: 1, + logger: zap.NewNop(), + flushInterval: time.Minute, + startupInterval: time.Second, + } + for _, opt := range opts { + opt(dopts) + } + if dopts.metrics != nil { + registerMetrics(dopts.metrics) + } + return &recorder{ + puller: puller.NewRateLimitedPuller(p, dopts.maxMPS), + storageClient: sc, + opts: dopts, + logger: dopts.logger.Named("recorder"), + ctx: ctx, + cancel: cancel, + doneCh: make(chan struct{}), + } +} + +// Run starts workers. +// To distribute requests to DB, sleep for a second when starting each worker. +func (r *recorder) Run() error { + defer close(r.doneCh) + r.group.Go(func() error { + return r.puller.Run(r.ctx) + }) + for i := 0; i < r.opts.numWorkers; i++ { + r.group.Go(r.runWorker) + time.Sleep(r.opts.startupInterval) + } + return r.group.Wait() +} + +func (r *recorder) Stop() { + r.cancel() + <-r.doneCh +} + +func (r *recorder) Check(ctx context.Context) health.Status { + select { + case <-r.ctx.Done(): + r.logger.Error("Unhealthy due to context Done is closed", zap.Error(r.ctx.Err())) + return health.Unhealthy + default: + if r.group.FinishedCount() > 0 { + r.logger.Error("Unhealthy", zap.Int32("FinishedCount", r.group.FinishedCount())) + return health.Unhealthy + } + return health.Healthy + } +} + +func (r *recorder) runWorker() error { + timer := time.NewTimer(r.opts.flushInterval) + defer timer.Stop() + envCache := environmentLastUsedInfoCache{} + defer r.writeEnvLastUsedInfo(envCache) + for { + select { + case <-r.ctx.Done(): + return nil + case msg, ok := <-r.puller.MessageCh(): + if !ok { + return nil + } + receivedCounter.Inc() + event, err := r.unmarshalMessage(msg) + if err != nil { + msg.Ack() + handledCounter.WithLabelValues(codes.BadMessage.String()).Inc() + continue + } + evaluationEvent, err := r.unmarshalEvent(event.Event) + if err != nil { 
+ msg.Ack() + handledCounter.WithLabelValues(codes.BadMessage.String()).Inc() + continue + } + r.cacheEnvLastUsedInfo(evaluationEvent, envCache, event.EnvironmentNamespace) + msg.Ack() + handledCounter.WithLabelValues(codes.OK.String()).Inc() + case <-timer.C: + r.writeEnvLastUsedInfo(envCache) + envCache = make(environmentLastUsedInfoCache, len(envCache)) + timer.Reset(r.opts.flushInterval) + } + } +} + +func (r *recorder) cacheEnvLastUsedInfo( + e *client.EvaluationEvent, + envCache environmentLastUsedInfoCache, + environmentNamespace string, +) { + // FIXME: Until the Web SDK is released including the fix below, + // We need to ignore the error, otherwise the Feature Flag Status used won't be updated + // https://github.com/bucketeer-io/bucketeer/issues/1145 + var clientVersion string + if e.User == nil { + r.logger.Warn("Failed to cache last used info. User is nil.", + zap.String("environmentNamespace", environmentNamespace), + zap.String("featureId", e.FeatureId), + zap.Int32("featureVersion", e.FeatureVersion)) + } else { + clientVersion = e.User.Data[userDataAppVersion] + } + id := domain.FeatureLastUsedInfoID(e.FeatureId, e.FeatureVersion) + if cache, ok := envCache[environmentNamespace]; ok { + if info, ok := cache[id]; ok { + info.UsedAt(e.Timestamp) + if err := info.SetClientVersion(clientVersion); err != nil { + r.logger.Error("Failed to set client version", + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + zap.String("featureId", info.FeatureId), + zap.Int32("featureVersion", info.Version), + zap.String("clientVersion", clientVersion)) + } + return + } + cache[id] = domain.NewFeatureLastUsedInfo(e.FeatureId, e.FeatureVersion, e.Timestamp, clientVersion) + return + } + cache := lastUsedInfoCache{} + cache[id] = domain.NewFeatureLastUsedInfo(e.FeatureId, e.FeatureVersion, e.Timestamp, clientVersion) + envCache[environmentNamespace] = cache +} + +func (r *recorder) writeEnvLastUsedInfo(envCache environmentLastUsedInfoCache) { 
+ for environmentNamespace, cache := range envCache { + info := make([]*domain.FeatureLastUsedInfo, 0, len(cache)) + for _, v := range cache { + info = append(info, v) + } + if err := r.upsertMultiFeatureLastUsedInfo(context.Background(), info, environmentNamespace); err != nil { + r.logger.Error("failed to write featureLastUsedInfo", zap.Error(err), + zap.String("environmentNamespace", environmentNamespace)) + continue + } + } +} + +func (r *recorder) unmarshalMessage(msg *puller.Message) (*client.Event, error) { + var event client.Event + if err := proto.Unmarshal(msg.Data, &event); err != nil { + r.logger.Error("bad message", zap.Error(err), zap.Any("msg", msg)) + return nil, err + } + return &event, nil +} + +func (r *recorder) unmarshalEvent(event *any.Any) (*client.EvaluationEvent, error) { + var evaluationEvent client.EvaluationEvent + if err := ptypes.UnmarshalAny(event, &evaluationEvent); err != nil { + r.logger.Error("unexpected event", zap.Error(err), zap.Any("event", event)) + return nil, err + } + return &evaluationEvent, nil +} + +func (r *recorder) upsertMultiFeatureLastUsedInfo( + ctx context.Context, + featureLastUsedInfos []*domain.FeatureLastUsedInfo, + environmentNamespace string, +) error { + ids := make([]string, 0, len(featureLastUsedInfos)) + for _, f := range featureLastUsedInfos { + ids = append(ids, f.ID()) + } + tx, err := r.storageClient.BeginTx(ctx) + if err != nil { + r.logger.Error("Failed to begin transaction", zap.Error(err)) + return err + } + err = r.storageClient.RunInTransaction(ctx, tx, func() error { + storage := ftstorage.NewFeatureLastUsedInfoStorage(r.storageClient) + updatedInfo := make([]*domain.FeatureLastUsedInfo, 0, len(ids)) + currentInfo, err := storage.GetFeatureLastUsedInfos(ctx, ids, environmentNamespace) + if err != nil { + return err + } + currentInfoMap := make(map[string]*domain.FeatureLastUsedInfo, len(currentInfo)) + for _, c := range currentInfo { + currentInfoMap[c.ID()] = c + } + for _, f := range 
featureLastUsedInfos { + v, ok := currentInfoMap[f.ID()] + if !ok { + updatedInfo = append(updatedInfo, f) + continue + } + var update bool + if v.LastUsedAt < f.LastUsedAt { + update = true + v.LastUsedAt = f.LastUsedAt + } + if v.ClientOldestVersion != f.ClientOldestVersion { + update = true + v.ClientOldestVersion = f.ClientOldestVersion + } + if v.ClientLatestVersion != f.ClientLatestVersion { + update = true + v.ClientLatestVersion = f.ClientLatestVersion + } + if update { + updatedInfo = append(updatedInfo, v) + } + } + for _, info := range updatedInfo { + if err := storage.UpsertFeatureLastUsedInfo(ctx, info, environmentNamespace); err != nil { + return err + } + } + return nil + }) + if err != nil { + return err + } + return nil +} diff --git a/pkg/feature/recorder/recorder_test.go b/pkg/feature/recorder/recorder_test.go new file mode 100644 index 000000000..c21b42c8c --- /dev/null +++ b/pkg/feature/recorder/recorder_test.go @@ -0,0 +1,170 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package recorder + +import ( + "context" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/feature/domain" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller" + sqlmock "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + "github.com/bucketeer-io/bucketeer/proto/event/client" + userproto "github.com/bucketeer-io/bucketeer/proto/user" +) + +type PullerMock struct{} + +func (p *PullerMock) Pull(ctx context.Context, f func(context.Context, *puller.Message)) error { + timer := time.NewTimer(time.Millisecond * 100) + for { + select { + case <-ctx.Done(): + return nil + case <-timer.C: + event := client.EvaluationEvent{ + FeatureId: "id", + FeatureVersion: 10, + Timestamp: int64(time.Now().Nanosecond()), + } + data, _ := proto.Marshal(&event) + f(ctx, &puller.Message{ + Data: data, + Ack: func() {}, + }) + } + } +} + +func TestNewRecorder(t *testing.T) { + t.Parallel() + puller := &PullerMock{} + mockController := gomock.NewController(t) + defer mockController.Finish() + db := sqlmock.NewMockClient(mockController) + assert.IsType(t, &recorder{}, NewRecorder(puller, db)) +} + +func TestRecorderRun(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + db := sqlmock.NewMockClient(mockController) + recorder := NewRecorder(&PullerMock{}, db) + go recorder.Run() + time.Sleep(time.Second) + recorder.Stop() +} + +func TestUnmarshalMessage(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + db := sqlmock.NewMockClient(mockController) + recorder := NewRecorder(&PullerMock{}, db).(*recorder) + event := client.Event{Id: "hoge"} + data, err := proto.Marshal(&event) + assert.NoError(t, err) + msg := puller.Message{Data: data} + e, err := recorder.unmarshalMessage(&msg) + assert.NoError(t, err) + 
assert.Equal(t, event.Id, e.Id) +} + +func TestUnmarshalEvent(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + db := sqlmock.NewMockClient(mockController) + recorder := NewRecorder(&PullerMock{}, db).(*recorder) + evalEvent := client.EvaluationEvent{FeatureId: "id"} + any, err := ptypes.MarshalAny(&evalEvent) + assert.NoError(t, err) + evale, err := recorder.unmarshalEvent(any) + assert.NoError(t, err) + assert.Equal(t, evalEvent.FeatureId, evale.FeatureId) +} + +func TestCacheEnvLastUsedInfo(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + db := sqlmock.NewMockClient(mockController) + recorder := NewRecorder(&PullerMock{}, db, WithFlushInterval(1*time.Second)).(*recorder) + expectVersion := "1.0.0" + user := &userproto.User{ + Id: "id", + Data: map[string]string{"app_version": expectVersion}, + } + patterns := []struct { + input *client.EvaluationEvent + environmentNamespace string + expect int64 + }{ + { + input: &client.EvaluationEvent{FeatureId: "id", FeatureVersion: 10, Timestamp: 0, User: user}, + environmentNamespace: "ns0", + expect: 1, + }, + { + input: &client.EvaluationEvent{FeatureId: "id", FeatureVersion: 10, Timestamp: 2, User: user}, + environmentNamespace: "ns0", + expect: 2, + }, + { + input: &client.EvaluationEvent{FeatureId: "id", FeatureVersion: 11, Timestamp: 1, User: user}, + environmentNamespace: "ns0", + expect: 1, + }, + { + input: &client.EvaluationEvent{FeatureId: "id", FeatureVersion: 10, Timestamp: 1, User: user}, + environmentNamespace: "ns1", + expect: 1, + }, + } + envCache := make(environmentLastUsedInfoCache, 1) + e := client.EvaluationEvent{FeatureId: "id", FeatureVersion: 10, Timestamp: 1, User: user} + recorder.cacheEnvLastUsedInfo(&e, envCache, "ns0") + for i, p := range patterns { + recorder.cacheEnvLastUsedInfo(p.input, envCache, p.environmentNamespace) + key := 
domain.FeatureLastUsedInfoID(p.input.FeatureId, p.input.FeatureVersion) + assert.Equal(t, p.expect, envCache[p.environmentNamespace][key].LastUsedAt, "i=%d", i) + assert.Equal(t, expectVersion, envCache[p.environmentNamespace][key].ClientOldestVersion, "i=%d", i) + assert.Equal(t, expectVersion, envCache[p.environmentNamespace][key].ClientLatestVersion, "i=%d", i) + } +} + +func BenchmarkCacheEnvLastUsedInfo(b *testing.B) { + mockController := gomock.NewController(b) + defer mockController.Finish() + db := sqlmock.NewMockClient(mockController) + recorder := NewRecorder(&PullerMock{}, db).(*recorder) + b.ResetTimer() + envCache := make(environmentLastUsedInfoCache, 1) + for i := 0; i < b.N; i++ { + e := client.EvaluationEvent{ + FeatureId: "id", + FeatureVersion: 10, + Timestamp: int64(time.Now().Nanosecond()), + } + recorder.cacheEnvLastUsedInfo(&e, envCache, "ns0") + } +} diff --git a/pkg/feature/segmentpersister/BUILD.bazel b/pkg/feature/segmentpersister/BUILD.bazel new file mode 100644 index 000000000..be0ee2187 --- /dev/null +++ b/pkg/feature/segmentpersister/BUILD.bazel @@ -0,0 +1,56 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "metrics.go", + "persister.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/feature/segmentpersister", + visibility = ["//visibility:public"], + deps = [ + "//pkg/cache:go_default_library", + "//pkg/cache/v3:go_default_library", + "//pkg/errgroup:go_default_library", + "//pkg/feature/command:go_default_library", + "//pkg/feature/domain:go_default_library", + "//pkg/feature/storage/v2:go_default_library", + "//pkg/health:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/pubsub/publisher:go_default_library", + "//pkg/pubsub/puller:go_default_library", + "//pkg/pubsub/puller/codes:go_default_library", + "//pkg/storage:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + 
"//proto/event/domain:go_default_library", + "//proto/event/service:go_default_library", + "//proto/feature:go_default_library", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_prometheus_client_golang//prometheus:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["persister_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/cache/mock:go_default_library", + "//pkg/cache/v3/mock:go_default_library", + "//pkg/feature/domain:go_default_library", + "//pkg/feature/storage/v2:go_default_library", + "//pkg/metrics/mock:go_default_library", + "//pkg/pubsub/publisher/mock:go_default_library", + "//pkg/pubsub/puller/mock:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//pkg/storage/v2/mysql/mock:go_default_library", + "//proto/account:go_default_library", + "//proto/event/domain:go_default_library", + "//proto/event/service:go_default_library", + "//proto/feature:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/feature/segmentpersister/metrics.go b/pkg/feature/segmentpersister/metrics.go new file mode 100644 index 000000000..f7021fb81 --- /dev/null +++ b/pkg/feature/segmentpersister/metrics.go @@ -0,0 +1,43 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package segmentpersister + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/bucketeer-io/bucketeer/pkg/metrics" +) + +var ( + receivedCounter = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "feature", + Name: "segment_persister_received_total", + Help: "Total number of received messages", + }) + + handledCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "feature", + Name: "segment_persister_handled_total", + Help: "Total number of handled messages", + }, []string{"code"}) +) + +func registerMetrics(r metrics.Registerer) { + r.MustRegister(receivedCounter, handledCounter) +} diff --git a/pkg/feature/segmentpersister/persister.go b/pkg/feature/segmentpersister/persister.go new file mode 100644 index 000000000..752d220e4 --- /dev/null +++ b/pkg/feature/segmentpersister/persister.go @@ -0,0 +1,456 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package segmentpersister + +import ( + "context" + "errors" + "fmt" + "strings" + "time" + + "github.com/golang/protobuf/proto" // nolint:staticcheck + "go.uber.org/zap" + + "github.com/bucketeer-io/bucketeer/pkg/cache" + cachev3 "github.com/bucketeer-io/bucketeer/pkg/cache/v3" + "github.com/bucketeer-io/bucketeer/pkg/errgroup" + "github.com/bucketeer-io/bucketeer/pkg/feature/command" + "github.com/bucketeer-io/bucketeer/pkg/feature/domain" + v2fs "github.com/bucketeer-io/bucketeer/pkg/feature/storage/v2" + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller/codes" + "github.com/bucketeer-io/bucketeer/pkg/storage" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + domainproto "github.com/bucketeer-io/bucketeer/proto/event/domain" + serviceevent "github.com/bucketeer-io/bucketeer/proto/event/service" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +const ( + maxUserIDLength = 100 +) + +var ( + errSegmentInUse = errors.New("segment: segment is in use") + errExceededMaxUserIDLength = fmt.Errorf("segment: max user id length allowed is %d", maxUserIDLength) +) + +type options struct { + maxMPS int + numWorkers int + flushSize int + flushInterval time.Duration + metrics metrics.Registerer + logger *zap.Logger +} + +type Option func(*options) + +func WithMaxMPS(mps int) Option { + return func(opts *options) { + opts.maxMPS = mps + } +} + +func WithNumWorkers(n int) Option { + return func(opts *options) { + opts.numWorkers = n + } +} + +func WithFlushSize(s int) Option { + return func(opts *options) { + opts.flushSize = s + } +} + +func WithFlushInterval(i time.Duration) Option { + return func(opts *options) { + opts.flushInterval = i + } +} + +func WithMetrics(r metrics.Registerer) Option { + return func(opts *options) { + 
opts.metrics = r + } +} + +func WithLogger(logger *zap.Logger) Option { + return func(opts *options) { + opts.logger = logger + } +} + +type Persister struct { + puller puller.RateLimitedPuller + domainPublisher publisher.Publisher + mysqlClient mysql.Client + segmentUsersCache cachev3.SegmentUsersCache + group errgroup.Group + opts *options + logger *zap.Logger + ctx context.Context + cancel func() + doneCh chan struct{} +} + +func NewPersister( + p puller.Puller, + domainPublisher publisher.Publisher, + mysqlClient mysql.Client, + v3Cache cache.MultiGetCache, + opts ...Option, +) *Persister { + dopts := &options{ + maxMPS: 100, + numWorkers: 2, + flushSize: 2, + flushInterval: 10 * time.Second, + logger: zap.NewNop(), + } + for _, opt := range opts { + opt(dopts) + } + ctx, cancel := context.WithCancel(context.Background()) + if dopts.metrics != nil { + registerMetrics(dopts.metrics) + } + return &Persister{ + puller: puller.NewRateLimitedPuller(p, dopts.maxMPS), + domainPublisher: domainPublisher, + mysqlClient: mysqlClient, + segmentUsersCache: cachev3.NewSegmentUsersCache(v3Cache), + opts: dopts, + logger: dopts.logger.Named("segment-persister"), + ctx: ctx, + cancel: cancel, + doneCh: make(chan struct{}), + } +} + +func (p *Persister) Run() error { + defer close(p.doneCh) + p.group.Go(func() error { + return p.puller.Run(p.ctx) + }) + for i := 0; i < p.opts.numWorkers; i++ { + p.group.Go(p.runWorker) + } + return p.group.Wait() +} + +func (p *Persister) Stop() { + p.cancel() + <-p.doneCh +} + +func (p *Persister) Check(ctx context.Context) health.Status { + select { + case <-p.ctx.Done(): + p.logger.Error("unhealthy due to context Done is closed", zap.Error(p.ctx.Err())) + return health.Unhealthy + default: + if p.group.FinishedCount() > 0 { + p.logger.Error("unhealthy", zap.Int32("finishedCount", p.group.FinishedCount())) + return health.Unhealthy + } + return health.Healthy + } +} + +func (p *Persister) runWorker() error { + chunk := 
make(map[string]*puller.Message, p.opts.flushSize) + timer := time.NewTimer(p.opts.flushInterval) + defer timer.Stop() + for { + select { + case msg, ok := <-p.puller.MessageCh(): + if !ok { + return nil + } + receivedCounter.Inc() + id := msg.Attributes["id"] + if id == "" { + msg.Ack() + handledCounter.WithLabelValues(codes.MissingID.String()).Inc() + continue + } + if _, ok := chunk[id]; ok { + p.logger.Warn("message with duplicate id", zap.String("id", id)) + handledCounter.WithLabelValues(codes.DuplicateID.String()).Inc() + } + chunk[id] = msg + if len(chunk) >= p.opts.flushSize { + p.handleChunk(chunk) + chunk = make(map[string]*puller.Message, p.opts.flushSize) + timer.Reset(p.opts.flushInterval) + } + case <-timer.C: + if len(chunk) > 0 { + p.handleChunk(chunk) + chunk = make(map[string]*puller.Message, p.opts.flushSize) + } + timer.Reset(p.opts.flushInterval) + case <-p.ctx.Done(): + return nil + } + } +} + +func (p *Persister) handleChunk(chunk map[string]*puller.Message) { + for _, msg := range chunk { + p.logger.Debug("handling a message", zap.String("msgID", msg.ID)) + event, err := p.unmarshalMessage(msg) + if err != nil { + msg.Ack() + p.logger.Error("failed to unmarshal message", zap.Error(err), zap.String("msgID", msg.ID)) + handledCounter.WithLabelValues(codes.BadMessage.String()).Inc() + continue + } + if !validateSegmentUserState(event.State) { + msg.Ack() + p.logger.Error( + "invalid state", + zap.String("environmentNamespace", event.EnvironmentNamespace), + zap.Int32("state", int32(event.State)), + ) + handledCounter.WithLabelValues(codes.BadMessage.String()).Inc() + if err := p.updateSegmentStatus( + p.ctx, + event.Editor, + event.EnvironmentNamespace, + event.SegmentId, + 0, + event.State, + featureproto.Segment_FAILED, + ); err != nil { + p.logger.Error( + "failed to update segment status", + zap.Error(err), + zap.String("environmentNamespace", event.EnvironmentNamespace), + ) + } + continue + } + if err := p.handleEvent(p.ctx, event); err 
!= nil { + switch err { + case storage.ErrKeyNotFound, v2fs.ErrSegmentNotFound: + msg.Ack() + p.logger.Warn("segment not found", zap.Error(err), zap.String("environmentNamespace", event.EnvironmentNamespace)) + handledCounter.WithLabelValues(codes.NonRepeatableError.String()).Inc() + case errSegmentInUse: + msg.Ack() + p.logger.Warn( + "segment is in use", + zap.Error(err), + zap.String("environmentNamespace", event.EnvironmentNamespace), + ) + handledCounter.WithLabelValues(codes.NonRepeatableError.String()).Inc() + case errExceededMaxUserIDLength: + msg.Ack() + p.logger.Warn( + "exceeded max user id length", + zap.Error(err), + zap.String("environmentNamespace", event.EnvironmentNamespace), + ) + handledCounter.WithLabelValues(codes.NonRepeatableError.String()).Inc() + if err := p.updateSegmentStatus( + p.ctx, + event.Editor, + event.EnvironmentNamespace, + event.SegmentId, + 0, + event.State, + featureproto.Segment_FAILED, + ); err != nil { + p.logger.Error( + "failed to update segment status", + zap.Error(err), + zap.String("environmentNamespace", event.EnvironmentNamespace), + ) + } + default: + // retryable + msg.Nack() + p.logger.Error( + "failed to handle event", + zap.Error(err), + zap.String("environmentNamespace", event.EnvironmentNamespace), + ) + handledCounter.WithLabelValues(codes.RepeatableError.String()).Inc() + } + continue + } + msg.Ack() + p.logger.Debug( + "suceeded to persist segment users", + zap.String("msgID", msg.ID), + zap.String("environmentNamespace", event.EnvironmentNamespace), + zap.String("segmentId", event.SegmentId), + ) + handledCounter.WithLabelValues(codes.OK.String()).Inc() + } +} + +func (p *Persister) unmarshalMessage(msg *puller.Message) (*serviceevent.BulkSegmentUsersReceivedEvent, error) { + event := &serviceevent.BulkSegmentUsersReceivedEvent{} + err := proto.Unmarshal(msg.Data, event) + if err != nil { + return nil, err + } + return event, nil +} + +func validateSegmentUserState(state featureproto.SegmentUser_State) 
bool { + switch state { + case featureproto.SegmentUser_INCLUDED: + return true + default: + return false + } +} + +func (p *Persister) handleEvent(ctx context.Context, event *serviceevent.BulkSegmentUsersReceivedEvent) error { + segmentStorage := v2fs.NewSegmentStorage(p.mysqlClient) + segment, err := segmentStorage.GetSegment(ctx, event.SegmentId, event.EnvironmentNamespace) + if err != nil { + return err + } + if segment.IsInUseStatus { + return errSegmentInUse + } + cnt, err := p.persistSegmentUsers(ctx, event.EnvironmentNamespace, event.SegmentId, event.Data, event.State) + if err != nil { + return err + } + return p.updateSegmentStatus( + ctx, + event.Editor, + event.EnvironmentNamespace, + event.SegmentId, + cnt, + event.State, + featureproto.Segment_SUCEEDED, + ) +} + +func (p *Persister) persistSegmentUsers( + ctx context.Context, + environmentNamespace string, + segmentID string, + data []byte, + state featureproto.SegmentUser_State, +) (int64, error) { + segmentUserIDs := strings.Split( + strings.NewReplacer( + ",", "\n", + "\r\n", "\n", + ).Replace(string(data)), + "\n", + ) + uniqueSegmentUserIDs := make(map[string]struct{}, len(segmentUserIDs)) + for _, id := range segmentUserIDs { + id = strings.TrimSpace(id) + if id == "" { + continue + } + if len(id) > maxUserIDLength { + return 0, errExceededMaxUserIDLength + } + uniqueSegmentUserIDs[id] = struct{}{} + } + allSegmentUsers := make([]*featureproto.SegmentUser, 0, len(uniqueSegmentUserIDs)) + var cnt int64 + for id := range uniqueSegmentUserIDs { + cnt++ + user := domain.NewSegmentUser(segmentID, id, state, false) + allSegmentUsers = append(allSegmentUsers, user.SegmentUser) + } + tx, err := p.mysqlClient.BeginTx(ctx) + if err != nil { + p.logger.Error("Failed to begin transaction", zap.Error(err)) + return 0, err + } + err = p.mysqlClient.RunInTransaction(ctx, tx, func() error { + segmentUserStorage := v2fs.NewSegmentUserStorage(tx) + if err := segmentUserStorage.UpsertSegmentUsers(ctx, 
allSegmentUsers, environmentNamespace); err != nil { + return err + } + return p.updateCache(segmentID, environmentNamespace, allSegmentUsers) + }) + if err != nil { + return 0, nil + } + return cnt, nil +} + +func (p *Persister) updateSegmentStatus( + ctx context.Context, + editor *domainproto.Editor, + environmentNamespace string, + segmentID string, + cnt int64, + state featureproto.SegmentUser_State, + status featureproto.Segment_Status, +) error { + tx, err := p.mysqlClient.BeginTx(ctx) + if err != nil { + p.logger.Error("Failed to begin transaction", zap.Error(err)) + return err + } + return p.mysqlClient.RunInTransaction(ctx, tx, func() error { + segmentStorage := v2fs.NewSegmentStorage(tx) + segment, err := segmentStorage.GetSegment(ctx, segmentID, environmentNamespace) + if err != nil { + return err + } + changeCmd := &featureproto.ChangeBulkUploadSegmentUsersStatusCommand{ + Status: status, + State: state, + Count: cnt, + } + handler := command.NewSegmentCommandHandler(editor, segment, p.domainPublisher, environmentNamespace) + if err := handler.Handle(ctx, changeCmd); err != nil { + return err + } + return segmentStorage.UpdateSegment(ctx, segment, environmentNamespace) + }) +} + +func (p *Persister) updateCache(segmentID, environmentNamespace string, users []*featureproto.SegmentUser) error { + segmentUsers := &featureproto.SegmentUsers{ + SegmentId: segmentID, + Users: users, + } + if err := p.segmentUsersCache.Put(segmentUsers, environmentNamespace); err != nil { + p.logger.Error( + "Failed to cache segment users", + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + ) + return err + } + p.logger.Info("Segment users successfully cached", + zap.String("environmentNamespace", environmentNamespace), + zap.String("segmentId", segmentID), + zap.Int("size", len(users)), + ) + return nil +} diff --git a/pkg/feature/segmentpersister/persister_test.go b/pkg/feature/segmentpersister/persister_test.go new file mode 100644 index 
000000000..93d763f48 --- /dev/null +++ b/pkg/feature/segmentpersister/persister_test.go @@ -0,0 +1,161 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package segmentpersister + +import ( + "context" + "strings" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + + cachemock "github.com/bucketeer-io/bucketeer/pkg/cache/mock" + cachev3mock "github.com/bucketeer-io/bucketeer/pkg/cache/v3/mock" + "github.com/bucketeer-io/bucketeer/pkg/feature/domain" + v2fs "github.com/bucketeer-io/bucketeer/pkg/feature/storage/v2" + metricsmock "github.com/bucketeer-io/bucketeer/pkg/metrics/mock" + publishermock "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher/mock" + pullermock "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller/mock" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + mysqlmock "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" + serviceevent "github.com/bucketeer-io/bucketeer/proto/event/service" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +func TestNewPersister(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + puller := pullermock.NewMockPuller(mockController) + publisher := 
publishermock.NewMockPublisher(mockController) + mysqlClient := mysqlmock.NewMockClient(mockController) + redis := cachemock.NewMockMultiGetCache(mockController) + registerer := metricsmock.NewMockRegisterer(mockController) + registerer.EXPECT().MustRegister(gomock.Any()).Return() + p := NewPersister( + puller, + publisher, + mysqlClient, + redis, + WithMaxMPS(100), + WithNumWorkers(1), + WithFlushSize(1), + WithFlushInterval(time.Second), + WithMetrics(registerer), + WithLogger(zap.NewNop()), + ) + assert.IsType(t, &Persister{}, p) +} + +func TestHandleEventMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*Persister) + event *serviceevent.BulkSegmentUsersReceivedEvent + segment *domain.Segment + expectedCount int64 + expectedErr error + }{ + "err: ErrSegmentNotFound": { + setup: func(p *Persister) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows) + p.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + event: &serviceevent.BulkSegmentUsersReceivedEvent{ + SegmentId: "sid", + EnvironmentNamespace: "ns0", + }, + expectedErr: v2fs.ErrSegmentNotFound, + }, + "err: errExceededMaxUserIDLength": { + setup: func(p *Persister) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + p.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + event: &serviceevent.BulkSegmentUsersReceivedEvent{ + SegmentId: "sid", + EnvironmentNamespace: "ns0", + Data: []byte(strings.Repeat("a", maxUserIDLength+1)), + State: featureproto.SegmentUser_INCLUDED, + }, + expectedErr: errExceededMaxUserIDLength, + }, + "success": { + setup: func(p *Persister) { + row := mysqlmock.NewMockRow(mockController) + 
row.EXPECT().Scan(gomock.Any()).Return(nil) + p.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + p.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil).Times(2) + p.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil).Times(2) + }, + event: &serviceevent.BulkSegmentUsersReceivedEvent{ + SegmentId: "sid", + EnvironmentNamespace: "ns0", + Data: []byte("user1\nuser2\r\nuser2\n"), + State: featureproto.SegmentUser_INCLUDED, + Editor: &eventproto.Editor{ + Email: "email", + Role: accountproto.Account_OWNER, + }, + }, + expectedErr: nil, + }, + } + for msg, pat := range patterns { + t.Run(msg, func(t *testing.T) { + persister := newPersister(t, mockController) + if pat.setup != nil { + pat.setup(persister) + } + err := persister.handleEvent(context.Background(), pat.event) + assert.Equal(t, pat.expectedErr, err) + }) + } +} + +func newPersister(t *testing.T, mockController *gomock.Controller) *Persister { + t.Helper() + ctx, cancel := context.WithCancel(context.Background()) + logger := zap.NewNop() + return &Persister{ + puller: pullermock.NewMockRateLimitedPuller(mockController), + domainPublisher: publishermock.NewMockPublisher(mockController), + mysqlClient: mysqlmock.NewMockClient(mockController), + segmentUsersCache: cachev3mock.NewMockSegmentUsersCache(mockController), + logger: logger.Named("persister"), + ctx: ctx, + cancel: cancel, + doneCh: make(chan struct{}), + } +} diff --git a/pkg/feature/storage/BUILD.bazel b/pkg/feature/storage/BUILD.bazel new file mode 100644 index 000000000..cffc49d0b --- /dev/null +++ b/pkg/feature/storage/BUILD.bazel @@ -0,0 +1,38 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "feature_last_used_info.go", + "user_evaluations.go", + ], + importpath = 
"github.com/bucketeer-io/bucketeer/pkg/feature/storage", + visibility = ["//visibility:public"], + deps = [ + "//pkg/feature/domain:go_default_library", + "//pkg/storage:go_default_library", + "//pkg/storage/v2/bigtable:go_default_library", + "//proto/feature:go_default_library", + "@com_github_golang_protobuf//proto:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "feature_last_used_info_test.go", + "user_evaluations_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//pkg/feature/domain:go_default_library", + "//pkg/storage:go_default_library", + "//pkg/storage/testing:go_default_library", + "//pkg/storage/v2/bigtable:go_default_library", + "//pkg/storage/v2/bigtable/mock:go_default_library", + "//proto/feature:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + ], +) diff --git a/pkg/feature/storage/feature_last_used_info.go b/pkg/feature/storage/feature_last_used_info.go new file mode 100644 index 000000000..56a29a57e --- /dev/null +++ b/pkg/feature/storage/feature_last_used_info.go @@ -0,0 +1,158 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package storage + +import ( + "context" + + "github.com/bucketeer-io/bucketeer/pkg/feature/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage" + proto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +type FeatureLastUsedStorage interface { + GetFeatureLastUsedInfos( + ctx context.Context, + ids []string, + environmentNamespace string, + ) ([]*domain.FeatureLastUsedInfo, error) + UpsertFeatureLastUsedInfos( + ctx context.Context, + featureLastUsedInfos []*domain.FeatureLastUsedInfo, + environmentNamespace string, + ) error +} + +type FeatureLastUsedLister interface { + ListFeatureLastUsedInfo( + ctx context.Context, + pageSize int, + cursor, environmentNamespace string, + filters ...*storage.Filter, + ) ([]*proto.FeatureLastUsedInfo, string, error) +} + +const featureLastUsedInfoKind = "FeatureLastUsedInfo" + +type featureLastUsedInfoStorage struct { + client storage.GetPutter +} + +func NewFeatureLastUsedInfoStorage(client storage.GetPutter) FeatureLastUsedStorage { + return &featureLastUsedInfoStorage{client: client} +} + +func (s *featureLastUsedInfoStorage) GetFeatureLastUsedInfos( + ctx context.Context, + ids []string, + environmentNamespace string, +) ([]*domain.FeatureLastUsedInfo, error) { + keys := make([]*storage.Key, 0, len(ids)) + featureLastUsedInfos := make([]*proto.FeatureLastUsedInfo, 0, len(keys)) + for _, k := range ids { + keys = append(keys, s.newKey(k, environmentNamespace)) + featureLastUsedInfos = append(featureLastUsedInfos, &proto.FeatureLastUsedInfo{}) + } + err := s.client.GetMulti(ctx, keys, featureLastUsedInfos) + if err != nil { + merr, ok := err.(storage.MultiError) + if !ok { + return nil, err + } + for _, e := range merr { + switch e { + case nil: + case storage.ErrKeyNotFound: + default: + return nil, e + } + } + } + // NOTE: If the performance matters, remove the following loop and return protos. 
+ domainFeatureLastUsedInfos := make([]*domain.FeatureLastUsedInfo, 0, len(keys)) + for _, f := range featureLastUsedInfos { + if f == nil { + continue + } + domainFeatureLastUsedInfos = append( + domainFeatureLastUsedInfos, + &domain.FeatureLastUsedInfo{FeatureLastUsedInfo: f}, + ) + } + return domainFeatureLastUsedInfos, nil +} + +func (s *featureLastUsedInfoStorage) UpsertFeatureLastUsedInfos( + ctx context.Context, + featureLastUsedInfos []*domain.FeatureLastUsedInfo, + environmentNamespace string, +) error { + keys := make([]*storage.Key, 0, len(featureLastUsedInfos)) + featureLastUsedInfoProtos := make([]*proto.FeatureLastUsedInfo, 0, len(featureLastUsedInfos)) + for _, f := range featureLastUsedInfos { + keys = append(keys, storage.NewKey(f.ID(), featureLastUsedInfoKind, environmentNamespace)) + featureLastUsedInfoProtos = append(featureLastUsedInfoProtos, f.FeatureLastUsedInfo) + } + return s.client.PutMulti(ctx, keys, featureLastUsedInfoProtos) +} + +func (s *featureLastUsedInfoStorage) newKey(featureLastUsedInfoKey, environmentNamespace string) *storage.Key { + return storage.NewKey(featureLastUsedInfoKey, featureLastUsedInfoKind, environmentNamespace) +} + +type featureLastUsedInfoLister struct { + client storage.Querier +} + +func NewFeatureLastUsedInfoLister(client storage.Querier) FeatureLastUsedLister { + return &featureLastUsedInfoLister{client: client} +} + +func (l *featureLastUsedInfoLister) ListFeatureLastUsedInfo( + ctx context.Context, + pageSize int, + cursor, environmentNamespace string, + filters ...*storage.Filter, +) ([]*proto.FeatureLastUsedInfo, string, error) { + query := storage.Query{ + Kind: featureLastUsedInfoKind, + Limit: pageSize, + StartCursor: cursor, + Filters: filters, + EnvironmentNamespace: environmentNamespace, + } + it, err := l.client.RunQuery(ctx, query) + if err != nil { + return nil, "", err + } + featureLastUseds := make([]*proto.FeatureLastUsedInfo, 0, pageSize) + for { + featureLastUsed := 
&proto.FeatureLastUsedInfo{} + err := it.Next(featureLastUsed) + if err == storage.ErrIteratorDone { + break + } + if err != nil { + return nil, "", err + } + featureLastUseds = append(featureLastUseds, featureLastUsed) + } + nextCursor, err := it.Cursor() + if err != nil { + return nil, "", err + } + return featureLastUseds, nextCursor, nil +} diff --git a/pkg/feature/storage/feature_last_used_info_test.go b/pkg/feature/storage/feature_last_used_info_test.go new file mode 100644 index 000000000..53ebee299 --- /dev/null +++ b/pkg/feature/storage/feature_last_used_info_test.go @@ -0,0 +1,264 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package storage + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/feature/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage" + storagetesting "github.com/bucketeer-io/bucketeer/pkg/storage/testing" + proto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +func TestNewFeatureLastUsedStorage(t *testing.T) { + db := NewFeatureLastUsedInfoStorage(storagetesting.NewInMemoryStorage()) + assert.IsType(t, &featureLastUsedInfoStorage{}, db) +} + +func TestGetFeatureLastUsedInfos(t *testing.T) { + t.Parallel() + patterns := []struct { + input []string + environmentNamespace string + expected []*domain.FeatureLastUsedInfo + expectedErr error + }{ + { + input: []string{}, + environmentNamespace: "ns0", + expected: []*domain.FeatureLastUsedInfo{}, + expectedErr: nil, + }, + { + input: []string{"feature-id-1:1"}, + environmentNamespace: "ns0", + expected: []*domain.FeatureLastUsedInfo{ + { + FeatureLastUsedInfo: &proto.FeatureLastUsedInfo{ + FeatureId: "feature-id-1", + Version: 1, + LastUsedAt: 2, + CreatedAt: 1, + }, + }, + }, + expectedErr: nil, + }, + { + input: []string{ + "feature-id-1:1", + "feature-id-2:1", + }, + environmentNamespace: "ns0", + expected: []*domain.FeatureLastUsedInfo{ + { + FeatureLastUsedInfo: &proto.FeatureLastUsedInfo{ + FeatureId: "feature-id-1", + Version: 1, + LastUsedAt: 2, + CreatedAt: 1, + }, + }, + { + FeatureLastUsedInfo: &proto.FeatureLastUsedInfo{ + FeatureId: "feature-id-2", + Version: 1, + LastUsedAt: 2, + CreatedAt: 1, + }, + }, + }, + expectedErr: nil, + }, + } + client := storagetesting.NewInMemoryStorage() + keys := []*storage.Key{ + storage.NewKey("feature-id-1:1", featureLastUsedInfoKind, "ns0"), + storage.NewKey("feature-id-2:1", featureLastUsedInfoKind, "ns0"), + } + existedEls := []*proto.FeatureLastUsedInfo{ + { + FeatureId: "feature-id-1", + Version: 1, + LastUsedAt: 2, + CreatedAt: 1, + }, + { + FeatureId: "feature-id-2", + Version: 
1, + LastUsedAt: 2, + CreatedAt: 1, + }, + } + err := client.PutMulti(context.Background(), keys, existedEls) + assert.NoError(t, err) + db := NewFeatureLastUsedInfoStorage(client) + for _, p := range patterns { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + actual, err := db.GetFeatureLastUsedInfos(ctx, p.input, p.environmentNamespace) + assert.Equal(t, p.expectedErr, err) + if err == nil && len(p.input) > 0 { + for i, e := range p.expected { + assert.NoError(t, err) + assert.Equal(t, e.FeatureId, actual[i].FeatureId) + assert.Equal(t, e.Version, actual[i].Version) + assert.Equal(t, e.LastUsedAt, actual[i].LastUsedAt) + assert.Equal(t, e.CreatedAt, actual[i].CreatedAt) + } + } + } +} + +func TestUpsertFeatureLastUsedInfo(t *testing.T) { + t.Parallel() + patterns := []struct { + data []*domain.FeatureLastUsedInfo + environmentNamespace string + expectedErr error + }{ + // insert + { + data: []*domain.FeatureLastUsedInfo{ + { + FeatureLastUsedInfo: &proto.FeatureLastUsedInfo{ + FeatureId: "feature-id-10", + Version: 1, + LastUsedAt: 2, + CreatedAt: 1, + }, + }, + }, + environmentNamespace: "ns0", + expectedErr: nil, + }, + // multi insert + { + data: []*domain.FeatureLastUsedInfo{ + { + FeatureLastUsedInfo: &proto.FeatureLastUsedInfo{ + FeatureId: "feature-id-11", + Version: 1, + LastUsedAt: 2, + CreatedAt: 1, + }, + }, + { + FeatureLastUsedInfo: &proto.FeatureLastUsedInfo{ + FeatureId: "feature-id-12", + Version: 1, + LastUsedAt: 2, + CreatedAt: 1, + }, + }, + }, + environmentNamespace: "ns0", + expectedErr: nil, + }, + // update + { + data: []*domain.FeatureLastUsedInfo{ + { + FeatureLastUsedInfo: &proto.FeatureLastUsedInfo{ + FeatureId: "feature-id-1", + Version: 1, + LastUsedAt: 3, + CreatedAt: 1, + }, + }, + }, + environmentNamespace: "ns0", + expectedErr: nil, + }, + // insert & update + { + data: []*domain.FeatureLastUsedInfo{ + { + FeatureLastUsedInfo: &proto.FeatureLastUsedInfo{ + FeatureId: "feature-id-2", + 
Version: 1, + LastUsedAt: 3, + CreatedAt: 1, + }, + }, + { + FeatureLastUsedInfo: &proto.FeatureLastUsedInfo{ + FeatureId: "feature-id-13", + Version: 1, + LastUsedAt: 3, + CreatedAt: 1, + }, + }, + }, + environmentNamespace: "ns0", + expectedErr: nil, + }, + } + client := storagetesting.NewInMemoryStorage() + keys := []*storage.Key{ + storage.NewKey("feature-id-1:1", featureLastUsedInfoKind, "ns0"), + storage.NewKey("feature-id-2:1", featureLastUsedInfoKind, "ns0"), + } + existedEls := []*proto.FeatureLastUsedInfo{ + { + FeatureId: "feature-id-1", + Version: 1, + LastUsedAt: 2, + CreatedAt: 1, + }, + { + FeatureId: "feature-id-2", + Version: 1, + LastUsedAt: 2, + CreatedAt: 1, + }, + } + err := client.PutMulti(context.Background(), keys, existedEls) + assert.NoError(t, err) + db := NewFeatureLastUsedInfoStorage(client) + for _, p := range patterns { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + err := db.UpsertFeatureLastUsedInfos(ctx, p.data, p.environmentNamespace) + assert.NoError(t, err) + actual := make([]*proto.FeatureLastUsedInfo, len(p.data)) + for i := range actual { + actual[i] = &proto.FeatureLastUsedInfo{} + } + keys := make([]*storage.Key, 0, len(p.data)) + for _, d := range p.data { + keys = append(keys, storage.NewKey(d.ID(), featureLastUsedInfoKind, p.environmentNamespace)) + } + err = client.GetMulti(ctx, keys, actual) + assert.NoError(t, err) + for i, e := range p.data { + assert.NoError(t, err) + assert.Equal(t, e.FeatureLastUsedInfo.FeatureId, actual[i].FeatureId) + assert.Equal(t, e.FeatureLastUsedInfo.Version, actual[i].Version) + assert.Equal(t, e.FeatureLastUsedInfo.LastUsedAt, actual[i].LastUsedAt) + assert.Equal(t, e.FeatureLastUsedInfo.CreatedAt, actual[i].CreatedAt) + } + } +} + +func TestNewFeatureLastUsedLister(t *testing.T) { + db := NewFeatureLastUsedInfoLister(storagetesting.NewInMemoryStorage()) + assert.IsType(t, &featureLastUsedInfoLister{}, db) +} diff --git 
a/pkg/feature/storage/mock/BUILD.bazel b/pkg/feature/storage/mock/BUILD.bazel new file mode 100644 index 000000000..20cfc83c6 --- /dev/null +++ b/pkg/feature/storage/mock/BUILD.bazel @@ -0,0 +1,17 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "feature_last_used_info.go", + "user_evaluations.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/feature/storage/mock", + visibility = ["//visibility:public"], + deps = [ + "//pkg/feature/domain:go_default_library", + "//pkg/storage:go_default_library", + "//proto/feature:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + ], +) diff --git a/pkg/feature/storage/mock/feature_last_used_info.go b/pkg/feature/storage/mock/feature_last_used_info.go new file mode 100644 index 000000000..b32681909 --- /dev/null +++ b/pkg/feature/storage/mock/feature_last_used_info.go @@ -0,0 +1,112 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: feature_last_used_info.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + domain "github.com/bucketeer-io/bucketeer/pkg/feature/domain" + storage "github.com/bucketeer-io/bucketeer/pkg/storage" + feature "github.com/bucketeer-io/bucketeer/proto/feature" +) + +// MockFeatureLastUsedStorage is a mock of FeatureLastUsedStorage interface. +type MockFeatureLastUsedStorage struct { + ctrl *gomock.Controller + recorder *MockFeatureLastUsedStorageMockRecorder +} + +// MockFeatureLastUsedStorageMockRecorder is the mock recorder for MockFeatureLastUsedStorage. +type MockFeatureLastUsedStorageMockRecorder struct { + mock *MockFeatureLastUsedStorage +} + +// NewMockFeatureLastUsedStorage creates a new mock instance. 
+func NewMockFeatureLastUsedStorage(ctrl *gomock.Controller) *MockFeatureLastUsedStorage { + mock := &MockFeatureLastUsedStorage{ctrl: ctrl} + mock.recorder = &MockFeatureLastUsedStorageMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockFeatureLastUsedStorage) EXPECT() *MockFeatureLastUsedStorageMockRecorder { + return m.recorder +} + +// GetFeatureLastUsedInfos mocks base method. +func (m *MockFeatureLastUsedStorage) GetFeatureLastUsedInfos(ctx context.Context, ids []string, environmentNamespace string) ([]*domain.FeatureLastUsedInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetFeatureLastUsedInfos", ctx, ids, environmentNamespace) + ret0, _ := ret[0].([]*domain.FeatureLastUsedInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetFeatureLastUsedInfos indicates an expected call of GetFeatureLastUsedInfos. +func (mr *MockFeatureLastUsedStorageMockRecorder) GetFeatureLastUsedInfos(ctx, ids, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFeatureLastUsedInfos", reflect.TypeOf((*MockFeatureLastUsedStorage)(nil).GetFeatureLastUsedInfos), ctx, ids, environmentNamespace) +} + +// UpsertFeatureLastUsedInfos mocks base method. +func (m *MockFeatureLastUsedStorage) UpsertFeatureLastUsedInfos(ctx context.Context, featureLastUsedInfos []*domain.FeatureLastUsedInfo, environmentNamespace string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpsertFeatureLastUsedInfos", ctx, featureLastUsedInfos, environmentNamespace) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpsertFeatureLastUsedInfos indicates an expected call of UpsertFeatureLastUsedInfos. 
+func (mr *MockFeatureLastUsedStorageMockRecorder) UpsertFeatureLastUsedInfos(ctx, featureLastUsedInfos, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpsertFeatureLastUsedInfos", reflect.TypeOf((*MockFeatureLastUsedStorage)(nil).UpsertFeatureLastUsedInfos), ctx, featureLastUsedInfos, environmentNamespace) +} + +// MockFeatureLastUsedLister is a mock of FeatureLastUsedLister interface. +type MockFeatureLastUsedLister struct { + ctrl *gomock.Controller + recorder *MockFeatureLastUsedListerMockRecorder +} + +// MockFeatureLastUsedListerMockRecorder is the mock recorder for MockFeatureLastUsedLister. +type MockFeatureLastUsedListerMockRecorder struct { + mock *MockFeatureLastUsedLister +} + +// NewMockFeatureLastUsedLister creates a new mock instance. +func NewMockFeatureLastUsedLister(ctrl *gomock.Controller) *MockFeatureLastUsedLister { + mock := &MockFeatureLastUsedLister{ctrl: ctrl} + mock.recorder = &MockFeatureLastUsedListerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockFeatureLastUsedLister) EXPECT() *MockFeatureLastUsedListerMockRecorder { + return m.recorder +} + +// ListFeatureLastUsedInfo mocks base method. +func (m *MockFeatureLastUsedLister) ListFeatureLastUsedInfo(ctx context.Context, pageSize int, cursor, environmentNamespace string, filters ...*storage.Filter) ([]*feature.FeatureLastUsedInfo, string, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, pageSize, cursor, environmentNamespace} + for _, a := range filters { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListFeatureLastUsedInfo", varargs...) + ret0, _ := ret[0].([]*feature.FeatureLastUsedInfo) + ret1, _ := ret[1].(string) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// ListFeatureLastUsedInfo indicates an expected call of ListFeatureLastUsedInfo. 
+func (mr *MockFeatureLastUsedListerMockRecorder) ListFeatureLastUsedInfo(ctx, pageSize, cursor, environmentNamespace interface{}, filters ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, pageSize, cursor, environmentNamespace}, filters...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListFeatureLastUsedInfo", reflect.TypeOf((*MockFeatureLastUsedLister)(nil).ListFeatureLastUsedInfo), varargs...) +} diff --git a/pkg/feature/storage/mock/user_evaluations.go b/pkg/feature/storage/mock/user_evaluations.go new file mode 100644 index 000000000..996bcd35d --- /dev/null +++ b/pkg/feature/storage/mock/user_evaluations.go @@ -0,0 +1,66 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: user_evaluations.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + feature "github.com/bucketeer-io/bucketeer/proto/feature" +) + +// MockUserEvaluationsStorage is a mock of UserEvaluationsStorage interface. +type MockUserEvaluationsStorage struct { + ctrl *gomock.Controller + recorder *MockUserEvaluationsStorageMockRecorder +} + +// MockUserEvaluationsStorageMockRecorder is the mock recorder for MockUserEvaluationsStorage. +type MockUserEvaluationsStorageMockRecorder struct { + mock *MockUserEvaluationsStorage +} + +// NewMockUserEvaluationsStorage creates a new mock instance. +func NewMockUserEvaluationsStorage(ctrl *gomock.Controller) *MockUserEvaluationsStorage { + mock := &MockUserEvaluationsStorage{ctrl: ctrl} + mock.recorder = &MockUserEvaluationsStorageMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockUserEvaluationsStorage) EXPECT() *MockUserEvaluationsStorageMockRecorder { + return m.recorder +} + +// GetUserEvaluations mocks base method. 
+func (m *MockUserEvaluationsStorage) GetUserEvaluations(ctx context.Context, userID, environmentNamespace, tag string) ([]*feature.Evaluation, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUserEvaluations", ctx, userID, environmentNamespace, tag) + ret0, _ := ret[0].([]*feature.Evaluation) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetUserEvaluations indicates an expected call of GetUserEvaluations. +func (mr *MockUserEvaluationsStorageMockRecorder) GetUserEvaluations(ctx, userID, environmentNamespace, tag interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUserEvaluations", reflect.TypeOf((*MockUserEvaluationsStorage)(nil).GetUserEvaluations), ctx, userID, environmentNamespace, tag) +} + +// UpsertUserEvaluation mocks base method. +func (m *MockUserEvaluationsStorage) UpsertUserEvaluation(ctx context.Context, evaluation *feature.Evaluation, environmentNamespace, tag string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpsertUserEvaluation", ctx, evaluation, environmentNamespace, tag) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpsertUserEvaluation indicates an expected call of UpsertUserEvaluation. +func (mr *MockUserEvaluationsStorageMockRecorder) UpsertUserEvaluation(ctx, evaluation, environmentNamespace, tag interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpsertUserEvaluation", reflect.TypeOf((*MockUserEvaluationsStorage)(nil).UpsertUserEvaluation), ctx, evaluation, environmentNamespace, tag) +} diff --git a/pkg/feature/storage/user_evaluations.go b/pkg/feature/storage/user_evaluations.go new file mode 100644 index 000000000..f61454c17 --- /dev/null +++ b/pkg/feature/storage/user_evaluations.go @@ -0,0 +1,125 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package storage + +import ( + "context" + "fmt" + + "github.com/golang/protobuf/proto" // nolint:staticcheck + + "github.com/bucketeer-io/bucketeer/pkg/feature/domain" + storage "github.com/bucketeer-io/bucketeer/pkg/storage/v2/bigtable" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +const ( + tableName = "user_evaluations" + columnFamily = "ue" + columnName = "evaluations" +) + +type UserEvaluationsStorage interface { + UpsertUserEvaluation(ctx context.Context, evaluation *featureproto.Evaluation, environmentNamespace, tag string) error + GetUserEvaluations(ctx context.Context, userID, environmentNamespace, tag string) ([]*featureproto.Evaluation, error) +} + +type userEvaluationsStorage struct { + client storage.Client +} + +func NewUserEvaluationsStorage(client storage.Client) UserEvaluationsStorage { + return &userEvaluationsStorage{client: client} +} + +func (s *userEvaluationsStorage) UpsertUserEvaluation( + ctx context.Context, + evaluation *featureproto.Evaluation, + environmentNamespace, tag string, +) error { + key := newKey( + environmentNamespace, + tag, + evaluation.UserId, + evaluation.FeatureId, + evaluation.FeatureVersion, + ) + value, err := proto.Marshal(evaluation) + if err != nil { + return err + } + req := &storage.WriteRequest{ + TableName: tableName, + ColumnFamily: columnFamily, + ColumnName: columnName, + Items: []*storage.WriteItem{ + { + Key: key, + Value: value, + }, + }, + } + if err := s.client.WriteRow(ctx, 
req); err != nil { + return err + } + return nil +} + +func (s *userEvaluationsStorage) GetUserEvaluations( + ctx context.Context, + userID, environmentNamespace, tag string, +) ([]*featureproto.Evaluation, error) { + prefix := newPrefix(environmentNamespace, tag, userID) + req := &storage.ReadRequest{ + TableName: tableName, + ColumnFamily: columnFamily, + RowSet: storage.RowPrefix(prefix), + RowFilters: []storage.RowFilter{ + storage.LatestNFilter(1), + }, + } + it, err := s.client.ReadRows(ctx, req) + if err != nil { + return nil, err + } + items, err := it.ReadItems(columnName) + if err != nil { + return nil, err + } + evaluations := make([]*featureproto.Evaluation, 0, len(items)) + for _, item := range items { + evaluation := &featureproto.Evaluation{} + if err := proto.Unmarshal(item.Value, evaluation); err != nil { + return nil, err + } + evaluations = append(evaluations, evaluation) + } + return evaluations, nil +} + +func newKey( + environmentNamespace, tag, userID, featureID string, + featureVersion int32, +) string { + evaluationID := domain.EvaluationID(featureID, featureVersion, userID) + key := fmt.Sprintf("%s#%s#%s", userID, tag, evaluationID) + return storage.NewKey(environmentNamespace, key) +} + +func newPrefix(environmentNamespace, tag, userID string) string { + prefix := fmt.Sprintf("%s#%s", userID, tag) + return storage.NewKey(environmentNamespace, prefix) +} diff --git a/pkg/feature/storage/user_evaluations_test.go b/pkg/feature/storage/user_evaluations_test.go new file mode 100644 index 000000000..53482dc1f --- /dev/null +++ b/pkg/feature/storage/user_evaluations_test.go @@ -0,0 +1,220 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + "fmt" + "testing" + + "github.com/golang/mock/gomock" + "github.com/golang/protobuf/proto" + "github.com/stretchr/testify/assert" + + storage "github.com/bucketeer-io/bucketeer/pkg/storage/v2/bigtable" + btmock "github.com/bucketeer-io/bucketeer/pkg/storage/v2/bigtable/mock" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +const ( + environmentNamespace = "environmentNamespace" + tag = "tag" + userID = "user-id" +) + +var ( + evaluation = &featureproto.Evaluation{ + FeatureId: "feature-id", + FeatureVersion: 1, + UserId: "user-id", + VariationId: "variation-id", + VariationValue: "variation-value", + } +) + +func TestNewUserEvaluationsStorage(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + db := NewUserEvaluationsStorage(btmock.NewMockClient(mockController)) + assert.IsType(t, &userEvaluationsStorage{}, db) +} + +type rows struct { + columnFamily string + value []byte +} + +func (r *rows) ReadItems(column string) ([]*storage.ReadItem, error) { + items := []*storage.ReadItem{ + { + RowKey: "Row-1", + Column: fmt.Sprintf("%s:%s", r.columnFamily, column), + Timestamp: 0, + Value: r.value, + }, + } + return items, nil +} + +func TestGetUserEvaluations(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + prefix := newPrefix(environmentNamespace, tag, userID) + req := &storage.ReadRequest{ + 
TableName: tableName, + ColumnFamily: columnFamily, + RowSet: storage.RowPrefix(prefix), + RowFilters: []storage.RowFilter{ + storage.LatestNFilter(1), + }, + } + value, err := proto.Marshal(evaluation) + assert.NoError(t, err) + patterns := []struct { + desc string + setup func(context.Context, *userEvaluationsStorage) + expected []*featureproto.Evaluation + expectedErr error + }{ + { + desc: "ErrInternal", + setup: func(ctx context.Context, s *userEvaluationsStorage) { + s.client.(*btmock.MockClient).EXPECT().ReadRows( + ctx, + req, + ).Return(nil, storage.ErrInternal) + }, + expected: nil, + expectedErr: storage.ErrInternal, + }, + { + desc: "ErrKeyNotFound", + setup: func(ctx context.Context, s *userEvaluationsStorage) { + s.client.(*btmock.MockClient).EXPECT().ReadRows( + ctx, + req, + ).Return(nil, storage.ErrKeyNotFound) + }, + expected: nil, + expectedErr: storage.ErrKeyNotFound, + }, + { + desc: "Success", + setup: func(ctx context.Context, s *userEvaluationsStorage) { + s.client.(*btmock.MockClient).EXPECT().ReadRows( + ctx, + req, + ).Return( + &rows{ + columnFamily: columnFamily, + value: value, + }, + nil, + ) + }, + expected: []*featureproto.Evaluation{evaluation}, + expectedErr: nil, + }, + } + for _, p := range patterns { + s := createNewUserEvaluationsStorage(mockController) + p.setup(ctx, s) + actual, err := s.GetUserEvaluations( + ctx, + userID, + environmentNamespace, + tag, + ) + if p.expected != nil { + assert.True(t, proto.Equal(p.expected[0], actual[0]), p.desc) + } + assert.Equal(t, p.expectedErr, err, "%s", p.desc) + } +} + +func TestUpsertUserEvaluation(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + value, err := proto.Marshal(evaluation) + assert.NoError(t, err) + key := newKey( + environmentNamespace, + tag, + evaluation.UserId, + evaluation.FeatureId, + evaluation.FeatureVersion, + ) + req := 
&storage.WriteRequest{ + TableName: tableName, + ColumnFamily: columnFamily, + ColumnName: columnName, + Items: []*storage.WriteItem{ + { + Key: key, + Value: value, + }, + }, + } + patterns := []struct { + desc string + setup func(context.Context, *userEvaluationsStorage) + expected error + }{ + { + desc: "ErrInternal", + setup: func(ctx context.Context, s *userEvaluationsStorage) { + s.client.(*btmock.MockClient).EXPECT().WriteRow( + ctx, + req, + ).Return(storage.ErrInternal) + }, + expected: storage.ErrInternal, + }, + { + desc: "Success", + setup: func(ctx context.Context, s *userEvaluationsStorage) { + s.client.(*btmock.MockClient).EXPECT().WriteRow( + ctx, + req, + ).Return(nil) + }, + expected: nil, + }, + } + for _, p := range patterns { + s := createNewUserEvaluationsStorage(mockController) + p.setup(ctx, s) + actual := s.UpsertUserEvaluation( + ctx, + evaluation, + environmentNamespace, + tag, + ) + assert.Equal(t, p.expected, actual, "%s", p.desc) + } +} + +func createNewUserEvaluationsStorage(c *gomock.Controller) *userEvaluationsStorage { + return &userEvaluationsStorage{ + client: btmock.NewMockClient(c), + } +} diff --git a/pkg/feature/storage/v2/BUILD.bazel b/pkg/feature/storage/v2/BUILD.bazel new file mode 100644 index 000000000..3825a5c9b --- /dev/null +++ b/pkg/feature/storage/v2/BUILD.bazel @@ -0,0 +1,36 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "feature.go", + "feature_last_used_info.go", + "segment.go", + "segment_user.go", + "tag.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/feature/storage/v2", + visibility = ["//visibility:public"], + deps = [ + "//pkg/feature/domain:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//proto/feature:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "feature_last_used_info_test.go", + "feature_test.go", + "segment_test.go", + "segment_user_test.go", + 
"tag_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//pkg/storage/v2/mysql/mock:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + ], +) diff --git a/pkg/feature/storage/v2/feature.go b/pkg/feature/storage/v2/feature.go new file mode 100644 index 000000000..9ad9c6c75 --- /dev/null +++ b/pkg/feature/storage/v2/feature.go @@ -0,0 +1,467 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package v2 + +import ( + "context" + "errors" + "fmt" + + "github.com/bucketeer-io/bucketeer/pkg/feature/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + proto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +var ( + ErrFeatureAlreadyExists = errors.New("feature: already exists") + ErrFeatureNotFound = errors.New("feature: not found") + ErrFeatureUnexpectedAffectedRows = errors.New("feature: unexpected affected rows") +) + +type FeatureStorage interface { + CreateFeature(ctx context.Context, feature *domain.Feature, environmentNamespace string) error + UpdateFeature(ctx context.Context, feature *domain.Feature, environmentNamespace string) error + GetFeature(ctx context.Context, key, environmentNamespace string) (*domain.Feature, error) + ListFeatures( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, + ) ([]*proto.Feature, int, int64, error) + ListFeaturesFilteredByExperiment( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, + ) ([]*proto.Feature, int, int64, error) +} + +type featureStorage struct { + qe mysql.QueryExecer +} + +func NewFeatureStorage(qe mysql.QueryExecer) FeatureStorage { + return &featureStorage{qe: qe} +} + +func (s *featureStorage) CreateFeature( + ctx context.Context, + feature *domain.Feature, + environmentNamespace string, +) error { + query := ` + INSERT INTO feature ( + id, + name, + description, + enabled, + archived, + deleted, + evaluation_undelayable, + ttl, + version, + created_at, + updated_at, + variation_type, + variations, + targets, + rules, + default_strategy, + off_variation, + tags, + maintainer, + sampling_seed, + prerequisites, + environment_namespace + ) VALUES ( + ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, + ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, + ?, ? 
+ ) + ` + _, err := s.qe.ExecContext( + ctx, + query, + feature.Id, + feature.Name, + feature.Description, + feature.Enabled, + feature.Archived, + feature.Deleted, + feature.EvaluationUndelayable, + feature.Ttl, + feature.Version, + feature.CreatedAt, + feature.UpdatedAt, + int32(feature.VariationType), + mysql.JSONObject{Val: feature.Variations}, + mysql.JSONObject{Val: feature.Targets}, + mysql.JSONObject{Val: feature.Rules}, + mysql.JSONObject{Val: feature.DefaultStrategy}, + feature.OffVariation, + mysql.JSONObject{Val: feature.Tags}, + feature.Maintainer, + feature.SamplingSeed, + mysql.JSONObject{Val: feature.Prerequisites}, + environmentNamespace, + ) + if err != nil { + if err == mysql.ErrDuplicateEntry { + return ErrFeatureAlreadyExists + } + return err + } + return nil +} + +func (s *featureStorage) UpdateFeature( + ctx context.Context, + feature *domain.Feature, + environmentNamespace string, +) error { + query := ` + UPDATE + feature + SET + name = ?, + description = ?, + enabled = ?, + archived = ?, + deleted = ?, + evaluation_undelayable = ?, + ttl = ?, + version = ?, + created_at = ?, + updated_at = ?, + variation_type = ?, + variations = ?, + targets = ?, + rules = ?, + default_strategy = ?, + off_variation = ?, + tags = ?, + maintainer = ?, + sampling_seed = ?, + prerequisites = ? + WHERE + id = ? AND + environment_namespace = ? 
+ ` + result, err := s.qe.ExecContext( + ctx, + query, + feature.Name, + feature.Description, + feature.Enabled, + feature.Archived, + feature.Deleted, + feature.EvaluationUndelayable, + feature.Ttl, + feature.Version, + feature.CreatedAt, + feature.UpdatedAt, + int32(feature.VariationType), + mysql.JSONObject{Val: feature.Variations}, + mysql.JSONObject{Val: feature.Targets}, + mysql.JSONObject{Val: feature.Rules}, + mysql.JSONObject{Val: feature.DefaultStrategy}, + feature.OffVariation, + mysql.JSONObject{Val: feature.Tags}, + feature.Maintainer, + feature.SamplingSeed, + mysql.JSONObject{Val: feature.Prerequisites}, + feature.Id, + environmentNamespace, + ) + if err != nil { + return err + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return err + } + if rowsAffected != 1 { + return ErrFeatureUnexpectedAffectedRows + } + return nil +} + +func (s *featureStorage) GetFeature( + ctx context.Context, + key, environmentNamespace string, +) (*domain.Feature, error) { + feature := proto.Feature{} + query := ` + SELECT + id, + name, + description, + enabled, + archived, + deleted, + evaluation_undelayable, + ttl, + version, + created_at, + updated_at, + variation_type, + variations, + targets, + rules, + default_strategy, + off_variation, + tags, + maintainer, + sampling_seed, + prerequisites + FROM + feature + WHERE + id = ? AND + environment_namespace = ? 
+ ` + err := s.qe.QueryRowContext( + ctx, + query, + key, + environmentNamespace, + ).Scan( + &feature.Id, + &feature.Name, + &feature.Description, + &feature.Enabled, + &feature.Archived, + &feature.Deleted, + &feature.EvaluationUndelayable, + &feature.Ttl, + &feature.Version, + &feature.CreatedAt, + &feature.UpdatedAt, + &feature.VariationType, + &mysql.JSONObject{Val: &feature.Variations}, + &mysql.JSONObject{Val: &feature.Targets}, + &mysql.JSONObject{Val: &feature.Rules}, + &mysql.JSONObject{Val: &feature.DefaultStrategy}, + &feature.OffVariation, + &mysql.JSONObject{Val: &feature.Tags}, + &feature.Maintainer, + &feature.SamplingSeed, + &mysql.JSONObject{Val: feature.Prerequisites}, + ) + if err != nil { + if err == mysql.ErrNoRows { + return nil, ErrFeatureNotFound + } + return nil, err + } + return &domain.Feature{Feature: &feature}, nil +} + +func (s *featureStorage) ListFeatures( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, +) ([]*proto.Feature, int, int64, error) { + whereSQL, whereArgs := mysql.ConstructWhereSQLString(whereParts) + orderBySQL := mysql.ConstructOrderBySQLString(orders) + limitOffsetSQL := mysql.ConstructLimitOffsetSQLString(limit, offset) + query := fmt.Sprintf(` + SELECT + id, + name, + description, + enabled, + archived, + deleted, + evaluation_undelayable, + ttl, + version, + created_at, + updated_at, + variation_type, + variations, + targets, + rules, + default_strategy, + off_variation, + tags, + maintainer, + sampling_seed, + prerequisites + FROM + feature + %s %s %s + `, whereSQL, orderBySQL, limitOffsetSQL, + ) + rows, err := s.qe.QueryContext(ctx, query, whereArgs...) 
+ if err != nil { + return nil, 0, 0, err + } + defer rows.Close() + features := make([]*proto.Feature, 0, limit) + for rows.Next() { + feature := proto.Feature{} + err := rows.Scan( + &feature.Id, + &feature.Name, + &feature.Description, + &feature.Enabled, + &feature.Archived, + &feature.Deleted, + &feature.EvaluationUndelayable, + &feature.Ttl, + &feature.Version, + &feature.CreatedAt, + &feature.UpdatedAt, + &feature.VariationType, + &mysql.JSONObject{Val: &feature.Variations}, + &mysql.JSONObject{Val: &feature.Targets}, + &mysql.JSONObject{Val: &feature.Rules}, + &mysql.JSONObject{Val: &feature.DefaultStrategy}, + &feature.OffVariation, + &mysql.JSONObject{Val: &feature.Tags}, + &feature.Maintainer, + &feature.SamplingSeed, + &mysql.JSONObject{Val: feature.Prerequisites}, + ) + if err != nil { + return nil, 0, 0, err + } + features = append(features, &feature) + } + if rows.Err() != nil { + return nil, 0, 0, err + } + nextOffset := offset + len(features) + var totalCount int64 + countQuery := fmt.Sprintf(` + SELECT + COUNT(1) + FROM + feature + %s %s + `, whereSQL, orderBySQL, + ) + err = s.qe.QueryRowContext(ctx, countQuery, whereArgs...).Scan(&totalCount) + if err != nil { + return nil, 0, 0, err + } + return features, nextOffset, totalCount, nil +} + +func (s *featureStorage) ListFeaturesFilteredByExperiment( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, +) ([]*proto.Feature, int, int64, error) { + whereSQL, whereArgs := mysql.ConstructWhereSQLString(whereParts) + orderBySQL := mysql.ConstructOrderBySQLString(orders) + limitOffsetSQL := mysql.ConstructLimitOffsetSQLString(limit, offset) + query := fmt.Sprintf(` + SELECT DISTINCT + feature.id, + feature.name, + feature.description, + feature.enabled, + feature.archived, + feature.deleted, + feature.evaluation_undelayable, + feature.ttl, + feature.version, + feature.created_at, + feature.updated_at, + feature.variation_type, + feature.variations, + 
feature.targets, + feature.rules, + feature.default_strategy, + feature.off_variation, + feature.tags, + feature.maintainer, + feature.sampling_seed, + feature.prerequisites + FROM + feature + LEFT OUTER JOIN + experiment + ON + feature.id = experiment.feature_id AND + feature.environment_namespace = experiment.environment_namespace + %s %s %s + `, whereSQL, orderBySQL, limitOffsetSQL, + ) + rows, err := s.qe.QueryContext(ctx, query, whereArgs...) + if err != nil { + return nil, 0, 0, err + } + defer rows.Close() + features := make([]*proto.Feature, 0, limit) + for rows.Next() { + feature := proto.Feature{} + err := rows.Scan( + &feature.Id, + &feature.Name, + &feature.Description, + &feature.Enabled, + &feature.Archived, + &feature.Deleted, + &feature.EvaluationUndelayable, + &feature.Ttl, + &feature.Version, + &feature.CreatedAt, + &feature.UpdatedAt, + &feature.VariationType, + &mysql.JSONObject{Val: &feature.Variations}, + &mysql.JSONObject{Val: &feature.Targets}, + &mysql.JSONObject{Val: &feature.Rules}, + &mysql.JSONObject{Val: &feature.DefaultStrategy}, + &feature.OffVariation, + &mysql.JSONObject{Val: &feature.Tags}, + &feature.Maintainer, + &feature.SamplingSeed, + &mysql.JSONObject{Val: feature.Prerequisites}, + ) + if err != nil { + return nil, 0, 0, err + } + features = append(features, &feature) + } + if rows.Err() != nil { + return nil, 0, 0, err + } + nextOffset := offset + len(features) + var totalCount int64 + countQuery := fmt.Sprintf(` + SELECT + COUNT(DISTINCT feature.id) + FROM + feature + LEFT OUTER JOIN + experiment + ON + feature.id = experiment.feature_id AND + feature.environment_namespace = experiment.environment_namespace + %s %s + `, whereSQL, orderBySQL, + ) + err = s.qe.QueryRowContext(ctx, countQuery, whereArgs...).Scan(&totalCount) + if err != nil { + return nil, 0, 0, err + } + return features, nextOffset, totalCount, nil +} diff --git a/pkg/feature/storage/v2/feature_last_used_info.go 
b/pkg/feature/storage/v2/feature_last_used_info.go new file mode 100644 index 000000000..691eb4ab0 --- /dev/null +++ b/pkg/feature/storage/v2/feature_last_used_info.go @@ -0,0 +1,154 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package v2 + +import ( + "context" + "fmt" + + "github.com/bucketeer-io/bucketeer/pkg/feature/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + proto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +type FeatureLastUsedInfoStorage interface { + GetFeatureLastUsedInfos( + ctx context.Context, + ids []string, + environmentNamespace string, + ) ([]*domain.FeatureLastUsedInfo, error) + UpsertFeatureLastUsedInfo( + ctx context.Context, + featureLastUsedInfos *domain.FeatureLastUsedInfo, + environmentNamespace string, + ) error +} + +type featureLastUsedInfoStorage struct { + qe mysql.QueryExecer +} + +func NewFeatureLastUsedInfoStorage(qe mysql.QueryExecer) FeatureLastUsedInfoStorage { + return &featureLastUsedInfoStorage{qe: qe} +} + +func (s *featureLastUsedInfoStorage) GetFeatureLastUsedInfos( + ctx context.Context, + ids []string, + environmentNamespace string, +) ([]*domain.FeatureLastUsedInfo, error) { + inFilterIDs := make([]interface{}, 0, len(ids)) + for _, id := range ids { + inFilterIDs = append(inFilterIDs, id) + } + whereParts := []mysql.WherePart{ + 
mysql.NewInFilter("id", inFilterIDs), + mysql.NewFilter("environment_namespace", "=", environmentNamespace), + } + whereSQL, whereArgs := mysql.ConstructWhereSQLString(whereParts) + query := fmt.Sprintf(` + SELECT + feature_id, + version, + last_used_at, + client_oldest_version, + client_latest_version, + created_at + FROM + feature_last_used_info + %s + `, whereSQL, + ) + rows, err := s.qe.QueryContext( + ctx, + query, + whereArgs..., + ) + if err != nil { + return nil, err + } + defer rows.Close() + entries := make([]*proto.FeatureLastUsedInfo, 0, len(ids)) + for rows.Next() { + flui := proto.FeatureLastUsedInfo{} + err := rows.Scan( + &flui.FeatureId, + &flui.Version, + &flui.LastUsedAt, + &flui.ClientOldestVersion, + &flui.ClientLatestVersion, + &flui.CreatedAt, + ) + if err != nil { + return nil, err + } + entries = append(entries, &flui) + } + if rows.Err() != nil { + return nil, err + } + // NOTE: If the performance matters, remove the following loop and return protos. + domainFeatureLastUsedInfos := make([]*domain.FeatureLastUsedInfo, 0, len(entries)) + for _, e := range entries { + domainFeatureLastUsedInfos = append( + domainFeatureLastUsedInfos, + &domain.FeatureLastUsedInfo{FeatureLastUsedInfo: e}, + ) + } + return domainFeatureLastUsedInfos, nil +} + +func (s *featureLastUsedInfoStorage) UpsertFeatureLastUsedInfo( + ctx context.Context, + flui *domain.FeatureLastUsedInfo, + environmentNamespace string, +) error { + query := ` + INSERT INTO feature_last_used_info ( + id, + feature_id, + version, + last_used_at, + client_oldest_version, + client_latest_version, + created_at, + environment_namespace + ) VALUES ( + ?, ?, ?, ?, ?, ?, ?, ? 
+ ) ON DUPLICATE KEY UPDATE + feature_id = VALUES(feature_id), + version = VALUES(version), + last_used_at = VALUES(last_used_at), + client_oldest_version = VALUES(client_oldest_version), + client_latest_version = VALUES(client_latest_version) + ` + _, err := s.qe.ExecContext( + ctx, + query, + flui.ID(), + flui.FeatureId, + flui.Version, + flui.LastUsedAt, + flui.ClientOldestVersion, + flui.ClientLatestVersion, + flui.CreatedAt, + environmentNamespace, + ) + if err != nil { + return err + } + return nil +} diff --git a/pkg/feature/storage/v2/feature_last_used_info_test.go b/pkg/feature/storage/v2/feature_last_used_info_test.go new file mode 100644 index 000000000..43e16fa39 --- /dev/null +++ b/pkg/feature/storage/v2/feature_last_used_info_test.go @@ -0,0 +1,32 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v2 + +import ( + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" +) + +func TestNewFeatureLastUsedInfoStorage(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + storage := NewFeatureLastUsedInfoStorage(mock.NewMockQueryExecer(mockController)) + assert.IsType(t, &featureLastUsedInfoStorage{}, storage) +} diff --git a/pkg/feature/storage/v2/feature_test.go b/pkg/feature/storage/v2/feature_test.go new file mode 100644 index 000000000..04b4cab81 --- /dev/null +++ b/pkg/feature/storage/v2/feature_test.go @@ -0,0 +1,32 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v2 + +import ( + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" +) + +func TestNewFeatureStorage(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + storage := NewFeatureStorage(mock.NewMockQueryExecer(mockController)) + assert.IsType(t, &featureStorage{}, storage) +} diff --git a/pkg/feature/storage/v2/mock/BUILD.bazel b/pkg/feature/storage/v2/mock/BUILD.bazel new file mode 100644 index 000000000..942ee3fd6 --- /dev/null +++ b/pkg/feature/storage/v2/mock/BUILD.bazel @@ -0,0 +1,20 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "feature.go", + "feature_last_used_info.go", + "segment.go", + "segment_user.go", + "tag.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/feature/storage/v2/mock", + visibility = ["//visibility:public"], + deps = [ + "//pkg/feature/domain:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//proto/feature:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + ], +) diff --git a/pkg/feature/storage/v2/mock/feature.go b/pkg/feature/storage/v2/mock/feature.go new file mode 100644 index 000000000..bbc01c1e6 --- /dev/null +++ b/pkg/feature/storage/v2/mock/feature.go @@ -0,0 +1,116 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: feature.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + domain "github.com/bucketeer-io/bucketeer/pkg/feature/domain" + mysql "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + feature "github.com/bucketeer-io/bucketeer/proto/feature" +) + +// MockFeatureStorage is a mock of FeatureStorage interface. 
+type MockFeatureStorage struct { + ctrl *gomock.Controller + recorder *MockFeatureStorageMockRecorder +} + +// MockFeatureStorageMockRecorder is the mock recorder for MockFeatureStorage. +type MockFeatureStorageMockRecorder struct { + mock *MockFeatureStorage +} + +// NewMockFeatureStorage creates a new mock instance. +func NewMockFeatureStorage(ctrl *gomock.Controller) *MockFeatureStorage { + mock := &MockFeatureStorage{ctrl: ctrl} + mock.recorder = &MockFeatureStorageMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockFeatureStorage) EXPECT() *MockFeatureStorageMockRecorder { + return m.recorder +} + +// CreateFeature mocks base method. +func (m *MockFeatureStorage) CreateFeature(ctx context.Context, feature *domain.Feature, environmentNamespace string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateFeature", ctx, feature, environmentNamespace) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateFeature indicates an expected call of CreateFeature. +func (mr *MockFeatureStorageMockRecorder) CreateFeature(ctx, feature, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateFeature", reflect.TypeOf((*MockFeatureStorage)(nil).CreateFeature), ctx, feature, environmentNamespace) +} + +// GetFeature mocks base method. +func (m *MockFeatureStorage) GetFeature(ctx context.Context, key, environmentNamespace string) (*domain.Feature, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetFeature", ctx, key, environmentNamespace) + ret0, _ := ret[0].(*domain.Feature) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetFeature indicates an expected call of GetFeature. 
+func (mr *MockFeatureStorageMockRecorder) GetFeature(ctx, key, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFeature", reflect.TypeOf((*MockFeatureStorage)(nil).GetFeature), ctx, key, environmentNamespace) +} + +// ListFeatures mocks base method. +func (m *MockFeatureStorage) ListFeatures(ctx context.Context, whereParts []mysql.WherePart, orders []*mysql.Order, limit, offset int) ([]*feature.Feature, int, int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListFeatures", ctx, whereParts, orders, limit, offset) + ret0, _ := ret[0].([]*feature.Feature) + ret1, _ := ret[1].(int) + ret2, _ := ret[2].(int64) + ret3, _ := ret[3].(error) + return ret0, ret1, ret2, ret3 +} + +// ListFeatures indicates an expected call of ListFeatures. +func (mr *MockFeatureStorageMockRecorder) ListFeatures(ctx, whereParts, orders, limit, offset interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListFeatures", reflect.TypeOf((*MockFeatureStorage)(nil).ListFeatures), ctx, whereParts, orders, limit, offset) +} + +// ListFeaturesFilteredByExperiment mocks base method. +func (m *MockFeatureStorage) ListFeaturesFilteredByExperiment(ctx context.Context, whereParts []mysql.WherePart, orders []*mysql.Order, limit, offset int) ([]*feature.Feature, int, int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListFeaturesFilteredByExperiment", ctx, whereParts, orders, limit, offset) + ret0, _ := ret[0].([]*feature.Feature) + ret1, _ := ret[1].(int) + ret2, _ := ret[2].(int64) + ret3, _ := ret[3].(error) + return ret0, ret1, ret2, ret3 +} + +// ListFeaturesFilteredByExperiment indicates an expected call of ListFeaturesFilteredByExperiment. 
+func (mr *MockFeatureStorageMockRecorder) ListFeaturesFilteredByExperiment(ctx, whereParts, orders, limit, offset interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListFeaturesFilteredByExperiment", reflect.TypeOf((*MockFeatureStorage)(nil).ListFeaturesFilteredByExperiment), ctx, whereParts, orders, limit, offset) +} + +// UpdateFeature mocks base method. +func (m *MockFeatureStorage) UpdateFeature(ctx context.Context, feature *domain.Feature, environmentNamespace string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateFeature", ctx, feature, environmentNamespace) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateFeature indicates an expected call of UpdateFeature. +func (mr *MockFeatureStorageMockRecorder) UpdateFeature(ctx, feature, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateFeature", reflect.TypeOf((*MockFeatureStorage)(nil).UpdateFeature), ctx, feature, environmentNamespace) +} diff --git a/pkg/feature/storage/v2/mock/feature_last_used_info.go b/pkg/feature/storage/v2/mock/feature_last_used_info.go new file mode 100644 index 000000000..36cd1d880 --- /dev/null +++ b/pkg/feature/storage/v2/mock/feature_last_used_info.go @@ -0,0 +1,66 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: feature_last_used_info.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + domain "github.com/bucketeer-io/bucketeer/pkg/feature/domain" +) + +// MockFeatureLastUsedInfoStorage is a mock of FeatureLastUsedInfoStorage interface. +type MockFeatureLastUsedInfoStorage struct { + ctrl *gomock.Controller + recorder *MockFeatureLastUsedInfoStorageMockRecorder +} + +// MockFeatureLastUsedInfoStorageMockRecorder is the mock recorder for MockFeatureLastUsedInfoStorage. 
+type MockFeatureLastUsedInfoStorageMockRecorder struct { + mock *MockFeatureLastUsedInfoStorage +} + +// NewMockFeatureLastUsedInfoStorage creates a new mock instance. +func NewMockFeatureLastUsedInfoStorage(ctrl *gomock.Controller) *MockFeatureLastUsedInfoStorage { + mock := &MockFeatureLastUsedInfoStorage{ctrl: ctrl} + mock.recorder = &MockFeatureLastUsedInfoStorageMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockFeatureLastUsedInfoStorage) EXPECT() *MockFeatureLastUsedInfoStorageMockRecorder { + return m.recorder +} + +// GetFeatureLastUsedInfos mocks base method. +func (m *MockFeatureLastUsedInfoStorage) GetFeatureLastUsedInfos(ctx context.Context, ids []string, environmentNamespace string) ([]*domain.FeatureLastUsedInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetFeatureLastUsedInfos", ctx, ids, environmentNamespace) + ret0, _ := ret[0].([]*domain.FeatureLastUsedInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetFeatureLastUsedInfos indicates an expected call of GetFeatureLastUsedInfos. +func (mr *MockFeatureLastUsedInfoStorageMockRecorder) GetFeatureLastUsedInfos(ctx, ids, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFeatureLastUsedInfos", reflect.TypeOf((*MockFeatureLastUsedInfoStorage)(nil).GetFeatureLastUsedInfos), ctx, ids, environmentNamespace) +} + +// UpsertFeatureLastUsedInfo mocks base method. +func (m *MockFeatureLastUsedInfoStorage) UpsertFeatureLastUsedInfo(ctx context.Context, featureLastUsedInfos *domain.FeatureLastUsedInfo, environmentNamespace string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpsertFeatureLastUsedInfo", ctx, featureLastUsedInfos, environmentNamespace) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpsertFeatureLastUsedInfo indicates an expected call of UpsertFeatureLastUsedInfo. 
+func (mr *MockFeatureLastUsedInfoStorageMockRecorder) UpsertFeatureLastUsedInfo(ctx, featureLastUsedInfos, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpsertFeatureLastUsedInfo", reflect.TypeOf((*MockFeatureLastUsedInfoStorage)(nil).UpsertFeatureLastUsedInfo), ctx, featureLastUsedInfos, environmentNamespace) +} diff --git a/pkg/feature/storage/v2/mock/segment.go b/pkg/feature/storage/v2/mock/segment.go new file mode 100644 index 000000000..4a96d3780 --- /dev/null +++ b/pkg/feature/storage/v2/mock/segment.go @@ -0,0 +1,99 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: segment.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + domain "github.com/bucketeer-io/bucketeer/pkg/feature/domain" + mysql "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + feature "github.com/bucketeer-io/bucketeer/proto/feature" +) + +// MockSegmentStorage is a mock of SegmentStorage interface. +type MockSegmentStorage struct { + ctrl *gomock.Controller + recorder *MockSegmentStorageMockRecorder +} + +// MockSegmentStorageMockRecorder is the mock recorder for MockSegmentStorage. +type MockSegmentStorageMockRecorder struct { + mock *MockSegmentStorage +} + +// NewMockSegmentStorage creates a new mock instance. +func NewMockSegmentStorage(ctrl *gomock.Controller) *MockSegmentStorage { + mock := &MockSegmentStorage{ctrl: ctrl} + mock.recorder = &MockSegmentStorageMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSegmentStorage) EXPECT() *MockSegmentStorageMockRecorder { + return m.recorder +} + +// CreateSegment mocks base method. 
+func (m *MockSegmentStorage) CreateSegment(ctx context.Context, segment *domain.Segment, environmentNamespace string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateSegment", ctx, segment, environmentNamespace) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateSegment indicates an expected call of CreateSegment. +func (mr *MockSegmentStorageMockRecorder) CreateSegment(ctx, segment, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSegment", reflect.TypeOf((*MockSegmentStorage)(nil).CreateSegment), ctx, segment, environmentNamespace) +} + +// GetSegment mocks base method. +func (m *MockSegmentStorage) GetSegment(ctx context.Context, id, environmentNamespace string) (*domain.Segment, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSegment", ctx, id, environmentNamespace) + ret0, _ := ret[0].(*domain.Segment) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSegment indicates an expected call of GetSegment. +func (mr *MockSegmentStorageMockRecorder) GetSegment(ctx, id, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSegment", reflect.TypeOf((*MockSegmentStorage)(nil).GetSegment), ctx, id, environmentNamespace) +} + +// ListSegments mocks base method. +func (m *MockSegmentStorage) ListSegments(ctx context.Context, whereParts []mysql.WherePart, orders []*mysql.Order, limit, offset int, isInUseStatus *bool, environmentNamespace string) ([]*feature.Segment, int, int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListSegments", ctx, whereParts, orders, limit, offset, isInUseStatus, environmentNamespace) + ret0, _ := ret[0].([]*feature.Segment) + ret1, _ := ret[1].(int) + ret2, _ := ret[2].(int64) + ret3, _ := ret[3].(error) + return ret0, ret1, ret2, ret3 +} + +// ListSegments indicates an expected call of ListSegments. 
+func (mr *MockSegmentStorageMockRecorder) ListSegments(ctx, whereParts, orders, limit, offset, isInUseStatus, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSegments", reflect.TypeOf((*MockSegmentStorage)(nil).ListSegments), ctx, whereParts, orders, limit, offset, isInUseStatus, environmentNamespace) +} + +// UpdateSegment mocks base method. +func (m *MockSegmentStorage) UpdateSegment(ctx context.Context, segment *domain.Segment, environmentNamespace string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateSegment", ctx, segment, environmentNamespace) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateSegment indicates an expected call of UpdateSegment. +func (mr *MockSegmentStorageMockRecorder) UpdateSegment(ctx, segment, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateSegment", reflect.TypeOf((*MockSegmentStorage)(nil).UpdateSegment), ctx, segment, environmentNamespace) +} diff --git a/pkg/feature/storage/v2/mock/segment_user.go b/pkg/feature/storage/v2/mock/segment_user.go new file mode 100644 index 000000000..3192d46d8 --- /dev/null +++ b/pkg/feature/storage/v2/mock/segment_user.go @@ -0,0 +1,84 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: segment_user.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + domain "github.com/bucketeer-io/bucketeer/pkg/feature/domain" + mysql "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + feature "github.com/bucketeer-io/bucketeer/proto/feature" +) + +// MockSegmentUserStorage is a mock of SegmentUserStorage interface. 
+type MockSegmentUserStorage struct { + ctrl *gomock.Controller + recorder *MockSegmentUserStorageMockRecorder +} + +// MockSegmentUserStorageMockRecorder is the mock recorder for MockSegmentUserStorage. +type MockSegmentUserStorageMockRecorder struct { + mock *MockSegmentUserStorage +} + +// NewMockSegmentUserStorage creates a new mock instance. +func NewMockSegmentUserStorage(ctrl *gomock.Controller) *MockSegmentUserStorage { + mock := &MockSegmentUserStorage{ctrl: ctrl} + mock.recorder = &MockSegmentUserStorageMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSegmentUserStorage) EXPECT() *MockSegmentUserStorageMockRecorder { + return m.recorder +} + +// GetSegmentUser mocks base method. +func (m *MockSegmentUserStorage) GetSegmentUser(ctx context.Context, id, environmentNamespace string) (*domain.SegmentUser, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSegmentUser", ctx, id, environmentNamespace) + ret0, _ := ret[0].(*domain.SegmentUser) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSegmentUser indicates an expected call of GetSegmentUser. +func (mr *MockSegmentUserStorageMockRecorder) GetSegmentUser(ctx, id, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSegmentUser", reflect.TypeOf((*MockSegmentUserStorage)(nil).GetSegmentUser), ctx, id, environmentNamespace) +} + +// ListSegmentUsers mocks base method. 
+func (m *MockSegmentUserStorage) ListSegmentUsers(ctx context.Context, whereParts []mysql.WherePart, orders []*mysql.Order, limit, offset int) ([]*feature.SegmentUser, int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListSegmentUsers", ctx, whereParts, orders, limit, offset) + ret0, _ := ret[0].([]*feature.SegmentUser) + ret1, _ := ret[1].(int) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// ListSegmentUsers indicates an expected call of ListSegmentUsers. +func (mr *MockSegmentUserStorageMockRecorder) ListSegmentUsers(ctx, whereParts, orders, limit, offset interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSegmentUsers", reflect.TypeOf((*MockSegmentUserStorage)(nil).ListSegmentUsers), ctx, whereParts, orders, limit, offset) +} + +// UpsertSegmentUsers mocks base method. +func (m *MockSegmentUserStorage) UpsertSegmentUsers(ctx context.Context, users []*feature.SegmentUser, environmentNamespace string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpsertSegmentUsers", ctx, users, environmentNamespace) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpsertSegmentUsers indicates an expected call of UpsertSegmentUsers. +func (mr *MockSegmentUserStorageMockRecorder) UpsertSegmentUsers(ctx, users, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpsertSegmentUsers", reflect.TypeOf((*MockSegmentUserStorage)(nil).UpsertSegmentUsers), ctx, users, environmentNamespace) +} diff --git a/pkg/feature/storage/v2/mock/tag.go b/pkg/feature/storage/v2/mock/tag.go new file mode 100644 index 000000000..570115059 --- /dev/null +++ b/pkg/feature/storage/v2/mock/tag.go @@ -0,0 +1,70 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: tag.go + +// Package mock is a generated GoMock package. 
+package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + domain "github.com/bucketeer-io/bucketeer/pkg/feature/domain" + mysql "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + feature "github.com/bucketeer-io/bucketeer/proto/feature" +) + +// MockTagStorage is a mock of TagStorage interface. +type MockTagStorage struct { + ctrl *gomock.Controller + recorder *MockTagStorageMockRecorder +} + +// MockTagStorageMockRecorder is the mock recorder for MockTagStorage. +type MockTagStorageMockRecorder struct { + mock *MockTagStorage +} + +// NewMockTagStorage creates a new mock instance. +func NewMockTagStorage(ctrl *gomock.Controller) *MockTagStorage { + mock := &MockTagStorage{ctrl: ctrl} + mock.recorder = &MockTagStorageMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockTagStorage) EXPECT() *MockTagStorageMockRecorder { + return m.recorder +} + +// ListTags mocks base method. +func (m *MockTagStorage) ListTags(ctx context.Context, whereParts []mysql.WherePart, orders []*mysql.Order, limit, offset int) ([]*feature.Tag, int, int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListTags", ctx, whereParts, orders, limit, offset) + ret0, _ := ret[0].([]*feature.Tag) + ret1, _ := ret[1].(int) + ret2, _ := ret[2].(int64) + ret3, _ := ret[3].(error) + return ret0, ret1, ret2, ret3 +} + +// ListTags indicates an expected call of ListTags. +func (mr *MockTagStorageMockRecorder) ListTags(ctx, whereParts, orders, limit, offset interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListTags", reflect.TypeOf((*MockTagStorage)(nil).ListTags), ctx, whereParts, orders, limit, offset) +} + +// UpsertTag mocks base method. 
+func (m *MockTagStorage) UpsertTag(ctx context.Context, tag *domain.Tag, environmentNamespace string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpsertTag", ctx, tag, environmentNamespace) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpsertTag indicates an expected call of UpsertTag. +func (mr *MockTagStorageMockRecorder) UpsertTag(ctx, tag, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpsertTag", reflect.TypeOf((*MockTagStorage)(nil).UpsertTag), ctx, tag, environmentNamespace) +} diff --git a/pkg/feature/storage/v2/segment.go b/pkg/feature/storage/v2/segment.go new file mode 100644 index 000000000..814f56f84 --- /dev/null +++ b/pkg/feature/storage/v2/segment.go @@ -0,0 +1,343 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package v2 + +import ( + "context" + "errors" + "fmt" + + "github.com/bucketeer-io/bucketeer/pkg/feature/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + proto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +var ( + ErrSegmentAlreadyExists = errors.New("segment: already exists") + ErrSegmentNotFound = errors.New("segment: not found") + ErrSegmentUnexpectedAffectedRows = errors.New("segment: unexpected affected rows") +) + +type SegmentStorage interface { + CreateSegment(ctx context.Context, segment *domain.Segment, environmentNamespace string) error + UpdateSegment(ctx context.Context, segment *domain.Segment, environmentNamespace string) error + GetSegment(ctx context.Context, id, environmentNamespace string) (*domain.Segment, error) + ListSegments( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, + isInUseStatus *bool, + environmentNamespace string, + ) ([]*proto.Segment, int, int64, error) +} + +type segmentStorage struct { + qe mysql.QueryExecer +} + +func NewSegmentStorage(qe mysql.QueryExecer) SegmentStorage { + return &segmentStorage{qe: qe} +} + +func (s *segmentStorage) CreateSegment( + ctx context.Context, + segment *domain.Segment, + environmentNamespace string, +) error { + query := ` + INSERT INTO segment ( + id, + name, + description, + rules, + created_at, + updated_at, + version, + deleted, + included_user_count, + excluded_user_count, + status, + environment_namespace + ) VALUES ( + ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, + ?, ? 
+ ) + ` + _, err := s.qe.ExecContext( + ctx, + query, + segment.Id, + segment.Name, + segment.Description, + mysql.JSONObject{Val: segment.Rules}, + segment.CreatedAt, + segment.UpdatedAt, + segment.Version, + segment.Deleted, + segment.IncludedUserCount, + segment.ExcludedUserCount, + int32(segment.Status), + environmentNamespace, + ) + if err != nil { + if err == mysql.ErrDuplicateEntry { + return ErrSegmentAlreadyExists + } + return err + } + return nil +} + +func (s *segmentStorage) UpdateSegment( + ctx context.Context, + segment *domain.Segment, + environmentNamespace string, +) error { + query := ` + UPDATE + segment + SET + name = ?, + description = ?, + rules = ?, + created_at = ?, + updated_at = ?, + version = ?, + deleted = ?, + included_user_count = ?, + excluded_user_count = ?, + status = ? + WHERE + id = ? AND + environment_namespace = ? + ` + result, err := s.qe.ExecContext( + ctx, + query, + segment.Name, + segment.Description, + mysql.JSONObject{Val: segment.Rules}, + segment.CreatedAt, + segment.UpdatedAt, + segment.Version, + segment.Deleted, + segment.IncludedUserCount, + segment.ExcludedUserCount, + int32(segment.Status), + segment.Id, + environmentNamespace, + ) + if err != nil { + return err + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return err + } + if rowsAffected != 1 { + return ErrSegmentUnexpectedAffectedRows + } + return nil +} + +func (s *segmentStorage) GetSegment( + ctx context.Context, + id, environmentNamespace string, +) (*domain.Segment, error) { + segment := proto.Segment{} + var status int32 + query := ` + SELECT + id, + name, + description, + rules, + created_at, + updated_at, + version, + deleted, + included_user_count, + excluded_user_count, + status, + CASE + WHEN ( + SELECT + COUNT(1) + FROM + feature + WHERE + environment_namespace = ? AND + rules LIKE concat("%", segment.id, "%") + ) > 0 THEN TRUE + ELSE FALSE + END AS is_in_use_status + FROM + segment + WHERE + id = ? 
AND + environment_namespace = ? + ` + err := s.qe.QueryRowContext( + ctx, + query, + environmentNamespace, + id, + environmentNamespace, + ).Scan( + &segment.Id, + &segment.Name, + &segment.Description, + &mysql.JSONObject{Val: &segment.Rules}, + &segment.CreatedAt, + &segment.UpdatedAt, + &segment.Version, + &segment.Deleted, + &segment.IncludedUserCount, + &segment.ExcludedUserCount, + &status, + &segment.IsInUseStatus, + ) + if err != nil { + if err == mysql.ErrNoRows { + return nil, ErrSegmentNotFound + } + return nil, err + } + segment.Status = proto.Segment_Status(status) + return &domain.Segment{Segment: &segment}, nil +} + +func (s *segmentStorage) ListSegments( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, + isInUseStatus *bool, + environmentNamespace string, +) ([]*proto.Segment, int, int64, error) { + whereSQL, whereArgs := mysql.ConstructWhereSQLString(whereParts) + prepareArgs := make([]interface{}, 0, len(whereArgs)+1) + prepareArgs = append(prepareArgs, environmentNamespace) + prepareArgs = append(prepareArgs, whereArgs...) + orderBySQL := mysql.ConstructOrderBySQLString(orders) + limitOffsetSQL := mysql.ConstructLimitOffsetSQLString(limit, offset) + var isInUseStatusSQL string + if isInUseStatus != nil { + if *isInUseStatus { + isInUseStatusSQL = "HAVING is_in_use_status = TRUE" + } else { + isInUseStatusSQL = "HAVING is_in_use_status = FALSE" + } + } + query := fmt.Sprintf(` + SELECT + id, + name, + description, + rules, + created_at, + updated_at, + version, + deleted, + included_user_count, + excluded_user_count, + status, + CASE + WHEN ( + SELECT + COUNT(1) + FROM + feature + WHERE + environment_namespace = ? AND + rules LIKE concat("%%", segment.id, "%%") + ) > 0 THEN TRUE + ELSE FALSE + END AS is_in_use_status + FROM + segment + %s %s %s %s + `, whereSQL, isInUseStatusSQL, orderBySQL, limitOffsetSQL, + ) + rows, err := s.qe.QueryContext(ctx, query, prepareArgs...) 
+	if err != nil {
+		return nil, 0, 0, err
+	}
+	defer rows.Close()
+	segments := make([]*proto.Segment, 0, limit)
+	for rows.Next() {
+		segment := proto.Segment{}
+		var status int32
+		err := rows.Scan(
+			&segment.Id,
+			&segment.Name,
+			&segment.Description,
+			&mysql.JSONObject{Val: &segment.Rules},
+			&segment.CreatedAt,
+			&segment.UpdatedAt,
+			&segment.Version,
+			&segment.Deleted,
+			&segment.IncludedUserCount,
+			&segment.ExcludedUserCount,
+			&status,
+			&segment.IsInUseStatus,
+		)
+		if err != nil {
+			return nil, 0, 0, err
+		}
+		segment.Status = proto.Segment_Status(status)
+		segments = append(segments, &segment)
+	}
+	if rows.Err() != nil {
+		return nil, 0, 0, rows.Err() // fix: was `err`, which is nil after a clean loop, silently masking iteration errors
+	}
+	nextOffset := offset + len(segments)
+	var totalCount int64
+	countConditionSQL := "> 0 THEN 1 ELSE 1" // counts every row; variants below count only in-use / only unused (COUNT ignores NULL)
+	if isInUseStatus != nil {
+		if *isInUseStatus {
+			countConditionSQL = "> 0 THEN 1 ELSE NULL"
+		} else {
+			countConditionSQL = "> 0 THEN NULL ELSE 1"
+		}
+	}
+	countQuery := fmt.Sprintf(`
+		SELECT
+			COUNT(
+				CASE
+					WHEN (
+						SELECT
+							COUNT(1)
+						FROM
+							feature
+						WHERE
+							environment_namespace = ? AND
+							rules LIKE concat("%%", segment.id, "%%")
+					) %s
+				END
+			)
+		FROM
+			segment
+		%s %s
+		`, countConditionSQL, whereSQL, orderBySQL,
+	)
+	err = s.qe.QueryRowContext(ctx, countQuery, prepareArgs...).Scan(&totalCount)
+	if err != nil {
+		return nil, 0, 0, err
+	}
+	return segments, nextOffset, totalCount, nil
+}
diff --git a/pkg/feature/storage/v2/segment_test.go b/pkg/feature/storage/v2/segment_test.go
new file mode 100644
index 000000000..69c20fbfa
--- /dev/null
+++ b/pkg/feature/storage/v2/segment_test.go
@@ -0,0 +1,32 @@
+// Copyright 2022 The Bucketeer Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v2 + +import ( + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" +) + +func TestNewSegmentStorage(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + storage := NewSegmentStorage(mock.NewMockQueryExecer(mockController)) + assert.IsType(t, &segmentStorage{}, storage) +} diff --git a/pkg/feature/storage/v2/segment_user.go b/pkg/feature/storage/v2/segment_user.go new file mode 100644 index 000000000..4d1bd3d74 --- /dev/null +++ b/pkg/feature/storage/v2/segment_user.go @@ -0,0 +1,179 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package v2 + +import ( + "context" + "errors" + "fmt" + + "github.com/bucketeer-io/bucketeer/pkg/feature/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + proto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +var ( + ErrSegmentUserNotFound = errors.New("segmentUser: not found") +) + +type SegmentUserStorage interface { + UpsertSegmentUsers(ctx context.Context, users []*proto.SegmentUser, environmentNamespace string) error + GetSegmentUser(ctx context.Context, id, environmentNamespace string) (*domain.SegmentUser, error) + ListSegmentUsers( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, + ) ([]*proto.SegmentUser, int, error) +} + +type segmentUserStorage struct { + qe mysql.QueryExecer +} + +func NewSegmentUserStorage(qe mysql.QueryExecer) SegmentUserStorage { + return &segmentUserStorage{qe: qe} +} + +func (s *segmentUserStorage) UpsertSegmentUsers( + ctx context.Context, + users []*proto.SegmentUser, + environmentNamespace string, +) error { + for _, u := range users { + query := ` + INSERT INTO segment_user ( + id, + segment_id, + user_id, + state, + deleted, + environment_namespace + ) VALUES ( + ?, ?, ?, ?, ?, ? + ) ON DUPLICATE KEY UPDATE + segment_id = VALUES(segment_id), + user_id = VALUES(user_id), + state = VALUES(state), + deleted = VALUES(deleted) + ` + _, err := s.qe.ExecContext( + ctx, + query, + u.Id, + u.SegmentId, + u.UserId, + int32(u.State), + u.Deleted, + environmentNamespace, + ) + if err != nil { + return err + } + } + return nil +} + +func (s *segmentUserStorage) GetSegmentUser( + ctx context.Context, + id, environmentNamespace string, +) (*domain.SegmentUser, error) { + segmentUser := proto.SegmentUser{} + var state int32 + query := ` + SELECT + id, + segment_id, + user_id, + state, + deleted + FROM + segment_user + WHERE + id = ? AND + environment_namespace = ? 
+	`
+	err := s.qe.QueryRowContext(
+		ctx,
+		query,
+		id,
+		environmentNamespace,
+	).Scan(
+		&segmentUser.Id,
+		&segmentUser.SegmentId,
+		&segmentUser.UserId,
+		&state,
+		&segmentUser.Deleted,
+	)
+	if err != nil {
+		if err == mysql.ErrNoRows {
+			return nil, ErrSegmentUserNotFound
+		}
+		return nil, err
+	}
+	segmentUser.State = proto.SegmentUser_State(state)
+	return &domain.SegmentUser{SegmentUser: &segmentUser}, nil
+}
+
+func (s *segmentUserStorage) ListSegmentUsers(
+	ctx context.Context,
+	whereParts []mysql.WherePart,
+	orders []*mysql.Order,
+	limit, offset int,
+) ([]*proto.SegmentUser, int, error) {
+	whereSQL, whereArgs := mysql.ConstructWhereSQLString(whereParts)
+	orderBySQL := mysql.ConstructOrderBySQLString(orders)
+	limitOffsetSQL := mysql.ConstructLimitOffsetSQLString(limit, offset)
+	query := fmt.Sprintf(`
+		SELECT
+			id,
+			segment_id,
+			user_id,
+			state,
+			deleted
+		FROM
+			segment_user
+		%s %s %s
+		`, whereSQL, orderBySQL, limitOffsetSQL,
+	)
+	rows, err := s.qe.QueryContext(ctx, query, whereArgs...)
+	if err != nil {
+		return nil, 0, err
+	}
+	defer rows.Close()
+	segmentUsers := make([]*proto.SegmentUser, 0, limit)
+	for rows.Next() {
+		segmentUser := proto.SegmentUser{}
+		var state int32
+		err := rows.Scan(
+			&segmentUser.Id,
+			&segmentUser.SegmentId,
+			&segmentUser.UserId,
+			&state,
+			&segmentUser.Deleted,
+		)
+		if err != nil {
+			return nil, 0, err
+		}
+		segmentUser.State = proto.SegmentUser_State(state)
+		segmentUsers = append(segmentUsers, &segmentUser)
+	}
+	if rows.Err() != nil {
+		return nil, 0, rows.Err() // fix: was `err`, which is nil after a clean loop, silently masking iteration errors
+	}
+	nextOffset := offset + len(segmentUsers)
+	return segmentUsers, nextOffset, nil
+}
diff --git a/pkg/feature/storage/v2/segment_user_test.go b/pkg/feature/storage/v2/segment_user_test.go
new file mode 100644
index 000000000..ef65f13e8
--- /dev/null
+++ b/pkg/feature/storage/v2/segment_user_test.go
@@ -0,0 +1,32 @@
+// Copyright 2022 The Bucketeer Authors.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v2 + +import ( + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" +) + +func TestNewSegmentUserStorage(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + storage := NewSegmentUserStorage(mock.NewMockQueryExecer(mockController)) + assert.IsType(t, &segmentUserStorage{}, storage) +} diff --git a/pkg/feature/storage/v2/tag.go b/pkg/feature/storage/v2/tag.go new file mode 100644 index 000000000..8ae75efc8 --- /dev/null +++ b/pkg/feature/storage/v2/tag.go @@ -0,0 +1,136 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package v2 + +import ( + "context" + "errors" + "fmt" + + "github.com/bucketeer-io/bucketeer/pkg/feature/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + proto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +var ( + ErrTagAlreadyExists = errors.New("tag: already exists") + ErrTagNotFound = errors.New("tag: not found") +) + +type TagStorage interface { + UpsertTag(ctx context.Context, tag *domain.Tag, environmentNamespace string) error + ListTags( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, + ) ([]*proto.Tag, int, int64, error) +} + +type tagStorage struct { + qe mysql.QueryExecer +} + +func NewTagStorage(qe mysql.QueryExecer) TagStorage { + return &tagStorage{qe: qe} +} + +func (s *tagStorage) UpsertTag( + ctx context.Context, + tag *domain.Tag, + environmentNamespace string, +) error { + // To get last tags, update `updated_at`. + query := ` + INSERT INTO tag ( + id, + created_at, + updated_at, + environment_namespace + ) VALUES ( + ?, ?, ?, ? + ) ON DUPLICATE KEY UPDATE + updated_at = VALUES(updated_at) + ` + _, err := s.qe.ExecContext( + ctx, + query, + tag.Id, + tag.CreatedAt, + tag.UpdatedAt, + environmentNamespace, + ) + if err != nil { + return err + } + return nil +} + +func (s *tagStorage) ListTags( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, +) ([]*proto.Tag, int, int64, error) { + whereSQL, whereArgs := mysql.ConstructWhereSQLString(whereParts) + orderBySQL := mysql.ConstructOrderBySQLString(orders) + limitOffsetSQL := mysql.ConstructLimitOffsetSQLString(limit, offset) + query := fmt.Sprintf(` + SELECT + id, + created_at, + updated_at + FROM + tag + %s %s %s + `, whereSQL, orderBySQL, limitOffsetSQL, + ) + rows, err := s.qe.QueryContext(ctx, query, whereArgs...) 
+	if err != nil {
+		return nil, 0, 0, err
+	}
+	defer rows.Close()
+	tags := make([]*proto.Tag, 0, limit)
+	for rows.Next() {
+		tag := proto.Tag{}
+		err := rows.Scan(
+			&tag.Id,
+			&tag.CreatedAt,
+			&tag.UpdatedAt,
+		)
+		if err != nil {
+			return nil, 0, 0, err
+		}
+		tags = append(tags, &tag)
+	}
+	if rows.Err() != nil {
+		return nil, 0, 0, rows.Err() // fix: was `err`, which is nil after a clean loop, silently masking iteration errors
+	}
+	nextOffset := offset + len(tags)
+	countQuery := fmt.Sprintf(`
+		SELECT
+			COUNT(1)
+		FROM
+			tag
+		%s %s
+		`, whereSQL, orderBySQL,
+	)
+	var totalCount int64
+	if err := s.qe.QueryRowContext(ctx, countQuery, whereArgs...).Scan(&totalCount); err != nil {
+		return nil, 0, 0, err
+	}
+	return tags, nextOffset, totalCount, nil
+}
diff --git a/pkg/feature/storage/v2/tag_test.go b/pkg/feature/storage/v2/tag_test.go
new file mode 100644
index 000000000..b164dadab
--- /dev/null
+++ b/pkg/feature/storage/v2/tag_test.go
@@ -0,0 +1,32 @@
+// Copyright 2022 The Bucketeer Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package v2 + +import ( + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" +) + +func TestNewTagStorage(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + storage := NewTagStorage(mock.NewMockQueryExecer(mockController)) + assert.IsType(t, &tagStorage{}, storage) +} diff --git a/pkg/gateway/api/BUILD.bazel b/pkg/gateway/api/BUILD.bazel new file mode 100644 index 000000000..f27131553 --- /dev/null +++ b/pkg/gateway/api/BUILD.bazel @@ -0,0 +1,84 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "api.go", + "api_grpc.go", + "grpc_validation.go", + "metrics.go", + "trackhandler.go", + "validation.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/gateway/api", + visibility = ["//visibility:public"], + deps = [ + "//pkg/account/client:go_default_library", + "//pkg/cache:go_default_library", + "//pkg/cache/v3:go_default_library", + "//pkg/feature/client:go_default_library", + "//pkg/feature/domain:go_default_library", + "//pkg/feature/storage:go_default_library", + "//pkg/log:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/pubsub/publisher:go_default_library", + "//pkg/rest:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/storage/v2/bigtable:go_default_library", + "//pkg/uuid:go_default_library", + "//proto/account:go_default_library", + "//proto/event/client:go_default_library", + "//proto/event/service:go_default_library", + "//proto/feature:go_default_library", + "//proto/gateway:go_default_library", + "//proto/user:go_default_library", + "@com_github_golang_protobuf//ptypes:go_default_library_gen", + "@com_github_prometheus_client_golang//prometheus:go_default_library", + "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", + "@org_golang_google_grpc//:go_default_library", + 
"@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//metadata:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + "@org_golang_google_protobuf//encoding/protojson:go_default_library", + "@org_golang_google_protobuf//types/known/anypb:go_default_library", + "@org_golang_x_sync//singleflight:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "api_grpc_test.go", + "api_test.go", + "trackhandler_test.go", + "validation_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//pkg/account/client/mock:go_default_library", + "//pkg/cache:go_default_library", + "//pkg/cache/v3/mock:go_default_library", + "//pkg/feature/client/mock:go_default_library", + "//pkg/feature/domain:go_default_library", + "//pkg/feature/storage/mock:go_default_library", + "//pkg/log:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/pubsub/publisher/mock:go_default_library", + "//pkg/uuid:go_default_library", + "//proto/account:go_default_library", + "//proto/event/client:go_default_library", + "//proto/feature:go_default_library", + "//proto/gateway:go_default_library", + "//proto/user:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + "@io_bazel_rules_go//proto/wkt:any_go_proto", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//metadata:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + "@org_golang_google_protobuf//encoding/protojson:go_default_library", + ], +) diff --git a/pkg/gateway/api/api.go b/pkg/gateway/api/api.go new file mode 100644 index 000000000..32a9e3219 --- /dev/null +++ b/pkg/gateway/api/api.go @@ -0,0 +1,1279 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "time" + + "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/ptypes/wrappers" + "go.uber.org/zap" + "golang.org/x/sync/singleflight" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/types/known/anypb" + + accountclient "github.com/bucketeer-io/bucketeer/pkg/account/client" + "github.com/bucketeer-io/bucketeer/pkg/cache" + cachev3 "github.com/bucketeer-io/bucketeer/pkg/cache/v3" + featureclient "github.com/bucketeer-io/bucketeer/pkg/feature/client" + featuredomain "github.com/bucketeer-io/bucketeer/pkg/feature/domain" + ftstorage "github.com/bucketeer-io/bucketeer/pkg/feature/storage" + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + "github.com/bucketeer-io/bucketeer/pkg/rest" + bigtable "github.com/bucketeer-io/bucketeer/pkg/storage/v2/bigtable" + "github.com/bucketeer-io/bucketeer/pkg/uuid" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/client" + serviceeventproto "github.com/bucketeer-io/bucketeer/proto/event/service" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" + userproto "github.com/bucketeer-io/bucketeer/proto/user" +) + +type gatewayService 
struct { + userEvaluationStorage ftstorage.UserEvaluationsStorage + featureClient featureclient.Client + accountClient accountclient.Client + goalPublisher publisher.Publisher + goalBatchPublisher publisher.Publisher + evaluationPublisher publisher.Publisher + userPublisher publisher.Publisher + metricsPublisher publisher.Publisher + segmentUsersCache cachev3.SegmentUsersCache + featuresCache cachev3.FeaturesCache + environmentAPIKeyCache cachev3.EnvironmentAPIKeyCache + flightgroup singleflight.Group + opts *options + logger *zap.Logger +} + +func NewGatewayService( + bt bigtable.Client, + featureClient featureclient.Client, + accountClient accountclient.Client, + gp publisher.Publisher, + gbp publisher.Publisher, + ep publisher.Publisher, + up publisher.Publisher, + mp publisher.Publisher, + v3Cache cache.MultiGetCache, + opts ...Option, +) *gatewayService { + options := defaultOptions + for _, opt := range opts { + opt(&options) + } + if options.metrics != nil { + registerMetrics(options.metrics) + } + return &gatewayService{ + userEvaluationStorage: ftstorage.NewUserEvaluationsStorage(bt), + featureClient: featureClient, + accountClient: accountClient, + goalPublisher: gp, + goalBatchPublisher: gbp, + evaluationPublisher: ep, + userPublisher: up, + metricsPublisher: mp, + featuresCache: cachev3.NewFeaturesCache(v3Cache), + segmentUsersCache: cachev3.NewSegmentUsersCache(v3Cache), + environmentAPIKeyCache: cachev3.NewEnvironmentAPIKeyCache(v3Cache), + opts: &options, + logger: options.logger.Named("api"), + } +} + +type eventType int + +type metricsDetailEventType int + +const ( + goalEventType eventType = iota + 1 // eventType starts from 1 for validation. 
+ goalBatchEventType + evaluationEventType + metricsEventType +) + +const ( + getEvaluationLatencyMetricsEventType metricsDetailEventType = iota + 1 + getEvaluationSizeMetricsEventType + timeoutErrorCountMetricsEventType + internalErrorCountMetricsEventType +) + +const ( + Version = "/v1" + Service = "/gateway" + pingAPI = "/ping" + evaluationsAPI = "/evaluations" + evaluationAPI = "/evaluation" + eventAPI = "/events" + authorizationKey = "authorization" +) + +var ( + errContextCanceled = rest.NewErrStatus(http.StatusBadRequest, "gateway: context canceled") + errMissingAPIKey = rest.NewErrStatus(http.StatusUnauthorized, "gateway: missing APIKey") + errInvalidAPIKey = rest.NewErrStatus(http.StatusUnauthorized, "gateway: invalid APIKey") + errInternal = rest.NewErrStatus(http.StatusInternalServerError, "gateway: internal") + errInvalidHttpMethod = rest.NewErrStatus(http.StatusMethodNotAllowed, "gateway: invalid http method") + errTagRequired = rest.NewErrStatus(http.StatusBadRequest, "gateway: tag is required") + errUserRequired = rest.NewErrStatus(http.StatusBadRequest, "gateway: user is required") + errUserIDRequired = rest.NewErrStatus(http.StatusBadRequest, "gateway: user id is required") + errBadRole = rest.NewErrStatus(http.StatusUnauthorized, "gateway: bad role") + errDisabledAPIKey = rest.NewErrStatus(http.StatusUnauthorized, "gateway: disabled APIKey") + errFeatureNotFound = rest.NewErrStatus(http.StatusNotFound, "gateway: feature not found") + errFeatureIDRequired = rest.NewErrStatus(http.StatusBadRequest, "gateway: feature id is required") + errMissingEventID = rest.NewErrStatus(http.StatusBadRequest, "gateway: missing event id") + errMissingEvents = rest.NewErrStatus(http.StatusBadRequest, "gateway: missing events") + errBodyRequired = rest.NewErrStatus(http.StatusBadRequest, "gateway: body is required") +) + +var ( + errInvalidType = errors.New("gateway: invalid message type") +) + +func (s *gatewayService) Register(mux *http.ServeMux) { + s.regist(mux, 
pingAPI, s.ping) + s.regist(mux, evaluationsAPI, s.getEvaluations) + s.regist(mux, evaluationAPI, s.getEvaluation) + s.regist(mux, eventAPI, s.registerEvents) +} + +func (*gatewayService) regist(mux *http.ServeMux, path string, handler func(http.ResponseWriter, *http.Request)) { + mux.HandleFunc(fmt.Sprintf("%s%s%s", Version, Service, path), handler) +} + +type pingResponse struct { + Time int64 `json:"time,omitempty"` +} + +type getEvaluationsRequest struct { + Tag string `json:"tag,omitempty"` + User *userproto.User `json:"user,omitempty"` + UserEvaluationsID string `json:"user_evaluations_id,omitempty"` + SourceID eventproto.SourceId `json:"source_id,omitempty"` +} + +type getEvaluationsResponse struct { + Evaluations *featureproto.UserEvaluations `json:"evaluations,omitempty"` + UserEvaluationsID string `json:"user_evaluations_id,omitempty"` +} + +type getEvaluationRequest struct { + Tag string `json:"tag,omitempty"` + User *userproto.User `json:"user,omitempty"` + FeatureID string `json:"feature_id,omitempty"` + SourceId eventproto.SourceId `json:"source_id,omitempty"` +} + +type registerEventsRequest struct { + Events []event `json:"events,omitempty"` +} + +type registerEventsResponse struct { + Errors map[string]*registerEventsResponseError `json:"errors,omitempty"` +} + +type registerEventsResponseError struct { + Retriable bool `json:"retriable"` // omitempty is not used intentionally + Message string `json:"message,omitempty"` +} + +type getEvaluationResponse struct { + Evaluation *featureproto.Evaluation `json:"evaluations,omitempty"` +} + +type event struct { + ID string `json:"id,omitempty"` + Event json.RawMessage `json:"event,omitempty"` + EnvironmentNamespace string `json:"environment_namespace,omitempty"` + Type eventType `json:"type,omitempty"` +} + +type metricsEvent struct { + Timestamp int64 `json:"timestamp,omitempty"` + Event json.RawMessage `json:"event,omitempty"` + Type metricsDetailEventType `json:"type,omitempty"` +} + +type 
getEvaluationLatencyMetricsEvent struct { + Labels map[string]string `json:"labels,omitempty"` + Duration time.Duration `json:"duration,omitempty"` +} + +func (s *gatewayService) ping(w http.ResponseWriter, req *http.Request) { + rest.ReturnSuccessResponse( + w, + &pingResponse{ + Time: time.Now().Unix(), + }, + ) +} + +func (s *gatewayService) getEvaluations(w http.ResponseWriter, req *http.Request) { + envAPIKey, reqBody, err := s.checkGetEvaluationsRequest(req) + if err != nil { + rest.ReturnFailureResponse(w, err) + return + } + s.publishUser(req.Context(), envAPIKey.EnvironmentNamespace, reqBody.Tag, reqBody.User, reqBody.SourceID) + f, err, _ := s.flightgroup.Do( + envAPIKey.EnvironmentNamespace, + func() (interface{}, error) { + return s.getFeatures(req.Context(), envAPIKey.EnvironmentNamespace) + }, + ) + if err != nil { + rest.ReturnFailureResponse(w, err) + return + } + features := f.([]*featureproto.Feature) + if len(features) == 0 { + rest.ReturnSuccessResponse( + w, + &getEvaluationsResponse{ + Evaluations: nil, + }, + ) + return + } + ueid := featuredomain.UserEvaluationsID(reqBody.User.Id, reqBody.User.Data, features) + if reqBody.UserEvaluationsID == ueid { + rest.ReturnSuccessResponse( + w, + &getEvaluationsResponse{ + Evaluations: nil, + UserEvaluationsID: ueid, + }, + ) + return + } + evaluations, err := s.evaluateFeatures( + req.Context(), + reqBody.User, + features, + envAPIKey.EnvironmentNamespace, + reqBody.Tag, + ) + if err != nil { + s.logger.Error( + "Failed to evaluate features", + log.FieldsFromImcomingContext(req.Context()).AddFields( + zap.Error(err), + zap.String("environmentNamespace", envAPIKey.EnvironmentNamespace), + zap.String("userId", reqBody.User.Id), + )..., + ) + rest.ReturnFailureResponse(w, err) + return + } + rest.ReturnSuccessResponse( + w, + &getEvaluationsResponse{ + Evaluations: evaluations, + UserEvaluationsID: ueid, + }, + ) +} + +func (s *gatewayService) getEvaluation(w http.ResponseWriter, req *http.Request) { + 
	envAPIKey, reqBody, err := s.checkGetEvaluationRequest(req)
	if err != nil {
		rest.ReturnFailureResponse(w, err)
		return
	}
	s.publishUser(req.Context(), envAPIKey.EnvironmentNamespace, reqBody.Tag, reqBody.User, reqBody.SourceId)
	f, err, _ := s.flightgroup.Do(
		envAPIKey.EnvironmentNamespace,
		func() (interface{}, error) {
			return s.getFeatures(req.Context(), envAPIKey.EnvironmentNamespace)
		},
	)
	if err != nil {
		rest.ReturnFailureResponse(w, err)
		return
	}
	fs := f.([]*featureproto.Feature)
	// Narrow the cached feature list down to the single requested feature.
	var features []*featureproto.Feature
	for _, f := range fs {
		if f.Id == reqBody.FeatureID {
			features = append(features, f)
			break
		}
	}
	if len(features) == 0 {
		rest.ReturnFailureResponse(w, errFeatureNotFound)
		return
	}
	evaluations, err := s.evaluateFeatures(
		req.Context(),
		reqBody.User,
		features,
		envAPIKey.EnvironmentNamespace,
		reqBody.Tag,
	)
	if err != nil {
		s.logger.Error(
			"Failed to evaluate features",
			log.FieldsFromImcomingContext(req.Context()).AddFields(
				zap.Error(err),
				zap.String("environmentNamespace", envAPIKey.EnvironmentNamespace),
				zap.String("userId", reqBody.User.Id),
				zap.String("featureId", reqBody.FeatureID),
			)...,
		)
		rest.ReturnFailureResponse(w, errInternal)
		return
	}
	// NOTE(review): evaluations.Evaluations[0] is indexed without a length
	// check — confirm EvaluateFeatures always yields one entry per feature.
	if err := s.upsertUserEvaluation(
		req.Context(),
		envAPIKey.EnvironmentNamespace,
		reqBody.Tag,
		evaluations.Evaluations[0],
	); err != nil {
		// NOTE(review): label typeMetrics looks wrong for an evaluation-path
		// failure — presumably should be typeEvaluation; verify dashboards first.
		restEventCounter.WithLabelValues(callerGatewayService, typeMetrics, codeUpsertUserEvaluationFailed).Inc()
		s.logger.Error(
			"Failed to upsert user evaluation while trying to get evaluation",
			log.FieldsFromImcomingContext(req.Context()).AddFields(
				zap.Error(err),
				zap.String("environmentNamespace", envAPIKey.EnvironmentNamespace),
				zap.String("userId", reqBody.User.Id),
				zap.String("featureId", reqBody.FeatureID),
			)...,
		)
		rest.ReturnFailureResponse(w, errInternal)
		return
	}
	rest.ReturnSuccessResponse(
		w,
		&getEvaluationResponse{
			Evaluation: evaluations.Evaluations[0],
		},
	)
}

// checkGetEvaluationsRequest authenticates the request via its API key,
// decodes the JSON body, and validates it.
func (s *gatewayService) checkGetEvaluationsRequest(
	req *http.Request,
) (*accountproto.EnvironmentAPIKey, getEvaluationsRequest, error) {
	if req.Method != http.MethodPost {
		return nil, getEvaluationsRequest{}, errInvalidHttpMethod
	}
	envAPIKey, err := s.checkRequest(req.Context(), req)
	if err != nil {
		return nil, getEvaluationsRequest{}, err
	}
	var body getEvaluationsRequest
	if err := json.NewDecoder(req.Body).Decode(&body); err != nil {
		s.logger.Error(
			"Failed to decode request body",
			log.FieldsFromImcomingContext(req.Context()).AddFields(
				zap.Error(err),
			)...,
		)
		return nil, getEvaluationsRequest{}, errInternal
	}
	if err := s.validateGetEvaluationsRequest(&body); err != nil {
		return nil, getEvaluationsRequest{}, err
	}
	return envAPIKey, body, nil
}

// checkGetEvaluationRequest is the single-evaluation counterpart of
// checkGetEvaluationsRequest.
func (s *gatewayService) checkGetEvaluationRequest(
	req *http.Request,
) (*accountproto.EnvironmentAPIKey, getEvaluationRequest, error) {
	if req.Method != http.MethodPost {
		return nil, getEvaluationRequest{}, errInvalidHttpMethod
	}
	envAPIKey, err := s.checkRequest(req.Context(), req)
	if err != nil {
		return nil, getEvaluationRequest{}, err
	}
	var body getEvaluationRequest
	if err := json.NewDecoder(req.Body).Decode(&body); err != nil {
		s.logger.Error(
			"Failed to decode request body",
			log.FieldsFromImcomingContext(req.Context()).AddFields(
				zap.Error(err),
			)...,
		)
		return nil, getEvaluationRequest{}, errInternal
	}
	if err := s.validateGetEvaluationRequest(&body); err != nil {
		return nil, getEvaluationRequest{}, err
	}
	return envAPIKey, body, nil
}

// validateGetEvaluationsRequest enforces the required fields: tag, user, user id.
func (*gatewayService) validateGetEvaluationsRequest(body *getEvaluationsRequest) error {
	if body.Tag == "" {
		return errTagRequired
	}
	if body.User == nil {
		return errUserRequired
	}
	if body.User.Id == "" {
		return errUserIDRequired
	}
	return nil
}

// validateGetEvaluationRequest additionally requires the feature id.
func (*gatewayService) validateGetEvaluationRequest(body *getEvaluationRequest)
error {
	if body.Tag == "" {
		return errTagRequired
	}
	if body.User == nil {
		return errUserRequired
	}
	if body.User.Id == "" {
		return errUserIDRequired
	}
	if body.FeatureID == "" {
		return errFeatureIDRequired
	}
	return nil
}

// publishUser fires a UserEvent asynchronously; publish failures are logged
// but never fail the calling request.
func (s *gatewayService) publishUser(
	ctx context.Context,
	environmentNamespace, tag string,
	user *userproto.User,
	sourceID eventproto.SourceId,
) {
	// TODO: using buffered channel to reduce the number of go routines
	go func() {
		// Detach from the request context so the publish survives the response,
		// bounded by the configured pubsub timeout.
		ctx, cancel := context.WithTimeout(context.Background(), s.opts.pubsubTimeout)
		defer cancel()
		if err := s.publishUserEvent(ctx, user, tag, environmentNamespace, sourceID); err != nil {
			s.logger.Error(
				"Failed to publish UserEvent",
				log.FieldsFromImcomingContext(ctx).AddFields(
					zap.Error(err),
					zap.String("environmentNamespace", environmentNamespace),
				)...,
			)
		}
	}()
}

// publishUserEvent wraps the user's last-seen data in a UserEvent and
// publishes it on the user topic.
func (s *gatewayService) publishUserEvent(
	ctx context.Context,
	user *userproto.User,
	tag, environmentNamespace string,
	sourceID eventproto.SourceId,
) error {
	id, err := uuid.NewUUID()
	if err != nil {
		return err
	}
	userEvent := &serviceeventproto.UserEvent{
		Id:                   id.String(),
		SourceId:             sourceID,
		Tag:                  tag,
		UserId:               user.Id,
		LastSeen:             time.Now().Unix(),
		Data:                 user.Data,
		EnvironmentNamespace: environmentNamespace,
	}
	ue, err := ptypes.MarshalAny(userEvent)
	if err != nil {
		return err
	}
	event := &eventproto.Event{
		Id:                   id.String(),
		Event:                ue,
		EnvironmentNamespace: environmentNamespace,
	}
	return s.userPublisher.Publish(ctx, event)
}

// checkRequest authenticates a REST call: it resolves the API key from the
// request and verifies it is an enabled SDK key.
func (s *gatewayService) checkRequest(
	ctx context.Context,
	req *http.Request,
) (*accountproto.EnvironmentAPIKey, error) {
	if isContextCanceled(ctx) {
		s.logger.Warn(
			"Request was canceled",
			log.FieldsFromImcomingContext(ctx)...,
		)
		return nil, errContextCanceled
	}
	envAPIKey, err := s.findEnvironmentAPIKey(ctx, req)
	if err != nil {
		return nil, err
	}
	if err := s.checkEnvironmentAPIKey(envAPIKey, accountproto.APIKey_SDK); err != nil {
		return nil, err
	}
	return envAPIKey, nil
}

// checkEnvironmentAPIKey rejects keys with the wrong role, keys of disabled
// environments, and keys that are themselves disabled.
func (*gatewayService) checkEnvironmentAPIKey(
	environmentAPIKey *accountproto.EnvironmentAPIKey,
	role accountproto.APIKey_Role,
) error {
	if environmentAPIKey.ApiKey.Role != role {
		return errBadRole
	}
	if environmentAPIKey.EnvironmentDisabled {
		return errDisabledAPIKey
	}
	if environmentAPIKey.ApiKey.Disabled {
		return errDisabledAPIKey
	}
	return nil
}

// findEnvironmentAPIKey reads the key id from the authorization header and
// resolves it, deduplicating concurrent lookups via the flight group.
func (s *gatewayService) findEnvironmentAPIKey(
	ctx context.Context,
	req *http.Request,
) (*accountproto.EnvironmentAPIKey, error) {
	id := req.Header.Get(authorizationKey)
	if id == "" {
		return nil, errMissingAPIKey
	}
	k, err, _ := s.flightgroup.Do(
		id,
		func() (interface{}, error) {
			return s.getEnvironmentAPIKey(
				ctx,
				id,
				s.accountClient,
				s.environmentAPIKeyCache,
				callerGatewayService,
				s.logger,
			)
		},
	)
	if err != nil {
		return nil, err
	}
	envAPIKey := k.(*accountproto.EnvironmentAPIKey)
	return envAPIKey, nil
}

// getEnvironmentAPIKey returns the key from cache when possible, otherwise
// falls back to the account service and re-populates the cache.
func (s *gatewayService) getEnvironmentAPIKey(
	ctx context.Context,
	id string,
	accountClient accountclient.Client,
	environmentAPIKeyCache cachev3.EnvironmentAPIKeyCache,
	caller string,
	logger *zap.Logger,
) (*accountproto.EnvironmentAPIKey, error) {
	envAPIKey, err := getEnvironmentAPIKeyFromCache(ctx, id, environmentAPIKeyCache, caller, cacheLayerExternal)
	if err == nil {
		return envAPIKey, nil
	}
	resp, err := accountClient.GetAPIKeyBySearchingAllEnvironments(
		ctx,
		&accountproto.GetAPIKeyBySearchingAllEnvironmentsRequest{Id: id},
	)
	if err != nil {
		if code := status.Code(err); code == codes.NotFound {
			return nil, errInvalidAPIKey
		}
		logger.Error(
			"Failed to get environment APIKey from account service",
			log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))...,
		)
		return nil, errInternal
	}
	envAPIKey = resp.EnvironmentApiKey
	if err :=
environmentAPIKeyCache.Put(envAPIKey); err != nil {
		// Cache failures are non-fatal: the key was already fetched successfully.
		logger.Error(
			"Failed to cache environment APIKey",
			log.FieldsFromImcomingContext(ctx).AddFields(
				zap.Error(err),
				zap.String("environmentNamespace", envAPIKey.EnvironmentNamespace),
			)...,
		)
	}
	return envAPIKey, nil
}

// evaluateFeatures resolves every segment referenced by the given features
// and evaluates them for the user under the given tag.
func (s *gatewayService) evaluateFeatures(
	ctx context.Context,
	user *userproto.User,
	features []*featureproto.Feature,
	environmentNamespace, tag string,
) (*featureproto.UserEvaluations, error) {
	// Collect the distinct segment ids used by the features.
	mapIDs := make(map[string]struct{})
	for _, f := range features {
		feature := &featuredomain.Feature{Feature: f}
		for _, id := range feature.ListSegmentIDs() {
			mapIDs[id] = struct{}{}
		}
	}
	mapSegmentUsers, err := s.listSegmentUsers(ctx, user.Id, mapIDs, environmentNamespace)
	if err != nil {
		s.logger.Error(
			"Failed to list segments",
			log.FieldsFromImcomingContext(ctx).AddFields(
				zap.Error(err),
				zap.String("environmentNamespace", environmentNamespace),
			)...,
		)
		return nil, err
	}
	userEvaluations, err := featuredomain.EvaluateFeatures(features, user, mapSegmentUsers, tag)
	if err != nil {
		s.logger.Error(
			"Failed to evaluate",
			log.FieldsFromImcomingContext(ctx).AddFields(
				zap.Error(err),
				zap.String("environmentNamespace", environmentNamespace),
			)...,
		)
		// Fixed: the error was previously logged but swallowed, so callers
		// received (nil, nil) and dereferenced a nil UserEvaluations.
		return nil, err
	}
	return userEvaluations, nil
}

// listSegmentUsers fetches the user lists of all given segments, collapsing
// concurrent fetches of the same segment via the flight group.
func (s *gatewayService) listSegmentUsers(
	ctx context.Context,
	userID string,
	mapSegmentIDs map[string]struct{},
	environmentNamespace string,
) (map[string][]*featureproto.SegmentUser, error) {
	if len(mapSegmentIDs) == 0 {
		return nil, nil
	}
	users := make(map[string][]*featureproto.SegmentUser)
	for segmentID := range mapSegmentIDs {
		// Result variable renamed from "s", which shadowed the receiver.
		res, err, _ := s.flightgroup.Do(s.segmentFlightID(environmentNamespace, segmentID), func() (interface{}, error) {
			return s.getSegmentUsers(ctx, segmentID, environmentNamespace)
		})
		if err != nil {
			return nil, err
		}
		segmentUsers := res.([]*featureproto.SegmentUser)
		users[segmentID] = segmentUsers
	}
	return users, nil
}

// segmentFlightID builds the flight-group key for a segment fetch.
func (s *gatewayService) segmentFlightID(environmentNamespace, segmentID string) string {
	return fmt.Sprintf("%s:%s", environmentNamespace, segmentID)
}

// getSegmentUsers returns a segment's users from cache, or from the feature
// service on a miss (re-populating the cache on success).
func (s *gatewayService) getSegmentUsers(
	ctx context.Context,
	segmentID, environmentNamespace string,
) ([]*featureproto.SegmentUser, error) {
	segmentUsers, err := s.getSegmentUsersFromCache(segmentID, environmentNamespace)
	if err == nil {
		return segmentUsers, nil
	}
	s.logger.Info(
		"No cached data for SegmentUsers",
		log.FieldsFromImcomingContext(ctx).AddFields(
			zap.Error(err),
			zap.String("environmentNamespace", environmentNamespace),
			zap.String("segmentId", segmentID),
		)...,
	)
	req := &featureproto.ListSegmentUsersRequest{
		SegmentId:            segmentID,
		EnvironmentNamespace: environmentNamespace,
	}
	res, err := s.featureClient.ListSegmentUsers(ctx, req)
	if err != nil {
		s.logger.Error(
			"Failed to retrieve segment users from storage",
			log.FieldsFromImcomingContext(ctx).AddFields(
				zap.Error(err),
				zap.String("environmentNamespace", environmentNamespace),
				zap.String("segmentId", segmentID),
			)...,
		)
		return nil, errInternal
	}
	su := &featureproto.SegmentUsers{
		SegmentId: segmentID,
		Users:     res.Users,
	}
	if err := s.segmentUsersCache.Put(su, environmentNamespace); err != nil {
		// Non-fatal: serve the fetched users even if caching fails.
		s.logger.Error(
			"Failed to cache segment users",
			log.FieldsFromImcomingContext(ctx).AddFields(
				zap.Error(err),
				zap.String("environmentNamespace", environmentNamespace),
				zap.String("segmentId", segmentID),
			)...,
		)
	}
	return res.Users, nil
}

// getSegmentUsersFromCache is a thin cache read; the error is the cache miss.
func (s *gatewayService) getSegmentUsersFromCache(
	segmentID, environmentNamespace string,
) ([]*featureproto.SegmentUser, error) {
	segment, err := s.segmentUsersCache.Get(segmentID, environmentNamespace)
	if err == nil {
		return segment.Users, nil
	}
	return nil, err
}

func (s *gatewayService) getFeatures(
	ctx context.Context,
	environmentNamespace string,
)
([]*featureproto.Feature, error) {
	fs, err := s.getFeaturesFromCache(ctx, environmentNamespace)
	if err == nil {
		return fs.Features, nil
	}
	s.logger.Info(
		"No cached data for Features",
		log.FieldsFromImcomingContext(ctx).AddFields(
			zap.Error(err),
			zap.String("environmentNamespace", environmentNamespace),
		)...,
	)
	features, err := s.listFeatures(ctx, environmentNamespace)
	if err != nil {
		s.logger.Error(
			"Failed to retrieve features from storage",
			log.FieldsFromImcomingContext(ctx).AddFields(
				zap.Error(err),
				zap.String("environmentNamespace", environmentNamespace),
			)...,
		)
		return nil, errInternal
	}
	// Best-effort cache refill: a failure here is logged but the fetched
	// features are still returned.
	if err := s.featuresCache.Put(&featureproto.Features{Features: features}, environmentNamespace); err != nil {
		s.logger.Error(
			"Failed to cache features",
			log.FieldsFromImcomingContext(ctx).AddFields(
				zap.Error(err),
				zap.String("environmentNamespace", environmentNamespace),
			)...,
		)
	}
	return features, nil
}

// getFeaturesFromCache reads the namespace's feature list from the external
// cache, recording hit/miss metrics.
func (s *gatewayService) getFeaturesFromCache(
	ctx context.Context,
	environmentNamespace string,
) (*featureproto.Features, error) {
	features, err := s.featuresCache.Get(environmentNamespace)
	if err == nil {
		restCacheCounter.WithLabelValues(callerGatewayService, typeFeatures, cacheLayerExternal, codeHit).Inc()
		return features, nil
	}
	restCacheCounter.WithLabelValues(callerGatewayService, typeFeatures, cacheLayerExternal, codeMiss).Inc()
	return nil, err
}

// listFeatures pages through all non-archived features of the namespace,
// skipping disabled features that have no off-variation.
func (s *gatewayService) listFeatures(
	ctx context.Context,
	environmentNamespace string,
) ([]*featureproto.Feature, error) {
	features := []*featureproto.Feature{}
	cursor := ""
	for {
		resp, err := s.featureClient.ListFeatures(ctx, &featureproto.ListFeaturesRequest{
			PageSize:             listRequestSize,
			Cursor:               cursor,
			EnvironmentNamespace: environmentNamespace,
			Archived:             &wrappers.BoolValue{Value: false},
		})
		if err != nil {
			return nil, err
		}
		for _, f := range resp.Features {
			// A disabled feature without an off-variation yields no evaluation.
			if !f.Enabled && f.OffVariation == "" {
				continue
			}
			features = append(features, f)
		}
		featureSize := len(resp.Features)
		// A short (or empty) page means there are no more results.
		if featureSize == 0 || featureSize < listRequestSize {
			return features, nil
		}
		cursor = resp.Cursor
	}
}

// upsertUserEvaluation persists a single evaluation result for the user.
func (s *gatewayService) upsertUserEvaluation(
	ctx context.Context,
	environmentNamespace, tag string,
	evaluation *featureproto.Evaluation,
) error {
	if err := s.userEvaluationStorage.UpsertUserEvaluation(
		ctx,
		evaluation,
		environmentNamespace,
		tag,
	); err != nil {
		return err
	}
	return nil
}

// registerEvents accepts a batch of client events, converts each to its proto
// form, and publishes them per type; per-event failures are reported back in
// the response instead of failing the whole batch.
func (s *gatewayService) registerEvents(w http.ResponseWriter, req *http.Request) {
	envAPIKey, reqBody, err := s.checkRegisterEvents(req)
	if err != nil {
		rest.ReturnFailureResponse(w, err)
		return
	}
	errs := make(map[string]*registerEventsResponseError)
	goalMessages := make([]publisher.Message, 0)
	goalBatchMessages := make([]publisher.Message, 0)
	evaluationMessages := make([]publisher.Message, 0)
	metricsMessages := make([]publisher.Message, 0)
	// publish sends one batch and records, per event id, whether a failed
	// publish is worth retrying (anything but ErrBadMessage is).
	publish := func(p publisher.Publisher, messages []publisher.Message, typ string) {
		errors := p.PublishMulti(req.Context(), messages)
		var repeatableErrors, nonRepeateableErrors float64
		for id, err := range errors {
			retriable := err != publisher.ErrBadMessage
			if retriable {
				repeatableErrors++
			} else {
				nonRepeateableErrors++
			}
			s.logger.Error(
				"Failed to publish event",
				log.FieldsFromImcomingContext(req.Context()).AddFields(
					zap.Error(err),
					zap.String("environmentNamespace", envAPIKey.EnvironmentNamespace),
					zap.String("id", id),
				)...,
			)
			errs[id] = &registerEventsResponseError{
				Retriable: retriable,
				Message:   "Failed to publish event",
			}
		}
		restEventCounter.WithLabelValues(callerGatewayService, typ, codeNonRepeatableError).Add(nonRepeateableErrors)
		restEventCounter.WithLabelValues(callerGatewayService, typ, codeRepeatableError).Add(repeatableErrors)
		restEventCounter.WithLabelValues(callerGatewayService, typ, codeOK).Add(float64(len(messages) -
len(errors)))
	}
	// Convert each raw event to its typed proto; invalid events are recorded
	// in errs and skipped, valid ones are batched per type for publishing.
	for _, event := range reqBody.Events {
		event.EnvironmentNamespace = envAPIKey.EnvironmentNamespace
		if event.ID == "" {
			rest.ReturnFailureResponse(w, errMissingEventID)
			return
		}
		switch event.Type {
		case goalEventType:
			goal, errCode, err := s.getGoalEvent(req.Context(), event)
			if err != nil {
				// Fixed: failure was previously counted under typeMetrics.
				restEventCounter.WithLabelValues(callerGatewayService, typeGoal, errCode).Inc()
				errs[event.ID] = &registerEventsResponseError{
					Retriable: false,
					Message:   err.Error(),
				}
				continue
			}
			goalAny, err := ptypes.MarshalAny(goal)
			if err != nil {
				restEventCounter.WithLabelValues(callerGatewayService, typeGoal, codeMarshalAnyFailed).Inc()
				errs[event.ID] = &registerEventsResponseError{
					Retriable: false,
					Message:   err.Error(),
				}
				continue
			}
			goalMessages = append(goalMessages, &eventproto.Event{
				Id:                   event.ID,
				Event:                goalAny,
				EnvironmentNamespace: event.EnvironmentNamespace,
			})
		case goalBatchEventType:
			batch, errCode, err := s.getGoalBatchEvent(req.Context(), event)
			if err != nil {
				// Fixed: failure was previously counted under typeMetrics.
				restEventCounter.WithLabelValues(callerGatewayService, typeGoalBatch, errCode).Inc()
				errs[event.ID] = &registerEventsResponseError{
					Retriable: false,
					Message:   err.Error(),
				}
				// Fixed: the missing continue let a nil batch reach MarshalAny.
				continue
			}
			batchAny, err := ptypes.MarshalAny(batch)
			if err != nil {
				restEventCounter.WithLabelValues(callerGatewayService, typeGoalBatch, codeMarshalAnyFailed).Inc()
				errs[event.ID] = &registerEventsResponseError{
					Retriable: false,
					Message:   err.Error(),
				}
				continue
			}
			goalBatchMessages = append(goalBatchMessages, &eventproto.Event{
				Id:                   event.ID,
				Event:                batchAny,
				EnvironmentNamespace: event.EnvironmentNamespace,
			})
		case evaluationEventType:
			eval, errCode, err := s.getEvaluationEvent(req.Context(), event)
			if err != nil {
				// Fixed: failure was previously counted under typeMetrics.
				restEventCounter.WithLabelValues(callerGatewayService, typeEvaluation, errCode).Inc()
				errs[event.ID] = &registerEventsResponseError{
					Retriable: false,
					Message:   err.Error(),
				}
				// Fixed: the missing continue let a nil eval reach convToEvaluation.
				continue
			}
			evaluation, tag, err := s.convToEvaluation(req.Context(), eval)
			if err != nil {
				// Fixed: used the REST counter consistently (was eventCounter).
				restEventCounter.WithLabelValues(callerGatewayService, typeEvaluation, codeEvaluationConversionFailed).Inc()
				errs[event.ID] = &registerEventsResponseError{
					Retriable: false,
					Message:   err.Error(),
				}
				continue
			}
			if err := s.upsertUserEvaluation(req.Context(), envAPIKey.EnvironmentNamespace, tag, evaluation); err != nil {
				// Fixed: used the REST counter consistently (was eventCounter).
				restEventCounter.WithLabelValues(callerGatewayService, typeEvaluation, codeUpsertUserEvaluationFailed).Inc()
				errs[event.ID] = &registerEventsResponseError{
					Retriable: true,
					Message:   "Failed to upsert user evaluation",
				}
				continue
			}
			evalAny, err := ptypes.MarshalAny(eval)
			if err != nil {
				restEventCounter.WithLabelValues(callerGatewayService, typeEvaluation, codeMarshalAnyFailed).Inc()
				errs[event.ID] = &registerEventsResponseError{
					Retriable: false,
					Message:   err.Error(),
				}
				continue
			}
			evaluationMessages = append(evaluationMessages, &eventproto.Event{
				Id:                   event.ID,
				Event:                evalAny,
				EnvironmentNamespace: event.EnvironmentNamespace,
			})
		case metricsEventType:
			metrics, errCode, err := s.getMetricsEvent(req.Context(), event)
			if err != nil {
				restEventCounter.WithLabelValues(callerGatewayService, typeMetrics, errCode).Inc()
				errs[event.ID] = &registerEventsResponseError{
					Retriable: false,
					Message:   err.Error(),
				}
				// Fixed: the missing continue let a nil metrics event reach MarshalAny.
				continue
			}
			metricsAny, err := ptypes.MarshalAny(metrics)
			if err != nil {
				restEventCounter.WithLabelValues(callerGatewayService, typeMetrics, codeMarshalAnyFailed).Inc()
				errs[event.ID] = &registerEventsResponseError{
					Retriable: false,
					Message:   err.Error(),
				}
				continue
			}
			metricsMessages = append(metricsMessages, &eventproto.Event{
				Id:                   event.ID,
				Event:                metricsAny,
				EnvironmentNamespace: event.EnvironmentNamespace,
			})
		default:
			errs[event.ID] = &registerEventsResponseError{
				Retriable: false,
				Message:   errInvalidType.Error(),
			}
			restEventCounter.WithLabelValues(callerGatewayService, typeUnknown, codeInvalidType).Inc()
			continue
		}
	}
	publish(s.goalPublisher, goalMessages, typeGoal)
	publish(s.goalBatchPublisher, goalBatchMessages, typeGoalBatch)
	publish(s.evaluationPublisher, evaluationMessages, typeEvaluation)
	publish(s.metricsPublisher, metricsMessages, typeMetrics)
	if len(errs) > 0 {
		if s.containsInvalidTimestampError(errs) {
			restEventCounter.WithLabelValues(callerGatewayService, typeRegisterEvent, codeInvalidTimestampRequest).Inc()
		}
	} else {
		restEventCounter.WithLabelValues(callerGatewayService, typeRegisterEvent, codeOK).Inc()
	}
	rest.ReturnSuccessResponse(
		w,
		registerEventsResponse{Errors: errs},
	)
}

/* Because we got the following error, `nolint` is added. After solving it, we'll remove it.

pkg/gateway/api/api.go:829:47: cannot use ev
(variable of type *"github.com/bucketeer-io/bucketeer/proto/event/client".GoalEvent)
as protoreflect.ProtoMessage value in argument to protojson.Unmarshal:
missing method ProtoReflect (typecheck)
	if err := protojson.Unmarshal(event.Event, ev); err != nil {
	                                           ^
*/
// getGoalEvent decodes and timestamp-validates a raw goal event.
//nolint:typecheck
func (s *gatewayService) getGoalEvent(ctx context.Context, event event) (*eventproto.GoalEvent, string, error) {
	ev := &eventproto.GoalEvent{}
	if err := protojson.Unmarshal(event.Event, ev); err != nil {
		s.logger.Error(
			"Failed to extract goal event",
			log.FieldsFromImcomingContext(ctx).AddFields(
				zap.Error(err),
				zap.String("id", event.ID),
			)...,
		)
		return nil, codeUnmarshalFailed, errUnmarshalFailed
	}
	errorCode, err := s.validateGoalEvent(ctx, event.ID, ev.Timestamp)
	if err != nil {
		return nil, errorCode, err
	}
	return ev, "", nil
}

/* Because we got the following error, `nolint` is added. After solving it, we'll remove it.

pkg/gateway/api/api.go:829:47: cannot use ev
(variable of type *"github.com/bucketeer-io/bucketeer/proto/event/client".GoalEvent)
as protoreflect.ProtoMessage value in argument to protojson.Unmarshal:
missing method ProtoReflect (typecheck)
	if err := protojson.Unmarshal(event.Event, ev); err != nil {
	                                           ^
*/
// getGoalBatchEvent decodes and validates a raw goal-batch event.
//nolint:typecheck
func (s *gatewayService) getGoalBatchEvent(
	ctx context.Context,
	event event,
) (*eventproto.GoalBatchEvent, string, error) {
	ev := &eventproto.GoalBatchEvent{}
	if err := protojson.Unmarshal(event.Event, ev); err != nil {
		s.logger.Error(
			"Failed to extract goal batch event",
			log.FieldsFromImcomingContext(ctx).AddFields(
				zap.Error(err),
				zap.String("id", event.ID),
			)...,
		)
		return nil, codeUnmarshalFailed, errUnmarshalFailed
	}
	errorCode, err := s.validateGoalBatchEvent(ctx, event.ID, ev)
	if err != nil {
		return nil, errorCode, err
	}
	return ev, "", nil
}

/* Because we got the following error, `nolint` is added. After solving it, we'll remove it.

pkg/gateway/api/api.go:829:47: cannot use ev
(variable of type *"github.com/bucketeer-io/bucketeer/proto/event/client".GoalEvent)
as protoreflect.ProtoMessage value in argument to protojson.Unmarshal:
missing method ProtoReflect (typecheck)
	if err := protojson.Unmarshal(event.Event, ev); err != nil {
	                                           ^
*/
// getEvaluationEvent decodes and timestamp-validates a raw evaluation event.
//nolint:typecheck
func (s *gatewayService) getEvaluationEvent(
	ctx context.Context,
	event event,
) (*eventproto.EvaluationEvent, string, error) {
	ev := &eventproto.EvaluationEvent{}
	if err := protojson.Unmarshal(event.Event, ev); err != nil {
		s.logger.Error(
			"Failed to extract evaluation event",
			log.FieldsFromImcomingContext(ctx).AddFields(
				zap.Error(err),
				zap.String("id", event.ID),
			)...,
		)
		return nil, codeUnmarshalFailed, errUnmarshalFailed
	}
	errorCode, err := s.validateEvaluationEvent(ctx, event.ID, ev.Timestamp)
	if err != nil {
		return nil, errorCode, err
	}
	return ev, "", nil
}

/* Because we got the following error, `nolint` is added. After solving it, we'll remove it.

pkg/gateway/api/api.go:829:47: cannot use ev
(variable of type *"github.com/bucketeer-io/bucketeer/proto/event/client".GoalEvent)
as protoreflect.ProtoMessage value in argument to protojson.Unmarshal:
missing method ProtoReflect (typecheck)
	if err := protojson.Unmarshal(event.Event, ev); err != nil {
	                                           ^
*/
// getMetricsEvent decodes the metrics envelope, then dispatches on the inner
// metrics type to build the proto MetricsEvent. The latency payload is plain
// JSON (duration), so it uses encoding/json; the others use protojson.
//nolint:typecheck
func (s *gatewayService) getMetricsEvent(
	ctx context.Context,
	event event,
) (*eventproto.MetricsEvent, string, error) {
	metricsEvt := &metricsEvent{}
	if err := json.Unmarshal(event.Event, metricsEvt); err != nil {
		s.logger.Error(
			"Failed to extract metrics event",
			log.FieldsFromImcomingContext(ctx).AddFields(
				zap.Error(err),
				zap.String("id", event.ID),
			)...,
		)
		return nil, codeUnmarshalFailed, errUnmarshalFailed
	}
	errorCode, err := s.validateMetricsEvent(ctx, event.ID)
	if err != nil {
		return nil, errorCode, err
	}
	var eventAny *anypb.Any
	switch metricsEvt.Type {
	case getEvaluationLatencyMetricsEventType:
		latency := &getEvaluationLatencyMetricsEvent{}
		if err := json.Unmarshal(metricsEvt.Event, latency); err != nil {
			s.logger.Error(
				"Failed to extract getEvaluationLatencyMetrics event",
				log.FieldsFromImcomingContext(ctx).AddFields(
					zap.Error(err),
					zap.String("id", event.ID),
				)...,
			)
			return nil, codeUnmarshalFailed, errUnmarshalFailed
		}
		eventAny, err = ptypes.MarshalAny(&eventproto.GetEvaluationLatencyMetricsEvent{
			Labels:   latency.Labels,
			Duration: ptypes.DurationProto(latency.Duration),
		})
		if err != nil {
			return nil, codeMarshalAnyFailed, err
		}
	case getEvaluationSizeMetricsEventType:
		size := &eventproto.GetEvaluationSizeMetricsEvent{}
		if err := protojson.Unmarshal(metricsEvt.Event, size); err != nil {
			s.logger.Error(
				"Failed to extract getEvaluationSizeMetrics event",
				log.FieldsFromImcomingContext(ctx).AddFields(
					zap.Error(err),
					zap.String("id", event.ID),
				)...,
			)
			return nil, codeUnmarshalFailed, errUnmarshalFailed
		}
		eventAny, err = ptypes.MarshalAny(size)
		if err != nil {
			return nil, codeMarshalAnyFailed, err
		}
	case timeoutErrorCountMetricsEventType:
		timeout := &eventproto.TimeoutErrorCountMetricsEvent{}
		if err := protojson.Unmarshal(metricsEvt.Event, timeout); err != nil {
			s.logger.Error(
				"Failed to extract timeoutErrorCountMetrics event",
				log.FieldsFromImcomingContext(ctx).AddFields(
					zap.Error(err),
					zap.String("id", event.ID),
				)...,
			)
			return nil, codeUnmarshalFailed, errUnmarshalFailed
		}
		eventAny, err = ptypes.MarshalAny(timeout)
		if err != nil {
			return nil, codeMarshalAnyFailed, err
		}
	case internalErrorCountMetricsEventType:
		internal := &eventproto.InternalErrorCountMetricsEvent{}
		if err := protojson.Unmarshal(metricsEvt.Event, internal); err != nil {
			s.logger.Error(
				"Failed to extract internalErrorCountMetrics event",
				log.FieldsFromImcomingContext(ctx).AddFields(
					zap.Error(err),
					zap.String("id", event.ID),
				)...,
			)
			return nil, codeUnmarshalFailed, errUnmarshalFailed
		}
		eventAny, err = ptypes.MarshalAny(internal)
		if err != nil {
			return nil, codeMarshalAnyFailed, err
		}
	default:
		return nil, codeInvalidType, errInvalidType
	}

	return &eventproto.MetricsEvent{
		Timestamp: metricsEvt.Timestamp,
		Event:     eventAny,
	}, "", nil
}

// checkRegisterEvents authenticates the request and decodes/validates the
// event batch body; an empty body or empty batch is rejected explicitly.
func (s *gatewayService) checkRegisterEvents(
	req *http.Request,
) (*accountproto.EnvironmentAPIKey, registerEventsRequest, error) {
	if req.Method != http.MethodPost {
		return nil, registerEventsRequest{}, errInvalidHttpMethod
	}
	envAPIKey, err := s.checkRequest(req.Context(), req)
	if err != nil {
		return nil, registerEventsRequest{}, err
	}
	var body registerEventsRequest
	if err := json.NewDecoder(req.Body).Decode(&body); err != nil {
		if err == io.EOF {
			return nil, registerEventsRequest{}, errBodyRequired
		}
		s.logger.Error(
			"Failed to decode request body",
			log.FieldsFromImcomingContext(req.Context()).AddFields(
				zap.Error(err),
			)...,
		)
		return nil,
registerEventsRequest{}, errInternal
	}
	if len(body.Events) == 0 {
		return nil, registerEventsRequest{}, errMissingEvents
	}
	return envAPIKey, body, nil
}

// convToEvaluation builds the persisted Evaluation record for an evaluation
// event and derives the tag it is stored under.
func (s *gatewayService) convToEvaluation(
	ctx context.Context,
	event *eventproto.EvaluationEvent,
) (*featureproto.Evaluation, string, error) {
	evaluation := &featureproto.Evaluation{
		Id: featuredomain.EvaluationID(
			event.FeatureId,
			event.FeatureVersion,
			event.UserId,
		),
		FeatureId:      event.FeatureId,
		FeatureVersion: event.FeatureVersion,
		UserId:         event.UserId,
		VariationId:    event.VariationId,
		Reason:         event.Reason,
	}
	// Requests without tag info fall back to "none" until all SDK clients
	// are updated to send it.
	var tag string
	if event.Tag == "" {
		tag = "none"
	} else {
		tag = event.Tag
	}
	return evaluation, tag, nil
}

// containsInvalidTimestampError reports whether any collected per-event error
// carries the invalid-timestamp message.
func (s *gatewayService) containsInvalidTimestampError(errs map[string]*registerEventsResponseError) bool {
	for _, v := range errs {
		if v.Message == errInvalidTimestamp.Error() {
			return true
		}
	}
	return false
}
diff --git a/pkg/gateway/api/api_grpc.go b/pkg/gateway/api/api_grpc.go
new file mode 100644
index 000000000..ac9cb9710
--- /dev/null
+++ b/pkg/gateway/api/api_grpc.go
@@ -0,0 +1,935 @@
// Copyright 2022 The Bucketeer Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package api

import (
	"context"
	"time"

	"github.com/golang/protobuf/ptypes"
	"github.com/golang/protobuf/ptypes/wrappers"
	"go.uber.org/zap"
	"golang.org/x/sync/singleflight"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	gmetadata "google.golang.org/grpc/metadata"
	"google.golang.org/grpc/status"

	accountclient "github.com/bucketeer-io/bucketeer/pkg/account/client"
	"github.com/bucketeer-io/bucketeer/pkg/cache"
	cachev3 "github.com/bucketeer-io/bucketeer/pkg/cache/v3"
	featureclient "github.com/bucketeer-io/bucketeer/pkg/feature/client"
	featuredomain "github.com/bucketeer-io/bucketeer/pkg/feature/domain"
	ftstorage "github.com/bucketeer-io/bucketeer/pkg/feature/storage"
	"github.com/bucketeer-io/bucketeer/pkg/log"
	"github.com/bucketeer-io/bucketeer/pkg/metrics"
	"github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher"
	"github.com/bucketeer-io/bucketeer/pkg/rpc"
	bigtable "github.com/bucketeer-io/bucketeer/pkg/storage/v2/bigtable"
	"github.com/bucketeer-io/bucketeer/pkg/uuid"
	accountproto "github.com/bucketeer-io/bucketeer/proto/account"
	eventproto "github.com/bucketeer-io/bucketeer/proto/event/client"
	serviceeventproto "github.com/bucketeer-io/bucketeer/proto/event/service"
	featureproto "github.com/bucketeer-io/bucketeer/proto/feature"
	gwproto "github.com/bucketeer-io/bucketeer/proto/gateway"
	userproto "github.com/bucketeer-io/bucketeer/proto/user"
)

const (
	// listRequestSize is the page size used when listing features.
	listRequestSize = 500
)

// gRPC status errors returned to SDK clients by the gateway service.
var (
	ErrUserRequired      = status.Error(codes.InvalidArgument, "gateway: user is required")
	ErrUserIDRequired    = status.Error(codes.InvalidArgument, "gateway: user id is required")
	ErrFeatureIDRequired = status.Error(codes.InvalidArgument, "gateway: feature id is required")
	ErrTagRequired       = status.Error(codes.InvalidArgument, "gateway: tag is required")
	ErrMissingEvents     = status.Error(codes.InvalidArgument, "gateway: missing events")
	ErrMissingEventID    = status.Error(codes.InvalidArgument, "gateway: missing event id")
	ErrContextCanceled   = status.Error(codes.Canceled, "gateway: context canceled")
	ErrFeatureNotFound   = status.Error(codes.NotFound, "gateway: feature not found")
	ErrMissingAPIKey     = status.Error(codes.Unauthenticated, "gateway: missing APIKey")
	ErrInvalidAPIKey     = status.Error(codes.PermissionDenied, "gateway: invalid APIKey")
	ErrDisabledAPIKey    = status.Error(codes.PermissionDenied, "gateway: disabled APIKey")
	ErrBadRole           = status.Error(codes.PermissionDenied, "gateway: bad role")
	ErrInternal          = status.Error(codes.Internal, "gateway: internal")

	// Zero-value event prototypes, presumably used for type matching — confirm usage later in this file.
	grpcGoalEvent       = &eventproto.GoalEvent{}
	grpcGoalBatchEvent  = &eventproto.GoalBatchEvent{}
	grpcEvaluationEvent = &eventproto.EvaluationEvent{}
	grpcMetricsEvent    = &eventproto.MetricsEvent{}
)

// options holds the tunables of the gRPC gateway service.
type options struct {
	apiKeyMemoryCacheTTL              time.Duration
	apiKeyMemoryCacheEvictionInterval time.Duration
	pubsubTimeout                     time.Duration
	oldestEventTimestamp              time.Duration
	furthestEventTimestamp            time.Duration
	metrics                           metrics.Registerer
	logger                            *zap.Logger
}

var defaultOptions = options{
	apiKeyMemoryCacheTTL:              5 * time.Minute,
	apiKeyMemoryCacheEvictionInterval: 30 * time.Second,
	pubsubTimeout:                     20 * time.Second,
	oldestEventTimestamp:              24 * time.Hour,
	furthestEventTimestamp:            24 * time.Hour,
	logger:                            zap.NewNop(),
}

// Option mutates the service options at construction time.
type Option func(*options)

// WithOldestEventTimestamp sets how far in the past an event timestamp may lie.
func WithOldestEventTimestamp(d time.Duration) Option {
	return func(opts *options) {
		opts.oldestEventTimestamp = d
	}
}

// WithFurthestEventTimestamp sets how far in the future an event timestamp may lie.
func WithFurthestEventTimestamp(d time.Duration) Option {
	return func(opts *options) {
		opts.furthestEventTimestamp = d
	}
}

func WithAPIKeyMemoryCacheTTL(ttl time.Duration) Option {
	return func(opts *options) {
		opts.apiKeyMemoryCacheTTL = ttl
	}
}

func WithAPIKeyMemoryCacheEvictionInterval(interval time.Duration) Option {
	return func(opts *options) {
		opts.apiKeyMemoryCacheEvictionInterval = interval
	}
}

func WithMetrics(r metrics.Registerer) Option {
	return func(opts *options) {
		opts.metrics = r
	}
}

func WithLogger(l *zap.Logger) Option {
	return func(opts *options) {
		opts.logger = l
	}
}

// grpcGatewayService is the gRPC-facing SDK gateway: it serves evaluations
// and ingests client events, backed by caches and per-type publishers.
type grpcGatewayService struct {
	userEvaluationStorage  ftstorage.UserEvaluationsStorage
	featureClient          featureclient.Client
	accountClient          accountclient.Client
	goalPublisher          publisher.Publisher
	goalBatchPublisher     publisher.Publisher
	evaluationPublisher    publisher.Publisher
	userPublisher          publisher.Publisher
	metricsPublisher       publisher.Publisher
	featuresCache          cachev3.FeaturesCache
	segmentUsersCache      cachev3.SegmentUsersCache
	environmentAPIKeyCache cachev3.EnvironmentAPIKeyCache
	flightgroup            singleflight.Group
	opts                   *options
	logger                 *zap.Logger
}

// NewGrpcGatewayService wires storage, clients, publishers, and caches into a
// gateway service; metrics are registered only when a registerer is supplied.
func NewGrpcGatewayService(
	bt bigtable.Client,
	featureClient featureclient.Client,
	accountClient accountclient.Client,
	gp publisher.Publisher,
	gbp publisher.Publisher,
	ep publisher.Publisher,
	up publisher.Publisher,
	mp publisher.Publisher,
	v3Cache cache.MultiGetCache,
	opts ...Option,
) rpc.Service {
	options := defaultOptions
	for _, opt := range opts {
		opt(&options)
	}
	if options.metrics != nil {
		registerMetrics(options.metrics)
	}
	return &grpcGatewayService{
		userEvaluationStorage:  ftstorage.NewUserEvaluationsStorage(bt),
		featureClient:          featureClient,
		accountClient:          accountClient,
		goalPublisher:          gp,
		goalBatchPublisher:     gbp,
		evaluationPublisher:    ep,
		userPublisher:          up,
		metricsPublisher:       mp,
		featuresCache:          cachev3.NewFeaturesCache(v3Cache),
		segmentUsersCache:      cachev3.NewSegmentUsersCache(v3Cache),
		environmentAPIKeyCache: cachev3.NewEnvironmentAPIKeyCache(v3Cache),
		opts:                   &options,
		logger:                 options.logger.Named("api_grpc"),
	}
}

// Register attaches the service to the given gRPC server.
func (s *grpcGatewayService) Register(server *grpc.Server) {
	gwproto.RegisterGatewayServer(server, s)
}

// Ping returns the current server unix time; used for liveness checks.
func (s *grpcGatewayService) Ping(ctx context.Context, req *gwproto.PingRequest) (*gwproto.PingResponse, error) {
	return &gwproto.PingResponse{Time: time.Now().Unix()}, nil
}

func
(s *grpcGatewayService) GetEvaluations( + ctx context.Context, + req *gwproto.GetEvaluationsRequest, +) (*gwproto.GetEvaluationsResponse, error) { + envAPIKey, err := s.checkRequest(ctx) + if err != nil { + return nil, err + } + if err := s.validateGetEvaluationsRequest(req); err != nil { + return nil, err + } + s.publishUser(ctx, envAPIKey.EnvironmentNamespace, req.Tag, req.User, req.SourceId) + f, err, _ := s.flightgroup.Do( + envAPIKey.EnvironmentNamespace, + func() (interface{}, error) { + return s.getFeatures(ctx, envAPIKey.EnvironmentNamespace) + }, + ) + if err != nil { + return nil, err + } + features := f.([]*featureproto.Feature) + if len(features) == 0 { + return &gwproto.GetEvaluationsResponse{ + State: featureproto.UserEvaluations_FULL, + Evaluations: nil, + }, nil + } + ueid := featuredomain.UserEvaluationsID(req.User.Id, req.User.Data, features) + if req.UserEvaluationsId == ueid { + return &gwproto.GetEvaluationsResponse{ + State: featureproto.UserEvaluations_FULL, + Evaluations: nil, + UserEvaluationsId: ueid, + }, nil + } + evaluations, err := s.evaluateFeatures(ctx, req.User, features, envAPIKey.EnvironmentNamespace, req.Tag) + if err != nil { + s.logger.Error( + "Failed to evaluate features", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", envAPIKey.EnvironmentNamespace), + zap.String("userId", req.User.Id), + )..., + ) + return nil, ErrInternal + } + return &gwproto.GetEvaluationsResponse{ + State: featureproto.UserEvaluations_FULL, + Evaluations: evaluations, + UserEvaluationsId: ueid, + }, nil +} + +func (s *grpcGatewayService) validateGetEvaluationsRequest(req *gwproto.GetEvaluationsRequest) error { + if req.Tag == "" { + return ErrTagRequired + } + if req.User == nil { + return ErrUserRequired + } + if req.User.Id == "" { + return ErrUserIDRequired + } + return nil +} + +func (s *grpcGatewayService) GetEvaluation( + ctx context.Context, + req *gwproto.GetEvaluationRequest, +) 
(*gwproto.GetEvaluationResponse, error) { + envAPIKey, err := s.checkRequest(ctx) + if err != nil { + return nil, err + } + if err := s.validateGetEvaluationRequest(req); err != nil { + return nil, err + } + s.publishUser(ctx, envAPIKey.EnvironmentNamespace, req.Tag, req.User, req.SourceId) + f, err, _ := s.flightgroup.Do( + envAPIKey.EnvironmentNamespace, + func() (interface{}, error) { + return s.getFeatures(ctx, envAPIKey.EnvironmentNamespace) + }, + ) + if err != nil { + return nil, err + } + fs := f.([]*featureproto.Feature) + var features []*featureproto.Feature + for _, f := range fs { + if f.Id == req.FeatureId { + features = append(features, f) + break + } + } + if len(features) == 0 { + return nil, ErrFeatureNotFound + } + evaluations, err := s.evaluateFeatures(ctx, req.User, features, envAPIKey.EnvironmentNamespace, req.Tag) + if err != nil { + s.logger.Error( + "Failed to evaluate features", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", envAPIKey.EnvironmentNamespace), + zap.String("userId", req.User.Id), + zap.String("featureId", req.FeatureId), + )..., + ) + return nil, ErrInternal + } + if err := s.upsertUserEvaluation( + ctx, + envAPIKey.EnvironmentNamespace, + req.Tag, + evaluations.Evaluations[0], + ); err != nil { + eventCounter.WithLabelValues(callerGatewayService, typeMetrics, codeUpsertUserEvaluationFailed).Inc() + s.logger.Error( + "Failed to upsert user evaluation while trying to get evaluation", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", envAPIKey.EnvironmentNamespace), + zap.String("userId", req.User.Id), + zap.String("featureId", req.FeatureId), + )..., + ) + return nil, ErrInternal + } + return &gwproto.GetEvaluationResponse{ + Evaluation: evaluations.Evaluations[0], + }, nil +} + +func (s *grpcGatewayService) validateGetEvaluationRequest(req *gwproto.GetEvaluationRequest) error { + if req.Tag == "" { + return 
ErrTagRequired + } + if req.User == nil { + return ErrUserRequired + } + if req.User.Id == "" { + return ErrUserIDRequired + } + if req.FeatureId == "" { + return ErrFeatureIDRequired + } + return nil +} + +func (s *grpcGatewayService) publishUser( + ctx context.Context, + environmentNamespace, + tag string, + user *userproto.User, + sourceID eventproto.SourceId, +) { + // TODO: using buffered channel to reduce the number of go routines + go func() { + ctx, cancel := context.WithTimeout(context.Background(), s.opts.pubsubTimeout) + defer cancel() + if err := s.publishUserEvent(ctx, user, tag, environmentNamespace, sourceID); err != nil { + s.logger.Error( + "Failed to publish UserEvent", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + } + }() +} + +func (s *grpcGatewayService) publishUserEvent( + ctx context.Context, + user *userproto.User, + tag, environmentNamespace string, + sourceID eventproto.SourceId, +) error { + id, err := uuid.NewUUID() + if err != nil { + return err + } + userEvent := &serviceeventproto.UserEvent{ + Id: id.String(), + SourceId: sourceID, + Tag: tag, + UserId: user.Id, + LastSeen: time.Now().Unix(), + Data: user.Data, + EnvironmentNamespace: environmentNamespace, + } + ue, err := ptypes.MarshalAny(userEvent) + if err != nil { + return err + } + event := &eventproto.Event{ + Id: id.String(), + Event: ue, + EnvironmentNamespace: environmentNamespace, + } + return s.userPublisher.Publish(ctx, event) +} + +func (s *grpcGatewayService) getFeatures( + ctx context.Context, + environmentNamespace string, +) ([]*featureproto.Feature, error) { + fs, err := s.getFeaturesFromCache(ctx, environmentNamespace) + if err == nil { + return fs.Features, nil + } + s.logger.Info( + "No cached data for Features", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + features, err := 
s.listFeatures(ctx, environmentNamespace) + if err != nil { + s.logger.Error( + "Failed to retrieve features from storage", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, ErrInternal + } + if err := s.featuresCache.Put(&featureproto.Features{Features: features}, environmentNamespace); err != nil { + s.logger.Error( + "Failed to cache features", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + } + return features, nil +} + +func (s *grpcGatewayService) listFeatures( + ctx context.Context, + environmentNamespace string, +) ([]*featureproto.Feature, error) { + features := []*featureproto.Feature{} + cursor := "" + for { + resp, err := s.featureClient.ListFeatures(ctx, &featureproto.ListFeaturesRequest{ + PageSize: listRequestSize, + Cursor: cursor, + EnvironmentNamespace: environmentNamespace, + Archived: &wrappers.BoolValue{Value: false}, + }) + if err != nil { + return nil, err + } + for _, f := range resp.Features { + if !f.Enabled && f.OffVariation == "" { + continue + } + features = append(features, f) + } + featureSize := len(resp.Features) + if featureSize == 0 || featureSize < listRequestSize { + return features, nil + } + cursor = resp.Cursor + } +} + +func (s *grpcGatewayService) getFeaturesFromCache( + ctx context.Context, + environmentNamespace string, +) (*featureproto.Features, error) { + features, err := s.featuresCache.Get(environmentNamespace) + if err == nil { + cacheCounter.WithLabelValues(callerGatewayService, typeFeatures, cacheLayerExternal, codeHit).Inc() + return features, nil + } + cacheCounter.WithLabelValues(callerGatewayService, typeFeatures, cacheLayerExternal, codeMiss).Inc() + return nil, err +} + +func (s *grpcGatewayService) evaluateFeatures( + ctx context.Context, + user *userproto.User, + features []*featureproto.Feature, + 
environmentNamespace, tag string, +) (*featureproto.UserEvaluations, error) { + mapIDs := make(map[string]struct{}) + for _, f := range features { + feature := &featuredomain.Feature{Feature: f} + for _, id := range feature.ListSegmentIDs() { + mapIDs[id] = struct{}{} + } + } + mapSegmentUsers, err := s.listSegmentUsers(ctx, user.Id, mapIDs, environmentNamespace) + if err != nil { + s.logger.Error( + "Failed to list segments", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, err + } + userEvaluations, err := featuredomain.EvaluateFeatures(features, user, mapSegmentUsers, tag) + if err != nil { + s.logger.Error( + "Failed to evaluate", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + } + return userEvaluations, nil +} + +func (s *grpcGatewayService) listSegmentUsers( + ctx context.Context, + userID string, + mapSegmentIDs map[string]struct{}, + environmentNamespace string, +) (map[string][]*featureproto.SegmentUser, error) { + if len(mapSegmentIDs) == 0 { + return nil, nil + } + users := make(map[string][]*featureproto.SegmentUser) + for segmentID := range mapSegmentIDs { + s, err, _ := s.flightgroup.Do(s.segmentFlightID(environmentNamespace, segmentID), func() (interface{}, error) { + return s.getSegmentUsers(ctx, segmentID, environmentNamespace) + }) + if err != nil { + return nil, err + } + segmentUsers := s.([]*featureproto.SegmentUser) + users[segmentID] = segmentUsers + } + return users, nil +} + +func (s *grpcGatewayService) segmentFlightID(environmentNamespace, segmentID string) string { + return environmentNamespace + ":" + segmentID +} + +func (s *grpcGatewayService) getSegmentUsers( + ctx context.Context, + segmentID, environmentNamespace string, +) ([]*featureproto.SegmentUser, error) { + segmentUsers, err := s.getSegmentUsersFromCache(segmentID, 
environmentNamespace) + if err == nil { + return segmentUsers, nil + } + s.logger.Info( + "No cached data for SegmentUsers", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + zap.String("segmentId", segmentID), + )..., + ) + req := &featureproto.ListSegmentUsersRequest{ + SegmentId: segmentID, + EnvironmentNamespace: environmentNamespace, + } + res, err := s.featureClient.ListSegmentUsers(ctx, req) + if err != nil { + s.logger.Error( + "Failed to retrieve segment users from storage", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + zap.String("segmentId", segmentID), + )..., + ) + return nil, ErrInternal + } + su := &featureproto.SegmentUsers{ + SegmentId: segmentID, + Users: res.Users, + } + if err := s.segmentUsersCache.Put(su, environmentNamespace); err != nil { + s.logger.Error( + "Failed to cache segment users", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + zap.String("segmentId", segmentID), + )..., + ) + } + return res.Users, nil +} + +func (s *grpcGatewayService) getSegmentUsersFromCache( + segmentID, environmentNamespace string, +) ([]*featureproto.SegmentUser, error) { + segment, err := s.segmentUsersCache.Get(segmentID, environmentNamespace) + if err == nil { + cacheCounter.WithLabelValues(callerGatewayService, typeSegmentUsers, cacheLayerExternal, codeHit).Inc() + return segment.Users, nil + } + cacheCounter.WithLabelValues(callerGatewayService, typeSegmentUsers, cacheLayerExternal, codeMiss).Inc() + return nil, err +} + +func (s *grpcGatewayService) upsertUserEvaluation( + ctx context.Context, + environmentNamespace, tag string, + evaluation *featureproto.Evaluation, +) error { + if err := s.userEvaluationStorage.UpsertUserEvaluation( + ctx, + evaluation, + environmentNamespace, + tag, + ); err != nil { + return err + 
} + return nil +} + +func (s *grpcGatewayService) convToEvaluation( + ctx context.Context, + event *eventproto.Event, +) (*featureproto.Evaluation, string, error) { + ev := &eventproto.EvaluationEvent{} + if err := ptypes.UnmarshalAny(event.Event, ev); err != nil { + s.logger.Error( + "Failed to extract evaluation event for converting evaluation", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("id", event.Id), + )..., + ) + return nil, "", errUnmarshalFailed + } + evaluation := &featureproto.Evaluation{ + Id: featuredomain.EvaluationID( + ev.FeatureId, + ev.FeatureVersion, + ev.UserId, + ), + FeatureId: ev.FeatureId, + FeatureVersion: ev.FeatureVersion, + UserId: ev.UserId, + VariationId: ev.VariationId, + Reason: ev.Reason, + } + // For requests that doesn't have the tag info, + // it will insert none instead, until all SDK clients are updated + var tag string + if ev.Tag == "" { + tag = "none" + } else { + tag = ev.Tag + } + return evaluation, tag, nil +} + +func (s *grpcGatewayService) RegisterEvents( + ctx context.Context, + req *gwproto.RegisterEventsRequest, +) (*gwproto.RegisterEventsResponse, error) { + envAPIKey, err := s.checkRequest(ctx) + if err != nil { + return nil, err + } + if len(req.Events) == 0 { + return nil, ErrMissingEvents + } + errs := make(map[string]*gwproto.RegisterEventsResponse_Error) + goalMessages := make([]publisher.Message, 0) + goalBatchMessages := make([]publisher.Message, 0) + evaluationMessages := make([]publisher.Message, 0) + metricsMessages := make([]publisher.Message, 0) + publish := func(p publisher.Publisher, messages []publisher.Message, typ string) { + errors := p.PublishMulti(ctx, messages) + var repeatableErrors, nonRepeateableErrors float64 + for id, err := range errors { + retriable := err != publisher.ErrBadMessage + if retriable { + repeatableErrors++ + } else { + nonRepeateableErrors++ + } + s.logger.Error( + "Failed to publish event", + log.FieldsFromImcomingContext(ctx).AddFields( 
+ zap.Error(err), + zap.String("environmentNamespace", envAPIKey.EnvironmentNamespace), + zap.String("id", id), + )..., + ) + errs[id] = &gwproto.RegisterEventsResponse_Error{ + Retriable: retriable, + Message: "Failed to publish event", + } + } + eventCounter.WithLabelValues(callerGatewayService, typ, codeNonRepeatableError).Add(nonRepeateableErrors) + eventCounter.WithLabelValues(callerGatewayService, typ, codeRepeatableError).Add(repeatableErrors) + eventCounter.WithLabelValues(callerGatewayService, typ, codeOK).Add(float64(len(messages) - len(errors))) + } + for _, event := range req.Events { + event.EnvironmentNamespace = envAPIKey.EnvironmentNamespace + if event.Id == "" { + return nil, ErrMissingEventID + } + validator := newEventValidator(event, s.opts.oldestEventTimestamp, s.opts.furthestEventTimestamp, s.logger) + if validator == nil { + errs[event.Id] = &gwproto.RegisterEventsResponse_Error{ + Retriable: false, + Message: "Invalid message type", + } + eventCounter.WithLabelValues(callerGatewayService, typeUnknown, codeInvalidType).Inc() + continue + } + if ptypes.Is(event.Event, grpcGoalEvent) { + errorCode, err := validator.validate(ctx) + if err != nil { + eventCounter.WithLabelValues(callerGatewayService, typeGoal, errorCode).Inc() + errs[event.Id] = &gwproto.RegisterEventsResponse_Error{ + Retriable: false, + Message: err.Error(), + } + continue + } + goalMessages = append(goalMessages, event) + continue + } + if ptypes.Is(event.Event, grpcGoalBatchEvent) { + errorCode, err := validator.validate(ctx) + if err != nil { + eventCounter.WithLabelValues(callerGatewayService, typeGoalBatch, errorCode).Inc() + errs[event.Id] = &gwproto.RegisterEventsResponse_Error{ + Retriable: false, + Message: err.Error(), + } + continue + } + goalBatchMessages = append(goalBatchMessages, event) + continue + } + if ptypes.Is(event.Event, grpcEvaluationEvent) { + errorCode, err := validator.validate(ctx) + if err != nil { + 
eventCounter.WithLabelValues(callerGatewayService, typeEvaluation, errorCode).Inc() + errs[event.Id] = &gwproto.RegisterEventsResponse_Error{ + Retriable: false, + Message: err.Error(), + } + continue + } + evaluation, tag, err := s.convToEvaluation(ctx, event) + if err != nil { + eventCounter.WithLabelValues(callerGatewayService, typeEvaluation, codeEvaluationConversionFailed).Inc() + errs[event.Id] = &gwproto.RegisterEventsResponse_Error{ + Retriable: false, + Message: err.Error(), + } + continue + } + if err := s.upsertUserEvaluation(ctx, envAPIKey.EnvironmentNamespace, tag, evaluation); err != nil { + eventCounter.WithLabelValues(callerGatewayService, typeEvaluation, codeUpsertUserEvaluationFailed).Inc() + errs[event.Id] = &gwproto.RegisterEventsResponse_Error{ + Retriable: true, + Message: "Failed to upsert user evaluation", + } + continue + } + evaluationMessages = append(evaluationMessages, event) + } + if ptypes.Is(event.Event, grpcMetricsEvent) { + errorCode, err := validator.validate(ctx) + if err != nil { + eventCounter.WithLabelValues(callerGatewayService, typeMetrics, errorCode).Inc() + errs[event.Id] = &gwproto.RegisterEventsResponse_Error{ + Retriable: false, + Message: err.Error(), + } + continue + } + metricsMessages = append(metricsMessages, event) + } + } + publish(s.goalPublisher, goalMessages, typeGoal) + publish(s.goalBatchPublisher, goalBatchMessages, typeGoalBatch) + publish(s.evaluationPublisher, evaluationMessages, typeEvaluation) + publish(s.metricsPublisher, metricsMessages, typeMetrics) + if len(errs) > 0 { + if s.containsInvalidTimestampError(errs) { + eventCounter.WithLabelValues(callerGatewayService, typeRegisterEvent, codeInvalidTimestampRequest).Inc() + } + } else { + eventCounter.WithLabelValues(callerGatewayService, typeRegisterEvent, codeOK).Inc() + } + return &gwproto.RegisterEventsResponse{Errors: errs}, nil +} + +func (s *grpcGatewayService) containsInvalidTimestampError(errs map[string]*gwproto.RegisterEventsResponse_Error) 
bool { + for _, v := range errs { + if v.Message == errInvalidTimestamp.Error() { + return true + } + } + return false +} + +func (s *grpcGatewayService) checkRequest(ctx context.Context) (*accountproto.EnvironmentAPIKey, error) { + if isContextCanceled(ctx) { + s.logger.Warn( + "Request was canceled", + log.FieldsFromImcomingContext(ctx)..., + ) + return nil, ErrContextCanceled + } + envAPIKey, err := s.getEnvironmentAPIKey(ctx) + if err != nil { + return nil, err + } + if err := checkEnvironmentAPIKey(envAPIKey, accountproto.APIKey_SDK); err != nil { + return nil, err + } + return envAPIKey, nil +} + +func (s *grpcGatewayService) getEnvironmentAPIKey(ctx context.Context) (*accountproto.EnvironmentAPIKey, error) { + id, err := s.extractAPIKeyID(ctx) + if err != nil { + return nil, err + } + k, err, _ := s.flightgroup.Do( + environmentAPIKeyFlightID(id), + func() (interface{}, error) { + return getEnvironmentAPIKey( + ctx, + id, + s.accountClient, + s.environmentAPIKeyCache, + callerGatewayService, + s.logger, + ) + }, + ) + if err != nil { + return nil, err + } + envAPIKey := k.(*accountproto.EnvironmentAPIKey) + return envAPIKey, nil +} + +func (s *grpcGatewayService) extractAPIKeyID(ctx context.Context) (string, error) { + md, ok := gmetadata.FromIncomingContext(ctx) + if !ok { + return "", ErrMissingAPIKey + } + keys, ok := md["authorization"] + if !ok || len(keys) == 0 || keys[0] == "" { + return "", ErrMissingAPIKey + } + return keys[0], nil +} + +func environmentAPIKeyFlightID(id string) string { + return id +} + +func getEnvironmentAPIKey( + ctx context.Context, + id string, + accountClient accountclient.Client, + environmentAPIKeyCache cachev3.EnvironmentAPIKeyCache, + caller string, + logger *zap.Logger, +) (*accountproto.EnvironmentAPIKey, error) { + envAPIKey, err := getEnvironmentAPIKeyFromCache(ctx, id, environmentAPIKeyCache, caller, cacheLayerExternal) + if err == nil { + return envAPIKey, nil + } + resp, err := 
accountClient.GetAPIKeyBySearchingAllEnvironments( + ctx, + &accountproto.GetAPIKeyBySearchingAllEnvironmentsRequest{Id: id}, + ) + if err != nil { + if code := status.Code(err); code == codes.NotFound { + return nil, ErrInvalidAPIKey + } + logger.Error( + "Failed to get environment APIKey from account service", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, ErrInternal + } + envAPIKey = resp.EnvironmentApiKey + if err := environmentAPIKeyCache.Put(envAPIKey); err != nil { + logger.Error( + "Failed to cache environment APIKey", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", envAPIKey.EnvironmentNamespace), + )..., + ) + } + return envAPIKey, nil +} + +func getEnvironmentAPIKeyFromCache( + ctx context.Context, + id string, + c cachev3.EnvironmentAPIKeyCache, + caller, layer string, +) (*accountproto.EnvironmentAPIKey, error) { + envAPIKey, err := c.Get(id) + if err == nil { + cacheCounter.WithLabelValues(caller, typeAPIKey, layer, codeHit).Inc() + return envAPIKey, nil + } + cacheCounter.WithLabelValues(caller, typeAPIKey, layer, codeMiss).Inc() + return nil, err +} + +func checkEnvironmentAPIKey(environmentAPIKey *accountproto.EnvironmentAPIKey, role accountproto.APIKey_Role) error { + if environmentAPIKey.ApiKey.Role != role { + return ErrBadRole + } + if environmentAPIKey.EnvironmentDisabled { + return ErrDisabledAPIKey + } + if environmentAPIKey.ApiKey.Disabled { + return ErrDisabledAPIKey + } + return nil +} + +func isContextCanceled(ctx context.Context) bool { + return ctx.Err() == context.Canceled +} diff --git a/pkg/gateway/api/api_grpc_test.go b/pkg/gateway/api/api_grpc_test.go new file mode 100644 index 000000000..3978fc099 --- /dev/null +++ b/pkg/gateway/api/api_grpc_test.go @@ -0,0 +1,2242 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "context" + "errors" + "fmt" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + + accountclientmock "github.com/bucketeer-io/bucketeer/pkg/account/client/mock" + "github.com/bucketeer-io/bucketeer/pkg/cache" + cachev3mock "github.com/bucketeer-io/bucketeer/pkg/cache/v3/mock" + featureclientmock "github.com/bucketeer-io/bucketeer/pkg/feature/client/mock" + featuredomain "github.com/bucketeer-io/bucketeer/pkg/feature/domain" + ftsmock "github.com/bucketeer-io/bucketeer/pkg/feature/storage/mock" + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + publishermock "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher/mock" + "github.com/bucketeer-io/bucketeer/pkg/uuid" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/client" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" + gwproto "github.com/bucketeer-io/bucketeer/proto/gateway" + userproto "github.com/bucketeer-io/bucketeer/proto/user" +) + +func TestWithAPIKeyMemoryCacheTTL(t *testing.T) { + t.Parallel() + dur := time.Second + f := 
WithAPIKeyMemoryCacheTTL(dur) + opt := &options{} + f(opt) + assert.Equal(t, dur, opt.apiKeyMemoryCacheTTL) +} + +func TestWithAPIKeyMemoryCacheEvictionInterval(t *testing.T) { + t.Parallel() + dur := time.Second + f := WithAPIKeyMemoryCacheEvictionInterval(dur) + opt := &options{} + f(opt) + assert.Equal(t, dur, opt.apiKeyMemoryCacheEvictionInterval) +} + +func TestWithMetrics(t *testing.T) { + t.Parallel() + metrics := metrics.NewMetrics( + 9999, + "/metrics", + ) + reg := metrics.DefaultRegisterer() + f := WithMetrics(reg) + opt := &options{} + f(opt) + assert.Equal(t, reg, opt.metrics) +} + +func TestWithLogger(t *testing.T) { + t.Parallel() + logger, err := log.NewLogger() + require.NoError(t, err) + f := WithLogger(logger) + opt := &options{} + f(opt) + assert.Equal(t, logger, opt.logger) +} + +func TestNewGrpcGatewayService(t *testing.T) { + t.Parallel() + g := NewGrpcGatewayService(nil, nil, nil, nil, nil, nil, nil, nil, nil) + assert.IsType(t, &grpcGatewayService{}, g) +} + +func TestGrpcExtractAPIKeyID(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + testcases := []struct { + ctx context.Context + key string + failed bool + }{ + { + ctx: metadata.NewIncomingContext(context.TODO(), metadata.MD{}), + key: "", + failed: true, + }, + { + ctx: metadata.NewIncomingContext(context.TODO(), metadata.MD{ + "authorization": []string{}, + }), + key: "", + failed: true, + }, + { + ctx: metadata.NewIncomingContext(context.TODO(), metadata.MD{ + "authorization": []string{""}, + }), + key: "", + failed: true, + }, + { + ctx: metadata.NewIncomingContext(context.TODO(), metadata.MD{ + "authorization": []string{"test-key"}, + }), + key: "test-key", + failed: false, + }, + } + for i, tc := range testcases { + des := fmt.Sprintf("index %d", i) + gs := newGrpcGatewayServiceWithMock(t, mockController) + key, err := gs.extractAPIKeyID(tc.ctx) + assert.Equal(t, tc.key, key, des) + assert.Equal(t, tc.failed, err != nil, 
des) + } +} + +func TestGrpcGetEnvironmentAPIKey(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*grpcGatewayService) + ctx context.Context + expected *accountproto.EnvironmentAPIKey + expectedErr error + }{ + "exists in redis": { + setup: func(gs *grpcGatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{Id: "id-0"}, + }, nil) + }, + ctx: metadata.NewIncomingContext(context.TODO(), metadata.MD{ + "authorization": []string{"test-key"}, + }), + expected: &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{Id: "id-0"}, + }, + expectedErr: nil, + }, + "ErrInvalidAPIKey": { + setup: func(gs *grpcGatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + nil, cache.ErrNotFound) + gs.accountClient.(*accountclientmock.MockClient).EXPECT().GetAPIKeyBySearchingAllEnvironments(gomock.Any(), gomock.Any()).Return( + nil, status.Errorf(codes.NotFound, "test")) + }, + ctx: metadata.NewIncomingContext(context.TODO(), metadata.MD{ + "authorization": []string{"test-key"}, + }), + expected: nil, + expectedErr: ErrInvalidAPIKey, + }, + "ErrInternal": { + setup: func(gs *grpcGatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + nil, cache.ErrNotFound) + gs.accountClient.(*accountclientmock.MockClient).EXPECT().GetAPIKeyBySearchingAllEnvironments(gomock.Any(), gomock.Any()).Return( + nil, status.Errorf(codes.Unknown, "test")) + }, + ctx: metadata.NewIncomingContext(context.TODO(), metadata.MD{ + "authorization": []string{"test-key"}, + }), + expected: nil, + expectedErr: ErrInternal, + }, + "success": { + setup: func(gs *grpcGatewayService) 
{ + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + nil, cache.ErrNotFound) + gs.accountClient.(*accountclientmock.MockClient).EXPECT().GetAPIKeyBySearchingAllEnvironments(gomock.Any(), gomock.Any()).Return( + &accountproto.GetAPIKeyBySearchingAllEnvironmentsResponse{EnvironmentApiKey: &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{Id: "id-0"}, + }}, nil) + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Put(gomock.Any()).Return(nil) + }, + ctx: metadata.NewIncomingContext(context.TODO(), metadata.MD{ + "authorization": []string{"test-key"}, + }), + expected: &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{Id: "id-0"}, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + gs := newGrpcGatewayServiceWithMock(t, mockController) + p.setup(gs) + actual, err := gs.getEnvironmentAPIKey(p.ctx) + assert.Equal(t, p.expected, actual, "%s", msg) + assert.Equal(t, p.expectedErr, err, "%s", msg) + } +} + +func TestGrpcGetEnvironmentAPIKeyFromCache(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*cachev3mock.MockEnvironmentAPIKeyCache) + expected *accountproto.EnvironmentAPIKey + expectedErr error + }{ + "no error": { + setup: func(mtf *cachev3mock.MockEnvironmentAPIKeyCache) { + mtf.EXPECT().Get(gomock.Any()).Return(&accountproto.EnvironmentAPIKey{}, nil) + }, + expected: &accountproto.EnvironmentAPIKey{}, + expectedErr: nil, + }, + "error": { + setup: func(mtf *cachev3mock.MockEnvironmentAPIKeyCache) { + mtf.EXPECT().Get(gomock.Any()).Return(nil, cache.ErrNotFound) + }, + expected: nil, + expectedErr: cache.ErrNotFound, + }, + } + for msg, p := range patterns { + mock := cachev3mock.NewMockEnvironmentAPIKeyCache(mockController) + p.setup(mock) + actual, err := 
getEnvironmentAPIKeyFromCache(context.Background(), "id", mock, "caller", "layer") + assert.Equal(t, p.expected, actual, "%s", msg) + assert.Equal(t, p.expectedErr, err, "%s", msg) + } +} + +func TestGrpcCheckEnvironmentAPIKey(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + inputEnvAPIKey *accountproto.EnvironmentAPIKey + inputRole accountproto.APIKey_Role + expected error + }{ + "ErrBadRole": { + inputEnvAPIKey: &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SERVICE, + Disabled: false, + }, + }, + inputRole: accountproto.APIKey_SDK, + expected: ErrBadRole, + }, + "ErrDisabledAPIKey: environment disabled": { + inputEnvAPIKey: &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + EnvironmentDisabled: true, + }, + inputRole: accountproto.APIKey_SDK, + expected: ErrDisabledAPIKey, + }, + "ErrDisabledAPIKey: api key disabled": { + inputEnvAPIKey: &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: true, + }, + EnvironmentDisabled: false, + }, + inputRole: accountproto.APIKey_SDK, + expected: ErrDisabledAPIKey, + }, + "no error": { + inputEnvAPIKey: &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, + inputRole: accountproto.APIKey_SDK, + expected: nil, + }, + } + for msg, p := range patterns { + actual := checkEnvironmentAPIKey(p.inputEnvAPIKey, p.inputRole) + assert.Equal(t, p.expected, actual, "%s", msg) + } +} + +func TestGrpcValidateGetEvaluationsRequest(t *testing.T) { + t.Parallel() + patterns := map[string]struct { + input 
*gwproto.GetEvaluationsRequest + expected error + }{ + "tag is empty": { + input: &gwproto.GetEvaluationsRequest{}, + expected: ErrTagRequired, + }, + "user is empty": { + input: &gwproto.GetEvaluationsRequest{Tag: "test"}, + expected: ErrUserRequired, + }, + "user ID is empty": { + input: &gwproto.GetEvaluationsRequest{Tag: "test", User: &userproto.User{}}, + expected: ErrUserIDRequired, + }, + "pass": { + input: &gwproto.GetEvaluationsRequest{Tag: "test", User: &userproto.User{Id: "id"}}, + }, + } + gs := grpcGatewayService{} + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + actual := gs.validateGetEvaluationsRequest(p.input) + assert.Equal(t, p.expected, actual) + }) + } +} + +func TestGrpcValidateGetEvaluationRequest(t *testing.T) { + t.Parallel() + patterns := map[string]struct { + input *gwproto.GetEvaluationRequest + expected error + }{ + "tag is empty": { + input: &gwproto.GetEvaluationRequest{}, + expected: ErrTagRequired, + }, + "user is empty": { + input: &gwproto.GetEvaluationRequest{Tag: "test"}, + expected: ErrUserRequired, + }, + "user ID is empty": { + input: &gwproto.GetEvaluationRequest{Tag: "test", User: &userproto.User{}}, + expected: ErrUserIDRequired, + }, + "feature ID is empty": { + input: &gwproto.GetEvaluationRequest{Tag: "test", User: &userproto.User{Id: "id"}}, + expected: ErrFeatureIDRequired, + }, + "pass": { + input: &gwproto.GetEvaluationRequest{Tag: "test", User: &userproto.User{Id: "id"}, FeatureId: "id"}, + }, + } + gs := grpcGatewayService{} + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + actual := gs.validateGetEvaluationRequest(p.input) + assert.Equal(t, p.expected, actual) + }) + } +} + +func TestGrpcGetFeaturesFromCache(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*cachev3mock.MockFeaturesCache) + environmentNamespace string + expected *featureproto.Features + expectedErr error 
+ }{ + "no error": { + setup: func(mtf *cachev3mock.MockFeaturesCache) { + mtf.EXPECT().Get(gomock.Any()).Return(&featureproto.Features{}, nil) + }, + environmentNamespace: "ns0", + expected: &featureproto.Features{}, + expectedErr: nil, + }, + "error": { + setup: func(mtf *cachev3mock.MockFeaturesCache) { + mtf.EXPECT().Get(gomock.Any()).Return(nil, cache.ErrNotFound) + }, + environmentNamespace: "ns0", + expected: nil, + expectedErr: cache.ErrNotFound, + }, + } + for msg, p := range patterns { + mtfc := cachev3mock.NewMockFeaturesCache(mockController) + p.setup(mtfc) + gs := grpcGatewayService{featuresCache: mtfc} + actual, err := gs.getFeaturesFromCache(context.Background(), p.environmentNamespace) + assert.Equal(t, p.expected, actual, "%s", msg) + assert.Equal(t, p.expectedErr, err, "%s", msg) + } +} + +func TestGrpcGetFeatures(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*grpcGatewayService) + environmentNamespace string + expected []*featureproto.Feature + expectedErr error + }{ + "exists in redis": { + setup: func(gs *grpcGatewayService) { + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: []*featureproto.Feature{{}}, + }, nil) + }, + environmentNamespace: "ns0", + expectedErr: nil, + expected: []*featureproto.Feature{{}}, + }, + "listFeatures fails": { + setup: func(gs *grpcGatewayService) { + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + nil, cache.ErrNotFound) + gs.featureClient.(*featureclientmock.MockClient).EXPECT().ListFeatures(gomock.Any(), gomock.Any()).Return( + nil, errors.New("test")) + }, + environmentNamespace: "ns0", + expected: nil, + expectedErr: ErrInternal, + }, + "success": { + setup: func(gs *grpcGatewayService) { + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + nil, 
cache.ErrNotFound) + gs.featureClient.(*featureclientmock.MockClient).EXPECT().ListFeatures(gomock.Any(), gomock.Any()).Return( + &featureproto.ListFeaturesResponse{Features: []*featureproto.Feature{ + { + Id: "id-0", + Enabled: true, + }, + }}, nil) + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Put(gomock.Any(), gomock.Any()).Return(nil) + }, + environmentNamespace: "ns0", + expected: []*featureproto.Feature{ + { + Id: "id-0", + Enabled: true, + }, + }, + expectedErr: nil, + }, + // TODO: add test for off-variation features + } + for msg, p := range patterns { + gs := newGrpcGatewayServiceWithMock(t, mockController) + p.setup(gs) + actual, err := gs.getFeatures(context.Background(), p.environmentNamespace) + assert.Equal(t, p.expected, actual, "%s", msg) + assert.Equal(t, p.expectedErr, err, "%s", msg) + } +} + +func TestGrpcGetEvaluationsContextCanceled(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + cancel bool + expected *gwproto.GetEvaluationsResponse + expectedErr error + }{ + "error: context canceled": { + cancel: true, + expected: nil, + expectedErr: ErrContextCanceled, + }, + "error: missing API key": { + cancel: false, + expected: nil, + expectedErr: ErrMissingAPIKey, + }, + } + for msg, p := range patterns { + gs := newGrpcGatewayServiceWithMock(t, mockController) + ctx, cancel := context.WithCancel(context.Background()) + if p.cancel { + cancel() + } else { + defer cancel() + } + actual, err := gs.GetEvaluations(ctx, &gwproto.GetEvaluationsRequest{}) + assert.Equal(t, p.expected, actual, "%s", msg) + assert.Equal(t, p.expectedErr, err, "%s", msg) + } +} + +func TestGrpcGetEvaluationsValidation(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*grpcGatewayService) + input *gwproto.GetEvaluationsRequest + expected 
*gwproto.GetEvaluationsResponse + expectedErr error + }{ + "missing tag": { + setup: func(gs *grpcGatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + }, + input: &gwproto.GetEvaluationsRequest{User: &userproto.User{Id: "id-0"}}, + expected: nil, + expectedErr: ErrTagRequired, + }, + "missing user id": { + setup: func(gs *grpcGatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + }, + input: &gwproto.GetEvaluationsRequest{Tag: "test"}, + expected: nil, + expectedErr: ErrUserRequired, + }, + "success": { + setup: func(gs *grpcGatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: []*featureproto.Feature{}, + }, nil) + gs.userPublisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + }, + input: &gwproto.GetEvaluationsRequest{Tag: "test", User: &userproto.User{Id: "id-0"}}, + expected: &gwproto.GetEvaluationsResponse{ + State: featureproto.UserEvaluations_FULL, + Evaluations: nil, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + gs := newGrpcGatewayServiceWithMock(t, mockController) + p.setup(gs) + ctx := 
metadata.NewIncomingContext(context.TODO(), metadata.MD{ + "authorization": []string{"test-key"}, + }) + actual, err := gs.GetEvaluations(ctx, p.input) + assert.Equal(t, p.expected, actual, "%s", msg) + assert.Equal(t, p.expectedErr, err, "%s", msg) + } +} + +func TestGrpcGetEvaluationsZeroFeature(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*grpcGatewayService) + input *gwproto.GetEvaluationsRequest + expected *gwproto.GetEvaluationsResponse + expectedErr error + }{ + "zero feature": { + setup: func(gs *grpcGatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: []*featureproto.Feature{}, + }, nil) + gs.userPublisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + }, + input: &gwproto.GetEvaluationsRequest{Tag: "test", User: &userproto.User{Id: "id-0"}}, + expected: &gwproto.GetEvaluationsResponse{ + State: featureproto.UserEvaluations_FULL, + Evaluations: nil, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + gs := newGrpcGatewayServiceWithMock(t, mockController) + p.setup(gs) + ctx := metadata.NewIncomingContext(context.TODO(), metadata.MD{ + "authorization": []string{"test-key"}, + }) + actual, err := gs.GetEvaluations(ctx, p.input) + assert.Equal(t, p.expected, actual, "%s", msg) + assert.Equal(t, p.expected.State, actual.State, "%s", msg) + assert.Equal(t, p.expectedErr, err, "%s", msg) + assert.Empty(t, actual.UserEvaluationsId, "%s", msg) + } +} + +func TestGrpcGetEvaluationsUserEvaluationsID(t *testing.T) { + 
t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + vID1 := newUUID(t) + vID2 := newUUID(t) + vID3 := newUUID(t) + vID4 := newUUID(t) + vID5 := newUUID(t) + vID6 := newUUID(t) + + features := []*featureproto.Feature{ + { + Id: newUUID(t), + Variations: []*featureproto.Variation{ + { + Id: vID1, + Value: "true", + }, + { + Id: newUUID(t), + Value: "false", + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: vID1, + }, + }, + Tags: []string{"android"}, + }, + { + Id: newUUID(t), + Variations: []*featureproto.Variation{ + { + Id: newUUID(t), + Value: "true", + }, + { + Id: vID2, + Value: "false", + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: vID2, + }, + }, + Tags: []string{"android"}, + }, + } + + features2 := []*featureproto.Feature{ + { + Id: newUUID(t), + Variations: []*featureproto.Variation{ + { + Id: vID3, + Value: "true", + }, + { + Id: newUUID(t), + Value: "false", + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: vID3, + }, + }, + Tags: []string{"ios"}, + }, + { + Id: newUUID(t), + Variations: []*featureproto.Variation{ + { + Id: newUUID(t), + Value: "true", + }, + { + Id: vID4, + Value: "false", + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: vID4, + }, + }, + Tags: []string{"ios"}, + }, + } + + features3 := []*featureproto.Feature{ + { + Id: newUUID(t), + Variations: []*featureproto.Variation{ + { + Id: vID5, + Value: "true", + }, + { + Id: newUUID(t), + Value: "false", + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: vID5, + 
}, + }, + Tags: []string{"web"}, + }, + { + Id: newUUID(t), + Variations: []*featureproto.Variation{ + { + Id: newUUID(t), + Value: "true", + }, + { + Id: vID6, + Value: "false", + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: vID6, + }, + }, + Tags: []string{"web"}, + }, + } + multiFeatures := append(features, features2...) + multiFeatures = append(multiFeatures, features3...) + userID := "user-id-0" + userMetadata := map[string]string{"b": "value-b", "c": "value-c", "a": "value-a", "d": "value-d"} + ueid := featuredomain.UserEvaluationsID(userID, nil, features) + ueidWithData := featuredomain.UserEvaluationsID(userID, userMetadata, features) + + patterns := map[string]struct { + setup func(*grpcGatewayService) + input *gwproto.GetEvaluationsRequest + expected *gwproto.GetEvaluationsResponse + expectedErr error + expectedEvaluationsAssert func(assert.TestingT, interface{}, ...interface{}) bool + }{ + "user evaluations id not set": { + setup: func(gs *grpcGatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: features, + }, nil) + gs.userPublisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil).MaxTimes(1) + }, + input: &gwproto.GetEvaluationsRequest{ + Tag: "test", + User: &userproto.User{ + Id: userID, + Data: userMetadata, + }, + }, + expected: &gwproto.GetEvaluationsResponse{ + State: featureproto.UserEvaluations_FULL, + UserEvaluationsId: ueidWithData, + }, + expectedErr: nil, + expectedEvaluationsAssert: assert.NotNil, + }, + "user evaluations id is the 
same": { + setup: func(gs *grpcGatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: multiFeatures, + }, nil) + gs.userPublisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil).MaxTimes(1) + }, + input: &gwproto.GetEvaluationsRequest{ + Tag: "test", + User: &userproto.User{ + Id: userID, + Data: userMetadata, + }, + UserEvaluationsId: featuredomain.UserEvaluationsID(userID, userMetadata, multiFeatures), + }, + expected: &gwproto.GetEvaluationsResponse{ + State: featureproto.UserEvaluations_FULL, + UserEvaluationsId: featuredomain.UserEvaluationsID(userID, userMetadata, multiFeatures), + }, + expectedErr: nil, + expectedEvaluationsAssert: assert.Nil, + }, + "user evaluations id is different": { + setup: func(gs *grpcGatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: features, + }, nil) + gs.userPublisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil).MaxTimes(1) + }, + input: &gwproto.GetEvaluationsRequest{ + Tag: "test", + User: &userproto.User{ + Id: userID, + Data: userMetadata, + }, + UserEvaluationsId: "evaluation-id", + }, + expected: &gwproto.GetEvaluationsResponse{ + State: featureproto.UserEvaluations_FULL, + UserEvaluationsId: ueidWithData, + 
}, + expectedErr: nil, + expectedEvaluationsAssert: assert.NotNil, + }, + "user_with_no_metadata_and_the_id_is_same": { + setup: func(gs *grpcGatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: features, + }, nil) + gs.userPublisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil).MaxTimes(1) + }, + input: &gwproto.GetEvaluationsRequest{ + Tag: "test", + User: &userproto.User{Id: userID}, + UserEvaluationsId: ueid, + }, + expected: &gwproto.GetEvaluationsResponse{ + State: featureproto.UserEvaluations_FULL, + UserEvaluationsId: ueid, + }, + expectedErr: nil, + expectedEvaluationsAssert: assert.Nil, + }, + "user_with_no_metadata_and_the_id_is_different": { + setup: func(gs *grpcGatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: features, + }, nil) + gs.userPublisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil).MaxTimes(1) + }, + input: &gwproto.GetEvaluationsRequest{ + Tag: "test", + User: &userproto.User{Id: userID}, + UserEvaluationsId: "evaluation-id", + }, + expected: &gwproto.GetEvaluationsResponse{ + State: featureproto.UserEvaluations_FULL, + UserEvaluationsId: ueid, + }, + expectedErr: nil, + expectedEvaluationsAssert: assert.NotNil, + }, + 
} + for msg, p := range patterns { + gs := newGrpcGatewayServiceWithMock(t, mockController) + p.setup(gs) + ctx := metadata.NewIncomingContext(context.TODO(), metadata.MD{ + "authorization": []string{"test-key"}, + }) + actual, err := gs.GetEvaluations(ctx, p.input) + assert.Equal(t, p.expected.State, actual.State, "%s", msg) + assert.Equal(t, p.expected.UserEvaluationsId, actual.UserEvaluationsId, "%s", msg) + p.expectedEvaluationsAssert(t, actual.Evaluations, "%s", msg) + assert.Equal(t, p.expectedErr, err, "%s", msg) + } +} + +func TestGrpcGetEvaluationsNoSegmentList(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + vID1 := newUUID(t) + vID2 := newUUID(t) + vID3 := newUUID(t) + vID4 := newUUID(t) + + patterns := map[string]struct { + setup func(*grpcGatewayService) + input *gwproto.GetEvaluationsRequest + expected *gwproto.GetEvaluationsResponse + expectedErr error + }{ + "state: full": { + setup: func(gs *grpcGatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: []*featureproto.Feature{ + { + Id: "feature-a", + Variations: []*featureproto.Variation{ + { + Id: vID1, + Value: "true", + }, + { + Id: newUUID(t), + Value: "false", + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: vID1, + }, + }, + Tags: []string{"android"}, + }, + { + Id: "feature-b", + Variations: []*featureproto.Variation{ + { + Id: newUUID(t), + Value: "true", + }, + { + Id: vID2, + Value: "false", + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: 
featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: vID2, + }, + }, + Tags: []string{"android"}, + }, + { + Id: "feature-c", + Variations: []*featureproto.Variation{ + { + Id: vID3, + Value: "true", + }, + { + Id: newUUID(t), + Value: "false", + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: vID3, + }, + }, + Tags: []string{"ios"}, + }, + { + Id: "feature-d", + Variations: []*featureproto.Variation{ + { + Id: newUUID(t), + Value: "true", + }, + { + Id: vID4, + Value: "false", + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: vID4, + }, + }, + Tags: []string{"ios"}, + }, + }, + }, nil) + gs.userPublisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil).MaxTimes(1) + }, + input: &gwproto.GetEvaluationsRequest{Tag: "ios", User: &userproto.User{Id: "id-0"}}, + expected: &gwproto.GetEvaluationsResponse{ + State: featureproto.UserEvaluations_FULL, + Evaluations: &featureproto.UserEvaluations{ + Evaluations: []*featureproto.Evaluation{ + { + VariationId: vID3, + }, + { + VariationId: vID4, + }, + }, + }, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + gs := newGrpcGatewayServiceWithMock(t, mockController) + p.setup(gs) + ctx := metadata.NewIncomingContext(context.TODO(), metadata.MD{ + "authorization": []string{"test-key"}, + }) + actual, err := gs.GetEvaluations(ctx, p.input) + ev := p.expected.Evaluations.Evaluations + av := actual.Evaluations.Evaluations + assert.Equal(t, len(ev), len(av), "%s", msg) + assert.Equal(t, p.expected.State, actual.State, "%s", msg) + assert.Equal(t, ev[0].VariationId, av[0].VariationId, "%s", msg) + assert.Equal(t, ev[1].VariationId, av[1].VariationId, "%s", msg) + assert.NotEmpty(t, actual.UserEvaluationsId, "%s", msg) + require.NoError(t, err) + } 
+} + +func TestGrpcGetEvaluationsEvaluteFeatures(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*grpcGatewayService) + input *gwproto.GetEvaluationsRequest + expected *gwproto.GetEvaluationsResponse + expectedErr error + }{ + "errInternal": { + setup: func(gs *grpcGatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: []*featureproto.Feature{ + { + Variations: []*featureproto.Variation{ + { + Id: "variation-a", + Value: "true", + }, + { + Id: "variation-b", + Value: "false", + }, + }, + Rules: []*featureproto.Rule{ + { + Id: "rule-1", + Strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-b", + }, + }, + Clauses: []*featureproto.Clause{ + { + Id: "clause-1", + Attribute: "name", + Operator: featureproto.Clause_SEGMENT, + Values: []string{ + "id-0", + }, + }, + }, + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-b", + }, + }, + Tags: []string{"test"}, + }, + }, + }, nil) + gs.userPublisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + gs.segmentUsersCache.(*cachev3mock.MockSegmentUsersCache).EXPECT().Get(gomock.Any(), gomock.Any()).Return( + nil, errors.New("random error")) + gs.featureClient.(*featureclientmock.MockClient).EXPECT().ListSegmentUsers(gomock.Any(), gomock.Any()).Return( + nil, ErrInternal) + }, + input: 
&gwproto.GetEvaluationsRequest{Tag: "test", User: &userproto.User{Id: "id-0"}}, + expected: nil, + expectedErr: ErrInternal, + }, + "state: full, evaluate features list segment from cache": { + setup: func(gs *grpcGatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + + Features: []*featureproto.Feature{ + { + Variations: []*featureproto.Variation{ + { + Id: "variation-a", + Value: "true", + }, + { + Id: "variation-b", + Value: "false", + }, + }, + Rules: []*featureproto.Rule{ + { + Id: "rule-1", + Strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-b", + }, + }, + Clauses: []*featureproto.Clause{ + { + Id: "clause-1", + Attribute: "name", + Operator: featureproto.Clause_SEGMENT, + Values: []string{ + "id-0", + }, + }, + }, + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-a", + }, + }, + Tags: []string{"test"}, + }, + }, + }, nil) + gs.segmentUsersCache.(*cachev3mock.MockSegmentUsersCache).EXPECT().Get(gomock.Any(), gomock.Any()).Return( + &featureproto.SegmentUsers{ + SegmentId: "segment-id", + Users: []*featureproto.SegmentUser{ + { + SegmentId: "segment-id", + UserId: "user-id-1", + State: featureproto.SegmentUser_INCLUDED, + Deleted: false, + }, + { + SegmentId: "segment-id", + UserId: "user-id-2", + State: featureproto.SegmentUser_INCLUDED, + Deleted: false, + }, + }, + }, nil) + gs.userPublisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + }, + 
input: &gwproto.GetEvaluationsRequest{Tag: "test", User: &userproto.User{Id: "id-0"}}, + expected: &gwproto.GetEvaluationsResponse{ + State: featureproto.UserEvaluations_FULL, + Evaluations: &featureproto.UserEvaluations{ + Evaluations: []*featureproto.Evaluation{ + { + VariationId: "variation-b", + Reason: &featureproto.Reason{ + Type: featureproto.Reason_DEFAULT, + }, + }, + }, + }, + }, + expectedErr: nil, + }, + "state: full, evaluate features list segment from storage": { + setup: func(gs *grpcGatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: []*featureproto.Feature{ + { + Variations: []*featureproto.Variation{ + { + Id: "variation-a", + Value: "true", + }, + { + Id: "variation-b", + Value: "false", + }, + }, + Rules: []*featureproto.Rule{ + { + Id: "rule-1", + Strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-b", + }, + }, + Clauses: []*featureproto.Clause{ + { + Id: "clause-1", + Attribute: "name", + Operator: featureproto.Clause_SEGMENT, + Values: []string{ + "id-0", + }, + }, + }, + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-b", + }, + }, + Tags: []string{"test"}, + }, + }, + }, nil) + gs.segmentUsersCache.(*cachev3mock.MockSegmentUsersCache).EXPECT().Get(gomock.Any(), gomock.Any()).Return( + nil, errors.New("random error")) + gs.segmentUsersCache.(*cachev3mock.MockSegmentUsersCache).EXPECT().Put(gomock.Any(), gomock.Any()).Return(nil) + 
gs.userPublisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + gs.featureClient.(*featureclientmock.MockClient).EXPECT().ListSegmentUsers(gomock.Any(), gomock.Any()).Return( + &featureproto.ListSegmentUsersResponse{}, nil) + }, + input: &gwproto.GetEvaluationsRequest{Tag: "test", User: &userproto.User{Id: "id-0"}}, + expected: &gwproto.GetEvaluationsResponse{ + State: featureproto.UserEvaluations_FULL, + Evaluations: &featureproto.UserEvaluations{ + Evaluations: []*featureproto.Evaluation{ + { + VariationId: "variation-b", + Reason: &featureproto.Reason{ + Type: featureproto.Reason_DEFAULT, + }, + }, + }, + }, + }, + expectedErr: nil, + }, + "state: full, evaluate features": { + setup: func(gs *grpcGatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: []*featureproto.Feature{ + { + Variations: []*featureproto.Variation{ + { + Id: "variation-a", + Value: "true", + }, + { + Id: "variation-b", + Value: "false", + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-b", + }, + }, + Tags: []string{"test"}, + }, + }, + }, nil) + gs.userPublisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + }, + input: &gwproto.GetEvaluationsRequest{Tag: "test", User: &userproto.User{Id: "id-0"}}, + expected: &gwproto.GetEvaluationsResponse{ + State: featureproto.UserEvaluations_FULL, + Evaluations: &featureproto.UserEvaluations{ + Evaluations: []*featureproto.Evaluation{ + { + VariationId: 
"variation-b", + Reason: &featureproto.Reason{ + Type: featureproto.Reason_DEFAULT, + }, + }, + }, + }, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + gs := newGrpcGatewayServiceWithMock(t, mockController) + p.setup(gs) + ctx := metadata.NewIncomingContext(context.TODO(), metadata.MD{ + "authorization": []string{"test-key"}, + }) + actual, err := gs.GetEvaluations(ctx, p.input) + if err != nil { + assert.Equal(t, p.expected, actual, "%s", msg) + assert.Equal(t, p.expectedErr, err, "%s", msg) + } else { + assert.Equal(t, len(p.expected.Evaluations.Evaluations), 1, "%s", msg) + assert.Equal(t, p.expected.State, actual.State, "%s", msg) + assert.Equal(t, p.expected.Evaluations.Evaluations[0].VariationId, "variation-b", "%s", msg) + assert.Equal(t, p.expected.Evaluations.Evaluations[0].Reason, actual.Evaluations.Evaluations[0].Reason, msg) + assert.NotEmpty(t, actual.UserEvaluationsId, "%s", msg) + require.NoError(t, err) + } + } +} + +func TestGrpcGetEvaluation(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*grpcGatewayService) + input *gwproto.GetEvaluationRequest + expectedFeatureID string + expectedErr error + }{ + "errFeatureNotFound": { + setup: func(gs *grpcGatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: []*featureproto.Feature{ + { + Id: "feature-id-1", + Variations: []*featureproto.Variation{ + { + Id: "variation-a", + Value: "true", + }, + { + Id: "variation-b", + Value: "false", + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: 
featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-b", + }, + }, + Tags: []string{"test"}, + }, + { + Id: "feature-id-2", + Variations: []*featureproto.Variation{ + { + Id: "variation-c", + Value: "true", + }, + { + Id: "variation-d", + Value: "false", + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-d", + }, + }, + Tags: []string{"test"}, + }, + }, + }, nil) + gs.userPublisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + }, + input: &gwproto.GetEvaluationRequest{Tag: "test", User: &userproto.User{Id: "id-0"}, FeatureId: "feature-id-3"}, + expectedFeatureID: "", + expectedErr: ErrFeatureNotFound, + }, + "errInternal": { + setup: func(gs *grpcGatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: []*featureproto.Feature{ + { + Id: "feature-id-1", + Variations: []*featureproto.Variation{ + { + Id: "variation-a", + Value: "true", + }, + { + Id: "variation-b", + Value: "false", + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-b", + }, + }, + Tags: []string{"test"}, + }, + { + Id: "feature-id-2", + Variations: []*featureproto.Variation{ + { + Id: "variation-c", + Value: "true", + }, + { + Id: "variation-d", + Value: "false", + }, + }, + Rules: []*featureproto.Rule{ + { + Id: "rule-1", + Strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: 
&featureproto.FixedStrategy{ + Variation: "variation-b", + }, + }, + Clauses: []*featureproto.Clause{ + { + Id: "clause-1", + Attribute: "name", + Operator: featureproto.Clause_SEGMENT, + Values: []string{ + "id-0", + }, + }, + }, + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-d", + }, + }, + Tags: []string{"test"}, + }, + }, + }, nil) + gs.userPublisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + gs.segmentUsersCache.(*cachev3mock.MockSegmentUsersCache).EXPECT().Get(gomock.Any(), gomock.Any()).Return( + nil, errors.New("random error")) + gs.featureClient.(*featureclientmock.MockClient).EXPECT().ListSegmentUsers(gomock.Any(), gomock.Any()).Return( + nil, ErrInternal) + }, + input: &gwproto.GetEvaluationRequest{Tag: "test", User: &userproto.User{Id: "id-0"}, FeatureId: "feature-id-2"}, + expectedFeatureID: "", + expectedErr: ErrInternal, + }, + "error while trying to upsert the user evaluation": { + setup: func(gs *grpcGatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: []*featureproto.Feature{ + { + Id: "feature-id-1", + Variations: []*featureproto.Variation{ + { + Id: "variation-a", + Value: "true", + }, + { + Id: "variation-b", + Value: "false", + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-b", + }, + }, + Tags: []string{"test"}, + }, + { + Id: "feature-id-2", + Variations: []*featureproto.Variation{ + { + Id: 
"variation-a", + Value: "true", + }, + { + Id: "variation-b", + Value: "false", + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-b", + }, + }, + Tags: []string{"test"}, + }, + }, + }, nil) + gs.userEvaluationStorage.(*ftsmock.MockUserEvaluationsStorage).EXPECT().UpsertUserEvaluation( + gomock.Any(), + gomock.Any(), + gomock.Any(), + gomock.Any(), + ).Return(errors.New("storage: internal")).MaxTimes(1) + gs.userPublisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + }, + input: &gwproto.GetEvaluationRequest{Tag: "test", User: &userproto.User{Id: "id-0"}, FeatureId: "feature-id-2"}, + expectedFeatureID: "", + expectedErr: ErrInternal, + }, + "return evaluation": { + setup: func(gs *grpcGatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: []*featureproto.Feature{ + { + Id: "feature-id-1", + Variations: []*featureproto.Variation{ + { + Id: "variation-a", + Value: "true", + }, + { + Id: "variation-b", + Value: "false", + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-b", + }, + }, + Tags: []string{"test"}, + }, + { + Id: "feature-id-2", + Variations: []*featureproto.Variation{ + { + Id: "variation-a", + Value: "true", + }, + { + Id: "variation-b", + Value: "false", + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-b", + 
}, + }, + Tags: []string{"test"}, + }, + }, + }, nil) + gs.userEvaluationStorage.(*ftsmock.MockUserEvaluationsStorage).EXPECT().UpsertUserEvaluation( + gomock.Any(), + gomock.Any(), + gomock.Any(), + gomock.Any(), + ).Return(nil).MaxTimes(1) + gs.userPublisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + }, + input: &gwproto.GetEvaluationRequest{Tag: "test", User: &userproto.User{Id: "id-0"}, FeatureId: "feature-id-2"}, + expectedFeatureID: "feature-id-2", + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + gs := newGrpcGatewayServiceWithMock(t, mockController) + p.setup(gs) + ctx := metadata.NewIncomingContext(context.TODO(), metadata.MD{ + "authorization": []string{"test-key"}, + }) + actual, err := gs.GetEvaluation(ctx, p.input) + assert.Equal(t, p.expectedErr, err) + if err == nil { + assert.Equal(t, p.expectedFeatureID, actual.Evaluation.FeatureId) + } + }) + } +} + +func TestGrpcRegisterEventsContextCanceled(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + cancel bool + expected *gwproto.RegisterEventsResponse + expectedErr error + }{ + "error: context canceled": { + cancel: true, + expected: nil, + expectedErr: ErrContextCanceled, + }, + "error: missing API key": { + cancel: false, + expected: nil, + expectedErr: ErrMissingAPIKey, + }, + } + for msg, p := range patterns { + gs := newGrpcGatewayServiceWithMock(t, mockController) + ctx, cancel := context.WithCancel(context.Background()) + if p.cancel { + cancel() + } else { + defer cancel() + } + actual, err := gs.RegisterEvents(ctx, &gwproto.RegisterEventsRequest{}) + assert.Equal(t, p.expected, actual, "%s", msg) + assert.Equal(t, p.expectedErr, err, "%s", msg) + } +} + +func TestGrcpRegisterEvents(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + 
bGoalEvent, err := proto.Marshal(&eventproto.GoalEvent{Timestamp: time.Now().Unix()}) + if err != nil { + t.Fatal("could not serialize goal event") + } + bGoalBatchEvent, err := proto.Marshal(&eventproto.GoalBatchEvent{ + UserId: "0efe416e-2fd2-4996-b5c3-194f05444f1f", + UserGoalEventsOverTags: []*eventproto.UserGoalEventsOverTag{ + { + Tag: "tag", + }, + }, + }) + if err != nil { + t.Fatal("could not serialize goal batch event") + } + bEvaluationEvent, err := proto.Marshal(&eventproto.EvaluationEvent{Timestamp: time.Now().Unix()}) + if err != nil { + t.Fatal("could not serialize evaluation event") + } + bInvalidEvent, err := proto.Marshal(&any.Any{}) + if err != nil { + t.Fatal("could not serialize experiment event") + } + bMetricsEvent, err := proto.Marshal(&eventproto.MetricsEvent{Timestamp: time.Now().Unix()}) + if err != nil { + t.Fatal("could not serialize metrics event") + } + uuid0 := newUUID(t) + uuid1 := newUUID(t) + uuid2 := newUUID(t) + uuid3 := newUUID(t) + + patterns := map[string]struct { + setup func(*grpcGatewayService) + input *gwproto.RegisterEventsRequest + expected *gwproto.RegisterEventsResponse + expectedErr error + }{ + "error: zero event": { + setup: func(gs *grpcGatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + }, + input: &gwproto.RegisterEventsRequest{}, + expectedErr: ErrMissingEvents, + }, + "error: ErrMissingEventID": { + setup: func(gs *grpcGatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + }, + input: &gwproto.RegisterEventsRequest{ + 
Events: []*eventproto.Event{ + { + Id: "", + }, + }, + }, + expectedErr: ErrMissingEventID, + }, + "error: invalid message type": { + setup: func(gs *grpcGatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.goalPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + gs.goalBatchPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + gs.evaluationPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + gs.metricsPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + }, + input: &gwproto.RegisterEventsRequest{ + Events: []*eventproto.Event{ + { + Id: uuid0, + Event: &any.Any{ + TypeUrl: "github.com/golang/protobuf/ptypes/any", + Value: bInvalidEvent, + }, + }, + }, + }, + expected: &gwproto.RegisterEventsResponse{ + Errors: map[string]*gwproto.RegisterEventsResponse_Error{ + uuid0: { + Retriable: false, + Message: "Invalid message type", + }, + }, + }, + expectedErr: nil, + }, + "error while trying to upsert the user evaluation": { + setup: func(gs *grpcGatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.goalPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + gs.goalBatchPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti(gomock.Any(), 
gomock.Any()).Return( + nil).MaxTimes(1) + gs.evaluationPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + gs.metricsPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + gs.userEvaluationStorage.(*ftsmock.MockUserEvaluationsStorage).EXPECT().UpsertUserEvaluation( + gomock.Any(), + gomock.Any(), + gomock.Any(), + gomock.Any(), + ).Return(errors.New("storage: internal")).MaxTimes(1) + }, + input: &gwproto.RegisterEventsRequest{ + Events: []*eventproto.Event{ + { + Id: uuid0, + Event: &any.Any{ + TypeUrl: "github.com/bucketeer-io/bucketeer/proto/event/client/bucketeer.event.client.GoalEvent", + Value: bGoalEvent, + }, + }, + { + Id: uuid1, + Event: &any.Any{ + TypeUrl: "github.com/bucketeer-io/bucketeer/proto/event/client/bucketeer.event.client.EvaluationEvent", + Value: bEvaluationEvent, + }, + }, + { + Id: uuid2, + Event: &any.Any{ + TypeUrl: "github.com/bucketeer-io/bucketeer/proto/event/client/bucketeer.event.client.MetricsEvent", + Value: bMetricsEvent, + }, + }, + { + Id: uuid3, + Event: &any.Any{ + TypeUrl: "github.com/bucketeer-io/bucketeer/proto/event/client/bucketeer.event.client.GoalBatchEvent", + Value: bGoalBatchEvent, + }, + }, + }, + }, + expected: &gwproto.RegisterEventsResponse{ + Errors: map[string]*gwproto.RegisterEventsResponse_Error{ + uuid1: { + Retriable: true, + Message: "Failed to upsert user evaluation", + }, + }, + }, + expectedErr: nil, + }, + "success": { + setup: func(gs *grpcGatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.goalPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + 
gs.goalBatchPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + gs.evaluationPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + gs.metricsPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + gs.userEvaluationStorage.(*ftsmock.MockUserEvaluationsStorage).EXPECT().UpsertUserEvaluation( + gomock.Any(), + gomock.Any(), + gomock.Any(), + gomock.Any(), + ).Return(nil).MaxTimes(1) + }, + input: &gwproto.RegisterEventsRequest{ + Events: []*eventproto.Event{ + { + Id: uuid0, + Event: &any.Any{ + TypeUrl: "github.com/bucketeer-io/bucketeer/proto/event/client/bucketeer.event.client.GoalEvent", + Value: bGoalEvent, + }, + }, + { + Id: uuid1, + Event: &any.Any{ + TypeUrl: "github.com/bucketeer-io/bucketeer/proto/event/client/bucketeer.event.client.EvaluationEvent", + Value: bEvaluationEvent, + }, + }, + { + Id: uuid2, + Event: &any.Any{ + TypeUrl: "github.com/bucketeer-io/bucketeer/proto/event/client/bucketeer.event.client.MetricsEvent", + Value: bMetricsEvent, + }, + }, + { + Id: uuid3, + Event: &any.Any{ + TypeUrl: "github.com/bucketeer-io/bucketeer/proto/event/client/bucketeer.event.client.GoalBatchEvent", + Value: bGoalBatchEvent, + }, + }, + }, + }, + expected: &gwproto.RegisterEventsResponse{Errors: make(map[string]*gwproto.RegisterEventsResponse_Error)}, + expectedErr: nil, + }, + } + for msg, p := range patterns { + gs := newGrpcGatewayServiceWithMock(t, mockController) + p.setup(gs) + ctx := metadata.NewIncomingContext(context.TODO(), metadata.MD{ + "authorization": []string{"test-key"}, + }) + actual, err := gs.RegisterEvents(ctx, p.input) + assert.Equal(t, p.expected, actual, "%s", msg) + assert.Equal(t, p.expectedErr, err, "%s", msg) + } +} + +func TestGrpcConvToEvaluation(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer 
mockController.Finish() + tag := "tag" + evaluationEvent := &eventproto.EvaluationEvent{ + FeatureId: "feature-id", + FeatureVersion: 2, + UserId: "user-id", + VariationId: "variation-id", + User: &userproto.User{Id: "user-id"}, + Reason: &featureproto.Reason{ + Type: featureproto.Reason_DEFAULT, + }, + Tag: tag, + Timestamp: time.Now().Unix(), + } + bEvaluationEventWithTag, err := proto.Marshal(evaluationEvent) + evaluationEvent.Tag = "" + bEvaluationEventWithoutTag, err := proto.Marshal(evaluationEvent) + assert.NoError(t, err) + bInvalidEvent, err := proto.Marshal(&any.Any{}) + assert.NoError(t, err) + + patterns := []struct { + desc string + input *eventproto.Event + expected *featureproto.Evaluation + expectedTag string + expectedErr error + }{ + { + desc: "error", + input: &eventproto.Event{ + Id: "id", + Event: &any.Any{ + TypeUrl: "github.com/golang/protobuf/ptypes/any", + Value: bInvalidEvent, + }, + }, + expected: nil, + expectedTag: "", + expectedErr: errUnmarshalFailed, + }, + { + desc: "success without tag", + input: &eventproto.Event{ + Id: "id", + Event: &any.Any{ + TypeUrl: "github.com/bucketeer-io/bucketeer/proto/event/client/bucketeer.event.client.EvaluationEvent", + Value: bEvaluationEventWithoutTag, + }, + }, + expected: &featureproto.Evaluation{ + Id: featuredomain.EvaluationID( + evaluationEvent.FeatureId, + evaluationEvent.FeatureVersion, + evaluationEvent.UserId, + ), + FeatureId: evaluationEvent.FeatureId, + FeatureVersion: evaluationEvent.FeatureVersion, + UserId: evaluationEvent.UserId, + VariationId: evaluationEvent.VariationId, + Reason: evaluationEvent.Reason, + }, + expectedTag: "none", + expectedErr: nil, + }, + { + desc: "success with tag", + input: &eventproto.Event{ + Id: "id", + Event: &any.Any{ + TypeUrl: "github.com/bucketeer-io/bucketeer/proto/event/client/bucketeer.event.client.EvaluationEvent", + Value: bEvaluationEventWithTag, + }, + }, + expected: &featureproto.Evaluation{ + Id: featuredomain.EvaluationID( + 
evaluationEvent.FeatureId, + evaluationEvent.FeatureVersion, + evaluationEvent.UserId, + ), + FeatureId: evaluationEvent.FeatureId, + FeatureVersion: evaluationEvent.FeatureVersion, + UserId: evaluationEvent.UserId, + VariationId: evaluationEvent.VariationId, + Reason: evaluationEvent.Reason, + }, + expectedTag: tag, + expectedErr: nil, + }, + } + for _, p := range patterns { + gs := newGrpcGatewayServiceWithMock(t, mockController) + ev, tag, err := gs.convToEvaluation(context.Background(), p.input) + assert.True(t, proto.Equal(p.expected, ev), p.desc) + assert.Equal(t, p.expectedTag, tag, p.desc) + assert.Equal(t, p.expectedErr, err, p.desc) + } +} + +func TestGrpcContainsInvalidTimestampError(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + errs map[string]*gwproto.RegisterEventsResponse_Error + expected bool + }{ + "error: invalid timestamp": { + errs: map[string]*gwproto.RegisterEventsResponse_Error{ + "id-test": { + Retriable: false, + Message: errInvalidTimestamp.Error(), + }, + }, + expected: true, + }, + "error: different error": { + errs: map[string]*gwproto.RegisterEventsResponse_Error{ + "id-test": { + Retriable: true, + Message: errUnmarshalFailed.Error(), + }, + }, + expected: false, + }, + "error: empty": { + errs: make(map[string]*gwproto.RegisterEventsResponse_Error), + expected: false, + }, + } + for msg, p := range patterns { + gs := newGrpcGatewayServiceWithMock(t, mockController) + actual := gs.containsInvalidTimestampError(p.errs) + assert.Equal(t, p.expected, actual, "%s", msg) + } +} + +func newGrpcGatewayServiceWithMock(t *testing.T, mockController *gomock.Controller) *grpcGatewayService { + logger, err := log.NewLogger() + require.NoError(t, err) + return &grpcGatewayService{ + userEvaluationStorage: ftsmock.NewMockUserEvaluationsStorage(mockController), + featureClient: featureclientmock.NewMockClient(mockController), + accountClient: 
accountclientmock.NewMockClient(mockController), + goalPublisher: publishermock.NewMockPublisher(mockController), + goalBatchPublisher: publishermock.NewMockPublisher(mockController), + userPublisher: publishermock.NewMockPublisher(mockController), + metricsPublisher: publishermock.NewMockPublisher(mockController), + evaluationPublisher: publishermock.NewMockPublisher(mockController), + featuresCache: cachev3mock.NewMockFeaturesCache(mockController), + segmentUsersCache: cachev3mock.NewMockSegmentUsersCache(mockController), + environmentAPIKeyCache: cachev3mock.NewMockEnvironmentAPIKeyCache(mockController), + opts: &defaultOptions, + logger: logger, + } +} + +func newUUID(t *testing.T) string { + t.Helper() + id, err := uuid.NewUUID() + if err != nil { + t.Fatal(err) + } + return id.String() +} diff --git a/pkg/gateway/api/api_test.go b/pkg/gateway/api/api_test.go new file mode 100644 index 000000000..039bb10b5 --- /dev/null +++ b/pkg/gateway/api/api_test.go @@ -0,0 +1,2511 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "io" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/golang/protobuf/proto" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/encoding/protojson" + + accountclientmock "github.com/bucketeer-io/bucketeer/pkg/account/client/mock" + "github.com/bucketeer-io/bucketeer/pkg/cache" + cachev3mock "github.com/bucketeer-io/bucketeer/pkg/cache/v3/mock" + featureclientmock "github.com/bucketeer-io/bucketeer/pkg/feature/client/mock" + featuredomain "github.com/bucketeer-io/bucketeer/pkg/feature/domain" + ftsmock "github.com/bucketeer-io/bucketeer/pkg/feature/storage/mock" + "github.com/bucketeer-io/bucketeer/pkg/log" + publishermock "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher/mock" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/client" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" + userproto "github.com/bucketeer-io/bucketeer/proto/user" +) + +const dummyURL = "http://example.com" + +func TestNewGatewayService(t *testing.T) { + t.Parallel() + g := NewGatewayService(nil, nil, nil, nil, nil, nil, nil, nil, nil) + assert.IsType(t, &gatewayService{}, g) +} + +func TestGetEnvironmentAPIKey(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*gatewayService) + auth string + expected *accountproto.EnvironmentAPIKey + expectedErr error + }{ + "exists in redis": { + setup: func(gs *gatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{Id: "id-0"}, + }, 
nil) + }, + auth: "test-key", + expected: &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{Id: "id-0"}, + }, + expectedErr: nil, + }, + "ErrInvalidAPIKey": { + setup: func(gs *gatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + nil, cache.ErrNotFound) + gs.accountClient.(*accountclientmock.MockClient).EXPECT().GetAPIKeyBySearchingAllEnvironments(gomock.Any(), gomock.Any()).Return( + nil, status.Errorf(codes.NotFound, "test")) + }, + auth: "test-key", + expected: nil, + expectedErr: errInvalidAPIKey, + }, + "ErrInternal": { + setup: func(gs *gatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + nil, cache.ErrNotFound) + gs.accountClient.(*accountclientmock.MockClient).EXPECT().GetAPIKeyBySearchingAllEnvironments(gomock.Any(), gomock.Any()).Return( + nil, status.Errorf(codes.Unknown, "test")) + }, + auth: "test-key", + expected: nil, + expectedErr: errInternal, + }, + "success": { + setup: func(gs *gatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + nil, cache.ErrNotFound) + gs.accountClient.(*accountclientmock.MockClient).EXPECT().GetAPIKeyBySearchingAllEnvironments(gomock.Any(), gomock.Any()).Return( + &accountproto.GetAPIKeyBySearchingAllEnvironmentsResponse{EnvironmentApiKey: &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{Id: "id-0"}, + }}, nil) + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Put(gomock.Any()).Return(nil) + }, + auth: "test-key", + expected: &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{Id: "id-0"}, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + gs := newGatewayServiceWithMock(t, mockController) + p.setup(gs) + req := httptest.NewRequest( + 
"POST", + dummyURL, + nil, + ) + req.Header.Add(authorizationKey, p.auth) + actual, err := gs.findEnvironmentAPIKey(context.Background(), req) + assert.Equal(t, p.expected, actual, "%s", msg) + assert.Equal(t, p.expectedErr, err, "%s", msg) + } +} + +func TestGetEnvironmentAPIKeyFromCache(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*cachev3mock.MockEnvironmentAPIKeyCache) + expected *accountproto.EnvironmentAPIKey + expectedErr error + }{ + "no error": { + setup: func(mtf *cachev3mock.MockEnvironmentAPIKeyCache) { + mtf.EXPECT().Get(gomock.Any()).Return(&accountproto.EnvironmentAPIKey{}, nil) + }, + expected: &accountproto.EnvironmentAPIKey{}, + expectedErr: nil, + }, + "error": { + setup: func(mtf *cachev3mock.MockEnvironmentAPIKeyCache) { + mtf.EXPECT().Get(gomock.Any()).Return(nil, cache.ErrNotFound) + }, + expected: nil, + expectedErr: cache.ErrNotFound, + }, + } + for msg, p := range patterns { + mock := cachev3mock.NewMockEnvironmentAPIKeyCache(mockController) + p.setup(mock) + actual, err := getEnvironmentAPIKeyFromCache(context.Background(), "id", mock, "caller", "layer") + assert.Equal(t, p.expected, actual, "%s", msg) + assert.Equal(t, p.expectedErr, err, "%s", msg) + } +} + +func TestCheckEnvironmentAPIKey(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + inputEnvAPIKey *accountproto.EnvironmentAPIKey + inputRole accountproto.APIKey_Role + expected error + }{ + "ErrBadRole": { + inputEnvAPIKey: &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SERVICE, + Disabled: false, + }, + }, + inputRole: accountproto.APIKey_SDK, + expected: errBadRole, + }, + "ErrDisabledAPIKey: environment disabled": { + inputEnvAPIKey: &accountproto.EnvironmentAPIKey{ + 
EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + EnvironmentDisabled: true, + }, + inputRole: accountproto.APIKey_SDK, + expected: errDisabledAPIKey, + }, + "ErrDisabledAPIKey: api key disabled": { + inputEnvAPIKey: &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: true, + }, + EnvironmentDisabled: false, + }, + inputRole: accountproto.APIKey_SDK, + expected: errDisabledAPIKey, + }, + "no error": { + inputEnvAPIKey: &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, + inputRole: accountproto.APIKey_SDK, + expected: nil, + }, + } + gs := gatewayService{} + for msg, p := range patterns { + actual := gs.checkEnvironmentAPIKey(p.inputEnvAPIKey, p.inputRole) + assert.Equal(t, p.expected, actual, "%s", msg) + } +} + +func TestValidateGetEvaluationsRequest(t *testing.T) { + t.Parallel() + patterns := map[string]struct { + input *getEvaluationsRequest + expected error + }{ + "tag is empty": { + input: &getEvaluationsRequest{}, + expected: errTagRequired, + }, + "user is empty": { + input: &getEvaluationsRequest{Tag: "test"}, + expected: errUserRequired, + }, + "user ID is empty": { + input: &getEvaluationsRequest{Tag: "test", User: &userproto.User{}}, + expected: errUserIDRequired, + }, + "pass": { + input: &getEvaluationsRequest{Tag: "test", User: &userproto.User{Id: "id"}}, + }, + } + gs := gatewayService{} + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + actual := gs.validateGetEvaluationsRequest(p.input) + assert.Equal(t, p.expected, actual) + }) + } +} + +func TestValidateGetEvaluationRequest(t *testing.T) { + t.Parallel() + patterns := map[string]struct { + input *getEvaluationRequest + expected error + }{ + "tag is empty": { + input: 
&getEvaluationRequest{}, + expected: errTagRequired, + }, + "user is empty": { + input: &getEvaluationRequest{Tag: "test"}, + expected: errUserRequired, + }, + "user ID is empty": { + input: &getEvaluationRequest{Tag: "test", User: &userproto.User{}}, + expected: errUserIDRequired, + }, + "feature ID is empty": { + input: &getEvaluationRequest{Tag: "test", User: &userproto.User{Id: "id"}}, + expected: errFeatureIDRequired, + }, + "pass": { + input: &getEvaluationRequest{Tag: "test", User: &userproto.User{Id: "id"}, FeatureID: "id"}, + }, + } + gs := gatewayService{} + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + actual := gs.validateGetEvaluationRequest(p.input) + assert.Equal(t, p.expected, actual) + }) + } +} + +func TestGetFeaturesFromCache(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*cachev3mock.MockFeaturesCache) + environmentNamespace string + expected *featureproto.Features + expectedErr error + }{ + "no error": { + setup: func(mtf *cachev3mock.MockFeaturesCache) { + mtf.EXPECT().Get(gomock.Any()).Return(&featureproto.Features{}, nil) + }, + environmentNamespace: "ns0", + expected: &featureproto.Features{}, + expectedErr: nil, + }, + "error": { + setup: func(mtf *cachev3mock.MockFeaturesCache) { + mtf.EXPECT().Get(gomock.Any()).Return(nil, cache.ErrNotFound) + }, + environmentNamespace: "ns0", + expected: nil, + expectedErr: cache.ErrNotFound, + }, + } + for msg, p := range patterns { + mtfc := cachev3mock.NewMockFeaturesCache(mockController) + p.setup(mtfc) + gs := gatewayService{featuresCache: mtfc} + actual, err := gs.getFeaturesFromCache(context.Background(), p.environmentNamespace) + assert.Equal(t, p.expected, actual, "%s", msg) + assert.Equal(t, p.expectedErr, err, "%s", msg) + } +} + +func TestGetFeatures(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + 
patterns := map[string]struct { + setup func(*gatewayService) + environmentNamespace string + expected []*featureproto.Feature + expectedErr error + }{ + "exists in redis": { + setup: func(gs *gatewayService) { + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: []*featureproto.Feature{{}}, + }, nil) + }, + environmentNamespace: "ns0", + expectedErr: nil, + expected: []*featureproto.Feature{{}}, + }, + "listFeatures fails": { + setup: func(gs *gatewayService) { + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + nil, cache.ErrNotFound) + gs.featureClient.(*featureclientmock.MockClient).EXPECT().ListFeatures(gomock.Any(), gomock.Any()).Return( + nil, errors.New("test")) + }, + environmentNamespace: "ns0", + expected: nil, + expectedErr: errInternal, + }, + "success": { + setup: func(gs *gatewayService) { + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + nil, cache.ErrNotFound) + gs.featureClient.(*featureclientmock.MockClient).EXPECT().ListFeatures(gomock.Any(), gomock.Any()).Return( + &featureproto.ListFeaturesResponse{Features: []*featureproto.Feature{ + { + Id: "id-0", + Enabled: true, + }, + }}, nil) + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Put(gomock.Any(), gomock.Any()).Return(nil) + }, + environmentNamespace: "ns0", + expected: []*featureproto.Feature{ + { + Id: "id-0", + Enabled: true, + }, + }, + expectedErr: nil, + }, + // TODO: add test for off-variation features + } + for msg, p := range patterns { + gs := newGatewayServiceWithMock(t, mockController) + p.setup(gs) + actual, err := gs.getFeatures(context.Background(), p.environmentNamespace) + assert.Equal(t, p.expected, actual, "%s", msg) + assert.Equal(t, p.expectedErr, err, "%s", msg) + } +} + +func TestGetEvaluationsContextCanceled(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer 
mockController.Finish() + patterns := map[string]struct { + cancel bool + expected *getEvaluationsResponse + expectedErr error + }{ + "error: context canceled": { + cancel: true, + expected: nil, + expectedErr: errContextCanceled, + }, + "error: missing API key": { + cancel: false, + expected: nil, + expectedErr: errMissingAPIKey, + }, + } + for msg, p := range patterns { + gs := newGatewayServiceWithMock(t, mockController) + req := httptest.NewRequest( + "POST", + dummyURL, + nil, + ) + ctx, cancel := context.WithCancel(req.Context()) + if p.cancel { + cancel() + } else { + defer cancel() + } + actual := httptest.NewRecorder() + gs.getEvaluations(actual, req.WithContext(ctx)) + assert.Equal(t, newErrResponse(t, p.expectedErr), actual.Body.String(), "%s", msg) + } +} + +func TestGetEvaluationsValidation(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*gatewayService) + input *http.Request + expected *getEvaluationsResponse + expectedErr error + }{ + "missing tag": { + setup: func(gs *gatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + }, + input: httptest.NewRequest( + "POST", + dummyURL, + renderBody( + t, + getEvaluationsRequest{ + User: &userproto.User{Id: "id-0"}, + }, + ), + ), + expected: nil, + expectedErr: errTagRequired, + }, + "missing user id": { + setup: func(gs *gatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + }, + input: httptest.NewRequest( + 
"POST", + dummyURL, + renderBody( + t, + getEvaluationsRequest{ + Tag: "test", + }, + ), + ), + expected: nil, + expectedErr: errUserRequired, + }, + "success": { + setup: func(gs *gatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: []*featureproto.Feature{}, + }, nil) + gs.userPublisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + }, + input: httptest.NewRequest( + "POST", + dummyURL, + renderBody( + t, + getEvaluationsRequest{ + Tag: "test", + User: &userproto.User{Id: "id-0"}, + }, + ), + ), + expected: &getEvaluationsResponse{ + Evaluations: nil, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + gs := newGatewayServiceWithMock(t, mockController) + p.setup(gs) + actual := httptest.NewRecorder() + p.input.Header.Add(authorizationKey, "test-key") + gs.getEvaluations(actual, p.input) + if actual.Code != http.StatusOK { + assert.Equal(t, newErrResponse(t, p.expectedErr), actual.Body.String(), "%s", msg) + continue + } + var respBody getEvaluationsResponse + decoded := decodeSuccessResponse(t, actual.Body) + err := json.Unmarshal(decoded, &respBody) + assert.NoError(t, err) + assert.Equal(t, p.expected, &respBody, "%s", msg) + } +} + +func TestGetEvaluationsZeroFeature(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*gatewayService) + input *http.Request + expected *getEvaluationsResponse + expectedErr error + }{ + "zero feature": { + setup: func(gs *gatewayService) { + 
gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: []*featureproto.Feature{}, + }, nil) + gs.userPublisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + }, + input: httptest.NewRequest( + "POST", + dummyURL, + renderBody( + t, + getEvaluationsRequest{ + Tag: "test", + User: &userproto.User{Id: "id-0"}, + UserEvaluationsID: "evaluation-id", + }, + ), + ), + expected: &getEvaluationsResponse{ + Evaluations: nil, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + gs := newGatewayServiceWithMock(t, mockController) + p.setup(gs) + actual := httptest.NewRecorder() + p.input.Header.Add(authorizationKey, "test-key") + gs.getEvaluations(actual, p.input) + var respBody getEvaluationsResponse + decoded := decodeSuccessResponse(t, actual.Body) + err := json.Unmarshal(decoded, &respBody) + assert.NoError(t, err) + assert.Equal(t, p.expected, &respBody, "%s", msg) + assert.Empty(t, respBody.UserEvaluationsID, "%s", msg) + } +} + +func TestGetEvaluationsUserEvaluationsID(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + vID1 := newUUID(t) + vID2 := newUUID(t) + vID3 := newUUID(t) + vID4 := newUUID(t) + vID5 := newUUID(t) + vID6 := newUUID(t) + + features := []*featureproto.Feature{ + { + Id: newUUID(t), + Variations: []*featureproto.Variation{ + { + Id: vID1, + Value: "true", + }, + { + Id: newUUID(t), + Value: "false", + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: vID1, + }, + }, + Tags: 
[]string{"android"}, + }, + { + Id: newUUID(t), + Variations: []*featureproto.Variation{ + { + Id: newUUID(t), + Value: "true", + }, + { + Id: vID2, + Value: "false", + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: vID2, + }, + }, + Tags: []string{"android"}, + }, + } + + features2 := []*featureproto.Feature{ + { + Id: newUUID(t), + Variations: []*featureproto.Variation{ + { + Id: vID3, + Value: "true", + }, + { + Id: newUUID(t), + Value: "false", + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: vID3, + }, + }, + Tags: []string{"ios"}, + }, + { + Id: newUUID(t), + Variations: []*featureproto.Variation{ + { + Id: newUUID(t), + Value: "true", + }, + { + Id: vID4, + Value: "false", + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: vID4, + }, + }, + Tags: []string{"ios"}, + }, + } + + features3 := []*featureproto.Feature{ + { + Id: newUUID(t), + Variations: []*featureproto.Variation{ + { + Id: vID5, + Value: "true", + }, + { + Id: newUUID(t), + Value: "false", + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: vID5, + }, + }, + Tags: []string{"web"}, + }, + { + Id: newUUID(t), + Variations: []*featureproto.Variation{ + { + Id: newUUID(t), + Value: "true", + }, + { + Id: vID6, + Value: "false", + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: vID6, + }, + }, + Tags: []string{"web"}, + }, + } + multiFeatures := append(features, features2...) + multiFeatures = append(multiFeatures, features3...) 
+ userID := "user-id-0" + userMetadata := map[string]string{"b": "value-b", "c": "value-c", "a": "value-a", "d": "value-d"} + ueid := featuredomain.UserEvaluationsID(userID, nil, features) + ueidWithData := featuredomain.UserEvaluationsID(userID, userMetadata, features) + + patterns := map[string]struct { + setup func(*gatewayService) + input *http.Request + expected *getEvaluationsResponse + expectedErr error + expectedEvaluationsAssert func(assert.TestingT, interface{}, ...interface{}) bool + }{ + "user evaluations id not set": { + setup: func(gs *gatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: features, + }, nil) + gs.userPublisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil).MaxTimes(1) + }, + input: httptest.NewRequest( + "POST", + dummyURL, + renderBody( + t, + getEvaluationsRequest{ + Tag: "test", + User: &userproto.User{ + Id: userID, + Data: userMetadata, + }, + }, + ), + ), + expected: &getEvaluationsResponse{ + UserEvaluationsID: ueidWithData, + }, + expectedErr: nil, + expectedEvaluationsAssert: assert.NotNil, + }, + "user evaluations id is the same": { + setup: func(gs *gatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: multiFeatures, + }, nil) + 
gs.userPublisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil).MaxTimes(1) + }, + input: httptest.NewRequest( + "POST", + dummyURL, + renderBody( + t, + getEvaluationsRequest{ + Tag: "test", + User: &userproto.User{ + Id: userID, + Data: userMetadata, + }, + UserEvaluationsID: featuredomain.UserEvaluationsID(userID, userMetadata, multiFeatures), + }, + ), + ), + expected: &getEvaluationsResponse{ + UserEvaluationsID: featuredomain.UserEvaluationsID(userID, userMetadata, multiFeatures), + }, + expectedErr: nil, + expectedEvaluationsAssert: assert.Nil, + }, + "user evaluations id is different": { + setup: func(gs *gatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: features, + }, nil) + gs.userPublisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil).MaxTimes(1) + }, + input: httptest.NewRequest( + "POST", + dummyURL, + renderBody( + t, + getEvaluationsRequest{ + Tag: "test", + User: &userproto.User{ + Id: userID, + Data: userMetadata, + }, + UserEvaluationsID: "evaluation-id", + }, + ), + ), + expected: &getEvaluationsResponse{ + UserEvaluationsID: ueidWithData, + }, + expectedErr: nil, + expectedEvaluationsAssert: assert.NotNil, + }, + "user_with_no_metadata_and_the_id_is_same": { + setup: func(gs *gatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + 
gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: features, + }, nil) + gs.userPublisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil).MaxTimes(1) + }, + input: httptest.NewRequest( + "POST", + dummyURL, + renderBody( + t, + getEvaluationsRequest{ + Tag: "test", + User: &userproto.User{Id: userID}, + UserEvaluationsID: ueid, + }, + ), + ), + + expected: &getEvaluationsResponse{ + UserEvaluationsID: ueid, + }, + expectedErr: nil, + expectedEvaluationsAssert: assert.Nil, + }, + "user_with_no_metadata_and_the_id_is_different": { + setup: func(gs *gatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: features, + }, nil) + gs.userPublisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil).MaxTimes(1) + }, + input: httptest.NewRequest( + "POST", + dummyURL, + renderBody( + t, + getEvaluationsRequest{ + Tag: "test", + User: &userproto.User{Id: userID}, + UserEvaluationsID: "evaluation-id", + }, + ), + ), + expected: &getEvaluationsResponse{ + UserEvaluationsID: ueid, + }, + expectedErr: nil, + expectedEvaluationsAssert: assert.NotNil, + }, + } + for msg, p := range patterns { + gs := newGatewayServiceWithMock(t, mockController) + if p.setup != nil { + p.setup(gs) + } + actual := httptest.NewRecorder() + p.input.Header.Add(authorizationKey, "test-key") + gs.getEvaluations(actual, p.input) + if actual.Code != http.StatusOK { + assert.Equal(t, newErrResponse(t, p.expectedErr), actual.Body.String(), "%s", msg) + continue + } + var respBody 
getEvaluationsResponse + decoded := decodeSuccessResponse(t, actual.Body) + err := json.Unmarshal(decoded, &respBody) + assert.NoError(t, err) + assert.Equal(t, p.expected.UserEvaluationsID, respBody.UserEvaluationsID, "%s", msg) + p.expectedEvaluationsAssert(t, respBody.Evaluations, "%s", msg) + } +} + +func testGetEvaluationsNoSegmentList(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + vID1 := newUUID(t) + vID2 := newUUID(t) + vID3 := newUUID(t) + vID4 := newUUID(t) + + patterns := map[string]struct { + setup func(*gatewayService) + input *http.Request + expected *getEvaluationsResponse + expectedErr error + }{ + "state: full": { + setup: func(gs *gatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: []*featureproto.Feature{ + { + Id: "feature-a", + Variations: []*featureproto.Variation{ + { + Id: vID1, + Value: "true", + }, + { + Id: newUUID(t), + Value: "false", + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: vID1, + }, + }, + Tags: []string{"android"}, + }, + { + Id: "feature-b", + Variations: []*featureproto.Variation{ + { + Id: newUUID(t), + Value: "true", + }, + { + Id: vID2, + Value: "false", + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: vID2, + }, + }, + Tags: []string{"android"}, + }, + { + Id: "feature-c", + Variations: []*featureproto.Variation{ + { + Id: vID3, + Value: "true", + }, + { + Id: newUUID(t), + Value: "false", + }, 
+ }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: vID3, + }, + }, + Tags: []string{"ios"}, + }, + { + Id: "feature-d", + Variations: []*featureproto.Variation{ + { + Id: newUUID(t), + Value: "true", + }, + { + Id: vID4, + Value: "false", + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: vID4, + }, + }, + Tags: []string{"ios"}, + }, + }, + }, nil) + gs.userPublisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil).MaxTimes(1) + }, + input: httptest.NewRequest( + "POST", + dummyURL, + renderBody( + t, + getEvaluationsRequest{ + Tag: "ios", + User: &userproto.User{Id: "id-0"}, + }, + ), + ), + expected: &getEvaluationsResponse{ + Evaluations: &featureproto.UserEvaluations{ + Evaluations: []*featureproto.Evaluation{ + { + VariationId: vID3, + }, + { + VariationId: vID4, + }, + }, + }, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + gs := newGatewayServiceWithMock(t, mockController) + if p.setup != nil { + p.setup(gs) + } + actual := httptest.NewRecorder() + p.input.Header.Add(authorizationKey, "test-key") + gs.getEvaluations(actual, p.input) + if actual.Code != http.StatusOK { + assert.Equal(t, newErrResponse(t, p.expectedErr), actual.Body.String(), "%s", msg) + return + } + var respBody getEvaluationsResponse + decoded := decodeSuccessResponse(t, actual.Body) + err := json.Unmarshal(decoded, &respBody) + assert.NoError(t, err) + ev := p.expected.Evaluations.Evaluations + av := respBody.Evaluations.Evaluations + assert.Equal(t, len(ev), len(av), "%s", msg) + assert.Equal(t, ev[0].VariationId, av[0].VariationId, "%s", msg) + assert.Equal(t, ev[1].VariationId, av[1].VariationId, "%s", msg) + assert.NotEmpty(t, respBody.UserEvaluationsID, "%s", msg) + } +} + +func TestGetEvaluationsEvaluteFeatures(t *testing.T) { + 
t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*gatewayService) + input *http.Request + expected *getEvaluationsResponse + expectedErr error + }{ + "errInternal": { + setup: func(gs *gatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: []*featureproto.Feature{ + { + Variations: []*featureproto.Variation{ + { + Id: "variation-a", + Value: "true", + }, + { + Id: "variation-b", + Value: "false", + }, + }, + Rules: []*featureproto.Rule{ + { + Id: "rule-1", + Strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-b", + }, + }, + Clauses: []*featureproto.Clause{ + { + Id: "clause-1", + Attribute: "name", + Operator: featureproto.Clause_SEGMENT, + Values: []string{ + "id-0", + }, + }, + }, + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-b", + }, + }, + Tags: []string{"test"}, + }, + }, + }, nil) + gs.userPublisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + gs.segmentUsersCache.(*cachev3mock.MockSegmentUsersCache).EXPECT().Get(gomock.Any(), gomock.Any()).Return( + nil, errors.New("random error")) + gs.featureClient.(*featureclientmock.MockClient).EXPECT().ListSegmentUsers(gomock.Any(), gomock.Any()).Return( + nil, ErrInternal) + }, + input: httptest.NewRequest( + "POST", + dummyURL, + renderBody( + t, + getEvaluationsRequest{ + Tag: "user", + User: 
&userproto.User{Id: "id-0"}, + }, + ), + ), + expected: nil, + expectedErr: errInternal, + }, + "state: full, evaluate features list segment from cache": { + setup: func(gs *gatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + + Features: []*featureproto.Feature{ + { + Variations: []*featureproto.Variation{ + { + Id: "variation-a", + Value: "true", + }, + { + Id: "variation-b", + Value: "false", + }, + }, + Rules: []*featureproto.Rule{ + { + Id: "rule-1", + Strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-b", + }, + }, + Clauses: []*featureproto.Clause{ + { + Id: "clause-1", + Attribute: "name", + Operator: featureproto.Clause_SEGMENT, + Values: []string{ + "id-0", + }, + }, + }, + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-a", + }, + }, + Tags: []string{"test"}, + }, + }, + }, nil) + gs.segmentUsersCache.(*cachev3mock.MockSegmentUsersCache).EXPECT().Get(gomock.Any(), gomock.Any()).Return( + &featureproto.SegmentUsers{ + SegmentId: "segment-id", + Users: []*featureproto.SegmentUser{ + { + SegmentId: "segment-id", + UserId: "user-id-1", + State: featureproto.SegmentUser_INCLUDED, + Deleted: false, + }, + { + SegmentId: "segment-id", + UserId: "user-id-2", + State: featureproto.SegmentUser_INCLUDED, + Deleted: false, + }, + }, + }, nil) + gs.userPublisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + }, + input: httptest.NewRequest( + "POST", + 
dummyURL, + renderBody( + t, + getEvaluationsRequest{ + Tag: "test", + User: &userproto.User{Id: "id-0"}, + }, + ), + ), + expected: &getEvaluationsResponse{ + Evaluations: &featureproto.UserEvaluations{ + Evaluations: []*featureproto.Evaluation{ + { + VariationId: "variation-b", + Reason: &featureproto.Reason{ + Type: featureproto.Reason_DEFAULT, + }, + }, + }, + }, + }, + expectedErr: nil, + }, + "state: full, evaluate features list segment from storage": { + setup: func(gs *gatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: []*featureproto.Feature{ + { + Variations: []*featureproto.Variation{ + { + Id: "variation-a", + Value: "true", + }, + { + Id: "variation-b", + Value: "false", + }, + }, + Rules: []*featureproto.Rule{ + { + Id: "rule-1", + Strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-b", + }, + }, + Clauses: []*featureproto.Clause{ + { + Id: "clause-1", + Attribute: "name", + Operator: featureproto.Clause_SEGMENT, + Values: []string{ + "id-0", + }, + }, + }, + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-b", + }, + }, + Tags: []string{"test"}, + }, + }, + }, nil) + gs.segmentUsersCache.(*cachev3mock.MockSegmentUsersCache).EXPECT().Get(gomock.Any(), gomock.Any()).Return( + nil, errors.New("random error")) + gs.segmentUsersCache.(*cachev3mock.MockSegmentUsersCache).EXPECT().Put(gomock.Any(), gomock.Any()).Return(nil) + 
gs.userPublisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + gs.featureClient.(*featureclientmock.MockClient).EXPECT().ListSegmentUsers(gomock.Any(), gomock.Any()).Return( + &featureproto.ListSegmentUsersResponse{}, nil) + }, + input: httptest.NewRequest( + "POST", + dummyURL, + renderBody( + t, + getEvaluationsRequest{ + Tag: "test", + User: &userproto.User{Id: "id-0"}, + }, + ), + ), + expected: &getEvaluationsResponse{ + Evaluations: &featureproto.UserEvaluations{ + Evaluations: []*featureproto.Evaluation{ + { + VariationId: "variation-b", + Reason: &featureproto.Reason{ + Type: featureproto.Reason_DEFAULT, + }, + }, + }, + }, + }, + expectedErr: nil, + }, + "state: full, evaluate features": { + setup: func(gs *gatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: []*featureproto.Feature{ + { + Variations: []*featureproto.Variation{ + { + Id: "variation-a", + Value: "true", + }, + { + Id: "variation-b", + Value: "false", + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-b", + }, + }, + Tags: []string{"test"}, + }, + }, + }, nil) + gs.userPublisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + }, + input: httptest.NewRequest( + "POST", + dummyURL, + renderBody( + t, + getEvaluationsRequest{ + Tag: "test", + User: &userproto.User{Id: "id-0"}, + }, + ), + ), + expected: &getEvaluationsResponse{ + Evaluations: &featureproto.UserEvaluations{ + Evaluations: 
[]*featureproto.Evaluation{ + { + VariationId: "variation-b", + Reason: &featureproto.Reason{ + Type: featureproto.Reason_DEFAULT, + }, + }, + }, + }, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + gs := newGatewayServiceWithMock(t, mockController) + if p.setup != nil { + p.setup(gs) + } + actual := httptest.NewRecorder() + p.input.Header.Add(authorizationKey, "test-key") + gs.getEvaluations(actual, p.input) + if actual.Code != http.StatusOK { + assert.Equal(t, newErrResponse(t, p.expectedErr), actual.Body.String(), "%s", msg) + continue + } + var respBody getEvaluationsResponse + decoded := decodeSuccessResponse(t, actual.Body) + err := json.Unmarshal(decoded, &respBody) + assert.NoError(t, err) + assert.Equal(t, len(p.expected.Evaluations.Evaluations), 1, "%s", msg) + assert.Equal(t, p.expected.Evaluations.Evaluations[0].VariationId, "variation-b", "%s", msg) + assert.Equal(t, p.expected.Evaluations.Evaluations[0].Reason, respBody.Evaluations.Evaluations[0].Reason, msg) + assert.NotEmpty(t, respBody.UserEvaluationsID, "%s", msg) + } +} + +func TestGetEvaluation(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*gatewayService) + input *http.Request + expectedFeatureID string + expectedErr error + }{ + "errFeatureNotFound": { + setup: func(gs *gatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: []*featureproto.Feature{ + { + Id: "feature-id-1", + Variations: []*featureproto.Variation{ + { + Id: "variation-a", + Value: "true", + }, + { + Id: "variation-b", + Value: 
"false", + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-b", + }, + }, + Tags: []string{"test"}, + }, + { + Id: "feature-id-2", + Variations: []*featureproto.Variation{ + { + Id: "variation-c", + Value: "true", + }, + { + Id: "variation-d", + Value: "false", + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-d", + }, + }, + Tags: []string{"test"}, + }, + }, + }, nil) + gs.userPublisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + }, + input: httptest.NewRequest( + "POST", + dummyURL, + renderBody( + t, + getEvaluationRequest{ + Tag: "test", + User: &userproto.User{Id: "id-0"}, + FeatureID: "feature-id-3", + }, + ), + ), + expectedFeatureID: "", + expectedErr: errFeatureNotFound, + }, + "errInternal": { + setup: func(gs *gatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: []*featureproto.Feature{ + { + Id: "feature-id-1", + Variations: []*featureproto.Variation{ + { + Id: "variation-a", + Value: "true", + }, + { + Id: "variation-b", + Value: "false", + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-b", + }, + }, + Tags: []string{"test"}, + }, + { + Id: "feature-id-2", + Variations: []*featureproto.Variation{ + { + Id: "variation-c", + Value: "true", + }, + { + Id: "variation-d", + Value: "false", + }, + }, + 
Rules: []*featureproto.Rule{ + { + Id: "rule-1", + Strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-b", + }, + }, + Clauses: []*featureproto.Clause{ + { + Id: "clause-1", + Attribute: "name", + Operator: featureproto.Clause_SEGMENT, + Values: []string{ + "id-0", + }, + }, + }, + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-d", + }, + }, + Tags: []string{"test"}, + }, + }, + }, nil) + gs.userPublisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + gs.segmentUsersCache.(*cachev3mock.MockSegmentUsersCache).EXPECT().Get(gomock.Any(), gomock.Any()).Return( + nil, errors.New("random error")) + gs.featureClient.(*featureclientmock.MockClient).EXPECT().ListSegmentUsers(gomock.Any(), gomock.Any()).Return( + nil, ErrInternal) + }, + input: httptest.NewRequest( + "POST", + dummyURL, + renderBody( + t, + getEvaluationRequest{ + Tag: "test", + User: &userproto.User{Id: "id-0"}, + FeatureID: "feature-id-2", + }, + ), + ), + expectedFeatureID: "", + expectedErr: errInternal, + }, + "error while trying to upsert the user evaluation": { + setup: func(gs *gatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: []*featureproto.Feature{ + { + Id: "feature-id-1", + Variations: []*featureproto.Variation{ + { + Id: "variation-a", + Value: "true", + }, + { + Id: "variation-b", + Value: "false", + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: 
featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-b", + }, + }, + Tags: []string{"test"}, + }, + { + Id: "feature-id-2", + Variations: []*featureproto.Variation{ + { + Id: "variation-a", + Value: "true", + }, + { + Id: "variation-b", + Value: "false", + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-b", + }, + }, + Tags: []string{"test"}, + }, + }, + }, nil) + gs.userEvaluationStorage.(*ftsmock.MockUserEvaluationsStorage).EXPECT().UpsertUserEvaluation( + gomock.Any(), + gomock.Any(), + gomock.Any(), + gomock.Any(), + ).Return(errors.New("storage: internal")).MaxTimes(1) + gs.userPublisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + }, + input: httptest.NewRequest( + "POST", + dummyURL, + renderBody( + t, + getEvaluationRequest{ + Tag: "test", + User: &userproto.User{Id: "id-0"}, + FeatureID: "feature-id-2", + }, + ), + ), + expectedFeatureID: "", + expectedErr: errInternal, + }, + "return evaluation": { + setup: func(gs *gatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.featuresCache.(*cachev3mock.MockFeaturesCache).EXPECT().Get(gomock.Any()).Return( + &featureproto.Features{ + Features: []*featureproto.Feature{ + { + Id: "feature-id-1", + Variations: []*featureproto.Variation{ + { + Id: "variation-a", + Value: "true", + }, + { + Id: "variation-b", + Value: "false", + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-b", + }, + }, + Tags: []string{"test"}, + }, + { + Id: "feature-id-2", + 
Variations: []*featureproto.Variation{ + { + Id: "variation-a", + Value: "true", + }, + { + Id: "variation-b", + Value: "false", + }, + }, + DefaultStrategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: "variation-b", + }, + }, + Tags: []string{"test"}, + }, + }, + }, nil) + gs.userEvaluationStorage.(*ftsmock.MockUserEvaluationsStorage).EXPECT().UpsertUserEvaluation( + gomock.Any(), + gomock.Any(), + gomock.Any(), + gomock.Any(), + ).Return(nil).MaxTimes(1) + gs.userPublisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + }, + input: httptest.NewRequest( + "POST", + dummyURL, + renderBody( + t, + getEvaluationRequest{ + Tag: "test", + User: &userproto.User{Id: "id-0"}, + FeatureID: "feature-id-2", + }, + ), + ), + expectedFeatureID: "feature-id-2", + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + gs := newGatewayServiceWithMock(t, mockController) + if p.setup != nil { + p.setup(gs) + } + actual := httptest.NewRecorder() + p.input.Header.Add(authorizationKey, "test-key") + gs.getEvaluation(actual, p.input) + if actual.Code != http.StatusOK { + assert.Equal(t, newErrResponse(t, p.expectedErr), actual.Body.String(), "%s", msg) + return + } + var respBody getEvaluationResponse + decoded := decodeSuccessResponse(t, actual.Body) + err := json.Unmarshal(decoded, &respBody) + assert.NoError(t, err) + assert.Equal(t, p.expectedFeatureID, respBody.Evaluation.FeatureId) + }) + } +} + +func TestRegisterEventsContextCanceled(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + cancel bool + expectedErr error + }{ + "error: context canceled": { + cancel: true, + expectedErr: errContextCanceled, + }, + "error: missing API key": { + cancel: false, + expectedErr: errMissingAPIKey, + }, + } + for msg, p := range 
patterns { + gs := newGatewayServiceWithMock(t, mockController) + req := httptest.NewRequest( + "POST", + dummyURL, + nil, + ) + ctx, cancel := context.WithCancel(req.Context()) + if p.cancel { + cancel() + } else { + defer cancel() + } + actual := httptest.NewRecorder() + gs.registerEvents( + actual, + req.WithContext(ctx), + ) + assert.Equal(t, newErrResponse(t, p.expectedErr), actual.Body.String(), "%s", msg) + } +} + +func TestRegisterEvents(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + bGoalEvent, err := protojson.Marshal(&eventproto.GoalEvent{Timestamp: time.Now().Unix()}) + if err != nil { + t.Fatal("could not serialize goal event") + } + bGoalBatchEvent, err := protojson.Marshal(&eventproto.GoalBatchEvent{ + UserId: "0efe416e-2fd2-4996-b5c3-194f05444f1f", + UserGoalEventsOverTags: []*eventproto.UserGoalEventsOverTag{ + { + Tag: "tag", + }, + }, + }) + if err != nil { + t.Fatal("could not serialize goal batch event") + } + bEvaluationEvent, err := protojson.Marshal(&eventproto.EvaluationEvent{Timestamp: time.Now().Unix()}) + if err != nil { + t.Fatal("could not serialize evaluation event") + } + bLatencyEvent, err := json.Marshal(&getEvaluationLatencyMetricsEvent{ + Labels: map[string]string{"tag": "test", "status": "success"}, + Duration: time.Duration(1), + }) + if err != nil { + t.Fatal("could not serialize goal event") + } + bMetricsEvent, err := json.Marshal(&metricsEvent{ + Timestamp: time.Now().Unix(), + Event: json.RawMessage(string(bLatencyEvent)), + Type: getEvaluationLatencyMetricsEventType, + }) + if err != nil { + t.Fatal("could not serialize metrics event") + } + uuid0 := newUUID(t) + uuid1 := newUUID(t) + uuid2 := newUUID(t) + uuid3 := newUUID(t) + + patterns := map[string]struct { + setup func(*gatewayService) + input *http.Request + expected *registerEventsResponse + expectedErr error + }{ + "error: invalid http method": { + setup: nil, + input: httptest.NewRequest( + "GET", + 
dummyURL, + nil, + ), + expectedErr: errInvalidHttpMethod, + }, + "error: body is nil": { + setup: func(gs *gatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + }, + input: httptest.NewRequest( + "POST", + dummyURL, + nil, + ), + expectedErr: errBodyRequired, + }, + "error: zero event": { + setup: func(gs *gatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + }, + input: httptest.NewRequest( + "POST", + dummyURL, + renderBody( + t, + registerEventsRequest{}, + ), + ), + expectedErr: errMissingEvents, + }, + "error: ErrMissingEventID": { + setup: func(gs *gatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + }, + input: httptest.NewRequest( + "POST", + dummyURL, + renderBody( + t, + registerEventsRequest{ + Events: []event{ + { + ID: "", + }, + }, + }, + ), + ), + expectedErr: errMissingEventID, + }, + "error: invalid message type": { + setup: func(gs *gatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + 
gs.goalPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + gs.goalBatchPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + gs.evaluationPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + gs.metricsPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + }, + input: httptest.NewRequest( + "POST", + dummyURL, + renderBody( + t, + registerEventsRequest{ + Events: []event{ + { + ID: uuid0, + Event: json.RawMessage(string(bGoalEvent)), + Type: 8, + }, + }, + }, + ), + ), + expected: ®isterEventsResponse{ + Errors: map[string]*registerEventsResponseError{ + uuid0: { + Retriable: false, + Message: errInvalidType.Error(), + }, + }, + }, + expectedErr: nil, + }, + "error while trying to upsert the user evaluation": { + setup: func(gs *gatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.goalPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + gs.goalBatchPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + gs.evaluationPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + gs.metricsPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + gs.userEvaluationStorage.(*ftsmock.MockUserEvaluationsStorage).EXPECT().UpsertUserEvaluation( + gomock.Any(), + gomock.Any(), + gomock.Any(), + gomock.Any(), + 
).Return(errors.New("storage: internal")).MaxTimes(1) + }, + input: httptest.NewRequest( + "POST", + dummyURL, + renderBody( + t, + registerEventsRequest{ + Events: []event{ + { + ID: uuid0, + Event: json.RawMessage(bGoalEvent), + Type: goalEventType, + }, + { + ID: uuid1, + Event: json.RawMessage(bEvaluationEvent), + Type: evaluationEventType, + }, + { + ID: uuid2, + Event: json.RawMessage(bMetricsEvent), + Type: metricsEventType, + }, + { + ID: uuid3, + Event: json.RawMessage(bGoalBatchEvent), + Type: goalBatchEventType, + }, + }, + }, + ), + ), + expected: ®isterEventsResponse{ + Errors: map[string]*registerEventsResponseError{ + uuid1: { + Retriable: true, + Message: "Failed to upsert user evaluation", + }, + }, + }, + expectedErr: nil, + }, + "success": { + setup: func(gs *gatewayService) { + gs.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + gs.goalPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + gs.goalBatchPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + gs.evaluationPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + gs.metricsPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti(gomock.Any(), gomock.Any()).Return( + nil).MaxTimes(1) + gs.userEvaluationStorage.(*ftsmock.MockUserEvaluationsStorage).EXPECT().UpsertUserEvaluation( + gomock.Any(), + gomock.Any(), + gomock.Any(), + gomock.Any(), + ).Return(nil).MaxTimes(1) + }, + input: httptest.NewRequest( + "POST", + dummyURL, + renderBody( + t, + registerEventsRequest{ + Events: []event{ + { + ID: uuid0, + Event: json.RawMessage(bGoalEvent), + Type: goalEventType, 
+ }, + { + ID: uuid1, + Event: json.RawMessage(bEvaluationEvent), + Type: evaluationEventType, + }, + { + ID: uuid2, + Event: json.RawMessage(bMetricsEvent), + Type: metricsEventType, + }, + { + ID: uuid3, + Event: json.RawMessage(bGoalBatchEvent), + Type: goalBatchEventType, + }, + }, + }, + ), + ), + expected: ®isterEventsResponse{Errors: map[string]*registerEventsResponseError(nil)}, + expectedErr: nil, + }, + } + for msg, p := range patterns { + gs := newGatewayServiceWithMock(t, mockController) + if p.setup != nil { + p.setup(gs) + } + actual := httptest.NewRecorder() + p.input.Header.Add("authorization", "test-key") + gs.registerEvents(actual, p.input) + if actual.Code != http.StatusOK { + assert.Equal(t, newErrResponse(t, p.expectedErr), actual.Body.String(), "%s", msg) + continue + } + var respBody registerEventsResponse + decoded := decodeSuccessResponse(t, actual.Body) + err := json.Unmarshal(decoded, &respBody) + assert.NoError(t, err) + assert.Equal(t, p.expected, &respBody, msg) + } +} + +func TestConvToEvaluation(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + tag := "tag" + evaluationEventWithTag := &eventproto.EvaluationEvent{ + FeatureId: "feature-id", + FeatureVersion: 2, + UserId: "user-id", + VariationId: "variation-id", + User: &userproto.User{Id: "user-id"}, + Reason: &featureproto.Reason{ + Type: featureproto.Reason_DEFAULT, + }, + Tag: tag, + Timestamp: time.Now().Unix(), + } + evaluationEventWithoutTag := &eventproto.EvaluationEvent{ + FeatureId: "feature-id", + FeatureVersion: 2, + UserId: "user-id", + VariationId: "variation-id", + User: &userproto.User{Id: "user-id"}, + Reason: &featureproto.Reason{ + Type: featureproto.Reason_DEFAULT, + }, + Timestamp: time.Now().Unix(), + } + patterns := []struct { + desc string + input *eventproto.EvaluationEvent + expected *featureproto.Evaluation + expectedTag string + expectedErr error + }{ + { + desc: "success without tag", + input: 
evaluationEventWithoutTag, + expected: &featureproto.Evaluation{ + Id: featuredomain.EvaluationID( + evaluationEventWithoutTag.FeatureId, + evaluationEventWithoutTag.FeatureVersion, + evaluationEventWithoutTag.UserId, + ), + FeatureId: evaluationEventWithoutTag.FeatureId, + FeatureVersion: evaluationEventWithoutTag.FeatureVersion, + UserId: evaluationEventWithoutTag.UserId, + VariationId: evaluationEventWithoutTag.VariationId, + Reason: evaluationEventWithoutTag.Reason, + }, + expectedTag: "none", + expectedErr: nil, + }, + { + desc: "success with tag", + input: evaluationEventWithTag, + expected: &featureproto.Evaluation{ + Id: featuredomain.EvaluationID( + evaluationEventWithTag.FeatureId, + evaluationEventWithTag.FeatureVersion, + evaluationEventWithTag.UserId, + ), + FeatureId: evaluationEventWithTag.FeatureId, + FeatureVersion: evaluationEventWithTag.FeatureVersion, + UserId: evaluationEventWithTag.UserId, + VariationId: evaluationEventWithTag.VariationId, + Reason: evaluationEventWithTag.Reason, + }, + expectedTag: tag, + expectedErr: nil, + }, + } + for _, p := range patterns { + gs := newGatewayServiceWithMock(t, mockController) + ev, tag, err := gs.convToEvaluation(context.Background(), p.input) + assert.True(t, proto.Equal(p.expected, ev), p.desc) + assert.Equal(t, p.expectedTag, tag, p.desc) + assert.Equal(t, p.expectedErr, err, p.desc) + } +} + +func TestContainsInvalidTimestampError(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + errs map[string]*registerEventsResponseError + expected bool + }{ + "error: invalid timestamp": { + errs: map[string]*registerEventsResponseError{ + "id-test": { + Retriable: false, + Message: errInvalidTimestamp.Error(), + }, + }, + expected: true, + }, + "error: unmarshal failed": { + errs: map[string]*registerEventsResponseError{ + "id-test": { + Retriable: true, + Message: errUnmarshalFailed.Error(), + }, + }, + expected: false, 
+ }, + "error: empty": { + errs: make(map[string]*registerEventsResponseError), + expected: false, + }, + } + for msg, p := range patterns { + gs := newGatewayServiceWithMock(t, mockController) + actual := gs.containsInvalidTimestampError(p.errs) + assert.Equal(t, p.expected, actual, "%s", msg) + } +} + +func TestGetMetricsEvent(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + bLatencyEvent, err := json.Marshal(&getEvaluationLatencyMetricsEvent{ + Labels: map[string]string{"tag": "test", "status": "success"}, + Duration: time.Duration(1), + }) + if err != nil { + t.Fatal("could not serialize goal event") + } + ctx := context.TODO() + patterns := map[string]struct { + input metricsEvent + expected *eventproto.MetricsEvent + expectedErr error + }{ + "error: invalid message type": { + input: metricsEvent{ + Timestamp: time.Now().Unix(), + Event: json.RawMessage(string(bLatencyEvent)), + Type: 0, + }, + expectedErr: errInvalidType, + }, + "error: failed to unmarshal": { + input: metricsEvent{ + Timestamp: time.Now().Unix(), + Event: json.RawMessage(string(bLatencyEvent)), + Type: getEvaluationSizeMetricsEventType, + }, + expectedErr: errUnmarshalFailed, + }, + "success": { + input: metricsEvent{ + Timestamp: time.Now().Unix(), + Event: json.RawMessage(string(bLatencyEvent)), + Type: getEvaluationLatencyMetricsEventType, + }, + expected: &eventproto.MetricsEvent{ + Timestamp: time.Now().Unix(), + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + gs := newGatewayServiceWithMock(t, mockController) + bMetricsEvent, err := json.Marshal(p.input) + assert.NoError(t, err) + ev := event{ + ID: newUUID(t), + Event: json.RawMessage(bMetricsEvent), + Type: metricsEventType, + } + event, _, err := gs.getMetricsEvent(ctx, ev) + if err != nil { + assert.Equal(t, p.expectedErr, err) + return + } + assert.Equal(t, event.Timestamp, p.expected.Timestamp) + assert.NotNil(t, 
event.Event) + }) + } +} + +type successResponse struct { + Data json.RawMessage `json:"data"` +} + +func decodeSuccessResponse(t *testing.T, body *bytes.Buffer) json.RawMessage { + t.Helper() + var resp successResponse + err := json.NewDecoder(body).Decode(&resp) + if err != nil { + t.Fatal(err) + } + return resp.Data +} + +type failureResponse struct { + Error errorResponse `json:"error"` +} + +type errorResponse struct { + Code int `json:"code"` + Message string `json:"message"` +} + +func newErrResponse(t *testing.T, err error) string { + t.Helper() + status, _ := convertToErrStatus(err) + res := &failureResponse{ + Error: errorResponse{ + Code: status.GetStatusCode(), + Message: status.GetErrMessage(), + }, + } + encoded, err := json.Marshal(res) + if err != nil { + t.Fatal(err) + } + return string(encoded) +} + +type errStatus interface { + GetErrMessage() string + GetStatusCode() int +} + +func convertToErrStatus(err error) (errStatus, bool) { + s, ok := err.(errStatus) + if !ok { + return nil, false + } + return s, true +} + +func renderBody(t *testing.T, res interface{}) io.Reader { + t.Helper() + encoded, err := json.Marshal(res) + if err != nil { + t.Fatal(err) + } + return bytes.NewReader(encoded) +} + +func newGatewayServiceWithMock(t *testing.T, mockController *gomock.Controller) *gatewayService { + t.Helper() + logger, err := log.NewLogger() + require.NoError(t, err) + return &gatewayService{ + userEvaluationStorage: ftsmock.NewMockUserEvaluationsStorage(mockController), + featureClient: featureclientmock.NewMockClient(mockController), + accountClient: accountclientmock.NewMockClient(mockController), + goalPublisher: publishermock.NewMockPublisher(mockController), + goalBatchPublisher: publishermock.NewMockPublisher(mockController), + userPublisher: publishermock.NewMockPublisher(mockController), + metricsPublisher: publishermock.NewMockPublisher(mockController), + evaluationPublisher: publishermock.NewMockPublisher(mockController), + featuresCache: 
cachev3mock.NewMockFeaturesCache(mockController), + segmentUsersCache: cachev3mock.NewMockSegmentUsersCache(mockController), + environmentAPIKeyCache: cachev3mock.NewMockEnvironmentAPIKeyCache(mockController), + opts: &defaultOptions, + logger: logger, + } +} diff --git a/pkg/gateway/api/grpc_validation.go b/pkg/gateway/api/grpc_validation.go new file mode 100644 index 000000000..59dbeda27 --- /dev/null +++ b/pkg/gateway/api/grpc_validation.go @@ -0,0 +1,299 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "context" + "errors" + "time" + + "github.com/golang/protobuf/ptypes" + "go.uber.org/zap" + + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/uuid" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/client" +) + +var ( + errEmptyTag = errors.New("gateway: tag is empty") + errEmptyUserID = errors.New("gateway: user id is empty") + errInvalidIDFormat = errors.New("gateway: invalid event id format") + errInvalidTimestamp = errors.New("gateway: invalid event timestamp") + errUnmarshalFailed = errors.New("gateway: failed to unmarshal event") +) + +type eventValidator interface { + validate(ctx context.Context) (string, error) +} + +type eventEvaluationValidator struct { + event *eventproto.Event + oldestTimestampDuration time.Duration + furthestTimestampDuration time.Duration + logger *zap.Logger +} + +type eventGoalValidator struct { + event *eventproto.Event + oldestTimestampDuration time.Duration + furthestTimestampDuration time.Duration + logger *zap.Logger +} + +type eventGoalBatchValidator struct { + event *eventproto.Event + oldestTimestampDuration time.Duration + furthestTimestampDuration time.Duration + logger *zap.Logger +} + +type eventMetricsValidator struct { + event *eventproto.Event + oldestTimestampDuration time.Duration + furthestTimestampDuration time.Duration + logger *zap.Logger +} + +func newEventValidator( + event *eventproto.Event, + oldestTimestampDuration, furthestTimestampDuration time.Duration, + logger *zap.Logger, +) eventValidator { + if ptypes.Is(event.Event, grpcGoalEvent) { + return &eventGoalValidator{ + event: event, + oldestTimestampDuration: oldestTimestampDuration, + furthestTimestampDuration: furthestTimestampDuration, + logger: logger, + } + } + if ptypes.Is(event.Event, grpcGoalBatchEvent) { + return &eventGoalBatchValidator{ + event: event, + oldestTimestampDuration: oldestTimestampDuration, + furthestTimestampDuration: furthestTimestampDuration, + 
logger: logger, + } + } + if ptypes.Is(event.Event, grpcEvaluationEvent) { + return &eventEvaluationValidator{ + event: event, + oldestTimestampDuration: oldestTimestampDuration, + furthestTimestampDuration: furthestTimestampDuration, + logger: logger, + } + } + if ptypes.Is(event.Event, grpcMetricsEvent) { + return &eventMetricsValidator{ + event: event, + oldestTimestampDuration: oldestTimestampDuration, + furthestTimestampDuration: furthestTimestampDuration, + logger: logger, + } + } + return nil +} + +func (v *eventGoalValidator) validate(ctx context.Context) (string, error) { + if err := uuid.ValidateUUID(v.event.Id); err != nil { + v.logger.Warn( + "Failed to validate goal event id format", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("id", v.event.Id), + )..., + ) + return codeInvalidID, errInvalidIDFormat + } + ev, err := v.unmarshal(ctx) + if err != nil { + return codeUnmarshalFailed, errUnmarshalFailed + } + if !validateTimestamp(ev.Timestamp, v.oldestTimestampDuration, v.furthestTimestampDuration) { + v.logger.Debug( + "Failed to validate goal event timestamp", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.String("id", v.event.Id), + zap.Int64("timestamp", ev.Timestamp), + )..., + ) + return codeInvalidTimestamp, errInvalidTimestamp + } + return "", nil +} + +func (v *eventGoalValidator) unmarshal(ctx context.Context) (*eventproto.GoalEvent, error) { + ev := &eventproto.GoalEvent{} + if err := ptypes.UnmarshalAny(v.event.Event, ev); err != nil { + v.logger.Error( + "Failed to extract goal event", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("id", v.event.Id), + )..., + ) + return nil, err + } + return ev, nil +} + +func (v *eventGoalBatchValidator) validate(ctx context.Context) (string, error) { + if err := uuid.ValidateUUID(v.event.Id); err != nil { + v.logger.Warn( + "Failed to validate goal batch event id format", + log.FieldsFromImcomingContext(ctx).AddFields( + 
zap.Error(err), + zap.String("id", v.event.Id), + )..., + ) + return codeInvalidID, errInvalidIDFormat + } + ev, err := v.unmarshal(ctx) + if err != nil { + return codeUnmarshalFailed, errUnmarshalFailed + } + if ev.UserId == "" { + v.logger.Error( + "Failed to validate goal batch event. User id is empty", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(errEmptyUserID), + zap.String("id", v.event.Id), + )..., + ) + return codeEmptyUserID, errEmptyUserID + } + for _, ugeot := range ev.UserGoalEventsOverTags { + if ugeot.Tag == "" { + v.logger.Error( + "Failed to validate goal batch event. Tag is empty", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(errEmptyTag), + zap.String("id", v.event.Id), + zap.String("userId", ev.UserId), + )..., + ) + return codeEmptyTag, errEmptyTag + } + } + return "", nil +} + +func (v *eventGoalBatchValidator) unmarshal(ctx context.Context) (*eventproto.GoalBatchEvent, error) { + ev := &eventproto.GoalBatchEvent{} + if err := ptypes.UnmarshalAny(v.event.Event, ev); err != nil { + v.logger.Error( + "Failed to extract goal batch event", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("id", v.event.Id), + )..., + ) + return nil, err + } + return ev, nil +} + +func (v *eventEvaluationValidator) validate(ctx context.Context) (string, error) { + if err := uuid.ValidateUUID(v.event.Id); err != nil { + v.logger.Warn( + "Failed to validate evaluation event id format", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("id", v.event.Id), + )..., + ) + return codeInvalidID, errInvalidIDFormat + } + ev, err := v.unmarshal(ctx) + if err != nil { + return codeUnmarshalFailed, errUnmarshalFailed + } + if !validateTimestamp(ev.Timestamp, v.oldestTimestampDuration, v.furthestTimestampDuration) { + v.logger.Debug( + "Failed to validate evaluation event timestamp", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.String("id", v.event.Id), + zap.Int64("timestamp", 
ev.Timestamp), + )..., + ) + return codeInvalidTimestamp, errInvalidTimestamp + } + return "", nil +} + +func (v *eventEvaluationValidator) unmarshal(ctx context.Context) (*eventproto.EvaluationEvent, error) { + ev := &eventproto.EvaluationEvent{} + if err := ptypes.UnmarshalAny(v.event.Event, ev); err != nil { + v.logger.Error( + "Failed to extract evaluation event", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("id", v.event.Id), + )..., + ) + return nil, err + } + return ev, nil +} + +// For metrics events we don't need to validate the timestamp +func (v *eventMetricsValidator) validate(ctx context.Context) (string, error) { + if err := uuid.ValidateUUID(v.event.Id); err != nil { + v.logger.Warn( + "Failed to validate metrics event id format", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("id", v.event.Id), + )..., + ) + return codeInvalidID, errInvalidIDFormat + } + _, err := v.unmarshal(ctx) + if err != nil { + return codeUnmarshalFailed, errUnmarshalFailed + } + return "", nil +} + +func (v *eventMetricsValidator) unmarshal(ctx context.Context) (*eventproto.MetricsEvent, error) { + ev := &eventproto.MetricsEvent{} + if err := ptypes.UnmarshalAny(v.event.Event, ev); err != nil { + v.logger.Error( + "Failed to extract metrics event", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("id", v.event.Id), + )..., + ) + return nil, err + } + return ev, nil +} + +// validateTimestamp limits date range of the given timestamp +// because we can't stream data outside the allowed bounds into a persistent datastore. 
+func validateTimestamp( + timestamp int64, + oldestTimestampDuration, furthestTimestampDuration time.Duration, +) bool { + given := time.Unix(timestamp, 0) + maxPast := time.Now().Add(-oldestTimestampDuration) + if given.Before(maxPast) { + return false + } + maxFuture := time.Now().Add(furthestTimestampDuration) + return !given.After(maxFuture) +} diff --git a/pkg/gateway/api/metrics.go b/pkg/gateway/api/metrics.go new file mode 100644 index 000000000..049474248 --- /dev/null +++ b/pkg/gateway/api/metrics.go @@ -0,0 +1,110 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "sync" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/bucketeer-io/bucketeer/pkg/metrics" +) + +const ( + callerGatewayService = "GatewayService" + callerTrackHandler = "TrackHandler" + + typeFeatures = "Features" + typeSegmentUsers = "SegmentUsers" + typeAPIKey = "APIKey" + typeRegisterEvent = "RegisterEvent" + typeEvaluation = "Evaluation" + typeGoal = "Goal" + typeGoalBatch = "GoalBatch" + typeMetrics = "Metrics" + typeUnknown = "Unknown" + typeHTTPTrack = "HTTPTrack" + + cacheLayerExternal = "External" + + codeHit = "Hit" + codeMiss = "Miss" + + codeOK = "OK" + codeInvalidID = "InvalidID" + codeInvalidTimestamp = "InvalidTimestamp" + codeEmptyTag = "EmptyTag" + codeEmptyUserID = "EmptyUserID" + codeInvalidTimestampRequest = "InvalidTimestampRequest" + codeUpsertUserEvaluationFailed = "UpsertUserEvaluationFailed" + codeUnmarshalFailed = "UnmarshalFailed" + codeMarshalAnyFailed = "MarshalAnyFailed" + codeEvaluationConversionFailed = "EvaluationConversionFailed" + codeInvalidType = "InvalidType" + codeNonRepeatableError = "NonRepeatableError" + codeRepeatableError = "RepeatableError" + codeInvalidURLParams = "InvalidURLParams" +) + +var ( + registerOnce sync.Once + + /* TODO: After deleting "gateway" service, we need to do the following things: + 1. Rename cacheCounter to grpccacheCounter + 2. Rename api_cache_requests_total to api_grpc_cache_requests_total + 3. Rename api_register_events_total to api_grpc_register_events_total + 4. Rename restCacheCounter to cacheCounter + 5. Rename api_rest_cache_requests_total to api_cache_requests_total + 6. 
Rename api_rest_register_events_total to api_register_events_total + */ + + cacheCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "gateway", + Name: "api_cache_requests_total", + Help: "Total number of cache requests", + }, []string{"caller", "type", "layer", "code"}) + + eventCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "gateway", + Name: "api_register_events_total", + Help: "Total number of registered events", + }, []string{"caller", "type", "code"}) + + restCacheCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "gateway", + Name: "api_rest_cache_requests_total", + Help: "Total number of cache requests", + }, []string{"caller", "type", "layer", "code"}) + + restEventCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "gateway", + Name: "api_rest_register_events_total", + Help: "Total number of registered events", + }, []string{"caller", "type", "code"}) +) + +func registerMetrics(r metrics.Registerer) { + registerOnce.Do(func() { + r.MustRegister(cacheCounter, eventCounter) + }) +} diff --git a/pkg/gateway/api/trackhandler.go b/pkg/gateway/api/trackhandler.go new file mode 100644 index 000000000..9555dcb6c --- /dev/null +++ b/pkg/gateway/api/trackhandler.go @@ -0,0 +1,275 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "context" + "errors" + "net/http" + "strconv" + + "github.com/golang/protobuf/ptypes" + "go.uber.org/zap" + "golang.org/x/sync/singleflight" + + accountclient "github.com/bucketeer-io/bucketeer/pkg/account/client" + "github.com/bucketeer-io/bucketeer/pkg/cache" + cachev3 "github.com/bucketeer-io/bucketeer/pkg/cache/v3" + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + "github.com/bucketeer-io/bucketeer/pkg/uuid" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/client" +) + +const ( + urlParamKeyAPIKey = "apikey" + urlParamKeyUserID = "userid" + urlParamKeyGoalID = "goalid" + urlParamKeyTag = "tag" + urlParamKeyTimestamp = "timestamp" + urlParamKeyValue = "value" +) + +var ( + errAPIKeyEmpty = errors.New("gateway: api key is empty") + errUserIDEmpty = errors.New("gateway: user id is empty") + errGoalIDEmpty = errors.New("gateway: goal id is empty") + errTagEmpty = errors.New("gateway: tag is empty") + errTimestampEmpty = errors.New("gateway: timestamp is empty") + errTimestampInvalid = errors.New("gateway: timestamp is invalid") + errValueInvalid = errors.New("gateway: value is invalid") +) + +type TrackHandler struct { + accountClient accountclient.Client + goalBatchPublisher publisher.Publisher + environmentAPIKeyCache cachev3.EnvironmentAPIKeyCache + flightgroup singleflight.Group + opts *options + logger *zap.Logger +} + +func NewTrackHandler( + accountClient accountclient.Client, + gbp publisher.Publisher, + v3Cache cache.MultiGetCache, + opts ...Option) *TrackHandler { + + options := defaultOptions + for _, opt := range opts { + opt(&options) + } + if options.metrics != nil { + registerMetrics(options.metrics) + } + return &TrackHandler{ + accountClient: accountClient, + goalBatchPublisher: gbp, 
+ environmentAPIKeyCache: cachev3.NewEnvironmentAPIKeyCache(v3Cache), + opts: &options, + logger: options.logger.Named("trackhandler"), + } +} + +func (h *TrackHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { + ctx := req.Context() + if isContextCanceled(ctx) { + h.logger.Warn( + "Request was canceled", + log.FieldsFromImcomingContext(ctx)..., + ) + resp.WriteHeader(http.StatusBadRequest) + return + } + params, err := h.validateParams(req) + if err != nil { + h.logger.Warn( + "Invalid url parameters", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + resp.WriteHeader(http.StatusBadRequest) + return + } + envAPIKey, err := h.getEnvironmentAPIKey(ctx, params.apiKey) + if err != nil { + h.logger.Error( + "Failed to get environment api key", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", envAPIKey.EnvironmentNamespace), + zap.String("apiKey", params.apiKey), + )..., + ) + eventCounter.WithLabelValues(callerTrackHandler, typeHTTPTrack, codeNonRepeatableError).Inc() + if err == ErrInvalidAPIKey { + resp.WriteHeader(http.StatusForbidden) + return + } + resp.WriteHeader(http.StatusInternalServerError) + return + } + if err := checkEnvironmentAPIKey(envAPIKey, accountproto.APIKey_SDK); err != nil { + eventCounter.WithLabelValues(callerTrackHandler, typeHTTPTrack, codeNonRepeatableError).Inc() + resp.WriteHeader(http.StatusForbidden) + return + } + goalBatchEvent, err := h.createGoalBatchEvent(envAPIKey.EnvironmentNamespace, params) + if err != nil { + h.logger.Error( + "Failed to create goal batch event", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", envAPIKey.EnvironmentNamespace), + zap.String("apiKey", params.apiKey), + zap.String("userId", params.userID), + zap.String("goalId", params.goalID), + zap.Int64("timestamp", params.timestamp), + zap.Float64("value", params.value), + zap.String("tag", params.tag), + 
)..., + ) + eventCounter.WithLabelValues(callerTrackHandler, typeHTTPTrack, codeNonRepeatableError).Inc() + resp.WriteHeader(http.StatusInternalServerError) + return + } + if err := h.goalBatchPublisher.Publish(ctx, goalBatchEvent); err != nil { + eventCounter.WithLabelValues(callerTrackHandler, typeHTTPTrack, codeNonRepeatableError).Inc() + h.logger.Error( + "Failed to publish event", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", envAPIKey.EnvironmentNamespace), + )..., + ) + resp.WriteHeader(http.StatusInternalServerError) + return + } + eventCounter.WithLabelValues(callerTrackHandler, typeHTTPTrack, codeOK).Inc() + resp.WriteHeader(http.StatusOK) +} + +type params struct { + apiKey string + userID string + goalID string + tag string + timestamp int64 + value float64 +} + +func (h *TrackHandler) validateParams(req *http.Request) (*params, error) { + params := ¶ms{} + q := req.URL.Query() + apikey := q.Get(urlParamKeyAPIKey) + if apikey == "" { + eventCounter.WithLabelValues(callerTrackHandler, typeHTTPTrack, codeInvalidURLParams).Inc() + return nil, errAPIKeyEmpty + } + params.apiKey = apikey + userID := q.Get(urlParamKeyUserID) + if userID == "" { + eventCounter.WithLabelValues(callerTrackHandler, typeHTTPTrack, codeInvalidURLParams).Inc() + return nil, errUserIDEmpty + } + params.userID = userID + goalID := q.Get(urlParamKeyGoalID) + if goalID == "" { + eventCounter.WithLabelValues(callerTrackHandler, typeHTTPTrack, codeInvalidURLParams).Inc() + return nil, errGoalIDEmpty + } + params.goalID = goalID + tag := q.Get(urlParamKeyTag) + if tag == "" { + eventCounter.WithLabelValues(callerTrackHandler, typeHTTPTrack, codeInvalidURLParams).Inc() + return nil, errTagEmpty + } + params.tag = tag + timestampStr := q.Get(urlParamKeyTimestamp) + if timestampStr == "" { + eventCounter.WithLabelValues(callerTrackHandler, typeHTTPTrack, codeInvalidURLParams).Inc() + return nil, errTimestampEmpty + } + timestamp, err := 
strconv.ParseInt(timestampStr, 10, 64) + if err != nil { + eventCounter.WithLabelValues(callerTrackHandler, typeHTTPTrack, codeInvalidURLParams).Inc() + return nil, errTimestampInvalid + } + if !validateTimestamp(timestamp, h.opts.oldestEventTimestamp, h.opts.furthestEventTimestamp) { + eventCounter.WithLabelValues(callerTrackHandler, typeHTTPTrack, codeInvalidTimestamp).Inc() + return nil, errTimestampInvalid + } + params.timestamp = timestamp + valueStr := q.Get(urlParamKeyValue) + if valueStr != "" { + value, err := strconv.ParseFloat(valueStr, 64) + if err != nil { + eventCounter.WithLabelValues(callerTrackHandler, typeHTTPTrack, codeInvalidURLParams).Inc() + return nil, errValueInvalid + } + params.value = value + } + return params, nil +} + +func (h *TrackHandler) createGoalBatchEvent(environmentNamespace string, params *params) (*eventproto.Event, error) { + goalBatchEvent := &eventproto.GoalBatchEvent{ + UserId: params.userID, + UserGoalEventsOverTags: []*eventproto.UserGoalEventsOverTag{{ + Tag: params.tag, + UserGoalEvents: []*eventproto.UserGoalEvent{{ + Timestamp: params.timestamp, + GoalId: params.goalID, + Value: params.value, + }}, + }}, + } + any, err := ptypes.MarshalAny(goalBatchEvent) + if err != nil { + return nil, err + } + id, err := uuid.NewUUID() + if err != nil { + return nil, err + } + return &eventproto.Event{ + Id: id.String(), + Event: any, + EnvironmentNamespace: environmentNamespace, + }, nil +} + +func (h *TrackHandler) getEnvironmentAPIKey(ctx context.Context, id string) (*accountproto.EnvironmentAPIKey, error) { + k, err, _ := h.flightgroup.Do( + environmentAPIKeyFlightID(id), + func() (interface{}, error) { + return getEnvironmentAPIKey( + ctx, + id, + h.accountClient, + h.environmentAPIKeyCache, + callerTrackHandler, + h.logger, + ) + }, + ) + if err != nil { + return nil, err + } + envAPIKey := k.(*accountproto.EnvironmentAPIKey) + return envAPIKey, nil +} diff --git a/pkg/gateway/api/trackhandler_test.go 
b/pkg/gateway/api/trackhandler_test.go new file mode 100644 index 000000000..1f6c3849a --- /dev/null +++ b/pkg/gateway/api/trackhandler_test.go @@ -0,0 +1,243 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "errors" + "fmt" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + accountclientmock "github.com/bucketeer-io/bucketeer/pkg/account/client/mock" + cachev3mock "github.com/bucketeer-io/bucketeer/pkg/cache/v3/mock" + "github.com/bucketeer-io/bucketeer/pkg/log" + publishermock "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher/mock" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" +) + +func TestNewTrackHandler(t *testing.T) { + t.Parallel() + h := NewTrackHandler(nil, nil, nil) + assert.IsType(t, &TrackHandler{}, h) +} + +func TestServeHTTP(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + now := time.Now() + + patterns := map[string]struct { + setup func(*testing.T, *TrackHandler) + input *http.Request + expected int + }{ + "fail: bad params": { + input: httptest.NewRequest("GET", + "/track?apikey=akey&userid=uid&goalid=gid&tag=t×tamp=abc", + nil), + expected: http.StatusBadRequest, + }, + "fail: publish error": { + setup: func(t *testing.T, h *TrackHandler) { + 
h.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + h.goalBatchPublisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return(errors.New("internal")).MaxTimes(1) + }, + input: httptest.NewRequest("GET", + fmt.Sprintf("/track?apikey=akey&userid=uid&goalid=gid&tag=t×tamp=%d", now.Unix()), + nil), + expected: http.StatusInternalServerError, + }, + "success: without value": { + setup: func(t *testing.T, h *TrackHandler) { + h.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + h.goalBatchPublisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil).MaxTimes(1) + }, + input: httptest.NewRequest("GET", + fmt.Sprintf("/track?apikey=akey&userid=uid&goalid=gid&tag=t×tamp=%d", now.Unix()), + nil), + expected: http.StatusOK, + }, + "success: with value": { + setup: func(t *testing.T, h *TrackHandler) { + h.environmentAPIKeyCache.(*cachev3mock.MockEnvironmentAPIKeyCache).EXPECT().Get(gomock.Any()).Return( + &accountproto.EnvironmentAPIKey{ + EnvironmentNamespace: "ns0", + ApiKey: &accountproto.APIKey{ + Id: "id-0", + Role: accountproto.APIKey_SDK, + Disabled: false, + }, + }, nil) + h.goalBatchPublisher.(*publishermock.MockPublisher).EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil).MaxTimes(1) + }, + input: httptest.NewRequest("GET", + fmt.Sprintf("/track?apikey=akey&userid=uid&goalid=gid&tag=t×tamp=%d&value=1.234", now.Unix()), + nil), + expected: http.StatusOK, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + h := 
newTrackHandlerWithMock(t, mockController) + if p.setup != nil { + p.setup(t, h) + } + actual := httptest.NewRecorder() + h.ServeHTTP(actual, p.input) + assert.Equal(t, p.expected, actual.Code) + }) + } +} + +func TestValidateParams(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + now := time.Now() + + patterns := map[string]struct { + input *http.Request + expected *params + expectedErr error + }{ + "err: errAPIKeyEmpty": { + input: httptest.NewRequest("GET", + fmt.Sprintf("/track?userid=uid&goalid=gid&tag=t×tamp=%d&value=1.234", now.Unix()), + nil), + expected: nil, + expectedErr: errAPIKeyEmpty, + }, + "err: errUserIDEmpty": { + input: httptest.NewRequest("GET", + fmt.Sprintf("/track?apikey=akey&goalid=gid&tag=t×tamp=%d&value=1.234", now.Unix()), + nil), + expected: nil, + expectedErr: errUserIDEmpty, + }, + "err: errGoalIDEmpty": { + input: httptest.NewRequest("GET", + fmt.Sprintf("/track?apikey=akey&userid=uid&tag=t×tamp=%d&value=1.234", now.Unix()), + nil), + expected: nil, + expectedErr: errGoalIDEmpty, + }, + "err: errTagEmpty": { + input: httptest.NewRequest("GET", + fmt.Sprintf("/track?apikey=akey&userid=uid&goalid=gid×tamp=%d&value=1.234", now.Unix()), + nil), + expected: nil, + expectedErr: errTagEmpty, + }, + "err: errTimestampEmpty": { + input: httptest.NewRequest("GET", + "/track?apikey=akey&userid=uid&goalid=gid&tag=t&value=1.234", + nil), + expected: nil, + expectedErr: errTimestampEmpty, + }, + "err: errTimestampInvalid": { + input: httptest.NewRequest("GET", + "/track?apikey=akey&userid=uid&goalid=gid&tag=t×tamp=abc&value=1.234", + nil), + expected: nil, + expectedErr: errTimestampInvalid, + }, + "err: errTimestampInvalid: out of window": { + input: httptest.NewRequest("GET", + fmt.Sprintf("/track?apikey=akey&userid=uid&goalid=gid&tag=t×tamp=%d&value=1.234", now.AddDate(0, 0, 2).Unix()), + nil), + expected: nil, + expectedErr: errTimestampInvalid, + }, + "err: errValueInvalid": { + 
input: httptest.NewRequest("GET", + fmt.Sprintf("/track?apikey=akey&userid=uid&goalid=gid&tag=t×tamp=%d&value=abc", now.Unix()), + nil), + expected: nil, + expectedErr: errValueInvalid, + }, + "success: without value": { + input: httptest.NewRequest("GET", + fmt.Sprintf("/track?apikey=akey&userid=uid&goalid=gid&tag=t×tamp=%d", now.Unix()), + nil), + expected: ¶ms{ + apiKey: "akey", + userID: "uid", + goalID: "gid", + tag: "t", + timestamp: now.Unix(), + value: float64(0), + }, + expectedErr: nil, + }, + "success: with value": { + input: httptest.NewRequest("GET", + fmt.Sprintf("/track?apikey=akey&userid=uid&goalid=gid&tag=t×tamp=%d&value=1.234", now.Unix()), + nil), + expected: ¶ms{ + apiKey: "akey", + userID: "uid", + goalID: "gid", + tag: "t", + timestamp: now.Unix(), + value: float64(1.234), + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + h := newTrackHandlerWithMock(t, mockController) + actual, err := h.validateParams(p.input) + assert.Equal(t, p.expected, actual) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func newTrackHandlerWithMock(t *testing.T, mockController *gomock.Controller) *TrackHandler { + logger, err := log.NewLogger() + require.NoError(t, err) + return &TrackHandler{ + accountClient: accountclientmock.NewMockClient(mockController), + goalBatchPublisher: publishermock.NewMockPublisher(mockController), + environmentAPIKeyCache: cachev3mock.NewMockEnvironmentAPIKeyCache(mockController), + opts: &defaultOptions, + logger: logger, + } +} diff --git a/pkg/gateway/api/validation.go b/pkg/gateway/api/validation.go new file mode 100644 index 000000000..84fe47d13 --- /dev/null +++ b/pkg/gateway/api/validation.go @@ -0,0 +1,129 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "context" + + "go.uber.org/zap" + + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/uuid" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/client" +) + +func (s *gatewayService) validateGoalEvent(ctx context.Context, id string, timeStamp int64) (string, error) { + if err := uuid.ValidateUUID(id); err != nil { + s.logger.Warn( + "Failed to validate goal event id format", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("id", id), + )..., + ) + return codeInvalidID, errInvalidIDFormat + } + if !validateTimestamp(timeStamp, s.opts.oldestEventTimestamp, s.opts.furthestEventTimestamp) { + s.logger.Debug( + "Failed to validate goal event timestamp", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.String("id", id), + zap.Int64("timestamp", timeStamp), + )..., + ) + return codeInvalidTimestamp, errInvalidTimestamp + } + return "", nil +} + +func (s *gatewayService) validateGoalBatchEvent( + ctx context.Context, + id string, + event *eventproto.GoalBatchEvent, +) (string, error) { + if err := uuid.ValidateUUID(id); err != nil { + s.logger.Warn( + "Failed to validate goal event id format", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("id", id), + )..., + ) + return codeInvalidID, errInvalidIDFormat + } + if event.UserId == "" { + s.logger.Error( + "Failed to validate goal batch event. 
User id is empty", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(errEmptyUserID), + zap.String("id", id), + )..., + ) + return codeEmptyUserID, errEmptyUserID + } + for _, ugeot := range event.UserGoalEventsOverTags { + if ugeot.Tag == "" { + s.logger.Error( + "Failed to validate goal batch event. Tag is empty", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(errEmptyTag), + zap.String("id", id), + zap.String("userId", event.UserId), + )..., + ) + return codeEmptyTag, errEmptyTag + } + } + return "", nil +} + +func (s *gatewayService) validateEvaluationEvent(ctx context.Context, id string, timeStamp int64) (string, error) { + if err := uuid.ValidateUUID(id); err != nil { + s.logger.Warn( + "Failed to validate evaluation event id format", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("id", id), + )..., + ) + return codeInvalidID, errInvalidIDFormat + } + if !validateTimestamp(timeStamp, s.opts.oldestEventTimestamp, s.opts.furthestEventTimestamp) { + s.logger.Debug( + "Failed to validate evaluation event timestamp", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.String("id", id), + zap.Int64("timestamp", timeStamp), + )..., + ) + return codeInvalidTimestamp, errInvalidTimestamp + } + return "", nil +} + +// For metrics events we don't need to validate the timestamp +func (s *gatewayService) validateMetricsEvent(ctx context.Context, id string) (string, error) { + if err := uuid.ValidateUUID(id); err != nil { + s.logger.Warn( + "Failed to validate evaluation event id format", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("id", id), + )..., + ) + return codeInvalidID, errInvalidIDFormat + } + return "", nil +} diff --git a/pkg/gateway/api/validation_test.go b/pkg/gateway/api/validation_test.go new file mode 100644 index 000000000..9a2062e0c --- /dev/null +++ b/pkg/gateway/api/validation_test.go @@ -0,0 +1,478 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/log" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/client" + "github.com/bucketeer-io/bucketeer/proto/feature" +) + +const ( + oldestTimestampDuration = 24 * time.Hour + furthestTimestampDuration = 24 * time.Hour +) + +func TestNewEventValidator(t *testing.T) { + t.Parallel() + bEvaluationEvent, err := proto.Marshal(&eventproto.EvaluationEvent{}) + if err != nil { + t.Fatal("could not serialize evaluation event") + } + bGoalEvent, err := proto.Marshal(&eventproto.GoalEvent{}) + if err != nil { + t.Fatal("could not serialize goal event") + } + bGoalBatchEvent, err := proto.Marshal(&eventproto.GoalBatchEvent{}) + if err != nil { + t.Fatal("could not serialize goal batch event") + } + bMetricsEvent, err := proto.Marshal(&eventproto.MetricsEvent{}) + if err != nil { + t.Fatal("could not serialize metrics event") + } + patterns := map[string]struct { + input *eventproto.Event + expected eventValidator + }{ + "evaluationValidator": { + input: &eventproto.Event{ + Id: newUUID(t), + Event: &any.Any{ + TypeUrl: "github.com/bucketeer-io/bucketeer/proto/event/client/bucketeer.event.client.EvaluationEvent", + Value: bEvaluationEvent, + }, + }, + expected: 
&eventEvaluationValidator{}, + }, + "GoalValidator": { + input: &eventproto.Event{ + Id: newUUID(t), + Event: &any.Any{ + TypeUrl: "github.com/bucketeer-io/bucketeer/proto/event/client/bucketeer.event.client.GoalEvent", + Value: bGoalEvent, + }, + }, + expected: &eventGoalValidator{}, + }, + "GoalBatchValidator": { + input: &eventproto.Event{ + Id: newUUID(t), + Event: &any.Any{ + TypeUrl: "github.com/bucketeer-io/bucketeer/proto/event/client/bucketeer.event.client.GoalBatchEvent", + Value: bGoalBatchEvent, + }, + }, + expected: &eventGoalBatchValidator{}, + }, + "MetricsEvent": { + input: &eventproto.Event{ + Id: newUUID(t), + Event: &any.Any{ + TypeUrl: "github.com/bucketeer-io/bucketeer/proto/event/client/bucketeer.event.client.MetricsEvent", + Value: bMetricsEvent, + }, + }, + expected: &eventMetricsValidator{}, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + logger, _ := log.NewLogger() + actual := newEventValidator(p.input, oldestTimestampDuration, furthestTimestampDuration, logger) + assert.IsType(t, p.expected, actual) + }) + } +} + +func TestValidateTimestamp(t *testing.T) { + testcases := []struct { + timestamp int64 + expected bool + }{ + { + timestamp: time.Now().Unix(), + expected: true, + }, + { + timestamp: time.Now().AddDate(0, 0, -2).Unix(), + expected: false, + }, + { + timestamp: time.Now().AddDate(0, 0, 2).Unix(), + expected: false, + }, + } + for i, tc := range testcases { + des := fmt.Sprintf("index %d", i) + res := validateTimestamp(tc.timestamp, oldestTimestampDuration, furthestTimestampDuration) + assert.Equal(t, tc.expected, res, des) + } +} + +func TestValidateGoalEvent(t *testing.T) { + t.Parallel() + patterns := map[string]struct { + inputFunc func() *eventproto.Event + expected string + expectedErr error + }{ + "invalid uuid": { + inputFunc: func() *eventproto.Event { + return &eventproto.Event{ + Id: "0efe416e 2fd2 4996 c5c3 194f05444f1f", + } + }, + expected: codeInvalidID, + expectedErr: 
errInvalidIDFormat, + }, + "unmarshal fails": { + inputFunc: func() *eventproto.Event { + return &eventproto.Event{ + Id: "0efe416e-2fd2-4996-b5c3-194f05444f1f", + } + }, + expected: codeUnmarshalFailed, + expectedErr: errUnmarshalFailed, + }, + "invalid timestamp": { + inputFunc: func() *eventproto.Event { + bGoalEvent, err := proto.Marshal(&eventproto.GoalEvent{ + Timestamp: int64(999999999999999), + }) + if err != nil { + t.Fatal("could not serialize event") + } + return &eventproto.Event{ + Id: "0efe416e-2fd2-4996-b5c3-194f05444f1f", + Event: &any.Any{ + TypeUrl: "github.com/bucketeer-io/bucketeer/proto/event/client/bucketeer.event.client.GoalEvent", + Value: bGoalEvent, + }, + } + }, + expected: codeInvalidTimestamp, + expectedErr: errInvalidTimestamp, + }, + "success": { + inputFunc: func() *eventproto.Event { + bGoalEvent, err := proto.Marshal(&eventproto.GoalEvent{ + Timestamp: time.Now().Unix(), + Evaluations: []*feature.Evaluation{ + { + Id: "evaluation-id", + }, + }, + }) + if err != nil { + t.Fatal("could not serialize event") + } + return &eventproto.Event{ + Id: "0efe416e-2fd2-4996-b5c3-194f05444f1f", + Event: &any.Any{ + TypeUrl: "github.com/bucketeer-io/bucketeer/proto/event/client/bucketeer.event.client.GoalEvent", + Value: bGoalEvent, + }, + } + }, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + logger, _ := log.NewLogger() + v := &eventGoalValidator{ + event: p.inputFunc(), + logger: logger, + oldestTimestampDuration: 24 * time.Hour, + furthestTimestampDuration: 24 * time.Hour, + } + actual, err := v.validate(context.Background()) + assert.Equal(t, p.expected, actual) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestValidateGoalBatchEvent(t *testing.T) { + t.Parallel() + patterns := map[string]struct { + inputFunc func() *eventproto.Event + expected string + expectedErr error + }{ + "err: invalid uuid": { + inputFunc: func() *eventproto.Event { + return &eventproto.Event{ + Id: "0efe416e 2fd2 4996 c5c3 
194f05444f1f", + } + }, + expected: codeInvalidID, + expectedErr: errInvalidIDFormat, + }, + "err: unmarshal failed": { + inputFunc: func() *eventproto.Event { + return &eventproto.Event{ + Id: "0efe416e-2fd2-4996-b5c3-194f05444f1f", + } + }, + expected: codeUnmarshalFailed, + expectedErr: errUnmarshalFailed, + }, + "err: empty user id": { + inputFunc: func() *eventproto.Event { + bGoalBatchEvent, err := proto.Marshal(&eventproto.GoalBatchEvent{ + UserId: "", + }) + if err != nil { + t.Fatal("could not serialize event") + } + return &eventproto.Event{ + Id: "0efe416e-2fd2-4996-b5c3-194f05444f1f", + Event: &any.Any{ + TypeUrl: "github.com/bucketeer-io/bucketeer/proto/event/client/bucketeer.event.client.GoalBatchEvent", + Value: bGoalBatchEvent, + }, + } + }, + expected: codeEmptyUserID, + expectedErr: errEmptyUserID, + }, + "err: empty tag": { + inputFunc: func() *eventproto.Event { + bGoalBatchEvent, err := proto.Marshal(&eventproto.GoalBatchEvent{ + UserId: "0efe416e-2fd2-4996-b5c3-194f05444f1f", + UserGoalEventsOverTags: []*eventproto.UserGoalEventsOverTag{ + { + Tag: "", + }, + }, + }) + if err != nil { + t.Fatal("could not serialize event") + } + return &eventproto.Event{ + Id: "0efe416e-2fd2-4996-b5c3-194f05444f1f", + Event: &any.Any{ + TypeUrl: "github.com/bucketeer-io/bucketeer/proto/event/client/bucketeer.event.client.GoalBatchEvent", + Value: bGoalBatchEvent, + }, + } + }, + expected: codeEmptyTag, + expectedErr: errEmptyTag, + }, + "success": { + inputFunc: func() *eventproto.Event { + bGoalBatchEvent, err := proto.Marshal(&eventproto.GoalBatchEvent{ + UserId: "0efe416e-2fd2-4996-b5c3-194f05444f1f", + UserGoalEventsOverTags: []*eventproto.UserGoalEventsOverTag{ + { + Tag: "tag", + }, + }, + }) + if err != nil { + t.Fatal("could not serialize event") + } + return &eventproto.Event{ + Id: "0efe416e-2fd2-4996-b5c3-194f05444f1f", + Event: &any.Any{ + TypeUrl: "github.com/bucketeer-io/bucketeer/proto/event/client/bucketeer.event.client.GoalBatchEvent", + 
Value: bGoalBatchEvent, + }, + } + }, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + logger, _ := log.NewLogger() + v := &eventGoalBatchValidator{ + event: p.inputFunc(), + logger: logger, + oldestTimestampDuration: 24 * time.Hour, + furthestTimestampDuration: 24 * time.Hour, + } + actual, err := v.validate(context.Background()) + assert.Equal(t, p.expected, actual) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestValidateEvaluationEvent(t *testing.T) { + t.Parallel() + patterns := map[string]struct { + inputFunc func() *eventproto.Event + expected string + expectedErr error + }{ + "invalid uuid": { + inputFunc: func() *eventproto.Event { + return &eventproto.Event{ + Id: "0efe416e 2fd2 4996 c5c3 194f05444f1f", + } + }, + expected: codeInvalidID, + expectedErr: errInvalidIDFormat, + }, + "unmarshal fails": { + inputFunc: func() *eventproto.Event { + return &eventproto.Event{ + Id: "0efe416e-2fd2-4996-b5c3-194f05444f1f", + } + }, + expected: codeUnmarshalFailed, + expectedErr: errUnmarshalFailed, + }, + "invalid timestamp": { + inputFunc: func() *eventproto.Event { + bEvaluationEvent, err := proto.Marshal(&eventproto.EvaluationEvent{ + Timestamp: int64(999999999999999), + }) + if err != nil { + t.Fatal("could not serialize event") + } + return &eventproto.Event{ + Id: "0efe416e-2fd2-4996-b5c3-194f05444f1f", + Event: &any.Any{ + TypeUrl: "github.com/bucketeer-io/bucketeer/proto/event/client/bucketeer.event.client.EvaluationEvent", + Value: bEvaluationEvent, + }, + } + }, + expected: codeInvalidTimestamp, + expectedErr: errInvalidTimestamp, + }, + "success": { + inputFunc: func() *eventproto.Event { + bEvaluationEvent, err := proto.Marshal(&eventproto.EvaluationEvent{ + Timestamp: time.Now().Unix(), + }) + if err != nil { + t.Fatal("could not serialize event") + } + return &eventproto.Event{ + Id: "0efe416e-2fd2-4996-b5c3-194f05444f1f", + Event: &any.Any{ + TypeUrl: 
"github.com/bucketeer-io/bucketeer/proto/event/client/bucketeer.event.client.EvaluationEvent", + Value: bEvaluationEvent, + }, + } + }, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + logger, _ := log.NewLogger() + v := &eventEvaluationValidator{ + event: p.inputFunc(), + logger: logger, + oldestTimestampDuration: 24 * time.Hour, + furthestTimestampDuration: 24 * time.Hour, + } + actual, err := v.validate(context.Background()) + assert.Equal(t, p.expected, actual) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestValidateMetrics(t *testing.T) { + t.Parallel() + patterns := map[string]struct { + inputFunc func() *eventproto.Event + expected string + expectedErr error + }{ + "invalid uuid": { + inputFunc: func() *eventproto.Event { + return &eventproto.Event{ + Id: "0efe416e 2fd2 4996 c5c3 194f05444f1f", + } + }, + expected: codeInvalidID, + expectedErr: errInvalidIDFormat, + }, + "unmarshal fails": { + inputFunc: func() *eventproto.Event { + return &eventproto.Event{ + Id: "0efe416e-2fd2-4996-b5c3-194f05444f1f", + } + }, + expected: codeUnmarshalFailed, + expectedErr: errUnmarshalFailed, + }, + "invalid timestamp": { + inputFunc: func() *eventproto.Event { + b, err := proto.Marshal(&eventproto.MetricsEvent{ + Timestamp: int64(999999999999999), + }) + if err != nil { + t.Fatal("could not serialize event") + } + return &eventproto.Event{ + Id: "0efe416e-2fd2-4996-b5c3-194f05444f1f", + Event: &any.Any{ + TypeUrl: "github.com/bucketeer-io/bucketeer/proto/event/client/bucketeer.event.client.MetricsEvent", + Value: b, + }, + } + }, + expected: "", + expectedErr: nil, + }, + "success": { + inputFunc: func() *eventproto.Event { + b, err := proto.Marshal(&eventproto.MetricsEvent{ + Timestamp: time.Now().Unix(), + }) + if err != nil { + t.Fatal("could not serialize event") + } + return &eventproto.Event{ + Id: "0efe416e-2fd2-4996-b5c3-194f05444f1f", + Event: &any.Any{ + TypeUrl: 
"github.com/bucketeer-io/bucketeer/proto/event/client/bucketeer.event.client.MetricsEvent", + Value: b, + }, + } + }, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + logger, _ := log.NewLogger() + v := &eventMetricsValidator{ + event: p.inputFunc(), + oldestTimestampDuration: oldestTimestampDuration, + furthestTimestampDuration: furthestTimestampDuration, + logger: logger, + } + actual, err := v.validate(context.Background()) + assert.Equal(t, p.expected, actual) + assert.Equal(t, p.expectedErr, err) + }) + } +} diff --git a/pkg/gateway/client/BUILD.bazel b/pkg/gateway/client/BUILD.bazel new file mode 100644 index 000000000..c885d7369 --- /dev/null +++ b/pkg/gateway/client/BUILD.bazel @@ -0,0 +1,17 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "client.go", + "credentials.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/gateway/client", + visibility = ["//visibility:public"], + deps = [ + "//pkg/rpc/client:go_default_library", + "//proto/gateway:go_default_library", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//credentials:go_default_library", + ], +) diff --git a/pkg/gateway/client/client.go b/pkg/gateway/client/client.go new file mode 100644 index 000000000..57ea6c0a2 --- /dev/null +++ b/pkg/gateway/client/client.go @@ -0,0 +1,49 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "google.golang.org/grpc" + + rpcclient "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + proto "github.com/bucketeer-io/bucketeer/proto/gateway" +) + +type Client interface { + proto.GatewayClient + Close() +} + +type client struct { + proto.GatewayClient + address string + connection *grpc.ClientConn +} + +func NewClient(addr, certPath string, opts ...rpcclient.Option) (Client, error) { + conn, err := rpcclient.NewClientConn(addr, certPath, opts...) + if err != nil { + return nil, err + } + return &client{ + GatewayClient: proto.NewGatewayClient(conn), + address: addr, + connection: conn, + }, nil +} + +func (c *client) Close() { + c.connection.Close() +} diff --git a/pkg/gateway/client/credentials.go b/pkg/gateway/client/credentials.go new file mode 100644 index 000000000..b44b856e0 --- /dev/null +++ b/pkg/gateway/client/credentials.go @@ -0,0 +1,47 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package client + +import ( + "context" + "io/ioutil" + "strings" + + "google.golang.org/grpc/credentials" +) + +type perRPCCredentials struct { + APIKey string +} + +func NewPerRPCCredentials(apiKeyPath string) (credentials.PerRPCCredentials, error) { + data, err := ioutil.ReadFile(apiKeyPath) + if err != nil { + return nil, err + } + return perRPCCredentials{ + APIKey: strings.TrimSpace(string(data)), + }, nil +} + +func (c perRPCCredentials) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + return map[string]string{ + "authorization": c.APIKey, + }, nil +} + +func (c perRPCCredentials) RequireTransportSecurity() bool { + return true +} diff --git a/pkg/gateway/cmd/BUILD.bazel b/pkg/gateway/cmd/BUILD.bazel new file mode 100644 index 000000000..465e70293 --- /dev/null +++ b/pkg/gateway/cmd/BUILD.bazel @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["server.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/gateway/cmd", + visibility = ["//visibility:public"], + deps = [ + "//pkg/account/client:go_default_library", + "//pkg/cache/v3:go_default_library", + "//pkg/cli:go_default_library", + "//pkg/feature/client:go_default_library", + "//pkg/gateway/api:go_default_library", + "//pkg/health:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/pubsub:go_default_library", + "//pkg/pubsub/publisher:go_default_library", + "//pkg/redis/v3:go_default_library", + "//pkg/rest:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/rpc/client:go_default_library", + "//pkg/storage/v2/bigtable:go_default_library", + "@in_gopkg_alecthomas_kingpin_v2//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/gateway/cmd/server.go b/pkg/gateway/cmd/server.go new file mode 100644 index 000000000..9f8cef79a --- /dev/null +++ b/pkg/gateway/cmd/server.go @@ -0,0 +1,350 @@ +// Copyright 2022 The Bucketeer 
Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cmd + +import ( + "context" + "time" + + "go.uber.org/zap" + kingpin "gopkg.in/alecthomas/kingpin.v2" + + accountclient "github.com/bucketeer-io/bucketeer/pkg/account/client" + cachev3 "github.com/bucketeer-io/bucketeer/pkg/cache/v3" + "github.com/bucketeer-io/bucketeer/pkg/cli" + featureclient "github.com/bucketeer-io/bucketeer/pkg/feature/client" + "github.com/bucketeer-io/bucketeer/pkg/gateway/api" + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/pubsub" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + redisv3 "github.com/bucketeer-io/bucketeer/pkg/redis/v3" + "github.com/bucketeer-io/bucketeer/pkg/rest" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + bigtable "github.com/bucketeer-io/bucketeer/pkg/storage/v2/bigtable" +) + +const command = "server" + +type server struct { + *kingpin.CmdClause + port *int + project *string + bigtableInstance *string + goalTopic *string + goalTopicProject *string + goalBatchTopic *string + evaluationTopic *string + evaluationTopicProject *string + userTopic *string + metricsTopic *string + publishNumGoroutines *int + publishTimeout *time.Duration + featureService *string + accountService *string + redisServerName *string + redisAddr *string + certPath *string + keyPath *string + serviceTokenPath 
*string + redisPoolMaxIdle *int + redisPoolMaxActive *int + oldestEventTimestamp *time.Duration + furthestEventTimestamp *time.Duration +} + +func RegisterCommand(r cli.CommandRegistry, p cli.ParentCommand) cli.Command { + cmd := p.Command(command, "Start the gRPC server") + server := &server{ + CmdClause: cmd, + port: cmd.Flag("port", "Port to bind to.").Default("9090").Int(), + project: cmd.Flag("project", "GCP Project id to use for PubSub.").Required().String(), + bigtableInstance: cmd.Flag("bigtable-instance", "Instance name to use Bigtable.").Required().String(), + goalTopic: cmd.Flag("goal-topic", "Topic to use for publishing GoalEvent.").Required().String(), + goalTopicProject: cmd.Flag( + "goal-topic-project", + "GCP Project id to use for PubSub to publish GoalEvent.", + ).String(), + goalBatchTopic: cmd.Flag( + "goal-batch-topic", + "Topic to use for publishing GoalBatchEvent.", + ).Required().String(), + evaluationTopic: cmd.Flag( + "evaluation-topic", + "Topic to use for publishing EvaluationEvent.", + ).Required().String(), + evaluationTopicProject: cmd.Flag( + "evaluation-topic-project", + "GCP Project id to use for PubSub to publish EvaluationEvent.", + ).String(), + // FIXME: This flag will be required once user feature is fully released. 
+ userTopic: cmd.Flag("user-topic", "Topic to use for publishing UserEvent.").String(), + metricsTopic: cmd.Flag("metrics-topic", "Topic to use for publishing MetricsEvent.").String(), + publishNumGoroutines: cmd.Flag( + "publish-num-goroutines", + "The number of goroutines for publishing.", + ).Default("0").Int(), + publishTimeout: cmd.Flag( + "publish-timeout", + "The maximum time to publish a bundle of messages.", + ).Default("1m").Duration(), + featureService: cmd.Flag( + "feature-service", + "bucketeer-feature-service address.", + ).Default("feature:9090").String(), + accountService: cmd.Flag( + "account-service", + "bucketeer-account-service address.", + ).Default("account:9090").String(), + redisServerName: cmd.Flag("redis-server-name", "Name of the redis.").Required().String(), + redisAddr: cmd.Flag("redis-addr", "Address of the redis.").Required().String(), + certPath: cmd.Flag("cert", "Path to TLS certificate.").Required().String(), + keyPath: cmd.Flag("key", "Path to TLS key.").Required().String(), + serviceTokenPath: cmd.Flag("service-token", "Path to service token.").Required().String(), + redisPoolMaxIdle: cmd.Flag( + "redis-pool-max-idle", + "Maximum number of idle connections in the pool.", + ).Default("5").Int(), + redisPoolMaxActive: cmd.Flag( + "redis-pool-max-active", + "Maximum number of connections allocated by the pool at a given time.", + ).Default("10").Int(), + oldestEventTimestamp: cmd.Flag( + "oldest-event-timestamp", + "The duration of oldest event timestamp from processing time to allow.", + ).Default("24h").Duration(), + furthestEventTimestamp: cmd.Flag( + "furthest-event-timestamp", + "The duration of furthest event timestamp from processing time to allow.", + ).Default("24h").Duration(), + } + r.RegisterCommand(server) + return server +} + +func (s *server) Run(ctx context.Context, metrics metrics.Metrics, logger *zap.Logger) error { + registerer := metrics.DefaultRegisterer() + + btClient, err := s.createBigtableClient(ctx, 
registerer, logger) + if err != nil { + return err + } + defer btClient.Close() + + pubsubCtx, pubsubCancel := context.WithTimeout(ctx, 5*time.Second) + defer pubsubCancel() + pubsubClient, err := pubsub.NewClient( + pubsubCtx, + *s.project, + pubsub.WithMetrics(registerer), + pubsub.WithLogger(logger), + ) + if err != nil { + return err + } + + publishOptions := []pubsub.PublishOption{pubsub.WithPublishTimeout(*s.publishTimeout)} + if *s.publishNumGoroutines > 0 { + publishOptions = append(publishOptions, pubsub.WithPublishNumGoroutines(*s.publishNumGoroutines)) + } + + var goalTopicProject string + if *s.goalTopicProject == "" { + goalTopicProject = *s.project + } else { + goalTopicProject = *s.goalTopicProject + } + goalPublisher, err := pubsubClient.CreatePublisherInProject(*s.goalTopic, goalTopicProject, publishOptions...) + if err != nil { + return err + } + defer goalPublisher.Stop() + + var evaluationTopicProject string + if *s.evaluationTopicProject == "" { + evaluationTopicProject = *s.project + } else { + evaluationTopicProject = *s.evaluationTopicProject + } + evaluationPublisher, err := pubsubClient.CreatePublisherInProject( + *s.evaluationTopic, + evaluationTopicProject, + publishOptions..., + ) + if err != nil { + return err // BUG FIX: was `return nil`, which reported success and aborted startup silently + } + defer evaluationPublisher.Stop() + + // FIXME: This condition won't be necessary once user feature is fully released. + var userPublisher publisher.Publisher + if *s.userTopic != "" { + userPublisher, err = pubsubClient.CreatePublisherInProject(*s.userTopic, *s.project, publishOptions...) + if err != nil { + return err + } + defer userPublisher.Stop() + } + + // FIXME: This condition won't be necessary once user feature is fully released. + var metricsPublisher publisher.Publisher + if *s.metricsTopic != "" { + metricsPublisher, err = pubsubClient.CreatePublisherInProject(*s.metricsTopic, *s.project, publishOptions...)
+ if err != nil { + return err + } + defer metricsPublisher.Stop() + } + + goalBatchPublisher, err := pubsubClient.CreatePublisherInProject(*s.goalBatchTopic, *s.project, publishOptions...) + if err != nil { + return err + } + defer goalBatchPublisher.Stop() + + creds, err := client.NewPerRPCCredentials(*s.serviceTokenPath) + if err != nil { + return err + } + + featureClient, err := featureclient.NewClient(*s.featureService, *s.certPath, + client.WithPerRPCCredentials(creds), + client.WithDialTimeout(30*time.Second), + client.WithBlock(), + client.WithMetrics(registerer), + client.WithLogger(logger), + ) + if err != nil { + return err + } + defer featureClient.Close() + + accountClient, err := accountclient.NewClient(*s.accountService, *s.certPath, + client.WithPerRPCCredentials(creds), + client.WithDialTimeout(30*time.Second), + client.WithBlock(), + client.WithMetrics(registerer), + client.WithLogger(logger), + ) + if err != nil { + return err + } + defer accountClient.Close() + + redisV3Client, err := redisv3.NewClient( + *s.redisAddr, + redisv3.WithPoolSize(*s.redisPoolMaxActive), + redisv3.WithMinIdleConns(*s.redisPoolMaxIdle), + redisv3.WithServerName(*s.redisServerName), + redisv3.WithMetrics(registerer), + redisv3.WithLogger(logger), + ) + if err != nil { + return err + } + defer redisV3Client.Close() + redisV3Cache := cachev3.NewRedisCache(redisV3Client) + + service := api.NewGrpcGatewayService( + btClient, + featureClient, + accountClient, + goalPublisher, + goalBatchPublisher, + evaluationPublisher, + userPublisher, + metricsPublisher, + redisV3Cache, + api.WithOldestEventTimestamp(*s.oldestEventTimestamp), + api.WithFurthestEventTimestamp(*s.furthestEventTimestamp), + api.WithMetrics(registerer), + api.WithLogger(logger), + ) + + trackHandler := api.NewTrackHandler( + accountClient, + goalBatchPublisher, + redisV3Cache, + api.WithMetrics(registerer), + api.WithLogger(logger), + ) + + healthChecker := health.NewGrpcChecker( + 
health.WithTimeout(time.Second), + health.WithCheck("metrics", metrics.Check), + ) + go healthChecker.Run(ctx) + + server := rpc.NewServer(service, *s.certPath, *s.keyPath, + rpc.WithPort(*s.port), + rpc.WithMetrics(registerer), + rpc.WithLogger(logger), + rpc.WithService(healthChecker), + rpc.WithHandler("/health", healthChecker), + rpc.WithHandler("/track", trackHandler), + ) + defer server.Stop(10 * time.Second) + go server.Run() + + restHealthChecker := health.NewRestChecker( + api.Version, api.Service, + health.WithTimeout(time.Second), + health.WithCheck("metrics", metrics.Check), + ) + go restHealthChecker.Run(ctx) + + gatewayService := api.NewGatewayService( + btClient, + featureClient, + accountClient, + goalPublisher, + goalBatchPublisher, + evaluationPublisher, + userPublisher, + metricsPublisher, + redisV3Cache, + api.WithOldestEventTimestamp(*s.oldestEventTimestamp), + api.WithFurthestEventTimestamp(*s.furthestEventTimestamp), + api.WithMetrics(registerer), + api.WithLogger(logger), + ) + + httpServer := rest.NewServer( + *s.certPath, *s.keyPath, + rest.WithLogger(logger), + rest.WithService(gatewayService), + rest.WithService(restHealthChecker), + rest.WithMetrics(registerer), + ) + defer httpServer.Stop(10 * time.Second) + go httpServer.Run() + + <-ctx.Done() + return nil +} + +func (s *server) createBigtableClient( + ctx context.Context, + registerer metrics.Registerer, + logger *zap.Logger, +) (bigtable.Client, error) { + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + return bigtable.NewBigtableClient(ctx, *s.project, *s.bigtableInstance, + bigtable.WithMetrics(registerer), + bigtable.WithLogger(logger), + ) +} diff --git a/pkg/goalbatch/cmd/transformer/BUILD.bazel b/pkg/goalbatch/cmd/transformer/BUILD.bazel new file mode 100644 index 000000000..5524e3877 --- /dev/null +++ b/pkg/goalbatch/cmd/transformer/BUILD.bazel @@ -0,0 +1,22 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = 
"go_default_library", + srcs = ["transformer.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/goalbatch/cmd/transformer", + visibility = ["//visibility:public"], + deps = [ + "//pkg/cli:go_default_library", + "//pkg/goalbatch/transformer:go_default_library", + "//pkg/health:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/pubsub:go_default_library", + "//pkg/pubsub/publisher:go_default_library", + "//pkg/pubsub/puller:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/rpc/client:go_default_library", + "//pkg/user/client:go_default_library", + "@in_gopkg_alecthomas_kingpin_v2//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/goalbatch/cmd/transformer/transformer.go b/pkg/goalbatch/cmd/transformer/transformer.go new file mode 100644 index 000000000..653d10878 --- /dev/null +++ b/pkg/goalbatch/cmd/transformer/transformer.go @@ -0,0 +1,177 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package transformer + +import ( + "context" + "time" + + "go.uber.org/zap" + kingpin "gopkg.in/alecthomas/kingpin.v2" + + "github.com/bucketeer-io/bucketeer/pkg/cli" + tf "github.com/bucketeer-io/bucketeer/pkg/goalbatch/transformer" + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/pubsub" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + userclient "github.com/bucketeer-io/bucketeer/pkg/user/client" +) + +const command = "transformer" + +type transformer struct { + *kingpin.CmdClause + port *int + metricsTopic *string + project *string + userService *string + goalBatchTopic *string + goalBatchSubscription *string + goalTopic *string + maxMPS *int + numWorkers *int + certPath *string + keyPath *string + serviceTokenPath *string + pullerNumGoroutines *int + pullerMaxOutstandingMessages *int + pullerMaxOutstandingBytes *int +} + +func RegisterCommand(r cli.CommandRegistry, p cli.ParentCommand) cli.Command { + cmd := p.Command(command, "Start transformer server") + s := &transformer{ + CmdClause: cmd, + port: cmd.Flag("port", "Port to bind to.").Default("9090").Int(), + metricsTopic: cmd.Flag("metrics-topic", "Topic to use for publishing MetricsEvent.").String(), + project: cmd.Flag("project", "Google Cloud project name.").String(), + userService: cmd.Flag("user-service", "bucketeer-user-service address.").Default("user:9090").String(), + goalBatchTopic: cmd.Flag("goal-batch-topic", "Google PubSub topic name of incoming goal batch events.").String(), + goalBatchSubscription: cmd.Flag( + "goal-batch-subscription", + "Google PubSub subscription name of incoming goal batch event.", + ).String(), + goalTopic: cmd.Flag("goal-topic", "Google PubSub topic name of outgoing goal events.").String(), + maxMPS: 
cmd.Flag("max-mps", "Maximum messages should be handled in a second.").Default("5000").Int(), + numWorkers: cmd.Flag("num-workers", "Number of workers.").Default("1").Int(), + certPath: cmd.Flag("cert", "Path to TLS certificate.").Required().String(), + keyPath: cmd.Flag("key", "Path to TLS key.").Required().String(), + serviceTokenPath: cmd.Flag("service-token", "Path to service token.").Required().String(), + pullerNumGoroutines: cmd.Flag( + "puller-num-goroutines", + "Number of goroutines will be spawned to pull messages.", + ).Int(), + pullerMaxOutstandingMessages: cmd.Flag( + "puller-max-outstanding-messages", + "Maximum number of unprocessed messages.", + ).Int(), + pullerMaxOutstandingBytes: cmd.Flag("puller-max-outstanding-bytes", "Maximum size of unprocessed messages.").Int(), + } + r.RegisterCommand(s) + return s +} + +func (t *transformer) Run(ctx context.Context, metrics metrics.Metrics, logger *zap.Logger) error { + registerer := metrics.DefaultRegisterer() + + goalPublisher, goalBatchPuller, err := t.createPublisherPuller(ctx, registerer, logger) + if err != nil { + return err + } + defer goalPublisher.Stop() + + creds, err := client.NewPerRPCCredentials(*t.serviceTokenPath) + if err != nil { + return err + } + + userClient, err := userclient.NewClient(*t.userService, *t.certPath, + client.WithPerRPCCredentials(creds), + client.WithDialTimeout(30*time.Second), + client.WithBlock(), + client.WithMetrics(registerer), + client.WithLogger(logger), + ) + if err != nil { + return err + } + defer userClient.Close() + + goalBatchTransformer := tf.NewTransformer( + userClient, + goalBatchPuller, + goalPublisher, + tf.WithMaxMPS(*t.maxMPS), + tf.WithNumWorkers(*t.numWorkers), + tf.WithMetrics(registerer), + tf.WithLogger(logger), + ) + defer goalBatchTransformer.Stop() + go goalBatchTransformer.Run() // nolint:errcheck + + healthChecker := health.NewGrpcChecker( + health.WithTimeout(time.Second), + health.WithCheck("metrics", metrics.Check), + 
health.WithCheck("transformer", goalBatchTransformer.Check), + ) + go healthChecker.Run(ctx) + + server := rpc.NewServer(healthChecker, *t.certPath, *t.keyPath, + rpc.WithPort(*t.port), + rpc.WithMetrics(registerer), + rpc.WithLogger(logger), + rpc.WithHandler("/health", healthChecker), + ) + defer server.Stop(10 * time.Second) + go server.Run() + + <-ctx.Done() + return nil +} + +func (t *transformer) createPublisherPuller( + ctx context.Context, + registerer metrics.Registerer, + logger *zap.Logger, +) (publisher.Publisher, puller.Puller, error) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + client, err := pubsub.NewClient( + ctx, + *t.project, + pubsub.WithMetrics(registerer), + pubsub.WithLogger(logger), + ) + if err != nil { + return nil, nil, err + } + goalBatchPuller, err := client.CreatePuller(*t.goalBatchSubscription, *t.goalBatchTopic, + pubsub.WithNumGoroutines(*t.pullerNumGoroutines), + pubsub.WithMaxOutstandingMessages(*t.pullerMaxOutstandingMessages), + pubsub.WithMaxOutstandingBytes(*t.pullerMaxOutstandingBytes), + ) + if err != nil { + return nil, nil, err + } + goalPublisher, err := client.CreatePublisher(*t.goalTopic) + if err != nil { + return nil, nil, err + } + return goalPublisher, goalBatchPuller, nil +} diff --git a/pkg/goalbatch/transformer/BUILD.bazel b/pkg/goalbatch/transformer/BUILD.bazel new file mode 100644 index 000000000..d03aa4f67 --- /dev/null +++ b/pkg/goalbatch/transformer/BUILD.bazel @@ -0,0 +1,52 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "metrics.go", + "transformer.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/goalbatch/transformer", + visibility = ["//visibility:public"], + deps = [ + "//pkg/errgroup:go_default_library", + "//pkg/health:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/pubsub/publisher:go_default_library", + "//pkg/pubsub/puller:go_default_library", + 
"//pkg/pubsub/puller/codes:go_default_library", + "//pkg/user/client:go_default_library", + "//pkg/user/domain:go_default_library", + "//pkg/uuid:go_default_library", + "//proto/event/client:go_default_library", + "//proto/user:go_default_library", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_golang_protobuf//ptypes:go_default_library_gen", + "@com_github_prometheus_client_golang//prometheus:go_default_library", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["transformer_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/health:go_default_library", + "//pkg/log:go_default_library", + "//pkg/pubsub/publisher/mock:go_default_library", + "//pkg/pubsub/puller/mock:go_default_library", + "//pkg/user/client/mock:go_default_library", + "//proto/event/client:go_default_library", + "//proto/user:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_golang_protobuf//ptypes:go_default_library_gen", + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + "@io_bazel_rules_go//proto/wkt:any_go_proto", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + ], +) diff --git a/pkg/goalbatch/transformer/metrics.go b/pkg/goalbatch/transformer/metrics.go new file mode 100644 index 000000000..08c6ff380 --- /dev/null +++ b/pkg/goalbatch/transformer/metrics.go @@ -0,0 +1,83 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transformer + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/bucketeer-io/bucketeer/pkg/metrics" +) + +const ( + typeGoal = "Goal" + + codeOK = "OK" + codeFail = "Fail" +) + +var ( + receivedCounter = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "goal_batch_event_transformer", + Name: "received_total", + Help: "Total number of received messages", + }, + ) + + handledCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "goal_batch_event_transformer", + Name: "handled_total", + Help: "Total number of handled messages", + }, []string{"code"}, + ) + + handledHistogram = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "bucketeer", + Subsystem: "goal_batch_event_transformer", + Name: "handled_seconds", + Help: "Histogram of message handling duration (seconds)", + Buckets: prometheus.DefBuckets, + }, []string{"code"}) + + cacheCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "goal_batch_event_transformer", + Name: "cache_requests_total", + Help: "Total number of cache requests", + }, []string{"type", "code"}) + + eventCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "goal_batch_event_transformer", + Name: "register_events_total", + Help: "Total number of registered events", + }, []string{"type", "code"}) +) + +func registerMetrics(r metrics.Registerer) { + r.MustRegister( + receivedCounter, + 
handledCounter, + handledHistogram, + cacheCounter, + eventCounter, + ) +} diff --git a/pkg/goalbatch/transformer/transformer.go b/pkg/goalbatch/transformer/transformer.go new file mode 100644 index 000000000..ec65870ca --- /dev/null +++ b/pkg/goalbatch/transformer/transformer.go @@ -0,0 +1,349 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transformer + +import ( + "context" + "errors" + "time" + + "github.com/golang/protobuf/proto" // nolint:staticcheck + "github.com/golang/protobuf/ptypes" + "go.uber.org/zap" + grpccodes "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/bucketeer-io/bucketeer/pkg/errgroup" + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller/codes" + userclient "github.com/bucketeer-io/bucketeer/pkg/user/client" + userdomain "github.com/bucketeer-io/bucketeer/pkg/user/domain" + "github.com/bucketeer-io/bucketeer/pkg/uuid" + clienteventproto "github.com/bucketeer-io/bucketeer/proto/event/client" + userproto "github.com/bucketeer-io/bucketeer/proto/user" +) + +const ( + transformTimeout = 30 * time.Second + publishMultiTimeout = 5 * time.Minute +) + +var ( + errFailedToMarshal = errors.New("goalBatch: failed to marshal event") + 
errFailedToCreateUUID = errors.New("goalBatch: failed to create UUID") + errFailedToPublish = errors.New("goalBatch: failed to publish events") +) + +type options struct { + maxMPS int + numWorkers int + metrics metrics.Registerer + logger *zap.Logger +} + +var defaultOptions = options{ + maxMPS: 1000, + numWorkers: 1, + logger: zap.NewNop(), +} + +type Option func(*options) + +func WithMaxMPS(mps int) Option { + return func(opts *options) { + opts.maxMPS = mps + } +} + +func WithNumWorkers(n int) Option { + return func(opts *options) { + opts.numWorkers = n + } +} + +func WithMetrics(r metrics.Registerer) Option { + return func(opts *options) { + opts.metrics = r + } +} + +func WithLogger(logger *zap.Logger) Option { + return func(opts *options) { + opts.logger = logger + } +} + +type Transformer interface { + Check(context.Context) health.Status + Run() error + Stop() +} + +type transformer struct { + userClient userclient.Client + puller puller.RateLimitedPuller + publisher publisher.Publisher + errgroup errgroup.Group + opts *options + logger *zap.Logger + ctx context.Context + cancel func() + doneCh chan struct{} +} + +func NewTransformer( + userClient userclient.Client, + p puller.Puller, + publisher publisher.Publisher, + opts ...Option) Transformer { + + ctx, cancel := context.WithCancel(context.Background()) + options := defaultOptions + for _, opt := range opts { + opt(&options) + } + if options.metrics != nil { + registerMetrics(options.metrics) + } + return &transformer{ + userClient: userClient, + puller: puller.NewRateLimitedPuller(p, options.maxMPS), + publisher: publisher, + opts: &options, + logger: options.logger.Named("transformer"), + ctx: ctx, + cancel: cancel, + doneCh: make(chan struct{}), + } +} + +func (t *transformer) Run() error { + defer close(t.doneCh) + t.errgroup.Go(func() error { + return t.puller.Run(t.ctx) + }) + for i := 0; i < t.opts.numWorkers; i++ { + t.errgroup.Go(t.runWorker) + } + return t.errgroup.Wait() +} + +func (t 
*transformer) Stop() { + t.logger.Info("Stop started") + t.cancel() + <-t.doneCh + t.logger.Info("Stop finished") +} + +func (t *transformer) Check(ctx context.Context) health.Status { + select { + case <-t.ctx.Done(): + t.logger.Error("Unhealthy due to context Done is closed", zap.Error(t.ctx.Err())) + return health.Unhealthy + default: + if t.errgroup.FinishedCount() > 0 { + t.logger.Error("Unhealthy", zap.Int32("FinishedCount", t.errgroup.FinishedCount())) + return health.Unhealthy + } + return health.Healthy + } +} + +func (t *transformer) runWorker() error { + record := func(code codes.Code, startTime time.Time) { + handledCounter.WithLabelValues(code.String()).Inc() + handledHistogram.WithLabelValues(code.String()).Observe(time.Since(startTime).Seconds()) + } + for { + select { + case msg, ok := <-t.puller.MessageCh(): + if !ok { + return nil + } + receivedCounter.Inc() + startTime := time.Now() + if id := msg.Attributes["id"]; id == "" { + msg.Ack() + record(codes.MissingID, startTime) + continue + } + event, environmentNamespace, err := t.unmarshalMessage(msg) + if err != nil { + msg.Ack() + record(codes.BadMessage, startTime) + continue + } + err = t.handle(event, environmentNamespace) + if err != nil { + if err == errFailedToMarshal || err == errFailedToCreateUUID { + record(codes.NonRepeatableError, startTime) + msg.Ack() + continue + } + record(codes.RepeatableError, startTime) + msg.Nack() + continue + } + msg.Ack() + record(codes.OK, startTime) + case <-t.ctx.Done(): + return nil + } + } +} + +func (t *transformer) handle(event *clienteventproto.GoalBatchEvent, environmentNamespace string) error { + events, err := t.transform(event, environmentNamespace) + if err != nil { + return err + } + if len(events) == 0 { + return nil + } + messages := make([]publisher.Message, 0, len(events)) + for _, event := range events { + messages = append(messages, event) + } + ctx, cancel := context.WithTimeout(context.Background(), publishMultiTimeout) + defer cancel() 
+ if errs := t.publisher.PublishMulti(ctx, messages); len(errs) > 0 { + t.logger.Error("Failed to publish goal events", zap.Any("errors", errs), + zap.String("environmentNamespace", environmentNamespace)) + eventCounter.WithLabelValues(typeGoal, codeFail).Inc() + return errFailedToPublish + } + eventCounter.WithLabelValues(typeGoal, codeOK).Inc() + return nil +} + +// In case the target user is not found, +// it will return the events empty +func (t *transformer) transform( + event *clienteventproto.GoalBatchEvent, + environmentNamespace string, +) ([]*clienteventproto.Event, error) { + events := make([]*clienteventproto.Event, 0) + if len(event.UserGoalEventsOverTags) == 0 { + return events, nil + } + ctx, cancel := context.WithTimeout(context.Background(), transformTimeout) + defer cancel() + user, err := t.getUser(ctx, environmentNamespace, event.UserId) + if err != nil { + st, _ := status.FromError(err) + if st.Code() == grpccodes.NotFound { + t.logger.Warn("User not found", zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + zap.String("userId", event.UserId), + ) + return events, nil + } + t.logger.Error("Failed to get user", zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + zap.String("userId", event.UserId), + ) + return nil, err + } + for _, ugeot := range event.UserGoalEventsOverTags { + tag := ugeot.Tag + u := t.getUserDataByTag(user, tag) + for _, uge := range ugeot.UserGoalEvents { + e, err := t.marshalGoalEvent(environmentNamespace, tag, uge, u) + if err != nil { + return nil, err + } + events = append(events, e) + } + } + return events, nil +} + +func (t *transformer) marshalGoalEvent( + environmentNamespace, tag string, + uge *clienteventproto.UserGoalEvent, + user *userproto.User, +) (*clienteventproto.Event, error) { + ge := &clienteventproto.GoalEvent{ + SourceId: clienteventproto.SourceId_GOAL_BATCH, + Tag: tag, + Timestamp: uge.Timestamp, + GoalId: uge.GoalId, + UserId: user.Id, + Value: 
uge.Value, + User: user, + } + any, err := ptypes.MarshalAny(ge) + if err != nil { + t.logger.Error("Failed to marshal goal event", zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + zap.String("userId", user.Id), + zap.String("tag", tag), + ) + return nil, errFailedToMarshal + } + id, err := uuid.NewUUID() + if err != nil { + t.logger.Error("Failed to create UUID", zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + zap.String("userId", user.Id), + zap.String("tag", tag), + ) + return nil, errFailedToCreateUUID + } + return &clienteventproto.Event{ + Id: id.String(), + Event: any, + EnvironmentNamespace: environmentNamespace, + }, nil +} + +func (t *transformer) unmarshalMessage(msg *puller.Message) (*clienteventproto.GoalBatchEvent, string, error) { + event := &clienteventproto.Event{} + if err := proto.Unmarshal(msg.Data, event); err != nil { + t.logger.Error("Failed to unmarshal message", zap.Error(err), zap.String("msgId", msg.ID)) + return nil, "", err + } + goalBatchEvent := &clienteventproto.GoalBatchEvent{} + if err := ptypes.UnmarshalAny(event.Event, goalBatchEvent); err != nil { + t.logger.Error("Failed to unmarshal goal event", zap.Error(err), zap.String("msgId", msg.ID)) + return nil, "", err + } + return goalBatchEvent, event.EnvironmentNamespace, nil +} + +func (t *transformer) getUser( + ctx context.Context, + environmentNamespace, userID string, +) (*userproto.User, error) { + resp, err := t.userClient.GetUser(ctx, &userproto.GetUserRequest{ + UserId: userID, + EnvironmentNamespace: environmentNamespace, + }) + if err != nil { + return nil, err + } + return resp.User, nil +} + +func (t *transformer) getUserDataByTag(user *userproto.User, tag string) *userproto.User { + u := &userdomain.User{User: user} + return &userproto.User{ + Id: user.Id, + Data: u.Data(tag), + } +} diff --git a/pkg/goalbatch/transformer/transformer_test.go b/pkg/goalbatch/transformer/transformer_test.go new file mode 100644 
index 000000000..84bc7ee1e --- /dev/null +++ b/pkg/goalbatch/transformer/transformer_test.go @@ -0,0 +1,356 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transformer + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/ptypes/any" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/log" + publishermock "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher/mock" + pullermock "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller/mock" + ucmock "github.com/bucketeer-io/bucketeer/pkg/user/client/mock" + clienteventproto "github.com/bucketeer-io/bucketeer/proto/event/client" + userproto "github.com/bucketeer-io/bucketeer/proto/user" +) + +func TestNewTransformer(t *testing.T) { + t.Parallel() + tf := NewTransformer(nil, nil, nil) + assert.IsType(t, &transformer{}, tf) +} + +func TestCheck(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := []struct { + setup func(tf *transformer) + expected health.Status + }{ + { + setup: func(tf *transformer) { tf.cancel() }, + expected: health.Unhealthy, + }, + { + setup: func(tf *transformer) { + 
tf.errgroup.Go(func() error { return nil }) + time.Sleep(100 * time.Millisecond) // wait for p.group.FinishedCount() is incremented + }, + expected: health.Unhealthy, + }, + { + setup: nil, + expected: health.Healthy, + }, + } + + for _, p := range patterns { + tf := newTransformer(t, mockController) + if p.setup != nil { + p.setup(tf) + } + assert.Equal(t, p.expected, tf.Check(context.Background())) + } +} + +func TestHandle(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*transformer) + input *clienteventproto.GoalBatchEvent + expectedErr error + }{ + "error: transform": { + setup: nil, + input: &clienteventproto.GoalBatchEvent{UserId: "uid-0"}, + expectedErr: nil, + }, + "internal error": { + setup: func(t *transformer) { + t.userClient.(*ucmock.MockClient).EXPECT().GetUser(gomock.Any(), gomock.Any()).Return( + nil, errors.New("internal error")) + }, + input: &clienteventproto.GoalBatchEvent{ + UserId: "uid-0", + UserGoalEventsOverTags: []*clienteventproto.UserGoalEventsOverTag{ + { + Tag: "t-0", + UserGoalEvents: []*clienteventproto.UserGoalEvent{ + {Timestamp: 0, GoalId: "gid-0", Value: 0.0}, + }, + }, + }, + }, + expectedErr: errors.New("internal error"), + }, + "user not found": { + setup: func(t *transformer) { + t.userClient.(*ucmock.MockClient).EXPECT().GetUser(gomock.Any(), gomock.Any()).Return( + nil, status.Error(codes.NotFound, "user: not found")) + }, + input: &clienteventproto.GoalBatchEvent{ + UserId: "uid-0", + UserGoalEventsOverTags: []*clienteventproto.UserGoalEventsOverTag{ + { + Tag: "t-0", + UserGoalEvents: []*clienteventproto.UserGoalEvent{ + {Timestamp: 0, GoalId: "gid-0", Value: 0.0}, + }, + }, + }, + }, + expectedErr: nil, + }, + "success": { + setup: func(t *transformer) { + t.userClient.(*ucmock.MockClient).EXPECT().GetUser(gomock.Any(), gomock.Any()).Return( + &userproto.GetUserResponse{User: &userproto.User{ + Id: "uid-0", + 
TaggedData: map[string]*userproto.User_Data{ + "t-0": {Value: map[string]string{"key": "value"}}, + }, + }}, nil) + t.publisher.(*publishermock.MockPublisher).EXPECT().PublishMulti(gomock.Any(), gomock.Any()).Return(nil) + }, + input: &clienteventproto.GoalBatchEvent{ + UserId: "uid-0", + UserGoalEventsOverTags: []*clienteventproto.UserGoalEventsOverTag{ + { + Tag: "t-0", + UserGoalEvents: []*clienteventproto.UserGoalEvent{ + {Timestamp: 0, GoalId: "gid-0", Value: 0.0}, + }, + }, + }, + }, + expectedErr: nil, + }, + } + + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + tf := newTransformer(t, mockController) + if p.setup != nil { + p.setup(tf) + } + err := tf.handle(p.input, "n0") + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestTransform(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + goalEventWithData := &clienteventproto.GoalEvent{ + SourceId: clienteventproto.SourceId_GOAL_BATCH, + Tag: "t-0", + Timestamp: 0, + GoalId: "gid-0", + UserId: "uid-0", + Value: 0, + User: &userproto.User{Id: "uid-0", Data: map[string]string{"key": "value"}}, + Evaluations: nil, + } + goalEventWithDataAny, err := ptypes.MarshalAny(goalEventWithData) + require.NoError(t, err) + + goalEvent := &clienteventproto.GoalEvent{ + SourceId: clienteventproto.SourceId_GOAL_BATCH, + Tag: "t-0", + Timestamp: 0, + GoalId: "gid-0", + UserId: "uid-0", + Value: 0, + User: &userproto.User{Id: "uid-0"}, + Evaluations: nil, + } + goalEventAny, err := ptypes.MarshalAny(goalEvent) + require.NoError(t, err) + + patterns := map[string]struct { + setup func(*transformer) + input *clienteventproto.GoalBatchEvent + expected []*clienteventproto.Event + expectedErr error + }{ + "no UserGoalEventsOverTags": { + setup: nil, + input: &clienteventproto.GoalBatchEvent{ + UserId: "uid-0", + }, + expected: nil, + expectedErr: nil, + }, + "fail: getUser": { + setup: func(t *transformer) { + 
t.userClient.(*ucmock.MockClient).EXPECT().GetUser(gomock.Any(), gomock.Any()).Return( + nil, errors.New("internal error")) + }, + input: &clienteventproto.GoalBatchEvent{ + UserId: "uid-0", + UserGoalEventsOverTags: []*clienteventproto.UserGoalEventsOverTag{ + { + Tag: "t-0", + UserGoalEvents: []*clienteventproto.UserGoalEvent{ + {Timestamp: 0, GoalId: "gid-0", Value: 0.0}, + }, + }, + }, + }, + expected: nil, + expectedErr: errors.New("internal error"), + }, + "not found: getUser": { + setup: func(t *transformer) { + t.userClient.(*ucmock.MockClient).EXPECT().GetUser(gomock.Any(), gomock.Any()).Return( + nil, status.Error(codes.NotFound, "user: not found")) + }, + input: &clienteventproto.GoalBatchEvent{ + UserId: "uid-0", + UserGoalEventsOverTags: []*clienteventproto.UserGoalEventsOverTag{ + { + Tag: "t-0", + UserGoalEvents: []*clienteventproto.UserGoalEvent{ + {Timestamp: 0, GoalId: "gid-0", Value: 0.0}, + }, + }, + }, + }, + expected: nil, + expectedErr: nil, + }, + "tagged data not found": { + setup: func(t *transformer) { + t.userClient.(*ucmock.MockClient).EXPECT().GetUser(gomock.Any(), gomock.Any()).Return( + &userproto.GetUserResponse{User: &userproto.User{ + Id: "uid-0", + TaggedData: map[string]*userproto.User_Data{ + "t-1": {Value: map[string]string{"key": "value"}}, + }, + }}, nil) + }, + input: &clienteventproto.GoalBatchEvent{ + UserId: "uid-0", + UserGoalEventsOverTags: []*clienteventproto.UserGoalEventsOverTag{ + { + Tag: "t-0", + UserGoalEvents: []*clienteventproto.UserGoalEvent{ + {Timestamp: 0, GoalId: "gid-0", Value: 0.0}, + }, + }, + }, + }, + expected: []*clienteventproto.Event{ + { + EnvironmentNamespace: "n0", + Event: goalEventAny, + }, + }, + expectedErr: nil, + }, + "success": { + setup: func(t *transformer) { + t.userClient.(*ucmock.MockClient).EXPECT().GetUser(gomock.Any(), gomock.Any()).Return( + &userproto.GetUserResponse{User: &userproto.User{ + Id: "uid-0", + TaggedData: map[string]*userproto.User_Data{ + "t-0": {Value: 
map[string]string{"key": "value"}}, + }, + }}, nil) + }, + input: &clienteventproto.GoalBatchEvent{ + UserId: "uid-0", + UserGoalEventsOverTags: []*clienteventproto.UserGoalEventsOverTag{ + { + Tag: "t-0", + UserGoalEvents: []*clienteventproto.UserGoalEvent{ + {Timestamp: 0, GoalId: "gid-0", Value: 0.0}, + }, + }, + }, + }, + expected: []*clienteventproto.Event{ + { + EnvironmentNamespace: "n0", + Event: goalEventWithDataAny, + }, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + tf := newTransformer(t, mockController) + if p.setup != nil { + p.setup(tf) + } + actual, err := tf.transform(p.input, "n0") + if p.expected != nil || actual != nil { + for i := range p.expected { + assert.Equal(t, p.expected[i].EnvironmentNamespace, actual[i].EnvironmentNamespace) + expectedGoalEvent, err := unmarshalGoalEvent(p.expected[0].Event) + assert.NoError(t, err) + actualGoalEvent, err := unmarshalGoalEvent(actual[0].Event) + assert.NoError(t, err) + assert.Equal(t, expectedGoalEvent, actualGoalEvent) + } + } + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func unmarshalGoalEvent(event *any.Any) (*clienteventproto.GoalEvent, error) { + goalEvent := &clienteventproto.GoalEvent{} + if err := ptypes.UnmarshalAny(event, goalEvent); err != nil { + return nil, err + } + return goalEvent, nil +} + +func newTransformer(t *testing.T, mockController *gomock.Controller) *transformer { + t.Helper() + ctx, cancel := context.WithCancel(context.Background()) + logger, err := log.NewLogger() + require.NoError(t, err) + return &transformer{ + userClient: ucmock.NewMockClient(mockController), + puller: pullermock.NewMockRateLimitedPuller(mockController), + publisher: publishermock.NewMockPublisher(mockController), + logger: logger, + ctx: ctx, + cancel: cancel, + doneCh: make(chan struct{}), + } +} diff --git a/pkg/health/BUILD.bazel b/pkg/health/BUILD.bazel new file mode 100644 index 000000000..8946f15bf --- /dev/null +++ 
b/pkg/health/BUILD.bazel @@ -0,0 +1,25 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "grpc_health.go", + "health.go", + "rest_health.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/health", + visibility = ["//visibility:public"], + deps = [ + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//health/grpc_health_v1:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["health_test.go"], + embed = [":go_default_library"], + deps = ["@org_golang_google_grpc//health/grpc_health_v1:go_default_library"], +) diff --git a/pkg/health/grpc_health.go b/pkg/health/grpc_health.go new file mode 100644 index 000000000..0fb9ba67c --- /dev/null +++ b/pkg/health/grpc_health.go @@ -0,0 +1,55 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package health + +import ( + "context" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + pb "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/status" +) + +type grpcChecker struct { + *checker +} + +func NewGrpcChecker(opts ...option) *grpcChecker { + checker := &grpcChecker{ + checker: newChecker(opts...), + } + return checker +} + +func (hc *grpcChecker) Register(server *grpc.Server) { + pb.RegisterHealthServer(server, hc) +} + +func (hc *grpcChecker) Check(ctx context.Context, req *pb.HealthCheckRequest) (*pb.HealthCheckResponse, error) { + if hc.getStatus() == Unhealthy { + return &pb.HealthCheckResponse{ + Status: pb.HealthCheckResponse_NOT_SERVING, + }, nil + } + return &pb.HealthCheckResponse{ + Status: pb.HealthCheckResponse_SERVING, + }, nil +} + +func (hc *grpcChecker) Watch(*pb.HealthCheckRequest, pb.Health_WatchServer) error { + // TODO: Implements here when needed. + return status.Errorf(codes.Unimplemented, "unsupported method") +} diff --git a/pkg/health/health.go b/pkg/health/health.go new file mode 100644 index 000000000..da40109e9 --- /dev/null +++ b/pkg/health/health.go @@ -0,0 +1,138 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package health + +import ( + "context" + "fmt" + "net/http" + "sync/atomic" + "time" +) + +type Status uint32 + +const ( + // Healthy is returned when the health check was successful + Healthy Status = 0 + + // Unhealthy is returned when the health check was unsuccessful + Unhealthy Status = 1 +) + +func (s Status) String() string { + switch s { + case Healthy: + return "Healthy" + case Unhealthy: + return "Unhealthy" + default: + return "Unknown" + } +} + +type check func(context.Context) Status + +type checker struct { + status uint32 + + interval time.Duration + timeout time.Duration + checks map[string]check +} + +type option func(*checker) + +func WithCheck(name string, check check) option { + return func(c *checker) { + if _, ok := c.checks[name]; ok { + panic(fmt.Sprintf("health: %s already registered", name)) + } + c.checks[name] = check + } +} + +func WithInterval(interval time.Duration) option { + return func(c *checker) { + c.interval = interval + } +} + +func WithTimeout(timeout time.Duration) option { + return func(c *checker) { + c.timeout = timeout + } +} + +func newChecker(opts ...option) *checker { + checker := &checker{ + status: uint32(Unhealthy), + interval: 10 * time.Second, + timeout: 5 * time.Second, + checks: make(map[string]check), + } + for _, o := range opts { + o(checker) + } + return checker +} + +func (hc *checker) Run(ctx context.Context) { + ticker := time.NewTicker(hc.interval) + defer ticker.Stop() + hc.check(ctx) + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + hc.check(ctx) + } + } +} + +func (hc *checker) check(ctx context.Context) { + resultChan := make(chan Status, len(hc.checks)) + ctx, cancel := context.WithTimeout(ctx, hc.timeout) + defer cancel() + for _, c := range hc.checks { + go func(c check) { + resultChan <- c(ctx) + }(c) + } + for i := 0; i < len(hc.checks); i++ { + if res := <-resultChan; res != Healthy { + hc.setStatus(Unhealthy) + return + } + } + hc.setStatus(Healthy) +} + +func (hc 
*checker) ServeHTTP(resp http.ResponseWriter, req *http.Request) { + if hc.getStatus() == Unhealthy { + resp.WriteHeader(http.StatusServiceUnavailable) + return + } + resp.WriteHeader(http.StatusOK) +} + +func (hc *checker) getStatus() Status { + return Status(atomic.LoadUint32(&hc.status)) +} + +func (hc *checker) setStatus(s Status) { + atomic.StoreUint32(&hc.status, uint32(s)) +} diff --git a/pkg/health/health_test.go b/pkg/health/health_test.go new file mode 100644 index 000000000..ab229aa12 --- /dev/null +++ b/pkg/health/health_test.go @@ -0,0 +1,117 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package health + +import ( + "context" + "fmt" + "net/http" + "net/http/httptest" + "testing" + "time" + + pb "google.golang.org/grpc/health/grpc_health_v1" +) + +const ( + version = "/v1" + service = "/gateway" +) + +func TestHTTPHealthyNoCheck(t *testing.T) { + checker := NewRestChecker(version, service) + checker.check(context.Background()) + req := httptest.NewRequest("GET", getTargetPath(t), nil) + resp := httptest.NewRecorder() + checker.ServeHTTP(resp, req) + if resp.Code != http.StatusOK { + t.Fail() + } +} + +func TestHTTPHealthy(t *testing.T) { + healthyCheck := func(ctx context.Context) Status { + return Healthy + } + checker := NewRestChecker(version, service, WithCheck("healthy", healthyCheck)) + checker.check(context.Background()) + req := httptest.NewRequest("GET", getTargetPath(t), nil) + resp := httptest.NewRecorder() + checker.ServeHTTP(resp, req) + if resp.Code != http.StatusOK { + t.Fail() + } +} + +func TestHTTPUnhealthy(t *testing.T) { + unhealthyCheck := func(ctx context.Context) Status { + return Unhealthy + } + checker := NewRestChecker(version, service, WithCheck("unhealthy", unhealthyCheck)) + checker.check(context.Background()) + req := httptest.NewRequest("GET", getTargetPath(t), nil) + resp := httptest.NewRecorder() + checker.ServeHTTP(resp, req) + if resp.Code != http.StatusServiceUnavailable { + t.Fail() + } +} + +func TestGRPCHealthyNoCheck(t *testing.T) { + checker := NewGrpcChecker(WithInterval(time.Millisecond)) + checker.check(context.Background()) + resp, err := checker.Check(context.Background(), &pb.HealthCheckRequest{}) + if err != nil { + t.Fail() + } + if resp.Status != pb.HealthCheckResponse_SERVING { + t.Fail() + } +} + +func TestGRPCHealthy(t *testing.T) { + healthyCheck := func(ctx context.Context) Status { + return Healthy + } + checker := NewGrpcChecker(WithCheck("healthy", healthyCheck)) + checker.check(context.Background()) + resp, err := checker.Check(context.Background(), &pb.HealthCheckRequest{}) + if err != 
nil { + t.Fail() + } + if resp.Status != pb.HealthCheckResponse_SERVING { + t.Fail() + } +} + +func TestGRPCUnhealthy(t *testing.T) { + unhealthyCheck := func(ctx context.Context) Status { + return Unhealthy + } + checker := NewGrpcChecker(WithCheck("unhealthy", unhealthyCheck)) + checker.check(context.Background()) + resp, err := checker.Check(context.Background(), &pb.HealthCheckRequest{}) + if err != nil { + t.Fail() + } + if resp.Status != pb.HealthCheckResponse_NOT_SERVING { + t.Fail() + } +} + +func getTargetPath(t *testing.T) string { + t.Helper() + return fmt.Sprintf("%s%s%s", version, service, healthPath) +} diff --git a/pkg/health/rest_health.go b/pkg/health/rest_health.go new file mode 100644 index 000000000..7cf47c8d6 --- /dev/null +++ b/pkg/health/rest_health.go @@ -0,0 +1,41 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package health + +import ( + "fmt" + "net/http" +) + +const healthPath = "/health" + +type restChecker struct { + *checker + version string + service string +} + +func NewRestChecker(version, service string, opts ...option) *restChecker { + checker := &restChecker{ + checker: newChecker(opts...), + version: version, + service: service, + } + return checker +} + +func (c *restChecker) Register(mux *http.ServeMux) { + mux.HandleFunc(fmt.Sprintf("%s%s%s", c.version, c.service, healthPath), c.ServeHTTP) +} diff --git a/pkg/job/BUILD.bazel b/pkg/job/BUILD.bazel new file mode 100644 index 000000000..83b7973e0 --- /dev/null +++ b/pkg/job/BUILD.bazel @@ -0,0 +1,17 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "job.go", + "metrics.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/job", + visibility = ["//visibility:public"], + deps = [ + "//pkg/metrics:go_default_library", + "@com_github_prometheus_client_golang//prometheus:go_default_library", + "@com_github_robfig_cron//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/job/job.go b/pkg/job/job.go new file mode 100644 index 000000000..c608811f5 --- /dev/null +++ b/pkg/job/job.go @@ -0,0 +1,112 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package job + +import ( + "context" + "time" + + "github.com/robfig/cron" + "go.uber.org/zap" + + "github.com/bucketeer-io/bucketeer/pkg/metrics" +) + +type options struct { + timeout time.Duration + metrics metrics.Registerer + logger *zap.Logger +} + +type Option func(*options) + +func WithTimeout(timeout time.Duration) Option { + return func(opts *options) { + opts.timeout = timeout + } +} + +func WithMetrics(r metrics.Registerer) Option { + return func(opts *options) { + opts.metrics = r + } +} + +func WithLogger(l *zap.Logger) Option { + return func(opts *options) { + opts.logger = l + } +} + +type Job interface { + Run(context.Context) error +} + +type Manager struct { + cron *cron.Cron + metrics metrics.Registerer + logger *zap.Logger + ctx context.Context + cancel func() + doneCh chan struct{} +} + +func NewManager(r metrics.Registerer, subsystem string, logger *zap.Logger) *Manager { + ctx, cancel := context.WithCancel(context.Background()) + registerMetrics(r, subsystem) + return &Manager{ + cron: cron.New(), + metrics: r, + logger: logger.Named("jobmanager"), + ctx: ctx, + cancel: cancel, + doneCh: make(chan struct{}), + } +} + +func (m *Manager) Run() error { + m.logger.Info("Run started") + defer close(m.doneCh) + m.cron.Start() + <-m.ctx.Done() + m.logger.Info("Run finished") + return nil +} + +func (m *Manager) Stop() { + m.logger.Info("Stop started") + m.cancel() + m.cron.Stop() + <-m.doneCh + m.logger.Info("Stop finished") +} + +func (m *Manager) AddCronJob(name, cron string, job Job) error { + return m.cron.AddFunc(cron, func() { + m.logger.Info("Job started", zap.String("name", name)) + startTime := time.Now() + startedJobCounter.WithLabelValues(name).Inc() + err := job.Run(m.ctx) + code := codeSuccess + if err != nil { + code = codeFail + m.logger.Error("Job finished with an error", zap.String("name", name), zap.Error(err)) + } else { + m.logger.Info("Job finished", zap.String("name", name)) + } + finishedJobCounter.WithLabelValues(name, 
code).Inc() + finishedJobHistogram.WithLabelValues(name, code).Observe(time.Since(startTime).Seconds()) + }) +} diff --git a/pkg/job/metrics.go b/pkg/job/metrics.go new file mode 100644 index 000000000..be1508a94 --- /dev/null +++ b/pkg/job/metrics.go @@ -0,0 +1,67 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package job + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/bucketeer-io/bucketeer/pkg/metrics" +) + +const ( + codeSuccess = "Success" + codeFail = "Fail" +) + +var ( + startedJobCounter *prometheus.CounterVec + finishedJobCounter *prometheus.CounterVec + finishedJobHistogram *prometheus.HistogramVec +) + +func setSubsystem(subsystem string) { + startedJobCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: subsystem, + Name: "batch_started_jobs_total", + Help: "Total number of started jobs.", + }, []string{"name"}) + + finishedJobCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: subsystem, + Name: "batch_finished_jobs_total", + Help: "Total number of finished jobs.", + }, []string{"name", "code"}) + + finishedJobHistogram = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "bucketeer", + Subsystem: subsystem, + Name: "batch_job_running_time_seconds", + Help: "Histogram of the time job takes to run in seconds.", + }, []string{"name", "code"}) + +} +func 
registerMetrics(r metrics.Registerer, subsystem string) { + setSubsystem(subsystem) + r.MustRegister( + startedJobCounter, + finishedJobCounter, + finishedJobHistogram, + ) +} diff --git a/pkg/kafka/BUILD.bazel b/pkg/kafka/BUILD.bazel new file mode 100644 index 000000000..b4b43ae6f --- /dev/null +++ b/pkg/kafka/BUILD.bazel @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "topic.go", + "topic_creator.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/kafka", + visibility = ["//visibility:public"], + deps = [ + "//pkg/storage/kafka:go_default_library", + "@com_github_shopify_sarama//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/kafka/mock/BUILD.bazel b/pkg/kafka/mock/BUILD.bazel new file mode 100644 index 000000000..f6051c21f --- /dev/null +++ b/pkg/kafka/mock/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["topic_creator.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/kafka/mock", + visibility = ["//visibility:public"], + deps = ["@com_github_golang_mock//gomock:go_default_library"], +) diff --git a/pkg/kafka/mock/topic_creator.go b/pkg/kafka/mock/topic_creator.go new file mode 100644 index 000000000..d89f0be4c --- /dev/null +++ b/pkg/kafka/mock/topic_creator.go @@ -0,0 +1,49 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: topic_creator.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockTopicCreator is a mock of TopicCreator interface. +type MockTopicCreator struct { + ctrl *gomock.Controller + recorder *MockTopicCreatorMockRecorder +} + +// MockTopicCreatorMockRecorder is the mock recorder for MockTopicCreator. 
+type MockTopicCreatorMockRecorder struct { + mock *MockTopicCreator +} + +// NewMockTopicCreator creates a new mock instance. +func NewMockTopicCreator(ctrl *gomock.Controller) *MockTopicCreator { + mock := &MockTopicCreator{ctrl: ctrl} + mock.recorder = &MockTopicCreatorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockTopicCreator) EXPECT() *MockTopicCreatorMockRecorder { + return m.recorder +} + +// CreateTopics mocks base method. +func (m *MockTopicCreator) CreateTopics(ctx context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateTopics", ctx) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateTopics indicates an expected call of CreateTopics. +func (mr *MockTopicCreatorMockRecorder) CreateTopics(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateTopics", reflect.TypeOf((*MockTopicCreator)(nil).CreateTopics), ctx) +} diff --git a/pkg/kafka/topic.go b/pkg/kafka/topic.go new file mode 100644 index 000000000..b394a6d44 --- /dev/null +++ b/pkg/kafka/topic.go @@ -0,0 +1,23 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kafka + +var ( + topics = []string{ + "evaluation-events", + "goal-events", + "user-events", + } +) diff --git a/pkg/kafka/topic_creator.go b/pkg/kafka/topic_creator.go new file mode 100644 index 000000000..3cbf84d8b --- /dev/null +++ b/pkg/kafka/topic_creator.go @@ -0,0 +1,106 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package kafka + +import ( + "context" + + "github.com/Shopify/sarama" + "go.uber.org/zap" + + storagekafka "github.com/bucketeer-io/bucketeer/pkg/storage/kafka" +) + +type TopicCreator interface { + CreateTopics(ctx context.Context) error +} + +type options struct { + logger *zap.Logger + partitionNum int32 + replicationFactor int16 + minInSyncReplicas string +} + +type Option func(*options) + +func WithLogger(l *zap.Logger) Option { + return func(opts *options) { + opts.logger = l + } +} + +func WithPartitionNum(n int32) Option { + return func(opts *options) { + opts.partitionNum = n + } +} + +func WithReplicationFactor(f int16) Option { + return func(opts *options) { + opts.replicationFactor = f + } +} + +func WithMinInSyncReplicas(n string) Option { + return func(opts *options) { + opts.minInSyncReplicas = n + } +} + +type topicCreator struct { + client *storagekafka.ClusterAdmin + topicPrefix string + opts *options + logger *zap.Logger +} + +func NewTopicCreator(client 
*storagekafka.ClusterAdmin, topicPrefix string, opts ...Option) TopicCreator { + dopts := &options{ + logger: zap.NewNop(), + partitionNum: 3, + replicationFactor: 3, + minInSyncReplicas: "2", + } + for _, opt := range opts { + opt(dopts) + } + return &topicCreator{ + client: client, + topicPrefix: topicPrefix, + opts: dopts, + logger: dopts.logger.Named("kafka"), + } +} + +func (tc *topicCreator) CreateTopics(ctx context.Context) error { + for _, topic := range topics { + topicName := storagekafka.TopicName(tc.topicPrefix, topic) + topicDetail := &sarama.TopicDetail{ + NumPartitions: tc.opts.partitionNum, + ReplicationFactor: tc.opts.replicationFactor, + ConfigEntries: map[string]*string{"min.insync.replicas": &tc.opts.minInSyncReplicas}, + } + if err := tc.client.CreateTopic(topicName, topicDetail); err != nil { + tc.logger.Error("Failed to create topic", zap.Error(err), + zap.String("topic", topicName)) + return err + } + tc.logger.Info("Suceeded to create topic", + zap.String("topic", topicName)) + } + return nil +} diff --git a/pkg/ldflags/BUILD.bazel b/pkg/ldflags/BUILD.bazel new file mode 100644 index 000000000..003e43f71 --- /dev/null +++ b/pkg/ldflags/BUILD.bazel @@ -0,0 +1,8 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["ldflags.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/ldflags", + visibility = ["//visibility:public"], +) diff --git a/pkg/ldflags/ldflags.go b/pkg/ldflags/ldflags.go new file mode 100644 index 000000000..5fffbb51b --- /dev/null +++ b/pkg/ldflags/ldflags.go @@ -0,0 +1,20 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ldflags + +var ( + Hash string + BuildDate string +) diff --git a/pkg/locale/BUILD.bazel b/pkg/locale/BUILD.bazel new file mode 100644 index 000000000..e060c1e6a --- /dev/null +++ b/pkg/locale/BUILD.bazel @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("@io_bazel_rules_go//extras:embed_data.bzl", "go_embed_data") + +go_library( + name = "go_default_library", + srcs = [ + "locale.go", + "localizer.go", + "options.go", + ":embed", # keep + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/locale", + visibility = ["//visibility:public"], + deps = [ + "@com_github_nicksnyder_go_i18n_v2//i18n:go_default_library", + "@in_gopkg_yaml_v2//:go_default_library", + "@org_golang_x_text//language:go_default_library", + ], +) + +go_embed_data( + name = "embed", + srcs = glob(["localizedata/*.yaml"]), + flatten = True, + package = "locale", + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["localizer_test.go"], + embed = [":go_default_library"], + deps = ["@com_github_stretchr_testify//assert:go_default_library"], +) diff --git a/pkg/locale/locale.go b/pkg/locale/locale.go new file mode 100644 index 000000000..903a1b798 --- /dev/null +++ b/pkg/locale/locale.go @@ -0,0 +1,39 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package locale + +const ( + JaJP = "ja-JP" + EnUS = "en-US" +) + +type locale struct { + locale string + langs []string +} + +type Locale interface { + GetLocale() string +} + +func NewLocale(l string) Locale { + return &locale{ + locale: l, + } +} + +func (l *locale) GetLocale() string { + return l.locale +} diff --git a/pkg/locale/localizedata/en.yaml b/pkg/locale/localizedata/en.yaml new file mode 100644 index 000000000..c148b7c8e --- /dev/null +++ b/pkg/locale/localizedata/en.yaml @@ -0,0 +1,8 @@ +# Nouns +FeatureFlagID: "feature flag ID" + +# Error sentences +RequiredField: "{{ .Field_1 }} is required" +InternalServerError: "An internal error has occurred" +NotFoundError: "The requested {{ .Field_1 }} is not found" +InvalidArgumentError: "The argument {{ .Field_1 }} is invalid" diff --git a/pkg/locale/localizedata/ja.yaml b/pkg/locale/localizedata/ja.yaml new file mode 100644 index 000000000..7e51868de --- /dev/null +++ b/pkg/locale/localizedata/ja.yaml @@ -0,0 +1,34 @@ +# Nouns +FeatureFlagID: "フィーチャーフラグID" + +# Error sentences +RequiredField: "{{ .Field_1 }}は必須です" +InternalServerError: "内部エラーが発生しました" +NotFoundError: "リクエストされた{{ .Field_1 }}が見つかりません" +InvalidArgumentError: "不正な{{ .Field_1 }}です" +Unauthenticated: "認証されていません" +PermissionDenied: "権限がありません" +ExceededMaxError: "{{ .Field_1 }} の最大値 ({{ .Field_2 }}) を超えています" +AlreadyExistsError: "同じデータが既に存在しています" +AlreadyDeletedError: "データがすでに削除済みです" + +# feature error +DifferentVariationsSize: "featureのvariationsとrolloutのvariationsの数が異なります" +IncorrectVariationWeight: "weightは0から{{ .Field_1 
}}の間である必要があります" +NothingChange: "変更点がありません" +SegmentUsersAlreadyUploading: "segment userのリストはすでにアップロード中です" +SegmentStatusNotSuceeded: "segmentのstatusがsuceededではありません" +SegmentInUse: "segmentがfeature flagで使用されているため、削除できません" +WaitingOrRunningExperimentExists: "開始予定、もしくは実行中のExperimentが存在します。更新する場合はExperimentを停止してください。" +InvalidArchive: "前提条件のフラグとして登録されているフラグをアーカイブすることはできません" +InvalidChangingVariation: "前提条件のフラグとして登録されているフラグのバリエーションを変更または削除することはできません" + +# autoops error +IncompatibleOpsType: "対象のオペレーションタイプに対応していない自動オペレーションルールがあります" + +# eventcounter error +StartAtIsAfterEnd: "start at はend at以前を指定してください。" +PeroidOutOfRange: "期間は過去30日以内を選択してください。" + +# environment error +ProjectDisabled: "projectのデータが無効化されています" diff --git a/pkg/locale/localizer.go b/pkg/locale/localizer.go new file mode 100644 index 000000000..fafbdce53 --- /dev/null +++ b/pkg/locale/localizer.go @@ -0,0 +1,100 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package locale + +import ( + "fmt" + "strconv" + + "github.com/nicksnyder/go-i18n/v2/i18n" + "golang.org/x/text/language" + "gopkg.in/yaml.v2" +) + +var ( + bundle *i18n.Bundle +) + +const ( + FeatureFlagID = "FeatureFlagID" + RequiredFieldTemplate = "RequiredField" + InternalServerError = "InternalServerError" + NotFoundError = "NotFoundError" + InvalidArgumentError = "InvalidArgumentError" +) + +func init() { + bundle = i18n.NewBundle(language.English) + bundle.RegisterUnmarshalFunc("yaml", yaml.Unmarshal) + files := []string{ + "en.yaml", + "ja.yaml", + } + for _, f := range files { + //nolint:typecheck + data, ok := Data[f] + if !ok { + panic(fmt.Errorf("Failed to load translation data: %s", f)) + } + bundle.MustParseMessageFileBytes(data, f) + } +} + +type localizer struct { + Locale + *i18n.Localizer +} + +type Localizer interface { + Locale + MustLocalize(id string) string + MustLocalizeWithTemplate(id string, fields ...string) string +} + +func NewLocalizer(locale Locale, fopts ...Option) Localizer { + opts := defaultOptions() + for _, fo := range fopts { + fo.apply(&opts) + } + return &localizer{ + locale, + i18n.NewLocalizer(opts.bundle, locale.GetLocale()), + } +} + +func (l *localizer) MustLocalize(id string) string { + return l.Localizer.MustLocalize(createLocalizeConfig(id)) +} + +func (l *localizer) MustLocalizeWithTemplate(id string, fields ...string) string { + return l.Localizer.MustLocalize(createLocalizeConfigWithTemplate(id, fields...)) +} + +func createLocalizeConfig(id string) *i18n.LocalizeConfig { + return &i18n.LocalizeConfig{ + MessageID: id, + } +} + +func createLocalizeConfigWithTemplate(id string, fields ...string) *i18n.LocalizeConfig { + td := make(map[string]interface{}, len(fields)) + for i, f := range fields { + td["Field_"+strconv.Itoa(i+1)] = f + } + return &i18n.LocalizeConfig{ + MessageID: id, + TemplateData: td, + } +} diff --git a/pkg/locale/localizer_test.go b/pkg/locale/localizer_test.go new file mode 100644 index 
000000000..6c4bc3ac9 --- /dev/null +++ b/pkg/locale/localizer_test.go @@ -0,0 +1,85 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package locale + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMustLocalizeWithTemplate(t *testing.T) { + lJA := NewLocalizer(NewLocale(JaJP)) + lEN := NewLocalizer(NewLocale(EnUS)) + cases := []struct { + name string + id string + fields []string + l Localizer + expected string + }{ + { + name: "succeed", + id: RequiredFieldTemplate, + fields: []string{"field-1"}, + l: lJA, + expected: "field-1は必須です", + }, + { + name: "succeed", + id: RequiredFieldTemplate, + fields: []string{"field-1"}, + l: lEN, + expected: "field-1 is required", + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + actual := c.l.MustLocalizeWithTemplate(c.id, c.fields...) 
+ assert.Equal(t, c.expected, actual) + }) + } +} + +func TestMustLocalize(t *testing.T) { + lJA := NewLocalizer(NewLocale(JaJP)) + lEN := NewLocalizer(NewLocale(EnUS)) + cases := []struct { + name string + id string + l Localizer + expected string + }{ + { + name: "succeed", + id: FeatureFlagID, + l: lJA, + expected: "フィーチャーフラグID", + }, + { + name: "succeed", + id: FeatureFlagID, + l: lEN, + expected: "feature flag ID", + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + actual := c.l.MustLocalize(c.id) + assert.Equal(t, c.expected, actual) + }) + } +} diff --git a/pkg/locale/options.go b/pkg/locale/options.go new file mode 100644 index 000000000..d64c96afc --- /dev/null +++ b/pkg/locale/options.go @@ -0,0 +1,53 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package locale + +import ( + "github.com/nicksnyder/go-i18n/v2/i18n" +) + +type options struct { + bundle *i18n.Bundle +} + +type Option interface { + apply(*options) +} + +type funcOption struct { + f func(*options) +} + +func (fo *funcOption) apply(o *options) { + fo.f(o) +} + +func newFuncOption(f func(*options)) *funcOption { + return &funcOption{ + f: f, + } +} + +func defaultOptions() options { + return options{ + bundle: bundle, + } +} + +func WithBundle(bundle *i18n.Bundle) Option { + return newFuncOption(func(o *options) { + o.bundle = bundle + }) +} diff --git a/pkg/log/BUILD.bazel b/pkg/log/BUILD.bazel new file mode 100644 index 000000000..0926eb9ab --- /dev/null +++ b/pkg/log/BUILD.bazel @@ -0,0 +1,24 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "field.go", + "log.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/log", + visibility = ["//visibility:public"], + deps = [ + "//pkg/rpc/metadata:go_default_library", + "@io_opencensus_go//trace:go_default_library", + "@org_uber_go_zap//:go_default_library", + "@org_uber_go_zap//zapcore:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["log_test.go"], + embed = [":go_default_library"], + deps = ["@com_github_stretchr_testify//assert:go_default_library"], +) diff --git a/pkg/log/field.go b/pkg/log/field.go new file mode 100644 index 000000000..16b81fa1a --- /dev/null +++ b/pkg/log/field.go @@ -0,0 +1,64 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package log + +import ( + "context" + "errors" + + "go.opencensus.io/trace" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + + "github.com/bucketeer-io/bucketeer/pkg/rpc/metadata" +) + +type Fields []zap.Field + +func FieldsFromImcomingContext(ctx context.Context) Fields { + sc := trace.FromContext(ctx).SpanContext() + return Fields{ + zap.String("xRequestID", metadata.GetXRequestIDFromIncomingContext(ctx)), + zap.String("logging.googleapis.com/trace", sc.TraceID.String()), + zap.String("logging.googleapis.com/spanId", sc.SpanID.String()), + } +} + +func FieldsFromOutgoingContext(ctx context.Context) Fields { + sc := trace.FromContext(ctx).SpanContext() + return Fields{ + zap.String("xRequestID", metadata.GetXRequestIDFromOutgoingContext(ctx)), + zap.String("logging.googleapis.com/trace", sc.TraceID.String()), + zap.String("logging.googleapis.com/spanId", sc.SpanID.String()), + } +} + +func (fs Fields) AddFields(fields ...zap.Field) Fields { + return append(fs, fields...) +} + +type serviceContext struct { + service string + version string +} + +func (sc *serviceContext) MarshalLogObject(enc zapcore.ObjectEncoder) error { + if sc.service == "" { + return errors.New("service name is mandatory") + } + enc.AddString("service", sc.service) + enc.AddString("version", sc.version) + return nil +} diff --git a/pkg/log/log.go b/pkg/log/log.go new file mode 100644 index 000000000..71d211882 --- /dev/null +++ b/pkg/log/log.go @@ -0,0 +1,116 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package log + +import ( + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +var Levels = []string{"debug", "info", "warn", "error", "dpanic", "panic", "fatal"} + +type options struct { + level string + serviceContext *serviceContext +} + +type Option func(*options) + +func WithLevel(level string) Option { + return func(opts *options) { + opts.level = level + } +} + +func WithServiceContext(service, version string) Option { + return func(opts *options) { + opts.serviceContext = &serviceContext{ + service: service, + version: version, + } + } +} + +func NewLogger(opts ...Option) (*zap.Logger, error) { + dopts := &options{ + level: "info", + } + for _, opt := range opts { + opt(dopts) + } + level := new(zapcore.Level) + if err := level.Set(dopts.level); err != nil { + return nil, err + } + if dopts.serviceContext == nil { + return newConfig(*level).Build() + } + option := zap.Fields(zap.Object("serviceContext", dopts.serviceContext)) + logger, err := newConfig(*level).Build(option) + if err != nil { + return nil, err + } + return logger.Named(dopts.serviceContext.service), nil +} + +func newConfig(level zapcore.Level) zap.Config { + return zap.Config{ + Level: zap.NewAtomicLevelAt(level), + Development: false, + Sampling: &zap.SamplingConfig{ + Initial: 100, + Thereafter: 100, + }, + Encoding: "json", + EncoderConfig: newEncoderConfig(), + OutputPaths: []string{"stderr"}, + ErrorOutputPaths: 
[]string{"stderr"}, + } +} + +func newEncoderConfig() zapcore.EncoderConfig { + return zapcore.EncoderConfig{ + TimeKey: "eventTime", + LevelKey: "severity", + NameKey: "logger", + CallerKey: "caller", + MessageKey: "message", + StacktraceKey: "stacktrace", + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: encodeLevel, + EncodeTime: zapcore.EpochTimeEncoder, + EncodeDuration: zapcore.SecondsDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + } +} + +func encodeLevel(l zapcore.Level, enc zapcore.PrimitiveArrayEncoder) { + switch l { + case zapcore.DebugLevel: + enc.AppendString("DEBUG") + case zapcore.InfoLevel: + enc.AppendString("INFO") + case zapcore.WarnLevel: + enc.AppendString("WARNING") + case zapcore.ErrorLevel: + enc.AppendString("ERROR") + case zapcore.DPanicLevel: + enc.AppendString("CRITICAL") + case zapcore.PanicLevel: + enc.AppendString("ALERT") + case zapcore.FatalLevel: + enc.AppendString("EMERGENCY") + } +} diff --git a/pkg/log/log_test.go b/pkg/log/log_test.go new file mode 100644 index 000000000..8c51d063d --- /dev/null +++ b/pkg/log/log_test.go @@ -0,0 +1,40 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package log + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewLoggerOK(t *testing.T) { + for _, level := range Levels { + logger, err := NewLogger( + WithLevel(level), + WithServiceContext("test-service", "1.0.0"), + ) + des := fmt.Sprintf("level: %s", level) + assert.Nil(t, err, des) + assert.NotNil(t, logger, des) + } +} + +func TestNewLoggerFailed(t *testing.T) { + logger, err := NewLogger(WithLevel("foo")) + assert.NotNil(t, err) + assert.Nil(t, logger) +} diff --git a/pkg/metrics/BUILD.bazel b/pkg/metrics/BUILD.bazel new file mode 100644 index 000000000..6e24c4f85 --- /dev/null +++ b/pkg/metrics/BUILD.bazel @@ -0,0 +1,24 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["metrics.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/metrics", + visibility = ["//visibility:public"], + deps = [ + "//pkg/health:go_default_library", + "@com_github_prometheus_client_golang//prometheus:go_default_library", + "@com_github_prometheus_client_golang//prometheus/promhttp:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["metrics_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/health:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + ], +) diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go new file mode 100644 index 000000000..241225f57 --- /dev/null +++ b/pkg/metrics/metrics.go @@ -0,0 +1,163 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package metrics + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "go.uber.org/zap" + + "github.com/bucketeer-io/bucketeer/pkg/health" +) + +type Registerer interface { + MustRegister(...prometheus.Collector) + Unregister(prometheus.Collector) bool +} + +type Metrics interface { + DefaultRegisterer() Registerer + Registerer(path string) Registerer + Check(ctx context.Context) health.Status + Run() error + Stop() +} + +type options struct { + healthCheckURL string + logger *zap.Logger +} + +type Option func(*options) + +func WithHealthCheckURL(url string) Option { + return func(opts *options) { + opts.healthCheckURL = url + } +} + +func WithLogger(l *zap.Logger) Option { + return func(opts *options) { + opts.logger = l + } +} + +type metrics struct { + mux *http.ServeMux + server *http.Server + defaultPath string + registries map[string]*registry + opts *options + logger *zap.Logger +} + +type registry struct { + *prometheus.Registry +} + +func NewMetrics(port int, path string, opts ...Option) Metrics { + dopts := &options{ + healthCheckURL: fmt.Sprintf("http://localhost:%d/health", port), + logger: zap.NewNop(), + } + for _, opt := range opts { + opt(dopts) + } + mux := http.NewServeMux() + m := &metrics{ + mux: mux, + server: &http.Server{ + Addr: fmt.Sprintf(":%d", port), + Handler: mux, + }, + defaultPath: path, + registries: 
make(map[string]*registry), + opts: dopts, + logger: dopts.logger.Named("metrics"), + } + r := m.Registerer(path) + r.MustRegister( + prometheus.NewGoCollector(), + prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}), + ) + return m +} + +func (m *metrics) DefaultRegisterer() Registerer { + r := m.registries[m.defaultPath] + return r +} + +func (m *metrics) Registerer(path string) Registerer { + if r, ok := m.registries[path]; ok { + return r + } + r := ®istry{Registry: prometheus.NewRegistry()} + m.registries[path] = r + return r +} + +func (m *metrics) Run() error { + m.logger.Info("Run started") + for p, r := range m.registries { + m.mux.Handle(p, promhttp.HandlerFor(r, promhttp.HandlerOpts{})) + } + m.mux.HandleFunc("/health", func(w http.ResponseWriter, req *http.Request) { + w.Write([]byte("healthy")) // nolint:errcheck + }) + if err := m.server.ListenAndServe(); err != nil && err != http.ErrServerClosed { + m.logger.Error("Failed to listen and serve", zap.Error(err)) + return err + } + m.logger.Info("Run finished") + return nil +} + +func (m *metrics) Stop() { + m.logger.Info("Stop started") + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + m.server.Shutdown(ctx) // nolint:errcheck + m.logger.Info("Stop finished") +} + +func (m *metrics) Check(ctx context.Context) health.Status { + resultCh := make(chan health.Status, 1) + go func() { + resp, err := http.Get(m.opts.healthCheckURL) + if resp != nil { + defer resp.Body.Close() + } + if err != nil || resp.StatusCode != 200 { + m.logger.Error("Unhealthy", zap.Any("response", resp), zap.Error(err)) + resultCh <- health.Unhealthy + return + } + resultCh <- health.Healthy + }() + select { + case <-ctx.Done(): + m.logger.Error("Unhealthy due to context Done is closed", zap.Error(ctx.Err())) + return health.Unhealthy + case status := <-resultCh: + return status + } +} diff --git a/pkg/metrics/metrics_test.go b/pkg/metrics/metrics_test.go new file mode 100644 
index 000000000..823bc2c71 --- /dev/null +++ b/pkg/metrics/metrics_test.go @@ -0,0 +1,40 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metrics + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/health" +) + +func TestCheckHealthy(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("healthy")) + })) + defer ts.Close() + m := NewMetrics(9002, "/metrics", WithHealthCheckURL(ts.URL)) + assert.Equal(t, health.Healthy, m.Check(context.TODO())) +} + +func TestCheckUnhealthy(t *testing.T) { + m := NewMetrics(9002, "/metrics") + assert.Equal(t, health.Unhealthy, m.Check(context.TODO())) +} diff --git a/pkg/metrics/mock/BUILD.bazel b/pkg/metrics/mock/BUILD.bazel new file mode 100644 index 000000000..684066dc2 --- /dev/null +++ b/pkg/metrics/mock/BUILD.bazel @@ -0,0 +1,14 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["metrics.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/metrics/mock", + visibility = ["//visibility:public"], + deps = [ + "//pkg/health:go_default_library", + "//pkg/metrics:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_prometheus_client_golang//prometheus:go_default_library", + ], +) diff --git 
a/pkg/metrics/mock/metrics.go b/pkg/metrics/mock/metrics.go new file mode 100644 index 000000000..e1c26a84e --- /dev/null +++ b/pkg/metrics/mock/metrics.go @@ -0,0 +1,160 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: metrics.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + prometheus "github.com/prometheus/client_golang/prometheus" + + health "github.com/bucketeer-io/bucketeer/pkg/health" + metrics "github.com/bucketeer-io/bucketeer/pkg/metrics" +) + +// MockRegisterer is a mock of Registerer interface. +type MockRegisterer struct { + ctrl *gomock.Controller + recorder *MockRegistererMockRecorder +} + +// MockRegistererMockRecorder is the mock recorder for MockRegisterer. +type MockRegistererMockRecorder struct { + mock *MockRegisterer +} + +// NewMockRegisterer creates a new mock instance. +func NewMockRegisterer(ctrl *gomock.Controller) *MockRegisterer { + mock := &MockRegisterer{ctrl: ctrl} + mock.recorder = &MockRegistererMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockRegisterer) EXPECT() *MockRegistererMockRecorder { + return m.recorder +} + +// MustRegister mocks base method. +func (m *MockRegisterer) MustRegister(arg0 ...prometheus.Collector) { + m.ctrl.T.Helper() + varargs := []interface{}{} + for _, a := range arg0 { + varargs = append(varargs, a) + } + m.ctrl.Call(m, "MustRegister", varargs...) +} + +// MustRegister indicates an expected call of MustRegister. +func (mr *MockRegistererMockRecorder) MustRegister(arg0 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MustRegister", reflect.TypeOf((*MockRegisterer)(nil).MustRegister), arg0...) +} + +// Unregister mocks base method. 
+func (m *MockRegisterer) Unregister(arg0 prometheus.Collector) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Unregister", arg0) + ret0, _ := ret[0].(bool) + return ret0 +} + +// Unregister indicates an expected call of Unregister. +func (mr *MockRegistererMockRecorder) Unregister(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Unregister", reflect.TypeOf((*MockRegisterer)(nil).Unregister), arg0) +} + +// MockMetrics is a mock of Metrics interface. +type MockMetrics struct { + ctrl *gomock.Controller + recorder *MockMetricsMockRecorder +} + +// MockMetricsMockRecorder is the mock recorder for MockMetrics. +type MockMetricsMockRecorder struct { + mock *MockMetrics +} + +// NewMockMetrics creates a new mock instance. +func NewMockMetrics(ctrl *gomock.Controller) *MockMetrics { + mock := &MockMetrics{ctrl: ctrl} + mock.recorder = &MockMetricsMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockMetrics) EXPECT() *MockMetricsMockRecorder { + return m.recorder +} + +// Check mocks base method. +func (m *MockMetrics) Check(ctx context.Context) health.Status { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Check", ctx) + ret0, _ := ret[0].(health.Status) + return ret0 +} + +// Check indicates an expected call of Check. +func (mr *MockMetricsMockRecorder) Check(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Check", reflect.TypeOf((*MockMetrics)(nil).Check), ctx) +} + +// DefaultRegisterer mocks base method. +func (m *MockMetrics) DefaultRegisterer() metrics.Registerer { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DefaultRegisterer") + ret0, _ := ret[0].(metrics.Registerer) + return ret0 +} + +// DefaultRegisterer indicates an expected call of DefaultRegisterer. 
+func (mr *MockMetricsMockRecorder) DefaultRegisterer() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DefaultRegisterer", reflect.TypeOf((*MockMetrics)(nil).DefaultRegisterer)) +} + +// Registerer mocks base method. +func (m *MockMetrics) Registerer(path string) metrics.Registerer { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Registerer", path) + ret0, _ := ret[0].(metrics.Registerer) + return ret0 +} + +// Registerer indicates an expected call of Registerer. +func (mr *MockMetricsMockRecorder) Registerer(path interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Registerer", reflect.TypeOf((*MockMetrics)(nil).Registerer), path) +} + +// Run mocks base method. +func (m *MockMetrics) Run() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Run") + ret0, _ := ret[0].(error) + return ret0 +} + +// Run indicates an expected call of Run. +func (mr *MockMetricsMockRecorder) Run() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockMetrics)(nil).Run)) +} + +// Stop mocks base method. +func (m *MockMetrics) Stop() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Stop") +} + +// Stop indicates an expected call of Stop. 
+func (mr *MockMetricsMockRecorder) Stop() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockMetrics)(nil).Stop)) +} diff --git a/pkg/metricsevent/cmd/persister/BUILD.bazel b/pkg/metricsevent/cmd/persister/BUILD.bazel new file mode 100644 index 000000000..8f986ab6b --- /dev/null +++ b/pkg/metricsevent/cmd/persister/BUILD.bazel @@ -0,0 +1,19 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["persister.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/metricsevent/cmd/persister", + visibility = ["//visibility:public"], + deps = [ + "//pkg/cli:go_default_library", + "//pkg/health:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/metricsevent/persister:go_default_library", + "//pkg/pubsub:go_default_library", + "//pkg/pubsub/puller:go_default_library", + "//pkg/rpc:go_default_library", + "@in_gopkg_alecthomas_kingpin_v2//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/metricsevent/cmd/persister/persister.go b/pkg/metricsevent/cmd/persister/persister.go new file mode 100644 index 000000000..8942447ed --- /dev/null +++ b/pkg/metricsevent/cmd/persister/persister.go @@ -0,0 +1,137 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package persister + +import ( + "context" + "time" + + "go.uber.org/zap" + kingpin "gopkg.in/alecthomas/kingpin.v2" + + "github.com/bucketeer-io/bucketeer/pkg/cli" + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + pst "github.com/bucketeer-io/bucketeer/pkg/metricsevent/persister" + "github.com/bucketeer-io/bucketeer/pkg/pubsub" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller" + "github.com/bucketeer-io/bucketeer/pkg/rpc" +) + +const command = "persister" + +type Persister interface { + Run(context.Context, metrics.Metrics, *zap.Logger) error +} + +type persister struct { + *kingpin.CmdClause + port *int + project *string + subscription *string + maxMPS *int + numWorkers *int + topic *string + flushSize *int + flushInterval *time.Duration + certPath *string + keyPath *string + pullerNumGoroutines *int + pullerMaxOutstandingMessages *int + pullerMaxOutstandingBytes *int +} + +func RegisterCommand(r cli.CommandRegistry, p cli.ParentCommand) cli.Command { + cmd := p.Command(command, "Start metricsevent persister") + persister := &persister{ + CmdClause: cmd, + port: cmd.Flag("port", "Port to bind to.").Default("9090").Int(), + project: cmd.Flag("project", "Google Cloud project name.").String(), + subscription: cmd.Flag("subscription", "Google PubSub subscription name.").Required().String(), + topic: cmd.Flag("topic", "Google PubSub topic name.").Required().String(), + maxMPS: cmd.Flag("max-mps", "Maximum messages should be handled in a second.").Default("1000").Int(), + numWorkers: cmd.Flag("num-workers", "Number of workers.").Default("2").Int(), + flushSize: cmd.Flag("flush-size", "Maximum number of messages in one flush.").Default("100").Int(), + flushInterval: cmd.Flag("flush-interval", "Maximum interval between two flushes.").Default("2s").Duration(), + certPath: cmd.Flag("cert", "Path to TLS certificate.").Required().String(), + keyPath: cmd.Flag("key", "Path to TLS key.").Required().String(), + 
pullerNumGoroutines: cmd.Flag( + "puller-num-goroutines", + "Number of goroutines will be spawned to pull messages.", + ).Int(), + pullerMaxOutstandingMessages: cmd.Flag( + "puller-max-outstanding-messages", + "Maximum number of unprocessed messages.", + ).Int(), + pullerMaxOutstandingBytes: cmd.Flag( + "puller-max-outstanding-bytes", + "Maximum size of unprocessed messages.", + ).Int(), + } + r.RegisterCommand(persister) + return persister +} + +func (p *persister) Run(ctx context.Context, metrics metrics.Metrics, logger *zap.Logger) error { + registerer := metrics.DefaultRegisterer() + + puller, err := p.createPuller(ctx, logger) + if err != nil { + return err + } + + persister := pst.NewPersister( + puller, + pst.WithMaxMPS(*p.maxMPS), + pst.WithNumWorkers(*p.numWorkers), + pst.WithMetrics(registerer), + pst.WithLogger(logger), + ) + defer persister.Stop() + go persister.Run() // nolint:errcheck + + healthChecker := health.NewGrpcChecker( + health.WithTimeout(time.Second), + health.WithCheck("metrics", metrics.Check), + health.WithCheck("persister", persister.Check), + ) + go healthChecker.Run(ctx) + + server := rpc.NewServer(healthChecker, *p.certPath, *p.keyPath, + rpc.WithPort(*p.port), + rpc.WithMetrics(registerer), + rpc.WithLogger(logger), + rpc.WithHandler("/health", healthChecker), + ) + defer server.Stop(10 * time.Second) + go server.Run() + + <-ctx.Done() + return nil +} + +func (p *persister) createPuller(ctx context.Context, logger *zap.Logger) (puller.Puller, error) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + client, err := pubsub.NewClient(ctx, *p.project, pubsub.WithLogger(logger)) + if err != nil { + return nil, err + } + return client.CreatePuller(*p.subscription, *p.topic, + pubsub.WithNumGoroutines(*p.pullerNumGoroutines), + pubsub.WithMaxOutstandingMessages(*p.pullerMaxOutstandingMessages), + pubsub.WithMaxOutstandingBytes(*p.pullerMaxOutstandingBytes), + ) +} diff --git 
a/pkg/metricsevent/persister/BUILD.bazel b/pkg/metricsevent/persister/BUILD.bazel new file mode 100644 index 000000000..5d88caa58 --- /dev/null +++ b/pkg/metricsevent/persister/BUILD.bazel @@ -0,0 +1,45 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "metrics.go", + "persister.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/metricsevent/persister", + visibility = ["//visibility:public"], + deps = [ + "//pkg/errgroup:go_default_library", + "//pkg/health:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/metricsevent/storage:go_default_library", + "//pkg/pubsub/puller:go_default_library", + "//pkg/pubsub/puller/codes:go_default_library", + "//proto/event/client:go_default_library", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_golang_protobuf//ptypes:go_default_library_gen", + "@com_github_prometheus_client_golang//prometheus:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["persister_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/log:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/metrics/mock:go_default_library", + "//pkg/metricsevent/storage/mock:go_default_library", + "//pkg/pubsub/puller:go_default_library", + "//pkg/pubsub/puller/mock:go_default_library", + "//proto/event/client:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_golang_protobuf//ptypes:go_default_library_gen", + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + "@io_bazel_rules_go//proto/wkt:duration_go_proto", + ], +) diff --git a/pkg/metricsevent/persister/metrics.go b/pkg/metricsevent/persister/metrics.go new file mode 100644 index 000000000..07f5edb56 --- /dev/null +++ 
b/pkg/metricsevent/persister/metrics.go
@@ -0,0 +1,58 @@
+// Copyright 2022 The Bucketeer Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package persister
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+
+	"github.com/bucketeer-io/bucketeer/pkg/metrics"
+)
+
+var (
+	receivedCounter = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Namespace: "bucketeer",
+			Subsystem: "metrics_event",
+			Name:      "persister_received_total",
+			Help:      "Total number of received messages",
+		})
+
+	handledCounter = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Namespace: "bucketeer",
+			Subsystem: "metrics_event",
+			Name:      "persister_handled_total",
+			Help:      "Total number of handled messages",
+		}, []string{"code"})
+
+	handledHistogram = prometheus.NewHistogramVec(
+		prometheus.HistogramOpts{
+			Namespace: "bucketeer",
+			Subsystem: "metrics_event",
+			Name:      "handled_seconds",
+			Help:      "Histogram of message handling duration (seconds)",
+			Buckets:   prometheus.DefBuckets,
+		}, []string{"code"})
+)
+
+// registerMetrics registers every collector defined above, including
+// handledHistogram, which runWorker observes; leaving it unregistered hides it from /metrics.
+func registerMetrics(r metrics.Registerer) {
+	r.MustRegister(
+		receivedCounter,
+		handledCounter,
+		handledHistogram,
+	)
+}
diff --git a/pkg/metricsevent/persister/persister.go b/pkg/metricsevent/persister/persister.go
new file mode 100644
index 000000000..351644ba9
--- /dev/null
+++ b/pkg/metricsevent/persister/persister.go
@@ -0,0 +1,290 @@
+// Copyright 2022 The Bucketeer Authors.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package persister + +import ( + "context" + "errors" + "time" + + "github.com/golang/protobuf/proto" // nolint:staticcheck + "github.com/golang/protobuf/ptypes" + "go.uber.org/zap" + + "github.com/bucketeer-io/bucketeer/pkg/errgroup" + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/metricsevent/storage" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller/codes" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/client" +) + +var ( + ErrUnknownEvent = errors.New("metricsevent persister: unknown metrics event") + ErrInvalidDuration = errors.New("metricsevent persister: invalid duration") + + getEvaluationLatencyMetricsEvent = &eventproto.GetEvaluationLatencyMetricsEvent{} + getEvaluationSizeMetricsEvent = &eventproto.GetEvaluationSizeMetricsEvent{} + timeoutErrorCountMetricsEvent = &eventproto.TimeoutErrorCountMetricsEvent{} + internalErrorCountMetricsEvent = &eventproto.InternalErrorCountMetricsEvent{} +) + +type options struct { + maxMPS int + numWorkers int + pubsubTimeout time.Duration + metrics metrics.Registerer + logger *zap.Logger +} + +type Option func(*options) + +var defaultOptions = &options{ + maxMPS: 1000, + numWorkers: 1, + pubsubTimeout: 20 * time.Second, + logger: zap.NewNop(), +} + +func WithMaxMPS(mps int) Option { + return func(opts 
*options) {
+		opts.maxMPS = mps
+	}
+}
+
+func WithNumWorkers(n int) Option {
+	return func(opts *options) {
+		opts.numWorkers = n
+	}
+}
+
+func WithMetrics(r metrics.Registerer) Option {
+	return func(opts *options) {
+		opts.metrics = r
+	}
+}
+
+func WithLogger(logger *zap.Logger) Option {
+	return func(opts *options) {
+		opts.logger = logger
+	}
+}
+
+type Persister interface {
+	Run() error
+	Stop()
+	Check(context.Context) health.Status
+}
+
+type persister struct {
+	puller  puller.RateLimitedPuller
+	storage storage.Storage
+	group   errgroup.Group
+	opts    *options
+	logger  *zap.Logger
+	ctx     context.Context
+	cancel  func()
+	doneCh  chan struct{}
+}
+
+func NewPersister(p puller.Puller, opts ...Option) Persister {
+	dopts := &options{maxMPS: defaultOptions.maxMPS, numWorkers: defaultOptions.numWorkers, pubsubTimeout: defaultOptions.pubsubTimeout, logger: defaultOptions.logger} // copy: Options must not mutate the shared defaultOptions pointer
+	for _, opt := range opts {
+		opt(dopts)
+	}
+	ctx, cancel := context.WithCancel(context.Background())
+	if dopts.metrics != nil {
+		registerMetrics(dopts.metrics)
+	}
+	logger := dopts.logger.Named("persister")
+	return &persister{
+		puller:  puller.NewRateLimitedPuller(p, dopts.maxMPS),
+		storage: storage.NewStorage(logger, dopts.metrics),
+		opts:    dopts,
+		logger:  logger,
+		ctx:     ctx,
+		cancel:  cancel,
+		doneCh:  make(chan struct{}),
+	}
+}
+
+func (p *persister) Run() error {
+	defer close(p.doneCh)
+	p.group.Go(func() error {
+		return p.puller.Run(p.ctx)
+	})
+	for i := 0; i < p.opts.numWorkers; i++ {
+		p.group.Go(p.runWorker)
+	}
+	return p.group.Wait()
+}
+
+func (p *persister) Stop() {
+	p.cancel()
+	<-p.doneCh
+}
+
+func (p *persister) Check(ctx context.Context) health.Status {
+	select {
+	case <-p.ctx.Done():
+		p.logger.Error("Unhealthy due to context Done is closed", zap.Error(p.ctx.Err()))
+		return health.Unhealthy
+	default:
+		if p.group.FinishedCount() > 0 {
+			p.logger.Error("Unhealthy", zap.Int32("FinishedCount", p.group.FinishedCount()))
+			return health.Unhealthy
+		}
+		return health.Healthy
+	}
+}
+
+func (p *persister) runWorker() error {
+	record := func(code codes.Code, startTime time.Time) {
handledCounter.WithLabelValues(code.String()).Inc()
+		handledHistogram.WithLabelValues(code.String()).Observe(time.Since(startTime).Seconds())
+	}
+	for {
+		select {
+		case msg, ok := <-p.puller.MessageCh():
+			if !ok {
+				return nil
+			}
+			receivedCounter.Inc()
+			startTime := time.Now()
+			if id := msg.Attributes["id"]; id == "" {
+				p.logger.Error("message has no id")
+				msg.Ack()
+				record(codes.MissingID, startTime)
+				continue
+			}
+			err := p.handle(msg)
+			if err != nil {
+				msg.Ack()
+				record(codes.NonRepeatableError, startTime)
+				continue
+			}
+			msg.Ack()
+			record(codes.OK, startTime)
+		case <-p.ctx.Done():
+			return nil
+		}
+	}
+}
+
+func (p *persister) handle(message *puller.Message) error {
+	metricsEvents, err := p.unmarshalMessage(message)
+	if err != nil {
+		p.logger.Error("message is bad")
+		return err
+	}
+	err = p.saveMetrics(metricsEvents)
+	if err != nil {
+		p.logger.Error("could not store data to prometheus client", zap.Error(err))
+		return err
+	}
+	return nil
+}
+
+func (p *persister) unmarshalMessage(message *puller.Message) (*eventproto.MetricsEvent, error) {
+	event := &eventproto.Event{}
+	if err := proto.Unmarshal(message.Data, event); err != nil {
+		p.logger.Error("unmarshal event failed",
+			zap.Error(err),
+			zap.Any("msg", message),
+		)
+		handledCounter.WithLabelValues(codes.BadMessage.String()).Inc()
+		return nil, err
+	}
+	me := &eventproto.MetricsEvent{}
+	if err := ptypes.UnmarshalAny(event.Event, me); err != nil {
+		p.logger.Error("unmarshal metrics event failed",
+			zap.Error(err),
+			zap.Any("msg", message),
+		)
+		handledCounter.WithLabelValues(codes.BadMessage.String()).Inc()
+		return nil, err
+	}
+	return me, nil
+}
+
+func (p *persister) saveMetrics(event *eventproto.MetricsEvent) error {
+	if ptypes.Is(event.Event, getEvaluationLatencyMetricsEvent) {
+		return p.saveGetEvaluationLatencyMetricsEvent(event)
+	}
+	if ptypes.Is(event.Event, getEvaluationSizeMetricsEvent) {
+		return p.saveGetEvaluationSizeMetricsEvent(event)
+	}
+	if 
ptypes.Is(event.Event, timeoutErrorCountMetricsEvent) { + return p.saveTimeoutErrorCountMetricsEvent(event) + } + if ptypes.Is(event.Event, internalErrorCountMetricsEvent) { + return p.saveInternalErrorCountMetricsEvent(event) + } + return ErrUnknownEvent +} + +func (p *persister) saveGetEvaluationLatencyMetricsEvent(event *eventproto.MetricsEvent) error { + ev := &eventproto.GetEvaluationLatencyMetricsEvent{} + if err := ptypes.UnmarshalAny(event.Event, ev); err != nil { + return err + } + if ev.Duration == nil { + return ErrInvalidDuration + } + var tag, status string + if ev.Labels != nil { + tag = ev.Labels["tag"] + status = ev.Labels["state"] + } + dur, err := ptypes.Duration(ev.Duration) + if err != nil { + return ErrInvalidDuration + } + p.storage.SaveGetEvaluationLatencyMetricsEvent(tag, status, dur) + return nil +} + +func (p *persister) saveGetEvaluationSizeMetricsEvent(event *eventproto.MetricsEvent) error { + ev := &eventproto.GetEvaluationSizeMetricsEvent{} + if err := ptypes.UnmarshalAny(event.Event, ev); err != nil { + return err + } + var tag, status string + if ev.Labels != nil { + tag = ev.Labels["tag"] + status = ev.Labels["state"] + } + p.storage.SaveGetEvaluationSizeMetricsEvent(tag, status, ev.SizeByte) + return nil +} + +func (p *persister) saveTimeoutErrorCountMetricsEvent(event *eventproto.MetricsEvent) error { + ev := &eventproto.TimeoutErrorCountMetricsEvent{} + if err := ptypes.UnmarshalAny(event.Event, ev); err != nil { + return err + } + p.storage.SaveTimeoutErrorCountMetricsEvent(ev.Tag) + return nil +} + +func (p *persister) saveInternalErrorCountMetricsEvent(event *eventproto.MetricsEvent) error { + ev := &eventproto.InternalErrorCountMetricsEvent{} + if err := ptypes.UnmarshalAny(event.Event, ev); err != nil { + return err + } + p.storage.SaveInternalErrorCountMetricsEvent(ev.Tag) + return nil +} diff --git a/pkg/metricsevent/persister/persister_test.go b/pkg/metricsevent/persister/persister_test.go new file mode 100644 index 
000000000..50ea2cbeb --- /dev/null +++ b/pkg/metricsevent/persister/persister_test.go @@ -0,0 +1,260 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package persister + +import ( + "context" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/ptypes/duration" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + metricsmock "github.com/bucketeer-io/bucketeer/pkg/metrics/mock" + storagemock "github.com/bucketeer-io/bucketeer/pkg/metricsevent/storage/mock" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller" + pullermock "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller/mock" + clientevent "github.com/bucketeer-io/bucketeer/proto/event/client" +) + +func TestWithMaxMPS(t *testing.T) { + t.Parallel() + opts := &options{} + require.Equal(t, 0, opts.maxMPS) + WithMaxMPS(1)(opts) + assert.Equal(t, 1, opts.maxMPS) +} + +func TestWithMetrics(t *testing.T) { + t.Parallel() + metrics := metrics.NewMetrics( + 9999, + "/metrics", + ) + reg := metrics.DefaultRegisterer() + f := WithMetrics(reg) + opt := &options{} + f(opt) + assert.Equal(t, reg, opt.metrics) +} + +func TestWithLogger(t *testing.T) { + t.Parallel() + logger, err := log.NewLogger() + require.NoError(t, err) + f := 
WithLogger(logger) + opt := &options{} + f(opt) + assert.Equal(t, logger, opt.logger) +} + +func TestNewPersister(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + puller := pullermock.NewMockPuller(mockController) + registerer := metricsmock.NewMockRegisterer(mockController) + registerer.EXPECT().MustRegister(gomock.Any()).Return().Times(2) + p := NewPersister(puller, + WithMetrics(registerer), + ) + assert.IsType(t, &persister{}, p) +} + +func TestUnmarshalMessage(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*testing.T) (*clientevent.MetricsEvent, *puller.Message) + expectedErr bool + }{ + "getEvaluationLatencyMetricsEvent: success": { + setup: func(t *testing.T) (*clientevent.MetricsEvent, *puller.Message) { + e, err := ptypes.MarshalAny(&clientevent.GetEvaluationLatencyMetricsEvent{ + Labels: map[string]string{"tag": "test", "status": "success"}, + Duration: &duration.Duration{Seconds: time.Now().Unix()}, + }) + require.NoError(t, err) + me := &clientevent.MetricsEvent{ + Timestamp: time.Now().Unix(), + Event: e, + } + any, err := ptypes.MarshalAny(me) + assert.NoError(t, err) + event := &clientevent.Event{Event: any} + data, err := proto.Marshal(event) + assert.NoError(t, err) + return me, &puller.Message{Data: data} + }, + expectedErr: false, + }, + "getEvaluationLatencyMetricsEvent: invalid message data": { + setup: func(t *testing.T) (*clientevent.MetricsEvent, *puller.Message) { + me := &clientevent.GoalEvent{} + data, err := proto.Marshal(me) + assert.NoError(t, err) + return nil, &puller.Message{Data: data} + }, + expectedErr: true, + }, + "getEvaluationLatencyMetricsEvent: invalid metrics event": { + setup: func(t *testing.T) (*clientevent.MetricsEvent, *puller.Message) { + me := &clientevent.GoalEvent{} + any, err := ptypes.MarshalAny(me) + assert.NoError(t, err) + event := 
&clientevent.Event{Event: any} + data, err := proto.Marshal(event) + assert.NoError(t, err) + return nil, &puller.Message{Data: data} + }, + expectedErr: true, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + pst := newPersister(t, mockController) + expected, input := p.setup(t) + e, err := pst.unmarshalMessage(input) + assert.Equal(t, p.expectedErr, err != nil) + if !p.expectedErr { + assert.Equal(t, expected.Timestamp, e.Timestamp) + } + }) + } +} + +func newPersister(t *testing.T, mockController *gomock.Controller) *persister { + t.Helper() + ctx, cancel := context.WithCancel(context.Background()) + logger, err := log.NewLogger() + require.NoError(t, err) + return &persister{ + puller: pullermock.NewMockRateLimitedPuller(mockController), + storage: storagemock.NewMockStorage(mockController), + logger: logger.Named("experiment-cacher"), + ctx: ctx, + cancel: cancel, + doneCh: make(chan struct{}), + } +} + +func TestSaveMetrics(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*testing.T, *persister) *clientevent.MetricsEvent + expectedErr error + }{ + "error: unknown event": { + setup: func(t *testing.T, pst *persister) *clientevent.MetricsEvent { + e, err := ptypes.MarshalAny(&clientevent.GoalEvent{}) + require.NoError(t, err) + return &clientevent.MetricsEvent{ + Timestamp: time.Now().Unix(), + Event: e, + } + }, + expectedErr: ErrUnknownEvent, + }, + "getEvaluationLatencyMetricsEvent: error: invalid duration": { + setup: func(t *testing.T, pst *persister) *clientevent.MetricsEvent { + e, err := ptypes.MarshalAny(&clientevent.GetEvaluationLatencyMetricsEvent{ + Labels: map[string]string{"tag": "test", "state": "FULL"}, + Duration: nil, + }) + require.NoError(t, err) + return &clientevent.MetricsEvent{ + Timestamp: time.Now().Unix(), + Event: e, + } + }, + expectedErr: ErrInvalidDuration, + }, + "getEvaluationLatencyMetricsEvent: 
success": { + setup: func(t *testing.T, pst *persister) *clientevent.MetricsEvent { + pst.storage.(*storagemock.MockStorage).EXPECT().SaveGetEvaluationLatencyMetricsEvent(gomock.Any(), gomock.Any(), gomock.Any()).Return().Times(1) + e, err := ptypes.MarshalAny(&clientevent.GetEvaluationLatencyMetricsEvent{ + Labels: map[string]string{"tag": "test", "state": "FULL"}, + Duration: &duration.Duration{Seconds: time.Now().Unix()}, + }) + require.NoError(t, err) + return &clientevent.MetricsEvent{ + Timestamp: time.Now().Unix(), + Event: e, + } + }, + expectedErr: nil, + }, + "getEvaluationSizeMetricsEvent: success": { + setup: func(t *testing.T, pst *persister) *clientevent.MetricsEvent { + pst.storage.(*storagemock.MockStorage).EXPECT().SaveGetEvaluationSizeMetricsEvent(gomock.Any(), gomock.Any(), gomock.Any()).Return().Times(1) + e, err := ptypes.MarshalAny(&clientevent.GetEvaluationSizeMetricsEvent{ + Labels: map[string]string{"tag": "test", "state": "FULL"}, + SizeByte: 100, + }) + require.NoError(t, err) + return &clientevent.MetricsEvent{ + Timestamp: time.Now().Unix(), + Event: e, + } + }, + expectedErr: nil, + }, + "TimeoutErrorCountMetricsEvent: success": { + setup: func(t *testing.T, pst *persister) *clientevent.MetricsEvent { + pst.storage.(*storagemock.MockStorage).EXPECT().SaveTimeoutErrorCountMetricsEvent(gomock.Any()).Return().Times(1) + e, err := ptypes.MarshalAny(&clientevent.TimeoutErrorCountMetricsEvent{ + Tag: "test", + }) + require.NoError(t, err) + return &clientevent.MetricsEvent{ + Timestamp: time.Now().Unix(), + Event: e, + } + }, + expectedErr: nil, + }, + "InternalErrorCountMetricsEvent: success": { + setup: func(t *testing.T, pst *persister) *clientevent.MetricsEvent { + pst.storage.(*storagemock.MockStorage).EXPECT().SaveInternalErrorCountMetricsEvent(gomock.Any()).Return().Times(1) + e, err := ptypes.MarshalAny(&clientevent.InternalErrorCountMetricsEvent{ + Tag: "test", + }) + require.NoError(t, err) + return &clientevent.MetricsEvent{ + 
Timestamp: time.Now().Unix(), + Event: e, + } + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + pst := newPersister(t, mockController) + input := p.setup(t, pst) + err := pst.saveMetrics(input) + assert.Equal(t, p.expectedErr, err) + }) + } +} diff --git a/pkg/metricsevent/storage/BUILD.bazel b/pkg/metricsevent/storage/BUILD.bazel new file mode 100644 index 000000000..e1444e197 --- /dev/null +++ b/pkg/metricsevent/storage/BUILD.bazel @@ -0,0 +1,29 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "event.go", + "metrics.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/metricsevent/storage", + visibility = ["//visibility:public"], + deps = [ + "//pkg/metrics:go_default_library", + "@com_github_prometheus_client_golang//prometheus:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["event_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/log:go_default_library", + "//pkg/metrics/mock:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + ], +) diff --git a/pkg/metricsevent/storage/event.go b/pkg/metricsevent/storage/event.go new file mode 100644 index 000000000..905f37d18 --- /dev/null +++ b/pkg/metricsevent/storage/event.go @@ -0,0 +1,56 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE
+package storage
+
+import (
+	"time"
+
+	"go.uber.org/zap"
+
+	"github.com/bucketeer-io/bucketeer/pkg/metrics"
+)
+
+type Storage interface {
+	SaveGetEvaluationLatencyMetricsEvent(string, string, time.Duration)
+	SaveGetEvaluationSizeMetricsEvent(string, string, int32)
+	SaveTimeoutErrorCountMetricsEvent(string)
+	SaveInternalErrorCountMetricsEvent(string)
+}
+
+type storage struct {
+	logger *zap.Logger
+}
+
+func NewStorage(logger *zap.Logger, register metrics.Registerer) Storage {
+	if register != nil { registerMetrics(register) } // nil-guard matches persister.NewPersister; avoids panic when metrics are disabled
+	return &storage{logger: logger.Named("storage")}
+}
+
+func (s *storage) SaveGetEvaluationLatencyMetricsEvent(tag, status string, duration time.Duration) {
+	sdkGetEvaluationsLatencyHistogram.WithLabelValues(tag, status).Observe(duration.Seconds())
+}
+
+func (s *storage) SaveGetEvaluationSizeMetricsEvent(tag, status string, sizeByte int32) {
+	sdkGetEvaluationsSizeHistogram.WithLabelValues(tag, status).Observe(float64(sizeByte))
+}
+
+func (s *storage) SaveTimeoutErrorCountMetricsEvent(tag string) {
+	sdkTimeoutErrorCounter.WithLabelValues(tag).Inc()
+}
+
+func (s *storage) SaveInternalErrorCountMetricsEvent(tag string) {
+	sdkInternalErrorCounter.WithLabelValues(tag).Inc()
+}
diff --git a/pkg/metricsevent/storage/event_test.go b/pkg/metricsevent/storage/event_test.go
new file mode 100644
index 000000000..1e3789f27
--- /dev/null
+++ b/pkg/metricsevent/storage/event_test.go
@@ -0,0 +1,38 @@
+// Copyright 2022 The Bucketeer Authors.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/bucketeer-io/bucketeer/pkg/log" + metricsmock "github.com/bucketeer-io/bucketeer/pkg/metrics/mock" +) + +func TestNewStorage(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + registerer := metricsmock.NewMockRegisterer(mockController) + registerer.EXPECT().MustRegister(gomock.Any()).Return().Times(1) + logger, err := log.NewLogger() + require.NoError(t, err) + s := NewStorage(logger, registerer) + assert.IsType(t, &storage{}, s) +} diff --git a/pkg/metricsevent/storage/metrics.go b/pkg/metricsevent/storage/metrics.go new file mode 100644 index 000000000..34fd42864 --- /dev/null +++ b/pkg/metricsevent/storage/metrics.go @@ -0,0 +1,66 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/bucketeer-io/bucketeer/pkg/metrics" +) + +var ( + sdkGetEvaluationsLatencyHistogram = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "bucketeer", + Subsystem: "metrics_event", + Name: "sdk_get_evaluations_handling_seconds", + Help: "Histogram of get evaluations response latency (seconds).", + Buckets: prometheus.DefBuckets, + }, []string{"tag", "state"}) + + sdkGetEvaluationsSizeHistogram = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "bucketeer", + Subsystem: "metrics_event", + Name: "sdk_get_evaluations_size", + Help: "Histogram of get evaluations response size (byte).", + Buckets: prometheus.DefBuckets, + }, []string{"tag", "state"}) + + sdkTimeoutErrorCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "metrics_event", + Name: "sdk_timeout_error_total", + Help: "Total number of sdk timeout errors", + }, []string{"tag"}) + + sdkInternalErrorCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "metrics_event", + Name: "sdk_internal_error_total", + Help: "Total number of sdk internal errors", + }, []string{"tag"}) +) + +func registerMetrics(r metrics.Registerer) { + r.MustRegister( + sdkGetEvaluationsLatencyHistogram, + sdkGetEvaluationsSizeHistogram, + sdkTimeoutErrorCounter, + sdkInternalErrorCounter, + ) +} diff --git a/pkg/metricsevent/storage/mock/BUILD.bazel b/pkg/metricsevent/storage/mock/BUILD.bazel new file mode 100644 index 000000000..002f86324 --- /dev/null +++ b/pkg/metricsevent/storage/mock/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["event.go"], + importpath = 
"github.com/bucketeer-io/bucketeer/pkg/metricsevent/storage/mock", + visibility = ["//visibility:public"], + deps = ["@com_github_golang_mock//gomock:go_default_library"], +) diff --git a/pkg/metricsevent/storage/mock/event.go b/pkg/metricsevent/storage/mock/event.go new file mode 100644 index 000000000..8c058185f --- /dev/null +++ b/pkg/metricsevent/storage/mock/event.go @@ -0,0 +1,83 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: event.go + +// Package mock is a generated GoMock package. +package mock + +import ( + reflect "reflect" + time "time" + + gomock "github.com/golang/mock/gomock" +) + +// MockStorage is a mock of Storage interface. +type MockStorage struct { + ctrl *gomock.Controller + recorder *MockStorageMockRecorder +} + +// MockStorageMockRecorder is the mock recorder for MockStorage. +type MockStorageMockRecorder struct { + mock *MockStorage +} + +// NewMockStorage creates a new mock instance. +func NewMockStorage(ctrl *gomock.Controller) *MockStorage { + mock := &MockStorage{ctrl: ctrl} + mock.recorder = &MockStorageMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockStorage) EXPECT() *MockStorageMockRecorder { + return m.recorder +} + +// SaveGetEvaluationLatencyMetricsEvent mocks base method. +func (m *MockStorage) SaveGetEvaluationLatencyMetricsEvent(arg0, arg1 string, arg2 time.Duration) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SaveGetEvaluationLatencyMetricsEvent", arg0, arg1, arg2) +} + +// SaveGetEvaluationLatencyMetricsEvent indicates an expected call of SaveGetEvaluationLatencyMetricsEvent. 
+func (mr *MockStorageMockRecorder) SaveGetEvaluationLatencyMetricsEvent(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveGetEvaluationLatencyMetricsEvent", reflect.TypeOf((*MockStorage)(nil).SaveGetEvaluationLatencyMetricsEvent), arg0, arg1, arg2) +} + +// SaveGetEvaluationSizeMetricsEvent mocks base method. +func (m *MockStorage) SaveGetEvaluationSizeMetricsEvent(arg0, arg1 string, arg2 int32) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SaveGetEvaluationSizeMetricsEvent", arg0, arg1, arg2) +} + +// SaveGetEvaluationSizeMetricsEvent indicates an expected call of SaveGetEvaluationSizeMetricsEvent. +func (mr *MockStorageMockRecorder) SaveGetEvaluationSizeMetricsEvent(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveGetEvaluationSizeMetricsEvent", reflect.TypeOf((*MockStorage)(nil).SaveGetEvaluationSizeMetricsEvent), arg0, arg1, arg2) +} + +// SaveInternalErrorCountMetricsEvent mocks base method. +func (m *MockStorage) SaveInternalErrorCountMetricsEvent(arg0 string) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SaveInternalErrorCountMetricsEvent", arg0) +} + +// SaveInternalErrorCountMetricsEvent indicates an expected call of SaveInternalErrorCountMetricsEvent. +func (mr *MockStorageMockRecorder) SaveInternalErrorCountMetricsEvent(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveInternalErrorCountMetricsEvent", reflect.TypeOf((*MockStorage)(nil).SaveInternalErrorCountMetricsEvent), arg0) +} + +// SaveTimeoutErrorCountMetricsEvent mocks base method. +func (m *MockStorage) SaveTimeoutErrorCountMetricsEvent(arg0 string) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SaveTimeoutErrorCountMetricsEvent", arg0) +} + +// SaveTimeoutErrorCountMetricsEvent indicates an expected call of SaveTimeoutErrorCountMetricsEvent. 
+func (mr *MockStorageMockRecorder) SaveTimeoutErrorCountMetricsEvent(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveTimeoutErrorCountMetricsEvent", reflect.TypeOf((*MockStorage)(nil).SaveTimeoutErrorCountMetricsEvent), arg0) +} diff --git a/pkg/migration/cmd/mysqlserver/BUILD.bazel b/pkg/migration/cmd/mysqlserver/BUILD.bazel new file mode 100644 index 000000000..3bd529802 --- /dev/null +++ b/pkg/migration/cmd/mysqlserver/BUILD.bazel @@ -0,0 +1,19 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["mysqlserver.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/migration/cmd/mysqlserver", + visibility = ["//visibility:public"], + deps = [ + "//pkg/cli:go_default_library", + "//pkg/health:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/migration/mysql/api:go_default_library", + "//pkg/migration/mysql/migrate:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/token:go_default_library", + "@in_gopkg_alecthomas_kingpin_v2//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/migration/cmd/mysqlserver/mysqlserver.go b/pkg/migration/cmd/mysqlserver/mysqlserver.go new file mode 100644 index 000000000..27d2f0ad9 --- /dev/null +++ b/pkg/migration/cmd/mysqlserver/mysqlserver.go @@ -0,0 +1,122 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package mysqlserver + +import ( + "context" + "time" + + "go.uber.org/zap" + "gopkg.in/alecthomas/kingpin.v2" + + "github.com/bucketeer-io/bucketeer/pkg/cli" + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/migration/mysql/api" + "github.com/bucketeer-io/bucketeer/pkg/migration/mysql/migrate" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/token" +) + +const command = "mysql-server" + +type server struct { + *kingpin.CmdClause + port *int + githubUser *string + githubAccessTokenPath *string + githubMigrationSourcePath *string + mysqlUser *string + mysqlPass *string + mysqlHost *string + mysqlPort *int + mysqlDBName *string + certPath *string + keyPath *string + oauthKeyPath *string + oauthClientID *string + oauthIssuer *string +} + +func RegisterServerCommand(r cli.CommandRegistry, p cli.ParentCommand) cli.Command { + cmd := p.Command(command, "Start the gRPC server") + server := &server{ + CmdClause: cmd, + port: cmd.Flag("port", "Port to bind to.").Default("9090").Int(), + githubUser: cmd.Flag("github-user", "GitHub user.").Required().String(), + githubAccessTokenPath: cmd.Flag("github-access-token-path", "Path to GitHub access token.").Required().String(), + githubMigrationSourcePath: cmd.Flag( + "github-migration-source-path", + "Path to migration file in GitHub. (e.g. 
owner/repo/path#ref)", + ).Required().String(), + mysqlUser: cmd.Flag("mysql-user", "MySQL user.").Required().String(), + mysqlPass: cmd.Flag("mysql-pass", "MySQL password.").Required().String(), + mysqlHost: cmd.Flag("mysql-host", "MySQL host.").Required().String(), + mysqlPort: cmd.Flag("mysql-port", "MySQL port.").Required().Int(), + mysqlDBName: cmd.Flag("mysql-db-name", "MySQL database name.").Required().String(), + certPath: cmd.Flag("cert", "Path to TLS certificate.").Required().String(), + keyPath: cmd.Flag("key", "Path to TLS key.").Required().String(), + oauthKeyPath: cmd.Flag( + "oauth-key", + "Path to public key used to verify oauth token.", + ).Required().String(), + oauthClientID: cmd.Flag("oauth-client-id", "The oauth clientID registered at dex.").Required().String(), + oauthIssuer: cmd.Flag("oauth-issuer", "The url of dex issuer.").Required().String(), + } + r.RegisterCommand(server) + return server +} + +func (s *server) Run(ctx context.Context, metrics metrics.Metrics, logger *zap.Logger) error { + registerer := metrics.DefaultRegisterer() + + migrateClientFactory, err := migrate.NewClientFactory( + *s.githubUser, *s.githubAccessTokenPath, *s.githubMigrationSourcePath, + *s.mysqlUser, *s.mysqlPass, *s.mysqlHost, *s.mysqlPort, *s.mysqlDBName, + ) + if err != nil { + return err + } + + service := api.NewMySQLService( + migrateClientFactory, + api.WithLogger(logger), + ) + + verifier, err := token.NewVerifier(*s.oauthKeyPath, *s.oauthIssuer, *s.oauthClientID) + if err != nil { + return err + } + + healthChecker := health.NewGrpcChecker( + health.WithTimeout(time.Second), + health.WithCheck("metrics", metrics.Check), + ) + go healthChecker.Run(ctx) + + server := rpc.NewServer(service, *s.certPath, *s.keyPath, + rpc.WithPort(*s.port), + rpc.WithVerifier(verifier), + rpc.WithMetrics(registerer), + rpc.WithLogger(logger), + rpc.WithService(healthChecker), + rpc.WithHandler("/health", healthChecker), + ) + defer server.Stop(10 * time.Second) + go 
server.Run() + + <-ctx.Done() + return nil +} diff --git a/pkg/migration/mysql/api/BUILD.bazel b/pkg/migration/mysql/api/BUILD.bazel new file mode 100644 index 000000000..1f1031d40 --- /dev/null +++ b/pkg/migration/mysql/api/BUILD.bazel @@ -0,0 +1,39 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["api.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/migration/mysql/api", + visibility = ["//visibility:public"], + deps = [ + "//pkg/log:go_default_library", + "//pkg/migration/mysql/migrate:go_default_library", + "//pkg/role:go_default_library", + "//proto/event/domain:go_default_library", + "//proto/migration:go_default_library", + "@com_github_golang_migrate_migrate_v4//:go_default_library", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["api_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/log:go_default_library", + "//pkg/migration/mysql/migrate/mock:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/token:go_default_library", + "//proto/account:go_default_library", + "//proto/migration:go_default_library", + "@com_github_golang_migrate_migrate_v4//:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/migration/mysql/api/api.go b/pkg/migration/mysql/api/api.go new file mode 100644 index 000000000..48f199dfb --- /dev/null +++ b/pkg/migration/mysql/api/api.go @@ -0,0 +1,157 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "context" + "errors" + + libmigrate "github.com/golang-migrate/migrate/v4" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/migration/mysql/migrate" + "github.com/bucketeer-io/bucketeer/pkg/role" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" + migrationproto "github.com/bucketeer-io/bucketeer/proto/migration" +) + +var ( + errInternal = status.Error(codes.Internal, "migration-mysql: internal") + errUnauthenticated = status.Error(codes.Unauthenticated, "migration-mysql: unauthenticated") + errPermissionDenied = status.Error(codes.PermissionDenied, "migration-mysql: permission denied") +) + +type options struct { + logger *zap.Logger +} + +type Option func(*options) + +func WithLogger(l *zap.Logger) Option { + return func(opts *options) { + opts.logger = l + } +} + +type MySQLService struct { + migrateClientFactory migrate.ClientFactory + opts *options + logger *zap.Logger +} + +func NewMySQLService(migrateClientFactory migrate.ClientFactory, opts ...Option) *MySQLService { + dopts := &options{ + logger: zap.NewNop(), + } + for _, opt := range opts { + opt(dopts) + } + return &MySQLService{ + migrateClientFactory: migrateClientFactory, + opts: dopts, + logger: dopts.logger.Named("api"), + } +} + +func (s 
*MySQLService) Register(server *grpc.Server) { + migrationproto.RegisterMigrationMySQLServiceServer(server, s) +} + +func (s *MySQLService) MigrateAllMasterSchema( + ctx context.Context, + req *migrationproto.MigrateAllMasterSchemaRequest, +) (*migrationproto.MigrateAllMasterSchemaResponse, error) { + _, err := s.checkAdminRole(ctx) + if err != nil { + return nil, err + } + migrateClient, err := s.migrateClientFactory.New() + if err != nil { + s.logger.Error( + "Failed to new migrate client", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, errInternal + } + if err := migrateClient.Up(); err != nil { + if errors.Is(err, libmigrate.ErrNoChange) { + s.logger.Info("No change") + return &migrationproto.MigrateAllMasterSchemaResponse{}, nil + } + s.logger.Error( + "Failed to run migration", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, errInternal + } + return &migrationproto.MigrateAllMasterSchemaResponse{}, nil +} + +func (s *MySQLService) RollbackMasterSchema( + ctx context.Context, + req *migrationproto.RollbackMasterSchemaRequest, +) (*migrationproto.RollbackMasterSchemaResponse, error) { + _, err := s.checkAdminRole(ctx) + if err != nil { + return nil, err + } + migrateClient, err := s.migrateClientFactory.New() + if err != nil { + s.logger.Error( + "Failed to new migrate client", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, errInternal + } + if err := migrateClient.Steps(-int(req.Step)); err != nil { + s.logger.Error( + "Failed to run rollback", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, errInternal + } + return &migrationproto.RollbackMasterSchemaResponse{}, nil +} + +func (s *MySQLService) checkAdminRole(ctx context.Context) (*eventproto.Editor, error) { + editor, err := role.CheckAdminRole(ctx) + if err != nil { + switch status.Code(err) { + case codes.Unauthenticated: + s.logger.Info( + "Unauthenticated", 
+ log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, errUnauthenticated + case codes.PermissionDenied: + s.logger.Info( + "Permission denied", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, errPermissionDenied + default: + s.logger.Error( + "Failed to check role", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, errInternal + } + } + return editor, nil +} diff --git a/pkg/migration/mysql/api/api_test.go b/pkg/migration/mysql/api/api_test.go new file mode 100644 index 000000000..d338ecfc3 --- /dev/null +++ b/pkg/migration/mysql/api/api_test.go @@ -0,0 +1,233 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "context" + "errors" + "testing" + "time" + + libmigrate "github.com/golang-migrate/migrate/v4" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/migration/mysql/migrate/mock" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/token" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + proto "github.com/bucketeer-io/bucketeer/proto/migration" +) + +func TestNewMySQLService(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + cf := mock.NewMockClientFactory(mockController) + logger := zap.NewNop() + s := NewMySQLService(cf, WithLogger(logger)) + assert.IsType(t, &MySQLService{}, s) +} + +func TestMigrateAllMasterSchema(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*MySQLService) + req *proto.MigrateAllMasterSchemaRequest + expectedErr error + }{ + "err: failed to new migrate client": { + setup: func(ms *MySQLService) { + cf := mock.NewMockClientFactory(mockController) + cf.EXPECT().New().Return(nil, errors.New("error")) + ms.migrateClientFactory = cf + }, + req: &proto.MigrateAllMasterSchemaRequest{}, + expectedErr: errInternal, + }, + "err: failed to run migration": { + setup: func(ms *MySQLService) { + c := mock.NewMockClient(mockController) + c.EXPECT().Up().Return(errors.New("error")) + cf := mock.NewMockClientFactory(mockController) + cf.EXPECT().New().Return(c, nil) + ms.migrateClientFactory = cf + }, + req: &proto.MigrateAllMasterSchemaRequest{}, + expectedErr: errInternal, + }, + "success: no change": { + setup: func(ms *MySQLService) { + c := mock.NewMockClient(mockController) + c.EXPECT().Up().Return(libmigrate.ErrNoChange) + cf := 
mock.NewMockClientFactory(mockController) + cf.EXPECT().New().Return(c, nil) + ms.migrateClientFactory = cf + }, + req: &proto.MigrateAllMasterSchemaRequest{}, + expectedErr: nil, + }, + "success": { + setup: func(ms *MySQLService) { + c := mock.NewMockClient(mockController) + c.EXPECT().Up().Return(nil) + cf := mock.NewMockClientFactory(mockController) + cf.EXPECT().New().Return(c, nil) + ms.migrateClientFactory = cf + }, + req: &proto.MigrateAllMasterSchemaRequest{}, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithToken(t) + service := newMySQLService(t) + p.setup(service) + _, err := service.MigrateAllMasterSchema(ctx, p.req) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestRollbackMasterSchema(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*MySQLService, int64) + req *proto.RollbackMasterSchemaRequest + expectedErr error + }{ + "err: failed to new migrate client": { + setup: func(ms *MySQLService, step int64) { + cf := mock.NewMockClientFactory(mockController) + cf.EXPECT().New().Return(nil, errors.New("error")) + ms.migrateClientFactory = cf + }, + req: &proto.RollbackMasterSchemaRequest{}, + expectedErr: errInternal, + }, + "err: failed to run migration": { + setup: func(ms *MySQLService, step int64) { + c := mock.NewMockClient(mockController) + c.EXPECT().Steps(-int(step)).Return(errors.New("error")) + cf := mock.NewMockClientFactory(mockController) + cf.EXPECT().New().Return(c, nil) + ms.migrateClientFactory = cf + }, + req: &proto.RollbackMasterSchemaRequest{Step: 1}, + expectedErr: errInternal, + }, + "success": { + setup: func(ms *MySQLService, step int64) { + c := mock.NewMockClient(mockController) + c.EXPECT().Steps(-int(step)).Return(nil) + cf := mock.NewMockClientFactory(mockController) + cf.EXPECT().New().Return(c, nil) + ms.migrateClientFactory = cf + }, + 
req: &proto.RollbackMasterSchemaRequest{Step: 1}, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithToken(t) + service := newMySQLService(t) + p.setup(service, p.req.Step) + _, err := service.RollbackMasterSchema(ctx, p.req) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestMySQLServicePermissionDenied(t *testing.T) { + t.Parallel() + patterns := map[string]struct { + action func(context.Context, *MySQLService) error + expected error + }{ + "MigrateAllMasterSchema": { + action: func(ctx context.Context, ms *MySQLService) error { + _, err := ms.MigrateAllMasterSchema(ctx, &proto.MigrateAllMasterSchemaRequest{}) + return err + }, + expected: errPermissionDenied, + }, + "RollbackMasterSchema": { + action: func(ctx context.Context, ms *MySQLService) error { + _, err := ms.RollbackMasterSchema(ctx, &proto.RollbackMasterSchemaRequest{}) + return err + }, + expected: errPermissionDenied, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithTokenRoleUnassigned(t) + service := newMySQLService(t) + actual := p.action(ctx, service) + assert.Equal(t, p.expected, actual) + }) + } +} + +func newMySQLService(t *testing.T) *MySQLService { + t.Helper() + logger, err := log.NewLogger() + require.NoError(t, err) + return &MySQLService{ + logger: logger.Named("api"), + } +} + +func createContextWithToken(t *testing.T) context.Context { + t.Helper() + token := &token.IDToken{ + Issuer: "issuer", + Subject: "sub", + Audience: "audience", + Expiry: time.Now().AddDate(100, 0, 0), + IssuedAt: time.Now(), + Email: "email", + AdminRole: accountproto.Account_OWNER, + } + ctx := context.TODO() + return context.WithValue(ctx, rpc.Key, token) +} + +func createContextWithTokenRoleUnassigned(t *testing.T) context.Context { + t.Helper() + token := &token.IDToken{ + Issuer: "issuer", + Subject: "sub", + Audience: "audience", + Expiry: time.Now().AddDate(100, 0, 0), + 
IssuedAt: time.Now(), + Email: "email", + AdminRole: accountproto.Account_UNASSIGNED, + } + ctx := context.TODO() + return context.WithValue(ctx, rpc.Key, token) +} diff --git a/pkg/migration/mysql/migrate/BUILD.bazel b/pkg/migration/mysql/migrate/BUILD.bazel new file mode 100644 index 000000000..29549c018 --- /dev/null +++ b/pkg/migration/mysql/migrate/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["migrate.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/migration/mysql/migrate", + visibility = ["//visibility:public"], + deps = [ + "@com_github_golang_migrate_migrate_v4//:go_default_library", + "@com_github_golang_migrate_migrate_v4//database/mysql:go_default_library", + "@com_github_golang_migrate_migrate_v4//source/github:go_default_library", + ], +) diff --git a/pkg/migration/mysql/migrate/migrate.go b/pkg/migration/mysql/migrate/migrate.go new file mode 100644 index 000000000..07eb92c09 --- /dev/null +++ b/pkg/migration/mysql/migrate/migrate.go @@ -0,0 +1,88 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package migrate + +import ( + "fmt" + "io/ioutil" + "strings" + + libmigrate "github.com/golang-migrate/migrate/v4" + _ "github.com/golang-migrate/migrate/v4/database/mysql" + _ "github.com/golang-migrate/migrate/v4/source/github" +) + +const mysqlParams = "collation=utf8mb4_bin" + +type Client interface { + Up() error + Steps(n int) error +} + +type client struct { + *libmigrate.Migrate +} + +type ClientFactory interface { + New() (Client, error) +} + +type clientFactory struct { + githubUser string + githubAccessToken string + githubSourcePath string + mysqlUser string + mysqlPass string + mysqlHost string + mysqlPort int + mysqlDBName string +} + +func NewClientFactory( + githubUser, githubAccessTokenPath, githubSourcePath string, + mysqlUser, mysqlPass, mysqlHost string, mysqlPort int, mysqlDBName string, +) (ClientFactory, error) { + data, err := ioutil.ReadFile(githubAccessTokenPath) + if err != nil { + return nil, err + } + return &clientFactory{ + githubUser: githubUser, + githubAccessToken: strings.TrimSpace(string(data)), + githubSourcePath: githubSourcePath, + mysqlUser: mysqlUser, + mysqlPass: mysqlPass, + mysqlHost: mysqlHost, + mysqlPort: mysqlPort, + mysqlDBName: mysqlDBName, + }, nil +} + +func (cf *clientFactory) New() (Client, error) { + sourceURL := fmt.Sprintf( + "github://%s:%s@%s", + cf.githubUser, cf.githubAccessToken, cf.githubSourcePath, + ) + databaseURL := fmt.Sprintf( + "mysql://%s:%s@tcp(%s:%d)/%s?%s", + cf.mysqlUser, cf.mysqlPass, cf.mysqlHost, cf.mysqlPort, cf.mysqlDBName, mysqlParams, + ) + m, err := libmigrate.New(sourceURL, databaseURL) + if err != nil { + return nil, err + } + return &client{m}, nil +} diff --git a/pkg/migration/mysql/migrate/mock/BUILD.bazel b/pkg/migration/mysql/migrate/mock/BUILD.bazel new file mode 100644 index 000000000..155d4e7cb --- /dev/null +++ b/pkg/migration/mysql/migrate/mock/BUILD.bazel @@ -0,0 +1,12 @@ 
+load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["migrate.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/migration/mysql/migrate/mock", + visibility = ["//visibility:public"], + deps = [ + "//pkg/migration/mysql/migrate:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + ], +) diff --git a/pkg/migration/mysql/migrate/mock/migrate.go b/pkg/migration/mysql/migrate/mock/migrate.go new file mode 100644 index 000000000..359c7a9b7 --- /dev/null +++ b/pkg/migration/mysql/migrate/mock/migrate.go @@ -0,0 +1,102 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: migrate.go + +// Package mock is a generated GoMock package. +package mock + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + migrate "github.com/bucketeer-io/bucketeer/pkg/migration/mysql/migrate" +) + +// MockClient is a mock of Client interface. +type MockClient struct { + ctrl *gomock.Controller + recorder *MockClientMockRecorder +} + +// MockClientMockRecorder is the mock recorder for MockClient. +type MockClientMockRecorder struct { + mock *MockClient +} + +// NewMockClient creates a new mock instance. +func NewMockClient(ctrl *gomock.Controller) *MockClient { + mock := &MockClient{ctrl: ctrl} + mock.recorder = &MockClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockClient) EXPECT() *MockClientMockRecorder { + return m.recorder +} + +// Steps mocks base method. +func (m *MockClient) Steps(n int) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Steps", n) + ret0, _ := ret[0].(error) + return ret0 +} + +// Steps indicates an expected call of Steps. +func (mr *MockClientMockRecorder) Steps(n interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Steps", reflect.TypeOf((*MockClient)(nil).Steps), n) +} + +// Up mocks base method. 
+func (m *MockClient) Up() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Up") + ret0, _ := ret[0].(error) + return ret0 +} + +// Up indicates an expected call of Up. +func (mr *MockClientMockRecorder) Up() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Up", reflect.TypeOf((*MockClient)(nil).Up)) +} + +// MockClientFactory is a mock of ClientFactory interface. +type MockClientFactory struct { + ctrl *gomock.Controller + recorder *MockClientFactoryMockRecorder +} + +// MockClientFactoryMockRecorder is the mock recorder for MockClientFactory. +type MockClientFactoryMockRecorder struct { + mock *MockClientFactory +} + +// NewMockClientFactory creates a new mock instance. +func NewMockClientFactory(ctrl *gomock.Controller) *MockClientFactory { + mock := &MockClientFactory{ctrl: ctrl} + mock.recorder = &MockClientFactoryMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockClientFactory) EXPECT() *MockClientFactoryMockRecorder { + return m.recorder +} + +// New mocks base method. +func (m *MockClientFactory) New() (migrate.Client, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "New") + ret0, _ := ret[0].(migrate.Client) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// New indicates an expected call of New. 
+func (mr *MockClientFactoryMockRecorder) New() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "New", reflect.TypeOf((*MockClientFactory)(nil).New)) +} diff --git a/pkg/notification/api/BUILD.bazel b/pkg/notification/api/BUILD.bazel new file mode 100644 index 000000000..ac0263dd7 --- /dev/null +++ b/pkg/notification/api/BUILD.bazel @@ -0,0 +1,61 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "admin_subscription.go", + "api.go", + "error.go", + "subscription.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/notification/api", + visibility = ["//visibility:public"], + deps = [ + "//pkg/account/client:go_default_library", + "//pkg/locale:go_default_library", + "//pkg/log:go_default_library", + "//pkg/notification/command:go_default_library", + "//pkg/notification/domain:go_default_library", + "//pkg/notification/storage/v2:go_default_library", + "//pkg/pubsub/publisher:go_default_library", + "//pkg/role:go_default_library", + "//pkg/rpc/status:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//proto/account:go_default_library", + "//proto/event/domain:go_default_library", + "//proto/notification:go_default_library", + "@go_googleapis//google/rpc:errdetails_go_proto", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "admin_subscription_test.go", + "api_test.go", + "subscription_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//pkg/account/client/mock:go_default_library", + "//pkg/locale:go_default_library", + "//pkg/notification/domain:go_default_library", + "//pkg/notification/storage/v2:go_default_library", + "//pkg/pubsub/publisher:go_default_library", + 
"//pkg/pubsub/publisher/mock:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/storage:go_default_library", + "//pkg/storage/v2/mysql/mock:go_default_library", + "//pkg/token:go_default_library", + "//proto/account:go_default_library", + "//proto/notification:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/notification/api/admin_subscription.go b/pkg/notification/api/admin_subscription.go new file mode 100644 index 000000000..b06d643b9 --- /dev/null +++ b/pkg/notification/api/admin_subscription.go @@ -0,0 +1,579 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "context" + "strconv" + + "go.uber.org/zap" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/notification/command" + "github.com/bucketeer-io/bucketeer/pkg/notification/domain" + v2ss "github.com/bucketeer-io/bucketeer/pkg/notification/storage/v2" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" + notificationproto "github.com/bucketeer-io/bucketeer/proto/notification" +) + +func (s *NotificationService) CreateAdminSubscription( + ctx context.Context, + req *notificationproto.CreateAdminSubscriptionRequest, +) (*notificationproto.CreateAdminSubscriptionResponse, error) { + editor, err := s.checkAdminRole(ctx) + if err != nil { + return nil, err + } + if err := s.validateCreateAdminSubscriptionRequest(req); err != nil { + return nil, err + } + subscription, err := domain.NewSubscription(req.Command.Name, req.Command.SourceTypes, req.Command.Recipient) + if err != nil { + s.logger.Error( + "Failed to create a new admin subscription", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.Any("sourceType", req.Command.SourceTypes), + zap.Any("recipient", req.Command.Recipient), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + var handler command.Handler = command.NewEmptyAdminSubscriptionCommandHandler() + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + adminSubscriptionStorage := v2ss.NewAdminSubscriptionStorage(tx) + if err := adminSubscriptionStorage.CreateAdminSubscription(ctx, subscription); err != nil 
{ + return err + } + handler = command.NewAdminSubscriptionCommandHandler(editor, subscription) + if err := handler.Handle(ctx, req.Command); err != nil { + return err + } + return nil + }) + if err != nil { + if err == v2ss.ErrAdminSubscriptionAlreadyExists { + return nil, localizedError(statusAlreadyExists, locale.JaJP) + } + s.logger.Error( + "Failed to create admin subscription", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + if errs := s.publishDomainEvents(ctx, handler.Events()); len(errs) > 0 { + s.logger.Error( + "Failed to publish events", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Any("errors", errs), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return ¬ificationproto.CreateAdminSubscriptionResponse{}, nil +} + +func (s *NotificationService) validateCreateAdminSubscriptionRequest( + req *notificationproto.CreateAdminSubscriptionRequest, +) error { + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + if req.Command.Name == "" { + return localizedError(statusNameRequired, locale.JaJP) + } + if len(req.Command.SourceTypes) == 0 { + return localizedError(statusSourceTypesRequired, locale.JaJP) + } + if err := s.validateRecipient(req.Command.Recipient); err != nil { + return err + } + return nil +} + +func (s *NotificationService) UpdateAdminSubscription( + ctx context.Context, + req *notificationproto.UpdateAdminSubscriptionRequest, +) (*notificationproto.UpdateAdminSubscriptionResponse, error) { + editor, err := s.checkAdminRole(ctx) + if err != nil { + return nil, err + } + if err := s.validateUpdateAdminSubscriptionRequest(req); err != nil { + return nil, err + } + commands := s.createUpdateAdminSubscriptionCommands(req) + if err := s.updateAdminSubscription(ctx, commands, req.Id, editor); err != nil { + if status.Code(err) == codes.Internal { + s.logger.Error( + "Failed to update 
feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("id", req.Id), + )..., + ) + } + return nil, err + } + return ¬ificationproto.UpdateAdminSubscriptionResponse{}, nil +} + +func (s *NotificationService) validateUpdateAdminSubscriptionRequest( + req *notificationproto.UpdateAdminSubscriptionRequest, +) error { + if req.Id == "" { + return localizedError(statusIDRequired, locale.JaJP) + } + if s.isNoUpdateAdminSubscriptionCommand(req) { + return localizedError(statusNoCommand, locale.JaJP) + } + if req.AddSourceTypesCommand != nil && len(req.AddSourceTypesCommand.SourceTypes) == 0 { + return localizedError(statusSourceTypesRequired, locale.JaJP) + } + if req.DeleteSourceTypesCommand != nil && len(req.DeleteSourceTypesCommand.SourceTypes) == 0 { + return localizedError(statusSourceTypesRequired, locale.JaJP) + } + if req.RenameSubscriptionCommand != nil && req.RenameSubscriptionCommand.Name == "" { + return localizedError(statusNameRequired, locale.JaJP) + } + return nil +} + +func (s *NotificationService) isNoUpdateAdminSubscriptionCommand( + req *notificationproto.UpdateAdminSubscriptionRequest, +) bool { + return req.AddSourceTypesCommand == nil && + req.DeleteSourceTypesCommand == nil && + req.RenameSubscriptionCommand == nil +} + +func (s *NotificationService) EnableAdminSubscription( + ctx context.Context, + req *notificationproto.EnableAdminSubscriptionRequest, +) (*notificationproto.EnableAdminSubscriptionResponse, error) { + editor, err := s.checkAdminRole(ctx) + if err != nil { + return nil, err + } + if err := s.validateEnableAdminSubscriptionRequest(req); err != nil { + return nil, err + } + if err := s.updateAdminSubscription(ctx, []command.Command{req.Command}, req.Id, editor); err != nil { + if status.Code(err) == codes.Internal { + s.logger.Error( + "Failed to enable feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + } + return nil, err + } + return 
¬ificationproto.EnableAdminSubscriptionResponse{}, nil +} + +func (s *NotificationService) validateEnableAdminSubscriptionRequest( + req *notificationproto.EnableAdminSubscriptionRequest, +) error { + if req.Id == "" { + return localizedError(statusIDRequired, locale.JaJP) + } + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + return nil +} + +func (s *NotificationService) DisableAdminSubscription( + ctx context.Context, + req *notificationproto.DisableAdminSubscriptionRequest, +) (*notificationproto.DisableAdminSubscriptionResponse, error) { + editor, err := s.checkAdminRole(ctx) + if err != nil { + return nil, err + } + if err := s.validateDisableAdminSubscriptionRequest(req); err != nil { + return nil, err + } + if err := s.updateAdminSubscription(ctx, []command.Command{req.Command}, req.Id, editor); err != nil { + if status.Code(err) == codes.Internal { + s.logger.Error( + "Failed to disable feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + } + return nil, err + } + return ¬ificationproto.DisableAdminSubscriptionResponse{}, nil +} + +func (s *NotificationService) validateDisableAdminSubscriptionRequest( + req *notificationproto.DisableAdminSubscriptionRequest, +) error { + if req.Id == "" { + return localizedError(statusIDRequired, locale.JaJP) + } + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + return nil +} + +func (s *NotificationService) updateAdminSubscription( + ctx context.Context, + commands []command.Command, + id string, + editor *eventproto.Editor, +) error { + var handler command.Handler = command.NewEmptyAdminSubscriptionCommandHandler() + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return localizedError(statusInternal, locale.JaJP) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error 
{ + adminSubscriptionStorage := v2ss.NewAdminSubscriptionStorage(tx) + subscription, err := adminSubscriptionStorage.GetAdminSubscription(ctx, id) + if err != nil { + return err + } + handler = command.NewAdminSubscriptionCommandHandler(editor, subscription) + for _, command := range commands { + if err := handler.Handle(ctx, command); err != nil { + return err + } + } + if err = adminSubscriptionStorage.UpdateAdminSubscription(ctx, subscription); err != nil { + return err + } + return nil + }) + if err != nil { + if err == v2ss.ErrAdminSubscriptionNotFound || err == v2ss.ErrAdminSubscriptionUnexpectedAffectedRows { + return localizedError(statusNotFound, locale.JaJP) + } + s.logger.Error( + "Failed to update admin subscription", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("id", id), + )..., + ) + return localizedError(statusInternal, locale.JaJP) + } + if errs := s.publishDomainEvents(ctx, handler.Events()); len(errs) > 0 { + s.logger.Error( + "Failed to publish events", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Any("errors", errs), + zap.String("id", id), + )..., + ) + return localizedError(statusInternal, locale.JaJP) + } + return nil +} + +func (s *NotificationService) DeleteAdminSubscription( + ctx context.Context, + req *notificationproto.DeleteAdminSubscriptionRequest, +) (*notificationproto.DeleteAdminSubscriptionResponse, error) { + editor, err := s.checkAdminRole(ctx) + if err != nil { + return nil, err + } + if err := validateDeleteAdminSubscriptionRequest(req); err != nil { + return nil, err + } + var handler command.Handler = command.NewEmptyAdminSubscriptionCommandHandler() + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + 
adminSubscriptionStorage := v2ss.NewAdminSubscriptionStorage(tx) + subscription, err := adminSubscriptionStorage.GetAdminSubscription(ctx, req.Id) + if err != nil { + return err + } + handler = command.NewAdminSubscriptionCommandHandler(editor, subscription) + if err := handler.Handle(ctx, req.Command); err != nil { + return err + } + if err = adminSubscriptionStorage.DeleteAdminSubscription(ctx, req.Id); err != nil { + return err + } + return nil + }) + if err != nil { + if err == v2ss.ErrAdminSubscriptionNotFound || err == v2ss.ErrAdminSubscriptionUnexpectedAffectedRows { + return nil, localizedError(statusNotFound, locale.JaJP) + } + s.logger.Error( + "Failed to delete admin subscription", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("id", req.Id), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + if errs := s.publishDomainEvents(ctx, handler.Events()); len(errs) > 0 { + s.logger.Error( + "Failed to publish events", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Any("errors", errs), + zap.String("id", req.Id), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return ¬ificationproto.DeleteAdminSubscriptionResponse{}, nil +} + +func validateDeleteAdminSubscriptionRequest(req *notificationproto.DeleteAdminSubscriptionRequest) error { + if req.Id == "" { + return localizedError(statusIDRequired, locale.JaJP) + } + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + return nil +} + +func (s *NotificationService) createUpdateAdminSubscriptionCommands( + req *notificationproto.UpdateAdminSubscriptionRequest, +) []command.Command { + commands := make([]command.Command, 0) + if req.AddSourceTypesCommand != nil { + commands = append(commands, req.AddSourceTypesCommand) + } + if req.DeleteSourceTypesCommand != nil { + commands = append(commands, req.DeleteSourceTypesCommand) + } + if req.RenameSubscriptionCommand != nil { + commands = 
append(commands, req.RenameSubscriptionCommand) + } + return commands +} + +func (s *NotificationService) GetAdminSubscription( + ctx context.Context, + req *notificationproto.GetAdminSubscriptionRequest, +) (*notificationproto.GetAdminSubscriptionResponse, error) { + _, err := s.checkAdminRole(ctx) + if err != nil { + return nil, err + } + if err := validateGetAdminSubscriptionRequest(req); err != nil { + return nil, err + } + adminSubscriptionStorage := v2ss.NewAdminSubscriptionStorage(s.mysqlClient) + subscription, err := adminSubscriptionStorage.GetAdminSubscription(ctx, req.Id) + if err != nil { + if err == v2ss.ErrAdminSubscriptionNotFound { + return nil, localizedError(statusNotFound, locale.JaJP) + } + s.logger.Error( + "Failed to get admin subscription", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("id", req.Id), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return ¬ificationproto.GetAdminSubscriptionResponse{Subscription: subscription.Subscription}, nil +} + +func validateGetAdminSubscriptionRequest(req *notificationproto.GetAdminSubscriptionRequest) error { + if req.Id == "" { + return localizedError(statusIDRequired, locale.JaJP) + } + return nil +} + +func (s *NotificationService) ListAdminSubscriptions( + ctx context.Context, + req *notificationproto.ListAdminSubscriptionsRequest, +) (*notificationproto.ListAdminSubscriptionsResponse, error) { + _, err := s.checkAdminRole(ctx) + if err != nil { + return nil, err + } + var whereParts []mysql.WherePart + sourceTypesValues := make([]interface{}, len(req.SourceTypes)) + for i, st := range req.SourceTypes { + sourceTypesValues[i] = int32(st) + } + if len(sourceTypesValues) > 0 { + whereParts = append( + whereParts, + mysql.NewJSONFilter("source_types", mysql.JSONContainsNumber, sourceTypesValues), + ) + } + if req.Disabled != nil { + whereParts = append(whereParts, mysql.NewFilter("disabled", "=", req.Disabled.Value)) + } + if 
req.SearchKeyword != "" { + whereParts = append(whereParts, mysql.NewSearchQuery([]string{"name"}, req.SearchKeyword)) + } + orders, err := s.newAdminSubscriptionListOrders(req.OrderBy, req.OrderDirection) + if err != nil { + s.logger.Error( + "Invalid argument", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, err + } + subscriptions, cursor, totalCount, err := s.listAdminSubscriptionsMySQL( + ctx, + whereParts, + orders, + req.PageSize, + req.Cursor, + ) + if err != nil { + return nil, err + } + return ¬ificationproto.ListAdminSubscriptionsResponse{ + Subscriptions: subscriptions, + Cursor: cursor, + TotalCount: totalCount, + }, nil +} + +func (s *NotificationService) newAdminSubscriptionListOrders( + orderBy notificationproto.ListAdminSubscriptionsRequest_OrderBy, + orderDirection notificationproto.ListAdminSubscriptionsRequest_OrderDirection, +) ([]*mysql.Order, error) { + var column string + switch orderBy { + case notificationproto.ListAdminSubscriptionsRequest_DEFAULT, + notificationproto.ListAdminSubscriptionsRequest_NAME: + column = "name" + case notificationproto.ListAdminSubscriptionsRequest_CREATED_AT: + column = "created_at" + case notificationproto.ListAdminSubscriptionsRequest_UPDATED_AT: + column = "updated_at" + default: + return nil, localizedError(statusInvalidOrderBy, locale.JaJP) + } + direction := mysql.OrderDirectionAsc + if orderDirection == notificationproto.ListAdminSubscriptionsRequest_DESC { + direction = mysql.OrderDirectionDesc + } + return []*mysql.Order{mysql.NewOrder(column, direction)}, nil +} + +func (s *NotificationService) ListEnabledAdminSubscriptions( + ctx context.Context, + req *notificationproto.ListEnabledAdminSubscriptionsRequest, +) (*notificationproto.ListEnabledAdminSubscriptionsResponse, error) { + _, err := s.checkAdminRole(ctx) + if err != nil { + return nil, err + } + var whereParts []mysql.WherePart + whereParts = append(whereParts, mysql.NewFilter("disabled", "=", false)) + 
sourceTypesValues := make([]interface{}, len(req.SourceTypes)) + for i, st := range req.SourceTypes { + sourceTypesValues[i] = int32(st) + } + if len(sourceTypesValues) > 0 { + whereParts = append( + whereParts, + mysql.NewJSONFilter("source_types", mysql.JSONContainsNumber, sourceTypesValues), + ) + } + subscriptions, cursor, _, err := s.listAdminSubscriptionsMySQL( + ctx, + whereParts, + nil, + req.PageSize, + req.Cursor, + ) + if err != nil { + return nil, err + } + return ¬ificationproto.ListEnabledAdminSubscriptionsResponse{ + Subscriptions: subscriptions, + Cursor: cursor, + }, nil +} + +func (s *NotificationService) listAdminSubscriptionsMySQL( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + pageSize int64, + cursor string, +) ([]*notificationproto.Subscription, string, int64, error) { + limit := int(pageSize) + if cursor == "" { + cursor = "0" + } + offset, err := strconv.Atoi(cursor) + if err != nil { + return nil, "", 0, localizedError(statusInvalidCursor, locale.JaJP) + } + adminSubscriptionStorage := v2ss.NewAdminSubscriptionStorage(s.mysqlClient) + subscriptions, nextCursor, totalCount, err := adminSubscriptionStorage.ListAdminSubscriptions( + ctx, + whereParts, + orders, + limit, + offset, + ) + if err != nil { + s.logger.Error( + "Failed to list admin subscriptions", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return nil, "", 0, localizedError(statusInternal, locale.JaJP) + } + return subscriptions, strconv.Itoa(nextCursor), totalCount, nil +} diff --git a/pkg/notification/api/admin_subscription_test.go b/pkg/notification/api/admin_subscription_test.go new file mode 100644 index 000000000..372878d79 --- /dev/null +++ b/pkg/notification/api/admin_subscription_test.go @@ -0,0 +1,658 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package api

import (
	"testing"

	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/assert"

	"github.com/bucketeer-io/bucketeer/pkg/locale"
	v2ss "github.com/bucketeer-io/bucketeer/pkg/notification/storage/v2"
	publishermock "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher/mock"
	mysqlmock "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock"
	"github.com/bucketeer-io/bucketeer/pkg/token"
	proto "github.com/bucketeer-io/bucketeer/proto/notification"
)

// TestCreateAdminSubscriptionMySQL covers CreateAdminSubscription: role check,
// every request-validation error, and the success path with mocked
// transaction and event publisher.
func TestCreateAdminSubscriptionMySQL(t *testing.T) {
	t.Parallel()
	mockController := gomock.NewController(t)
	defer mockController.Finish()

	patterns := map[string]struct {
		setup       func(*NotificationService)
		token       *token.IDToken
		input       *proto.CreateAdminSubscriptionRequest
		expectedErr error
	}{
		// NOTE(review): despite the case name, a non-admin owner token yields
		// statusPermissionDenied, not an unauthenticated status.
		"err: ErrUnauthenticated": {
			setup: nil,
			token: createOwnerToken(t),
			input: &proto.CreateAdminSubscriptionRequest{
				Command: nil,
			},
			expectedErr: localizedError(statusPermissionDenied, locale.JaJP),
		},
		"err: ErrNoCommand": {
			setup: nil,
			token: createAdminToken(t),
			input: &proto.CreateAdminSubscriptionRequest{
				Command: nil,
			},
			expectedErr: localizedError(statusNoCommand, locale.JaJP),
		},
		"err: ErrSourceTypesRequired": {
			token: createAdminToken(t),
			input: &proto.CreateAdminSubscriptionRequest{
				Command: &proto.CreateAdminSubscriptionCommand{
					Name: "sname",
					Recipient: &proto.Recipient{
						Type:                  proto.Recipient_SlackChannel,
						SlackChannelRecipient: &proto.SlackChannelRecipient{WebhookUrl: "url"},
					},
				},
			},
			expectedErr: localizedError(statusSourceTypesRequired, locale.JaJP),
		},
		"err: ErrRecipientRequired": {
			token: createAdminToken(t),
			input: &proto.CreateAdminSubscriptionRequest{
				Command: &proto.CreateAdminSubscriptionCommand{
					Name: "sname",
					SourceTypes: []proto.Subscription_SourceType{
						proto.Subscription_DOMAIN_EVENT_ACCOUNT,
						proto.Subscription_DOMAIN_EVENT_ADMIN_ACCOUNT,
					},
				},
			},
			expectedErr: localizedError(statusRecipientRequired, locale.JaJP),
		},
		"err: ErrSlackRecipientRequired": {
			token: createAdminToken(t),
			input: &proto.CreateAdminSubscriptionRequest{
				Command: &proto.CreateAdminSubscriptionCommand{
					Name: "sname",
					SourceTypes: []proto.Subscription_SourceType{
						proto.Subscription_DOMAIN_EVENT_ACCOUNT,
						proto.Subscription_DOMAIN_EVENT_ADMIN_ACCOUNT,
					},
					Recipient: &proto.Recipient{
						Type: proto.Recipient_SlackChannel,
					},
				},
			},
			expectedErr: localizedError(statusSlackRecipientRequired, locale.JaJP),
		},
		"err: ErrSlackRecipientWebhookURLRequired": {
			token: createAdminToken(t),
			input: &proto.CreateAdminSubscriptionRequest{
				Command: &proto.CreateAdminSubscriptionCommand{
					Name: "sname",
					SourceTypes: []proto.Subscription_SourceType{
						proto.Subscription_DOMAIN_EVENT_ACCOUNT,
						proto.Subscription_DOMAIN_EVENT_ADMIN_ACCOUNT,
					},
					Recipient: &proto.Recipient{
						Type:                  proto.Recipient_SlackChannel,
						SlackChannelRecipient: &proto.SlackChannelRecipient{WebhookUrl: ""},
					},
				},
			},
			expectedErr: localizedError(statusSlackRecipientWebhookURLRequired, locale.JaJP),
		},
		"err: ErrNameRequired": {
			token: createAdminToken(t),
			input: &proto.CreateAdminSubscriptionRequest{
				Command: &proto.CreateAdminSubscriptionCommand{
					SourceTypes: []proto.Subscription_SourceType{
						proto.Subscription_DOMAIN_EVENT_ACCOUNT,
						proto.Subscription_DOMAIN_EVENT_ADMIN_ACCOUNT,
					},
					Recipient: &proto.Recipient{
						Type:                  proto.Recipient_SlackChannel,
						SlackChannelRecipient: &proto.SlackChannelRecipient{WebhookUrl: "url"},
					},
				},
			},
			expectedErr: localizedError(statusNameRequired, locale.JaJP),
		},
		"success": {
			setup: func(s *NotificationService) {
				// BeginTx returns (nil, nil): the tx handle is unused because
				// RunInTransaction itself is mocked to succeed.
				s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil)
				s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction(
					gomock.Any(), gomock.Any(), gomock.Any(),
				).Return(nil)
				s.domainEventPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti(
					gomock.Any(), gomock.Any(),
				).Return(nil)
			},
			token: createAdminToken(t),
			input: &proto.CreateAdminSubscriptionRequest{
				Command: &proto.CreateAdminSubscriptionCommand{
					Name: "sname",
					SourceTypes: []proto.Subscription_SourceType{
						proto.Subscription_DOMAIN_EVENT_ACCOUNT,
						proto.Subscription_DOMAIN_EVENT_ADMIN_ACCOUNT,
					},
					Recipient: &proto.Recipient{
						Type:                  proto.Recipient_SlackChannel,
						SlackChannelRecipient: &proto.SlackChannelRecipient{WebhookUrl: "url"},
					},
				},
			},
			expectedErr: nil,
		},
	}
	for msg, p := range patterns {
		t.Run(msg, func(t *testing.T) {
			ctx := createContextWithToken(t, p.token)
			service := newNotificationServiceWithMock(t, mockController)
			if p.setup != nil {
				p.setup(service)
			}
			_, err := service.CreateAdminSubscription(ctx, p.input)
			assert.Equal(t, p.expectedErr, err)
		})
	}
}

// TestUpdateAdminSubscriptionMySQL covers UpdateAdminSubscription: permission,
// validation of each command, the not-found path, and success with one, the
// other, or all three update commands.
func TestUpdateAdminSubscriptionMySQL(t *testing.T) {
	t.Parallel()
	mockController := gomock.NewController(t)
	defer mockController.Finish()

	patterns := map[string]struct {
		setup       func(*NotificationService)
		token       *token.IDToken
		input       *proto.UpdateAdminSubscriptionRequest
		expectedErr error
	}{
		"err: ErrPermissionDenied": {
			token:       createOwnerToken(t),
			input:       &proto.UpdateAdminSubscriptionRequest{},
			expectedErr: localizedError(statusPermissionDenied, locale.JaJP),
		},
		"err: ErrIDRequired": {
			token:       createAdminToken(t),
			input:       &proto.UpdateAdminSubscriptionRequest{},
			expectedErr: localizedError(statusIDRequired, locale.JaJP),
		},
		"err: ErrNoCommand": {
			token: createAdminToken(t),
			input: &proto.UpdateAdminSubscriptionRequest{
				Id: "key-0",
			},
			expectedErr: localizedError(statusNoCommand, locale.JaJP),
		},
		"err: add notification types: ErrSourceTypesRequired": {
			token: createAdminToken(t),
			input: &proto.UpdateAdminSubscriptionRequest{
				Id:                    "key-0",
				AddSourceTypesCommand: &proto.AddAdminSubscriptionSourceTypesCommand{},
			},
			expectedErr: localizedError(statusSourceTypesRequired, locale.JaJP),
		},
		"err: delete notification types: ErrSourceTypesRequired": {
			token: createAdminToken(t),
			input: &proto.UpdateAdminSubscriptionRequest{
				Id:                       "key-0",
				DeleteSourceTypesCommand: &proto.DeleteAdminSubscriptionSourceTypesCommand{},
			},
			expectedErr: localizedError(statusSourceTypesRequired, locale.JaJP),
		},
		"err: ErrNotFound": {
			setup: func(s *NotificationService) {
				// The storage error surfaces from RunInTransaction and is
				// mapped to statusNotFound by the service.
				s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil)
				s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction(
					gomock.Any(), gomock.Any(), gomock.Any(),
				).Return(v2ss.ErrAdminSubscriptionNotFound)
			},
			token: createAdminToken(t),
			input: &proto.UpdateAdminSubscriptionRequest{
				Id: "key-1",
				AddSourceTypesCommand: &proto.AddAdminSubscriptionSourceTypesCommand{
					SourceTypes: []proto.Subscription_SourceType{
						proto.Subscription_DOMAIN_EVENT_ACCOUNT,
						proto.Subscription_DOMAIN_EVENT_ADMIN_ACCOUNT,
					},
				},
			},
			expectedErr: localizedError(statusNotFound, locale.JaJP),
		},
		"success: addSourceTypes": {
			setup: func(s *NotificationService) {
				s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil)
				s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction(
					gomock.Any(), gomock.Any(), gomock.Any(),
				).Return(nil)
				s.domainEventPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti(
					gomock.Any(), gomock.Any(),
				).Return(nil)
			},
			token: createAdminToken(t),
			input: &proto.UpdateAdminSubscriptionRequest{
				Id: "key-0",
				AddSourceTypesCommand: &proto.AddAdminSubscriptionSourceTypesCommand{
					SourceTypes: []proto.Subscription_SourceType{
						proto.Subscription_DOMAIN_EVENT_FEATURE,
					},
				},
			},
			expectedErr: nil,
		},
		"success: deleteSourceTypes": {
			setup: func(s *NotificationService) {
				s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil)
				s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction(
					gomock.Any(), gomock.Any(), gomock.Any(),
				).Return(nil)
				s.domainEventPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti(
					gomock.Any(), gomock.Any(),
				).Return(nil)
			},
			token: createAdminToken(t),
			input: &proto.UpdateAdminSubscriptionRequest{
				Id: "key-0",
				DeleteSourceTypesCommand: &proto.DeleteAdminSubscriptionSourceTypesCommand{
					SourceTypes: []proto.Subscription_SourceType{
						proto.Subscription_DOMAIN_EVENT_ACCOUNT,
					},
				},
			},
			expectedErr: nil,
		},
		"success: all commands": {
			setup: func(s *NotificationService) {
				s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil)
				s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction(
					gomock.Any(), gomock.Any(), gomock.Any(),
				).Return(nil)
				s.domainEventPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti(
					gomock.Any(), gomock.Any(),
				).Return(nil)
			},
			token: createAdminToken(t),
			input: &proto.UpdateAdminSubscriptionRequest{
				Id: "key-0",
				AddSourceTypesCommand: &proto.AddAdminSubscriptionSourceTypesCommand{
					SourceTypes: []proto.Subscription_SourceType{
						proto.Subscription_DOMAIN_EVENT_FEATURE,
					},
				},
				DeleteSourceTypesCommand: &proto.DeleteAdminSubscriptionSourceTypesCommand{
					SourceTypes: []proto.Subscription_SourceType{
						proto.Subscription_DOMAIN_EVENT_ACCOUNT,
					},
				},
				RenameSubscriptionCommand: &proto.RenameAdminSubscriptionCommand{
					Name: "rename",
				},
			},
			expectedErr: nil,
		},
	}
	for msg, p := range patterns {
		t.Run(msg, func(t *testing.T) {
			ctx := createContextWithToken(t, p.token)
			service := newNotificationServiceWithMock(t, mockController)
			if p.setup != nil {
				p.setup(service)
			}
			_, err := service.UpdateAdminSubscription(ctx, p.input)
			assert.Equal(t, p.expectedErr, err)
		})
	}
}

// TestEnableAdminSubscriptionMySQL covers EnableAdminSubscription: permission,
// validation, and the mocked success path.
func TestEnableAdminSubscriptionMySQL(t *testing.T) {
	t.Parallel()
	mockController := gomock.NewController(t)
	defer mockController.Finish()

	patterns := map[string]struct {
		setup       func(*NotificationService)
		token       *token.IDToken
		input       *proto.EnableAdminSubscriptionRequest
		expectedErr error
	}{
		"err: ErrPermissionDenied": {
			token:       createOwnerToken(t),
			input:       &proto.EnableAdminSubscriptionRequest{},
			expectedErr: localizedError(statusPermissionDenied, locale.JaJP),
		},
		"err: ErrIDRequired": {
			token:       createAdminToken(t),
			input:       &proto.EnableAdminSubscriptionRequest{},
			expectedErr: localizedError(statusIDRequired, locale.JaJP),
		},
		"err: ErrNoCommand": {
			token: createAdminToken(t),
			input: &proto.EnableAdminSubscriptionRequest{
				Id: "key-0",
			},
			expectedErr: localizedError(statusNoCommand, locale.JaJP),
		},
		"success": {
			setup: func(s *NotificationService) {
				s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil)
				s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction(
					gomock.Any(), gomock.Any(), gomock.Any(),
				).Return(nil)
				s.domainEventPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti(
					gomock.Any(), gomock.Any(),
				).Return(nil)
			},
			token: createAdminToken(t),
			input: &proto.EnableAdminSubscriptionRequest{
				Id:      "key-0",
				Command: &proto.EnableAdminSubscriptionCommand{},
			},
			expectedErr: nil,
		},
	}
	for msg, p := range patterns {
		t.Run(msg, func(t *testing.T) {
			ctx := createContextWithToken(t, p.token)
			service := newNotificationServiceWithMock(t, mockController)
			if p.setup != nil {
				p.setup(service)
			}
			_, err := service.EnableAdminSubscription(ctx, p.input)
			assert.Equal(t, p.expectedErr, err)
		})
	}
}

// TestDisableAdminSubscriptionMySQL covers DisableAdminSubscription:
// validation errors and the mocked success path.
func TestDisableAdminSubscriptionMySQL(t *testing.T) {
	t.Parallel()
	mockController := gomock.NewController(t)
	defer mockController.Finish()

	patterns := map[string]struct {
		setup       func(*NotificationService)
		token       *token.IDToken
		input       *proto.DisableAdminSubscriptionRequest
		expectedErr error
	}{
		"err: ErrIDRequired": {
			token:       createAdminToken(t),
			input:       &proto.DisableAdminSubscriptionRequest{},
			expectedErr: localizedError(statusIDRequired, locale.JaJP),
		},
		"err: ErrNoCommand": {
			token: createAdminToken(t),
			input: &proto.DisableAdminSubscriptionRequest{
				Id: "key-0",
			},
			expectedErr: localizedError(statusNoCommand, locale.JaJP),
		},
		"success": {
			setup: func(s *NotificationService) {
				s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil)
				s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction(
					gomock.Any(), gomock.Any(), gomock.Any(),
				).Return(nil)
				s.domainEventPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti(
					gomock.Any(), gomock.Any(),
				).Return(nil)
			},
			token: createAdminToken(t),
			input: &proto.DisableAdminSubscriptionRequest{
				Id:      "key-0",
				Command: &proto.DisableAdminSubscriptionCommand{},
			},
			expectedErr: nil,
		},
	}
	for msg, p := range patterns {
		t.Run(msg, func(t *testing.T) {
			ctx := createContextWithToken(t, p.token)
			service := newNotificationServiceWithMock(t, mockController)
			if p.setup != nil {
				p.setup(service)
			}
			_, err := service.DisableAdminSubscription(ctx, p.input)
			assert.Equal(t, p.expectedErr, err)
		})
	}
}

// TestDeleteAdminSubscriptionMySQL covers DeleteAdminSubscription:
// validation errors and the mocked success path.
func TestDeleteAdminSubscriptionMySQL(t *testing.T) {
	t.Parallel()
	mockController := gomock.NewController(t)
	defer mockController.Finish()

	patterns := map[string]struct {
		setup       func(*NotificationService)
		token       *token.IDToken
		input       *proto.DeleteAdminSubscriptionRequest
		expectedErr error
	}{
		"err: ErrIDRequired": {
			token:       createAdminToken(t),
			input:       &proto.DeleteAdminSubscriptionRequest{},
			expectedErr: localizedError(statusIDRequired, locale.JaJP),
		},
		"err: ErrNoCommand": {
			token: createAdminToken(t),
			input: &proto.DeleteAdminSubscriptionRequest{
				Id: "key-0",
			},
			expectedErr: localizedError(statusNoCommand, locale.JaJP),
		},
		"success": {
			setup: func(s *NotificationService) {
				s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil)
				s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction(
					gomock.Any(), gomock.Any(), gomock.Any(),
				).Return(nil)
				s.domainEventPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti(
					gomock.Any(), gomock.Any(),
				).Return(nil)
			},
			token: createAdminToken(t),
			input: &proto.DeleteAdminSubscriptionRequest{
				Id:      "key-0",
				Command: &proto.DeleteAdminSubscriptionCommand{},
			},
			expectedErr: nil,
		},
	}
	for msg, p := range patterns {
		t.Run(msg, func(t *testing.T) {
			ctx := createContextWithToken(t, p.token)
			service := newNotificationServiceWithMock(t, mockController)
			if p.setup != nil {
				p.setup(service)
			}
			_, err := service.DeleteAdminSubscription(ctx, p.input)
			assert.Equal(t, p.expectedErr, err)
		})
	}
}

// TestGetAdminSubscriptionMySQL covers GetAdminSubscription: permission,
// validation, and a success path backed by a mocked single-row query.
func TestGetAdminSubscriptionMySQL(t *testing.T) {
	t.Parallel()
	mockController := gomock.NewController(t)
	defer mockController.Finish()

	patterns := map[string]struct {
		setup       func(*NotificationService)
		token       *token.IDToken
		input       *proto.GetAdminSubscriptionRequest
		expectedErr error
	}{
		"err: ErrPermissionDenied": {
			token:       createOwnerToken(t),
			input:       &proto.GetAdminSubscriptionRequest{},
			expectedErr: localizedError(statusPermissionDenied, locale.JaJP),
		},
		"err: ErrIDRequired": {
			token:       createAdminToken(t),
			input:       &proto.GetAdminSubscriptionRequest{},
			expectedErr: localizedError(statusIDRequired, locale.JaJP),
		},
		"success": {
			setup: func(s *NotificationService) {
				// GetAdminSubscription reads one row directly from the client
				// (no transaction).
				row := mysqlmock.NewMockRow(mockController)
				row.EXPECT().Scan(gomock.Any()).Return(nil)
				s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext(
					gomock.Any(), gomock.Any(), gomock.Any(),
				).Return(row)
			},
			token:       createAdminToken(t),
			input:       &proto.GetAdminSubscriptionRequest{Id: "key-0"},
			expectedErr: nil,
		},
	}
	for msg, p := range patterns {
		t.Run(msg, func(t *testing.T) {
			service := newNotificationServiceWithMock(t, mockController)
			if p.setup != nil {
				p.setup(service)
			}
			ctx := createContextWithToken(t, p.token)
			actual, err := service.GetAdminSubscription(ctx, p.input)
			assert.Equal(t, p.expectedErr, err)
			if err == nil {
				assert.NotNil(t, actual)
			}
		})
	}
}

// TestListAdminSubscriptionsMySQL covers ListAdminSubscriptions: permission
// and a success path with an empty mocked result set plus a count row.
func TestListAdminSubscriptionsMySQL(t *testing.T) {
	t.Parallel()
	mockController := gomock.NewController(t)
	defer mockController.Finish()

	patterns := map[string]struct {
		setup       func(*NotificationService)
		token       *token.IDToken
		input       *proto.ListAdminSubscriptionsRequest
		expected    *proto.ListAdminSubscriptionsResponse
		expectedErr error
	}{
		"err: ErrPermissionDenied": {
			setup:       nil,
			token:       createOwnerToken(t),
			input:       &proto.ListAdminSubscriptionsRequest{PageSize: 2, Cursor: ""},
			expected:    nil,
			expectedErr: localizedError(statusPermissionDenied, locale.JaJP),
		},
		"success": {
			setup: func(s *NotificationService) {
				// rows: the list query returns no rows; row: the total-count
				// query returns a single scanned value.
				rows := mysqlmock.NewMockRows(mockController)
				rows.EXPECT().Close().Return(nil)
				rows.EXPECT().Next().Return(false)
				rows.EXPECT().Err().Return(nil)
				s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext(
					gomock.Any(), gomock.Any(), gomock.Any(),
				).Return(rows, nil)
				row := mysqlmock.NewMockRow(mockController)
				row.EXPECT().Scan(gomock.Any()).Return(nil)
				s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext(
					gomock.Any(), gomock.Any(), gomock.Any(),
				).Return(row)
			},
			token: createAdminToken(t),
			input: &proto.ListAdminSubscriptionsRequest{
				PageSize: 2,
				Cursor:   "",
				SourceTypes: []proto.Subscription_SourceType{
					proto.Subscription_DOMAIN_EVENT_ADMIN_ACCOUNT,
					proto.Subscription_DOMAIN_EVENT_ADMIN_SUBSCRIPTION,
				},
			},
			expected:    &proto.ListAdminSubscriptionsResponse{Subscriptions: []*proto.Subscription{}, Cursor: "0", TotalCount: 0},
			expectedErr: nil,
		},
	}
	for msg, p := range patterns {
		t.Run(msg, func(t *testing.T) {
			s := newNotificationServiceWithMock(t, mockController)
			if p.setup != nil {
				p.setup(s)
			}
			ctx := createContextWithToken(t, p.token)
			actual, err := s.ListAdminSubscriptions(ctx, p.input)
			assert.Equal(t, p.expectedErr, err)
			assert.Equal(t, p.expected, actual)
		})
	}
}

// TestListEnabledAdminSubscriptionsMySQL covers ListEnabledAdminSubscriptions:
// permission and a success path; with no rows returned the cursor echoes the
// requested offset ("1").
func TestListEnabledAdminSubscriptionsMySQL(t *testing.T) {
	t.Parallel()
	mockController := gomock.NewController(t)
	defer mockController.Finish()

	patterns := map[string]struct {
		setup       func(*NotificationService)
		token       *token.IDToken
		input       *proto.ListEnabledAdminSubscriptionsRequest
		expected    *proto.ListEnabledAdminSubscriptionsResponse
		expectedErr error
	}{
		"err: ErrPermissionDenied": {
			setup:       nil,
			token:       createOwnerToken(t),
			input:       &proto.ListEnabledAdminSubscriptionsRequest{PageSize: 2, Cursor: ""},
			expected:    nil,
			expectedErr: localizedError(statusPermissionDenied, locale.JaJP),
		},
		"success": {
			setup: func(s *NotificationService) {
				rows := mysqlmock.NewMockRows(mockController)
				rows.EXPECT().Close().Return(nil)
				rows.EXPECT().Next().Return(false)
				rows.EXPECT().Err().Return(nil)
				s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext(
					gomock.Any(), gomock.Any(), gomock.Any(),
				).Return(rows, nil)
				row := mysqlmock.NewMockRow(mockController)
				row.EXPECT().Scan(gomock.Any()).Return(nil)
				s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext(
					gomock.Any(), gomock.Any(), gomock.Any(),
				).Return(row)
			},
			token: createAdminToken(t),
			input: &proto.ListEnabledAdminSubscriptionsRequest{
				PageSize: 2,
				Cursor:   "1",
				SourceTypes: []proto.Subscription_SourceType{
					proto.Subscription_DOMAIN_EVENT_ADMIN_ACCOUNT,
					proto.Subscription_DOMAIN_EVENT_ADMIN_SUBSCRIPTION,
				},
			},
			expected:    &proto.ListEnabledAdminSubscriptionsResponse{Subscriptions: []*proto.Subscription{}, Cursor: "1"},
			expectedErr: nil,
		},
	}
	for msg, p := range patterns {
		t.Run(msg, func(t *testing.T) {
			s := newNotificationServiceWithMock(t, mockController)
			if p.setup != nil {
				p.setup(s)
			}
			ctx := createContextWithToken(t, p.token)
			actual, err := s.ListEnabledAdminSubscriptions(ctx, p.input)
			assert.Equal(t, p.expectedErr, err)
			assert.Equal(t, p.expected, actual)
		})
	}
}
diff --git a/pkg/notification/api/api.go b/pkg/notification/api/api.go
new file mode 100644
index 000000000..79afed0c5
--- /dev/null
+++ b/pkg/notification/api/api.go
@@ -0,0 +1,159 @@
+// Copyright 2022 The Bucketeer Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package api + +import ( + "context" + + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + accountclient "github.com/bucketeer-io/bucketeer/pkg/account/client" + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + "github.com/bucketeer-io/bucketeer/pkg/role" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" + notificationproto "github.com/bucketeer-io/bucketeer/proto/notification" +) + +type options struct { + logger *zap.Logger +} + +type Option func(*options) + +func WithLogger(l *zap.Logger) Option { + return func(opts *options) { + opts.logger = l + } +} + +type NotificationService struct { + mysqlClient mysql.Client + accountClient accountclient.Client + domainEventPublisher publisher.Publisher + opts *options + logger *zap.Logger +} + +func NewNotificationService( + mysqlClient mysql.Client, + accountClient accountclient.Client, + domainEventPublisher publisher.Publisher, + opts ...Option, +) *NotificationService { + dopts := &options{ + logger: zap.NewNop(), + } + for _, opt := range opts { + opt(dopts) + } + return &NotificationService{ + mysqlClient: mysqlClient, + accountClient: accountClient, + domainEventPublisher: domainEventPublisher, + opts: dopts, + logger: dopts.logger.Named("api"), + } +} + +func (s *NotificationService) Register(server *grpc.Server) { + notificationproto.RegisterNotificationServiceServer(server, s) +} + +func (s *NotificationService) checkAdminRole(ctx context.Context) (*eventproto.Editor, error) { + editor, err := role.CheckAdminRole(ctx) + if err != nil { + switch status.Code(err) { + case codes.Unauthenticated: + s.logger.Info( + "Unauthenticated", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + 
return nil, localizedError(statusUnauthenticated, locale.JaJP) + case codes.PermissionDenied: + s.logger.Info( + "Permission denied", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusPermissionDenied, locale.JaJP) + default: + s.logger.Error( + "Failed to check role", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + } + return editor, nil +} + +func (s *NotificationService) checkRole( + ctx context.Context, + requiredRole accountproto.Account_Role, + environmentNamespace string, +) (*eventproto.Editor, error) { + editor, err := role.CheckRole(ctx, requiredRole, func(email string) (*accountproto.GetAccountResponse, error) { + return s.accountClient.GetAccount(ctx, &accountproto.GetAccountRequest{ + Email: email, + EnvironmentNamespace: environmentNamespace, + }) + }) + if err != nil { + switch status.Code(err) { + case codes.Unauthenticated: + s.logger.Info( + "Unauthenticated", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, localizedError(statusUnauthenticated, locale.JaJP) + case codes.PermissionDenied: + s.logger.Info( + "Permission denied", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, localizedError(statusPermissionDenied, locale.JaJP) + default: + s.logger.Error( + "Failed to check role", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + } + return editor, nil +} + +func (s *NotificationService) publishDomainEvents(ctx context.Context, events []*eventproto.Event) map[string]error { + messages := make([]publisher.Message, 0, len(events)) + for _, event := range 
events { + messages = append(messages, event) + } + return s.domainEventPublisher.PublishMulti(ctx, messages) +} diff --git a/pkg/notification/api/api_test.go b/pkg/notification/api/api_test.go new file mode 100644 index 000000000..6c7ec19b2 --- /dev/null +++ b/pkg/notification/api/api_test.go @@ -0,0 +1,132 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + accountclientmock "github.com/bucketeer-io/bucketeer/pkg/account/client/mock" + "github.com/bucketeer-io/bucketeer/pkg/notification/domain" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + publishermock "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher/mock" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/storage" + mysqlmock "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + "github.com/bucketeer-io/bucketeer/pkg/token" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + proto "github.com/bucketeer-io/bucketeer/proto/notification" +) + +const ( + adminSubscriptionKind = "AdminSubscription" + subscriptionKind = "Subscription" +) + +func TestNewNotificationService(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + 
mysqlClient := mysqlmock.NewMockClient(mockController) + accountClientMock := accountclientmock.NewMockClient(mockController) + pm := publishermock.NewMockPublisher(mockController) + logger := zap.NewNop() + s := NewNotificationService(mysqlClient, accountClientMock, pm, WithLogger(logger)) + assert.IsType(t, &NotificationService{}, s) +} + +func newNotificationServiceWithMock( + t *testing.T, + c *gomock.Controller, +) *NotificationService { + t.Helper() + return &NotificationService{ + mysqlClient: mysqlmock.NewMockClient(c), + accountClient: accountclientmock.NewMockClient(c), + domainEventPublisher: publishermock.NewMockPublisher(c), + logger: zap.NewNop(), + } +} + +func createContextWithToken(t *testing.T, token *token.IDToken) context.Context { + t.Helper() + ctx := context.TODO() + return context.WithValue(ctx, rpc.Key, token) +} + +func createAdminToken(t *testing.T) *token.IDToken { + t.Helper() + return &token.IDToken{ + Issuer: "issuer", + Subject: "sub", + Audience: "audience", + Expiry: time.Now().AddDate(100, 0, 0), + IssuedAt: time.Now(), + Email: "email", + AdminRole: accountproto.Account_OWNER, + } +} + +func createOwnerToken(t *testing.T) *token.IDToken { + t.Helper() + return &token.IDToken{ + Issuer: "issuer", + Subject: "sub", + Audience: "audience", + Expiry: time.Now().AddDate(100, 0, 0), + IssuedAt: time.Now(), + Email: "email", + AdminRole: accountproto.Account_UNASSIGNED, + } +} + +type msgLengthMatcher struct{ length int } + +func newMsgLengthMatcher(length int) gomock.Matcher { + return &msgLengthMatcher{length: length} +} + +func (m *msgLengthMatcher) Matches(x interface{}) bool { + return len(x.([]publisher.Message)) == m.length +} + +func (m *msgLengthMatcher) String() string { + return fmt.Sprintf("length: %d", m.length) +} + +func putSubscription(t *testing.T, s storage.Client, kind, namespace string, disabled bool) { + t.Helper() + key := storage.NewKey("key-0", kind, namespace) + sourceTypes := []proto.Subscription_SourceType{ + 
proto.Subscription_DOMAIN_EVENT_ACCOUNT, + proto.Subscription_DOMAIN_EVENT_ADMIN_ACCOUNT, + } + recipient := &proto.Recipient{ + Type: proto.Recipient_SlackChannel, + SlackChannelRecipient: &proto.SlackChannelRecipient{WebhookUrl: "url"}, + } + subscription, err := domain.NewSubscription("sname", sourceTypes, recipient) + require.NoError(t, err) + subscription.Disabled = disabled + err = s.Put(context.Background(), key, subscription.Subscription) + require.NoError(t, err) +} diff --git a/pkg/notification/api/error.go b/pkg/notification/api/error.go new file mode 100644 index 000000000..075efb664 --- /dev/null +++ b/pkg/notification/api/error.go @@ -0,0 +1,198 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc/codes" + gstatus "google.golang.org/grpc/status" + + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/rpc/status" +) + +var ( + statusInternal = gstatus.New(codes.Internal, "notification: internal") + statusIDRequired = gstatus.New(codes.InvalidArgument, "notification: id must be specified") + statusNameRequired = gstatus.New(codes.InvalidArgument, "notification: name must be specified") + statusSourceTypesRequired = gstatus.New( + codes.InvalidArgument, + "notification: notification types must be specified", + ) + statusUnknownRecipient = gstatus.New(codes.InvalidArgument, "notification: unknown recipient") + statusRecipientRequired = gstatus.New( + codes.InvalidArgument, + "notification: recipient must be specified", + ) + statusSlackRecipientRequired = gstatus.New( + codes.InvalidArgument, + "notification: slack recipient must be specified", + ) + statusSlackRecipientWebhookURLRequired = gstatus.New( + codes.InvalidArgument, + "notification: webhook URL must be specified", + ) + statusInvalidCursor = gstatus.New(codes.InvalidArgument, "notification: cursor is invalid") + statusNoCommand = gstatus.New(codes.InvalidArgument, "notification: no command") + statusInvalidOrderBy = gstatus.New(codes.InvalidArgument, "notification: order_by is invalid") + statusNotFound = gstatus.New(codes.NotFound, "notification: not found") + statusAlreadyExists = gstatus.New(codes.AlreadyExists, "notification: already exists") + statusUnauthenticated = gstatus.New(codes.Unauthenticated, "notification: unauthenticated") + statusPermissionDenied = gstatus.New(codes.PermissionDenied, "notification: permission denied") + + errInternalJaJP = status.MustWithDetails( + statusInternal, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "内部エラーが発生しました", + }, + ) + errIDRequiredJaJP = status.MustWithDetails( + statusIDRequired, + 
&errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "idは必須です", + }, + ) + errNameRequiredJaJP = status.MustWithDetails( + statusNameRequired, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "nameは必須です", + }, + ) + errSourceTypesRequiredJaJP = status.MustWithDetails( + statusSourceTypesRequired, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "notification typeのリストは必須です", + }, + ) + errUnknownRecipientJaJP = status.MustWithDetails( + statusUnknownRecipient, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不明なrecipientです", + }, + ) + errRecipientRequiredJaJP = status.MustWithDetails( + statusRecipientRequired, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "recipientは必須です", + }, + ) + errSlackRecipientRequiredJaJP = status.MustWithDetails( + statusSlackRecipientRequired, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "slack recipientは必須です", + }, + ) + errSlackRecipientWebhookURLRequiredJaJP = status.MustWithDetails( + statusSlackRecipientWebhookURLRequired, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "slack recipientのwebhook urlは必須です", + }, + ) + errInvalidCursorJaJP = status.MustWithDetails( + statusInvalidCursor, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不正なcursorです", + }, + ) + errNoCommandJaJP = status.MustWithDetails( + statusNoCommand, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "commandは必須です", + }, + ) + errInvalidOrderByJaJP = status.MustWithDetails( + statusInvalidOrderBy, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不正なソート順の指定です", + }, + ) + errNotFoundJaJP = status.MustWithDetails( + statusNotFound, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "データが存在しません", + }, + ) + errAlreadyExistsJaJP = status.MustWithDetails( + statusAlreadyExists, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "同じidのデータがすでに存在します", + }, + ) + 
errUnauthenticatedJaJP = status.MustWithDetails( + statusUnauthenticated, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "認証されていません", + }, + ) + errPermissionDeniedJaJP = status.MustWithDetails( + statusPermissionDenied, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "権限がありません", + }, + ) +) + +func localizedError(s *gstatus.Status, loc string) error { + // handle loc if multi-lang is necessary + switch s { + case statusInternal: + return errInternalJaJP + case statusIDRequired: + return errIDRequiredJaJP + case statusNameRequired: + return errNameRequiredJaJP + case statusSourceTypesRequired: + return errSourceTypesRequiredJaJP + case statusUnknownRecipient: + return errUnknownRecipientJaJP + case statusRecipientRequired: + return errRecipientRequiredJaJP + case statusSlackRecipientRequired: + return errSlackRecipientRequiredJaJP + case statusSlackRecipientWebhookURLRequired: + return errSlackRecipientWebhookURLRequiredJaJP + case statusInvalidCursor: + return errInvalidCursorJaJP + case statusNoCommand: + return errNoCommandJaJP + case statusInvalidOrderBy: + return errInvalidOrderByJaJP + case statusNotFound: + return errNotFoundJaJP + case statusAlreadyExists: + return errAlreadyExistsJaJP + case statusUnauthenticated: + return errUnauthenticatedJaJP + case statusPermissionDenied: + return errPermissionDeniedJaJP + default: + return errInternalJaJP + } +} diff --git a/pkg/notification/api/subscription.go b/pkg/notification/api/subscription.go new file mode 100644 index 000000000..2835ac32e --- /dev/null +++ b/pkg/notification/api/subscription.go @@ -0,0 +1,623 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "context" + "strconv" + + "go.uber.org/zap" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/notification/command" + "github.com/bucketeer-io/bucketeer/pkg/notification/domain" + v2ss "github.com/bucketeer-io/bucketeer/pkg/notification/storage/v2" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" + notificationproto "github.com/bucketeer-io/bucketeer/proto/notification" +) + +func (s *NotificationService) CreateSubscription( + ctx context.Context, + req *notificationproto.CreateSubscriptionRequest, +) (*notificationproto.CreateSubscriptionResponse, error) { + editor, err := s.checkRole(ctx, accountproto.Account_EDITOR, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := s.validateCreateSubscriptionRequest(req); err != nil { + return nil, err + } + subscription, err := domain.NewSubscription(req.Command.Name, req.Command.SourceTypes, req.Command.Recipient) + if err != nil { + s.logger.Error( + "Failed to create a new subscription", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + zap.Any("sourceType", req.Command.SourceTypes), + zap.Any("recipient", req.Command.Recipient), + )..., + ) + return nil, 
localizedError(statusInternal, locale.JaJP) + } + var handler command.Handler = command.NewEmptySubscriptionCommandHandler() + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + subscriptionStorage := v2ss.NewSubscriptionStorage(tx) + if err := subscriptionStorage.CreateSubscription(ctx, subscription, req.EnvironmentNamespace); err != nil { + return err + } + handler = command.NewSubscriptionCommandHandler(editor, subscription, req.EnvironmentNamespace) + if err := handler.Handle(ctx, req.Command); err != nil { + return err + } + return nil + }) + if err != nil { + if err == v2ss.ErrSubscriptionAlreadyExists { + return nil, localizedError(statusAlreadyExists, locale.JaJP) + } + s.logger.Error( + "Failed to create subscription", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + if errs := s.publishDomainEvents(ctx, handler.Events()); len(errs) > 0 { + s.logger.Error( + "Failed to publish events", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Any("errors", errs), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return ¬ificationproto.CreateSubscriptionResponse{}, nil +} + +func (s *NotificationService) validateCreateSubscriptionRequest( + req *notificationproto.CreateSubscriptionRequest, +) error { + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + if req.Command.Name == "" { + return localizedError(statusNameRequired, locale.JaJP) + } + if len(req.Command.SourceTypes) == 0 { + return localizedError(statusSourceTypesRequired, locale.JaJP) + } + if err := 
s.validateRecipient(req.Command.Recipient); err != nil { + return err + } + return nil +} + +func (s *NotificationService) validateRecipient(recipient *notificationproto.Recipient) error { + if recipient == nil { + return localizedError(statusRecipientRequired, locale.JaJP) + } + if recipient.Type == notificationproto.Recipient_SlackChannel { + return s.validateSlackRecipient(recipient.SlackChannelRecipient) + } + return localizedError(statusUnknownRecipient, locale.JaJP) +} + +func (s *NotificationService) validateSlackRecipient(sr *notificationproto.SlackChannelRecipient) error { + // TODO: Check ping to the webhook URL? + if sr == nil { + return localizedError(statusSlackRecipientRequired, locale.JaJP) + } + if sr.WebhookUrl == "" { + return localizedError(statusSlackRecipientWebhookURLRequired, locale.JaJP) + } + return nil +} + +func (s *NotificationService) UpdateSubscription( + ctx context.Context, + req *notificationproto.UpdateSubscriptionRequest, +) (*notificationproto.UpdateSubscriptionResponse, error) { + editor, err := s.checkRole(ctx, accountproto.Account_EDITOR, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := s.validateUpdateSubscriptionRequest(req); err != nil { + return nil, err + } + commands := s.createUpdateSubscriptionCommands(req) + if err := s.updateSubscription(ctx, commands, req.Id, req.EnvironmentNamespace, editor); err != nil { + if status.Code(err) == codes.Internal { + s.logger.Error( + "Failed to update feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + zap.String("id", req.Id), + )..., + ) + } + return nil, err + } + return ¬ificationproto.UpdateSubscriptionResponse{}, nil +} + +func (s *NotificationService) EnableSubscription( + ctx context.Context, + req *notificationproto.EnableSubscriptionRequest, +) (*notificationproto.EnableSubscriptionResponse, error) { + editor, err := s.checkRole(ctx, 
accountproto.Account_EDITOR, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := s.validateEnableSubscriptionRequest(req); err != nil { + return nil, err + } + if err := s.updateSubscription( + ctx, + []command.Command{req.Command}, + req.Id, + req.EnvironmentNamespace, + editor, + ); err != nil { + if status.Code(err) == codes.Internal { + s.logger.Error( + "Failed to enable feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + } + return nil, err + } + return ¬ificationproto.EnableSubscriptionResponse{}, nil +} + +func (s *NotificationService) validateEnableSubscriptionRequest( + req *notificationproto.EnableSubscriptionRequest, +) error { + if req.Id == "" { + return localizedError(statusIDRequired, locale.JaJP) + } + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + return nil +} + +func (s *NotificationService) DisableSubscription( + ctx context.Context, + req *notificationproto.DisableSubscriptionRequest, +) (*notificationproto.DisableSubscriptionResponse, error) { + editor, err := s.checkRole(ctx, accountproto.Account_EDITOR, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := s.validateDisableSubscriptionRequest(req); err != nil { + return nil, err + } + if err := s.updateSubscription( + ctx, + []command.Command{req.Command}, + req.Id, + req.EnvironmentNamespace, + editor, + ); err != nil { + if status.Code(err) == codes.Internal { + s.logger.Error( + "Failed to disable feature", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + } + return nil, err + } + return ¬ificationproto.DisableSubscriptionResponse{}, nil +} + +func (s *NotificationService) validateDisableSubscriptionRequest( + req *notificationproto.DisableSubscriptionRequest, +) error { + if req.Id == "" { + return 
localizedError(statusIDRequired, locale.JaJP) + } + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + return nil +} + +func (s *NotificationService) updateSubscription( + ctx context.Context, + commands []command.Command, + id, environmentNamespace string, + editor *eventproto.Editor, +) error { + var handler command.Handler = command.NewEmptySubscriptionCommandHandler() + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return localizedError(statusInternal, locale.JaJP) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + subscriptionStorage := v2ss.NewSubscriptionStorage(tx) + subscription, err := subscriptionStorage.GetSubscription(ctx, id, environmentNamespace) + if err != nil { + return err + } + handler = command.NewSubscriptionCommandHandler(editor, subscription, environmentNamespace) + for _, command := range commands { + if err := handler.Handle(ctx, command); err != nil { + return err + } + } + if err = subscriptionStorage.UpdateSubscription(ctx, subscription, environmentNamespace); err != nil { + return err + } + return nil + }) + if err != nil { + if err == v2ss.ErrSubscriptionNotFound || err == v2ss.ErrSubscriptionUnexpectedAffectedRows { + return localizedError(statusNotFound, locale.JaJP) + } + s.logger.Error( + "Failed to update subscription", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("id", id), + )..., + ) + return localizedError(statusInternal, locale.JaJP) + } + if errs := s.publishDomainEvents(ctx, handler.Events()); len(errs) > 0 { + s.logger.Error( + "Failed to publish events", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Any("errors", errs), + zap.String("environmentNamespace", environmentNamespace), + zap.String("id", id), + )..., + ) + return localizedError(statusInternal, locale.JaJP) + } + return nil +} + 
+func (s *NotificationService) validateUpdateSubscriptionRequest( + req *notificationproto.UpdateSubscriptionRequest, +) error { + if req.Id == "" { + return localizedError(statusIDRequired, locale.JaJP) + } + if s.isNoUpdateSubscriptionCommand(req) { + return localizedError(statusNoCommand, locale.JaJP) + } + if req.AddSourceTypesCommand != nil && len(req.AddSourceTypesCommand.SourceTypes) == 0 { + return localizedError(statusSourceTypesRequired, locale.JaJP) + } + if req.DeleteSourceTypesCommand != nil && len(req.DeleteSourceTypesCommand.SourceTypes) == 0 { + return localizedError(statusSourceTypesRequired, locale.JaJP) + } + if req.RenameSubscriptionCommand != nil && req.RenameSubscriptionCommand.Name == "" { + return localizedError(statusNameRequired, locale.JaJP) + } + return nil +} + +func (s *NotificationService) isNoUpdateSubscriptionCommand(req *notificationproto.UpdateSubscriptionRequest) bool { + return req.AddSourceTypesCommand == nil && + req.DeleteSourceTypesCommand == nil && + req.RenameSubscriptionCommand == nil +} + +func (s *NotificationService) DeleteSubscription( + ctx context.Context, + req *notificationproto.DeleteSubscriptionRequest, +) (*notificationproto.DeleteSubscriptionResponse, error) { + editor, err := s.checkRole(ctx, accountproto.Account_EDITOR, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := validateDeleteSubscriptionRequest(req); err != nil { + return nil, err + } + var handler command.Handler = command.NewEmptySubscriptionCommandHandler() + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + subscriptionStorage := v2ss.NewSubscriptionStorage(tx) + subscription, err := subscriptionStorage.GetSubscription(ctx, req.Id, req.EnvironmentNamespace) + if 
err != nil { + return err + } + handler = command.NewSubscriptionCommandHandler(editor, subscription, req.EnvironmentNamespace) + if err := handler.Handle(ctx, req.Command); err != nil { + return err + } + if err = subscriptionStorage.DeleteSubscription(ctx, req.Id, req.EnvironmentNamespace); err != nil { + return err + } + return nil + }) + if err != nil { + if err == v2ss.ErrSubscriptionNotFound || err == v2ss.ErrSubscriptionUnexpectedAffectedRows { + return nil, localizedError(statusNotFound, locale.JaJP) + } + s.logger.Error( + "Failed to delete subscription", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("id", req.Id), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + if errs := s.publishDomainEvents(ctx, handler.Events()); len(errs) > 0 { + s.logger.Error( + "Failed to publish events", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Any("errors", errs), + zap.String("environmentNamespace", req.EnvironmentNamespace), + zap.String("id", req.Id), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return ¬ificationproto.DeleteSubscriptionResponse{}, nil +} + +func validateDeleteSubscriptionRequest(req *notificationproto.DeleteSubscriptionRequest) error { + if req.Id == "" { + return localizedError(statusIDRequired, locale.JaJP) + } + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + return nil +} + +func (s *NotificationService) createUpdateSubscriptionCommands( + req *notificationproto.UpdateSubscriptionRequest, +) []command.Command { + commands := make([]command.Command, 0) + if req.AddSourceTypesCommand != nil { + commands = append(commands, req.AddSourceTypesCommand) + } + if req.DeleteSourceTypesCommand != nil { + commands = append(commands, req.DeleteSourceTypesCommand) + } + if req.RenameSubscriptionCommand != nil { + commands = append(commands, req.RenameSubscriptionCommand) + } + return commands +} + +func (s *NotificationService) 
GetSubscription(
	ctx context.Context,
	req *notificationproto.GetSubscriptionRequest,
) (*notificationproto.GetSubscriptionResponse, error) {
	// GetSubscription fetches a single subscription by ID after a VIEWER role
	// check. Storage "not found" maps to statusNotFound; other failures are
	// logged and mapped to statusInternal.
	_, err := s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace)
	if err != nil {
		return nil, err
	}
	if err := validateGetSubscriptionRequest(req); err != nil {
		return nil, err
	}
	subscriptionStorage := v2ss.NewSubscriptionStorage(s.mysqlClient)
	subscription, err := subscriptionStorage.GetSubscription(ctx, req.Id, req.EnvironmentNamespace)
	if err != nil {
		if err == v2ss.ErrSubscriptionNotFound {
			return nil, localizedError(statusNotFound, locale.JaJP)
		}
		s.logger.Error(
			"Failed to get subscription",
			log.FieldsFromImcomingContext(ctx).AddFields(
				zap.Error(err),
				zap.String("id", req.Id),
			)...,
		)
		return nil, localizedError(statusInternal, locale.JaJP)
	}
	// NOTE(fix): restored "&notificationproto" from the mojibake "¬ificationproto".
	return &notificationproto.GetSubscriptionResponse{Subscription: subscription.Subscription}, nil
}

// validateGetSubscriptionRequest checks that the request carries an ID.
func validateGetSubscriptionRequest(req *notificationproto.GetSubscriptionRequest) error {
	if req.Id == "" {
		return localizedError(statusIDRequired, locale.JaJP)
	}
	return nil
}

// ListSubscriptions returns a page of subscriptions filtered by environment,
// source types, disabled flag and keyword, ordered per the request.
func (s *NotificationService) ListSubscriptions(
	ctx context.Context,
	req *notificationproto.ListSubscriptionsRequest,
) (*notificationproto.ListSubscriptionsResponse, error) {
	_, err := s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace)
	if err != nil {
		return nil, err
	}
	var whereParts []mysql.WherePart
	whereParts = append(whereParts, mysql.NewFilter("environment_namespace", "=", req.EnvironmentNamespace))
	sourceTypesValues := make([]interface{}, len(req.SourceTypes))
	for i, st := range req.SourceTypes {
		sourceTypesValues[i] = int32(st)
	}
	if len(sourceTypesValues) > 0 {
		whereParts = append(
			whereParts,
			mysql.NewJSONFilter("source_types", mysql.JSONContainsNumber, sourceTypesValues),
		)
	}
	if req.Disabled != nil {
		whereParts = append(whereParts, mysql.NewFilter("disabled", "=", req.Disabled.Value))
	}
	if req.SearchKeyword != "" {
		whereParts = append(whereParts, mysql.NewSearchQuery([]string{"name"}, req.SearchKeyword))
	}
	orders, err := s.newSubscriptionListOrders(req.OrderBy, req.OrderDirection)
	if err != nil {
		s.logger.Error(
			"Invalid argument",
			log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))...,
		)
		return nil, err
	}
	subscriptions, cursor, totalCount, err := s.listSubscriptionsMySQL(
		ctx,
		whereParts,
		orders,
		req.PageSize,
		req.Cursor,
	)
	if err != nil {
		return nil, err
	}
	// NOTE(fix): restored "&notificationproto" from the mojibake "¬ificationproto".
	return &notificationproto.ListSubscriptionsResponse{
		Subscriptions: subscriptions,
		Cursor:        cursor,
		TotalCount:    totalCount,
	}, nil
}

// newSubscriptionListOrders maps the request's OrderBy/OrderDirection enums
// to a MySQL ORDER BY clause; unknown OrderBy values yield statusInvalidOrderBy.
func (s *NotificationService) newSubscriptionListOrders(
	orderBy notificationproto.ListSubscriptionsRequest_OrderBy,
	orderDirection notificationproto.ListSubscriptionsRequest_OrderDirection,
) ([]*mysql.Order, error) {
	var column string
	switch orderBy {
	case notificationproto.ListSubscriptionsRequest_DEFAULT,
		notificationproto.ListSubscriptionsRequest_NAME:
		column = "name"
	case notificationproto.ListSubscriptionsRequest_CREATED_AT:
		column = "created_at"
	case notificationproto.ListSubscriptionsRequest_UPDATED_AT:
		column = "updated_at"
	default:
		return nil, localizedError(statusInvalidOrderBy, locale.JaJP)
	}
	direction := mysql.OrderDirectionAsc
	if orderDirection == notificationproto.ListSubscriptionsRequest_DESC {
		direction = mysql.OrderDirectionDesc
	}
	return []*mysql.Order{mysql.NewOrder(column, direction)}, nil
}

// ListEnabledSubscriptions returns a page of enabled (disabled = false)
// subscriptions for the environment, optionally filtered by source types.
func (s *NotificationService) ListEnabledSubscriptions(
	ctx context.Context,
	req *notificationproto.ListEnabledSubscriptionsRequest,
) (*notificationproto.ListEnabledSubscriptionsResponse, error) {
	_, err := s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace)
	if err != nil {
		return nil, err
	}
	var whereParts []mysql.WherePart
	whereParts = append(
		whereParts,
mysql.NewFilter("environment_namespace", "=", req.EnvironmentNamespace), + mysql.NewFilter("disabled", "=", false), + ) + sourceTypesValues := make([]interface{}, len(req.SourceTypes)) + for i, st := range req.SourceTypes { + sourceTypesValues[i] = int32(st) + } + if len(sourceTypesValues) > 0 { + whereParts = append( + whereParts, + mysql.NewJSONFilter("source_types", mysql.JSONContainsNumber, sourceTypesValues), + ) + } + subscriptions, cursor, _, err := s.listSubscriptionsMySQL( + ctx, + whereParts, + nil, + req.PageSize, + req.Cursor, + ) + if err != nil { + return nil, err + } + return ¬ificationproto.ListEnabledSubscriptionsResponse{ + Subscriptions: subscriptions, + Cursor: cursor, + }, nil +} + +func (s *NotificationService) listSubscriptionsMySQL( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + pageSize int64, + cursor string, +) ([]*notificationproto.Subscription, string, int64, error) { + limit := int(pageSize) + if cursor == "" { + cursor = "0" + } + offset, err := strconv.Atoi(cursor) + if err != nil { + return nil, "", 0, localizedError(statusInvalidCursor, locale.JaJP) + } + subscriptionStorage := v2ss.NewSubscriptionStorage(s.mysqlClient) + subscriptions, nextCursor, totalCount, err := subscriptionStorage.ListSubscriptions( + ctx, + whereParts, + orders, + limit, + offset, + ) + if err != nil { + s.logger.Error( + "Failed to list subscriptions", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return nil, "", 0, localizedError(statusInternal, locale.JaJP) + } + return subscriptions, strconv.Itoa(nextCursor), totalCount, nil +} diff --git a/pkg/notification/api/subscription_test.go b/pkg/notification/api/subscription_test.go new file mode 100644 index 000000000..35414112a --- /dev/null +++ b/pkg/notification/api/subscription_test.go @@ -0,0 +1,584 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/locale" + v2ss "github.com/bucketeer-io/bucketeer/pkg/notification/storage/v2" + publishermock "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher/mock" + mysqlmock "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + proto "github.com/bucketeer-io/bucketeer/proto/notification" +) + +func TestCreateSubscriptionMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*NotificationService) + input *proto.CreateSubscriptionRequest + expectedErr error + }{ + "err: ErrNoCommand": { + setup: nil, + input: &proto.CreateSubscriptionRequest{ + Command: nil, + }, + expectedErr: localizedError(statusNoCommand, locale.JaJP), + }, + "err: ErrSourceTypesRequired": { + input: &proto.CreateSubscriptionRequest{ + Command: &proto.CreateSubscriptionCommand{ + Name: "sname", + Recipient: &proto.Recipient{ + Type: proto.Recipient_SlackChannel, + SlackChannelRecipient: &proto.SlackChannelRecipient{WebhookUrl: "url"}, + }, + }, + }, + expectedErr: localizedError(statusSourceTypesRequired, locale.JaJP), + }, + "err: ErrRecipientRequired": { + input: &proto.CreateSubscriptionRequest{ + Command: &proto.CreateSubscriptionCommand{ + Name: "sname", + SourceTypes: 
[]proto.Subscription_SourceType{ + proto.Subscription_DOMAIN_EVENT_ACCOUNT, + proto.Subscription_DOMAIN_EVENT_FEATURE, + }, + }, + }, + expectedErr: localizedError(statusRecipientRequired, locale.JaJP), + }, + "err: ErrSlackRecipientRequired": { + input: &proto.CreateSubscriptionRequest{ + Command: &proto.CreateSubscriptionCommand{ + Name: "sname", + SourceTypes: []proto.Subscription_SourceType{ + proto.Subscription_DOMAIN_EVENT_ACCOUNT, + proto.Subscription_DOMAIN_EVENT_FEATURE, + }, + Recipient: &proto.Recipient{ + Type: proto.Recipient_SlackChannel, + }, + }, + }, + expectedErr: localizedError(statusSlackRecipientRequired, locale.JaJP), + }, + "err: ErrSlackRecipientWebhookURLRequired": { + input: &proto.CreateSubscriptionRequest{ + Command: &proto.CreateSubscriptionCommand{ + Name: "sname", + SourceTypes: []proto.Subscription_SourceType{ + proto.Subscription_DOMAIN_EVENT_ACCOUNT, + proto.Subscription_DOMAIN_EVENT_FEATURE, + }, + Recipient: &proto.Recipient{ + Type: proto.Recipient_SlackChannel, + SlackChannelRecipient: &proto.SlackChannelRecipient{WebhookUrl: ""}, + }, + }, + }, + expectedErr: localizedError(statusSlackRecipientWebhookURLRequired, locale.JaJP), + }, + "err: ErrNameRequired": { + input: &proto.CreateSubscriptionRequest{ + Command: &proto.CreateSubscriptionCommand{ + SourceTypes: []proto.Subscription_SourceType{ + proto.Subscription_DOMAIN_EVENT_ACCOUNT, + proto.Subscription_DOMAIN_EVENT_FEATURE, + }, + Recipient: &proto.Recipient{ + Type: proto.Recipient_SlackChannel, + SlackChannelRecipient: &proto.SlackChannelRecipient{WebhookUrl: "url"}, + }, + }, + }, + expectedErr: localizedError(statusNameRequired, locale.JaJP), + }, + "success": { + setup: func(s *NotificationService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + 
s.domainEventPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti( + gomock.Any(), gomock.Any(), + ).Return(nil) + }, + input: &proto.CreateSubscriptionRequest{ + Command: &proto.CreateSubscriptionCommand{ + Name: "sname", + SourceTypes: []proto.Subscription_SourceType{ + proto.Subscription_DOMAIN_EVENT_ACCOUNT, + proto.Subscription_DOMAIN_EVENT_FEATURE, + }, + Recipient: &proto.Recipient{ + Type: proto.Recipient_SlackChannel, + SlackChannelRecipient: &proto.SlackChannelRecipient{WebhookUrl: "url"}, + }, + }, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithToken(t, createAdminToken(t)) + service := newNotificationServiceWithMock(t, mockController) + if p.setup != nil { + p.setup(service) + } + _, err := service.CreateSubscription(ctx, p.input) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestUpdateSubscriptionMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*NotificationService) + input *proto.UpdateSubscriptionRequest + expectedErr error + }{ + "err: ErrIDRequired": { + input: &proto.UpdateSubscriptionRequest{}, + expectedErr: localizedError(statusIDRequired, locale.JaJP), + }, + "err: ErrNoCommand": { + input: &proto.UpdateSubscriptionRequest{ + Id: "key-0", + }, + expectedErr: localizedError(statusNoCommand, locale.JaJP), + }, + "err: add notification types: ErrSourceTypesRequired": { + input: &proto.UpdateSubscriptionRequest{ + Id: "key-0", + AddSourceTypesCommand: &proto.AddSourceTypesCommand{}, + }, + expectedErr: localizedError(statusSourceTypesRequired, locale.JaJP), + }, + "err: delete notification types: ErrSourceTypesRequired": { + input: &proto.UpdateSubscriptionRequest{ + Id: "key-0", + DeleteSourceTypesCommand: &proto.DeleteSourceTypesCommand{}, + }, + expectedErr: localizedError(statusSourceTypesRequired, locale.JaJP), + }, + "err: 
ErrNotFound": { + setup: func(s *NotificationService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2ss.ErrSubscriptionNotFound) + }, + input: &proto.UpdateSubscriptionRequest{ + Id: "key-1", + AddSourceTypesCommand: &proto.AddSourceTypesCommand{ + SourceTypes: []proto.Subscription_SourceType{ + proto.Subscription_DOMAIN_EVENT_ACCOUNT, + proto.Subscription_DOMAIN_EVENT_FEATURE, + }, + }, + }, + expectedErr: localizedError(statusNotFound, locale.JaJP), + }, + "success: addSourceTypes": { + setup: func(s *NotificationService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + s.domainEventPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti( + gomock.Any(), gomock.Any(), + ).Return(nil) + }, + input: &proto.UpdateSubscriptionRequest{ + Id: "key-0", + AddSourceTypesCommand: &proto.AddSourceTypesCommand{ + SourceTypes: []proto.Subscription_SourceType{ + proto.Subscription_DOMAIN_EVENT_FEATURE, + }, + }, + }, + expectedErr: nil, + }, + "success: deleteSourceTypes": { + setup: func(s *NotificationService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + s.domainEventPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti( + gomock.Any(), gomock.Any(), + ).Return(nil) + }, + input: &proto.UpdateSubscriptionRequest{ + Id: "key-0", + DeleteSourceTypesCommand: &proto.DeleteSourceTypesCommand{ + SourceTypes: []proto.Subscription_SourceType{ + proto.Subscription_DOMAIN_EVENT_ACCOUNT, + }, + }, + }, + expectedErr: nil, + }, + "success: all commands": { + setup: 
func(s *NotificationService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + s.domainEventPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti( + gomock.Any(), gomock.Any(), + ).Return(nil) + }, + input: &proto.UpdateSubscriptionRequest{ + Id: "key-0", + AddSourceTypesCommand: &proto.AddSourceTypesCommand{ + SourceTypes: []proto.Subscription_SourceType{ + proto.Subscription_DOMAIN_EVENT_FEATURE, + }, + }, + DeleteSourceTypesCommand: &proto.DeleteSourceTypesCommand{ + SourceTypes: []proto.Subscription_SourceType{ + proto.Subscription_DOMAIN_EVENT_ACCOUNT, + }, + }, + RenameSubscriptionCommand: &proto.RenameSubscriptionCommand{ + Name: "rename", + }, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithToken(t, createAdminToken(t)) + service := newNotificationServiceWithMock(t, mockController) + if p.setup != nil { + p.setup(service) + } + _, err := service.UpdateSubscription(ctx, p.input) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestEnableSubscriptionMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*NotificationService) + input *proto.EnableSubscriptionRequest + expectedErr error + }{ + "err: ErrIDRequired": { + input: &proto.EnableSubscriptionRequest{}, + expectedErr: localizedError(statusIDRequired, locale.JaJP), + }, + "err: ErrNoCommand": { + input: &proto.EnableSubscriptionRequest{ + Id: "key-0", + }, + expectedErr: localizedError(statusNoCommand, locale.JaJP), + }, + "success": { + setup: func(s *NotificationService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + 
gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + s.domainEventPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti( + gomock.Any(), gomock.Any(), + ).Return(nil) + }, + input: &proto.EnableSubscriptionRequest{ + Id: "key-0", + Command: &proto.EnableSubscriptionCommand{}, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithToken(t, createAdminToken(t)) + service := newNotificationServiceWithMock(t, mockController) + if p.setup != nil { + p.setup(service) + } + _, err := service.EnableSubscription(ctx, p.input) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestDisableSubscriptionMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*NotificationService) + input *proto.DisableSubscriptionRequest + expectedErr error + }{ + "err: ErrIDRequired": { + input: &proto.DisableSubscriptionRequest{}, + expectedErr: localizedError(statusIDRequired, locale.JaJP), + }, + "err: ErrNoCommand": { + input: &proto.DisableSubscriptionRequest{ + Id: "key-0", + }, + expectedErr: localizedError(statusNoCommand, locale.JaJP), + }, + "success": { + setup: func(s *NotificationService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + s.domainEventPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti( + gomock.Any(), gomock.Any(), + ).Return(nil) + }, + input: &proto.DisableSubscriptionRequest{ + Id: "key-0", + Command: &proto.DisableSubscriptionCommand{}, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithToken(t, createAdminToken(t)) + service := newNotificationServiceWithMock(t, mockController) + if p.setup != nil { + 
p.setup(service) + } + _, err := service.DisableSubscription(ctx, p.input) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestDeleteSubscriptionMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*NotificationService) + input *proto.DeleteSubscriptionRequest + expectedErr error + }{ + "err: ErrIDRequired": { + input: &proto.DeleteSubscriptionRequest{}, + expectedErr: localizedError(statusIDRequired, locale.JaJP), + }, + "err: ErrNoCommand": { + input: &proto.DeleteSubscriptionRequest{ + Id: "key-0", + }, + expectedErr: localizedError(statusNoCommand, locale.JaJP), + }, + "success": { + setup: func(s *NotificationService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + s.domainEventPublisher.(*publishermock.MockPublisher).EXPECT().PublishMulti( + gomock.Any(), gomock.Any(), + ).Return(nil) + }, + input: &proto.DeleteSubscriptionRequest{ + Id: "key-0", + Command: &proto.DeleteSubscriptionCommand{}, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithToken(t, createAdminToken(t)) + service := newNotificationServiceWithMock(t, mockController) + if p.setup != nil { + p.setup(service) + } + _, err := service.DeleteSubscription(ctx, p.input) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestGetSubscriptionMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*NotificationService) + input *proto.GetSubscriptionRequest + expectedErr error + }{ + "err: ErrIDRequired": { + input: &proto.GetSubscriptionRequest{}, + expectedErr: localizedError(statusIDRequired, locale.JaJP), + }, + "success": { + 
setup: func(s *NotificationService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + input: &proto.GetSubscriptionRequest{Id: "key-0"}, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + service := newNotificationServiceWithMock(t, mockController) + if p.setup != nil { + p.setup(service) + } + ctx := createContextWithToken(t, createAdminToken(t)) + actual, err := service.GetSubscription(ctx, p.input) + assert.Equal(t, p.expectedErr, err) + if err == nil { + assert.NotNil(t, actual) + } + }) + } +} + +func TestListSubscriptionsMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*NotificationService) + input *proto.ListSubscriptionsRequest + expected *proto.ListSubscriptionsResponse + expectedErr error + }{ + "success": { + setup: func(s *NotificationService) { + rows := mysqlmock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + input: &proto.ListSubscriptionsRequest{ + PageSize: 2, + Cursor: "", + SourceTypes: []proto.Subscription_SourceType{ + proto.Subscription_DOMAIN_EVENT_ACCOUNT, + proto.Subscription_DOMAIN_EVENT_SUBSCRIPTION, + }, + }, + expected: &proto.ListSubscriptionsResponse{Subscriptions: []*proto.Subscription{}, Cursor: "0"}, + expectedErr: nil, + }, + } + for msg, p := range patterns { + 
t.Run(msg, func(t *testing.T) { + s := newNotificationServiceWithMock(t, mockController) + if p.setup != nil { + p.setup(s) + } + ctx := createContextWithToken(t, createAdminToken(t)) + actual, err := s.ListSubscriptions(ctx, p.input) + assert.Equal(t, p.expectedErr, err) + assert.Equal(t, p.expected, actual) + }) + } +} + +func TestListEnabledSubscriptionsMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*NotificationService) + input *proto.ListEnabledSubscriptionsRequest + expected *proto.ListEnabledSubscriptionsResponse + expectedErr error + }{ + "success": { + setup: func(s *NotificationService) { + rows := mysqlmock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + input: &proto.ListEnabledSubscriptionsRequest{ + PageSize: 2, + Cursor: "1", + SourceTypes: []proto.Subscription_SourceType{ + proto.Subscription_DOMAIN_EVENT_ACCOUNT, + proto.Subscription_DOMAIN_EVENT_SUBSCRIPTION, + }, + }, + expected: &proto.ListEnabledSubscriptionsResponse{Subscriptions: []*proto.Subscription{}, Cursor: "1"}, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + s := newNotificationServiceWithMock(t, mockController) + if p.setup != nil { + p.setup(s) + } + ctx := createContextWithToken(t, createAdminToken(t)) + actual, err := s.ListEnabledSubscriptions(ctx, p.input) + assert.Equal(t, p.expectedErr, err) + assert.Equal(t, p.expected, actual) + }) + } +} diff --git 
a/pkg/notification/client/BUILD.bazel b/pkg/notification/client/BUILD.bazel new file mode 100644 index 000000000..dbfc99ffc --- /dev/null +++ b/pkg/notification/client/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["client.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/notification/client", + visibility = ["//visibility:public"], + deps = [ + "//pkg/rpc/client:go_default_library", + "//proto/notification:go_default_library", + "@org_golang_google_grpc//:go_default_library", + ], +) diff --git a/pkg/notification/client/client.go b/pkg/notification/client/client.go new file mode 100644 index 000000000..414657688 --- /dev/null +++ b/pkg/notification/client/client.go @@ -0,0 +1,50 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package client + +import ( + "google.golang.org/grpc" + + rpcclient "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + proto "github.com/bucketeer-io/bucketeer/proto/notification" +) + +type Client interface { + proto.NotificationServiceClient + Close() +} + +type client struct { + proto.NotificationServiceClient + address string + connection *grpc.ClientConn +} + +func NewClient(addr, certPath string, opts ...rpcclient.Option) (Client, error) { + conn, err := rpcclient.NewClientConn(addr, certPath, opts...) + if err != nil { + return nil, err + } + return &client{ + NotificationServiceClient: proto.NewNotificationServiceClient(conn), + address: addr, + connection: conn, + }, nil +} + +func (c *client) Close() { + c.connection.Close() +} diff --git a/pkg/notification/client/mock/BUILD.bazel b/pkg/notification/client/mock/BUILD.bazel new file mode 100644 index 000000000..bbc83a648 --- /dev/null +++ b/pkg/notification/client/mock/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["client.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/notification/client/mock", + visibility = ["//visibility:public"], + deps = [ + "//proto/notification:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@org_golang_google_grpc//:go_default_library", + ], +) diff --git a/pkg/notification/client/mock/client.go b/pkg/notification/client/mock/client.go new file mode 100644 index 000000000..48f5c7823 --- /dev/null +++ b/pkg/notification/client/mock/client.go @@ -0,0 +1,370 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: client.go + +// Package mock is a generated GoMock package. 
+package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + grpc "google.golang.org/grpc" + + notification "github.com/bucketeer-io/bucketeer/proto/notification" +) + +// MockClient is a mock of Client interface. +type MockClient struct { + ctrl *gomock.Controller + recorder *MockClientMockRecorder +} + +// MockClientMockRecorder is the mock recorder for MockClient. +type MockClientMockRecorder struct { + mock *MockClient +} + +// NewMockClient creates a new mock instance. +func NewMockClient(ctrl *gomock.Controller) *MockClient { + mock := &MockClient{ctrl: ctrl} + mock.recorder = &MockClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockClient) EXPECT() *MockClientMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockClient) Close() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Close") +} + +// Close indicates an expected call of Close. +func (mr *MockClientMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockClient)(nil).Close)) +} + +// CreateAdminSubscription mocks base method. +func (m *MockClient) CreateAdminSubscription(ctx context.Context, in *notification.CreateAdminSubscriptionRequest, opts ...grpc.CallOption) (*notification.CreateAdminSubscriptionResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateAdminSubscription", varargs...) + ret0, _ := ret[0].(*notification.CreateAdminSubscriptionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateAdminSubscription indicates an expected call of CreateAdminSubscription. 
+func (mr *MockClientMockRecorder) CreateAdminSubscription(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateAdminSubscription", reflect.TypeOf((*MockClient)(nil).CreateAdminSubscription), varargs...) +} + +// CreateSubscription mocks base method. +func (m *MockClient) CreateSubscription(ctx context.Context, in *notification.CreateSubscriptionRequest, opts ...grpc.CallOption) (*notification.CreateSubscriptionResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateSubscription", varargs...) + ret0, _ := ret[0].(*notification.CreateSubscriptionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateSubscription indicates an expected call of CreateSubscription. +func (mr *MockClientMockRecorder) CreateSubscription(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSubscription", reflect.TypeOf((*MockClient)(nil).CreateSubscription), varargs...) +} + +// DeleteAdminSubscription mocks base method. +func (m *MockClient) DeleteAdminSubscription(ctx context.Context, in *notification.DeleteAdminSubscriptionRequest, opts ...grpc.CallOption) (*notification.DeleteAdminSubscriptionResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteAdminSubscription", varargs...) + ret0, _ := ret[0].(*notification.DeleteAdminSubscriptionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteAdminSubscription indicates an expected call of DeleteAdminSubscription. 
+func (mr *MockClientMockRecorder) DeleteAdminSubscription(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAdminSubscription", reflect.TypeOf((*MockClient)(nil).DeleteAdminSubscription), varargs...) +} + +// DeleteSubscription mocks base method. +func (m *MockClient) DeleteSubscription(ctx context.Context, in *notification.DeleteSubscriptionRequest, opts ...grpc.CallOption) (*notification.DeleteSubscriptionResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteSubscription", varargs...) + ret0, _ := ret[0].(*notification.DeleteSubscriptionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteSubscription indicates an expected call of DeleteSubscription. +func (mr *MockClientMockRecorder) DeleteSubscription(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSubscription", reflect.TypeOf((*MockClient)(nil).DeleteSubscription), varargs...) +} + +// DisableAdminSubscription mocks base method. +func (m *MockClient) DisableAdminSubscription(ctx context.Context, in *notification.DisableAdminSubscriptionRequest, opts ...grpc.CallOption) (*notification.DisableAdminSubscriptionResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DisableAdminSubscription", varargs...) + ret0, _ := ret[0].(*notification.DisableAdminSubscriptionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DisableAdminSubscription indicates an expected call of DisableAdminSubscription. 
+func (mr *MockClientMockRecorder) DisableAdminSubscription(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisableAdminSubscription", reflect.TypeOf((*MockClient)(nil).DisableAdminSubscription), varargs...) +} + +// DisableSubscription mocks base method. +func (m *MockClient) DisableSubscription(ctx context.Context, in *notification.DisableSubscriptionRequest, opts ...grpc.CallOption) (*notification.DisableSubscriptionResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DisableSubscription", varargs...) + ret0, _ := ret[0].(*notification.DisableSubscriptionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DisableSubscription indicates an expected call of DisableSubscription. +func (mr *MockClientMockRecorder) DisableSubscription(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisableSubscription", reflect.TypeOf((*MockClient)(nil).DisableSubscription), varargs...) +} + +// EnableAdminSubscription mocks base method. +func (m *MockClient) EnableAdminSubscription(ctx context.Context, in *notification.EnableAdminSubscriptionRequest, opts ...grpc.CallOption) (*notification.EnableAdminSubscriptionResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "EnableAdminSubscription", varargs...) + ret0, _ := ret[0].(*notification.EnableAdminSubscriptionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// EnableAdminSubscription indicates an expected call of EnableAdminSubscription. 
+func (mr *MockClientMockRecorder) EnableAdminSubscription(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableAdminSubscription", reflect.TypeOf((*MockClient)(nil).EnableAdminSubscription), varargs...) +} + +// EnableSubscription mocks base method. +func (m *MockClient) EnableSubscription(ctx context.Context, in *notification.EnableSubscriptionRequest, opts ...grpc.CallOption) (*notification.EnableSubscriptionResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "EnableSubscription", varargs...) + ret0, _ := ret[0].(*notification.EnableSubscriptionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// EnableSubscription indicates an expected call of EnableSubscription. +func (mr *MockClientMockRecorder) EnableSubscription(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableSubscription", reflect.TypeOf((*MockClient)(nil).EnableSubscription), varargs...) +} + +// GetAdminSubscription mocks base method. +func (m *MockClient) GetAdminSubscription(ctx context.Context, in *notification.GetAdminSubscriptionRequest, opts ...grpc.CallOption) (*notification.GetAdminSubscriptionResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetAdminSubscription", varargs...) + ret0, _ := ret[0].(*notification.GetAdminSubscriptionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAdminSubscription indicates an expected call of GetAdminSubscription. 
+func (mr *MockClientMockRecorder) GetAdminSubscription(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAdminSubscription", reflect.TypeOf((*MockClient)(nil).GetAdminSubscription), varargs...) +} + +// GetSubscription mocks base method. +func (m *MockClient) GetSubscription(ctx context.Context, in *notification.GetSubscriptionRequest, opts ...grpc.CallOption) (*notification.GetSubscriptionResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetSubscription", varargs...) + ret0, _ := ret[0].(*notification.GetSubscriptionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSubscription indicates an expected call of GetSubscription. +func (mr *MockClientMockRecorder) GetSubscription(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubscription", reflect.TypeOf((*MockClient)(nil).GetSubscription), varargs...) +} + +// ListAdminSubscriptions mocks base method. +func (m *MockClient) ListAdminSubscriptions(ctx context.Context, in *notification.ListAdminSubscriptionsRequest, opts ...grpc.CallOption) (*notification.ListAdminSubscriptionsResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListAdminSubscriptions", varargs...) + ret0, _ := ret[0].(*notification.ListAdminSubscriptionsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListAdminSubscriptions indicates an expected call of ListAdminSubscriptions. 
+func (mr *MockClientMockRecorder) ListAdminSubscriptions(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAdminSubscriptions", reflect.TypeOf((*MockClient)(nil).ListAdminSubscriptions), varargs...) +} + +// ListEnabledAdminSubscriptions mocks base method. +func (m *MockClient) ListEnabledAdminSubscriptions(ctx context.Context, in *notification.ListEnabledAdminSubscriptionsRequest, opts ...grpc.CallOption) (*notification.ListEnabledAdminSubscriptionsResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListEnabledAdminSubscriptions", varargs...) + ret0, _ := ret[0].(*notification.ListEnabledAdminSubscriptionsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListEnabledAdminSubscriptions indicates an expected call of ListEnabledAdminSubscriptions. +func (mr *MockClientMockRecorder) ListEnabledAdminSubscriptions(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListEnabledAdminSubscriptions", reflect.TypeOf((*MockClient)(nil).ListEnabledAdminSubscriptions), varargs...) +} + +// ListEnabledSubscriptions mocks base method. +func (m *MockClient) ListEnabledSubscriptions(ctx context.Context, in *notification.ListEnabledSubscriptionsRequest, opts ...grpc.CallOption) (*notification.ListEnabledSubscriptionsResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListEnabledSubscriptions", varargs...) 
+ ret0, _ := ret[0].(*notification.ListEnabledSubscriptionsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListEnabledSubscriptions indicates an expected call of ListEnabledSubscriptions. +func (mr *MockClientMockRecorder) ListEnabledSubscriptions(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListEnabledSubscriptions", reflect.TypeOf((*MockClient)(nil).ListEnabledSubscriptions), varargs...) +} + +// ListSubscriptions mocks base method. +func (m *MockClient) ListSubscriptions(ctx context.Context, in *notification.ListSubscriptionsRequest, opts ...grpc.CallOption) (*notification.ListSubscriptionsResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListSubscriptions", varargs...) + ret0, _ := ret[0].(*notification.ListSubscriptionsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListSubscriptions indicates an expected call of ListSubscriptions. +func (mr *MockClientMockRecorder) ListSubscriptions(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSubscriptions", reflect.TypeOf((*MockClient)(nil).ListSubscriptions), varargs...) +} + +// UpdateAdminSubscription mocks base method. +func (m *MockClient) UpdateAdminSubscription(ctx context.Context, in *notification.UpdateAdminSubscriptionRequest, opts ...grpc.CallOption) (*notification.UpdateAdminSubscriptionResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdateAdminSubscription", varargs...) 
+ ret0, _ := ret[0].(*notification.UpdateAdminSubscriptionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateAdminSubscription indicates an expected call of UpdateAdminSubscription. +func (mr *MockClientMockRecorder) UpdateAdminSubscription(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAdminSubscription", reflect.TypeOf((*MockClient)(nil).UpdateAdminSubscription), varargs...) +} + +// UpdateSubscription mocks base method. +func (m *MockClient) UpdateSubscription(ctx context.Context, in *notification.UpdateSubscriptionRequest, opts ...grpc.CallOption) (*notification.UpdateSubscriptionResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdateSubscription", varargs...) + ret0, _ := ret[0].(*notification.UpdateSubscriptionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateSubscription indicates an expected call of UpdateSubscription. +func (mr *MockClientMockRecorder) UpdateSubscription(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateSubscription", reflect.TypeOf((*MockClient)(nil).UpdateSubscription), varargs...) 
+} diff --git a/pkg/notification/cmd/sender/BUILD.bazel b/pkg/notification/cmd/sender/BUILD.bazel new file mode 100644 index 000000000..737cd0056 --- /dev/null +++ b/pkg/notification/cmd/sender/BUILD.bazel @@ -0,0 +1,29 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["sender.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/notification/cmd/sender", + visibility = ["//visibility:public"], + deps = [ + "//pkg/cli:go_default_library", + "//pkg/environment/client:go_default_library", + "//pkg/eventcounter/client:go_default_library", + "//pkg/experiment/client:go_default_library", + "//pkg/feature/client:go_default_library", + "//pkg/health:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/notification/client:go_default_library", + "//pkg/notification/sender:go_default_library", + "//pkg/notification/sender/informer/batch:go_default_library", + "//pkg/notification/sender/informer/batch/job:go_default_library", + "//pkg/notification/sender/informer/domainevent:go_default_library", + "//pkg/notification/sender/notifier:go_default_library", + "//pkg/pubsub:go_default_library", + "//pkg/pubsub/puller:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/rpc/client:go_default_library", + "@in_gopkg_alecthomas_kingpin_v2//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/notification/cmd/sender/sender.go b/pkg/notification/cmd/sender/sender.go new file mode 100644 index 000000000..f79eae98b --- /dev/null +++ b/pkg/notification/cmd/sender/sender.go @@ -0,0 +1,343 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sender + +import ( + "context" + "os" + "time" + + "go.uber.org/zap" + kingpin "gopkg.in/alecthomas/kingpin.v2" + + "github.com/bucketeer-io/bucketeer/pkg/cli" + environmentclient "github.com/bucketeer-io/bucketeer/pkg/environment/client" + ecclient "github.com/bucketeer-io/bucketeer/pkg/eventcounter/client" + experimentclient "github.com/bucketeer-io/bucketeer/pkg/experiment/client" + featureclient "github.com/bucketeer-io/bucketeer/pkg/feature/client" + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + notificationclient "github.com/bucketeer-io/bucketeer/pkg/notification/client" + notificationsender "github.com/bucketeer-io/bucketeer/pkg/notification/sender" + batchinformer "github.com/bucketeer-io/bucketeer/pkg/notification/sender/informer/batch" + "github.com/bucketeer-io/bucketeer/pkg/notification/sender/informer/batch/job" + domaineventinformer "github.com/bucketeer-io/bucketeer/pkg/notification/sender/informer/domainevent" + "github.com/bucketeer-io/bucketeer/pkg/notification/sender/notifier" + "github.com/bucketeer-io/bucketeer/pkg/pubsub" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/rpc/client" +) + +const command = "sender" + +type sender struct { + *kingpin.CmdClause + port *int + project *string + domainTopic *string + domainSubscription *string + notificationService *string + environmentService *string + eventCounterService *string + featureService *string + experimentService 
*string + scheduleFeatureStaleWatcher *string + scheduleExperimentRunningWatcher *string + scheduleMAUCountWatcher *string + maxMPS *int + numWorkers *int + certPath *string + keyPath *string + serviceTokenPath *string + pullerNumGoroutines *int + pullerMaxOutstandingMessages *int + pullerMaxOutstandingBytes *int + webURL *string +} + +func RegisterCommand(r cli.CommandRegistry, p cli.ParentCommand) cli.Command { + cmd := p.Command(command, "Start the Notification Sender") + sender := &sender{ + CmdClause: cmd, + port: cmd.Flag("port", "Port to bind to.").Default("9090").Int(), + project: cmd.Flag("project", "Google Cloud project name.").Required().String(), + domainTopic: cmd.Flag("domain-topic", "Google PubSub topic name of incoming domain events.").String(), + domainSubscription: cmd.Flag( + "domain-subscription", + "Google PubSub subscription name of incoming domain event.", + ).String(), + notificationService: cmd.Flag( + "notification-service", + "bucketeer-notification-service address.", + ).Default("notification:9090").String(), + environmentService: cmd.Flag( + "environment-service", + "bucketeer-environment-service address.", + ).Default("environment:9090").String(), + eventCounterService: cmd.Flag( + "event-counter-service", + "bucketeer-event-counter-service address.", + ).Default("event-counter:9090").String(), + featureService: cmd.Flag( + "feature-service", + "bucketeer-feature-service address.", + ).Default("feature:9090").String(), + experimentService: cmd.Flag( + "experiment-service", + "bucketeer-experiment-service address.", + ).Default("experiment:9090").String(), + scheduleFeatureStaleWatcher: cmd.Flag( + "schedule-feature-stale-watcher", + "Cron format schedule for feature stale watcher.", + ).Default("0 0 1 * * MON").String(), // on every Monday 10:00am JST + scheduleExperimentRunningWatcher: cmd.Flag( + "schedule-experiment-running-watcher", + "Cron format schedule for experiment running watcher.", + ).Default("0 0 1 * * *").String(), // on 
every day 10:00am JST + scheduleMAUCountWatcher: cmd.Flag( + "schedule-mau-count-watcher", + "Cron format schedule for mau count watcher.", + ).Default("0 0 1 1 * *").String(), // on every month 1st 10:00am JST + maxMPS: cmd.Flag("max-mps", "Maximum messages should be handled in a second.").Default("5000").Int(), + numWorkers: cmd.Flag("num-workers", "Number of workers.").Default("1").Int(), + certPath: cmd.Flag("cert", "Path to TLS certificate.").Required().String(), + keyPath: cmd.Flag("key", "Path to TLS key.").Required().String(), + serviceTokenPath: cmd.Flag("service-token", "Path to service token.").Required().String(), + pullerNumGoroutines: cmd.Flag( + "puller-num-goroutines", + "Number of goroutines will be spawned to pull messages.", + ).Int(), + pullerMaxOutstandingMessages: cmd.Flag( + "puller-max-outstanding-messages", + "Maximum number of unprocessed messages.", + ).Int(), + pullerMaxOutstandingBytes: cmd.Flag("puller-max-outstanding-bytes", "Maximum size of unprocessed messages.").Int(), + webURL: cmd.Flag("web-url", "Web console URL.").Required().String(), + } + r.RegisterCommand(sender) + return sender +} + +func (s *sender) Run(ctx context.Context, metrics metrics.Metrics, logger *zap.Logger) error { + registerer := metrics.DefaultRegisterer() + + *s.serviceTokenPath = s.insertTelepresenceMoutRoot(*s.serviceTokenPath) + *s.keyPath = s.insertTelepresenceMoutRoot(*s.keyPath) + *s.certPath = s.insertTelepresenceMoutRoot(*s.certPath) + + domainEventPuller, err := s.createPuller(ctx, registerer, logger) + if err != nil { + return err + } + + creds, err := client.NewPerRPCCredentials(*s.serviceTokenPath) + if err != nil { + return err + } + + notificationClient, err := notificationclient.NewClient(*s.notificationService, *s.certPath, + client.WithPerRPCCredentials(creds), + client.WithDialTimeout(30*time.Second), + client.WithBlock(), + client.WithMetrics(registerer), + client.WithLogger(logger), + ) + if err != nil { + return err + } + defer 
notificationClient.Close() + + environmentClient, err := environmentclient.NewClient(*s.environmentService, *s.certPath, + client.WithPerRPCCredentials(creds), + client.WithDialTimeout(30*time.Second), + client.WithBlock(), + client.WithMetrics(registerer), + client.WithLogger(logger), + ) + if err != nil { + return err + } + defer environmentClient.Close() + + eventCounterClient, err := ecclient.NewClient(*s.eventCounterService, *s.certPath, + client.WithPerRPCCredentials(creds), + client.WithDialTimeout(30*time.Second), + client.WithBlock(), + client.WithMetrics(registerer), + client.WithLogger(logger), + ) + if err != nil { + return err + } + defer eventCounterClient.Close() + + featureClient, err := featureclient.NewClient(*s.featureService, *s.certPath, + client.WithPerRPCCredentials(creds), + client.WithDialTimeout(30*time.Second), + client.WithBlock(), + client.WithMetrics(registerer), + client.WithLogger(logger), + ) + if err != nil { + return err + } + defer featureClient.Close() + + experimentClient, err := experimentclient.NewClient(*s.experimentService, *s.certPath, + client.WithPerRPCCredentials(creds), + client.WithDialTimeout(30*time.Second), + client.WithBlock(), + client.WithMetrics(registerer), + client.WithLogger(logger), + ) + if err != nil { + return err + } + defer experimentClient.Close() + + slackNotifier := notifier.NewSlackNotifier(*s.webURL) + + notificationSender := notificationsender.NewSender( + notificationClient, + []notifier.Notifier{slackNotifier}, + notificationsender.WithLogger(logger), + ) + + domainEventInformer := domaineventinformer.NewDomainEventInformer( + environmentClient, + domainEventPuller, + notificationSender, + domaineventinformer.WithMetrics(registerer), + domaineventinformer.WithLogger(logger), + ) + defer domainEventInformer.Stop() + go domainEventInformer.Run() // nolint:errcheck + + jobs := s.createJobs( + environmentClient, + featureClient, + experimentClient, + eventCounterClient, + notificationSender, + 
logger, + ) + batchInformer, err := batchinformer.NewJobInformer( + jobs, + batchinformer.WithMetrics(registerer), + batchinformer.WithLogger(logger), + ) + if err != nil { + return err + } + defer batchInformer.Stop() + go batchInformer.Run() // nolint:errcheck + + healthChecker := health.NewGrpcChecker( + health.WithTimeout(time.Second), + health.WithCheck("metrics", metrics.Check), + health.WithCheck("domain_event_informer", domainEventInformer.Check), + health.WithCheck("batch_informer", batchInformer.Check), + ) + go healthChecker.Run(ctx) + + server := rpc.NewServer(healthChecker, *s.certPath, *s.keyPath, + rpc.WithPort(*s.port), + rpc.WithMetrics(registerer), + rpc.WithLogger(logger), + rpc.WithHandler("/health", healthChecker), + ) + defer server.Stop(10 * time.Second) + go server.Run() + + <-ctx.Done() + return nil +} + +func (s *sender) createPuller( + ctx context.Context, + registerer metrics.Registerer, + logger *zap.Logger, +) (puller.Puller, error) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + client, err := pubsub.NewClient( + ctx, + *s.project, + pubsub.WithMetrics(registerer), + pubsub.WithLogger(logger), + ) + if err != nil { + return nil, err + } + puller, err := client.CreatePuller(*s.domainSubscription, *s.domainTopic, + pubsub.WithNumGoroutines(*s.pullerNumGoroutines), + pubsub.WithMaxOutstandingMessages(*s.pullerMaxOutstandingMessages), + pubsub.WithMaxOutstandingBytes(*s.pullerMaxOutstandingBytes), + ) + if err != nil { + return nil, err + } + return puller, nil +} + +func (s *sender) createJobs( + environmentClient environmentclient.Client, + featureClient featureclient.Client, + experimentClient experimentclient.Client, + eventCounterClient ecclient.Client, + notificationSender notificationsender.Sender, + logger *zap.Logger) []*batchinformer.Job { + return []*batchinformer.Job{ + { + Cron: *s.scheduleFeatureStaleWatcher, + Name: "feature_stale_watcher", + Job: job.NewFeatureWatcher( + environmentClient, + 
featureClient, + notificationSender, + job.WithTimeout(1*time.Minute), + job.WithLogger(logger)), + }, + { + Cron: *s.scheduleExperimentRunningWatcher, + Name: "experiment_running_watcher", + Job: job.NewExperimentRunningWatcher( + environmentClient, + experimentClient, + notificationSender, + job.WithTimeout(1*time.Minute), + job.WithLogger(logger)), + }, + { + Cron: *s.scheduleMAUCountWatcher, + Name: "mau_count", + Job: job.NewMAUCountWatcher( + environmentClient, + eventCounterClient, + notificationSender, + job.WithTimeout(60*time.Minute), + job.WithLogger(logger)), + }, + } +} + +func (s *sender) insertTelepresenceMoutRoot(path string) string { + volumeRoot := os.Getenv("TELEPRESENCE_ROOT") + if volumeRoot == "" { + return path + } + return volumeRoot + path +} diff --git a/pkg/notification/cmd/server/BUILD.bazel b/pkg/notification/cmd/server/BUILD.bazel new file mode 100644 index 000000000..b5db6c12a --- /dev/null +++ b/pkg/notification/cmd/server/BUILD.bazel @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["server.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/notification/cmd/server", + visibility = ["//visibility:public"], + deps = [ + "//pkg/account/client:go_default_library", + "//pkg/cli:go_default_library", + "//pkg/health:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/notification/api:go_default_library", + "//pkg/pubsub:go_default_library", + "//pkg/pubsub/publisher:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/rpc/client:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//pkg/token:go_default_library", + "@in_gopkg_alecthomas_kingpin_v2//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/notification/cmd/server/server.go b/pkg/notification/cmd/server/server.go new file mode 100644 index 000000000..9049fb485 --- /dev/null +++ 
b/pkg/notification/cmd/server/server.go @@ -0,0 +1,205 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + "context" + "os" + "time" + + "go.uber.org/zap" + kingpin "gopkg.in/alecthomas/kingpin.v2" + + accountclient "github.com/bucketeer-io/bucketeer/pkg/account/client" + "github.com/bucketeer-io/bucketeer/pkg/cli" + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/notification/api" + "github.com/bucketeer-io/bucketeer/pkg/pubsub" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + "github.com/bucketeer-io/bucketeer/pkg/token" +) + +const command = "server" + +type server struct { + *kingpin.CmdClause + port *int + project *string + mysqlUser *string + mysqlPass *string + mysqlHost *string + mysqlPort *int + mysqlDBName *string + domainEventTopic *string + accountService *string + certPath *string + keyPath *string + serviceTokenPath *string + oauthKeyPath *string + oauthClientID *string + oauthIssuer *string +} + +func RegisterCommand(r cli.CommandRegistry, p cli.ParentCommand) cli.Command { + cmd := p.Command(command, "Start the gRPC server") + server := &server{ + CmdClause: cmd, + port: cmd.Flag("port", "Port to bind 
to.").Default("9090").Int(), + project: cmd.Flag("project", "Google Cloud project name.").Required().String(), + mysqlUser: cmd.Flag("mysql-user", "MySQL user.").Required().String(), + mysqlPass: cmd.Flag("mysql-pass", "MySQL password.").Required().String(), + mysqlHost: cmd.Flag("mysql-host", "MySQL host.").Required().String(), + mysqlPort: cmd.Flag("mysql-port", "MySQL port.").Required().Int(), + mysqlDBName: cmd.Flag("mysql-db-name", "MySQL database name.").Required().String(), + domainEventTopic: cmd.Flag( + "domain-event-topic", + "PubSub topic to publish domain events.", + ).Required().String(), + accountService: cmd.Flag( + "account-service", + "bucketeer-account-service address.", + ).Default("account:9090").String(), + certPath: cmd.Flag("cert", "Path to TLS certificate.").Required().String(), + keyPath: cmd.Flag("key", "Path to TLS key.").Required().String(), + serviceTokenPath: cmd.Flag("service-token", "Path to service token.").Required().String(), + oauthKeyPath: cmd.Flag("oauth-key", "Path to public key used to verify oauth token.").Required().String(), + oauthClientID: cmd.Flag("oauth-client-id", "The oauth clientID registered at dex.").Required().String(), + oauthIssuer: cmd.Flag("oauth-issuer", "The url of dex issuer.").Required().String(), + } + r.RegisterCommand(server) + return server +} + +func (s *server) Run(ctx context.Context, metrics metrics.Metrics, logger *zap.Logger) error { + registerer := metrics.DefaultRegisterer() + + *s.serviceTokenPath = s.insertTelepresenceMoutRoot(*s.serviceTokenPath) + *s.oauthKeyPath = s.insertTelepresenceMoutRoot(*s.oauthKeyPath) + *s.keyPath = s.insertTelepresenceMoutRoot(*s.keyPath) + *s.certPath = s.insertTelepresenceMoutRoot(*s.certPath) + + mysqlClient, err := s.createMySQLClient(ctx, registerer, logger) + if err != nil { + return err + } + defer mysqlClient.Close() + + domainEventPublisher, err := s.createDomainEventPublisher(ctx, registerer, logger) + if err != nil { + return err + } + defer 
domainEventPublisher.Stop() + + creds, err := client.NewPerRPCCredentials(*s.serviceTokenPath) + if err != nil { + return err + } + + accountClient, err := accountclient.NewClient(*s.accountService, *s.certPath, + client.WithPerRPCCredentials(creds), + client.WithDialTimeout(30*time.Second), + client.WithBlock(), + client.WithMetrics(registerer), + client.WithLogger(logger), + ) + if err != nil { + return err + } + defer accountClient.Close() + + service := api.NewNotificationService( + mysqlClient, + accountClient, + domainEventPublisher, + api.WithLogger(logger), + ) + + verifier, err := token.NewVerifier(*s.oauthKeyPath, *s.oauthIssuer, *s.oauthClientID) + if err != nil { + return err + } + + healthChecker := health.NewGrpcChecker( + health.WithTimeout(time.Second), + health.WithCheck("metrics", metrics.Check), + ) + go healthChecker.Run(ctx) + + server := rpc.NewServer(service, *s.certPath, *s.keyPath, + rpc.WithPort(*s.port), + rpc.WithVerifier(verifier), + rpc.WithMetrics(registerer), + rpc.WithLogger(logger), + rpc.WithService(healthChecker), + rpc.WithHandler("/health", healthChecker), + ) + defer server.Stop(10 * time.Second) + go server.Run() + + <-ctx.Done() + return nil +} + +func (s *server) createMySQLClient( + ctx context.Context, + registerer metrics.Registerer, + logger *zap.Logger, +) (mysql.Client, error) { + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + return mysql.NewClient( + ctx, + *s.mysqlUser, *s.mysqlPass, *s.mysqlHost, + *s.mysqlPort, + *s.mysqlDBName, + mysql.WithLogger(logger), + mysql.WithMetrics(registerer), + ) +} + +func (s *server) createDomainEventPublisher( + ctx context.Context, + registerer metrics.Registerer, + logger *zap.Logger, +) (publisher.Publisher, error) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + client, err := pubsub.NewClient( + ctx, + *s.project, + pubsub.WithMetrics(registerer), + pubsub.WithLogger(logger), + ) + if err != nil { + return nil, err + } 
+ domainPublisher, err := client.CreatePublisher(*s.domainEventTopic) + if err != nil { + return nil, err + } + return domainPublisher, nil +} + +func (s *server) insertTelepresenceMoutRoot(path string) string { + volumeRoot := os.Getenv("TELEPRESENCE_ROOT") + if volumeRoot == "" { + return path + } + return volumeRoot + path +} diff --git a/pkg/notification/command/BUILD.bazel b/pkg/notification/command/BUILD.bazel new file mode 100644 index 000000000..e686709ce --- /dev/null +++ b/pkg/notification/command/BUILD.bazel @@ -0,0 +1,36 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "admin_subscription.go", + "command.go", + "subscription.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/notification/command", + visibility = ["//visibility:public"], + deps = [ + "//pkg/domainevent/domain:go_default_library", + "//pkg/notification/domain:go_default_library", + "//proto/event/domain:go_default_library", + "//proto/notification:go_default_library", + "@com_github_golang_protobuf//proto:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "admin_subscription_test.go", + "subscription_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//pkg/notification/domain:go_default_library", + "//proto/account:go_default_library", + "//proto/event/domain:go_default_library", + "//proto/notification:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + ], +) diff --git a/pkg/notification/command/admin_subscription.go b/pkg/notification/command/admin_subscription.go new file mode 100644 index 000000000..8ba97fe87 --- /dev/null +++ b/pkg/notification/command/admin_subscription.go @@ -0,0 +1,160 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package command + +import ( + "context" + + pb "github.com/golang/protobuf/proto" // nolint:staticcheck + + domainevent "github.com/bucketeer-io/bucketeer/pkg/domainevent/domain" + "github.com/bucketeer-io/bucketeer/pkg/notification/domain" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" + proto "github.com/bucketeer-io/bucketeer/proto/notification" +) + +type adminSubscriptionCommandHandler struct { + editor *eventproto.Editor + subscription *domain.Subscription + events []*eventproto.Event +} + +func NewAdminSubscriptionCommandHandler( + editor *eventproto.Editor, + subscription *domain.Subscription) Handler { + return &adminSubscriptionCommandHandler{ + editor: editor, + subscription: subscription, + events: []*eventproto.Event{}, + } +} + +// for unit test +func NewEmptyAdminSubscriptionCommandHandler() Handler { + return &adminSubscriptionCommandHandler{} +} + +func (h *adminSubscriptionCommandHandler) Handle(ctx context.Context, cmd Command) error { + switch c := cmd.(type) { + case *proto.CreateAdminSubscriptionCommand: + return h.create(ctx, c) + case *proto.DeleteAdminSubscriptionCommand: + return h.delete(ctx, c) + case *proto.AddAdminSubscriptionSourceTypesCommand: + return h.addSourceTypes(ctx, c) + case *proto.DeleteAdminSubscriptionSourceTypesCommand: + return h.deleteSourceTypes(ctx, c) + case *proto.EnableAdminSubscriptionCommand: + return h.enable(ctx, c) + case 
*proto.DisableAdminSubscriptionCommand: + return h.disable(ctx, c) + case *proto.RenameAdminSubscriptionCommand: + return h.rename(ctx, c) + } + return errUnknownCommand +} + +func (h *adminSubscriptionCommandHandler) create(ctx context.Context, cmd *proto.CreateAdminSubscriptionCommand) error { + return h.createEvent(ctx, eventproto.Event_ADMIN_SUBSCRIPTION_CREATED, &eventproto.AdminSubscriptionCreatedEvent{ + SourceTypes: h.subscription.SourceTypes, + Recipient: h.subscription.Recipient, + Name: h.subscription.Name, + }) +} + +func (h *adminSubscriptionCommandHandler) delete(ctx context.Context, cmd *proto.DeleteAdminSubscriptionCommand) error { + return h.createEvent(ctx, eventproto.Event_ADMIN_SUBSCRIPTION_DELETED, &eventproto.AdminSubscriptionDeletedEvent{}) +} + +func (h *adminSubscriptionCommandHandler) addSourceTypes( + ctx context.Context, + cmd *proto.AddAdminSubscriptionSourceTypesCommand, +) error { + err := h.subscription.AddSourceTypes(cmd.SourceTypes) + if err != nil { + return err + } + return h.createEvent( + ctx, + eventproto.Event_ADMIN_SUBSCRIPTION_SOURCE_TYPE_ADDED, + &eventproto.AdminSubscriptionSourceTypesAddedEvent{ + SourceTypes: cmd.SourceTypes, + }, + ) +} + +func (h *adminSubscriptionCommandHandler) deleteSourceTypes( + ctx context.Context, + cmd *proto.DeleteAdminSubscriptionSourceTypesCommand, +) error { + err := h.subscription.DeleteSourceTypes(cmd.SourceTypes) + if err != nil { + return err + } + return h.createEvent( + ctx, + eventproto.Event_ADMIN_SUBSCRIPTION_SOURCE_TYPE_DELETED, + &eventproto.AdminSubscriptionSourceTypesDeletedEvent{ + SourceTypes: cmd.SourceTypes, + }, + ) +} + +func (h *adminSubscriptionCommandHandler) enable(ctx context.Context, cmd *proto.EnableAdminSubscriptionCommand) error { + if err := h.subscription.Enable(); err != nil { + return err + } + return h.createEvent(ctx, eventproto.Event_ADMIN_SUBSCRIPTION_ENABLED, &eventproto.AdminSubscriptionEnabledEvent{}) +} + +func (h *adminSubscriptionCommandHandler) 
disable( + ctx context.Context, + cmd *proto.DisableAdminSubscriptionCommand, +) error { + err := h.subscription.Disable() + if err != nil { + return err + } + return h.createEvent(ctx, eventproto.Event_ADMIN_SUBSCRIPTION_DISABLED, &eventproto.AdminSubscriptionDisabledEvent{}) +} + +func (h *adminSubscriptionCommandHandler) rename(ctx context.Context, cmd *proto.RenameAdminSubscriptionCommand) error { + err := h.subscription.Rename(cmd.Name) + if err != nil { + return err + } + return h.createEvent( + ctx, + eventproto.Event_ADMIN_SUBSCRIPTION_RENAMED, + &eventproto.AdminSubscriptionRenamedEvent{Name: cmd.Name}, + ) +} + +func (h *adminSubscriptionCommandHandler) createEvent( + ctx context.Context, + eventType eventproto.Event_Type, + event pb.Message, +) error { + e, err := domainevent.NewAdminEvent(h.editor, eventproto.Event_ADMIN_SUBSCRIPTION, h.subscription.Id, eventType, event) + if err != nil { + return err + } + h.events = append(h.events, e) + return nil +} + +func (h *adminSubscriptionCommandHandler) Events() []*eventproto.Event { + return h.events +} diff --git a/pkg/notification/command/admin_subscription_test.go b/pkg/notification/command/admin_subscription_test.go new file mode 100644 index 000000000..e7ad55b93 --- /dev/null +++ b/pkg/notification/command/admin_subscription_test.go @@ -0,0 +1,193 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package command + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/notification/domain" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" + proto "github.com/bucketeer-io/bucketeer/proto/notification" +) + +func TestAdminCreate(t *testing.T) { + patterns := []*struct { + input *proto.CreateAdminSubscriptionCommand + expected error + }{ + { + input: &proto.CreateAdminSubscriptionCommand{ + SourceTypes: []proto.Subscription_SourceType{ + proto.Subscription_DOMAIN_EVENT_ACCOUNT, + proto.Subscription_DOMAIN_EVENT_ADMIN_ACCOUNT, + }, + Recipient: &proto.Recipient{ + Type: proto.Recipient_SlackChannel, + SlackChannelRecipient: &proto.SlackChannelRecipient{WebhookUrl: "url"}, + }, + }, + expected: nil, + }, + } + for _, p := range patterns { + s := newSubscription(t, false) + h := newAdminSubscriptionCommandHandler(t, s) + err := h.Handle(context.Background(), p.input) + assert.Equal(t, p.expected, err) + assert.Equal(t, 1, len(h.Events())) + } +} + +func TestAdminDelete(t *testing.T) { + patterns := []*struct { + input *proto.DeleteAdminSubscriptionCommand + expected error + }{ + { + input: &proto.DeleteAdminSubscriptionCommand{}, + expected: nil, + }, + } + for _, p := range patterns { + s := newSubscription(t, false) + h := newAdminSubscriptionCommandHandler(t, s) + err := h.Handle(context.Background(), p.input) + assert.Equal(t, p.expected, err) + assert.Equal(t, 1, len(h.Events())) + } +} + +func TestAdminAddSourceTypes(t *testing.T) { + patterns := []*struct { + input *proto.AddAdminSubscriptionSourceTypesCommand + expected error + }{ + { + input: &proto.AddAdminSubscriptionSourceTypesCommand{ + SourceTypes: []proto.Subscription_SourceType{ + proto.Subscription_DOMAIN_EVENT_FEATURE, + }, + }, + expected: nil, + }, + } + for _, p := range patterns { + s := newSubscription(t, false) + h := 
newAdminSubscriptionCommandHandler(t, s) + err := h.Handle(context.Background(), p.input) + assert.Equal(t, p.expected, err) + assert.Equal(t, 1, len(h.Events())) + } +} + +func TestAdminDeleteSourceTypes(t *testing.T) { + patterns := []*struct { + input *proto.DeleteAdminSubscriptionSourceTypesCommand + expected error + }{ + { + input: &proto.DeleteAdminSubscriptionSourceTypesCommand{ + SourceTypes: []proto.Subscription_SourceType{ + proto.Subscription_DOMAIN_EVENT_ADMIN_ACCOUNT, + }, + }, + expected: nil, + }, + } + for _, p := range patterns { + s := newSubscription(t, false) + h := newAdminSubscriptionCommandHandler(t, s) + err := h.Handle(context.Background(), p.input) + assert.Equal(t, p.expected, err) + assert.Equal(t, 1, len(h.Events())) + } +} + +func TestAdminEnable(t *testing.T) { + patterns := []*struct { + originDisabled bool + input *proto.EnableAdminSubscriptionCommand + expected error + }{ + { + originDisabled: true, + input: &proto.EnableAdminSubscriptionCommand{}, + expected: nil, + }, + } + for _, p := range patterns { + s := newSubscription(t, p.originDisabled) + h := newAdminSubscriptionCommandHandler(t, s) + err := h.Handle(context.Background(), p.input) + assert.Equal(t, p.expected, err) + assert.Equal(t, 1, len(h.Events())) + } +} + +func TestAdminDisable(t *testing.T) { + patterns := []*struct { + originDisabled bool + input *proto.DisableAdminSubscriptionCommand + expected error + }{ + { + originDisabled: false, + input: &proto.DisableAdminSubscriptionCommand{}, + expected: nil, + }, + } + for _, p := range patterns { + s := newSubscription(t, p.originDisabled) + h := newAdminSubscriptionCommandHandler(t, s) + err := h.Handle(context.Background(), p.input) + assert.Equal(t, p.expected, err) + assert.Equal(t, 1, len(h.Events())) + } +} + +func TestAdminRename(t *testing.T) { + patterns := []*struct { + input *proto.RenameAdminSubscriptionCommand + expected error + }{ + { + input: &proto.RenameAdminSubscriptionCommand{Name: "renamed"}, + 
expected: nil, + }, + } + for _, p := range patterns { + s := newSubscription(t, false) + h := newAdminSubscriptionCommandHandler(t, s) + err := h.Handle(context.Background(), p.input) + assert.Equal(t, p.expected, err) + assert.Equal(t, 1, len(h.Events())) + } +} + +func newAdminSubscriptionCommandHandler(t *testing.T, subscription *domain.Subscription) Handler { + t.Helper() + return NewAdminSubscriptionCommandHandler( + &eventproto.Editor{ + Email: "email", + Role: accountproto.Account_OWNER, + IsAdmin: true, + }, + subscription, + ) +} diff --git a/pkg/notification/command/command.go b/pkg/notification/command/command.go new file mode 100644 index 000000000..35d9c209f --- /dev/null +++ b/pkg/notification/command/command.go @@ -0,0 +1,33 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package command + +import ( + "context" + "errors" + + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +var ( + errUnknownCommand = errors.New("command: unknown command") +) + +type Command interface{} + +type Handler interface { + Handle(ctx context.Context, cmd Command) error + Events() []*eventproto.Event +} diff --git a/pkg/notification/command/subscription.go b/pkg/notification/command/subscription.go new file mode 100644 index 000000000..a54c3c0b3 --- /dev/null +++ b/pkg/notification/command/subscription.go @@ -0,0 +1,157 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package command + +import ( + "context" + + pb "github.com/golang/protobuf/proto" // nolint:staticcheck + + domainevent "github.com/bucketeer-io/bucketeer/pkg/domainevent/domain" + "github.com/bucketeer-io/bucketeer/pkg/notification/domain" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" + proto "github.com/bucketeer-io/bucketeer/proto/notification" +) + +type subscriptionCommandHandler struct { + editor *eventproto.Editor + subscription *domain.Subscription + environmentNamespace string + events []*eventproto.Event +} + +func NewSubscriptionCommandHandler( + editor *eventproto.Editor, + subscription *domain.Subscription, + environmentNamespace string) Handler { + return &subscriptionCommandHandler{ + editor: editor, + subscription: subscription, + environmentNamespace: environmentNamespace, + events: []*eventproto.Event{}, + } +} + +// for unit test +func NewEmptySubscriptionCommandHandler() Handler { + return &subscriptionCommandHandler{} +} + +func (h *subscriptionCommandHandler) Handle(ctx context.Context, cmd Command) error { + switch c := cmd.(type) { + case *proto.CreateSubscriptionCommand: + return h.create(ctx, c) + case *proto.DeleteSubscriptionCommand: + return h.delete(ctx, c) + case *proto.AddSourceTypesCommand: + return h.addSourceTypes(ctx, c) + case *proto.DeleteSourceTypesCommand: + return h.deleteSourceTypes(ctx, c) + case *proto.EnableSubscriptionCommand: + return 
h.enable(ctx, c) + case *proto.DisableSubscriptionCommand: + return h.disable(ctx, c) + case *proto.RenameSubscriptionCommand: + return h.rename(ctx, c) + } + return errUnknownCommand +} + +func (h *subscriptionCommandHandler) create(ctx context.Context, cmd *proto.CreateSubscriptionCommand) error { + return h.createEvent(ctx, eventproto.Event_SUBSCRIPTION_CREATED, &eventproto.SubscriptionCreatedEvent{ + SourceTypes: h.subscription.SourceTypes, + Recipient: h.subscription.Recipient, + Name: h.subscription.Name, + }) +} + +func (h *subscriptionCommandHandler) delete(ctx context.Context, cmd *proto.DeleteSubscriptionCommand) error { + return h.createEvent(ctx, eventproto.Event_SUBSCRIPTION_DELETED, &eventproto.SubscriptionDeletedEvent{}) +} + +func (h *subscriptionCommandHandler) addSourceTypes(ctx context.Context, cmd *proto.AddSourceTypesCommand) error { + err := h.subscription.AddSourceTypes(cmd.SourceTypes) + if err != nil { + return err + } + return h.createEvent( + ctx, + eventproto.Event_SUBSCRIPTION_SOURCE_TYPE_ADDED, + &eventproto.SubscriptionSourceTypesAddedEvent{ + SourceTypes: cmd.SourceTypes, + }, + ) +} + +func (h *subscriptionCommandHandler) deleteSourceTypes(ctx context.Context, cmd *proto.DeleteSourceTypesCommand) error { + err := h.subscription.DeleteSourceTypes(cmd.SourceTypes) + if err != nil { + return err + } + return h.createEvent( + ctx, + eventproto.Event_SUBSCRIPTION_SOURCE_TYPE_DELETED, + &eventproto.SubscriptionSourceTypesDeletedEvent{ + SourceTypes: cmd.SourceTypes, + }, + ) +} + +func (h *subscriptionCommandHandler) enable(ctx context.Context, cmd *proto.EnableSubscriptionCommand) error { + if err := h.subscription.Enable(); err != nil { + return err + } + return h.createEvent(ctx, eventproto.Event_SUBSCRIPTION_ENABLED, &eventproto.SubscriptionEnabledEvent{}) +} + +func (h *subscriptionCommandHandler) disable(ctx context.Context, cmd *proto.DisableSubscriptionCommand) error { + err := h.subscription.Disable() + if err != nil { + return 
err + } + return h.createEvent(ctx, eventproto.Event_SUBSCRIPTION_DISABLED, &eventproto.SubscriptionDisabledEvent{}) +} + +func (h *subscriptionCommandHandler) rename(ctx context.Context, cmd *proto.RenameSubscriptionCommand) error { + err := h.subscription.Rename(cmd.Name) + if err != nil { + return err + } + return h.createEvent(ctx, eventproto.Event_SUBSCRIPTION_RENAMED, &eventproto.SubscriptionRenamedEvent{Name: cmd.Name}) +} + +func (h *subscriptionCommandHandler) createEvent( + ctx context.Context, + eventType eventproto.Event_Type, + event pb.Message, +) error { + e, err := domainevent.NewEvent( + h.editor, + eventproto.Event_SUBSCRIPTION, + h.subscription.Id, + eventType, + event, + h.environmentNamespace, + ) + if err != nil { + return err + } + h.events = append(h.events, e) + return nil +} + +func (h *subscriptionCommandHandler) Events() []*eventproto.Event { + return h.events +} diff --git a/pkg/notification/command/subscription_test.go b/pkg/notification/command/subscription_test.go new file mode 100644 index 000000000..f462d6794 --- /dev/null +++ b/pkg/notification/command/subscription_test.go @@ -0,0 +1,209 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package command + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/bucketeer-io/bucketeer/pkg/notification/domain" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" + proto "github.com/bucketeer-io/bucketeer/proto/notification" +) + +func TestCreate(t *testing.T) { + patterns := []*struct { + input *proto.CreateSubscriptionCommand + expected error + }{ + { + input: &proto.CreateSubscriptionCommand{ + SourceTypes: []proto.Subscription_SourceType{ + proto.Subscription_DOMAIN_EVENT_ACCOUNT, + proto.Subscription_DOMAIN_EVENT_ADMIN_ACCOUNT, + }, + Recipient: &proto.Recipient{ + Type: proto.Recipient_SlackChannel, + SlackChannelRecipient: &proto.SlackChannelRecipient{WebhookUrl: "url"}, + }, + }, + expected: nil, + }, + } + for _, p := range patterns { + s := newSubscription(t, false) + h := newSubscriptionCommandHandler(t, s) + err := h.Handle(context.Background(), p.input) + assert.Equal(t, p.expected, err) + assert.Equal(t, 1, len(h.Events())) + } +} + +func TestDelete(t *testing.T) { + patterns := []*struct { + input *proto.DeleteSubscriptionCommand + expected error + }{ + { + input: &proto.DeleteSubscriptionCommand{}, + expected: nil, + }, + } + for _, p := range patterns { + s := newSubscription(t, false) + h := newSubscriptionCommandHandler(t, s) + err := h.Handle(context.Background(), p.input) + assert.Equal(t, p.expected, err) + assert.Equal(t, 1, len(h.Events())) + } +} + +func TestAddSourceTypes(t *testing.T) { + patterns := []*struct { + input *proto.AddSourceTypesCommand + expected error + }{ + { + input: &proto.AddSourceTypesCommand{ + SourceTypes: []proto.Subscription_SourceType{ + proto.Subscription_DOMAIN_EVENT_FEATURE, + }, + }, + expected: nil, + }, + } + for _, p := range patterns { + s := newSubscription(t, false) + h := newSubscriptionCommandHandler(t, s) + err := 
h.Handle(context.Background(), p.input) + assert.Equal(t, p.expected, err) + assert.Equal(t, 1, len(h.Events())) + } +} + +func TestDeleteSourceTypes(t *testing.T) { + patterns := []*struct { + input *proto.DeleteSourceTypesCommand + expected error + }{ + { + input: &proto.DeleteSourceTypesCommand{ + SourceTypes: []proto.Subscription_SourceType{ + proto.Subscription_DOMAIN_EVENT_ADMIN_ACCOUNT, + }, + }, + expected: nil, + }, + } + for _, p := range patterns { + s := newSubscription(t, false) + h := newSubscriptionCommandHandler(t, s) + err := h.Handle(context.Background(), p.input) + assert.Equal(t, p.expected, err) + assert.Equal(t, 1, len(h.Events())) + } +} + +func TestEnable(t *testing.T) { + patterns := []*struct { + originDisabled bool + input *proto.EnableSubscriptionCommand + expected error + }{ + { + originDisabled: true, + input: &proto.EnableSubscriptionCommand{}, + expected: nil, + }, + } + for _, p := range patterns { + s := newSubscription(t, p.originDisabled) + h := newSubscriptionCommandHandler(t, s) + err := h.Handle(context.Background(), p.input) + assert.Equal(t, p.expected, err) + assert.Equal(t, 1, len(h.Events())) + } +} + +func TestDisable(t *testing.T) { + patterns := []*struct { + originDisabled bool + input *proto.DisableSubscriptionCommand + expected error + }{ + { + originDisabled: false, + input: &proto.DisableSubscriptionCommand{}, + expected: nil, + }, + } + for _, p := range patterns { + s := newSubscription(t, p.originDisabled) + h := newSubscriptionCommandHandler(t, s) + err := h.Handle(context.Background(), p.input) + assert.Equal(t, p.expected, err) + assert.Equal(t, 1, len(h.Events())) + } +} + +func TestRename(t *testing.T) { + patterns := []*struct { + input *proto.RenameSubscriptionCommand + expected error + }{ + { + input: &proto.RenameSubscriptionCommand{Name: "renamed"}, + expected: nil, + }, + } + for _, p := range patterns { + s := newSubscription(t, false) + h := newSubscriptionCommandHandler(t, s) + err := 
h.Handle(context.Background(), p.input) + assert.Equal(t, p.expected, err) + assert.Equal(t, 1, len(h.Events())) + } +} + +func newSubscription(t *testing.T, disabled bool) *domain.Subscription { + sourceTypes := []proto.Subscription_SourceType{ + proto.Subscription_DOMAIN_EVENT_ACCOUNT, + proto.Subscription_DOMAIN_EVENT_ADMIN_ACCOUNT, + } + recipient := &proto.Recipient{ + Type: proto.Recipient_SlackChannel, + SlackChannelRecipient: &proto.SlackChannelRecipient{WebhookUrl: "url"}, + } + s, err := domain.NewSubscription("sname", sourceTypes, recipient) + require.NoError(t, err) // must check err before dereferencing s: s is nil on error + s.Disabled = disabled + return s +} + +func newSubscriptionCommandHandler(t *testing.T, subscription *domain.Subscription) Handler { + t.Helper() + return NewSubscriptionCommandHandler( + &eventproto.Editor{ + Email: "email", + Role: accountproto.Account_EDITOR, + }, + subscription, + "ns0", + ) +} diff --git a/pkg/notification/domain/BUILD.bazel b/pkg/notification/domain/BUILD.bazel new file mode 100644 index 000000000..82ec1c2ab --- /dev/null +++ b/pkg/notification/domain/BUILD.bazel @@ -0,0 +1,19 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["subscription.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/notification/domain", + visibility = ["//visibility:public"], + deps = ["//proto/notification:go_default_library"], +) + +go_test( + name = "go_default_test", + srcs = ["subscription_test.go"], + embed = [":go_default_library"], + deps = [ + "//proto/notification:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + ], +) diff --git a/pkg/notification/domain/subscription.go b/pkg/notification/domain/subscription.go new file mode 100644 index 000000000..6c9199c01 --- /dev/null +++ b/pkg/notification/domain/subscription.go @@ -0,0 +1,146 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package domain + +import ( + "crypto/sha256" + "encoding/hex" + "errors" + "sort" + "time" + + proto "github.com/bucketeer-io/bucketeer/proto/notification" +) + +var ( + ErrUnknownRecipient = errors.New("subscription: unknown recipient") + ErrSourceTypesMustHaveAtLeastOne = errors.New("subscription: notification types must have at least one") + ErrSourceTypeNotFound = errors.New("subscription: notification not found") + ErrAlreadyEnabled = errors.New("subscription: already enabled") + ErrAlreadyDisabled = errors.New("subscription: already disabled") +) + +type Subscription struct { + *proto.Subscription +} + +func NewSubscription( + name string, + sourceTypes []proto.Subscription_SourceType, + recipient *proto.Recipient) (*Subscription, error) { + + sid, err := ID(recipient) + if err != nil { + return nil, err + } + now := time.Now().Unix() + s := &Subscription{&proto.Subscription{ + Name: name, + CreatedAt: now, + UpdatedAt: now, + Id: sid, + SourceTypes: sourceTypes, + Recipient: recipient, + }} + return s, nil +} + +func ID(recipient *proto.Recipient) (string, error) { + if recipient.Type == proto.Recipient_SlackChannel { + return SlackChannelRecipientID(recipient.SlackChannelRecipient.WebhookUrl), nil + } + return "", ErrUnknownRecipient +} + +func SlackChannelRecipientID(webhookURL string) string { + hashed := sha256.Sum256([]byte(webhookURL)) + return hex.EncodeToString(hashed[:]) +} + +func (s 
*Subscription) Enable() error { + if !s.Disabled { + return ErrAlreadyEnabled + } + s.Disabled = false + s.UpdatedAt = time.Now().Unix() + return nil +} + +func (s *Subscription) Disable() error { + if s.Disabled { + return ErrAlreadyDisabled + } + s.Disabled = true + s.UpdatedAt = time.Now().Unix() + return nil +} + +func (s *Subscription) Rename(name string) error { + s.Name = name + s.UpdatedAt = time.Now().Unix() + return nil +} + +func (s *Subscription) AddSourceTypes(sourceTypes []proto.Subscription_SourceType) error { + for _, nt := range sourceTypes { + if containsSourceType(nt, s.SourceTypes) { + continue + } + s.SourceTypes = append(s.SourceTypes, nt) + } + sortSourceType(s.SourceTypes) + s.UpdatedAt = time.Now().Unix() + return nil +} + +func (s *Subscription) DeleteSourceTypes(sourceTypes []proto.Subscription_SourceType) error { + if len(s.SourceTypes) <= len(sourceTypes) { // at least one source type must remain after deletion + return ErrSourceTypesMustHaveAtLeastOne + } + for _, nt := range sourceTypes { + idx, err := indexSourceType(nt, s.SourceTypes) + if err != nil { + return err + } + s.SourceTypes = append(s.SourceTypes[:idx], s.SourceTypes[idx+1:]...) 
+ } + sortSourceType(s.SourceTypes) + s.UpdatedAt = time.Now().Unix() + return nil +} + +func indexSourceType(needle proto.Subscription_SourceType, haystack []proto.Subscription_SourceType) (int, error) { + for i := range haystack { + if haystack[i] == needle { + return i, nil + } + } + return -1, ErrSourceTypeNotFound +} + +func containsSourceType(needle proto.Subscription_SourceType, haystack []proto.Subscription_SourceType) bool { + for i := range haystack { + if haystack[i] == needle { + return true + } + } + return false +} + +func sortSourceType(sourceTypes []proto.Subscription_SourceType) { + sort.Slice(sourceTypes, func(i, j int) bool { + return sourceTypes[i] < sourceTypes[j] + }) +} diff --git a/pkg/notification/domain/subscription_test.go b/pkg/notification/domain/subscription_test.go new file mode 100644 index 000000000..752800ed2 --- /dev/null +++ b/pkg/notification/domain/subscription_test.go @@ -0,0 +1,91 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package domain + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + proto "github.com/bucketeer-io/bucketeer/proto/notification" +) + +func TestNewNotification(t *testing.T) { + t.Parallel() + sourceTypes := []proto.Subscription_SourceType{ + proto.Subscription_DOMAIN_EVENT_ACCOUNT, + proto.Subscription_DOMAIN_EVENT_ADMIN_ACCOUNT, + } + recipient := &proto.Recipient{ + Type: proto.Recipient_SlackChannel, + SlackChannelRecipient: &proto.SlackChannelRecipient{WebhookUrl: "url"}, + } + name := "sname" + actual, err := NewSubscription(name, sourceTypes, recipient) + assert.NoError(t, err) + assert.IsType(t, &Subscription{}, actual) + assert.NotEqual(t, "", actual.Id) + assert.Equal(t, sourceTypes, actual.SourceTypes) + assert.Equal(t, recipient, actual.Recipient) + assert.Equal(t, false, actual.Disabled) + assert.NotEqual(t, int64(0), actual.CreatedAt) // int64(0): untyped 0 is int, so NotEqual would always pass + assert.NotEqual(t, int64(0), actual.UpdatedAt) // int64(0): same type-sensitive comparison fix + assert.Equal(t, name, actual.Name) +} + +func TestDisable(t *testing.T) { + t.Parallel() + actual := &Subscription{&proto.Subscription{Disabled: false}} + actual.Disable() + assert.Equal(t, true, actual.Disabled) +} + +func TestEnable(t *testing.T) { + t.Parallel() + actual := &Subscription{&proto.Subscription{Disabled: true}} + actual.Enable() + assert.Equal(t, false, actual.Disabled) +} + +func TestAddSourceTypes(t *testing.T) { + t.Parallel() + patterns := map[string]struct { + origin *Subscription + input []proto.Subscription_SourceType + expectedErr error + expected []proto.Subscription_SourceType + }{ + "success: one": { + origin: &Subscription{&proto.Subscription{SourceTypes: []proto.Subscription_SourceType{ + proto.Subscription_DOMAIN_EVENT_ACCOUNT, + proto.Subscription_DOMAIN_EVENT_ADMIN_ACCOUNT, + }}}, + input: []proto.Subscription_SourceType{proto.Subscription_DOMAIN_EVENT_FEATURE}, + expectedErr: nil, + expected: []proto.Subscription_SourceType{ + proto.Subscription_DOMAIN_EVENT_FEATURE, + proto.Subscription_DOMAIN_EVENT_ACCOUNT, + 
proto.Subscription_DOMAIN_EVENT_ADMIN_ACCOUNT, + }, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + err := p.origin.AddSourceTypes(p.input) + assert.Equal(t, p.expectedErr, err) + assert.Equal(t, p.expected, p.origin.SourceTypes) + }) + } +} diff --git a/pkg/notification/sender/BUILD.bazel b/pkg/notification/sender/BUILD.bazel new file mode 100644 index 000000000..0cadc98ff --- /dev/null +++ b/pkg/notification/sender/BUILD.bazel @@ -0,0 +1,38 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "metrics.go", + "sender.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/notification/sender", + visibility = ["//visibility:public"], + deps = [ + "//pkg/metrics:go_default_library", + "//pkg/notification/client:go_default_library", + "//pkg/notification/sender/notifier:go_default_library", + "//proto/notification:go_default_library", + "//proto/notification/sender:go_default_library", + "@com_github_prometheus_client_golang//prometheus:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["sender_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/log:go_default_library", + "//pkg/notification/client/mock:go_default_library", + "//pkg/notification/sender/notifier:go_default_library", + "//pkg/notification/sender/notifier/mock:go_default_library", + "//pkg/storage:go_default_library", + "//proto/notification:go_default_library", + "//proto/notification/sender:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + ], +) diff --git a/pkg/notification/sender/informer/BUILD.bazel b/pkg/notification/sender/informer/BUILD.bazel new file mode 100644 index 000000000..916e3b328 --- /dev/null +++ b/pkg/notification/sender/informer/BUILD.bazel 
@@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["informer.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/notification/sender/informer", + visibility = ["//visibility:public"], + deps = ["//pkg/health:go_default_library"], +) diff --git a/pkg/notification/sender/informer/batch/BUILD.bazel b/pkg/notification/sender/informer/batch/BUILD.bazel new file mode 100644 index 000000000..ae992f29c --- /dev/null +++ b/pkg/notification/sender/informer/batch/BUILD.bazel @@ -0,0 +1,15 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["job.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/notification/sender/informer/batch", + visibility = ["//visibility:public"], + deps = [ + "//pkg/health:go_default_library", + "//pkg/job:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/notification/sender/informer:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/notification/sender/informer/batch/job.go b/pkg/notification/sender/informer/batch/job.go new file mode 100644 index 000000000..1c0d15435 --- /dev/null +++ b/pkg/notification/sender/informer/batch/job.go @@ -0,0 +1,115 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package batch + +import ( + "context" + + "go.uber.org/zap" + + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/job" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/notification/sender/informer" +) + +type options struct { + metrics metrics.Registerer + logger *zap.Logger +} + +var defaultOptions = options{ + logger: zap.NewNop(), +} + +type Option func(*options) + +func WithMetrics(r metrics.Registerer) Option { + return func(opts *options) { + opts.metrics = r + } +} + +func WithLogger(logger *zap.Logger) Option { + return func(opts *options) { + opts.logger = logger + } +} + +type jobInformer struct { + manager *job.Manager + opts *options + logger *zap.Logger + ctx context.Context + cancel func() + doneCh chan struct{} +} + +type Job struct { + Name string + Cron string + Job job.Job +} + +func NewJobInformer( + jobs []*Job, + opts ...Option) (informer.Informer, error) { + + ctx, cancel := context.WithCancel(context.Background()) + options := defaultOptions + for _, opt := range opts { + opt(&options) + } + manager := job.NewManager(options.metrics, "ops_event_batch", options.logger) + cji := &jobInformer{ + manager: manager, + opts: &options, + logger: options.logger.Named("sender"), + ctx: ctx, + cancel: cancel, + doneCh: make(chan struct{}), + } + if err := cji.registerJobs(jobs); err != nil { + return nil, err + } + return cji, nil +} + +func (i *jobInformer) Run() error { + return i.manager.Run() +} + +func (i *jobInformer) Stop() { + i.manager.Stop() +} + +// Check always returns healthy status. +// TODO: Implement Check() on job.Manager and do it here too. 
+func (i *jobInformer) Check(ctx context.Context) health.Status { + return health.Healthy +} + +func (i *jobInformer) registerJobs(jobs []*Job) error { + for _, j := range jobs { + if err := i.manager.AddCronJob(j.Name, j.Cron, j.Job); err != nil { + i.logger.Error("Failed to add cron job", + zap.String("name", j.Name), + zap.String("cron", j.Cron), + zap.Error(err)) + return err + } + } + return nil +} diff --git a/pkg/notification/sender/informer/batch/job/BUILD.bazel b/pkg/notification/sender/informer/batch/job/BUILD.bazel new file mode 100644 index 000000000..a48743c65 --- /dev/null +++ b/pkg/notification/sender/informer/batch/job/BUILD.bazel @@ -0,0 +1,56 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "experiment_running_watcher.go", + "feature_watcher.go", + "job.go", + "mau_count_watcher.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/notification/sender/informer/batch/job", + visibility = ["//visibility:public"], + deps = [ + "//pkg/environment/client:go_default_library", + "//pkg/eventcounter/client:go_default_library", + "//pkg/experiment/client:go_default_library", + "//pkg/feature/client:go_default_library", + "//pkg/feature/domain:go_default_library", + "//pkg/job:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/notification/sender:go_default_library", + "//pkg/uuid:go_default_library", + "//proto/environment:go_default_library", + "//proto/eventcounter:go_default_library", + "//proto/experiment:go_default_library", + "//proto/feature:go_default_library", + "//proto/notification:go_default_library", + "//proto/notification/sender:go_default_library", + "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "experiment_running_watcher_test.go", + "feature_watcher_test.go", + "mau_count_watcher_test.go", + ], + embed = [":go_default_library"], 
+ deps = [ + "//pkg/environment/client/mock:go_default_library", + "//pkg/eventcounter/client/mock:go_default_library", + "//pkg/experiment/client/mock:go_default_library", + "//pkg/feature/client/mock:go_default_library", + "//pkg/notification/sender/mock:go_default_library", + "//proto/environment:go_default_library", + "//proto/eventcounter:go_default_library", + "//proto/experiment:go_default_library", + "//proto/feature:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/notification/sender/informer/batch/job/experiment_running_watcher.go b/pkg/notification/sender/informer/batch/job/experiment_running_watcher.go new file mode 100644 index 000000000..9e7374b8c --- /dev/null +++ b/pkg/notification/sender/informer/batch/job/experiment_running_watcher.go @@ -0,0 +1,158 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package job + +import ( + "context" + "time" + + wrappersproto "github.com/golang/protobuf/ptypes/wrappers" + "go.uber.org/zap" + + environmentclient "github.com/bucketeer-io/bucketeer/pkg/environment/client" + experimentclient "github.com/bucketeer-io/bucketeer/pkg/experiment/client" + "github.com/bucketeer-io/bucketeer/pkg/job" + "github.com/bucketeer-io/bucketeer/pkg/notification/sender" + "github.com/bucketeer-io/bucketeer/pkg/uuid" + environmentproto "github.com/bucketeer-io/bucketeer/proto/environment" + experimentproto "github.com/bucketeer-io/bucketeer/proto/experiment" + notificationproto "github.com/bucketeer-io/bucketeer/proto/notification" + senderproto "github.com/bucketeer-io/bucketeer/proto/notification/sender" +) + +type ExperimentRunningWatcher struct { + environmentClient environmentclient.Client + experimentClient experimentclient.Client + sender sender.Sender + opts *options + logger *zap.Logger +} + +func NewExperimentRunningWatcher( + environmentClient environmentclient.Client, + experimentClient experimentclient.Client, + sender sender.Sender, + opts ...Option) job.Job { + + dopts := &options{ + timeout: 5 * time.Minute, + logger: zap.NewNop(), + } + for _, opt := range opts { + opt(dopts) + } + return &ExperimentRunningWatcher{ + environmentClient: environmentClient, + experimentClient: experimentClient, + sender: sender, + opts: dopts, + logger: dopts.logger.Named("experiment-result-watcher"), + } +} + +func (w *ExperimentRunningWatcher) Run(ctx context.Context) (lastErr error) { + ctx, cancel := context.WithTimeout(ctx, w.opts.timeout) + defer cancel() + environments, err := w.listEnvironments(ctx) + if err != nil { + return err + } + for _, env := range environments { + experiments, err := w.listExperiments(ctx, env.Namespace) + if err != nil { + return err + } + if len(experiments) == 0 { + continue + } + ne, err := w.createNotificationEvent(env, experiments) + if err != nil { + lastErr = err + } + if err := w.sender.Send(ctx, ne); 
err != nil { + lastErr = err + } + } + return +} + +func (w *ExperimentRunningWatcher) createNotificationEvent( + environment *environmentproto.Environment, + experiments []*experimentproto.Experiment, +) (*senderproto.NotificationEvent, error) { + id, err := uuid.NewUUID() + if err != nil { + return nil, err + } + ne := &senderproto.NotificationEvent{ + Id: id.String(), + EnvironmentNamespace: environment.Namespace, + SourceType: notificationproto.Subscription_EXPERIMENT_RUNNING, + Notification: &senderproto.Notification{ + Type: senderproto.Notification_ExperimentRunning, + ExperimentRunningNotification: &senderproto.ExperimentRunningNotification{ + EnvironmentId: environment.Id, + Experiments: experiments, + }, + }, + IsAdminEvent: false, + } + return ne, nil +} + +func (w *ExperimentRunningWatcher) listEnvironments(ctx context.Context) ([]*environmentproto.Environment, error) { + environments := []*environmentproto.Environment{} + cursor := "" + for { + resp, err := w.environmentClient.ListEnvironments(ctx, &environmentproto.ListEnvironmentsRequest{ + PageSize: listRequestSize, + Cursor: cursor, + }) + if err != nil { + return nil, err + } + environments = append(environments, resp.Environments...) + environmentSize := len(resp.Environments) + if environmentSize == 0 || environmentSize < listRequestSize { + return environments, nil + } + cursor = resp.Cursor + } +} + +func (w *ExperimentRunningWatcher) listExperiments( + ctx context.Context, + environmentNamespace string, +) ([]*experimentproto.Experiment, error) { + experiments := []*experimentproto.Experiment{} + cursor := "" + for { + resp, err := w.experimentClient.ListExperiments(ctx, &experimentproto.ListExperimentsRequest{ + PageSize: listRequestSize, + Cursor: cursor, + EnvironmentNamespace: environmentNamespace, + Status: &wrappersproto.Int32Value{Value: int32(experimentproto.Experiment_RUNNING)}, + }) + if err != nil { + return nil, err + } + experiments = append(experiments, resp.Experiments...) 
+ size := len(resp.Experiments) + if size == 0 || size < listRequestSize { + return experiments, nil + } + cursor = resp.Cursor + } +} diff --git a/pkg/notification/sender/informer/batch/job/experiment_running_watcher_test.go b/pkg/notification/sender/informer/batch/job/experiment_running_watcher_test.go new file mode 100644 index 000000000..08dda0fb5 --- /dev/null +++ b/pkg/notification/sender/informer/batch/job/experiment_running_watcher_test.go @@ -0,0 +1,104 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package job + +import ( + "context" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + + environmentclientmock "github.com/bucketeer-io/bucketeer/pkg/environment/client/mock" + experimentclientmock "github.com/bucketeer-io/bucketeer/pkg/experiment/client/mock" + sendermock "github.com/bucketeer-io/bucketeer/pkg/notification/sender/mock" + environmentproto "github.com/bucketeer-io/bucketeer/proto/environment" + experimentproto "github.com/bucketeer-io/bucketeer/proto/experiment" +) + +func TestCreateExperimentRunningNotification(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*testing.T, *ExperimentRunningWatcher) + expectedErr error + }{ + "no experiment": { + setup: func(t *testing.T, w *ExperimentRunningWatcher) { + w.environmentClient.(*environmentclientmock.MockClient).EXPECT().ListEnvironments( + gomock.Any(), gomock.Any()).Return( + &environmentproto.ListEnvironmentsResponse{ + Environments: []*environmentproto.Environment{{Id: "ns0", Namespace: "ns0"}}, + Cursor: "", + }, nil) + w.experimentClient.(*experimentclientmock.MockClient).EXPECT().ListExperiments( + gomock.Any(), gomock.Any()).Return( + &experimentproto.ListExperimentsResponse{ + Experiments: []*experimentproto.Experiment{}, + }, nil) + }, + }, + "experiments exist": { + setup: func(t *testing.T, w *ExperimentRunningWatcher) { + w.environmentClient.(*environmentclientmock.MockClient).EXPECT().ListEnvironments( + gomock.Any(), gomock.Any()).Return( + &environmentproto.ListEnvironmentsResponse{ + Environments: []*environmentproto.Environment{{Id: "ns0", Namespace: "ns0"}}, + Cursor: "", + }, nil) + w.experimentClient.(*experimentclientmock.MockClient).EXPECT().ListExperiments( + gomock.Any(), gomock.Any()).Return( + &experimentproto.ListExperimentsResponse{ + Experiments: []*experimentproto.Experiment{{ + Id: "eid", + Name: 
"ename", + }, { + Id: "eid1", + Name: "ename1", + }}, + }, nil) + w.sender.(*sendermock.MockSender).EXPECT().Send(gomock.Any(), gomock.Any()).Return(nil).Times(1) + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + w := newExperimentRunningWatcherWithMock(t, mockController) + if p.setup != nil { + p.setup(t, w) + } + err := w.Run(context.Background()) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func newExperimentRunningWatcherWithMock(t *testing.T, c *gomock.Controller) *ExperimentRunningWatcher { + t.Helper() + return &ExperimentRunningWatcher{ + environmentClient: environmentclientmock.NewMockClient(c), + experimentClient: experimentclientmock.NewMockClient(c), + sender: sendermock.NewMockSender(c), + logger: zap.NewNop(), + opts: &options{ + timeout: 5 * time.Minute, + }, + } +} diff --git a/pkg/notification/sender/informer/batch/job/feature_watcher.go b/pkg/notification/sender/informer/batch/job/feature_watcher.go new file mode 100644 index 000000000..91e78a3d6 --- /dev/null +++ b/pkg/notification/sender/informer/batch/job/feature_watcher.go @@ -0,0 +1,175 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package job + +import ( + "context" + "time" + + "go.uber.org/zap" + + "github.com/golang/protobuf/ptypes/wrappers" + + environmentclient "github.com/bucketeer-io/bucketeer/pkg/environment/client" + featureclient "github.com/bucketeer-io/bucketeer/pkg/feature/client" + featuredomain "github.com/bucketeer-io/bucketeer/pkg/feature/domain" + "github.com/bucketeer-io/bucketeer/pkg/job" + "github.com/bucketeer-io/bucketeer/pkg/notification/sender" + "github.com/bucketeer-io/bucketeer/pkg/uuid" + environmentproto "github.com/bucketeer-io/bucketeer/proto/environment" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" + notificationproto "github.com/bucketeer-io/bucketeer/proto/notification" + senderproto "github.com/bucketeer-io/bucketeer/proto/notification/sender" +) + +type featureWatcher struct { + environmentClient environmentclient.Client + featureClient featureclient.Client + sender sender.Sender + opts *options + logger *zap.Logger +} + +func NewFeatureWatcher( + environmentClient environmentclient.Client, + featureClient featureclient.Client, + sender sender.Sender, + opts ...Option) job.Job { + + dopts := &options{ + timeout: 5 * time.Minute, + logger: zap.NewNop(), + } + for _, opt := range opts { + opt(dopts) + } + return &featureWatcher{ + environmentClient: environmentClient, + featureClient: featureClient, + sender: sender, + opts: dopts, + logger: dopts.logger.Named("count-watcher"), + } +} + +func (w *featureWatcher) Run(ctx context.Context) (lastErr error) { + ctx, cancel := context.WithTimeout(ctx, w.opts.timeout) + defer cancel() + environments, err := w.listEnvironments(ctx) + if err != nil { + return err + } + for _, env := range environments { + features, err := w.listFeatures(ctx, env.Namespace) + if err != nil { + return err + } + staleFeatures := []*featureproto.Feature{} + for _, f := range features { + fd := &featuredomain.Feature{Feature: f} + now := time.Now() + stale := fd.IsStale(now) + if !stale { + continue + } + 
staleFeatures = append(staleFeatures, fd.Feature) + } + if len(staleFeatures) == 0 { + continue + } + ne, err := w.createNotificationEvent(env, staleFeatures) + if err != nil { + lastErr = err + } + if err := w.sender.Send(ctx, ne); err != nil { + lastErr = err + } + } + return +} + +func (w *featureWatcher) createNotificationEvent( + environment *environmentproto.Environment, + features []*featureproto.Feature, +) (*senderproto.NotificationEvent, error) { + id, err := uuid.NewUUID() + if err != nil { + return nil, err + } + ne := &senderproto.NotificationEvent{ + Id: id.String(), + EnvironmentNamespace: environment.Namespace, + SourceType: notificationproto.Subscription_FEATURE_STALE, + Notification: &senderproto.Notification{ + Type: senderproto.Notification_FeatureStale, + FeatureStaleNotification: &senderproto.FeatureStaleNotification{ + EnvironmentId: environment.Id, + Features: features, + }, + }, + IsAdminEvent: false, + } + return ne, nil +} + +func (w *featureWatcher) listEnvironments(ctx context.Context) ([]*environmentproto.Environment, error) { + environments := []*environmentproto.Environment{} + cursor := "" + for { + resp, err := w.environmentClient.ListEnvironments(ctx, &environmentproto.ListEnvironmentsRequest{ + PageSize: listRequestSize, + Cursor: cursor, + }) + if err != nil { + return nil, err + } + environments = append(environments, resp.Environments...) 
+ environmentSize := len(resp.Environments) + if environmentSize == 0 || environmentSize < listRequestSize { + return environments, nil + } + cursor = resp.Cursor + } +} + +func (w *featureWatcher) listFeatures( + ctx context.Context, + environmentNamespace string, +) ([]*featureproto.Feature, error) { + features := []*featureproto.Feature{} + cursor := "" + for { + resp, err := w.featureClient.ListFeatures(ctx, &featureproto.ListFeaturesRequest{ + PageSize: listRequestSize, + Cursor: cursor, + EnvironmentNamespace: environmentNamespace, + Archived: &wrappers.BoolValue{Value: false}, + }) + if err != nil { + return nil, err + } + for _, f := range resp.Features { + if !f.Enabled && f.OffVariation == "" { + continue + } + features = append(features, f) + } + featureSize := len(resp.Features) + if featureSize == 0 || featureSize < listRequestSize { + return features, nil + } + cursor = resp.Cursor + } +} diff --git a/pkg/notification/sender/informer/batch/job/feature_watcher_test.go b/pkg/notification/sender/informer/batch/job/feature_watcher_test.go new file mode 100644 index 000000000..7c9fbdf9e --- /dev/null +++ b/pkg/notification/sender/informer/batch/job/feature_watcher_test.go @@ -0,0 +1,134 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package job + +import ( + "context" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + + environmentclientmock "github.com/bucketeer-io/bucketeer/pkg/environment/client/mock" + featureclientmock "github.com/bucketeer-io/bucketeer/pkg/feature/client/mock" + sendermock "github.com/bucketeer-io/bucketeer/pkg/notification/sender/mock" + environmentproto "github.com/bucketeer-io/bucketeer/proto/environment" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +func TestCreateNotification(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*testing.T, *featureWatcher) + expectedErr error + }{ + "no featres": { + setup: func(t *testing.T, w *featureWatcher) { + w.environmentClient.(*environmentclientmock.MockClient).EXPECT().ListEnvironments( + gomock.Any(), gomock.Any()).Return( + &environmentproto.ListEnvironmentsResponse{ + Environments: []*environmentproto.Environment{{Id: "ns0", Namespace: "ns0"}}, + Cursor: "", + }, nil) + w.featureClient.(*featureclientmock.MockClient).EXPECT().ListFeatures( + gomock.Any(), gomock.Any()).Return( + &featureproto.ListFeaturesResponse{ + Features: []*featureproto.Feature{}, + }, nil) + }, + }, + "no stale featres": { + setup: func(t *testing.T, w *featureWatcher) { + w.environmentClient.(*environmentclientmock.MockClient).EXPECT().ListEnvironments( + gomock.Any(), gomock.Any()).Return( + &environmentproto.ListEnvironmentsResponse{ + Environments: []*environmentproto.Environment{{Id: "ns0", Namespace: "ns0"}}, + Cursor: "", + }, nil) + w.featureClient.(*featureclientmock.MockClient).EXPECT().ListFeatures( + gomock.Any(), gomock.Any()).Return( + &featureproto.ListFeaturesResponse{ + Features: []*featureproto.Feature{{ + Id: "fid", + Name: "fname", + Enabled: true, + LastUsedInfo: &featureproto.FeatureLastUsedInfo{ + LastUsedAt: time.Now().Unix(), + 
}, + }}, + }, nil) + }, + }, + "stale exists": { + setup: func(t *testing.T, w *featureWatcher) { + w.environmentClient.(*environmentclientmock.MockClient).EXPECT().ListEnvironments( + gomock.Any(), gomock.Any()).Return( + &environmentproto.ListEnvironmentsResponse{ + Environments: []*environmentproto.Environment{{Id: "ns0", Namespace: "ns0"}}, + Cursor: "", + }, nil) + w.featureClient.(*featureclientmock.MockClient).EXPECT().ListFeatures( + gomock.Any(), gomock.Any()).Return( + &featureproto.ListFeaturesResponse{ + Features: []*featureproto.Feature{{ + Id: "fid", + Name: "fname", + Enabled: true, + LastUsedInfo: &featureproto.FeatureLastUsedInfo{ + LastUsedAt: time.Now().Unix() - 120*24*60*60, + }, + }, { + Id: "fid1", + Name: "fname1", + Enabled: true, + LastUsedInfo: &featureproto.FeatureLastUsedInfo{ + LastUsedAt: time.Now().Unix() - 120*24*60*60, + }, + }}, + }, nil) + w.sender.(*sendermock.MockSender).EXPECT().Send(gomock.Any(), gomock.Any()).Return(nil) + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + w := newFeatureWatcherWithMock(t, mockController) + if p.setup != nil { + p.setup(t, w) + } + err := w.Run(context.Background()) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func newFeatureWatcherWithMock(t *testing.T, c *gomock.Controller) *featureWatcher { + t.Helper() + return &featureWatcher{ + environmentClient: environmentclientmock.NewMockClient(c), + featureClient: featureclientmock.NewMockClient(c), + sender: sendermock.NewMockSender(c), + logger: zap.NewNop(), + opts: &options{ + timeout: 5 * time.Minute, + }, + } +} diff --git a/pkg/notification/sender/informer/batch/job/job.go b/pkg/notification/sender/informer/batch/job/job.go new file mode 100644 index 000000000..93506c694 --- /dev/null +++ b/pkg/notification/sender/informer/batch/job/job.go @@ -0,0 +1,53 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package job + +import ( + "time" + + "go.uber.org/zap" + + "github.com/bucketeer-io/bucketeer/pkg/metrics" +) + +const ( + listRequestSize = 500 +) + +type options struct { + timeout time.Duration + metrics metrics.Registerer + logger *zap.Logger +} + +type Option func(*options) + +func WithTimeout(t time.Duration) Option { + return func(opts *options) { + opts.timeout = t + } +} + +func WithMetrics(r metrics.Registerer) Option { + return func(opts *options) { + opts.metrics = r + } +} + +func WithLogger(logger *zap.Logger) Option { + return func(opts *options) { + opts.logger = logger + } +} diff --git a/pkg/notification/sender/informer/batch/job/mau_count_watcher.go b/pkg/notification/sender/informer/batch/job/mau_count_watcher.go new file mode 100644 index 000000000..6b7b54d7b --- /dev/null +++ b/pkg/notification/sender/informer/batch/job/mau_count_watcher.go @@ -0,0 +1,214 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package job + +import ( + "context" + "time" + + "go.uber.org/zap" + + environmentclient "github.com/bucketeer-io/bucketeer/pkg/environment/client" + ecclient "github.com/bucketeer-io/bucketeer/pkg/eventcounter/client" + "github.com/bucketeer-io/bucketeer/pkg/job" + "github.com/bucketeer-io/bucketeer/pkg/notification/sender" + "github.com/bucketeer-io/bucketeer/pkg/uuid" + environmentproto "github.com/bucketeer-io/bucketeer/proto/environment" + ecproto "github.com/bucketeer-io/bucketeer/proto/eventcounter" + notificationproto "github.com/bucketeer-io/bucketeer/proto/notification" + senderproto "github.com/bucketeer-io/bucketeer/proto/notification/sender" +) + +var ( + jpLocation = time.FixedZone("Asia/Tokyo", 9*60*60) +) + +type mauCountWatcher struct { + environmentClient environmentclient.Client + eventCounterClient ecclient.Client + sender sender.Sender + opts *options + logger *zap.Logger +} + +func NewMAUCountWatcher( + environmentClient environmentclient.Client, + eventCounterClient ecclient.Client, + sender sender.Sender, + opts ...Option) job.Job { + dopts := &options{ + timeout: 5 * time.Minute, + logger: zap.NewNop(), + } + for _, opt := range opts { + opt(dopts) + } + return &mauCountWatcher{ + environmentClient: environmentClient, + eventCounterClient: eventCounterClient, + sender: sender, + opts: dopts, + logger: dopts.logger.Named("mau-count-watcher"), + } +} + +func (w *mauCountWatcher) Run(ctx context.Context) (lastErr error) { + ctx, cancel := context.WithTimeout(ctx, w.opts.timeout) + defer cancel() + projects, err := w.listProjects(ctx) + if err != nil { + return err + } + now := time.Now() + lastMonth := int32(now.AddDate(0, -1, 0).Month()) + startAt, endAt := w.getMAUInterval(now) + for _, pj := range projects { + environments, err := w.listEnvironments(ctx, pj.Id) + if err != nil { + return err + } + for _, env := range environments { 
+ eventCount, userCount, err := w.getUserCount(ctx, env.Namespace, startAt, endAt) + if err != nil { + return err + } + if err := w.sendNotification(ctx, env, eventCount, userCount, lastMonth); err != nil { + w.logger.Error("Failed to send notification", + zap.Error(err), + zap.String("projectId", pj.Id), + zap.String("environmentId", env.Id), + zap.Int64("eventCount", eventCount), + zap.Int64("userCount", userCount), + zap.Int32("lastMonth", lastMonth), + ) + lastErr = err + } + } + } + return +} + +func (w *mauCountWatcher) listProjects(ctx context.Context) ([]*environmentproto.Project, error) { + projects := []*environmentproto.Project{} + cursor := "" + for { + resp, err := w.environmentClient.ListProjects(ctx, &environmentproto.ListProjectsRequest{ + PageSize: listRequestSize, + Cursor: cursor, + }) + if err != nil { + return nil, err + } + projects = append(projects, resp.Projects...) + projectSize := len(resp.Projects) + if projectSize == 0 || projectSize < listRequestSize { + return projects, nil + } + cursor = resp.Cursor + } +} + +func (w *mauCountWatcher) getMAUInterval(now time.Time) (startAt int64, endAt int64) { + currentYear, currentMonth, _ := now.In(jpLocation).Date() + startAt = time.Date(currentYear, currentMonth-1, 1, 0, 0, 0, 0, jpLocation).Unix() + endAt = time.Date(currentYear, currentMonth, 1, 0, 0, 0, 0, jpLocation).Unix() + return +} + +func (w *mauCountWatcher) listEnvironments( + ctx context.Context, + projectID string, +) ([]*environmentproto.Environment, error) { + environments := []*environmentproto.Environment{} + cursor := "" + for { + resp, err := w.environmentClient.ListEnvironments(ctx, &environmentproto.ListEnvironmentsRequest{ + PageSize: listRequestSize, + Cursor: cursor, + ProjectId: projectID, + }) + if err != nil { + return nil, err + } + environments = append(environments, resp.Environments...) 
+ environmentSize := len(resp.Environments) + if environmentSize == 0 || environmentSize < listRequestSize { + return environments, nil + } + cursor = resp.Cursor + } +} + +func (w *mauCountWatcher) getUserCount( + ctx context.Context, + environmentNamespace string, + startAt, endAt int64, +) (eventCount, userCount int64, err error) { + resp, e := w.eventCounterClient.GetUserCountV2(ctx, &ecproto.GetUserCountV2Request{ + EnvironmentNamespace: environmentNamespace, + StartAt: startAt, + EndAt: endAt, + }) + if e != nil { + err = e + return + } + eventCount = resp.EventCount + userCount = resp.UserCount + return +} + +func (w *mauCountWatcher) sendNotification( + ctx context.Context, + environment *environmentproto.Environment, + eventCount, userCount int64, + month int32, +) error { + ne, err := w.createNotificationEvent(environment, eventCount, userCount, month) + if err != nil { + return err + } + if err := w.sender.Send(ctx, ne); err != nil { + return err + } + return nil +} + +func (w *mauCountWatcher) createNotificationEvent( + environment *environmentproto.Environment, + eventCount, userCount int64, + month int32, +) (*senderproto.NotificationEvent, error) { + id, err := uuid.NewUUID() + if err != nil { + return nil, err + } + ne := &senderproto.NotificationEvent{ + Id: id.String(), + EnvironmentNamespace: environment.Namespace, + SourceType: notificationproto.Subscription_MAU_COUNT, + Notification: &senderproto.Notification{ + Type: senderproto.Notification_MauCount, + MauCountNotification: &senderproto.MauCountNotification{ + EnvironmentId: environment.Id, + EventCount: eventCount, + UserCount: userCount, + Month: month, + }, + }, + IsAdminEvent: false, + } + return ne, nil +} diff --git a/pkg/notification/sender/informer/batch/job/mau_count_watcher_test.go b/pkg/notification/sender/informer/batch/job/mau_count_watcher_test.go new file mode 100644 index 000000000..e8fa23f70 --- /dev/null +++ 
b/pkg/notification/sender/informer/batch/job/mau_count_watcher_test.go @@ -0,0 +1,214 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package job + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + + environmentclientmock "github.com/bucketeer-io/bucketeer/pkg/environment/client/mock" + ecclientmock "github.com/bucketeer-io/bucketeer/pkg/eventcounter/client/mock" + sendermock "github.com/bucketeer-io/bucketeer/pkg/notification/sender/mock" + environmentproto "github.com/bucketeer-io/bucketeer/proto/environment" + ecproto "github.com/bucketeer-io/bucketeer/proto/eventcounter" +) + +func TestCreateMAUNotification(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + errInternal := errors.New("internal error") + patterns := map[string]struct { + setup func(*testing.T, *mauCountWatcher) + expectedErr error + }{ + "err project": { + setup: func(t *testing.T, w *mauCountWatcher) { + w.environmentClient.(*environmentclientmock.MockClient).EXPECT().ListProjects( + gomock.Any(), gomock.Any()).Return( + nil, errInternal).Times(1) + w.environmentClient.(*environmentclientmock.MockClient).EXPECT().ListEnvironments( + gomock.Any(), gomock.Any()).Times(0) + w.eventCounterClient.(*ecclientmock.MockClient).EXPECT().GetUserCountV2( + gomock.Any(), gomock.Any()).Times(0) + 
w.sender.(*sendermock.MockSender).EXPECT().Send(gomock.Any(), gomock.Any()).Times(0) + }, + expectedErr: errInternal, + }, + "no projects": { + setup: func(t *testing.T, w *mauCountWatcher) { + w.environmentClient.(*environmentclientmock.MockClient).EXPECT().ListProjects( + gomock.Any(), gomock.Any()).Return( + &environmentproto.ListProjectsResponse{}, nil).Times(1) + w.environmentClient.(*environmentclientmock.MockClient).EXPECT().ListEnvironments( + gomock.Any(), gomock.Any()).Times(0) + w.eventCounterClient.(*ecclientmock.MockClient).EXPECT().GetUserCountV2( + gomock.Any(), gomock.Any()).Times(0) + w.sender.(*sendermock.MockSender).EXPECT().Send(gomock.Any(), gomock.Any()).Times(0) + }, + expectedErr: nil, + }, + "err environments": { + setup: func(t *testing.T, w *mauCountWatcher) { + w.environmentClient.(*environmentclientmock.MockClient).EXPECT().ListProjects( + gomock.Any(), gomock.Any()).Return( + &environmentproto.ListProjectsResponse{ + Projects: []*environmentproto.Project{{Id: "pj0"}}, + Cursor: "cursor", + }, nil).Times(1) + w.environmentClient.(*environmentclientmock.MockClient).EXPECT().ListEnvironments( + gomock.Any(), gomock.Any()).Return( + nil, errInternal).Times(1) + w.eventCounterClient.(*ecclientmock.MockClient).EXPECT().GetUserCountV2( + gomock.Any(), gomock.Any()).Times(0) + w.sender.(*sendermock.MockSender).EXPECT().Send(gomock.Any(), gomock.Any()).Times(0) + }, + expectedErr: errInternal, + }, + "no environments": { + setup: func(t *testing.T, w *mauCountWatcher) { + w.environmentClient.(*environmentclientmock.MockClient).EXPECT().ListProjects( + gomock.Any(), gomock.Any()).Return( + &environmentproto.ListProjectsResponse{ + Projects: []*environmentproto.Project{{Id: "pj0"}}, + Cursor: "cursor", + }, nil).Times(1) + w.environmentClient.(*environmentclientmock.MockClient).EXPECT().ListEnvironments( + gomock.Any(), gomock.Any()).Return( + &environmentproto.ListEnvironmentsResponse{}, nil).Times(1) + 
w.eventCounterClient.(*ecclientmock.MockClient).EXPECT().GetUserCountV2( + gomock.Any(), gomock.Any()).Times(0) + w.sender.(*sendermock.MockSender).EXPECT().Send(gomock.Any(), gomock.Any()).Times(0) + }, + expectedErr: nil, + }, + "err counts": { + setup: func(t *testing.T, w *mauCountWatcher) { + w.environmentClient.(*environmentclientmock.MockClient).EXPECT().ListProjects( + gomock.Any(), gomock.Any()).Return( + &environmentproto.ListProjectsResponse{ + Projects: []*environmentproto.Project{{Id: "pj0"}}, + Cursor: "cursor", + }, nil).Times(1) + w.environmentClient.(*environmentclientmock.MockClient).EXPECT().ListEnvironments( + gomock.Any(), gomock.Any()).Return( + &environmentproto.ListEnvironmentsResponse{ + Environments: []*environmentproto.Environment{{Id: "eID", Namespace: "eNamespace"}}, + Cursor: "", + }, nil).Times(1) + w.eventCounterClient.(*ecclientmock.MockClient).EXPECT().GetUserCountV2( + gomock.Any(), gomock.Any()).Return( + nil, errInternal).Times(1) + w.sender.(*sendermock.MockSender).EXPECT().Send(gomock.Any(), gomock.Any()).Times(0) + }, + expectedErr: errInternal, + }, + "err sender": { + setup: func(t *testing.T, w *mauCountWatcher) { + w.environmentClient.(*environmentclientmock.MockClient).EXPECT().ListProjects( + gomock.Any(), gomock.Any()).Return( + &environmentproto.ListProjectsResponse{ + Projects: []*environmentproto.Project{{Id: "pj0"}}, + Cursor: "cursor", + }, nil).Times(1) + w.environmentClient.(*environmentclientmock.MockClient).EXPECT().ListEnvironments( + gomock.Any(), gomock.Any()).Return( + &environmentproto.ListEnvironmentsResponse{ + Environments: []*environmentproto.Environment{{Id: "eID", Namespace: "eNamespace"}}, + Cursor: "", + }, nil).Times(1) + w.eventCounterClient.(*ecclientmock.MockClient).EXPECT().GetUserCountV2( + gomock.Any(), gomock.Any()).Return( + &ecproto.GetUserCountV2Response{ + EventCount: 4, + UserCount: 2, + }, nil).Times(1) + w.sender.(*sendermock.MockSender).EXPECT().Send(gomock.Any(), 
gomock.Any()).Return(errInternal).Times(1) + }, + expectedErr: errInternal, + }, + "success": { + setup: func(t *testing.T, w *mauCountWatcher) { + // list projects + w.environmentClient.(*environmentclientmock.MockClient).EXPECT().ListProjects( + gomock.Any(), gomock.Any()).Return( + &environmentproto.ListProjectsResponse{ + Projects: []*environmentproto.Project{{Id: "pj0"}}, + Cursor: "cursor", + }, nil).Times(1) + // list environments + w.environmentClient.(*environmentclientmock.MockClient).EXPECT().ListEnvironments( + gomock.Any(), + &environmentproto.ListEnvironmentsRequest{ + PageSize: listRequestSize, + Cursor: "", + ProjectId: "pj0", + }, + ).Return( + &environmentproto.ListEnvironmentsResponse{ + Environments: []*environmentproto.Environment{{Id: "eID", Namespace: "eNamespace"}}, + Cursor: "", + }, nil).Times(1) + // get user count + startAt, endAt := w.getMAUInterval(time.Now()) + w.eventCounterClient.(*ecclientmock.MockClient).EXPECT().GetUserCountV2( + gomock.Any(), + &ecproto.GetUserCountV2Request{ + EnvironmentNamespace: "eNamespace", + StartAt: startAt, + EndAt: endAt, + }, + ).Return( + &ecproto.GetUserCountV2Response{ + EventCount: 4, + UserCount: 2, + }, nil).Times(1) + // send notification + w.sender.(*sendermock.MockSender).EXPECT().Send( + gomock.Any(), gomock.Any()).Return(nil).Times(1) + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + w := newMAUCountWatcherWithMock(t, mockController) + if p.setup != nil { + p.setup(t, w) + } + err := w.Run(context.Background()) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func newMAUCountWatcherWithMock(t *testing.T, c *gomock.Controller) *mauCountWatcher { + t.Helper() + return &mauCountWatcher{ + environmentClient: environmentclientmock.NewMockClient(c), + eventCounterClient: ecclientmock.NewMockClient(c), + sender: sendermock.NewMockSender(c), + logger: zap.NewNop(), + opts: &options{ + timeout: 5 * time.Minute, + }, + } +} diff --git 
a/pkg/notification/sender/informer/domainevent/BUILD.bazel b/pkg/notification/sender/informer/domainevent/BUILD.bazel new file mode 100644 index 000000000..c3cb8ea1d --- /dev/null +++ b/pkg/notification/sender/informer/domainevent/BUILD.bazel @@ -0,0 +1,45 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "domain_event.go", + "metrics.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/notification/sender/informer/domainevent", + visibility = ["//visibility:public"], + deps = [ + "//pkg/environment/client:go_default_library", + "//pkg/errgroup:go_default_library", + "//pkg/health:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/notification/sender:go_default_library", + "//pkg/notification/sender/informer:go_default_library", + "//pkg/pubsub/puller:go_default_library", + "//pkg/pubsub/puller/codes:go_default_library", + "//pkg/uuid:go_default_library", + "//proto/environment:go_default_library", + "//proto/event/domain:go_default_library", + "//proto/notification:go_default_library", + "//proto/notification/sender:go_default_library", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_prometheus_client_golang//prometheus:go_default_library", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["domain_event_test.go"], + embed = [":go_default_library"], + deps = [ + "//proto/event/domain:go_default_library", + "//proto/notification:go_default_library", + "//proto/notification/sender:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/notification/sender/informer/domainevent/domain_event.go 
b/pkg/notification/sender/informer/domainevent/domain_event.go new file mode 100644 index 000000000..522d72c62 --- /dev/null +++ b/pkg/notification/sender/informer/domainevent/domain_event.go @@ -0,0 +1,312 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package domainevent + +import ( + "context" + "errors" + "time" + + "github.com/golang/protobuf/proto" // nolint:staticcheck + "go.uber.org/zap" + gcodes "google.golang.org/grpc/codes" + gstatus "google.golang.org/grpc/status" + + environmentclient "github.com/bucketeer-io/bucketeer/pkg/environment/client" + "github.com/bucketeer-io/bucketeer/pkg/errgroup" + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/notification/sender" + "github.com/bucketeer-io/bucketeer/pkg/notification/sender/informer" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller/codes" + "github.com/bucketeer-io/bucketeer/pkg/uuid" + environmentproto "github.com/bucketeer-io/bucketeer/proto/environment" + domaineventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" + notificationproto "github.com/bucketeer-io/bucketeer/proto/notification" + senderproto "github.com/bucketeer-io/bucketeer/proto/notification/sender" +) + +var ( + ErrUnknownSourceType = errors.New("domain-event-informer: unknown source type") +) + +type options struct { + 
maxMPS int + numWorkers int + metrics metrics.Registerer + logger *zap.Logger +} + +var defaultOptions = options{ + maxMPS: 10, + numWorkers: 1, + logger: zap.NewNop(), +} + +type Option func(*options) + +func WithMaxMPS(mps int) Option { + return func(opts *options) { + opts.maxMPS = mps + } +} + +func WithNumWorkers(n int) Option { + return func(opts *options) { + opts.numWorkers = n + } +} + +func WithMetrics(r metrics.Registerer) Option { + return func(opts *options) { + opts.metrics = r + } +} + +func WithLogger(logger *zap.Logger) Option { + return func(opts *options) { + opts.logger = logger + } +} + +type domainEventInformer struct { + environmentClient environmentclient.Client + puller puller.RateLimitedPuller + sender sender.Sender + group errgroup.Group + opts *options + logger *zap.Logger + ctx context.Context + cancel func() + doneCh chan struct{} +} + +func NewDomainEventInformer( + environmentClient environmentclient.Client, + p puller.Puller, + sender sender.Sender, + opts ...Option) informer.Informer { + + ctx, cancel := context.WithCancel(context.Background()) + options := defaultOptions + for _, opt := range opts { + opt(&options) + } + if options.metrics != nil { + registerMetrics(options.metrics) + } + return &domainEventInformer{ + environmentClient: environmentClient, + puller: puller.NewRateLimitedPuller(p, options.maxMPS), + sender: sender, + opts: &options, + logger: options.logger.Named("sender"), + ctx: ctx, + cancel: cancel, + doneCh: make(chan struct{}), + } +} + +func (i *domainEventInformer) Run() error { + defer close(i.doneCh) + i.logger.Info("DomainEventInformer start running") + i.group.Go(func() error { + return i.puller.Run(i.ctx) + }) + for idx := 0; idx < i.opts.numWorkers; idx++ { + i.group.Go(i.runWorker) + } + err := i.group.Wait() + i.logger.Info("DomainEventInformer start stopping") + return err +} + +func (i *domainEventInformer) Stop() { + i.logger.Info("DomainEventInformer start stopping") + i.cancel() + <-i.doneCh +} 
+ +func (i *domainEventInformer) Check(ctx context.Context) health.Status { + select { + case <-i.ctx.Done(): + i.logger.Error("Unhealthy due to context Done is closed", zap.Error(i.ctx.Err())) + return health.Unhealthy + default: + if i.group.FinishedCount() > 0 { + i.logger.Error("Unhealthy", zap.Int32("FinishedCount", i.group.FinishedCount())) + return health.Unhealthy + } + return health.Healthy + } +} + +func (i *domainEventInformer) runWorker() error { + for { + select { + case msg, ok := <-i.puller.MessageCh(): + if !ok { + return nil + } + receivedCounter.WithLabelValues(typeDomainEvent).Inc() + i.handleMessage(msg) + case <-i.ctx.Done(): + return nil + } + } +} + +func (i *domainEventInformer) handleMessage(msg *puller.Message) { + if id := msg.Attributes["id"]; id == "" { + msg.Ack() + handledCounter.WithLabelValues(codes.MissingID.String()).Inc() + return + } + domainEvent, err := i.unmarshalMessage(msg) + if err != nil { + handledCounter.WithLabelValues(typeDomainEvent, codes.BadMessage.String()).Inc() + msg.Ack() + return + } + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + environmentID := "" + if !domainEvent.IsAdminEvent { + environment, err := i.getEnvironment(ctx, domainEvent.EnvironmentNamespace) + if err != nil { + if code := gstatus.Code(err); code == gcodes.NotFound { + handledCounter.WithLabelValues(typeDomainEvent, codes.BadMessage.String()).Inc() + msg.Ack() + return + } + handledCounter.WithLabelValues(typeDomainEvent, codes.RepeatableError.String()).Inc() + msg.Nack() + return + } + environmentID = environment.Id + } + ne, err := i.createNotificationEvent(domainEvent, environmentID, domainEvent.IsAdminEvent) + if err != nil { + handledCounter.WithLabelValues(typeDomainEvent, codes.BadMessage.String()).Inc() + msg.Ack() + return + } + if err := i.sender.Send(ctx, ne); err != nil { + handledCounter.WithLabelValues(typeDomainEvent, codes.NonRepeatableError.String()).Inc() + msg.Ack() + 
i.logger.Error("Failed to send notification event", zap.Error(err)) + return + } + handledCounter.WithLabelValues(typeDomainEvent, codes.OK.String()).Inc() + msg.Ack() +} + +func (i *domainEventInformer) unmarshalMessage(msg *puller.Message) (*domaineventproto.Event, error) { + event := &domaineventproto.Event{} + err := proto.Unmarshal(msg.Data, event) + if err != nil { + i.logger.Error("Failed to unmarshal message", zap.Error(err), zap.String("msgID", msg.ID)) + return nil, err + } + return event, nil +} + +func (i *domainEventInformer) createNotificationEvent( + event *domaineventproto.Event, + environmentID string, + isAdminEvent bool, +) (*senderproto.NotificationEvent, error) { + id, err := uuid.NewUUID() + if err != nil { + return nil, err + } + st, err := i.convSourceType(event.EntityType) + if err != nil { + i.logger.Error("Failed to convert source type", zap.Error(err)) + return nil, err + } + ne := &senderproto.NotificationEvent{ + Id: id.String(), + EnvironmentNamespace: event.EnvironmentNamespace, + SourceType: st, + Notification: &senderproto.Notification{ + Type: senderproto.Notification_DomainEvent, + DomainEventNotification: &senderproto.DomainEventNotification{ + EnvironmentId: environmentID, + Editor: event.Editor, + EntityType: event.EntityType, + EntityId: event.EntityId, + Type: event.Type, + }, + }, + IsAdminEvent: isAdminEvent, + } + return ne, nil +} + +func (i *domainEventInformer) convSourceType( + entityType domaineventproto.Event_EntityType, +) (notificationproto.Subscription_SourceType, error) { + switch entityType { + case domaineventproto.Event_FEATURE: + return notificationproto.Subscription_DOMAIN_EVENT_FEATURE, nil + case domaineventproto.Event_GOAL: + return notificationproto.Subscription_DOMAIN_EVENT_GOAL, nil + case domaineventproto.Event_EXPERIMENT: + return notificationproto.Subscription_DOMAIN_EVENT_EXPERIMENT, nil + case domaineventproto.Event_ACCOUNT: + return notificationproto.Subscription_DOMAIN_EVENT_ACCOUNT, nil + case 
domaineventproto.Event_APIKEY: + return notificationproto.Subscription_DOMAIN_EVENT_APIKEY, nil + case domaineventproto.Event_SEGMENT: + return notificationproto.Subscription_DOMAIN_EVENT_SEGMENT, nil + case domaineventproto.Event_ENVIRONMENT: + return notificationproto.Subscription_DOMAIN_EVENT_ENVIRONMENT, nil + case domaineventproto.Event_ADMIN_ACCOUNT: + return notificationproto.Subscription_DOMAIN_EVENT_ADMIN_ACCOUNT, nil + case domaineventproto.Event_AUTOOPS_RULE: + return notificationproto.Subscription_DOMAIN_EVENT_AUTOOPS_RULE, nil + case domaineventproto.Event_PUSH: + return notificationproto.Subscription_DOMAIN_EVENT_PUSH, nil + case domaineventproto.Event_SUBSCRIPTION: + return notificationproto.Subscription_DOMAIN_EVENT_SUBSCRIPTION, nil + case domaineventproto.Event_ADMIN_SUBSCRIPTION: + return notificationproto.Subscription_DOMAIN_EVENT_ADMIN_SUBSCRIPTION, nil + case domaineventproto.Event_PROJECT: + return notificationproto.Subscription_DOMAIN_EVENT_PROJECT, nil + case domaineventproto.Event_WEBHOOK: + return notificationproto.Subscription_DOMAIN_EVENT_WEBHOOK, nil + } + return notificationproto.Subscription_SourceType(0), ErrUnknownSourceType +} + +func (i *domainEventInformer) getEnvironment( + ctx context.Context, + environmentNamespace string, +) (*environmentproto.Environment, error) { + resp, err := i.environmentClient.GetEnvironmentByNamespace(ctx, &environmentproto.GetEnvironmentByNamespaceRequest{ + Namespace: environmentNamespace, + }) + if err != nil { + i.logger.Error( + "Failed to get environment", + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + ) + return nil, err + } + return resp.Environment, nil +} diff --git a/pkg/notification/sender/informer/domainevent/domain_event_test.go b/pkg/notification/sender/informer/domainevent/domain_event_test.go new file mode 100644 index 000000000..8a73ca1e5 --- /dev/null +++ b/pkg/notification/sender/informer/domainevent/domain_event_test.go @@ -0,0 +1,137 @@ +// 
Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package domainevent + +import ( + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + + domaineventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" + notificationproto "github.com/bucketeer-io/bucketeer/proto/notification" + senderproto "github.com/bucketeer-io/bucketeer/proto/notification/sender" +) + +func TestCreateNotificationEvent(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + input *domaineventproto.Event + environmentID string + expected *senderproto.NotificationEvent + expectedErr error + }{ + "success: DomainEvent": { + input: &domaineventproto.Event{ + Id: "did", + EntityType: domaineventproto.Event_FEATURE, + EntityId: "fid", + Type: domaineventproto.Event_FEATURE_CREATED, + Editor: &domaineventproto.Editor{Email: "test@test.com"}, + EnvironmentNamespace: "ns0", + IsAdminEvent: false, + }, + environmentID: "nsid", + expected: &senderproto.NotificationEvent{ + Id: "id", + EnvironmentNamespace: "ns0", + SourceType: notificationproto.Subscription_DOMAIN_EVENT_FEATURE, + Notification: &senderproto.Notification{ + Type: senderproto.Notification_DomainEvent, + DomainEventNotification: &senderproto.DomainEventNotification{ + EnvironmentId: "nsid", + Editor: &domaineventproto.Editor{Email: 
"test@test.com"}, + EntityType: domaineventproto.Event_FEATURE, + EntityId: "fid", + Type: domaineventproto.Event_FEATURE_CREATED, + }, + }, + IsAdminEvent: false, + }, + expectedErr: nil, + }, + "success: Admin DomainEvent": { + input: &domaineventproto.Event{ + Id: "did", + EntityType: domaineventproto.Event_PROJECT, + EntityId: "pid", + Type: domaineventproto.Event_PROJECT_CREATED, + Editor: &domaineventproto.Editor{Email: "test@test.com"}, + EnvironmentNamespace: "", + IsAdminEvent: true, + }, + environmentID: "nsid", + expected: &senderproto.NotificationEvent{ + Id: "id", + EnvironmentNamespace: "", + SourceType: notificationproto.Subscription_DOMAIN_EVENT_PROJECT, + Notification: &senderproto.Notification{ + Type: senderproto.Notification_DomainEvent, + DomainEventNotification: &senderproto.DomainEventNotification{ + EnvironmentId: "nsid", + Editor: &domaineventproto.Editor{Email: "test@test.com"}, + EntityType: domaineventproto.Event_PROJECT, + EntityId: "pid", + Type: domaineventproto.Event_PROJECT_CREATED, + }, + }, + IsAdminEvent: true, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + i := newDomainEventInformer(t, mockController) + actual, err := i.createNotificationEvent(p.input, p.environmentID, p.input.IsAdminEvent) + assert.Equal(t, p.expectedErr, err) + if p.expected != nil { + assert.Equal(t, p.expected.EnvironmentNamespace, actual.EnvironmentNamespace) + assert.Equal(t, p.expected.SourceType, actual.SourceType) + assert.Equal(t, p.expected.IsAdminEvent, actual.IsAdminEvent) + assert.Equal(t, p.expected.Notification.Type, actual.Notification.Type) + assert.Equal(t, p.expected.Notification.DomainEventNotification.EnvironmentId, actual.Notification.DomainEventNotification.EnvironmentId) + assert.Equal(t, p.expected.Notification.DomainEventNotification.Editor, actual.Notification.DomainEventNotification.Editor) + assert.Equal(t, p.expected.Notification.DomainEventNotification.EntityType, 
actual.Notification.DomainEventNotification.EntityType) + assert.Equal(t, p.expected.Notification.DomainEventNotification.EntityId, actual.Notification.DomainEventNotification.EntityId) + assert.Equal(t, p.expected.Notification.DomainEventNotification.Type, actual.Notification.DomainEventNotification.Type) + } + }) + } +} + +func TestConvSourceType(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + for k, v := range domaineventproto.Event_EntityType_name { + t.Run(v, func(t *testing.T) { + i := newDomainEventInformer(t, mockController) + _, err := i.convSourceType(domaineventproto.Event_EntityType(k)) + assert.NoError(t, err) + }) + } +} + +func newDomainEventInformer(t *testing.T, c *gomock.Controller) *domainEventInformer { + t.Helper() + return &domainEventInformer{ + logger: zap.NewNop(), + } +} diff --git a/pkg/notification/sender/informer/domainevent/metrics.go b/pkg/notification/sender/informer/domainevent/metrics.go new file mode 100644 index 000000000..4994818ec --- /dev/null +++ b/pkg/notification/sender/informer/domainevent/metrics.go @@ -0,0 +1,50 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package domainevent + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/bucketeer-io/bucketeer/pkg/metrics" +) + +const ( + typeDomainEvent = "DomainEvent" +) + +var ( + receivedCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "notification", + Name: "domain_event_informer_received_event_total", + Help: "Total number of received domain events", + }, []string{"type"}) + + handledCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "notification", + Name: "domain_event_informer_handle_event_total", + Help: "Total number of handled messages", + }, []string{"type", "code"}) +) + +func registerMetrics(r metrics.Registerer) { + r.MustRegister( + receivedCounter, + handledCounter, + ) +} diff --git a/pkg/notification/sender/informer/informer.go b/pkg/notification/sender/informer/informer.go new file mode 100644 index 000000000..7893cdc37 --- /dev/null +++ b/pkg/notification/sender/informer/informer.go @@ -0,0 +1,28 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package informer + +import ( + "context" + + "github.com/bucketeer-io/bucketeer/pkg/health" +) + +type Informer interface { + Check(context.Context) health.Status + Run() error + Stop() +} diff --git a/pkg/notification/sender/informer/mock/BUILD.bazel b/pkg/notification/sender/informer/mock/BUILD.bazel new file mode 100644 index 000000000..3c32cc5a8 --- /dev/null +++ b/pkg/notification/sender/informer/mock/BUILD.bazel @@ -0,0 +1,12 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["informer.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/notification/sender/informer/mock", + visibility = ["//visibility:public"], + deps = [ + "//pkg/health:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + ], +) diff --git a/pkg/notification/sender/informer/mock/informer.go b/pkg/notification/sender/informer/mock/informer.go new file mode 100644 index 000000000..c3c78cf85 --- /dev/null +++ b/pkg/notification/sender/informer/mock/informer.go @@ -0,0 +1,77 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: informer.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + health "github.com/bucketeer-io/bucketeer/pkg/health" +) + +// MockInformer is a mock of Informer interface. +type MockInformer struct { + ctrl *gomock.Controller + recorder *MockInformerMockRecorder +} + +// MockInformerMockRecorder is the mock recorder for MockInformer. +type MockInformerMockRecorder struct { + mock *MockInformer +} + +// NewMockInformer creates a new mock instance. 
+func NewMockInformer(ctrl *gomock.Controller) *MockInformer { + mock := &MockInformer{ctrl: ctrl} + mock.recorder = &MockInformerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockInformer) EXPECT() *MockInformerMockRecorder { + return m.recorder +} + +// Check mocks base method. +func (m *MockInformer) Check(arg0 context.Context) health.Status { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Check", arg0) + ret0, _ := ret[0].(health.Status) + return ret0 +} + +// Check indicates an expected call of Check. +func (mr *MockInformerMockRecorder) Check(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Check", reflect.TypeOf((*MockInformer)(nil).Check), arg0) +} + +// Run mocks base method. +func (m *MockInformer) Run() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Run") + ret0, _ := ret[0].(error) + return ret0 +} + +// Run indicates an expected call of Run. +func (mr *MockInformerMockRecorder) Run() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockInformer)(nil).Run)) +} + +// Stop mocks base method. +func (m *MockInformer) Stop() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Stop") +} + +// Stop indicates an expected call of Stop. +func (mr *MockInformerMockRecorder) Stop() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockInformer)(nil).Stop)) +} diff --git a/pkg/notification/sender/metrics.go b/pkg/notification/sender/metrics.go new file mode 100644 index 000000000..3162fc540 --- /dev/null +++ b/pkg/notification/sender/metrics.go @@ -0,0 +1,51 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sender + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/bucketeer-io/bucketeer/pkg/metrics" +) + +const ( + codeSuccess = "Success" + codeFail = "Fail" +) + +var ( + receivedCounter = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "notification", + Name: "sender_received_event_total", + Help: "Total number of received events", + }) + + handledCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "notification", + Name: "sender_handle_event_total", + Help: "Total number of handled messages", + }, []string{"code"}) +) + +func registerMetrics(r metrics.Registerer) { + r.MustRegister( + receivedCounter, + handledCounter, + ) +} diff --git a/pkg/notification/sender/mock/BUILD.bazel b/pkg/notification/sender/mock/BUILD.bazel new file mode 100644 index 000000000..4ad7a5e31 --- /dev/null +++ b/pkg/notification/sender/mock/BUILD.bazel @@ -0,0 +1,12 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["sender.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/notification/sender/mock", + visibility = ["//visibility:public"], + deps = [ + "//proto/notification/sender:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + ], +) diff --git a/pkg/notification/sender/mock/sender.go b/pkg/notification/sender/mock/sender.go new file mode 100644 index 000000000..2b7ff907b --- /dev/null +++ b/pkg/notification/sender/mock/sender.go @@ -0,0 +1,51 @@ 
+// Code generated by MockGen. DO NOT EDIT. +// Source: sender.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + sender "github.com/bucketeer-io/bucketeer/proto/notification/sender" +) + +// MockSender is a mock of Sender interface. +type MockSender struct { + ctrl *gomock.Controller + recorder *MockSenderMockRecorder +} + +// MockSenderMockRecorder is the mock recorder for MockSender. +type MockSenderMockRecorder struct { + mock *MockSender +} + +// NewMockSender creates a new mock instance. +func NewMockSender(ctrl *gomock.Controller) *MockSender { + mock := &MockSender{ctrl: ctrl} + mock.recorder = &MockSenderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSender) EXPECT() *MockSenderMockRecorder { + return m.recorder +} + +// Send mocks base method. +func (m *MockSender) Send(arg0 context.Context, arg1 *sender.NotificationEvent) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Send", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// Send indicates an expected call of Send. 
+func (mr *MockSenderMockRecorder) Send(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockSender)(nil).Send), arg0, arg1) +} diff --git a/pkg/notification/sender/notifier/BUILD.bazel b/pkg/notification/sender/notifier/BUILD.bazel new file mode 100644 index 000000000..18c703c24 --- /dev/null +++ b/pkg/notification/sender/notifier/BUILD.bazel @@ -0,0 +1,36 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "message.go", + "metrics.go", + "notifier.go", + "slack.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/notification/sender/notifier", + visibility = ["//visibility:public"], + deps = [ + "//pkg/domainevent/domain:go_default_library", + "//pkg/feature/domain:go_default_library", + "//pkg/locale:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/notification/domain:go_default_library", + "//proto/event/domain:go_default_library", + "//proto/notification:go_default_library", + "//proto/notification/sender:go_default_library", + "@com_github_prometheus_client_golang//prometheus:go_default_library", + "@com_github_slack_go_slack//:go_default_library", + "@go_googleapis//google/rpc:errdetails_go_proto", + "@org_golang_x_text//language:go_default_library", + "@org_golang_x_text//message:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["slack_test.go"], + embed = [":go_default_library"], + deps = ["@com_github_stretchr_testify//assert:go_default_library"], +) diff --git a/pkg/notification/sender/notifier/message.go b/pkg/notification/sender/notifier/message.go new file mode 100644 index 000000000..648b179a3 --- /dev/null +++ b/pkg/notification/sender/notifier/message.go @@ -0,0 +1,62 @@ +// Copyright 2022 The Bucketeer Authors. 
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package notifier

import (
	"errors"

	"google.golang.org/genproto/googleapis/rpc/errdetails"

	"github.com/bucketeer-io/bucketeer/pkg/locale"
)

// msgType identifies which canned notification message template to render.
type msgType int

const (
	_ msgType = iota // zero value deliberately invalid
	msgTypeFeatureStale
	msgTypeExperimentResult
	msgTypeMAUCount
)

var (
	// errUnknownMsgType is returned by localizedMessage for a msgType with
	// no registered template.
	errUnknownMsgType = errors.New("notification: unknown message type")
	// "%d" is replaced with the number of days a flag has been unused
	// (see createFeatureStaleAttachment in slack.go).
	msgFeatureStaleJaJP = &errdetails.LocalizedMessage{
		Locale:  locale.JaJP,
		Message: "%d日以上使用されていないフィーチャーフラグがあります。",
	}
	msgExperimentResultJaJP = &errdetails.LocalizedMessage{
		Locale:  locale.JaJP,
		Message: "実行中のエクスペリメントがあります。",
	}
	// "%d" is replaced with the month number
	// (see createMAUCountAttachment in slack.go).
	msgMAUCountJaJP = &errdetails.LocalizedMessage{
		Locale:  locale.JaJP,
		Message: "%d月のMAUです。",
	}
)

// localizedMessage returns the message template for t.
//
// loc is currently ignored: only Japanese (ja-JP) templates exist, and every
// caller in this package passes locale.JaJP. Branch on loc here when
// multi-language support becomes necessary.
func localizedMessage(t msgType, loc string) (*errdetails.LocalizedMessage, error) {
	// handle loc if multi-lang is necessary
	switch t {
	case msgTypeFeatureStale:
		return msgFeatureStaleJaJP, nil
	case msgTypeExperimentResult:
		return msgExperimentResultJaJP, nil
	case msgTypeMAUCount:
		return msgMAUCountJaJP, nil
	default:
		return nil, errUnknownMsgType
	}
}
diff --git a/pkg/notification/sender/notifier/metrics.go b/pkg/notification/sender/notifier/metrics.go
new file mode 100644
index 000000000..c15a29511
--- /dev/null
+++ b/pkg/notification/sender/notifier/metrics.go
@@ -0,0 +1,52 @@
// Copyright 2022 The Bucketeer Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package notifier

import (
	"github.com/prometheus/client_golang/prometheus"

	"github.com/bucketeer-io/bucketeer/pkg/metrics"
)

// Label values: "type" identifies the notifier implementation (only Slack
// exists today), "code" the handling result.
const (
	typeSlack   = "Slack"
	codeSuccess = "Success"
	codeFail    = "Fail"
)

var (
	// receivedCounter counts notifications accepted by a notifier, by
	// notifier type. Incremented after the recipient-type filter in
	// slackNotifier.Notify, so ignored recipient types are not counted.
	receivedCounter = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "bucketeer",
			Subsystem: "notification",
			Name:      "notifier_received_total",
			Help:      "Total number of received messages",
		}, []string{"type"})

	// handledCounter counts finished delivery attempts by notifier type and
	// result code (codeSuccess / codeFail).
	handledCounter = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "bucketeer",
			Subsystem: "notification",
			Name:      "notifier_handled_total",
			Help:      "Total number of handled messages",
		}, []string{"type", "code"})
)

// registerMetrics registers the package-level collectors with r.
// MustRegister panics on duplicate registration, so call this at most once
// per registerer (it is invoked from NewSlackNotifier when WithMetrics is
// supplied).
func registerMetrics(r metrics.Registerer) {
	r.MustRegister(
		receivedCounter,
		handledCounter,
	)
}
diff --git a/pkg/notification/sender/notifier/mock/BUILD.bazel b/pkg/notification/sender/notifier/mock/BUILD.bazel
new file mode 100644
index 000000000..93599852e
--- /dev/null
+++ b/pkg/notification/sender/notifier/mock/BUILD.bazel
@@ -0,0 +1,13 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["notifier.go"],
    importpath = "github.com/bucketeer-io/bucketeer/pkg/notification/sender/notifier/mock",
    visibility = ["//visibility:public"],
    deps = [
        "//proto/notification:go_default_library",
        "//proto/notification/sender:go_default_library",
"@com_github_golang_mock//gomock:go_default_library", + ], +) diff --git a/pkg/notification/sender/notifier/mock/notifier.go b/pkg/notification/sender/notifier/mock/notifier.go new file mode 100644 index 000000000..d19650345 --- /dev/null +++ b/pkg/notification/sender/notifier/mock/notifier.go @@ -0,0 +1,52 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: notifier.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + notification "github.com/bucketeer-io/bucketeer/proto/notification" + sender "github.com/bucketeer-io/bucketeer/proto/notification/sender" +) + +// MockNotifier is a mock of Notifier interface. +type MockNotifier struct { + ctrl *gomock.Controller + recorder *MockNotifierMockRecorder +} + +// MockNotifierMockRecorder is the mock recorder for MockNotifier. +type MockNotifierMockRecorder struct { + mock *MockNotifier +} + +// NewMockNotifier creates a new mock instance. +func NewMockNotifier(ctrl *gomock.Controller) *MockNotifier { + mock := &MockNotifier{ctrl: ctrl} + mock.recorder = &MockNotifierMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockNotifier) EXPECT() *MockNotifierMockRecorder { + return m.recorder +} + +// Notify mocks base method. +func (m *MockNotifier) Notify(ctx context.Context, notification *sender.Notification, recipient *notification.Recipient) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Notify", ctx, notification, recipient) + ret0, _ := ret[0].(error) + return ret0 +} + +// Notify indicates an expected call of Notify. 
func (mr *MockNotifierMockRecorder) Notify(ctx, notification, recipient interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Notify", reflect.TypeOf((*MockNotifier)(nil).Notify), ctx, notification, recipient)
}
diff --git a/pkg/notification/sender/notifier/notifier.go b/pkg/notification/sender/notifier/notifier.go
new file mode 100644
index 000000000..da2b17ebf
--- /dev/null
+++ b/pkg/notification/sender/notifier/notifier.go
@@ -0,0 +1,27 @@
// Copyright 2022 The Bucketeer Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE
package notifier

import (
	"context"

	notificationproto "github.com/bucketeer-io/bucketeer/proto/notification"
	senderproto "github.com/bucketeer-io/bucketeer/proto/notification/sender"
)

// Notifier delivers a notification to one kind of recipient (e.g. a Slack
// channel; see slack.go for the only implementation in this package).
type Notifier interface {
	// Notify sends notification to recipient. The Slack implementation
	// returns nil without doing anything when the recipient is not of the
	// type it handles, so a nil error does not necessarily mean a message
	// was delivered.
	Notify(ctx context.Context, notification *senderproto.Notification, recipient *notificationproto.Recipient) error
}
diff --git a/pkg/notification/sender/notifier/slack.go b/pkg/notification/sender/notifier/slack.go
new file mode 100644
index 000000000..16fcd55af
--- /dev/null
+++ b/pkg/notification/sender/notifier/slack.go
@@ -0,0 +1,283 @@
// Copyright 2022 The Bucketeer Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package notifier + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/slack-go/slack" + "go.uber.org/zap" + "golang.org/x/text/language" + "golang.org/x/text/message" + + domainevent "github.com/bucketeer-io/bucketeer/pkg/domainevent/domain" + featuredomain "github.com/bucketeer-io/bucketeer/pkg/feature/domain" + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + notificationdomain "github.com/bucketeer-io/bucketeer/pkg/notification/domain" + domainproto "github.com/bucketeer-io/bucketeer/proto/event/domain" + notificationproto "github.com/bucketeer-io/bucketeer/proto/notification" + "github.com/bucketeer-io/bucketeer/proto/notification/sender" + senderproto "github.com/bucketeer-io/bucketeer/proto/notification/sender" +) + +const ( + linkTemplate = "<%s|%s>" +) + +var ( + ErrUnknownNotification = errors.New("slacknotifier: unknown notification") +) + +type options struct { + metrics metrics.Registerer + logger *zap.Logger +} + +var defaultOptions = options{ + logger: zap.NewNop(), +} + +type Option func(*options) + +func WithMetrics(r metrics.Registerer) Option { + return func(opts *options) { + opts.metrics = r + } +} + +func WithLogger(logger *zap.Logger) Option { + return func(opts *options) { + opts.logger = logger + } +} + +type slackNotifier struct { + webURL string + logger *zap.Logger + opts *options +} + +func NewSlackNotifier(webURL string, opts ...Option) Notifier { + options := defaultOptions + for _, opt := range opts { + opt(&options) + } + if options.metrics != nil { 
+ registerMetrics(options.metrics) + } + return &slackNotifier{ + webURL: webURL, + opts: &options, + logger: options.logger.Named("sender"), + } +} + +func (n *slackNotifier) Notify( + ctx context.Context, + notification *senderproto.Notification, + recipient *notificationproto.Recipient, +) error { + if recipient.Type != notificationproto.Recipient_SlackChannel { + return nil + } + receivedCounter.WithLabelValues(typeSlack).Inc() + if err := n.notify(ctx, notification, recipient.SlackChannelRecipient); err != nil { + n.logger.Error("Failed to notify", + zap.Error(err), + ) + handledCounter.WithLabelValues(typeSlack, codeFail).Inc() + return err + } + handledCounter.WithLabelValues(typeSlack, codeSuccess).Inc() + return nil +} + +func (n *slackNotifier) notify( + ctx context.Context, + notification *sender.Notification, + slackRecipient *notificationproto.SlackChannelRecipient, +) error { + msg, err := n.createMessage(notification, slackRecipient) + if err != nil { + return err + } + if err = n.postWebhook(ctx, msg, slackRecipient.WebhookUrl); err != nil { + // FIXME: Retry? 
+ return err + } + return nil +} + +func (n *slackNotifier) createMessage( + notification *sender.Notification, + slackRecipient *notificationproto.SlackChannelRecipient, +) (*slack.WebhookMessage, error) { + attachment, err := n.createAttachment(notification) + if err != nil { + return nil, err + } + msg := &slack.WebhookMessage{ + Attachments: []slack.Attachment{*attachment}, + } + return msg, nil +} + +func (n *slackNotifier) createAttachment(notification *sender.Notification) (*slack.Attachment, error) { + switch notification.Type { + case sender.Notification_DomainEvent: + return n.createDomainEventAttachment(notification.DomainEventNotification) + case sender.Notification_FeatureStale: + return n.createFeatureStaleAttachment(notification.FeatureStaleNotification) + case sender.Notification_ExperimentRunning: + return n.createExperimentRunningAttachment(notification.ExperimentRunningNotification) + case sender.Notification_MauCount: + return n.createMAUCountAttachment(notification.MauCountNotification) + } + return nil, ErrUnknownNotification +} + +func (n *slackNotifier) createDomainEventAttachment( + notification *senderproto.DomainEventNotification, +) (*slack.Attachment, error) { + // handle loc if multi-lang is necessary + localizedMessage := domainevent.LocalizedMessage(notification.Type, locale.JaJP) + url, err := domainevent.URL( + notification.EntityType, + n.webURL, + notification.EnvironmentId, + notification.EntityId, + ) + if err != nil { + return nil, err + } + attachment := &slack.Attachment{ + Color: "#36a64f", + AuthorName: notification.Editor.Email, + Text: localizedMessage.Message + "\n\n" + + "Environment: " + notification.EnvironmentId + "\n" + + "Entity ID: " + notification.EntityId + "\n" + + "URL: " + url, + } + return attachment, nil +} + +func (n *slackNotifier) createFeatureStaleAttachment( + notification *senderproto.FeatureStaleNotification, +) (*slack.Attachment, error) { + featureListMsg := "" + for _, feature := range 
notification.Features { + url, err := domainevent.URL( + domainproto.Event_FEATURE, + n.webURL, + notification.EnvironmentId, + feature.Id, + ) + if err != nil { + return nil, err + } + newLine := "- ID: `" + feature.Id + "`, Name: *" + fmt.Sprintf(linkTemplate, url, feature.Name) + "*\n" + featureListMsg = featureListMsg + newLine + } + // handle loc if multi-lang is necessary + msg, err := localizedMessage(msgTypeFeatureStale, locale.JaJP) + if err != nil { + return nil, err + } + replacedMsg := fmt.Sprintf(msg.Message, featuredomain.SecondsToStale/24/60/60) + attachment := &slack.Attachment{ + Color: "#F4D03F", + MarkdownIn: []string{"text"}, + Text: replacedMsg + "\n\n" + + "Environment: " + notification.EnvironmentId + "\n\n" + + "Feature flags: \n\n" + + featureListMsg, + } + return attachment, nil +} + +func (n *slackNotifier) createExperimentRunningAttachment( + notification *senderproto.ExperimentRunningNotification, +) (*slack.Attachment, error) { + listMsg := "" + now := time.Now() + for _, e := range notification.Experiments { + url, err := domainevent.URL( + domainproto.Event_EXPERIMENT, + n.webURL, + notification.EnvironmentId, + e.Id, + ) + if err != nil { + return nil, err + } + nameLink := fmt.Sprintf(linkTemplate, url, e.Name) + newLine := fmt.Sprintf("- 残り `%d` 日, Name: *%s*\n", lastDays(now, time.Unix(e.StopAt, 0)), nameLink) + listMsg = listMsg + newLine + } + // handle loc if multi-lang is necessary + msg, err := localizedMessage(msgTypeExperimentResult, locale.JaJP) + if err != nil { + return nil, err + } + attachment := &slack.Attachment{ + Color: "#3498DB", + MarkdownIn: []string{"text"}, + Text: msg.Message + "\n\n" + + "Environment: " + notification.EnvironmentId + "\n\n" + + "Experiments: \n\n" + + listMsg, + } + return attachment, nil +} + +func (n *slackNotifier) createMAUCountAttachment( + notification *senderproto.MauCountNotification, +) (*slack.Attachment, error) { + msg, err := localizedMessage(msgTypeMAUCount, locale.JaJP) + if 
err != nil { + return nil, err + } + replacedMsg := fmt.Sprintf(msg.Message, notification.Month) + p := message.NewPrinter(language.English) + attachment := &slack.Attachment{ + Color: "#3498DB", + MarkdownIn: []string{"text"}, + Text: replacedMsg + "\n\n" + + "Environment: " + notification.EnvironmentId + "\n" + + p.Sprintf("Event count: %d", notification.EventCount) + "\n" + + p.Sprintf("User count: %d", notification.UserCount), + } + return attachment, nil +} + +func lastDays(now, stopAt time.Time) int { + return int(stopAt.Sub(now).Hours() / 24) +} + +func (n *slackNotifier) postWebhook(ctx context.Context, msg *slack.WebhookMessage, webhookURL string) error { + if err := slack.PostWebhook(webhookURL, msg); err != nil { + n.logger.Error("Failed to post a message", + zap.Error(err), + // Avoid logging a webhook URL which contains secret. + zap.String("slackRecipientId", notificationdomain.SlackChannelRecipientID(webhookURL)), + ) + return err + } + return nil +} diff --git a/pkg/notification/sender/notifier/slack_test.go b/pkg/notification/sender/notifier/slack_test.go new file mode 100644 index 000000000..54156a77c --- /dev/null +++ b/pkg/notification/sender/notifier/slack_test.go @@ -0,0 +1,53 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 

package notifier

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

// TestLastDays pins lastDays' truncation behavior: partial days round toward
// zero, so "now" at, just before, or slightly past stopAt all yield 0, and a
// full day before yields 1.
// NOTE(review): "now" more than a day past stopAt would yield a negative
// value — not covered here.
func TestLastDays(t *testing.T) {
	patterns := map[string]struct {
		inputNow time.Time
		expected int
	}{
		// one second past stopAt: -1s truncates to 0, not -1
		"now is after stopAt": {
			inputNow: time.Date(2019, 12, 26, 00, 00, 00, 0, time.UTC),
			expected: 0,
		},
		"now equals to stopAt": {
			inputNow: time.Date(2019, 12, 25, 23, 59, 59, 0, time.UTC),
			expected: 0,
		},
		// under 24h before stopAt still counts as 0 remaining days
		"0": {
			inputNow: time.Date(2019, 12, 25, 23, 00, 00, 0, time.UTC),
			expected: 0,
		},
		"1": {
			inputNow: time.Date(2019, 12, 24, 00, 00, 00, 0, time.UTC),
			expected: 1,
		},
	}
	stopAt := time.Date(2019, 12, 25, 23, 59, 59, 0, time.UTC)
	for msg, p := range patterns {
		t.Run(msg, func(t *testing.T) {
			actual := lastDays(p.inputNow, stopAt)
			assert.Equal(t, p.expected, actual)
		})
	}
}
diff --git a/pkg/notification/sender/sender.go b/pkg/notification/sender/sender.go
new file mode 100644
index 000000000..349b7ca57
--- /dev/null
+++ b/pkg/notification/sender/sender.go
@@ -0,0 +1,197 @@
// Copyright 2022 The Bucketeer Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE
package sender

import (
	"context"

	"go.uber.org/zap"

	"github.com/bucketeer-io/bucketeer/pkg/metrics"
	notificationclient "github.com/bucketeer-io/bucketeer/pkg/notification/client"
	"github.com/bucketeer-io/bucketeer/pkg/notification/sender/notifier"
	notificationproto "github.com/bucketeer-io/bucketeer/proto/notification"
	senderproto "github.com/bucketeer-io/bucketeer/proto/notification/sender"
)

type options struct {
	metrics metrics.Registerer
	logger  *zap.Logger
}

// listRequestSize is the page size used when listing subscriptions.
const (
	listRequestSize = 500
)

var (
	defaultOptions = options{
		logger: zap.NewNop(),
	}
)

// Option configures NewSender.
type Option func(*options)

// WithMetrics enables Prometheus metrics, registered on r.
func WithMetrics(r metrics.Registerer) Option {
	return func(opts *options) {
		opts.metrics = r
	}
}

// WithLogger replaces the default no-op logger.
func WithLogger(logger *zap.Logger) Option {
	return func(opts *options) {
		opts.logger = logger
	}
}

// Sender fans a notification event out to every enabled subscription that
// matches the event's source type.
type Sender interface {
	Send(context.Context, *senderproto.NotificationEvent) error
}

type sender struct {
	notificationClient notificationclient.Client
	notifiers          []notifier.Notifier
	opts               *options
	logger             *zap.Logger
}

// NewSender builds a Sender that resolves subscriptions through
// notificationClient and delivers through each of notifiers in order.
func NewSender(
	notificationClient notificationclient.Client,
	notifiers []notifier.Notifier,
	opts ...Option) Sender {

	options := defaultOptions
	for _, opt := range opts {
		opt(&options)
	}
	if options.metrics != nil {
		registerMetrics(options.metrics)
	}
	return &sender{
		notificationClient: notificationClient,
		notifiers:          notifiers,
		opts:               &options,
		logger:             options.logger.Named("sender"),
	}
}

// Send resolves the subscriptions for the event (admin subscriptions when
// IsAdminEvent, environment-scoped ones otherwise) and notifies each one.
// A failure for one subscription does not stop delivery to the others; the
// last error seen is returned. handledCounter records exactly one
// Success/Fail outcome per call.
func (s *sender) Send(ctx context.Context, notificationEvent *senderproto.NotificationEvent) error {
	receivedCounter.Inc()
	subscriptions := []*notificationproto.Subscription{}
	if notificationEvent.IsAdminEvent {
		adminSubs, err := s.listEnabledAdminSubscriptions(ctx, notificationEvent.SourceType)
		if err != nil {
			handledCounter.WithLabelValues(codeFail).Inc()
			return err
		}
		subscriptions = append(subscriptions, adminSubs...)
	} else {
		subs, err := s.listEnabledSubscriptions(
			ctx,
			notificationEvent.EnvironmentNamespace,
			notificationEvent.SourceType,
		)
		if err != nil {
			handledCounter.WithLabelValues(codeFail).Inc()
			return err
		}
		subscriptions = append(subscriptions, subs...)
	}
	var lastErr error
	for _, subscription := range subscriptions {
		if err := s.send(ctx, notificationEvent.Notification, subscription.Recipient); err != nil {
			s.logger.Error("Failed to send notification", zap.Error(err),
				zap.String("environmentNamespace", notificationEvent.EnvironmentNamespace),
			)
			// keep going: one bad recipient must not block the rest
			lastErr = err
			continue
		}
		s.logger.Info("Succeeded to send notification",
			zap.String("environmentNamespace", notificationEvent.EnvironmentNamespace),
		)
	}
	if lastErr != nil {
		handledCounter.WithLabelValues(codeFail).Inc()
		return lastErr
	}
	handledCounter.WithLabelValues(codeSuccess).Inc()
	return nil
}

// send passes the notification to every configured notifier, stopping at the
// first error. Notifiers that do not handle the recipient type are expected
// to return nil (see notifier.Notifier).
func (s *sender) send(
	ctx context.Context,
	notification *senderproto.Notification,
	recipient *notificationproto.Recipient,
) error {
	for _, notifier := range s.notifiers {
		if err := notifier.Notify(ctx, notification, recipient); err != nil {
			return err
		}
	}
	return nil
}

// listEnabledSubscriptions pages through all enabled subscriptions in
// environmentNamespace matching sourceType. A short (or empty) page marks
// the end of the listing.
// NOTE(review): the `size == 0` test is redundant (0 < listRequestSize);
// also, termination relies on the server advancing resp.Cursor when a full
// page is returned — confirm against the service implementation.
func (s *sender) listEnabledSubscriptions(
	ctx context.Context,
	environmentNamespace string,
	sourceType notificationproto.Subscription_SourceType) ([]*notificationproto.Subscription, error) {

	subscriptions := []*notificationproto.Subscription{}
	cursor := ""
	for {
		resp, err := s.notificationClient.ListEnabledSubscriptions(ctx, &notificationproto.ListEnabledSubscriptionsRequest{
			EnvironmentNamespace: environmentNamespace,
			SourceTypes:          []notificationproto.Subscription_SourceType{sourceType},
			PageSize:             listRequestSize,
			Cursor:               cursor,
		})
		if err != nil {
			return nil, err
		}
		subscriptions = append(subscriptions, resp.Subscriptions...)
		size := len(resp.Subscriptions)
		if size == 0 || size < listRequestSize {
			return subscriptions, nil
		}
		cursor = resp.Cursor
	}
}

// listEnabledAdminSubscriptions is the admin-scoped variant of
// listEnabledSubscriptions; same pagination contract.
func (s *sender) listEnabledAdminSubscriptions(
	ctx context.Context,
	sourceType notificationproto.Subscription_SourceType) ([]*notificationproto.Subscription, error) {

	subscriptions := []*notificationproto.Subscription{}
	cursor := ""
	for {
		resp, err := s.notificationClient.ListEnabledAdminSubscriptions(
			ctx,
			&notificationproto.ListEnabledAdminSubscriptionsRequest{
				SourceTypes: []notificationproto.Subscription_SourceType{sourceType},
				PageSize:    listRequestSize,
				Cursor:      cursor,
			},
		)
		if err != nil {
			return nil, err
		}
		subscriptions = append(subscriptions, resp.Subscriptions...)
		size := len(resp.Subscriptions)
		if size == 0 || size < listRequestSize {
			return subscriptions, nil
		}
		cursor = resp.Cursor
	}
}
diff --git a/pkg/notification/sender/sender_test.go b/pkg/notification/sender/sender_test.go
new file mode 100644
index 000000000..65e1621f2
--- /dev/null
+++ b/pkg/notification/sender/sender_test.go
@@ -0,0 +1,403 @@
// Copyright 2022 The Bucketeer Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package sender

import (
	"context"
	"errors"
	"fmt"
	"testing"

	"github.com/golang/mock/gomock"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/bucketeer-io/bucketeer/pkg/log"
	ncmock "github.com/bucketeer-io/bucketeer/pkg/notification/client/mock"
	"github.com/bucketeer-io/bucketeer/pkg/notification/sender/notifier"
	nmock "github.com/bucketeer-io/bucketeer/pkg/notification/sender/notifier/mock"
	"github.com/bucketeer-io/bucketeer/pkg/storage"
	notificationproto "github.com/bucketeer-io/bucketeer/proto/notification"
	senderproto "github.com/bucketeer-io/bucketeer/proto/notification/sender"
)

// TestHandle exercises sender.Send end to end against mocked notification
// client and notifier: list errors propagate, zero subscriptions is a no-op
// success, a notifier error propagates, and N subscriptions produce N Notify
// calls — for both environment-scoped and admin events.
// Each case's setup downcasts the sender's fields to their gomock types to
// install expectations; createSender is a helper defined later in this file.
func TestHandle(t *testing.T) {
	t.Parallel()
	mockController := gomock.NewController(t)
	defer mockController.Finish()

	patterns := map[string]struct {
		setup    func(t *testing.T, s *sender)
		input    *senderproto.NotificationEvent
		expected error
	}{
		"error: list subscriptions": {
			setup: func(t *testing.T, s *sender) {
				s.notificationClient.(*ncmock.MockClient).EXPECT().ListEnabledSubscriptions(gomock.Any(), gomock.Any()).Return(
					nil, errors.New("test"))
			},
			input: &senderproto.NotificationEvent{
				Id:                   "id",
				EnvironmentNamespace: "ns0",
				SourceType:           notificationproto.Subscription_DOMAIN_EVENT_ACCOUNT,
				Notification: &senderproto.Notification{
					Type:                    senderproto.Notification_DomainEvent,
					DomainEventNotification: &senderproto.DomainEventNotification{},
				},
				IsAdminEvent: false,
			},
			expected: errors.New("test"),
		},
		"success: 0 subscription": {
			setup: func(t *testing.T, s *sender) {
				s.notificationClient.(*ncmock.MockClient).EXPECT().ListEnabledSubscriptions(gomock.Any(), gomock.Any()).Return(
					&notificationproto.ListEnabledSubscriptionsResponse{}, nil)
			},
			input: &senderproto.NotificationEvent{
				Id:                   "id",
				EnvironmentNamespace: "ns0",
				SourceType:           notificationproto.Subscription_DOMAIN_EVENT_ACCOUNT,
				Notification: &senderproto.Notification{
					Type:                    senderproto.Notification_DomainEvent,
					DomainEventNotification: &senderproto.DomainEventNotification{},
				},
				IsAdminEvent: false,
			},
			expected: nil,
		},
		"success: 0 admin subscription": {
			setup: func(t *testing.T, s *sender) {
				s.notificationClient.(*ncmock.MockClient).EXPECT().ListEnabledAdminSubscriptions(gomock.Any(), gomock.Any()).Return(
					&notificationproto.ListEnabledAdminSubscriptionsResponse{}, nil)
			},
			input: &senderproto.NotificationEvent{
				Id:                   "id",
				EnvironmentNamespace: storage.AdminEnvironmentNamespace,
				SourceType:           notificationproto.Subscription_DOMAIN_EVENT_ENVIRONMENT,
				Notification: &senderproto.Notification{
					Type:                    senderproto.Notification_DomainEvent,
					DomainEventNotification: &senderproto.DomainEventNotification{},
				},
				IsAdminEvent: true,
			},
			expected: nil,
		},
		"error: notify": {
			setup: func(t *testing.T, s *sender) {
				s.notificationClient.(*ncmock.MockClient).EXPECT().ListEnabledSubscriptions(gomock.Any(), gomock.Any()).Return(
					&notificationproto.ListEnabledSubscriptionsResponse{Subscriptions: []*notificationproto.Subscription{
						{Id: "sid0"},
					}}, nil)
				s.notifiers[0].(*nmock.MockNotifier).EXPECT().Notify(gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("test"))
			},
			input: &senderproto.NotificationEvent{
				Id:                   "id",
				EnvironmentNamespace: "ns0",
				SourceType:           notificationproto.Subscription_DOMAIN_EVENT_ACCOUNT,
				Notification: &senderproto.Notification{
					Type:                    senderproto.Notification_DomainEvent,
					DomainEventNotification: &senderproto.DomainEventNotification{},
				},
				IsAdminEvent: false,
			},
			expected: errors.New("test"),
		},
		"success: 1 subscription": {
			setup: func(t *testing.T, s *sender) {
				s.notificationClient.(*ncmock.MockClient).EXPECT().ListEnabledSubscriptions(gomock.Any(), gomock.Any()).Return(
					&notificationproto.ListEnabledSubscriptionsResponse{Subscriptions: []*notificationproto.Subscription{
						{Id: "sid0"},
					}}, nil)
				s.notifiers[0].(*nmock.MockNotifier).EXPECT().Notify(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
			},
			input: &senderproto.NotificationEvent{
				Id:                   "id",
				EnvironmentNamespace: "ns0",
				SourceType:           notificationproto.Subscription_DOMAIN_EVENT_ACCOUNT,
				Notification: &senderproto.Notification{
					Type:                    senderproto.Notification_DomainEvent,
					DomainEventNotification: &senderproto.DomainEventNotification{},
				},
				IsAdminEvent: false,
			},
			expected: nil,
		},
		"success: 2 subscription": {
			setup: func(t *testing.T, s *sender) {
				s.notificationClient.(*ncmock.MockClient).EXPECT().ListEnabledSubscriptions(gomock.Any(), gomock.Any()).Return(
					&notificationproto.ListEnabledSubscriptionsResponse{Subscriptions: []*notificationproto.Subscription{
						{Id: "sid0"}, {Id: "sid1"},
					}}, nil)
				// one Notify call per subscription
				s.notifiers[0].(*nmock.MockNotifier).EXPECT().Notify(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(2)
			},
			input: &senderproto.NotificationEvent{
				Id:                   "id",
				EnvironmentNamespace: "ns0",
				SourceType:           notificationproto.Subscription_DOMAIN_EVENT_ACCOUNT,
				Notification: &senderproto.Notification{
					Type:                    senderproto.Notification_DomainEvent,
					DomainEventNotification: &senderproto.DomainEventNotification{},
				},
				IsAdminEvent: false,
			},
			expected: nil,
		},
		"success: 1 admin subscription": {
			setup: func(t *testing.T, s *sender) {
				s.notificationClient.(*ncmock.MockClient).EXPECT().ListEnabledAdminSubscriptions(gomock.Any(), gomock.Any()).Return(
					&notificationproto.ListEnabledAdminSubscriptionsResponse{Subscriptions: []*notificationproto.Subscription{
						{Id: "sid0"},
					}}, nil)
				s.notifiers[0].(*nmock.MockNotifier).EXPECT().Notify(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
			},
			input: &senderproto.NotificationEvent{
				Id:                   "id",
				EnvironmentNamespace: storage.AdminEnvironmentNamespace,
				SourceType:           notificationproto.Subscription_DOMAIN_EVENT_PROJECT,
				Notification: &senderproto.Notification{
					Type:                    senderproto.Notification_DomainEvent,
					DomainEventNotification: &senderproto.DomainEventNotification{},
				},
				IsAdminEvent: true,
			},
			expected: nil,
		},
		"success: 2 admin subscription": {
			setup: func(t *testing.T, s *sender) {
				s.notificationClient.(*ncmock.MockClient).EXPECT().ListEnabledAdminSubscriptions(gomock.Any(), gomock.Any()).Return(
					&notificationproto.ListEnabledAdminSubscriptionsResponse{Subscriptions: []*notificationproto.Subscription{
						{Id: "sid0"}, {Id: "sid1"},
					}}, nil)
				s.notifiers[0].(*nmock.MockNotifier).EXPECT().Notify(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(2)
			},
			input: &senderproto.NotificationEvent{
				Id:                   "id",
				EnvironmentNamespace: storage.AdminEnvironmentNamespace,
				SourceType:           notificationproto.Subscription_DOMAIN_EVENT_PROJECT,
				Notification: &senderproto.Notification{
					Type:                    senderproto.Notification_DomainEvent,
					DomainEventNotification: &senderproto.DomainEventNotification{},
				},
				IsAdminEvent: true,
			},
			expected: nil,
		},
	}

	for msg, p := range patterns {
		t.Run(msg, func(t *testing.T) {
			sender := createSender(t, mockController)
			if p.setup != nil {
				p.setup(t, sender)
			}
			err := sender.Send(context.Background(), p.input)
			assert.Equal(t, p.expected, err)
		})
	}
}

func TestListEnabledSubscriptions(t *testing.T) {
	t.Parallel()
	mockController := gomock.NewController(t)
	defer mockController.Finish()

	patterns := map[string]struct {
		setup       func(t *testing.T, s *sender)
		input       notificationproto.Subscription_SourceType
		expected    []*notificationproto.Subscription
		expectedErr error
	}{
		"error": {
			setup: func(t *testing.T, s *sender) {
				s.notificationClient.(*ncmock.MockClient).EXPECT().ListEnabledSubscriptions(gomock.Any(), &notificationproto.ListEnabledSubscriptionsRequest{
					EnvironmentNamespace: "ns0",
					SourceTypes:          []notificationproto.Subscription_SourceType{notificationproto.Subscription_DOMAIN_EVENT_ACCOUNT},
					PageSize:             listRequestSize,
					Cursor:               "",
				}).Return(nil, errors.New("test"))
}, + input: notificationproto.Subscription_DOMAIN_EVENT_ACCOUNT, + expected: nil, + expectedErr: errors.New("test"), + }, + "success: 0 entity": { + setup: func(t *testing.T, s *sender) { + s.notificationClient.(*ncmock.MockClient).EXPECT().ListEnabledSubscriptions(gomock.Any(), ¬ificationproto.ListEnabledSubscriptionsRequest{ + EnvironmentNamespace: "ns0", + SourceTypes: []notificationproto.Subscription_SourceType{notificationproto.Subscription_DOMAIN_EVENT_ACCOUNT}, + PageSize: listRequestSize, + Cursor: "", + }).Return( + ¬ificationproto.ListEnabledSubscriptionsResponse{Subscriptions: []*notificationproto.Subscription{}}, nil) + }, + input: notificationproto.Subscription_DOMAIN_EVENT_ACCOUNT, + expected: []*notificationproto.Subscription{}, + expectedErr: nil, + }, + "success: 1 entity": { + setup: func(t *testing.T, s *sender) { + s.notificationClient.(*ncmock.MockClient).EXPECT().ListEnabledSubscriptions(gomock.Any(), ¬ificationproto.ListEnabledSubscriptionsRequest{ + EnvironmentNamespace: "ns0", + SourceTypes: []notificationproto.Subscription_SourceType{notificationproto.Subscription_DOMAIN_EVENT_ACCOUNT}, + PageSize: listRequestSize, + Cursor: "", + }).Return( + ¬ificationproto.ListEnabledSubscriptionsResponse{Subscriptions: []*notificationproto.Subscription{ + {Id: "sid0"}, + }}, nil) + }, + input: notificationproto.Subscription_DOMAIN_EVENT_ACCOUNT, + expected: []*notificationproto.Subscription{{Id: "sid0"}}, + expectedErr: nil, + }, + "success: listRequestSize + 1 entity": { + setup: func(t *testing.T, s *sender) { + subs := createSubscriptions(t, listRequestSize+1) + s.notificationClient.(*ncmock.MockClient).EXPECT().ListEnabledSubscriptions(gomock.Any(), ¬ificationproto.ListEnabledSubscriptionsRequest{ + EnvironmentNamespace: "ns0", + SourceTypes: []notificationproto.Subscription_SourceType{notificationproto.Subscription_DOMAIN_EVENT_ACCOUNT}, + PageSize: listRequestSize, + Cursor: "", + 
}).Return(¬ificationproto.ListEnabledSubscriptionsResponse{Subscriptions: subs[:listRequestSize]}, nil) + s.notificationClient.(*ncmock.MockClient).EXPECT().ListEnabledSubscriptions(gomock.Any(), ¬ificationproto.ListEnabledSubscriptionsRequest{ + EnvironmentNamespace: "ns0", + SourceTypes: []notificationproto.Subscription_SourceType{notificationproto.Subscription_DOMAIN_EVENT_ACCOUNT}, + PageSize: listRequestSize, + Cursor: "", + }).Return(¬ificationproto.ListEnabledSubscriptionsResponse{Subscriptions: subs[listRequestSize : listRequestSize+1]}, nil) + }, + input: notificationproto.Subscription_DOMAIN_EVENT_ACCOUNT, + expected: createSubscriptions(t, listRequestSize+1), + expectedErr: nil, + }, + } + + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + sender := createSender(t, mockController) + if p.setup != nil { + p.setup(t, sender) + } + actual, err := sender.listEnabledSubscriptions(context.Background(), "ns0", p.input) + assert.Equal(t, p.expected, actual) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestListEnabledAdminSubscriptions(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(t *testing.T, s *sender) + input notificationproto.Subscription_SourceType + expected []*notificationproto.Subscription + expectedErr error + }{ + "error": { + setup: func(t *testing.T, s *sender) { + s.notificationClient.(*ncmock.MockClient).EXPECT().ListEnabledAdminSubscriptions(gomock.Any(), ¬ificationproto.ListEnabledAdminSubscriptionsRequest{ + SourceTypes: []notificationproto.Subscription_SourceType{notificationproto.Subscription_DOMAIN_EVENT_ACCOUNT}, + PageSize: listRequestSize, + Cursor: "", + }).Return(nil, errors.New("test")) + }, + input: notificationproto.Subscription_DOMAIN_EVENT_ACCOUNT, + expected: nil, + expectedErr: errors.New("test"), + }, + "success: 0 entity": { + setup: func(t *testing.T, s *sender) { + 
s.notificationClient.(*ncmock.MockClient).EXPECT().ListEnabledAdminSubscriptions(gomock.Any(), ¬ificationproto.ListEnabledAdminSubscriptionsRequest{ + SourceTypes: []notificationproto.Subscription_SourceType{notificationproto.Subscription_DOMAIN_EVENT_ACCOUNT}, + PageSize: listRequestSize, + Cursor: "", + }).Return( + ¬ificationproto.ListEnabledAdminSubscriptionsResponse{Subscriptions: []*notificationproto.Subscription{}}, nil) + }, + input: notificationproto.Subscription_DOMAIN_EVENT_ACCOUNT, + expected: []*notificationproto.Subscription{}, + expectedErr: nil, + }, + "success: 1 entity": { + setup: func(t *testing.T, s *sender) { + s.notificationClient.(*ncmock.MockClient).EXPECT().ListEnabledAdminSubscriptions(gomock.Any(), ¬ificationproto.ListEnabledAdminSubscriptionsRequest{ + SourceTypes: []notificationproto.Subscription_SourceType{notificationproto.Subscription_DOMAIN_EVENT_ACCOUNT}, + PageSize: listRequestSize, + Cursor: "", + }).Return( + ¬ificationproto.ListEnabledAdminSubscriptionsResponse{Subscriptions: []*notificationproto.Subscription{ + {Id: "sid0"}, + }}, nil) + }, + input: notificationproto.Subscription_DOMAIN_EVENT_ACCOUNT, + expected: []*notificationproto.Subscription{{Id: "sid0"}}, + expectedErr: nil, + }, + "success: listRequestSize + 1 entity": { + setup: func(t *testing.T, s *sender) { + subs := createSubscriptions(t, listRequestSize+1) + s.notificationClient.(*ncmock.MockClient).EXPECT().ListEnabledAdminSubscriptions(gomock.Any(), ¬ificationproto.ListEnabledAdminSubscriptionsRequest{ + SourceTypes: []notificationproto.Subscription_SourceType{notificationproto.Subscription_DOMAIN_EVENT_ACCOUNT}, + PageSize: listRequestSize, + Cursor: "", + }).Return(¬ificationproto.ListEnabledAdminSubscriptionsResponse{Subscriptions: subs[:listRequestSize]}, nil) + s.notificationClient.(*ncmock.MockClient).EXPECT().ListEnabledAdminSubscriptions(gomock.Any(), ¬ificationproto.ListEnabledAdminSubscriptionsRequest{ + SourceTypes: 
[]notificationproto.Subscription_SourceType{notificationproto.Subscription_DOMAIN_EVENT_ACCOUNT}, + PageSize: listRequestSize, + Cursor: "", + }).Return(¬ificationproto.ListEnabledAdminSubscriptionsResponse{Subscriptions: subs[listRequestSize : listRequestSize+1]}, nil) + }, + input: notificationproto.Subscription_DOMAIN_EVENT_ACCOUNT, + expected: createSubscriptions(t, listRequestSize+1), + expectedErr: nil, + }, + } + + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + sender := createSender(t, mockController) + if p.setup != nil { + p.setup(t, sender) + } + actual, err := sender.listEnabledAdminSubscriptions(context.Background(), p.input) + assert.Equal(t, p.expected, actual) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func createSubscriptions(t *testing.T, size int) []*notificationproto.Subscription { + subscriptions := []*notificationproto.Subscription{} + for i := 0; i < size; i++ { + subscriptions = append(subscriptions, ¬ificationproto.Subscription{Id: fmt.Sprintf("sid%d", i)}) + } + return subscriptions +} + +func createSender(t *testing.T, c *gomock.Controller) *sender { + ncMock := ncmock.NewMockClient(c) + nMock := nmock.NewMockNotifier(c) + logger, err := log.NewLogger() + require.NoError(t, err) + return &sender{ + notificationClient: ncMock, + notifiers: []notifier.Notifier{nMock}, + logger: logger, + } +} diff --git a/pkg/notification/storage/v2/BUILD.bazel b/pkg/notification/storage/v2/BUILD.bazel new file mode 100644 index 000000000..1e4193e19 --- /dev/null +++ b/pkg/notification/storage/v2/BUILD.bazel @@ -0,0 +1,33 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "admin_subscription.go", + "subscription.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/notification/storage/v2", + visibility = ["//visibility:public"], + deps = [ + "//pkg/notification/domain:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + 
"//proto/notification:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "admin_subscription_test.go", + "subscription_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//pkg/notification/domain:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//pkg/storage/v2/mysql/mock:go_default_library", + "//proto/notification:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + ], +) diff --git a/pkg/notification/storage/v2/admin_subscription.go b/pkg/notification/storage/v2/admin_subscription.go new file mode 100644 index 000000000..dc6776d79 --- /dev/null +++ b/pkg/notification/storage/v2/admin_subscription.go @@ -0,0 +1,253 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package v2 + +import ( + "context" + "errors" + "fmt" + + "github.com/bucketeer-io/bucketeer/pkg/notification/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + proto "github.com/bucketeer-io/bucketeer/proto/notification" +) + +var ( + ErrAdminSubscriptionAlreadyExists = errors.New("subscription: admin subscription already exists") + ErrAdminSubscriptionNotFound = errors.New("subscription: admin subscription not found") + ErrAdminSubscriptionUnexpectedAffectedRows = errors.New("subscription: admin subscription unexpected affected rows") +) + +type AdminSubscriptionStorage interface { + CreateAdminSubscription(ctx context.Context, e *domain.Subscription) error + UpdateAdminSubscription(ctx context.Context, e *domain.Subscription) error + DeleteAdminSubscription(ctx context.Context, id string) error + GetAdminSubscription(ctx context.Context, id string) (*domain.Subscription, error) + ListAdminSubscriptions( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, + ) ([]*proto.Subscription, int, int64, error) +} + +type adminSubscriptionStorage struct { + qe mysql.QueryExecer +} + +func NewAdminSubscriptionStorage(qe mysql.QueryExecer) AdminSubscriptionStorage { + return &adminSubscriptionStorage{qe} +} + +func (s *adminSubscriptionStorage) CreateAdminSubscription(ctx context.Context, e *domain.Subscription) error { + query := ` + INSERT INTO admin_subscription ( + id, + created_at, + updated_at, + disabled, + source_types, + recipient, + name + ) VALUES ( + ?, ?, ?, ?, ?, ?, ? 
+ ) + ` + _, err := s.qe.ExecContext( + ctx, + query, + e.Id, + e.CreatedAt, + e.UpdatedAt, + e.Disabled, + mysql.JSONObject{Val: e.SourceTypes}, + mysql.JSONObject{Val: e.Recipient}, + e.Name, + ) + if err != nil { + if err == mysql.ErrDuplicateEntry { + return ErrAdminSubscriptionAlreadyExists + } + return err + } + return nil +} + +func (s *adminSubscriptionStorage) UpdateAdminSubscription(ctx context.Context, e *domain.Subscription) error { + query := ` + UPDATE + admin_subscription + SET + created_at = ?, + updated_at = ?, + disabled = ?, + source_types = ?, + recipient = ?, + name = ? + WHERE + id = ? + ` + result, err := s.qe.ExecContext( + ctx, + query, + e.CreatedAt, + e.UpdatedAt, + e.Disabled, + mysql.JSONObject{Val: e.SourceTypes}, + mysql.JSONObject{Val: e.Recipient}, + e.Name, + e.Id, + ) + if err != nil { + return err + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return err + } + if rowsAffected != 1 { + return ErrAdminSubscriptionUnexpectedAffectedRows + } + return nil +} + +func (s *adminSubscriptionStorage) DeleteAdminSubscription(ctx context.Context, id string) error { + query := ` + DELETE FROM + admin_subscription + WHERE + id = ? + ` + result, err := s.qe.ExecContext( + ctx, + query, + id, + ) + if err != nil { + return err + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return err + } + if rowsAffected != 1 { + return ErrAdminSubscriptionUnexpectedAffectedRows + } + return nil +} + +func (s *adminSubscriptionStorage) GetAdminSubscription(ctx context.Context, id string) (*domain.Subscription, error) { + subscription := proto.Subscription{} + query := ` + SELECT + id, + created_at, + updated_at, + disabled, + source_types, + recipient, + name + FROM + admin_subscription + WHERE + id = ? 
+ ` + err := s.qe.QueryRowContext( + ctx, + query, + id, + ).Scan( + &subscription.Id, + &subscription.CreatedAt, + &subscription.UpdatedAt, + &subscription.Disabled, + &mysql.JSONObject{Val: &subscription.SourceTypes}, + &mysql.JSONObject{Val: &subscription.Recipient}, + &subscription.Name, + ) + if err != nil { + if err == mysql.ErrNoRows { + return nil, ErrAdminSubscriptionNotFound + } + return nil, err + } + return &domain.Subscription{Subscription: &subscription}, nil +} + +func (s *adminSubscriptionStorage) ListAdminSubscriptions( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, +) ([]*proto.Subscription, int, int64, error) { + whereSQL, whereArgs := mysql.ConstructWhereSQLString(whereParts) + orderBySQL := mysql.ConstructOrderBySQLString(orders) + limitOffsetSQL := mysql.ConstructLimitOffsetSQLString(limit, offset) + query := fmt.Sprintf(` + SELECT + id, + created_at, + updated_at, + disabled, + source_types, + recipient, + name + FROM + admin_subscription + %s %s %s + `, whereSQL, orderBySQL, limitOffsetSQL, + ) + rows, err := s.qe.QueryContext(ctx, query, whereArgs...) 
+ if err != nil { + return nil, 0, 0, err + } + defer rows.Close() + subscriptions := make([]*proto.Subscription, 0, limit) + for rows.Next() { + subscription := proto.Subscription{} + err := rows.Scan( + &subscription.Id, + &subscription.CreatedAt, + &subscription.UpdatedAt, + &subscription.Disabled, + &mysql.JSONObject{Val: &subscription.SourceTypes}, + &mysql.JSONObject{Val: &subscription.Recipient}, + &subscription.Name, + ) + if err != nil { + return nil, 0, 0, err + } + subscriptions = append(subscriptions, &subscription) + } + if err := rows.Err(); err != nil { + return nil, 0, 0, err + } + nextOffset := offset + len(subscriptions) + var totalCount int64 + countQuery := fmt.Sprintf(` + SELECT + COUNT(1) + FROM + admin_subscription + %s %s + `, whereSQL, orderBySQL, + ) + err = s.qe.QueryRowContext(ctx, countQuery, whereArgs...).Scan(&totalCount) + if err != nil { + return nil, 0, 0, err + } + return subscriptions, nextOffset, totalCount, nil +} diff --git a/pkg/notification/storage/v2/admin_subscription_test.go b/pkg/notification/storage/v2/admin_subscription_test.go new file mode 100644 index 000000000..337b12c51 --- /dev/null +++ b/pkg/notification/storage/v2/admin_subscription_test.go @@ -0,0 +1,344 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v2 + +import ( + "context" + "errors" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/notification/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + proto "github.com/bucketeer-io/bucketeer/proto/notification" +) + +func TestNewAdminSubscriptionStorage(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + storage := NewAdminSubscriptionStorage(mock.NewMockQueryExecer(mockController)) + assert.IsType(t, &adminSubscriptionStorage{}, storage) +} + +func TestCreateAdminSubscription(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*adminSubscriptionStorage) + input *domain.Subscription + expectedErr error + }{ + "ErrAdminSubscriptionAlreadyExists": { + setup: func(s *adminSubscriptionStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, mysql.ErrDuplicateEntry) + }, + input: &domain.Subscription{ + Subscription: &proto.Subscription{Id: "id-0"}, + }, + expectedErr: ErrAdminSubscriptionAlreadyExists, + }, + "Error": { + setup: func(s *adminSubscriptionStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + + }, + input: &domain.Subscription{ + Subscription: &proto.Subscription{Id: "id-0"}, + }, + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *adminSubscriptionStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, nil) + }, + input: &domain.Subscription{ + Subscription: &proto.Subscription{Id: "id-0"}, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t 
*testing.T) { + storage := newAdminSubscriptionStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + err := storage.CreateAdminSubscription(context.Background(), p.input) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestUpdateAdminSubscription(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*adminSubscriptionStorage) + input *domain.Subscription + expectedErr error + }{ + "ErrAdminSubscriptionUnexpectedAffectedRows": { + setup: func(s *adminSubscriptionStorage) { + result := mock.NewMockResult(mockController) + result.EXPECT().RowsAffected().Return(int64(0), nil) + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(result, nil) + }, + input: &domain.Subscription{ + Subscription: &proto.Subscription{Id: "id-0"}, + }, + expectedErr: ErrAdminSubscriptionUnexpectedAffectedRows, + }, + "Error": { + setup: func(s *adminSubscriptionStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + + }, + input: &domain.Subscription{ + Subscription: &proto.Subscription{Id: "id-0"}, + }, + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *adminSubscriptionStorage) { + result := mock.NewMockResult(mockController) + result.EXPECT().RowsAffected().Return(int64(1), nil) + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(result, nil) + }, + input: &domain.Subscription{ + Subscription: &proto.Subscription{Id: "id-0"}, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newAdminSubscriptionStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + err := storage.UpdateAdminSubscription(context.Background(), p.input) + assert.Equal(t, 
p.expectedErr, err) + }) + } +} + +func TestDeleteAdminSubscription(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*adminSubscriptionStorage) + id string + expectedErr error + }{ + "ErrAdminSubscriptionUnexpectedAffectedRows": { + setup: func(s *adminSubscriptionStorage) { + result := mock.NewMockResult(mockController) + result.EXPECT().RowsAffected().Return(int64(0), nil) + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(result, nil) + }, + id: "id-0", + expectedErr: ErrAdminSubscriptionUnexpectedAffectedRows, + }, + "Error": { + setup: func(s *adminSubscriptionStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + + }, + id: "id-0", + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *adminSubscriptionStorage) { + result := mock.NewMockResult(mockController) + result.EXPECT().RowsAffected().Return(int64(1), nil) + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(result, nil) + }, + id: "id-0", + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newAdminSubscriptionStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + err := storage.DeleteAdminSubscription(context.Background(), p.id) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestGetAdminSubscription(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*adminSubscriptionStorage) + id string + expectedErr error + }{ + "ErrAdminSubscriptionNotFound": { + setup: func(s *adminSubscriptionStorage) { + row := mock.NewMockRow(mockController) + 
row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + id: "id-0", + expectedErr: ErrAdminSubscriptionNotFound, + }, + "Error": { + setup: func(s *adminSubscriptionStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(errors.New("error")) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + + }, + id: "id-0", + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *adminSubscriptionStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + id: "id-0", + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newAdminSubscriptionStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + _, err := storage.GetAdminSubscription(context.Background(), p.id) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestListAdminSubscriptions(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*adminSubscriptionStorage) + whereParts []mysql.WherePart + orders []*mysql.Order + limit int + offset int + expected []*proto.Subscription + expectedCursor int + expectedErr error + }{ + "Error": { + setup: func(s *adminSubscriptionStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + }, + whereParts: nil, + orders: nil, + limit: 0, + offset: 0, + expected: nil, + expectedCursor: 0, + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *adminSubscriptionStorage) { + rows := 
mock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + whereParts: []mysql.WherePart{ + mysql.NewFilter("num", ">=", 5), + }, + orders: []*mysql.Order{ + mysql.NewOrder("id", mysql.OrderDirectionAsc), + }, + limit: 10, + offset: 5, + expected: []*proto.Subscription{}, + expectedCursor: 5, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newAdminSubscriptionStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + subscriptions, cursor, _, err := storage.ListAdminSubscriptions( + context.Background(), + p.whereParts, + p.orders, + p.limit, + p.offset, + ) + assert.Equal(t, p.expected, subscriptions) + assert.Equal(t, p.expectedCursor, cursor) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func newAdminSubscriptionStorageWithMock(t *testing.T, mockController *gomock.Controller) *adminSubscriptionStorage { + t.Helper() + return &adminSubscriptionStorage{mock.NewMockQueryExecer(mockController)} +} diff --git a/pkg/notification/storage/v2/mock/BUILD.bazel b/pkg/notification/storage/v2/mock/BUILD.bazel new file mode 100644 index 000000000..2c6231763 --- /dev/null +++ b/pkg/notification/storage/v2/mock/BUILD.bazel @@ -0,0 +1,17 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "admin_subscription.go", + "subscription.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/notification/storage/v2/mock", + visibility = ["//visibility:public"], + deps = [ + "//pkg/notification/domain:go_default_library", + 
"//pkg/storage/v2/mysql:go_default_library", + "//proto/notification:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + ], +) diff --git a/pkg/notification/storage/v2/mock/admin_subscription.go b/pkg/notification/storage/v2/mock/admin_subscription.go new file mode 100644 index 000000000..ab2c2fa7c --- /dev/null +++ b/pkg/notification/storage/v2/mock/admin_subscription.go @@ -0,0 +1,113 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: admin_subscription.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + domain "github.com/bucketeer-io/bucketeer/pkg/notification/domain" + mysql "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + notification "github.com/bucketeer-io/bucketeer/proto/notification" +) + +// MockAdminSubscriptionStorage is a mock of AdminSubscriptionStorage interface. +type MockAdminSubscriptionStorage struct { + ctrl *gomock.Controller + recorder *MockAdminSubscriptionStorageMockRecorder +} + +// MockAdminSubscriptionStorageMockRecorder is the mock recorder for MockAdminSubscriptionStorage. +type MockAdminSubscriptionStorageMockRecorder struct { + mock *MockAdminSubscriptionStorage +} + +// NewMockAdminSubscriptionStorage creates a new mock instance. +func NewMockAdminSubscriptionStorage(ctrl *gomock.Controller) *MockAdminSubscriptionStorage { + mock := &MockAdminSubscriptionStorage{ctrl: ctrl} + mock.recorder = &MockAdminSubscriptionStorageMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockAdminSubscriptionStorage) EXPECT() *MockAdminSubscriptionStorageMockRecorder { + return m.recorder +} + +// CreateAdminSubscription mocks base method. 
+func (m *MockAdminSubscriptionStorage) CreateAdminSubscription(ctx context.Context, e *domain.Subscription) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateAdminSubscription", ctx, e) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateAdminSubscription indicates an expected call of CreateAdminSubscription. +func (mr *MockAdminSubscriptionStorageMockRecorder) CreateAdminSubscription(ctx, e interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateAdminSubscription", reflect.TypeOf((*MockAdminSubscriptionStorage)(nil).CreateAdminSubscription), ctx, e) +} + +// DeleteAdminSubscription mocks base method. +func (m *MockAdminSubscriptionStorage) DeleteAdminSubscription(ctx context.Context, id string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteAdminSubscription", ctx, id) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteAdminSubscription indicates an expected call of DeleteAdminSubscription. +func (mr *MockAdminSubscriptionStorageMockRecorder) DeleteAdminSubscription(ctx, id interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAdminSubscription", reflect.TypeOf((*MockAdminSubscriptionStorage)(nil).DeleteAdminSubscription), ctx, id) +} + +// GetAdminSubscription mocks base method. +func (m *MockAdminSubscriptionStorage) GetAdminSubscription(ctx context.Context, id string) (*domain.Subscription, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAdminSubscription", ctx, id) + ret0, _ := ret[0].(*domain.Subscription) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAdminSubscription indicates an expected call of GetAdminSubscription. 
+func (mr *MockAdminSubscriptionStorageMockRecorder) GetAdminSubscription(ctx, id interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAdminSubscription", reflect.TypeOf((*MockAdminSubscriptionStorage)(nil).GetAdminSubscription), ctx, id) +} + +// ListAdminSubscriptions mocks base method. +func (m *MockAdminSubscriptionStorage) ListAdminSubscriptions(ctx context.Context, whereParts []mysql.WherePart, orders []*mysql.Order, limit, offset int) ([]*notification.Subscription, int, int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListAdminSubscriptions", ctx, whereParts, orders, limit, offset) + ret0, _ := ret[0].([]*notification.Subscription) + ret1, _ := ret[1].(int) + ret2, _ := ret[2].(int64) + ret3, _ := ret[3].(error) + return ret0, ret1, ret2, ret3 +} + +// ListAdminSubscriptions indicates an expected call of ListAdminSubscriptions. +func (mr *MockAdminSubscriptionStorageMockRecorder) ListAdminSubscriptions(ctx, whereParts, orders, limit, offset interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAdminSubscriptions", reflect.TypeOf((*MockAdminSubscriptionStorage)(nil).ListAdminSubscriptions), ctx, whereParts, orders, limit, offset) +} + +// UpdateAdminSubscription mocks base method. +func (m *MockAdminSubscriptionStorage) UpdateAdminSubscription(ctx context.Context, e *domain.Subscription) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateAdminSubscription", ctx, e) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateAdminSubscription indicates an expected call of UpdateAdminSubscription. 
+func (mr *MockAdminSubscriptionStorageMockRecorder) UpdateAdminSubscription(ctx, e interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAdminSubscription", reflect.TypeOf((*MockAdminSubscriptionStorage)(nil).UpdateAdminSubscription), ctx, e) +} diff --git a/pkg/notification/storage/v2/mock/subscription.go b/pkg/notification/storage/v2/mock/subscription.go new file mode 100644 index 000000000..15e8afd25 --- /dev/null +++ b/pkg/notification/storage/v2/mock/subscription.go @@ -0,0 +1,113 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: subscription.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + domain "github.com/bucketeer-io/bucketeer/pkg/notification/domain" + mysql "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + notification "github.com/bucketeer-io/bucketeer/proto/notification" +) + +// MockSubscriptionStorage is a mock of SubscriptionStorage interface. +type MockSubscriptionStorage struct { + ctrl *gomock.Controller + recorder *MockSubscriptionStorageMockRecorder +} + +// MockSubscriptionStorageMockRecorder is the mock recorder for MockSubscriptionStorage. +type MockSubscriptionStorageMockRecorder struct { + mock *MockSubscriptionStorage +} + +// NewMockSubscriptionStorage creates a new mock instance. +func NewMockSubscriptionStorage(ctrl *gomock.Controller) *MockSubscriptionStorage { + mock := &MockSubscriptionStorage{ctrl: ctrl} + mock.recorder = &MockSubscriptionStorageMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSubscriptionStorage) EXPECT() *MockSubscriptionStorageMockRecorder { + return m.recorder +} + +// CreateSubscription mocks base method. 
+func (m *MockSubscriptionStorage) CreateSubscription(ctx context.Context, e *domain.Subscription, environmentNamespace string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateSubscription", ctx, e, environmentNamespace) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateSubscription indicates an expected call of CreateSubscription. +func (mr *MockSubscriptionStorageMockRecorder) CreateSubscription(ctx, e, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSubscription", reflect.TypeOf((*MockSubscriptionStorage)(nil).CreateSubscription), ctx, e, environmentNamespace) +} + +// DeleteSubscription mocks base method. +func (m *MockSubscriptionStorage) DeleteSubscription(ctx context.Context, id, environmentNamespace string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteSubscription", ctx, id, environmentNamespace) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteSubscription indicates an expected call of DeleteSubscription. +func (mr *MockSubscriptionStorageMockRecorder) DeleteSubscription(ctx, id, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSubscription", reflect.TypeOf((*MockSubscriptionStorage)(nil).DeleteSubscription), ctx, id, environmentNamespace) +} + +// GetSubscription mocks base method. +func (m *MockSubscriptionStorage) GetSubscription(ctx context.Context, id, environmentNamespace string) (*domain.Subscription, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSubscription", ctx, id, environmentNamespace) + ret0, _ := ret[0].(*domain.Subscription) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSubscription indicates an expected call of GetSubscription. 
+func (mr *MockSubscriptionStorageMockRecorder) GetSubscription(ctx, id, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubscription", reflect.TypeOf((*MockSubscriptionStorage)(nil).GetSubscription), ctx, id, environmentNamespace) +} + +// ListSubscriptions mocks base method. +func (m *MockSubscriptionStorage) ListSubscriptions(ctx context.Context, whereParts []mysql.WherePart, orders []*mysql.Order, limit, offset int) ([]*notification.Subscription, int, int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListSubscriptions", ctx, whereParts, orders, limit, offset) + ret0, _ := ret[0].([]*notification.Subscription) + ret1, _ := ret[1].(int) + ret2, _ := ret[2].(int64) + ret3, _ := ret[3].(error) + return ret0, ret1, ret2, ret3 +} + +// ListSubscriptions indicates an expected call of ListSubscriptions. +func (mr *MockSubscriptionStorageMockRecorder) ListSubscriptions(ctx, whereParts, orders, limit, offset interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSubscriptions", reflect.TypeOf((*MockSubscriptionStorage)(nil).ListSubscriptions), ctx, whereParts, orders, limit, offset) +} + +// UpdateSubscription mocks base method. +func (m *MockSubscriptionStorage) UpdateSubscription(ctx context.Context, e *domain.Subscription, environmentNamespace string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateSubscription", ctx, e, environmentNamespace) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateSubscription indicates an expected call of UpdateSubscription. 
+func (mr *MockSubscriptionStorageMockRecorder) UpdateSubscription(ctx, e, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateSubscription", reflect.TypeOf((*MockSubscriptionStorage)(nil).UpdateSubscription), ctx, e, environmentNamespace) +} diff --git a/pkg/notification/storage/v2/subscription.go b/pkg/notification/storage/v2/subscription.go new file mode 100644 index 000000000..9a1364d08 --- /dev/null +++ b/pkg/notification/storage/v2/subscription.go @@ -0,0 +1,275 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE
+package v2
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"github.com/bucketeer-io/bucketeer/pkg/notification/domain"
+	"github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql"
+	proto "github.com/bucketeer-io/bucketeer/proto/notification"
+)
+
+var (
+	ErrSubscriptionAlreadyExists          = errors.New("subscription: subscription already exists")
+	ErrSubscriptionNotFound               = errors.New("subscription: subscription not found")
+	ErrSubscriptionUnexpectedAffectedRows = errors.New("subscription: subscription unexpected affected rows")
+)
+
+type SubscriptionStorage interface { // SubscriptionStorage provides CRUD/list access to the subscription table, scoped by environment namespace
+	CreateSubscription(ctx context.Context, e *domain.Subscription, environmentNamespace string) error
+	UpdateSubscription(ctx context.Context, e *domain.Subscription, environmentNamespace string) error
+	DeleteSubscription(ctx context.Context, id, environmentNamespace string) error
+	GetSubscription(ctx context.Context, id, environmentNamespace string) (*domain.Subscription, error)
+	ListSubscriptions(
+		ctx context.Context,
+		whereParts []mysql.WherePart,
+		orders []*mysql.Order,
+		limit, offset int,
+	) ([]*proto.Subscription, int, int64, error)
+}
+
+type subscriptionStorage struct {
+	qe mysql.QueryExecer
+}
+
+func NewSubscriptionStorage(qe mysql.QueryExecer) SubscriptionStorage { // NewSubscriptionStorage returns a MySQL-backed SubscriptionStorage
+	return &subscriptionStorage{qe}
+}
+
+func (s *subscriptionStorage) CreateSubscription( // CreateSubscription inserts e; returns ErrSubscriptionAlreadyExists on a duplicate key
+	ctx context.Context,
+	e *domain.Subscription,
+	environmentNamespace string,
+) error {
+	query := `
+		INSERT INTO subscription (
+			id,
+			created_at,
+			updated_at,
+			disabled,
+			source_types,
+			recipient,
+			name,
+			environment_namespace
+		) VALUES (
+			?, ?, ?, ?, ?, ?, ?, ?
+		)
+	`
+	_, err := s.qe.ExecContext(
+		ctx,
+		query,
+		e.Id,
+		e.CreatedAt,
+		e.UpdatedAt,
+		e.Disabled,
+		mysql.JSONObject{Val: e.SourceTypes}, // stored as JSON columns
+		mysql.JSONObject{Val: e.Recipient},
+		e.Name,
+		environmentNamespace,
+	)
+	if err != nil {
+		if err == mysql.ErrDuplicateEntry {
+			return ErrSubscriptionAlreadyExists
+		}
+		return err
+	}
+	return nil
+}
+
+func (s *subscriptionStorage) UpdateSubscription( // UpdateSubscription overwrites the row for e.Id; exactly one row must be affected
+	ctx context.Context,
+	e *domain.Subscription,
+	environmentNamespace string,
+) error {
+	query := `
+		UPDATE
+			subscription
+		SET
+			created_at = ?,
+			updated_at = ?,
+			disabled = ?,
+			source_types = ?,
+			recipient = ?,
+			name = ?
+		WHERE
+			id = ? AND
+			environment_namespace = ?
+	`
+	result, err := s.qe.ExecContext(
+		ctx,
+		query,
+		e.CreatedAt,
+		e.UpdatedAt,
+		e.Disabled,
+		mysql.JSONObject{Val: e.SourceTypes},
+		mysql.JSONObject{Val: e.Recipient},
+		e.Name,
+		e.Id,
+		environmentNamespace,
+	)
+	if err != nil {
+		return err
+	}
+	rowsAffected, err := result.RowsAffected()
+	if err != nil {
+		return err
+	}
+	if rowsAffected != 1 { // 0 rows means the id/namespace pair does not exist
+		return ErrSubscriptionUnexpectedAffectedRows
+	}
+	return nil
+}
+
+func (s *subscriptionStorage) DeleteSubscription( // DeleteSubscription removes the row for id; exactly one row must be affected
+	ctx context.Context,
+	id, environmentNamespace string,
+) error {
+	query := `
+		DELETE FROM
+			subscription
+		WHERE
+			id = ? AND
+			environment_namespace = ?
+	`
+	result, err := s.qe.ExecContext(
+		ctx,
+		query,
+		id,
+		environmentNamespace,
+	)
+	if err != nil {
+		return err
+	}
+	rowsAffected, err := result.RowsAffected()
+	if err != nil {
+		return err
+	}
+	if rowsAffected != 1 {
+		return ErrSubscriptionUnexpectedAffectedRows
+	}
+	return nil
+}
+
+func (s *subscriptionStorage) GetSubscription( // GetSubscription fetches one row; ErrSubscriptionNotFound when absent
+	ctx context.Context,
+	id, environmentNamespace string,
+) (*domain.Subscription, error) {
+	subscription := proto.Subscription{}
+	query := `
+		SELECT
+			id,
+			created_at,
+			updated_at,
+			disabled,
+			source_types,
+			recipient,
+			name
+		FROM
+			subscription
+		WHERE
+			id = ? AND
+			environment_namespace = ?
+	`
+	err := s.qe.QueryRowContext(
+		ctx,
+		query,
+		id,
+		environmentNamespace,
+	).Scan(
+		&subscription.Id,
+		&subscription.CreatedAt,
+		&subscription.UpdatedAt,
+		&subscription.Disabled,
+		&mysql.JSONObject{Val: &subscription.SourceTypes},
+		&mysql.JSONObject{Val: &subscription.Recipient},
+		&subscription.Name,
+	)
+	if err != nil {
+		if err == mysql.ErrNoRows {
+			return nil, ErrSubscriptionNotFound
+		}
+		return nil, err
+	}
+	return &domain.Subscription{Subscription: &subscription}, nil
+}
+
+func (s *subscriptionStorage) ListSubscriptions( // ListSubscriptions returns (page, next offset, total count, error)
+	ctx context.Context,
+	whereParts []mysql.WherePart,
+	orders []*mysql.Order,
+	limit, offset int,
+) ([]*proto.Subscription, int, int64, error) {
+	whereSQL, whereArgs := mysql.ConstructWhereSQLString(whereParts)
+	orderBySQL := mysql.ConstructOrderBySQLString(orders)
+	limitOffsetSQL := mysql.ConstructLimitOffsetSQLString(limit, offset)
+	query := fmt.Sprintf(`
+		SELECT
+			id,
+			created_at,
+			updated_at,
+			disabled,
+			source_types,
+			recipient,
+			name
+		FROM
+			subscription
+		%s %s %s
+	`, whereSQL, orderBySQL, limitOffsetSQL,
+	)
+	rows, err := s.qe.QueryContext(ctx, query, whereArgs...)
+	if err != nil {
+		return nil, 0, 0, err
+	}
+	defer rows.Close()
+	subscriptions := make([]*proto.Subscription, 0, limit)
+	for rows.Next() {
+		subscription := proto.Subscription{}
+		err := rows.Scan(
+			&subscription.Id,
+			&subscription.CreatedAt,
+			&subscription.UpdatedAt,
+			&subscription.Disabled,
+			&mysql.JSONObject{Val: &subscription.SourceTypes},
+			&mysql.JSONObject{Val: &subscription.Recipient},
+			&subscription.Name,
+		)
+		if err != nil {
+			return nil, 0, 0, err
+		}
+		subscriptions = append(subscriptions, &subscription)
+	}
+	if err := rows.Err(); err != nil { // BUGFIX: previously returned the outer `err` (nil here), swallowing iteration errors
+		return nil, 0, 0, err
+	}
+	nextOffset := offset + len(subscriptions)
+	var totalCount int64
+	countQuery := fmt.Sprintf(`
+		SELECT
+			COUNT(1)
+		FROM
+			subscription
+		%s %s
+	`, whereSQL, orderBySQL,
+	)
+	err = s.qe.QueryRowContext(ctx, countQuery, whereArgs...).Scan(&totalCount)
+	if err != nil {
+		return nil, 0, 0, err
+	}
+	return subscriptions, nextOffset, totalCount, nil
+}
diff --git a/pkg/notification/storage/v2/subscription_test.go b/pkg/notification/storage/v2/subscription_test.go
new file mode 100644
index 000000000..b82cd98fc
--- /dev/null
+++ b/pkg/notification/storage/v2/subscription_test.go
@@ -0,0 +1,360 @@
+// Copyright 2022 The Bucketeer Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package v2 + +import ( + "context" + "errors" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/notification/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + proto "github.com/bucketeer-io/bucketeer/proto/notification" +) + +func TestNewSubscriptionStorage(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + storage := NewSubscriptionStorage(mock.NewMockQueryExecer(mockController)) + assert.IsType(t, &subscriptionStorage{}, storage) +} + +func TestCreateSubscription(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*subscriptionStorage) + input *domain.Subscription + environmentNamespace string + expectedErr error + }{ + "ErrSubscriptionAlreadyExists": { + setup: func(s *subscriptionStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, mysql.ErrDuplicateEntry) + }, + input: &domain.Subscription{ + Subscription: &proto.Subscription{Id: "id-0"}, + }, + environmentNamespace: "ns", + expectedErr: ErrSubscriptionAlreadyExists, + }, + "Error": { + setup: func(s *subscriptionStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + + }, + input: &domain.Subscription{ + Subscription: &proto.Subscription{Id: "id-0"}, + }, + environmentNamespace: "ns", + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *subscriptionStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, nil) + }, + input: &domain.Subscription{ + Subscription: &proto.Subscription{Id: "id-0"}, + }, + environmentNamespace: "ns", + expectedErr: 
nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newsubscriptionStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + err := storage.CreateSubscription(context.Background(), p.input, p.environmentNamespace) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestUpdateSubscription(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*subscriptionStorage) + input *domain.Subscription + environmentNamespace string + expectedErr error + }{ + "ErrSubscriptionUnexpectedAffectedRows": { + setup: func(s *subscriptionStorage) { + result := mock.NewMockResult(mockController) + result.EXPECT().RowsAffected().Return(int64(0), nil) + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(result, nil) + }, + input: &domain.Subscription{ + Subscription: &proto.Subscription{Id: "id-0"}, + }, + environmentNamespace: "ns", + expectedErr: ErrSubscriptionUnexpectedAffectedRows, + }, + "Error": { + setup: func(s *subscriptionStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + + }, + input: &domain.Subscription{ + Subscription: &proto.Subscription{Id: "id-0"}, + }, + environmentNamespace: "ns", + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *subscriptionStorage) { + result := mock.NewMockResult(mockController) + result.EXPECT().RowsAffected().Return(int64(1), nil) + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(result, nil) + }, + input: &domain.Subscription{ + Subscription: &proto.Subscription{Id: "id-0"}, + }, + environmentNamespace: "ns", + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newsubscriptionStorageWithMock(t, 
mockController) + if p.setup != nil { + p.setup(storage) + } + err := storage.UpdateSubscription(context.Background(), p.input, p.environmentNamespace) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestDeleteSubscription(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*subscriptionStorage) + id string + environmentNamespace string + expectedErr error + }{ + "ErrSubscriptionUnexpectedAffectedRows": { + setup: func(s *subscriptionStorage) { + result := mock.NewMockResult(mockController) + result.EXPECT().RowsAffected().Return(int64(0), nil) + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(result, nil) + }, + id: "id-0", + environmentNamespace: "ns", + expectedErr: ErrSubscriptionUnexpectedAffectedRows, + }, + "Error": { + setup: func(s *subscriptionStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + + }, + id: "id-0", + environmentNamespace: "ns", + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *subscriptionStorage) { + result := mock.NewMockResult(mockController) + result.EXPECT().RowsAffected().Return(int64(1), nil) + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(result, nil) + }, + id: "id-0", + environmentNamespace: "ns", + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newsubscriptionStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + err := storage.DeleteSubscription(context.Background(), p.id, p.environmentNamespace) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestGetSubscription(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := 
map[string]struct { + setup func(*subscriptionStorage) + id string + environmentNamespace string + expectedErr error + }{ + "ErrSubscriptionNotFound": { + setup: func(s *subscriptionStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + id: "id-0", + environmentNamespace: "ns", + expectedErr: ErrSubscriptionNotFound, + }, + "Error": { + setup: func(s *subscriptionStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(errors.New("error")) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + + }, + id: "id-0", + environmentNamespace: "ns", + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *subscriptionStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + id: "id-0", + environmentNamespace: "ns", + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newsubscriptionStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + _, err := storage.GetSubscription(context.Background(), p.id, p.environmentNamespace) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestListSubscriptions(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*subscriptionStorage) + whereParts []mysql.WherePart + orders []*mysql.Order + limit int + offset int + expected []*proto.Subscription + expectedCursor int + expectedErr error + }{ + "Error": { + setup: func(s *subscriptionStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().QueryContext( + 
gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + }, + whereParts: nil, + orders: nil, + limit: 0, + offset: 0, + expected: nil, + expectedCursor: 0, + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *subscriptionStorage) { + rows := mock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + whereParts: []mysql.WherePart{ + mysql.NewFilter("num", ">=", 5), + }, + orders: []*mysql.Order{ + mysql.NewOrder("id", mysql.OrderDirectionAsc), + }, + limit: 10, + offset: 5, + expected: []*proto.Subscription{}, + expectedCursor: 5, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newsubscriptionStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + subscriptions, cursor, _, err := storage.ListSubscriptions( + context.Background(), + p.whereParts, + p.orders, + p.limit, + p.offset, + ) + assert.Equal(t, p.expected, subscriptions) + assert.Equal(t, p.expectedCursor, cursor) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func newsubscriptionStorageWithMock(t *testing.T, mockController *gomock.Controller) *subscriptionStorage { + t.Helper() + return &subscriptionStorage{mock.NewMockQueryExecer(mockController)} +} diff --git a/pkg/opsevent/batch/executor/BUILD.bazel b/pkg/opsevent/batch/executor/BUILD.bazel new file mode 100644 index 000000000..6d7625e59 --- /dev/null +++ b/pkg/opsevent/batch/executor/BUILD.bazel @@ -0,0 +1,29 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = 
"go_default_library", + srcs = ["executor.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/opsevent/batch/executor", + visibility = ["//visibility:public"], + deps = [ + "//pkg/autoops/client:go_default_library", + "//proto/autoops:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["executor_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/autoops/client/mock:go_default_library", + "//pkg/log:go_default_library", + "//proto/autoops:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + ], +) diff --git a/pkg/opsevent/batch/executor/executor.go b/pkg/opsevent/batch/executor/executor.go new file mode 100644 index 000000000..c69e675a3 --- /dev/null +++ b/pkg/opsevent/batch/executor/executor.go @@ -0,0 +1,81 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE
+package executor
+
+import (
+	"context"
+
+	"go.uber.org/zap"
+
+	autoopsclient "github.com/bucketeer-io/bucketeer/pkg/autoops/client"
+	autoopsproto "github.com/bucketeer-io/bucketeer/proto/autoops"
+)
+
+type options struct { // settings applied by Option funcs at construction time
+	logger *zap.Logger
+}
+
+type Option func(*options) // Option customizes NewAutoOpsExecutor's defaults
+
+func WithLogger(l *zap.Logger) Option { // WithLogger overrides the default no-op logger
+	return func(opts *options) {
+		opts.logger = l
+	}
+}
+
+type AutoOpsExecutor interface { // AutoOpsExecutor triggers execution of a single auto-ops rule
+	Execute(ctx context.Context, environmentNamespace, ruleID string) error
+}
+
+type autoOpsExecutor struct {
+	autoOpsClient autoopsclient.Client
+	logger        *zap.Logger
+}
+
+func NewAutoOpsExecutor(autoOpsClient autoopsclient.Client, opts ...Option) AutoOpsExecutor { // NewAutoOpsExecutor builds an executor; logger defaults to zap.NewNop()
+	dopts := &options{
+		logger: zap.NewNop(),
+	}
+	for _, opt := range opts {
+		opt(dopts)
+	}
+	return &autoOpsExecutor{
+		autoOpsClient: autoOpsClient,
+		logger:        dopts.logger.Named("auto-ops-executor"),
+	}
+}
+
+func (e *autoOpsExecutor) Execute(ctx context.Context, environmentNamespace, ruleID string) error { // Execute asks the auto-ops service to run rule ruleID in environmentNamespace; an already-triggered rule is not an error
+	resp, err := e.autoOpsClient.ExecuteAutoOps(ctx, &autoopsproto.ExecuteAutoOpsRequest{
+		EnvironmentNamespace: environmentNamespace,
+		Id:                   ruleID,
+		ChangeAutoOpsRuleTriggeredAtCommand: &autoopsproto.ChangeAutoOpsRuleTriggeredAtCommand{}, // NOTE(review): presumably stamps triggered_at on the rule — confirm server semantics
+	})
+	if err != nil {
+		e.logger.Error("Failed to execute auto ops", zap.Error(err),
+			zap.String("environmentNamespace", environmentNamespace),
+			zap.String("ruleID", ruleID),
+		)
+		return err
+	}
+	if resp.AlreadyTriggered { // rule already ran before; logged at debug and treated as success
+		e.logger.Debug("autoOpsRule has already triggered",
+			zap.String("environmentNamespace", environmentNamespace),
+			zap.String("ruleID", ruleID),
+		)
+	}
+	return nil
+}
diff --git a/pkg/opsevent/batch/executor/executor_test.go b/pkg/opsevent/batch/executor/executor_test.go
new file mode 100644
index 000000000..a3df2735b
--- /dev/null
+++ b/pkg/opsevent/batch/executor/executor_test.go
@@ -0,0 +1,92 @@
+// Copyright 2022 The Bucketeer Authors.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor + +import ( + "context" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + autoopsclientmock "github.com/bucketeer-io/bucketeer/pkg/autoops/client/mock" + "github.com/bucketeer-io/bucketeer/pkg/log" + autoopsproto "github.com/bucketeer-io/bucketeer/proto/autoops" +) + +func TestNewAutoOpsExecutor(t *testing.T) { + t.Parallel() + e := NewAutoOpsExecutor(nil) + assert.IsType(t, &autoOpsExecutor{}, e) +} + +func TestExecute(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*autoOpsExecutor) + expectedErr error + }{ + "error: ExecuteAutoOps fails": { + setup: func(e *autoOpsExecutor) { + e.autoOpsClient.(*autoopsclientmock.MockClient).EXPECT().ExecuteAutoOps(gomock.Any(), gomock.Any()).Return( + nil, status.Errorf(codes.Internal, "internal error")) + }, + expectedErr: status.Errorf(codes.Internal, "internal error"), + }, + "success: AlreadyTriggered: true": { + setup: func(e *autoOpsExecutor) { + e.autoOpsClient.(*autoopsclientmock.MockClient).EXPECT().ExecuteAutoOps(gomock.Any(), gomock.Any()).Return( + &autoopsproto.ExecuteAutoOpsResponse{AlreadyTriggered: true}, + nil, + ) + }, + expectedErr: nil, + }, + "success: AlreadyTriggered: false": { + 
setup: func(e *autoOpsExecutor) { + e.autoOpsClient.(*autoopsclientmock.MockClient).EXPECT().ExecuteAutoOps(gomock.Any(), gomock.Any()).Return( + &autoopsproto.ExecuteAutoOpsResponse{AlreadyTriggered: false}, + nil, + ) + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + e := newNewAutoOpsExecutor(t, mockController) + if p.setup != nil { + p.setup(e) + } + err := e.Execute(context.Background(), "ns0", "rid1") + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func newNewAutoOpsExecutor(t *testing.T, mockController *gomock.Controller) *autoOpsExecutor { + logger, err := log.NewLogger() + require.NoError(t, err) + return &autoOpsExecutor{ + autoOpsClient: autoopsclientmock.NewMockClient(mockController), + logger: logger, + } +} diff --git a/pkg/opsevent/batch/executor/mock/BUILD.bazel b/pkg/opsevent/batch/executor/mock/BUILD.bazel new file mode 100644 index 000000000..e5e98fd5d --- /dev/null +++ b/pkg/opsevent/batch/executor/mock/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["executor.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/opsevent/batch/executor/mock", + visibility = ["//visibility:public"], + deps = ["@com_github_golang_mock//gomock:go_default_library"], +) diff --git a/pkg/opsevent/batch/executor/mock/executor.go b/pkg/opsevent/batch/executor/mock/executor.go new file mode 100644 index 000000000..6f9a1246e --- /dev/null +++ b/pkg/opsevent/batch/executor/mock/executor.go @@ -0,0 +1,49 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: executor.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockAutoOpsExecutor is a mock of AutoOpsExecutor interface. 
+type MockAutoOpsExecutor struct { + ctrl *gomock.Controller + recorder *MockAutoOpsExecutorMockRecorder +} + +// MockAutoOpsExecutorMockRecorder is the mock recorder for MockAutoOpsExecutor. +type MockAutoOpsExecutorMockRecorder struct { + mock *MockAutoOpsExecutor +} + +// NewMockAutoOpsExecutor creates a new mock instance. +func NewMockAutoOpsExecutor(ctrl *gomock.Controller) *MockAutoOpsExecutor { + mock := &MockAutoOpsExecutor{ctrl: ctrl} + mock.recorder = &MockAutoOpsExecutorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockAutoOpsExecutor) EXPECT() *MockAutoOpsExecutorMockRecorder { + return m.recorder +} + +// Execute mocks base method. +func (m *MockAutoOpsExecutor) Execute(ctx context.Context, environmentNamespace, ruleID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Execute", ctx, environmentNamespace, ruleID) + ret0, _ := ret[0].(error) + return ret0 +} + +// Execute indicates an expected call of Execute. 
+func (mr *MockAutoOpsExecutorMockRecorder) Execute(ctx, environmentNamespace, ruleID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Execute", reflect.TypeOf((*MockAutoOpsExecutor)(nil).Execute), ctx, environmentNamespace, ruleID) +} diff --git a/pkg/opsevent/batch/job/BUILD.bazel b/pkg/opsevent/batch/job/BUILD.bazel new file mode 100644 index 000000000..be6e9d527 --- /dev/null +++ b/pkg/opsevent/batch/job/BUILD.bazel @@ -0,0 +1,58 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "count_watcher.go", + "datetime_watcher.go", + "job.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/opsevent/batch/job", + visibility = ["//visibility:public"], + deps = [ + "//pkg/autoops/domain:go_default_library", + "//pkg/environment/domain:go_default_library", + "//pkg/eventcounter/client:go_default_library", + "//pkg/feature/client:go_default_library", + "//pkg/job:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/opsevent/batch/executor:go_default_library", + "//pkg/opsevent/batch/targetstore:go_default_library", + "//pkg/opsevent/domain:go_default_library", + "//pkg/opsevent/storage/v2:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//proto/autoops:go_default_library", + "//proto/eventcounter:go_default_library", + "//proto/feature:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "count_watcher_test.go", + "datetime_watcher_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//pkg/autoops/domain:go_default_library", + "//pkg/environment/domain:go_default_library", + "//pkg/eventcounter/client/mock:go_default_library", + "//pkg/feature/client/mock:go_default_library", + "//pkg/log:go_default_library", + "//pkg/opsevent/batch/executor/mock:go_default_library", + 
"//pkg/opsevent/batch/targetstore/mock:go_default_library", + "//pkg/storage/v2/mysql/mock:go_default_library", + "//proto/autoops:go_default_library", + "//proto/environment:go_default_library", + "//proto/eventcounter:go_default_library", + "//proto/feature:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_golang_protobuf//ptypes:go_default_library_gen", + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + ], +) diff --git a/pkg/opsevent/batch/job/count_watcher.go b/pkg/opsevent/batch/job/count_watcher.go new file mode 100644 index 000000000..cc9c89096 --- /dev/null +++ b/pkg/opsevent/batch/job/count_watcher.go @@ -0,0 +1,354 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package job + +import ( + "context" + "time" + + "go.uber.org/zap" + + autoopsdomain "github.com/bucketeer-io/bucketeer/pkg/autoops/domain" + environmentdomain "github.com/bucketeer-io/bucketeer/pkg/environment/domain" + ecclient "github.com/bucketeer-io/bucketeer/pkg/eventcounter/client" + ftclient "github.com/bucketeer-io/bucketeer/pkg/feature/client" + "github.com/bucketeer-io/bucketeer/pkg/job" + "github.com/bucketeer-io/bucketeer/pkg/opsevent/batch/executor" + "github.com/bucketeer-io/bucketeer/pkg/opsevent/batch/targetstore" + opseventdomain "github.com/bucketeer-io/bucketeer/pkg/opsevent/domain" + v2os "github.com/bucketeer-io/bucketeer/pkg/opsevent/storage/v2" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + autoopsproto "github.com/bucketeer-io/bucketeer/proto/autoops" + ecproto "github.com/bucketeer-io/bucketeer/proto/eventcounter" + ftproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +const ( + queryTimeRange = -30 * 24 * time.Hour +) + +type countWatcher struct { + mysqlClient mysql.Client + environmentLister targetstore.EnvironmentLister + autoOpsRuleLister targetstore.AutoOpsRuleLister + eventCounterClient ecclient.Client + featureClient ftclient.Client + autoOpsExecutor executor.AutoOpsExecutor + opts *options + logger *zap.Logger +} + +func NewCountWatcher( + mysqlClient mysql.Client, + targetStore targetstore.TargetStore, + eventCounterClient ecclient.Client, + featureClient ftclient.Client, + autoOpsExecutor executor.AutoOpsExecutor, + opts ...Option, +) job.Job { + dopts := &options{ + timeout: 5 * time.Minute, + logger: zap.NewNop(), + } + for _, opt := range opts { + opt(dopts) + } + return &countWatcher{ + mysqlClient: mysqlClient, + environmentLister: targetStore, + autoOpsRuleLister: targetStore, + eventCounterClient: eventCounterClient, + featureClient: featureClient, + autoOpsExecutor: autoOpsExecutor, + opts: dopts, + logger: dopts.logger.Named("count-watcher"), + } +} + +func (w *countWatcher) Run(ctx 
context.Context) (lastErr error) { + ctx, cancel := context.WithTimeout(ctx, w.opts.timeout) + defer cancel() + environments := w.environmentLister.GetEnvironments(ctx) + for _, env := range environments { + autoOpsRules := w.autoOpsRuleLister.GetAutoOpsRules(ctx, env.Namespace) + for _, a := range autoOpsRules { + asmt, err := w.assessAutoOpsRule(ctx, env, a) + if err != nil { + lastErr = err + } + if !asmt { + continue + } + if err = w.autoOpsExecutor.Execute(ctx, env.Namespace, a.Id); err != nil { + lastErr = err + } + } + } + return +} + +func (w *countWatcher) assessAutoOpsRule( + ctx context.Context, + env *environmentdomain.Environment, + a *autoopsdomain.AutoOpsRule, +) (bool, error) { + opsEventRateClauses, err := a.ExtractOpsEventRateClauses() + if err != nil { + w.logger.Error("Failed to extract ops event rate clauses", zap.Error(err), + zap.String("environmentNamespace", env.Namespace), + zap.String("featureId", a.FeatureId), + zap.String("autoOpsRuleId", a.Id), + ) + return false, err + } + featureVersion, err := w.getLatestFeatureVersion(ctx, a.FeatureId, env.Namespace) + if err != nil { + w.logger.Error("Failed to get the latest feature version", zap.Error(err), + zap.String("environmentNamespace", env.Namespace), + zap.String("featureId", a.FeatureId), + zap.String("autoOpsRuleId", a.Id), + ) + return false, err + } + var lastErr error + for id, c := range opsEventRateClauses { + logFunc := func(msg string) { + w.logger.Debug(msg, + zap.String("environmentNamespace", env.Namespace), + zap.String("featureId", a.FeatureId), + zap.String("autoOpsRuleId", a.Id), + zap.Any("opsEventRateClause", c), + ) + } + evaluationCount, err := w.getTargetEvaluationCount(ctx, + logFunc, + env.Namespace, + a.FeatureId, + c.VariationId, + featureVersion, + ) + if err != nil { + lastErr = err + continue + } + if evaluationCount == nil { + continue + } + opsEventCount, err := w.getTargetOpsEventCount( + ctx, + logFunc, + env.Namespace, + a.FeatureId, + c.VariationId, + 
c.GoalId, + featureVersion, + ) + if err != nil { + lastErr = err + continue + } + if opsEventCount == nil { + continue + } + opsCount := opseventdomain.NewOpsCount(a.FeatureId, a.Id, id, opsEventCount.UserCount, evaluationCount.UserCount) + if err = w.persistOpsCount(ctx, env.Namespace, opsCount); err != nil { + lastErr = err + continue + } + if asmt := w.assessRule(c, evaluationCount, opsEventCount); asmt { + w.logger.Info("Clause satisfies condition", + zap.String("environmentNamespace", env.Namespace), + zap.String("featureId", a.FeatureId), + zap.String("autoOpsRuleId", a.Id), + zap.Any("opsEventRateClause", c), + ) + return true, nil + } + } + return false, lastErr +} + +func (w *countWatcher) getLatestFeatureVersion( + ctx context.Context, + featureID, environmentNamespace string, +) (int32, error) { + req := &ftproto.GetFeatureRequest{ + Id: featureID, + EnvironmentNamespace: environmentNamespace, + } + resp, err := w.featureClient.GetFeature(ctx, req) + if err != nil { + return 0, err + } + return resp.Feature.Version, nil +} + +func (w *countWatcher) assessRule( + opsEventRateClause *autoopsproto.OpsEventRateClause, + evaluationCount, + opsCount *ecproto.VariationCount, +) bool { + rate := float64(opsCount.UserCount) / float64(evaluationCount.UserCount) + if opsCount.UserCount < opsEventRateClause.MinCount { + return false + } + switch opsEventRateClause.Operator { + case autoopsproto.OpsEventRateClause_GREATER_OR_EQUAL: + if rate >= opsEventRateClause.ThreadsholdRate { + return true + } + case autoopsproto.OpsEventRateClause_LESS_OR_EQUAL: + if rate <= opsEventRateClause.ThreadsholdRate { + return true + } + } + return false +} + +func (w *countWatcher) getTargetEvaluationCount( + ctx context.Context, + logFunc func(string), + environmentNamespace, FeatureID, variationID string, + featureVersion int32, +) (*ecproto.VariationCount, error) { + evaluationCount, err := w.getEvaluationCount( + ctx, + environmentNamespace, + FeatureID, + variationID, + 
featureVersion, + ) + if err != nil { + return nil, err + } + if evaluationCount == nil { + logFunc("evaluationCount is nil") + return nil, nil + } + if evaluationCount.UserCount == 0 { + logFunc("evaluationCount.UserCount is zero") + return nil, nil + } + return evaluationCount, nil +} + +func (w *countWatcher) getEvaluationCount( + ctx context.Context, + environmentNamespace, FeatureID, variationID string, + featureVersion int32, +) (*ecproto.VariationCount, error) { + endAt := time.Now() + startAt := endAt.Add(queryTimeRange) + resp, err := w.eventCounterClient.GetEvaluationCountV2(ctx, &ecproto.GetEvaluationCountV2Request{ + EnvironmentNamespace: environmentNamespace, + StartAt: startAt.Unix(), + EndAt: endAt.Unix(), + FeatureId: FeatureID, + FeatureVersion: featureVersion, + VariationIds: []string{variationID}, + }) + if err != nil { + w.logger.Error("Failed to get evaluation realtime count", zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + zap.String("featureId", FeatureID), + zap.Int32("featureVersion", featureVersion), + zap.String("variationId", variationID), + ) + return nil, err + } + if len(resp.Count.RealtimeCounts) == 0 { + return nil, nil + } + for _, vc := range resp.Count.RealtimeCounts { + if vc.VariationId == variationID { + return vc, nil + } + } + return nil, nil +} + +func (w *countWatcher) getTargetOpsEventCount( + ctx context.Context, + logFunc func(string), + environmentNamespace, FeatureID, variationID, goalID string, + featureVersion int32, +) (*ecproto.VariationCount, error) { + opsCount, err := w.getOpsEventCount( + ctx, + environmentNamespace, + FeatureID, + variationID, + goalID, + featureVersion, + ) + if err != nil { + return nil, err + } + if opsCount == nil { + logFunc("opsCount is nil") + return nil, nil + } + return opsCount, nil +} + +func (w *countWatcher) getOpsEventCount( + ctx context.Context, + environmentNamespace, FeatureID, variationID, goalID string, + featureVersion int32, +) 
(*ecproto.VariationCount, error) { + endAt := time.Now() + startAt := endAt.Add(queryTimeRange) + resp, err := w.eventCounterClient.GetGoalCountV2(ctx, &ecproto.GetGoalCountV2Request{ + EnvironmentNamespace: environmentNamespace, + StartAt: startAt.Unix(), + EndAt: endAt.Unix(), + FeatureId: FeatureID, + FeatureVersion: featureVersion, + VariationIds: []string{variationID}, + GoalId: goalID, + }) + if err != nil { + w.logger.Error("Failed to get ops realtime variation count", zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + zap.String("featureId", FeatureID), + zap.Int32("featureVersion", featureVersion), + zap.String("variationId", variationID), + zap.String("goalId", goalID), + ) + return nil, err + } + for _, vc := range resp.GoalCounts.RealtimeCounts { + if vc.VariationId == variationID { + return vc, nil + } + } + return nil, nil +} + +func (w *countWatcher) persistOpsCount( + ctx context.Context, + environmentNamespace string, + oc *opseventdomain.OpsCount, +) error { + opsCountStorage := v2os.NewOpsCountStorage(w.mysqlClient) + if err := opsCountStorage.UpsertOpsCount(ctx, environmentNamespace, oc); err != nil { + w.logger.Error("Failed to upsert ops count", zap.Error(err), + zap.String("autoOpsRuleId", oc.AutoOpsRuleId), + zap.String("clauseId", oc.ClauseId), + zap.String("environmentNamespace", environmentNamespace)) + return err + } + return nil +} diff --git a/pkg/opsevent/batch/job/count_watcher_test.go b/pkg/opsevent/batch/job/count_watcher_test.go new file mode 100644 index 000000000..af9c25a37 --- /dev/null +++ b/pkg/opsevent/batch/job/count_watcher_test.go @@ -0,0 +1,309 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package job + +import ( + "context" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/golang/protobuf/ptypes" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + autoopsdomain "github.com/bucketeer-io/bucketeer/pkg/autoops/domain" + environmentdomain "github.com/bucketeer-io/bucketeer/pkg/environment/domain" + eccmock "github.com/bucketeer-io/bucketeer/pkg/eventcounter/client/mock" + ftmock "github.com/bucketeer-io/bucketeer/pkg/feature/client/mock" + "github.com/bucketeer-io/bucketeer/pkg/log" + executormock "github.com/bucketeer-io/bucketeer/pkg/opsevent/batch/executor/mock" + targetstoremock "github.com/bucketeer-io/bucketeer/pkg/opsevent/batch/targetstore/mock" + mysqlmock "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + autoopsproto "github.com/bucketeer-io/bucketeer/proto/autoops" + environmentproto "github.com/bucketeer-io/bucketeer/proto/environment" + ecproto "github.com/bucketeer-io/bucketeer/proto/eventcounter" + ftproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +func TestNewEvaluationRealtimeCountPersister(t *testing.T) { + g := NewCountWatcher(nil, nil, nil, nil, nil) + assert.IsType(t, &countWatcher{}, g) +} + +func newNewCountWatcherWithMock(t *testing.T, mockController *gomock.Controller) *countWatcher { + logger, err := log.NewLogger() + require.NoError(t, err) + return &countWatcher{ + mysqlClient: mysqlmock.NewMockClient(mockController), + environmentLister: 
targetstoremock.NewMockEnvironmentLister(mockController), + autoOpsRuleLister: targetstoremock.NewMockAutoOpsRuleLister(mockController), + eventCounterClient: eccmock.NewMockClient(mockController), + featureClient: ftmock.NewMockClient(mockController), + autoOpsExecutor: executormock.NewMockAutoOpsExecutor(mockController), + logger: logger, + opts: &options{ + timeout: time.Minute, + }, + } +} + +func TestRunCountWatcher(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*testing.T, *countWatcher) + expectedErr error + }{ + "error: GetFeature fails": { + setup: func(t *testing.T, w *countWatcher) { + w.environmentLister.(*targetstoremock.MockEnvironmentLister).EXPECT().GetEnvironments(gomock.Any()).Return( + []*environmentdomain.Environment{ + {Environment: &environmentproto.Environment{Id: "ns0", Namespace: "ns0"}}, + }, + ) + oerc1, _ := newOpsEventRateClauses(t) + c1, err := ptypes.MarshalAny(oerc1) + require.NoError(t, err) + w.autoOpsRuleLister.(*targetstoremock.MockAutoOpsRuleLister).EXPECT().GetAutoOpsRules(gomock.Any(), "ns0").Return( + []*autoopsdomain.AutoOpsRule{ + {AutoOpsRule: &autoopsproto.AutoOpsRule{ + Id: "id-0", + FeatureId: "fid-0", + Clauses: []*autoopsproto.Clause{{Clause: c1}}, + }}, + }, + ) + w.featureClient.(*ftmock.MockClient).EXPECT().GetFeature(gomock.Any(), gomock.Any()).Return( + nil, status.Errorf(codes.Internal, "test")) + }, + expectedErr: status.Errorf(codes.Internal, "test"), + }, + "error: GetEvaluationRealtimeCount fails": { + setup: func(t *testing.T, w *countWatcher) { + w.environmentLister.(*targetstoremock.MockEnvironmentLister).EXPECT().GetEnvironments(gomock.Any()).Return( + []*environmentdomain.Environment{ + {Environment: &environmentproto.Environment{Id: "ns0", Namespace: "ns0"}}, + }, + ) + oerc1, _ := newOpsEventRateClauses(t) + c1, err := ptypes.MarshalAny(oerc1) + require.NoError(t, err) + 
w.autoOpsRuleLister.(*targetstoremock.MockAutoOpsRuleLister).EXPECT().GetAutoOpsRules(gomock.Any(), "ns0").Return( + []*autoopsdomain.AutoOpsRule{ + {AutoOpsRule: &autoopsproto.AutoOpsRule{ + Id: "id-0", + FeatureId: "fid-0", + Clauses: []*autoopsproto.Clause{{Clause: c1}}, + }}, + }, + ) + w.eventCounterClient.(*eccmock.MockClient).EXPECT().GetEvaluationCountV2(gomock.Any(), gomock.Any()).Return( + nil, status.Errorf(codes.NotFound, "test")) + w.featureClient.(*ftmock.MockClient).EXPECT().GetFeature(gomock.Any(), gomock.Any()).Return( + &ftproto.GetFeatureResponse{ + Feature: &ftproto.Feature{ + Version: 1, + }, + }, nil) + }, + expectedErr: status.Errorf(codes.NotFound, "test"), + }, + "error: GetOpsRealtimeVariationCount fails": { + setup: func(t *testing.T, w *countWatcher) { + w.environmentLister.(*targetstoremock.MockEnvironmentLister).EXPECT().GetEnvironments(gomock.Any()).Return( + []*environmentdomain.Environment{ + {Environment: &environmentproto.Environment{Id: "ns0", Namespace: "ns0"}}, + }, + ) + oerc1, _ := newOpsEventRateClauses(t) + c1, err := ptypes.MarshalAny(oerc1) + require.NoError(t, err) + w.autoOpsRuleLister.(*targetstoremock.MockAutoOpsRuleLister).EXPECT().GetAutoOpsRules(gomock.Any(), "ns0").Return( + []*autoopsdomain.AutoOpsRule{ + {AutoOpsRule: &autoopsproto.AutoOpsRule{ + Id: "id-0", + FeatureId: "fid-0", + Clauses: []*autoopsproto.Clause{{Clause: c1}}, + }}, + }, + ) + w.eventCounterClient.(*eccmock.MockClient).EXPECT().GetEvaluationCountV2(gomock.Any(), gomock.Any()).Return( + &ecproto.GetEvaluationCountV2Response{Count: &ecproto.EvaluationCount{ + RealtimeCounts: []*ecproto.VariationCount{{VariationId: "vid1", UserCount: 1}}, + }}, nil) + w.eventCounterClient.(*eccmock.MockClient).EXPECT().GetGoalCountV2(gomock.Any(), gomock.Any()).Return( + nil, status.Errorf(codes.NotFound, "test")) + w.featureClient.(*ftmock.MockClient).EXPECT().GetFeature(gomock.Any(), gomock.Any()).Return( + &ftproto.GetFeatureResponse{ + Feature: 
&ftproto.Feature{ + Version: 1, + }, + }, nil) + }, + expectedErr: status.Errorf(codes.NotFound, "test"), + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + s := newNewCountWatcherWithMock(t, mockController) + if p.setup != nil { + p.setup(t, s) + } + err := s.Run(context.Background()) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestCountWatcherAssessRule(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + opsEventRateClause *autoopsproto.OpsEventRateClause + evaluationCount *ecproto.VariationCount + opsCount *ecproto.VariationCount + expected bool + }{ + "GREATER_OR_EQUAL: false: not enough count": { + opsEventRateClause: &autoopsproto.OpsEventRateClause{ + VariationId: "vid1", + GoalId: "gid1", + MinCount: int64(5), + ThreadsholdRate: float64(0.5), + Operator: autoopsproto.OpsEventRateClause_GREATER_OR_EQUAL, + }, + evaluationCount: &ecproto.VariationCount{UserCount: 10}, + opsCount: &ecproto.VariationCount{UserCount: 4}, + expected: false, + }, + "GREATER_OR_EQUAL: false: less than": { + opsEventRateClause: &autoopsproto.OpsEventRateClause{ + VariationId: "vid1", + GoalId: "gid1", + MinCount: int64(5), + ThreadsholdRate: float64(0.5), + Operator: autoopsproto.OpsEventRateClause_GREATER_OR_EQUAL, + }, + evaluationCount: &ecproto.VariationCount{UserCount: 11}, + opsCount: &ecproto.VariationCount{UserCount: 5}, + expected: false, + }, + "GREATER_OR_EQUAL: true: equal": { + opsEventRateClause: &autoopsproto.OpsEventRateClause{ + VariationId: "vid1", + GoalId: "gid1", + MinCount: int64(5), + ThreadsholdRate: float64(0.5), + Operator: autoopsproto.OpsEventRateClause_GREATER_OR_EQUAL, + }, + evaluationCount: &ecproto.VariationCount{UserCount: 10}, + opsCount: &ecproto.VariationCount{UserCount: 5}, + expected: true, + }, + "GREATER_OR_EQUAL: true: greater": { + opsEventRateClause: &autoopsproto.OpsEventRateClause{ + VariationId: "vid1", + 
GoalId: "gid1", + MinCount: int64(5), + ThreadsholdRate: float64(0.5), + Operator: autoopsproto.OpsEventRateClause_GREATER_OR_EQUAL, + }, + evaluationCount: &ecproto.VariationCount{UserCount: 10}, + opsCount: &ecproto.VariationCount{UserCount: 6}, + expected: true, + }, + "LESS_OR_EQUAL: false: not enough count": { + opsEventRateClause: &autoopsproto.OpsEventRateClause{ + VariationId: "vid1", + GoalId: "gid1", + MinCount: int64(5), + ThreadsholdRate: float64(0.5), + Operator: autoopsproto.OpsEventRateClause_LESS_OR_EQUAL, + }, + evaluationCount: &ecproto.VariationCount{UserCount: 10}, + opsCount: &ecproto.VariationCount{UserCount: 4}, + expected: false, + }, + "LESS_OR_EQUAL: false: greater than": { + opsEventRateClause: &autoopsproto.OpsEventRateClause{ + VariationId: "vid1", + GoalId: "gid1", + MinCount: int64(5), + ThreadsholdRate: float64(0.5), + Operator: autoopsproto.OpsEventRateClause_LESS_OR_EQUAL, + }, + evaluationCount: &ecproto.VariationCount{UserCount: 10}, + opsCount: &ecproto.VariationCount{UserCount: 6}, + expected: false, + }, + "LESS_OR_EQUAL: true: equal": { + opsEventRateClause: &autoopsproto.OpsEventRateClause{ + VariationId: "vid1", + GoalId: "gid1", + MinCount: int64(5), + ThreadsholdRate: float64(0.5), + Operator: autoopsproto.OpsEventRateClause_LESS_OR_EQUAL, + }, + evaluationCount: &ecproto.VariationCount{UserCount: 10}, + opsCount: &ecproto.VariationCount{UserCount: 5}, + expected: true, + }, + "LESS_OR_EQUAL: true: less": { + opsEventRateClause: &autoopsproto.OpsEventRateClause{ + VariationId: "vid1", + GoalId: "gid1", + MinCount: int64(5), + ThreadsholdRate: float64(0.5), + Operator: autoopsproto.OpsEventRateClause_LESS_OR_EQUAL, + }, + evaluationCount: &ecproto.VariationCount{UserCount: 11}, + opsCount: &ecproto.VariationCount{UserCount: 5}, + expected: true, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + s := newNewCountWatcherWithMock(t, mockController) + actual := s.assessRule(p.opsEventRateClause, 
p.evaluationCount, p.opsCount) + assert.Equal(t, p.expected, actual) + }) + } +} + +func newOpsEventRateClauses(t *testing.T) (*autoopsproto.OpsEventRateClause, *autoopsproto.OpsEventRateClause) { + t.Helper() + oerc1 := &autoopsproto.OpsEventRateClause{ + VariationId: "vid1", + GoalId: "gid1", + MinCount: int64(10), + ThreadsholdRate: float64(0.5), + Operator: autoopsproto.OpsEventRateClause_GREATER_OR_EQUAL, + } + oerc2 := &autoopsproto.OpsEventRateClause{ + VariationId: "vid1", + GoalId: "gid2", + MinCount: int64(10), + ThreadsholdRate: float64(0.5), + Operator: autoopsproto.OpsEventRateClause_GREATER_OR_EQUAL, + } + return oerc1, oerc2 +} diff --git a/pkg/opsevent/batch/job/datetime_watcher.go b/pkg/opsevent/batch/job/datetime_watcher.go new file mode 100644 index 000000000..789876204 --- /dev/null +++ b/pkg/opsevent/batch/job/datetime_watcher.go @@ -0,0 +1,114 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package job + +import ( + "context" + "time" + + "go.uber.org/zap" + + autoopsdomain "github.com/bucketeer-io/bucketeer/pkg/autoops/domain" + environmentdomain "github.com/bucketeer-io/bucketeer/pkg/environment/domain" + "github.com/bucketeer-io/bucketeer/pkg/job" + "github.com/bucketeer-io/bucketeer/pkg/opsevent/batch/executor" + "github.com/bucketeer-io/bucketeer/pkg/opsevent/batch/targetstore" + autoopsproto "github.com/bucketeer-io/bucketeer/proto/autoops" +) + +type datetimeWatcher struct { + environmentLister targetstore.EnvironmentLister + autoOpsRuleLister targetstore.AutoOpsRuleLister + autoOpsExecutor executor.AutoOpsExecutor + opts *options + logger *zap.Logger +} + +func NewDatetimeWatcher( + targetStore targetstore.TargetStore, + autoOpsExecutor executor.AutoOpsExecutor, + opts ...Option) job.Job { + + dopts := &options{ + timeout: 5 * time.Minute, + logger: zap.NewNop(), + } + for _, opt := range opts { + opt(dopts) + } + return &datetimeWatcher{ + environmentLister: targetStore, + autoOpsRuleLister: targetStore, + autoOpsExecutor: autoOpsExecutor, + opts: dopts, + logger: dopts.logger.Named("datetime-watcher"), + } +} + +func (w *datetimeWatcher) Run(ctx context.Context) (lastErr error) { + ctx, cancel := context.WithTimeout(ctx, w.opts.timeout) + defer cancel() + environments := w.environmentLister.GetEnvironments(ctx) + for _, env := range environments { + autoOpsRules := w.autoOpsRuleLister.GetAutoOpsRules(ctx, env.Namespace) + for _, a := range autoOpsRules { + asmt, err := w.assessAutoOpsRule(ctx, env, a) + if err != nil { + lastErr = err + } + if !asmt { + continue + } + if err = w.autoOpsExecutor.Execute(ctx, env.Namespace, a.Id); err != nil { + lastErr = err + } + } + } + return +} + +func (w *datetimeWatcher) assessAutoOpsRule( + ctx context.Context, + env *environmentdomain.Environment, + a *autoopsdomain.AutoOpsRule, +) (bool, error) { + datetimeClauses, err := a.ExtractDatetimeClauses() + if err != nil { + w.logger.Error("Failed to 
extract datetime clauses", zap.Error(err), + zap.String("environmentNamespace", env.Namespace), + zap.String("featureId", a.FeatureId), + zap.String("autoOpsRuleId", a.Id), + ) + return false, err + } + var lastErr error + nowTimestamp := time.Now().Unix() + for _, c := range datetimeClauses { + if asmt := w.assessRule(c, nowTimestamp); asmt { + w.logger.Info("Clause satisfies condition", + zap.String("environmentNamespace", env.Namespace), + zap.String("featureId", a.FeatureId), + zap.String("autoOpsRuleId", a.Id), + zap.Any("datetimeClause", c), + ) + return true, nil + } + } + return false, lastErr +} + +func (w *datetimeWatcher) assessRule(datetimeClause *autoopsproto.DatetimeClause, nowTimestamp int64) bool { + return datetimeClause.Time <= nowTimestamp +} diff --git a/pkg/opsevent/batch/job/datetime_watcher_test.go b/pkg/opsevent/batch/job/datetime_watcher_test.go new file mode 100644 index 000000000..a6872f7d5 --- /dev/null +++ b/pkg/opsevent/batch/job/datetime_watcher_test.go @@ -0,0 +1,153 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package job + +import ( + "context" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/golang/protobuf/ptypes" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + autoopsdomain "github.com/bucketeer-io/bucketeer/pkg/autoops/domain" + environmentdomain "github.com/bucketeer-io/bucketeer/pkg/environment/domain" + "github.com/bucketeer-io/bucketeer/pkg/log" + executormock "github.com/bucketeer-io/bucketeer/pkg/opsevent/batch/executor/mock" + targetstoremock "github.com/bucketeer-io/bucketeer/pkg/opsevent/batch/targetstore/mock" + autoopsproto "github.com/bucketeer-io/bucketeer/proto/autoops" + environmentproto "github.com/bucketeer-io/bucketeer/proto/environment" +) + +func TestNewDatetimeWatcher(t *testing.T) { + w := NewDatetimeWatcher(nil, nil) + assert.IsType(t, &datetimeWatcher{}, w) +} + +func newNewDatetimeWatcherWithMock(t *testing.T, mockController *gomock.Controller) *datetimeWatcher { + logger, err := log.NewLogger() + require.NoError(t, err) + return &datetimeWatcher{ + environmentLister: targetstoremock.NewMockEnvironmentLister(mockController), + autoOpsRuleLister: targetstoremock.NewMockAutoOpsRuleLister(mockController), + autoOpsExecutor: executormock.NewMockAutoOpsExecutor(mockController), + logger: logger, + opts: &options{ + timeout: time.Minute, + }, + } +} + +func TestRunDatetimeWatcher(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*datetimeWatcher) + expectedErr error + }{ + "success: assess: false": { + setup: func(w *datetimeWatcher) { + w.environmentLister.(*targetstoremock.MockEnvironmentLister).EXPECT().GetEnvironments(gomock.Any()).Return( + []*environmentdomain.Environment{ + {Environment: &environmentproto.Environment{Id: "ns0", Namespace: "ns0"}}, + }, + ) + dc := &autoopsproto.DatetimeClause{Time: time.Now().AddDate(0, 0, 1).Unix()} + c, err := ptypes.MarshalAny(dc) + 
require.NoError(t, err) + w.autoOpsRuleLister.(*targetstoremock.MockAutoOpsRuleLister).EXPECT().GetAutoOpsRules(gomock.Any(), "ns0").Return( + []*autoopsdomain.AutoOpsRule{ + {AutoOpsRule: &autoopsproto.AutoOpsRule{ + Id: "id-0", + FeatureId: "fid-0", + Clauses: []*autoopsproto.Clause{{Clause: c}}, + }}, + }, + ) + }, + expectedErr: nil, + }, + "success: assess: true": { + setup: func(w *datetimeWatcher) { + w.environmentLister.(*targetstoremock.MockEnvironmentLister).EXPECT().GetEnvironments(gomock.Any()).Return( + []*environmentdomain.Environment{ + {Environment: &environmentproto.Environment{Id: "ns0", Namespace: "ns0"}}, + }, + ) + dc := &autoopsproto.DatetimeClause{Time: time.Now().Unix()} + c, err := ptypes.MarshalAny(dc) + require.NoError(t, err) + w.autoOpsRuleLister.(*targetstoremock.MockAutoOpsRuleLister).EXPECT().GetAutoOpsRules(gomock.Any(), "ns0").Return( + []*autoopsdomain.AutoOpsRule{ + {AutoOpsRule: &autoopsproto.AutoOpsRule{ + Id: "id-0", + FeatureId: "fid-0", + Clauses: []*autoopsproto.Clause{{Clause: c}}, + }}, + }, + ) + w.autoOpsExecutor.(*executormock.MockAutoOpsExecutor).EXPECT().Execute(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + w := newNewDatetimeWatcherWithMock(t, mockController) + if p.setup != nil { + p.setup(w) + } + err := w.Run(context.Background()) + assert.Equal(t, p.expectedErr, err) + }) + } +} +func TestDatetimeWatcherAssessRule(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + datetimeClause *autoopsproto.DatetimeClause + nowTimestamp int64 + expected bool + }{ + "false": { + datetimeClause: &autoopsproto.DatetimeClause{ + Time: 1000000001, + }, + nowTimestamp: 1000000000, + expected: false, + }, + "true": { + datetimeClause: &autoopsproto.DatetimeClause{ + Time: 1000000000, + }, + nowTimestamp: 1000000000, + expected: 
true, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + w := newNewDatetimeWatcherWithMock(t, mockController) + actual := w.assessRule(p.datetimeClause, p.nowTimestamp) + assert.Equal(t, p.expected, actual) + }) + } +} diff --git a/pkg/opsevent/batch/job/job.go b/pkg/opsevent/batch/job/job.go new file mode 100644 index 000000000..43e10650b --- /dev/null +++ b/pkg/opsevent/batch/job/job.go @@ -0,0 +1,49 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package job + +import ( + "time" + + "go.uber.org/zap" + + "github.com/bucketeer-io/bucketeer/pkg/metrics" +) + +type options struct { + timeout time.Duration + metrics metrics.Registerer + logger *zap.Logger +} + +type Option func(*options) + +func WithTimeout(timeout time.Duration) Option { + return func(opts *options) { + opts.timeout = timeout + } +} + +func WithMetrics(r metrics.Registerer) Option { + return func(opts *options) { + opts.metrics = r + } +} + +func WithLogger(l *zap.Logger) Option { + return func(opts *options) { + opts.logger = l + } +} diff --git a/pkg/opsevent/batch/targetstore/BUILD.bazel b/pkg/opsevent/batch/targetstore/BUILD.bazel new file mode 100644 index 000000000..a9aeefb54 --- /dev/null +++ b/pkg/opsevent/batch/targetstore/BUILD.bazel @@ -0,0 +1,45 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "metrics.go", + "targetstore.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/opsevent/batch/targetstore", + visibility = ["//visibility:public"], + deps = [ + "//pkg/autoops/client:go_default_library", + "//pkg/autoops/domain:go_default_library", + "//pkg/environment/client:go_default_library", + "//pkg/environment/domain:go_default_library", + "//pkg/metrics:go_default_library", + "//proto/autoops:go_default_library", + "//proto/environment:go_default_library", + "@com_github_prometheus_client_golang//prometheus:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["targetstore_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/autoops/client/mock:go_default_library", + "//pkg/autoops/domain:go_default_library", + "//pkg/environment/client/mock:go_default_library", + "//pkg/environment/domain:go_default_library", + "//pkg/log:go_default_library", + "//pkg/metrics:go_default_library", + "//proto/autoops:go_default_library", + 
"//proto/environment:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_golang_protobuf//ptypes:go_default_library_gen", + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + "@io_bazel_rules_go//proto/wkt:any_go_proto", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + ], +) diff --git a/pkg/opsevent/batch/targetstore/metrics.go b/pkg/opsevent/batch/targetstore/metrics.go new file mode 100644 index 000000000..f7451b597 --- /dev/null +++ b/pkg/opsevent/batch/targetstore/metrics.go @@ -0,0 +1,40 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package targetstore + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/bucketeer-io/bucketeer/pkg/metrics" +) + +const ( + typeEnvironment = "Environment" + typeAutoOpsRule = "AutoOpsRule" +) + +var ( + itemsGauge = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: "bucketeer", + Subsystem: "ops_event_batch", + Name: "target_object", + Help: "Total number of target objects", + }, []string{"type"}) +) + +func registerMetrics(r metrics.Registerer) { + r.MustRegister(itemsGauge) +} diff --git a/pkg/opsevent/batch/targetstore/mock/BUILD.bazel b/pkg/opsevent/batch/targetstore/mock/BUILD.bazel new file mode 100644 index 000000000..9c9ecf13b --- /dev/null +++ b/pkg/opsevent/batch/targetstore/mock/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["targetstore.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/opsevent/batch/targetstore/mock", + visibility = ["//visibility:public"], + deps = [ + "//pkg/autoops/domain:go_default_library", + "//pkg/environment/domain:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + ], +) diff --git a/pkg/opsevent/batch/targetstore/mock/targetstore.go b/pkg/opsevent/batch/targetstore/mock/targetstore.go new file mode 100644 index 000000000..6f3ef97b9 --- /dev/null +++ b/pkg/opsevent/batch/targetstore/mock/targetstore.go @@ -0,0 +1,164 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: targetstore.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + domain "github.com/bucketeer-io/bucketeer/pkg/autoops/domain" + domain0 "github.com/bucketeer-io/bucketeer/pkg/environment/domain" +) + +// MockEnvironmentLister is a mock of EnvironmentLister interface. 
+type MockEnvironmentLister struct { + ctrl *gomock.Controller + recorder *MockEnvironmentListerMockRecorder +} + +// MockEnvironmentListerMockRecorder is the mock recorder for MockEnvironmentLister. +type MockEnvironmentListerMockRecorder struct { + mock *MockEnvironmentLister +} + +// NewMockEnvironmentLister creates a new mock instance. +func NewMockEnvironmentLister(ctrl *gomock.Controller) *MockEnvironmentLister { + mock := &MockEnvironmentLister{ctrl: ctrl} + mock.recorder = &MockEnvironmentListerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockEnvironmentLister) EXPECT() *MockEnvironmentListerMockRecorder { + return m.recorder +} + +// GetEnvironments mocks base method. +func (m *MockEnvironmentLister) GetEnvironments(ctx context.Context) []*domain0.Environment { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEnvironments", ctx) + ret0, _ := ret[0].([]*domain0.Environment) + return ret0 +} + +// GetEnvironments indicates an expected call of GetEnvironments. +func (mr *MockEnvironmentListerMockRecorder) GetEnvironments(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEnvironments", reflect.TypeOf((*MockEnvironmentLister)(nil).GetEnvironments), ctx) +} + +// MockAutoOpsRuleLister is a mock of AutoOpsRuleLister interface. +type MockAutoOpsRuleLister struct { + ctrl *gomock.Controller + recorder *MockAutoOpsRuleListerMockRecorder +} + +// MockAutoOpsRuleListerMockRecorder is the mock recorder for MockAutoOpsRuleLister. +type MockAutoOpsRuleListerMockRecorder struct { + mock *MockAutoOpsRuleLister +} + +// NewMockAutoOpsRuleLister creates a new mock instance. 
+func NewMockAutoOpsRuleLister(ctrl *gomock.Controller) *MockAutoOpsRuleLister { + mock := &MockAutoOpsRuleLister{ctrl: ctrl} + mock.recorder = &MockAutoOpsRuleListerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockAutoOpsRuleLister) EXPECT() *MockAutoOpsRuleListerMockRecorder { + return m.recorder +} + +// GetAutoOpsRules mocks base method. +func (m *MockAutoOpsRuleLister) GetAutoOpsRules(ctx context.Context, environmentNamespace string) []*domain.AutoOpsRule { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAutoOpsRules", ctx, environmentNamespace) + ret0, _ := ret[0].([]*domain.AutoOpsRule) + return ret0 +} + +// GetAutoOpsRules indicates an expected call of GetAutoOpsRules. +func (mr *MockAutoOpsRuleListerMockRecorder) GetAutoOpsRules(ctx, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAutoOpsRules", reflect.TypeOf((*MockAutoOpsRuleLister)(nil).GetAutoOpsRules), ctx, environmentNamespace) +} + +// MockTargetStore is a mock of TargetStore interface. +type MockTargetStore struct { + ctrl *gomock.Controller + recorder *MockTargetStoreMockRecorder +} + +// MockTargetStoreMockRecorder is the mock recorder for MockTargetStore. +type MockTargetStoreMockRecorder struct { + mock *MockTargetStore +} + +// NewMockTargetStore creates a new mock instance. +func NewMockTargetStore(ctrl *gomock.Controller) *MockTargetStore { + mock := &MockTargetStore{ctrl: ctrl} + mock.recorder = &MockTargetStoreMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockTargetStore) EXPECT() *MockTargetStoreMockRecorder { + return m.recorder +} + +// GetAutoOpsRules mocks base method. 
+func (m *MockTargetStore) GetAutoOpsRules(ctx context.Context, environmentNamespace string) []*domain.AutoOpsRule { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAutoOpsRules", ctx, environmentNamespace) + ret0, _ := ret[0].([]*domain.AutoOpsRule) + return ret0 +} + +// GetAutoOpsRules indicates an expected call of GetAutoOpsRules. +func (mr *MockTargetStoreMockRecorder) GetAutoOpsRules(ctx, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAutoOpsRules", reflect.TypeOf((*MockTargetStore)(nil).GetAutoOpsRules), ctx, environmentNamespace) +} + +// GetEnvironments mocks base method. +func (m *MockTargetStore) GetEnvironments(ctx context.Context) []*domain0.Environment { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEnvironments", ctx) + ret0, _ := ret[0].([]*domain0.Environment) + return ret0 +} + +// GetEnvironments indicates an expected call of GetEnvironments. +func (mr *MockTargetStoreMockRecorder) GetEnvironments(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEnvironments", reflect.TypeOf((*MockTargetStore)(nil).GetEnvironments), ctx) +} + +// Run mocks base method. +func (m *MockTargetStore) Run() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Run") +} + +// Run indicates an expected call of Run. +func (mr *MockTargetStoreMockRecorder) Run() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockTargetStore)(nil).Run)) +} + +// Stop mocks base method. +func (m *MockTargetStore) Stop() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Stop") +} + +// Stop indicates an expected call of Stop. 
+func (mr *MockTargetStoreMockRecorder) Stop() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockTargetStore)(nil).Stop)) +} diff --git a/pkg/opsevent/batch/targetstore/targetstore.go b/pkg/opsevent/batch/targetstore/targetstore.go new file mode 100644 index 000000000..32722743d --- /dev/null +++ b/pkg/opsevent/batch/targetstore/targetstore.go @@ -0,0 +1,273 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package targetstore + +import ( + "context" + "sync" + "sync/atomic" + "time" + + "go.uber.org/zap" + + autoopsservice "github.com/bucketeer-io/bucketeer/pkg/autoops/client" + autoopsdomain "github.com/bucketeer-io/bucketeer/pkg/autoops/domain" + environmentservice "github.com/bucketeer-io/bucketeer/pkg/environment/client" + environmentdomain "github.com/bucketeer-io/bucketeer/pkg/environment/domain" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + autoopsproto "github.com/bucketeer-io/bucketeer/proto/autoops" + environmentproto "github.com/bucketeer-io/bucketeer/proto/environment" +) + +const ( + listRequestSize = 500 +) + +type EnvironmentLister interface { + GetEnvironments(ctx context.Context) []*environmentdomain.Environment +} + +type AutoOpsRuleLister interface { + GetAutoOpsRules(ctx context.Context, environmentNamespace string) []*autoopsdomain.AutoOpsRule +} + +type TargetStore interface { + Run() + Stop() + EnvironmentLister + AutoOpsRuleLister +} + +type options struct { + refreshInterval time.Duration + metrics metrics.Registerer + logger *zap.Logger +} + +type Option func(*options) + +func WithRefreshInterval(interval time.Duration) Option { + return func(opts *options) { + opts.refreshInterval = interval + } +} + +func WithMetrics(r metrics.Registerer) Option { + return func(opts *options) { + opts.metrics = r + } +} + +func WithLogger(l *zap.Logger) Option { + return func(opts *options) { + opts.logger = l + } +} + +type targetStore struct { + timeNow func() time.Time + environmentClient environmentservice.Client + autoOpsClient autoopsservice.Client + autoOpsRules map[string][]*autoopsdomain.AutoOpsRule + autoOpsRulesMtx sync.Mutex + environments atomic.Value + opts *options + logger *zap.Logger + ctx context.Context + cancel func() + doneCh chan struct{} +} + +func NewTargetStore( + environmentClient environmentservice.Client, + autoOpsClient 
autoopsservice.Client, + opts ...Option, +) TargetStore { + dopts := &options{ + refreshInterval: 2 * time.Minute, + logger: zap.NewNop(), + } + for _, opt := range opts { + opt(dopts) + } + if dopts.metrics != nil { + registerMetrics(dopts.metrics) + } + ctx, cancel := context.WithCancel(context.Background()) + store := &targetStore{ + timeNow: time.Now, + environmentClient: environmentClient, + autoOpsClient: autoOpsClient, + autoOpsRules: make(map[string][]*autoopsdomain.AutoOpsRule), + opts: dopts, + logger: dopts.logger.Named("targetstore"), + ctx: ctx, + cancel: cancel, + doneCh: make(chan struct{}), + } + store.environments.Store(make([]*environmentdomain.Environment, 0)) + return store +} + +func (s *targetStore) Run() { + s.logger.Info("Run started") + defer close(s.doneCh) + s.refresh() + ticker := time.NewTicker(s.opts.refreshInterval) + defer func() { + ticker.Stop() + s.logger.Info("Run finished") + }() + for { + select { + case <-ticker.C: + s.refresh() + case <-s.ctx.Done(): + return + } + } +} + +func (s *targetStore) Stop() { + s.logger.Info("ops-event: transformer: targetstore: stop started") + s.cancel() + <-s.doneCh + s.logger.Info("ops-event: transformer: targetstore: stop finished") +} + +func (s *targetStore) refresh() { + ctx, cancel := context.WithTimeout(s.ctx, time.Minute) + defer cancel() + err := s.refreshEnvironments(ctx) + if err != nil { + s.logger.Error("Failed to refresh environments", zap.Error(err)) + } + err = s.refreshAutoOpsRules(ctx) + if err != nil { + s.logger.Error("Failed to refresh auto ops rules", zap.Error(err)) + } +} + +func (s *targetStore) refreshEnvironments(ctx context.Context) error { + pbEnvironments, err := s.listEnvironments(ctx) + if err != nil { + s.logger.Error("Failed to list environments", zap.Error(err)) + return err + } + domainEnvironments := []*environmentdomain.Environment{} + for _, e := range pbEnvironments { + domainEnvironments = append(domainEnvironments, 
&environmentdomain.Environment{Environment: e}) + } + s.environments.Store(domainEnvironments) + itemsGauge.WithLabelValues(typeEnvironment).Set(float64(len(domainEnvironments))) + return nil +} + +func (s *targetStore) listEnvironments(ctx context.Context) ([]*environmentproto.Environment, error) { + environments := []*environmentproto.Environment{} + cursor := "" + for { + resp, err := s.environmentClient.ListEnvironments(ctx, &environmentproto.ListEnvironmentsRequest{ + PageSize: listRequestSize, + Cursor: cursor, + }) + if err != nil { + return nil, err + } + environments = append(environments, resp.Environments...) + environmentSize := len(resp.Environments) + if environmentSize == 0 || environmentSize < listRequestSize { + return environments, nil + } + cursor = resp.Cursor + } +} + +func (s *targetStore) refreshAutoOpsRules(ctx context.Context) error { + autoOpsRulesMap := make(map[string][]*autoopsdomain.AutoOpsRule) + environments := s.GetEnvironments(ctx) + for _, e := range environments { + autoOpsRules, err := s.listTargetAutoOpsRules(ctx, e.Namespace) + if err != nil { + s.logger.Error("Failed to list auto ops rules", zap.Error(err), zap.String("environmentNamespace", e.Namespace)) + continue + } + s.logger.Debug("Succeeded to list auto ops rules", zap.String("environmentNamespace", e.Namespace)) + autoOpsRulesMap[e.Namespace] = autoOpsRules + } + s.autoOpsRulesMtx.Lock() + s.autoOpsRules = autoOpsRulesMap + s.autoOpsRulesMtx.Unlock() + itemsGauge.WithLabelValues(typeAutoOpsRule).Set(float64(len(autoOpsRulesMap))) + return nil +} + +func (s *targetStore) listTargetAutoOpsRules( + ctx context.Context, + environmentNamespace string, +) ([]*autoopsdomain.AutoOpsRule, error) { + pbAutoOpsRules, err := s.listAutoOpsRules(ctx, environmentNamespace) + if err != nil { + return nil, err + } + targetAutoOpsRules := []*autoopsdomain.AutoOpsRule{} + for _, a := range pbAutoOpsRules { + da := &autoopsdomain.AutoOpsRule{AutoOpsRule: a} + if da.AlreadyTriggered() { + 
continue + } + targetAutoOpsRules = append(targetAutoOpsRules, da) + } + return targetAutoOpsRules, nil +} + +func (s *targetStore) listAutoOpsRules( + ctx context.Context, + environmentNamespace string, +) ([]*autoopsproto.AutoOpsRule, error) { + autoOpsRules := []*autoopsproto.AutoOpsRule{} + cursor := "" + for { + resp, err := s.autoOpsClient.ListAutoOpsRules(ctx, &autoopsproto.ListAutoOpsRulesRequest{ + EnvironmentNamespace: environmentNamespace, + PageSize: listRequestSize, + Cursor: cursor, + }) + if err != nil { + return nil, err + } + autoOpsRules = append(autoOpsRules, resp.AutoOpsRules...) + autoOpsRulesSize := len(resp.AutoOpsRules) + if autoOpsRulesSize == 0 || autoOpsRulesSize < listRequestSize { + return autoOpsRules, nil + } + cursor = resp.Cursor + } +} + +func (s *targetStore) GetEnvironments(ctx context.Context) []*environmentdomain.Environment { + return s.environments.Load().([]*environmentdomain.Environment) +} + +func (s *targetStore) GetAutoOpsRules(ctx context.Context, environmentNamespace string) []*autoopsdomain.AutoOpsRule { + s.autoOpsRulesMtx.Lock() + autoOpsRules, ok := s.autoOpsRules[environmentNamespace] + s.autoOpsRulesMtx.Unlock() + if !ok { + return nil + } + return autoOpsRules +} diff --git a/pkg/opsevent/batch/targetstore/targetstore_test.go b/pkg/opsevent/batch/targetstore/targetstore_test.go new file mode 100644 index 000000000..534eb2d29 --- /dev/null +++ b/pkg/opsevent/batch/targetstore/targetstore_test.go @@ -0,0 +1,279 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package targetstore + +import ( + "context" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/ptypes/any" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + autoopsclientmock "github.com/bucketeer-io/bucketeer/pkg/autoops/client/mock" + autoopsdomain "github.com/bucketeer-io/bucketeer/pkg/autoops/domain" + environmentclientmock "github.com/bucketeer-io/bucketeer/pkg/environment/client/mock" + environmentdomain "github.com/bucketeer-io/bucketeer/pkg/environment/domain" + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + autoopsproto "github.com/bucketeer-io/bucketeer/proto/autoops" + environmentproto "github.com/bucketeer-io/bucketeer/proto/environment" +) + +func TestWithRefreshInterval(t *testing.T) { + t.Parallel() + dur := time.Second + f := WithRefreshInterval(dur) + opt := &options{} + f(opt) + assert.Equal(t, dur, opt.refreshInterval) +} + +func TestWithMetrics(t *testing.T) { + t.Parallel() + metrics := metrics.NewMetrics( + 9999, + "/metrics", + ) + reg := metrics.DefaultRegisterer() + f := WithMetrics(reg) + opt := &options{} + f(opt) + assert.Equal(t, reg, opt.metrics) +} + +func TestWithLogger(t *testing.T) { + t.Parallel() + logger, err := log.NewLogger() + require.NoError(t, err) + f := WithLogger(logger) + opt := &options{} + f(opt) + assert.Equal(t, logger, opt.logger) +} + +func TestNewTargetStore(t *testing.T) { + g := NewTargetStore(nil, nil) + assert.IsType(t, &targetStore{}, g) +} + +func TestListEnvironments(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*targetStore) + expected []*environmentproto.Environment + 
expectedErr error + }{ + "enable": { + setup: func(ts *targetStore) { + ts.environmentClient.(*environmentclientmock.MockClient).EXPECT().ListEnvironments(gomock.Any(), gomock.Any()).Return( + &environmentproto.ListEnvironmentsResponse{Environments: []*environmentproto.Environment{ + {Id: "ns0", Namespace: "ns0"}, + }}, nil) + }, + expected: []*environmentproto.Environment{{Id: "ns0", Namespace: "ns0"}}, + }, + "list environments fails": { + setup: func(ts *targetStore) { + ts.environmentClient.(*environmentclientmock.MockClient).EXPECT().ListEnvironments(gomock.Any(), gomock.Any()).Return( + nil, status.Errorf(codes.Unknown, "test")) + }, + expectedErr: status.Errorf(codes.Unknown, "test"), + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + s := newTargetStoreWithMock(t, mockController) + p.setup(s) + actual, err := s.listEnvironments(context.Background()) + assert.Equal(t, p.expected, actual) + if err != nil { + assert.Equal(t, p.expectedErr, err) + } + }) + } +} + +func TestListAutoOpsRules(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*targetStore) + environmentNamespace string + expected []*autoopsproto.AutoOpsRule + expectedErr error + }{ + "success": { + setup: func(ts *targetStore) { + ts.autoOpsClient.(*autoopsclientmock.MockClient).EXPECT().ListAutoOpsRules(gomock.Any(), gomock.Any()).Return( + &autoopsproto.ListAutoOpsRulesResponse{AutoOpsRules: []*autoopsproto.AutoOpsRule{ + { + Id: "id-0", + FeatureId: "fid-0", + Clauses: []*autoopsproto.Clause{}, + }, + }}, nil) + }, + expected: []*autoopsproto.AutoOpsRule{{Id: "id-0", FeatureId: "fid-0", Clauses: []*autoopsproto.Clause{}}}, + }, + "failure": { + setup: func(ts *targetStore) { + ts.autoOpsClient.(*autoopsclientmock.MockClient).EXPECT().ListAutoOpsRules(gomock.Any(), gomock.Any()).Return( + nil, status.Errorf(codes.Unknown, "test")) + }, + expectedErr: 
status.Errorf(codes.Unknown, "test"), + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + s := newTargetStoreWithMock(t, mockController) + p.setup(s) + actual, err := s.listAutoOpsRules(context.Background(), p.environmentNamespace) + assert.Equal(t, p.expected, actual) + if err != nil { + assert.Equal(t, p.expectedErr, err) + } + }) + } +} + +func TestRefreshEnvironments(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*targetStore) + expected []*environmentdomain.Environment + }{ + "enable": { + setup: func(ts *targetStore) { + ts.environmentClient.(*environmentclientmock.MockClient).EXPECT().ListEnvironments(gomock.Any(), gomock.Any()).Return( + &environmentproto.ListEnvironmentsResponse{Environments: []*environmentproto.Environment{ + {Id: "ns0", Namespace: "ns0"}, + {Id: "ns1", Namespace: "ns1"}, + }}, nil) + }, + expected: []*environmentdomain.Environment{ + {Environment: &environmentproto.Environment{Id: "ns0", Namespace: "ns0"}}, + {Environment: &environmentproto.Environment{Id: "ns1", Namespace: "ns1"}}, + }, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + s := newTargetStoreWithMock(t, mockController) + p.setup(s) + _ = s.refreshEnvironments(context.Background()) + actual := s.GetEnvironments(context.Background()) + assert.Equal(t, p.expected, actual) + }) + } +} + +func TestRefreshAutoOpsRules(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + c1, c2, c3 := newOpsEventRateClauses(t) + patterns := map[string]struct { + setup func(*targetStore) + expected []*autoopsdomain.AutoOpsRule + }{ + "enable": { + setup: func(ts *targetStore) { + ts.environments.Store([]*environmentdomain.Environment{{Environment: &environmentproto.Environment{Id: "ns0", Namespace: "ns0"}}}) + 
ts.autoOpsClient.(*autoopsclientmock.MockClient).EXPECT().ListAutoOpsRules(gomock.Any(), gomock.Any()).Return( + &autoopsproto.ListAutoOpsRulesResponse{AutoOpsRules: []*autoopsproto.AutoOpsRule{ + {Id: "id-0", FeatureId: "fid-0", + Clauses: []*autoopsproto.Clause{{Clause: c1}, {Clause: c2}, {Clause: c3}}, + }, + {Id: "id-1", FeatureId: "fid-1", TriggeredAt: time.Now().Unix(), + Clauses: []*autoopsproto.Clause{{Clause: c1}, {Clause: c2}, {Clause: c3}}, + }, + }}, nil) + }, + expected: []*autoopsdomain.AutoOpsRule{ + {AutoOpsRule: &autoopsproto.AutoOpsRule{Id: "id-0", FeatureId: "fid-0", + Clauses: []*autoopsproto.Clause{{Clause: c1}, {Clause: c2}, {Clause: c3}}, + }}, + }, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + s := newTargetStoreWithMock(t, mockController) + p.setup(s) + _ = s.refreshAutoOpsRules(context.Background()) + actual := s.GetAutoOpsRules(context.Background(), "ns0") + assert.Equal(t, p.expected, actual) + }) + } +} + +func newTargetStoreWithMock(t *testing.T, mockController *gomock.Controller) *targetStore { + logger, err := log.NewLogger() + require.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + store := &targetStore{ + environmentClient: environmentclientmock.NewMockClient(mockController), + autoOpsClient: autoopsclientmock.NewMockClient(mockController), + autoOpsRules: make(map[string][]*autoopsdomain.AutoOpsRule), + logger: logger, + ctx: ctx, + cancel: cancel, + doneCh: make(chan struct{}), + } + store.environments.Store(make([]*environmentdomain.Environment, 0)) + return store +} + +func newOpsEventRateClauses(t *testing.T) (*any.Any, *any.Any, *any.Any) { + c1, err := ptypes.MarshalAny(&autoopsproto.OpsEventRateClause{ + VariationId: "vid1", + GoalId: "gid1", + MinCount: int64(10), + ThreadsholdRate: float64(0.5), + Operator: autoopsproto.OpsEventRateClause_GREATER_OR_EQUAL, + }) + require.NoError(t, err) + c2, err := ptypes.MarshalAny(&autoopsproto.OpsEventRateClause{ + 
VariationId: "vid1", + GoalId: "gid2", + MinCount: int64(10), + ThreadsholdRate: float64(0.5), + Operator: autoopsproto.OpsEventRateClause_GREATER_OR_EQUAL, + }) + require.NoError(t, err) + c3, err := ptypes.MarshalAny(&autoopsproto.OpsEventRateClause{ + VariationId: "vid1", + GoalId: "gid1", + MinCount: int64(10), + ThreadsholdRate: float64(0.5), + Operator: autoopsproto.OpsEventRateClause_GREATER_OR_EQUAL, + }) + return c1, c2, c3 +} diff --git a/pkg/opsevent/cmd/batch/BUILD.bazel b/pkg/opsevent/cmd/batch/BUILD.bazel new file mode 100644 index 000000000..218b9bf3a --- /dev/null +++ b/pkg/opsevent/cmd/batch/BUILD.bazel @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["batch.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/opsevent/cmd/batch", + visibility = ["//visibility:public"], + deps = [ + "//pkg/autoops/client:go_default_library", + "//pkg/cli:go_default_library", + "//pkg/environment/client:go_default_library", + "//pkg/eventcounter/client:go_default_library", + "//pkg/feature/client:go_default_library", + "//pkg/health:go_default_library", + "//pkg/job:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/opsevent/batch/executor:go_default_library", + "//pkg/opsevent/batch/job:go_default_library", + "//pkg/opsevent/batch/targetstore:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/rpc/client:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "@in_gopkg_alecthomas_kingpin_v2//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/opsevent/cmd/batch/batch.go b/pkg/opsevent/cmd/batch/batch.go new file mode 100644 index 000000000..25b18a89c --- /dev/null +++ b/pkg/opsevent/cmd/batch/batch.go @@ -0,0 +1,288 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package batch + +import ( + "context" + "os" + "time" + + "go.uber.org/zap" + kingpin "gopkg.in/alecthomas/kingpin.v2" + + autoopsclient "github.com/bucketeer-io/bucketeer/pkg/autoops/client" + "github.com/bucketeer-io/bucketeer/pkg/cli" + environmentclient "github.com/bucketeer-io/bucketeer/pkg/environment/client" + ecclient "github.com/bucketeer-io/bucketeer/pkg/eventcounter/client" + ftclient "github.com/bucketeer-io/bucketeer/pkg/feature/client" + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/job" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/opsevent/batch/executor" + opseventjob "github.com/bucketeer-io/bucketeer/pkg/opsevent/batch/job" + "github.com/bucketeer-io/bucketeer/pkg/opsevent/batch/targetstore" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" +) + +const command = "batch" + +type batch struct { + *kingpin.CmdClause + port *int + project *string + mysqlUser *string + mysqlPass *string + mysqlHost *string + mysqlPort *int + mysqlDBName *string + environmentService *string + autoOpsService *string + eventCounterService *string + featureService *string + certPath *string + keyPath *string + serviceTokenPath *string + refreshInterval *time.Duration + scheduleCountWatcher *string + scheduleDatetimeWatcher *string +} + +func RegisterCommand(r cli.CommandRegistry, p cli.ParentCommand) cli.Command { + cmd := p.Command(command, "Start batch 
layer") + batch := &batch{ + CmdClause: cmd, + port: cmd.Flag("port", "Port to bind to.").Default("9090").Int(), + project: cmd.Flag("project", "Google Cloud project name.").Required().String(), + mysqlUser: cmd.Flag("mysql-user", "MySQL user.").Required().String(), + mysqlPass: cmd.Flag("mysql-pass", "MySQL password.").Required().String(), + mysqlHost: cmd.Flag("mysql-host", "MySQL host.").Required().String(), + mysqlPort: cmd.Flag("mysql-port", "MySQL port.").Required().Int(), + mysqlDBName: cmd.Flag("mysql-db-name", "MySQL database name.").Required().String(), + environmentService: cmd.Flag( + "environment-service", + "bucketeer-environment-service address.", + ).Default("environment:9090").String(), + autoOpsService: cmd.Flag( + "auto-ops-service", + "bucketeer-auto-ops-service address.", + ).Default("auto-ops:9090").String(), + eventCounterService: cmd.Flag( + "event-counter-service", + "bucketeer-event-counter-service address.", + ).Default("event-counter-server:9090").String(), + featureService: cmd.Flag( + "feature-service", + "bucketeer-feature-service address.", + ).Default("feature:9090").String(), + certPath: cmd.Flag("cert", "Path to TLS certificate.").Required().String(), + keyPath: cmd.Flag("key", "Path to TLS key.").Required().String(), + serviceTokenPath: cmd.Flag("service-token", "Path to service token.").Required().String(), + refreshInterval: cmd.Flag( + "refresh-interval", + "Interval between refreshing target objects.", + ).Default("10m").Duration(), + scheduleCountWatcher: cmd.Flag( + "schedule-count-watcher", + "Cron style schedule for count watcher.", + ).Default("0,10,20,30,40,50 * * * * *").String(), + scheduleDatetimeWatcher: cmd.Flag( + "schedule-datetime-watcher", + "Cron style schedule for datetime watcher.", + ).Default("0,10,20,30,40,50 * * * * *").String(), + } + r.RegisterCommand(batch) + return batch +} + +func (b *batch) Run(ctx context.Context, metrics metrics.Metrics, logger *zap.Logger) error { + *b.serviceTokenPath = 
b.insertTelepresenceMountRoot(*b.serviceTokenPath) + *b.keyPath = b.insertTelepresenceMountRoot(*b.keyPath) + *b.certPath = b.insertTelepresenceMountRoot(*b.certPath) + + registerer := metrics.DefaultRegisterer() + + mysqlClient, err := b.createMySQLClient(ctx, registerer, logger) + if err != nil { + return err + } + defer mysqlClient.Close() + + creds, err := client.NewPerRPCCredentials(*b.serviceTokenPath) + if err != nil { + return err + } + + clientOptions := []client.Option{ + client.WithPerRPCCredentials(creds), + client.WithDialTimeout(30 * time.Second), + client.WithBlock(), + client.WithMetrics(registerer), + client.WithLogger(logger), + } + + environmentClient, err := environmentclient.NewClient(*b.environmentService, *b.certPath, clientOptions...) + if err != nil { + return err + } + defer environmentClient.Close() + + autoOpsClient, err := autoopsclient.NewClient(*b.autoOpsService, *b.certPath, clientOptions...) + if err != nil { + return err + } + defer autoOpsClient.Close() + + eventCounterClient, err := ecclient.NewClient(*b.eventCounterService, *b.certPath, clientOptions...) 
+ if err != nil { + return err + } + defer eventCounterClient.Close() + + featureClient, err := ftclient.NewClient(*b.featureService, *b.certPath, + client.WithPerRPCCredentials(creds), + client.WithDialTimeout(30*time.Second), + client.WithBlock(), + client.WithMetrics(registerer), + client.WithLogger(logger), + ) + if err != nil { + return err + } + defer featureClient.Close() + + targetStore := targetstore.NewTargetStore( + environmentClient, + autoOpsClient, + targetstore.WithRefreshInterval(*b.refreshInterval), + targetstore.WithMetrics(registerer), + targetstore.WithLogger(logger), + ) + defer targetStore.Stop() + go targetStore.Run() + + autoOpsExecutor := executor.NewAutoOpsExecutor( + autoOpsClient, + executor.WithLogger(logger), + ) + + manager := job.NewManager(registerer, "ops_event_batch", logger) + defer manager.Stop() + err = b.registerJobs(manager, + mysqlClient, + targetStore, + eventCounterClient, + featureClient, + autoOpsExecutor, + logger, + ) + if err != nil { + return err + } + go manager.Run() // nolint:errcheck + + healthChecker := health.NewGrpcChecker( + health.WithTimeout(time.Second), + health.WithCheck("metrics", metrics.Check), + ) + go healthChecker.Run(ctx) + + server := rpc.NewServer(healthChecker, *b.certPath, *b.keyPath, + rpc.WithPort(*b.port), + rpc.WithMetrics(registerer), + rpc.WithLogger(logger), + rpc.WithHandler("/health", healthChecker), + ) + defer server.Stop(10 * time.Second) + go server.Run() + + <-ctx.Done() + return nil +} + +func (b *batch) registerJobs( + m *job.Manager, + mysqlClient mysql.Client, + targetStore targetstore.TargetStore, + eventCounterClient ecclient.Client, + featureClient ftclient.Client, + autoOpsExecutor executor.AutoOpsExecutor, + logger *zap.Logger) error { + + jobs := []struct { + name string + cron string + job job.Job + }{ + { + cron: *b.scheduleCountWatcher, + name: "ops_event_count_watcher", + job: opseventjob.NewCountWatcher( + mysqlClient, + targetStore, + eventCounterClient, + 
featureClient, + autoOpsExecutor, + opseventjob.WithTimeout(5*time.Minute), + opseventjob.WithLogger(logger)), + }, + { + cron: *b.scheduleDatetimeWatcher, + name: "datetime_watcher", + job: opseventjob.NewDatetimeWatcher( + targetStore, + autoOpsExecutor, + opseventjob.WithTimeout(5*time.Minute), + opseventjob.WithLogger(logger)), + }, + } + for i := range jobs { + if err := m.AddCronJob(jobs[i].name, jobs[i].cron, jobs[i].job); err != nil { + logger.Error("Failed to add cron job", + zap.String("name", jobs[i].name), + zap.String("cron", jobs[i].cron), + zap.Error(err)) + return err + } + } + return nil +} + +// for telepresence --swap-deployment +func (b *batch) insertTelepresenceMountRoot(path string) string { + volumeRoot := os.Getenv("TELEPRESENCE_ROOT") + if volumeRoot == "" { + return path + } + return volumeRoot + path +} + +func (b *batch) createMySQLClient( + ctx context.Context, + registerer metrics.Registerer, + logger *zap.Logger, +) (mysql.Client, error) { + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + return mysql.NewClient( + ctx, + *b.mysqlUser, *b.mysqlPass, *b.mysqlHost, + *b.mysqlPort, + *b.mysqlDBName, + mysql.WithLogger(logger), + mysql.WithMetrics(registerer), + ) +} diff --git a/pkg/opsevent/domain/BUILD.bazel b/pkg/opsevent/domain/BUILD.bazel new file mode 100644 index 000000000..26f143523 --- /dev/null +++ b/pkg/opsevent/domain/BUILD.bazel @@ -0,0 +1,19 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["ops_count.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/opsevent/domain", + visibility = ["//visibility:public"], + deps = [ + "//proto/autoops:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["ops_count_test.go"], + embed = [":go_default_library"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/opsevent/domain", + deps = ["@com_github_stretchr_testify//assert:go_default_library"], +) 
diff --git a/pkg/opsevent/domain/ops_count.go b/pkg/opsevent/domain/ops_count.go new file mode 100644 index 000000000..0bc076848 --- /dev/null +++ b/pkg/opsevent/domain/ops_count.go @@ -0,0 +1,44 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package domain + +import ( + "time" + + autoopsproto "github.com/bucketeer-io/bucketeer/proto/autoops" +) + +type OpsCount struct { + *autoopsproto.OpsCount +} + +func NewOpsCount( + featureID string, + autoOpsRuleID string, + clauseID string, + opsEventCount, evaluationCount int64, +) *OpsCount { + return &OpsCount{ + OpsCount: &autoopsproto.OpsCount{ + Id: clauseID, + FeatureId: featureID, + AutoOpsRuleId: autoOpsRuleID, + ClauseId: clauseID, + UpdatedAt: time.Now().Unix(), + OpsEventCount: opsEventCount, + EvaluationCount: evaluationCount, + }, + } +} diff --git a/pkg/opsevent/domain/ops_count_test.go b/pkg/opsevent/domain/ops_count_test.go new file mode 100644 index 000000000..21c448f50 --- /dev/null +++ b/pkg/opsevent/domain/ops_count_test.go @@ -0,0 +1,33 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package domain + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewOpsCount(t *testing.T) { + t.Parallel() + oc := NewOpsCount("fid", "aid", "cid", int64(1), int64(2)) + assert.Equal(t, "fid", oc.FeatureId) + assert.Equal(t, "cid", oc.Id) + assert.Equal(t, "aid", oc.AutoOpsRuleId) + assert.Equal(t, "cid", oc.ClauseId) + assert.Equal(t, int64(1), oc.OpsEventCount) + assert.Equal(t, int64(2), oc.EvaluationCount) + assert.NotEqual(t, int64(0), oc.UpdatedAt) +} diff --git a/pkg/opsevent/storage/v2/BUILD.bazel b/pkg/opsevent/storage/v2/BUILD.bazel new file mode 100644 index 000000000..dd0a89d30 --- /dev/null +++ b/pkg/opsevent/storage/v2/BUILD.bazel @@ -0,0 +1,27 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["ops_count.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/opsevent/storage/v2", + visibility = ["//visibility:public"], + deps = [ + "//pkg/opsevent/domain:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//proto/autoops:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["ops_count_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/opsevent/domain:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//pkg/storage/v2/mysql/mock:go_default_library", + "//proto/autoops:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + ], +) diff --git 
a/pkg/opsevent/storage/v2/mock/BUILD.bazel b/pkg/opsevent/storage/v2/mock/BUILD.bazel new file mode 100644 index 000000000..ece67f1af --- /dev/null +++ b/pkg/opsevent/storage/v2/mock/BUILD.bazel @@ -0,0 +1,14 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["ops_count.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/opsevent/storage/v2/mock", + visibility = ["//visibility:public"], + deps = [ + "//pkg/opsevent/domain:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//proto/autoops:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + ], +) diff --git a/pkg/opsevent/storage/v2/mock/ops_count.go b/pkg/opsevent/storage/v2/mock/ops_count.go new file mode 100644 index 000000000..4f4d7767f --- /dev/null +++ b/pkg/opsevent/storage/v2/mock/ops_count.go @@ -0,0 +1,69 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: ops_count.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + domain "github.com/bucketeer-io/bucketeer/pkg/opsevent/domain" + mysql "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + autoops "github.com/bucketeer-io/bucketeer/proto/autoops" +) + +// MockOpsCountStorage is a mock of OpsCountStorage interface. +type MockOpsCountStorage struct { + ctrl *gomock.Controller + recorder *MockOpsCountStorageMockRecorder +} + +// MockOpsCountStorageMockRecorder is the mock recorder for MockOpsCountStorage. +type MockOpsCountStorageMockRecorder struct { + mock *MockOpsCountStorage +} + +// NewMockOpsCountStorage creates a new mock instance. +func NewMockOpsCountStorage(ctrl *gomock.Controller) *MockOpsCountStorage { + mock := &MockOpsCountStorage{ctrl: ctrl} + mock.recorder = &MockOpsCountStorageMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. 
+func (m *MockOpsCountStorage) EXPECT() *MockOpsCountStorageMockRecorder { + return m.recorder +} + +// ListOpsCounts mocks base method. +func (m *MockOpsCountStorage) ListOpsCounts(ctx context.Context, whereParts []mysql.WherePart, orders []*mysql.Order, limit, offset int) ([]*autoops.OpsCount, int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListOpsCounts", ctx, whereParts, orders, limit, offset) + ret0, _ := ret[0].([]*autoops.OpsCount) + ret1, _ := ret[1].(int) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// ListOpsCounts indicates an expected call of ListOpsCounts. +func (mr *MockOpsCountStorageMockRecorder) ListOpsCounts(ctx, whereParts, orders, limit, offset interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListOpsCounts", reflect.TypeOf((*MockOpsCountStorage)(nil).ListOpsCounts), ctx, whereParts, orders, limit, offset) +} + +// UpsertOpsCount mocks base method. +func (m *MockOpsCountStorage) UpsertOpsCount(ctx context.Context, environmentNamespace string, oc *domain.OpsCount) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpsertOpsCount", ctx, environmentNamespace, oc) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpsertOpsCount indicates an expected call of UpsertOpsCount. +func (mr *MockOpsCountStorageMockRecorder) UpsertOpsCount(ctx, environmentNamespace, oc interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpsertOpsCount", reflect.TypeOf((*MockOpsCountStorage)(nil).UpsertOpsCount), ctx, environmentNamespace, oc) +} diff --git a/pkg/opsevent/storage/v2/ops_count.go b/pkg/opsevent/storage/v2/ops_count.go new file mode 100644 index 000000000..9910129b0 --- /dev/null +++ b/pkg/opsevent/storage/v2/ops_count.go @@ -0,0 +1,134 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package v2 + +import ( + "context" + "fmt" + + "github.com/bucketeer-io/bucketeer/pkg/opsevent/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + proto "github.com/bucketeer-io/bucketeer/proto/autoops" +) + +type OpsCountStorage interface { + UpsertOpsCount(ctx context.Context, environmentNamespace string, oc *domain.OpsCount) error + ListOpsCounts( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, + ) ([]*proto.OpsCount, int, error) +} + +type opsCountStorage struct { + qe mysql.QueryExecer +} + +func NewOpsCountStorage(qe mysql.QueryExecer) OpsCountStorage { + return &opsCountStorage{qe: qe} +} + +func (s *opsCountStorage) UpsertOpsCount(ctx context.Context, environmentNamespace string, oc *domain.OpsCount) error { + query := ` + INSERT INTO ops_count ( + id, + auto_ops_rule_id, + clause_id, + updated_at, + ops_event_count, + evaluation_count, + feature_id, + environment_namespace + ) VALUES ( + ?, ?, ?, ?, ?, ?, ?, ? 
+ ) ON DUPLICATE KEY UPDATE + auto_ops_rule_id = VALUES(auto_ops_rule_id), + clause_id = VALUES(clause_id), + updated_at = VALUES(updated_at), + ops_event_count = VALUES(ops_event_count), + evaluation_count = VALUES(evaluation_count), + feature_id = VALUES(feature_id) + ` + _, err := s.qe.ExecContext( + ctx, + query, + oc.Id, + oc.AutoOpsRuleId, + oc.ClauseId, + oc.UpdatedAt, + oc.OpsEventCount, + oc.EvaluationCount, + oc.FeatureId, + environmentNamespace, + ) + if err != nil { + return err + } + return nil +} + +func (s *opsCountStorage) ListOpsCounts( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, +) ([]*proto.OpsCount, int, error) { + whereSQL, whereArgs := mysql.ConstructWhereSQLString(whereParts) + orderBySQL := mysql.ConstructOrderBySQLString(orders) + limitOffsetSQL := mysql.ConstructLimitOffsetSQLString(limit, offset) + query := fmt.Sprintf(` + SELECT + id, + auto_ops_rule_id, + clause_id, + updated_at, + ops_event_count, + evaluation_count, + feature_id + FROM + ops_count + %s %s %s + `, whereSQL, orderBySQL, limitOffsetSQL, + ) + rows, err := s.qe.QueryContext(ctx, query, whereArgs...) 
+ if err != nil { + return nil, 0, err + } + defer rows.Close() + opsCounts := make([]*proto.OpsCount, 0, limit) + for rows.Next() { + opsCount := proto.OpsCount{} + err := rows.Scan( + &opsCount.Id, + &opsCount.AutoOpsRuleId, + &opsCount.ClauseId, + &opsCount.UpdatedAt, + &opsCount.OpsEventCount, + &opsCount.EvaluationCount, + &opsCount.FeatureId, + ) + if err != nil { + return nil, 0, err + } + opsCounts = append(opsCounts, &opsCount) + } + if rows.Err() != nil { + return nil, 0, err + } + nextOffset := offset + len(opsCounts) + return opsCounts, nextOffset, nil +} diff --git a/pkg/opsevent/storage/v2/ops_count_test.go b/pkg/opsevent/storage/v2/ops_count_test.go new file mode 100644 index 000000000..634a1d153 --- /dev/null +++ b/pkg/opsevent/storage/v2/ops_count_test.go @@ -0,0 +1,144 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v2 + +import ( + "context" + "errors" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/opsevent/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + proto "github.com/bucketeer-io/bucketeer/proto/autoops" +) + +func TestNewOpsEventStorage(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + db := NewOpsCountStorage(mock.NewMockQueryExecer(mockController)) + assert.IsType(t, &opsCountStorage{}, db) +} + +func TestUpsertOpsCount(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := []struct { + setup func(*opsCountStorage) + input *domain.OpsCount + environmentNamespace string + expectedErr error + }{ + { + setup: func(s *opsCountStorage) { + result := mock.NewMockResult(mockController) + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(result, nil) + }, + input: &domain.OpsCount{OpsCount: &proto.OpsCount{}}, + environmentNamespace: "ns", + expectedErr: nil, + }, + } + for _, p := range patterns { + storage := newOpsCountStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + err := storage.UpsertOpsCount(context.Background(), p.environmentNamespace, p.input) + assert.Equal(t, p.expectedErr, err) + } +} + +func TestListOpsCounts(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := []struct { + setup func(*opsCountStorage) + whereParts []mysql.WherePart + orders []*mysql.Order + limit int + offset int + expected []*proto.OpsCount + expectedCursor int + expectedErr error + }{ + { + setup: func(s *opsCountStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, 
errors.New("error")) + }, + whereParts: nil, + orders: nil, + limit: 0, + offset: 0, + expected: nil, + expectedCursor: 0, + expectedErr: errors.New("error"), + }, + { + setup: func(s *opsCountStorage) { + rows := mock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + }, + whereParts: []mysql.WherePart{ + mysql.NewFilter("num", ">=", 5), + }, + orders: []*mysql.Order{ + mysql.NewOrder("id", mysql.OrderDirectionAsc), + }, + limit: 10, + offset: 5, + expected: []*proto.OpsCount{}, + expectedCursor: 5, + expectedErr: nil, + }, + } + for _, p := range patterns { + storage := newOpsCountStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + opsCounts, cursor, err := storage.ListOpsCounts( + context.Background(), + p.whereParts, + p.orders, + p.limit, + p.offset, + ) + assert.Equal(t, p.expected, opsCounts) + assert.Equal(t, p.expectedCursor, cursor) + assert.Equal(t, p.expectedErr, err) + } +} + +func newOpsCountStorageWithMock(t *testing.T, mockController *gomock.Controller) *opsCountStorage { + t.Helper() + return &opsCountStorage{mock.NewMockQueryExecer(mockController)} +} diff --git a/pkg/pubsub/BUILD.bazel b/pkg/pubsub/BUILD.bazel new file mode 100644 index 000000000..aa470238c --- /dev/null +++ b/pkg/pubsub/BUILD.bazel @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["pubsub.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/pubsub", + visibility = ["//visibility:public"], + deps = [ + "//pkg/backoff:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/pubsub/publisher:go_default_library", + "//pkg/pubsub/puller:go_default_library", + "@com_google_cloud_go_pubsub//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], 
+) diff --git a/pkg/pubsub/publisher/BUILD.bazel b/pkg/pubsub/publisher/BUILD.bazel new file mode 100644 index 000000000..f043d062d --- /dev/null +++ b/pkg/pubsub/publisher/BUILD.bazel @@ -0,0 +1,19 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "metrics.go", + "publisher.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher", + visibility = ["//visibility:public"], + deps = [ + "//pkg/metrics:go_default_library", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_prometheus_client_golang//prometheus:go_default_library", + "@com_google_cloud_go_pubsub//:go_default_library", + "@org_golang_google_protobuf//runtime/protoiface:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/pubsub/publisher/metrics.go b/pkg/pubsub/publisher/metrics.go new file mode 100644 index 000000000..37b5bc25a --- /dev/null +++ b/pkg/pubsub/publisher/metrics.go @@ -0,0 +1,81 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package publisher + +import ( + "context" + "sync" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/bucketeer-io/bucketeer/pkg/metrics" +) + +const ( + methodPublish = "Publish" + methodPublishMulti = "PublishMulti" + + codeOK = "OK" + codeBadMessage = "BadMessage" + codeDeadlineExceeded = "DeadlineExceeded" + codeCanceled = "Canceled" + codeUnknown = "Unknown" +) + +var ( + registerOnce sync.Once + + handledCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "pubsub_publisher", + Name: "handled_total", + Help: "Total number of handled messages", + }, []string{"topic", "method", "code"}, + ) + + handledHistogram = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "bucketeer", + Subsystem: "pubsub_publisher", + Name: "handled_seconds", + Help: "Histogram of message handling duration (seconds)", + Buckets: prometheus.DefBuckets, + }, []string{"topic", "method", "code"}) +) + +func convertErrorToCode(err error) string { + switch err { + case nil: + return codeOK + case ErrBadMessage: + return codeBadMessage + case context.DeadlineExceeded: + return codeDeadlineExceeded + case context.Canceled: + return codeCanceled + default: + return codeUnknown + } +} + +func registerMetrics(r metrics.Registerer) { + registerOnce.Do(func() { + r.MustRegister( + handledCounter, + handledHistogram, + ) + }) +} diff --git a/pkg/pubsub/publisher/mock/BUILD.bazel b/pkg/pubsub/publisher/mock/BUILD.bazel new file mode 100644 index 000000000..ff33fa2e4 --- /dev/null +++ b/pkg/pubsub/publisher/mock/BUILD.bazel @@ -0,0 +1,12 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["publisher.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher/mock", + visibility = ["//visibility:public"], + deps = [ + "//pkg/pubsub/publisher:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + ], +) diff --git 
a/pkg/pubsub/publisher/mock/publisher.go b/pkg/pubsub/publisher/mock/publisher.go new file mode 100644 index 000000000..260881b00 --- /dev/null +++ b/pkg/pubsub/publisher/mock/publisher.go @@ -0,0 +1,152 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: publisher.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + publisher "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" +) + +// MockMessage is a mock of Message interface. +type MockMessage struct { + ctrl *gomock.Controller + recorder *MockMessageMockRecorder +} + +// MockMessageMockRecorder is the mock recorder for MockMessage. +type MockMessageMockRecorder struct { + mock *MockMessage +} + +// NewMockMessage creates a new mock instance. +func NewMockMessage(ctrl *gomock.Controller) *MockMessage { + mock := &MockMessage{ctrl: ctrl} + mock.recorder = &MockMessageMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockMessage) EXPECT() *MockMessageMockRecorder { + return m.recorder +} + +// GetId mocks base method. +func (m *MockMessage) GetId() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetId") + ret0, _ := ret[0].(string) + return ret0 +} + +// GetId indicates an expected call of GetId. +func (mr *MockMessageMockRecorder) GetId() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetId", reflect.TypeOf((*MockMessage)(nil).GetId)) +} + +// ProtoMessage mocks base method. +func (m *MockMessage) ProtoMessage() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "ProtoMessage") +} + +// ProtoMessage indicates an expected call of ProtoMessage. 
+func (mr *MockMessageMockRecorder) ProtoMessage() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProtoMessage", reflect.TypeOf((*MockMessage)(nil).ProtoMessage)) +} + +// Reset mocks base method. +func (m *MockMessage) Reset() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Reset") +} + +// Reset indicates an expected call of Reset. +func (mr *MockMessageMockRecorder) Reset() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reset", reflect.TypeOf((*MockMessage)(nil).Reset)) +} + +// String mocks base method. +func (m *MockMessage) String() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "String") + ret0, _ := ret[0].(string) + return ret0 +} + +// String indicates an expected call of String. +func (mr *MockMessageMockRecorder) String() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "String", reflect.TypeOf((*MockMessage)(nil).String)) +} + +// MockPublisher is a mock of Publisher interface. +type MockPublisher struct { + ctrl *gomock.Controller + recorder *MockPublisherMockRecorder +} + +// MockPublisherMockRecorder is the mock recorder for MockPublisher. +type MockPublisherMockRecorder struct { + mock *MockPublisher +} + +// NewMockPublisher creates a new mock instance. +func NewMockPublisher(ctrl *gomock.Controller) *MockPublisher { + mock := &MockPublisher{ctrl: ctrl} + mock.recorder = &MockPublisherMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockPublisher) EXPECT() *MockPublisherMockRecorder { + return m.recorder +} + +// Publish mocks base method. +func (m *MockPublisher) Publish(ctx context.Context, msg publisher.Message) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Publish", ctx, msg) + ret0, _ := ret[0].(error) + return ret0 +} + +// Publish indicates an expected call of Publish. 
+func (mr *MockPublisherMockRecorder) Publish(ctx, msg interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Publish", reflect.TypeOf((*MockPublisher)(nil).Publish), ctx, msg) +} + +// PublishMulti mocks base method. +func (m *MockPublisher) PublishMulti(ctx context.Context, messages []publisher.Message) map[string]error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PublishMulti", ctx, messages) + ret0, _ := ret[0].(map[string]error) + return ret0 +} + +// PublishMulti indicates an expected call of PublishMulti. +func (mr *MockPublisherMockRecorder) PublishMulti(ctx, messages interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PublishMulti", reflect.TypeOf((*MockPublisher)(nil).PublishMulti), ctx, messages) +} + +// Stop mocks base method. +func (m *MockPublisher) Stop() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Stop") +} + +// Stop indicates an expected call of Stop. +func (mr *MockPublisherMockRecorder) Stop() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockPublisher)(nil).Stop)) +} diff --git a/pkg/pubsub/publisher/publisher.go b/pkg/pubsub/publisher/publisher.go new file mode 100644 index 000000000..c0e44382b --- /dev/null +++ b/pkg/pubsub/publisher/publisher.go @@ -0,0 +1,153 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package publisher + +import ( + "context" + "errors" + "time" + + "cloud.google.com/go/pubsub" + "github.com/golang/protobuf/proto" // nolint:staticcheck + "go.uber.org/zap" + "google.golang.org/protobuf/runtime/protoiface" + + "github.com/bucketeer-io/bucketeer/pkg/metrics" +) + +const ( + idAttribute = "id" +) + +var ( + ErrBadMessage = errors.New("publisher: bad message") +) + +type Message interface { + GetId() string + protoiface.MessageV1 +} + +type Publisher interface { + Publish(ctx context.Context, msg Message) error + PublishMulti(ctx context.Context, messages []Message) map[string]error + Stop() +} + +type publisher struct { + topic *pubsub.Topic + logger *zap.Logger +} + +type options struct { + metrics metrics.Registerer + logger *zap.Logger +} + +type Option func(*options) + +func WithMetrics(registerer metrics.Registerer) Option { + return func(opts *options) { + opts.metrics = registerer + } +} + +func WithLogger(logger *zap.Logger) Option { + return func(opts *options) { + opts.logger = logger + } +} + +func NewPublisher(topic *pubsub.Topic, opts ...Option) Publisher { + dopts := &options{ + logger: zap.NewNop(), + } + for _, opt := range opts { + opt(dopts) + } + if dopts.metrics != nil { + registerMetrics(dopts.metrics) + } + return &publisher{ + topic: topic, + logger: dopts.logger.Named("publisher"), + } +} + +func (p *publisher) Publish(ctx context.Context, msg Message) (err error) { + startTime := time.Now() + defer func() { + topicID := p.topic.ID() + code := convertErrorToCode(err) + handledCounter.WithLabelValues(topicID, methodPublish, code).Inc() + handledHistogram.WithLabelValues(topicID, methodPublish, code).Observe(time.Since(startTime).Seconds()) + }() + data, err := proto.Marshal(msg) + if err != nil { + p.logger.Error("Failed to marshal 
message", zap.Error(err), zap.Any("message", msg)) + return ErrBadMessage + } + res := p.topic.Publish(ctx, &pubsub.Message{ + Data: data, + Attributes: map[string]string{idAttribute: msg.GetId()}, + }) + _, err = res.Get(ctx) + return +} + +func (p *publisher) PublishMulti(ctx context.Context, messages []Message) (errors map[string]error) { + startTime := time.Now() + defer func() { + topicID := p.topic.ID() + for _, err := range errors { + code := convertErrorToCode(err) + handledCounter.WithLabelValues(topicID, methodPublishMulti, code).Inc() + } + if successes := len(messages) - len(errors); successes > 0 { + handledCounter.WithLabelValues(topicID, methodPublishMulti, codeOK).Add(float64(successes)) + } + histogramCode := codeOK + if len(errors) > 0 { + histogramCode = codeUnknown + } + handledHistogram.WithLabelValues(topicID, methodPublishMulti, histogramCode).Observe(time.Since(startTime).Seconds()) + }() + errors = make(map[string]error) + results := make(map[string]*pubsub.PublishResult, len(messages)) + for _, msg := range messages { + id := msg.GetId() + data, err := proto.Marshal(msg) + if err != nil { + p.logger.Error("Failed to marshal message", zap.Error(err), zap.Any("message", msg)) + errors[id] = ErrBadMessage + continue + } + results[id] = p.topic.Publish(ctx, &pubsub.Message{ + Data: data, + Attributes: map[string]string{idAttribute: id}, + }) + } + for id, result := range results { + if _, err := result.Get(ctx); err != nil { + errors[id] = err + } + } + return +} + +func (p *publisher) Stop() { + p.topic.Stop() +} diff --git a/pkg/pubsub/pubsub.go b/pkg/pubsub/pubsub.go new file mode 100644 index 000000000..f42876f68 --- /dev/null +++ b/pkg/pubsub/pubsub.go @@ -0,0 +1,252 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "context" + "errors" + "time" + + "cloud.google.com/go/pubsub" + "go.uber.org/zap" + + "github.com/bucketeer-io/bucketeer/pkg/backoff" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller" +) + +var ( + ErrInvalidTopic = errors.New("pubsub: invalid topic") +) + +type Client struct { + *pubsub.Client + opts *options + logger *zap.Logger +} + +type options struct { + backoff backoff.Backoff + retries int + metrics metrics.Registerer + logger *zap.Logger +} + +func defaultOptions() *options { + return &options{ + backoff: backoff.NewExponential(time.Second, 20*time.Second), + retries: 3, + logger: zap.NewNop(), + } +} + +type Option func(*options) + +func WithBackoff(bf backoff.Backoff) Option { + return func(opts *options) { + opts.backoff = bf + } +} + +func WithRetries(retries int) Option { + return func(opts *options) { + opts.retries = retries + } +} + +func WithMetrics(registerer metrics.Registerer) Option { + return func(opts *options) { + opts.metrics = registerer + } +} + +func WithLogger(l *zap.Logger) Option { + return func(opts *options) { + opts.logger = l + } +} + +type receiveOptions = pubsub.ReceiveSettings + +type ReceiveOption func(*receiveOptions) + +func WithMaxExtension(d time.Duration) ReceiveOption { + return func(opts *receiveOptions) { + opts.MaxExtension = d + } +} + +func WithMaxOutstandingMessages(n int) ReceiveOption { + return func(opts *receiveOptions) { + 
opts.MaxOutstandingMessages = n + } +} + +func WithMaxOutstandingBytes(b int) ReceiveOption { + return func(opts *receiveOptions) { + opts.MaxOutstandingBytes = b + } +} + +func WithNumGoroutines(n int) ReceiveOption { + return func(opts *receiveOptions) { + opts.NumGoroutines = n + } +} + +type publishOptions = pubsub.PublishSettings + +type PublishOption func(*publishOptions) + +func WithPublishNumGoroutines(n int) PublishOption { + return func(opts *publishOptions) { + opts.NumGoroutines = n + } +} + +func WithPublishTimeout(timeout time.Duration) PublishOption { + return func(opts *publishOptions) { + opts.Timeout = timeout + } +} + +func NewClient(ctx context.Context, project string, opts ...Option) (*Client, error) { + c, err := pubsub.NewClient(ctx, project) + if err != nil { + return nil, err + } + options := defaultOptions() + for _, opt := range opts { + opt(options) + } + return &Client{ + Client: c, + opts: options, + logger: options.logger.Named("pubsub"), + }, nil +} + +func (c *Client) CreatePublisher(topic string, opts ...PublishOption) (publisher.Publisher, error) { + t, err := c.topic(topic) + if err != nil { + c.logger.Error("Failed to create topic", + zap.String("topic", topic), + zap.Error(err)) + return nil, err + } + return c.createPublisher(t, opts...) +} + +func (c *Client) CreatePublisherInProject(topic, project string, opts ...PublishOption) (publisher.Publisher, error) { + t, err := c.topicInProject(topic, project) + if err != nil { + c.logger.Error("Failed to create topic", + zap.String("topic", topic), + zap.String("project", project), + zap.Error(err)) + return nil, err + } + return c.createPublisher(t, opts...) 
+} + +func (c *Client) createPublisher(topic *pubsub.Topic, opts ...PublishOption) (publisher.Publisher, error) { + settings := (publishOptions)(pubsub.DefaultPublishSettings) + for _, opt := range opts { + opt(&settings) + } + topic.PublishSettings = settings + options := []publisher.Option{publisher.WithLogger(c.logger)} + if c.opts.metrics != nil { + options = append(options, publisher.WithMetrics(c.opts.metrics)) + } + return publisher.NewPublisher(topic, options...), nil +} + +func (c *Client) CreatePuller(subscription, topic string, opts ...ReceiveOption) (puller.Puller, error) { + s, err := c.subscription(subscription, topic) + if err != nil { + c.logger.Error("Failed to create puller", + zap.String("subscription", subscription), + zap.String("topic", topic), + zap.Error(err)) + return nil, err + } + options := (receiveOptions)(pubsub.DefaultReceiveSettings) + for _, opt := range opts { + opt(&options) + } + s.ReceiveSettings = options + c.logger.Info("Create a new puller", zap.Any("receiveSettings", options)) + return puller.NewPuller( + s, + puller.WithLogger(c.logger), + ), nil +} + +func (c *Client) topic(id string) (*pubsub.Topic, error) { + topic := c.Client.Topic(id) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + ok, err := topic.Exists(ctx) + if err != nil { + return nil, err + } + if ok { + return topic, nil + } + return nil, ErrInvalidTopic +} + +func (c *Client) topicInProject(topicID, projectID string) (*pubsub.Topic, error) { + topic := c.Client.TopicInProject(topicID, projectID) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + ok, err := topic.Exists(ctx) + if err != nil { + return nil, err + } + if ok { + return topic, nil + } + return nil, ErrInvalidTopic +} + +// TODO: add metrics +func (c *Client) subscription(id, topicID string) (*pubsub.Subscription, error) { + sub := c.Client.Subscription(id) + topic := c.Client.Topic(topicID) + var lastErr error 
+ ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + retry := backoff.NewRetry(ctx, c.opts.retries, c.opts.backoff.Clone()) + for retry.WaitNext() { + ok, err := sub.Exists(ctx) + if err != nil { + continue + } + if ok { + return sub, nil + } + _, err = c.Client.CreateSubscription(ctx, id, pubsub.SubscriptionConfig{ + Topic: topic, + }) + if err == nil { + return sub, nil + } + lastErr = err + } + return nil, lastErr +} diff --git a/pkg/pubsub/puller/BUILD.bazel b/pkg/pubsub/puller/BUILD.bazel new file mode 100644 index 000000000..92844156b --- /dev/null +++ b/pkg/pubsub/puller/BUILD.bazel @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "puller.go", + "rate_limited_puller.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller", + visibility = ["//visibility:public"], + deps = [ + "@com_google_cloud_go_pubsub//:go_default_library", + "@org_golang_x_time//rate:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/pubsub/puller/codes/BUILD.bazel b/pkg/pubsub/puller/codes/BUILD.bazel new file mode 100644 index 000000000..1ad1dc613 --- /dev/null +++ b/pkg/pubsub/puller/codes/BUILD.bazel @@ -0,0 +1,11 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "code_string.go", + "codes.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller/codes", + visibility = ["//visibility:public"], +) diff --git a/pkg/pubsub/puller/codes/code_string.go b/pkg/pubsub/puller/codes/code_string.go new file mode 100644 index 000000000..1e6a89247 --- /dev/null +++ b/pkg/pubsub/puller/codes/code_string.go @@ -0,0 +1,30 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by "stringer -type=Code"; DO NOT EDIT. + +package codes + +import "fmt" + +const _Code_name = "OKDuplicateIDMissingIDBadMessageMissingEntityIDRepeatableErrorNonRepeatableError" + +var _Code_index = [...]uint8{0, 2, 13, 22, 32, 47, 62, 80} + +func (i Code) String() string { + if i >= Code(len(_Code_index)-1) { + return fmt.Sprintf("Code(%d)", i) + } + return _Code_name[_Code_index[i]:_Code_index[i+1]] +} diff --git a/pkg/pubsub/puller/codes/codes.go b/pkg/pubsub/puller/codes/codes.go new file mode 100644 index 000000000..029531928 --- /dev/null +++ b/pkg/pubsub/puller/codes/codes.go @@ -0,0 +1,29 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package codes + +type Code uint32 + +//go:generate stringer -type=Code +const ( + OK Code = 0 + DuplicateID Code = 1 + MissingID Code = 2 + BadMessage Code = 3 + MissingEntityID Code = 4 + RepeatableError Code = 5 + NonRepeatableError Code = 6 + NewID Code = 7 +) diff --git a/pkg/pubsub/puller/mock/BUILD.bazel b/pkg/pubsub/puller/mock/BUILD.bazel new file mode 100644 index 000000000..e07d34511 --- /dev/null +++ b/pkg/pubsub/puller/mock/BUILD.bazel @@ -0,0 +1,15 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "puller.go", + "rate_limited_puller.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller/mock", + visibility = ["//visibility:public"], + deps = [ + "//pkg/pubsub/puller:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + ], +) diff --git a/pkg/pubsub/puller/mock/puller.go b/pkg/pubsub/puller/mock/puller.go new file mode 100644 index 000000000..cd59868fc --- /dev/null +++ b/pkg/pubsub/puller/mock/puller.go @@ -0,0 +1,51 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: puller.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + puller "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller" +) + +// MockPuller is a mock of Puller interface. +type MockPuller struct { + ctrl *gomock.Controller + recorder *MockPullerMockRecorder +} + +// MockPullerMockRecorder is the mock recorder for MockPuller. +type MockPullerMockRecorder struct { + mock *MockPuller +} + +// NewMockPuller creates a new mock instance. +func NewMockPuller(ctrl *gomock.Controller) *MockPuller { + mock := &MockPuller{ctrl: ctrl} + mock.recorder = &MockPullerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. 
+func (m *MockPuller) EXPECT() *MockPullerMockRecorder { + return m.recorder +} + +// Pull mocks base method. +func (m *MockPuller) Pull(arg0 context.Context, arg1 func(context.Context, *puller.Message)) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Pull", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// Pull indicates an expected call of Pull. +func (mr *MockPullerMockRecorder) Pull(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Pull", reflect.TypeOf((*MockPuller)(nil).Pull), arg0, arg1) +} diff --git a/pkg/pubsub/puller/mock/rate_limited_puller.go b/pkg/pubsub/puller/mock/rate_limited_puller.go new file mode 100644 index 000000000..fd5662a47 --- /dev/null +++ b/pkg/pubsub/puller/mock/rate_limited_puller.go @@ -0,0 +1,65 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: rate_limited_puller.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + puller "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller" +) + +// MockRateLimitedPuller is a mock of RateLimitedPuller interface. +type MockRateLimitedPuller struct { + ctrl *gomock.Controller + recorder *MockRateLimitedPullerMockRecorder +} + +// MockRateLimitedPullerMockRecorder is the mock recorder for MockRateLimitedPuller. +type MockRateLimitedPullerMockRecorder struct { + mock *MockRateLimitedPuller +} + +// NewMockRateLimitedPuller creates a new mock instance. +func NewMockRateLimitedPuller(ctrl *gomock.Controller) *MockRateLimitedPuller { + mock := &MockRateLimitedPuller{ctrl: ctrl} + mock.recorder = &MockRateLimitedPullerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockRateLimitedPuller) EXPECT() *MockRateLimitedPullerMockRecorder { + return m.recorder +} + +// MessageCh mocks base method. 
+func (m *MockRateLimitedPuller) MessageCh() <-chan *puller.Message { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MessageCh") + ret0, _ := ret[0].(<-chan *puller.Message) + return ret0 +} + +// MessageCh indicates an expected call of MessageCh. +func (mr *MockRateLimitedPullerMockRecorder) MessageCh() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MessageCh", reflect.TypeOf((*MockRateLimitedPuller)(nil).MessageCh)) +} + +// Run mocks base method. +func (m *MockRateLimitedPuller) Run(arg0 context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Run", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Run indicates an expected call of Run. +func (mr *MockRateLimitedPullerMockRecorder) Run(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockRateLimitedPuller)(nil).Run), arg0) +} diff --git a/pkg/pubsub/puller/puller.go b/pkg/pubsub/puller/puller.go new file mode 100644 index 000000000..8e8292a3c --- /dev/null +++ b/pkg/pubsub/puller/puller.go @@ -0,0 +1,82 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package puller + +import ( + "context" + + "cloud.google.com/go/pubsub" + "go.uber.org/zap" +) + +type Message struct { + ID string + Data []byte + Attributes map[string]string + + Ack func() + Nack func() +} + +type Puller interface { + Pull(context.Context, func(context.Context, *Message)) error +} + +type options struct { + logger *zap.Logger +} + +type Option func(*options) + +func WithLogger(logger *zap.Logger) Option { + return func(opts *options) { + opts.logger = logger + } +} + +type puller struct { + subscription *pubsub.Subscription + logger *zap.Logger +} + +func NewPuller(sub *pubsub.Subscription, opts ...Option) Puller { + dopts := &options{ + logger: zap.NewNop(), + } + for _, opt := range opts { + opt(dopts) + } + return &puller{ + subscription: sub, + logger: dopts.logger.Named("puller"), + } +} + +func (p *puller) Pull(ctx context.Context, f func(context.Context, *Message)) error { + err := p.subscription.Receive(ctx, func(ctx context.Context, msg *pubsub.Message) { + f(ctx, &Message{ + ID: msg.ID, + Data: msg.Data, + Attributes: msg.Attributes, + Ack: msg.Ack, + Nack: msg.Nack}) + }) + if err != nil { + p.logger.Error("Failed to receive message", zap.Error(err)) + return err + } + return nil +} diff --git a/pkg/pubsub/puller/rate_limited_puller.go b/pkg/pubsub/puller/rate_limited_puller.go new file mode 100644 index 000000000..583249f46 --- /dev/null +++ b/pkg/pubsub/puller/rate_limited_puller.go @@ -0,0 +1,59 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package puller + +import ( + "context" + "time" + + "golang.org/x/time/rate" +) + +type RateLimitedPuller interface { + Run(context.Context) error + MessageCh() <-chan *Message +} + +type rateLimitedPuller struct { + puller Puller + msgCh chan *Message + limiter *rate.Limiter +} + +func NewRateLimitedPuller(puller Puller, maxMPS int) RateLimitedPuller { + return &rateLimitedPuller{ + puller: puller, + msgCh: make(chan *Message), + limiter: rate.NewLimiter(rate.Limit(maxMPS), maxMPS), + } +} + +func (p *rateLimitedPuller) Run(ctx context.Context) error { + err := p.puller.Pull(ctx, func(ctx context.Context, msg *Message) { + rv := p.limiter.Reserve() + time.Sleep(rv.Delay()) + select { + case p.msgCh <- msg: + case <-ctx.Done(): + } + }) + close(p.msgCh) + return err +} + +func (p *rateLimitedPuller) MessageCh() <-chan *Message { + return p.msgCh +} diff --git a/pkg/push/api/BUILD.bazel b/pkg/push/api/BUILD.bazel new file mode 100644 index 000000000..4fe897e48 --- /dev/null +++ b/pkg/push/api/BUILD.bazel @@ -0,0 +1,57 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "api.go", + "error.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/push/api", + visibility = ["//visibility:public"], + deps = [ + "//pkg/account/client:go_default_library", + "//pkg/experiment/client:go_default_library", + "//pkg/feature/client:go_default_library", + "//pkg/locale:go_default_library", + 
"//pkg/log:go_default_library", + "//pkg/pubsub/publisher:go_default_library", + "//pkg/push/command:go_default_library", + "//pkg/push/domain:go_default_library", + "//pkg/push/storage/v2:go_default_library", + "//pkg/role:go_default_library", + "//pkg/rpc/status:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//proto/account:go_default_library", + "//proto/event/domain:go_default_library", + "//proto/push:go_default_library", + "@go_googleapis//google/rpc:errdetails_go_proto", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["api_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/account/client/mock:go_default_library", + "//pkg/experiment/client/mock:go_default_library", + "//pkg/feature/client/mock:go_default_library", + "//pkg/locale:go_default_library", + "//pkg/pubsub/publisher/mock:go_default_library", + "//pkg/push/storage/v2:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/storage:go_default_library", + "//pkg/storage/testing:go_default_library", + "//pkg/storage/v2/mysql/mock:go_default_library", + "//pkg/token:go_default_library", + "//proto/account:go_default_library", + "//proto/push:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/push/api/api.go b/pkg/push/api/api.go new file mode 100644 index 000000000..a1f83de67 --- /dev/null +++ b/pkg/push/api/api.go @@ -0,0 +1,586 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "context" + "errors" + "strconv" + + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + accountclient "github.com/bucketeer-io/bucketeer/pkg/account/client" + experimentclient "github.com/bucketeer-io/bucketeer/pkg/experiment/client" + featureclient "github.com/bucketeer-io/bucketeer/pkg/feature/client" + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + "github.com/bucketeer-io/bucketeer/pkg/push/command" + "github.com/bucketeer-io/bucketeer/pkg/push/domain" + v2ps "github.com/bucketeer-io/bucketeer/pkg/push/storage/v2" + "github.com/bucketeer-io/bucketeer/pkg/role" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" + pushproto "github.com/bucketeer-io/bucketeer/proto/push" +) + +const listRequestSize = 500 + +var errTagDuplicated = errors.New("push: tag is duplicated") + +type options struct { + logger *zap.Logger +} + +type Option func(*options) + +func WithLogger(l *zap.Logger) Option { + return func(opts *options) { + opts.logger = l + } +} + +type PushService struct { + mysqlClient mysql.Client + featureClient featureclient.Client + experimentClient experimentclient.Client + accountClient accountclient.Client + publisher publisher.Publisher + opts *options + logger *zap.Logger +} + +func NewPushService( + 
mysqlClient mysql.Client, + featureClient featureclient.Client, + experimentClient experimentclient.Client, + accountClient accountclient.Client, + publisher publisher.Publisher, + opts ...Option, +) *PushService { + dopts := &options{ + logger: zap.NewNop(), + } + for _, opt := range opts { + opt(dopts) + } + return &PushService{ + mysqlClient: mysqlClient, + featureClient: featureClient, + experimentClient: experimentClient, + accountClient: accountClient, + publisher: publisher, + opts: dopts, + logger: dopts.logger.Named("api"), + } +} + +func (s *PushService) Register(server *grpc.Server) { + pushproto.RegisterPushServiceServer(server, s) +} + +func (s *PushService) CreatePush( + ctx context.Context, + req *pushproto.CreatePushRequest, +) (*pushproto.CreatePushResponse, error) { + editor, err := s.checkRole(ctx, accountproto.Account_EDITOR, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := s.validateCreatePushRequest(req); err != nil { + return nil, err + } + push, err := domain.NewPush(req.Command.Name, req.Command.FcmApiKey, req.Command.Tags) + if err != nil { + s.logger.Error( + "Failed to create a new push", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + zap.Strings("tags", req.Command.Tags), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + pushes, err := s.listAllPushes(ctx, req.EnvironmentNamespace) + if err != nil { + return nil, localizedError(statusInternal, locale.JaJP) + } + if s.containsFCMKey(ctx, pushes, req.Command.FcmApiKey) { + return nil, localizedError(statusFCMKeyAlreadyExists, locale.JaJP) + } + err = s.containsTags(ctx, pushes, req.Command.Tags) + if err != nil { + if status.Code(err) == codes.AlreadyExists { + return nil, localizedError(statusTagAlreadyExists, locale.JaJP) + } + s.logger.Error( + "Failed to validate tag existence", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + 
zap.String("environmentNamespace", req.EnvironmentNamespace), + zap.Strings("tags", req.Command.Tags), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + pushStorage := v2ps.NewPushStorage(tx) + if err := pushStorage.CreatePush(ctx, push, req.EnvironmentNamespace); err != nil { + return err + } + handler := command.NewPushCommandHandler(editor, push, s.publisher, req.EnvironmentNamespace) + if err := handler.Handle(ctx, req.Command); err != nil { + return err + } + return nil + + }) + if err != nil { + if err == v2ps.ErrPushAlreadyExists { + return nil, localizedError(statusAlreadyExists, locale.JaJP) + } + s.logger.Error( + "Failed to create push", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &pushproto.CreatePushResponse{}, nil +} + +func (s *PushService) validateCreatePushRequest(req *pushproto.CreatePushRequest) error { + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + if req.Command.FcmApiKey == "" { + return localizedError(statusFCMAPIKeyRequired, locale.JaJP) + } + if len(req.Command.Tags) == 0 { + return localizedError(statusTagsRequired, locale.JaJP) + } + if req.Command.Name == "" { + return localizedError(statusNameRequired, locale.JaJP) + } + return nil +} + +func (s *PushService) UpdatePush( + ctx context.Context, + req *pushproto.UpdatePushRequest, +) (*pushproto.UpdatePushResponse, error) { + editor, err := s.checkRole(ctx, accountproto.Account_EDITOR, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + 
if err := s.validateUpdatePushRequest(ctx, req); err != nil { + return nil, err + } + commands := s.createUpdatePushCommands(req) + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + pushStorage := v2ps.NewPushStorage(tx) + push, err := pushStorage.GetPush(ctx, req.Id, req.EnvironmentNamespace) + if err != nil { + return err + } + handler := command.NewPushCommandHandler(editor, push, s.publisher, req.EnvironmentNamespace) + for _, command := range commands { + if err := handler.Handle(ctx, command); err != nil { + return err + } + } + return pushStorage.UpdatePush(ctx, push, req.EnvironmentNamespace) + }) + if err != nil { + if err == v2ps.ErrPushNotFound || err == v2ps.ErrPushUnexpectedAffectedRows { + return nil, localizedError(statusNotFound, locale.JaJP) + } + s.logger.Error( + "Failed to update push", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + zap.String("id", req.Id), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &pushproto.UpdatePushResponse{}, nil +} + +func (s *PushService) validateUpdatePushRequest(ctx context.Context, req *pushproto.UpdatePushRequest) error { + if req.Id == "" { + return localizedError(statusIDRequired, locale.JaJP) + } + if s.isNoUpdatePushCommand(req) { + return localizedError(statusNoCommand, locale.JaJP) + } + if req.DeletePushTagsCommand != nil && len(req.DeletePushTagsCommand.Tags) == 0 { + return localizedError(statusTagsRequired, locale.JaJP) + } + if err := s.validateAddPushTagsCommand(ctx, req); err != nil { + return err + } + if req.RenamePushCommand != nil && req.RenamePushCommand.Name == "" { + return localizedError(statusNameRequired, 
locale.JaJP) + } + return nil +} + +func (s *PushService) validateAddPushTagsCommand(ctx context.Context, req *pushproto.UpdatePushRequest) error { + if req.AddPushTagsCommand == nil { + return nil + } + if len(req.AddPushTagsCommand.Tags) == 0 { + return localizedError(statusTagsRequired, locale.JaJP) + } + pushes, err := s.listAllPushes(ctx, req.EnvironmentNamespace) + if err != nil { + return localizedError(statusInternal, locale.JaJP) + } + err = s.containsTags(ctx, pushes, req.AddPushTagsCommand.Tags) + if err != nil { + if status.Code(err) == codes.AlreadyExists { + return localizedError(statusTagAlreadyExists, locale.JaJP) + } + s.logger.Error( + "Failed to validate tag existence", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + zap.String("id", req.Id), + zap.Strings("tags", req.AddPushTagsCommand.Tags), + )..., + ) + return localizedError(statusInternal, locale.JaJP) + } + return nil +} + +func (s *PushService) isNoUpdatePushCommand(req *pushproto.UpdatePushRequest) bool { + return req.AddPushTagsCommand == nil && + req.DeletePushTagsCommand == nil && + req.RenamePushCommand == nil +} + +func (s *PushService) DeletePush( + ctx context.Context, + req *pushproto.DeletePushRequest, +) (*pushproto.DeletePushResponse, error) { + editor, err := s.checkRole(ctx, accountproto.Account_EDITOR, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := validateDeletePushRequest(req); err != nil { + return nil, err + } + tx, err := s.mysqlClient.BeginTx(ctx) + if err != nil { + s.logger.Error( + "Failed to begin transaction", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + err = s.mysqlClient.RunInTransaction(ctx, tx, func() error { + pushStorage := v2ps.NewPushStorage(tx) + push, err := pushStorage.GetPush(ctx, req.Id, req.EnvironmentNamespace) + if err != nil { + return 
err + } + handler := command.NewPushCommandHandler(editor, push, s.publisher, req.EnvironmentNamespace) + if err := handler.Handle(ctx, req.Command); err != nil { + return err + } + return pushStorage.UpdatePush(ctx, push, req.EnvironmentNamespace) + }) + if err != nil { + if err == v2ps.ErrPushNotFound || err == v2ps.ErrPushUnexpectedAffectedRows { + return nil, localizedError(statusNotFound, locale.JaJP) + } + s.logger.Error( + "Failed to delete push", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("id", req.Id), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &pushproto.DeletePushResponse{}, nil +} + +func validateDeletePushRequest(req *pushproto.DeletePushRequest) error { + if req.Id == "" { + return localizedError(statusIDRequired, locale.JaJP) + } + if req.Command == nil { + return localizedError(statusNoCommand, locale.JaJP) + } + return nil +} + +func (s *PushService) createUpdatePushCommands(req *pushproto.UpdatePushRequest) []command.Command { + commands := make([]command.Command, 0) + if req.DeletePushTagsCommand != nil { + commands = append(commands, req.DeletePushTagsCommand) + } + if req.AddPushTagsCommand != nil { + commands = append(commands, req.AddPushTagsCommand) + } + if req.RenamePushCommand != nil { + commands = append(commands, req.RenamePushCommand) + } + return commands +} + +func (s *PushService) containsTags(ctx context.Context, pushes []*pushproto.Push, tags []string) error { + m, err := s.tagMap(pushes) + if err != nil { + return err + } + for _, t := range tags { + if _, ok := m[t]; ok { + return localizedError(statusTagAlreadyExists, locale.JaJP) + } + } + return nil +} + +func (s *PushService) containsFCMKey(ctx context.Context, pushes []*pushproto.Push, fcmAPIKey string) bool { + for _, push := range pushes { + if push.FcmApiKey == fcmAPIKey { + return true + } + } + return false +} + +func (s 
*PushService) tagMap(pushes []*pushproto.Push) (map[string]struct{}, error) { + m := make(map[string]struct{}) + for _, p := range pushes { + for _, t := range p.Tags { + if _, ok := m[t]; ok { + return nil, errTagDuplicated + } + m[t] = struct{}{} + } + } + return m, nil +} + +func (s *PushService) listAllPushes(ctx context.Context, environmentNamespace string) ([]*pushproto.Push, error) { + pushes := []*pushproto.Push{} + cursor := "" + whereParts := []mysql.WherePart{ + mysql.NewFilter("deleted", "=", false), + mysql.NewFilter("environment_namespace", "=", environmentNamespace), + } + for { + ps, curCursor, _, err := s.listPushes( + ctx, + listRequestSize, + cursor, + environmentNamespace, + whereParts, + nil, + ) + if err != nil { + return nil, err + } + pushes = append(pushes, ps...) + psSize := len(ps) + if psSize == 0 || psSize < listRequestSize { + return pushes, nil + } + cursor = curCursor + } +} + +func (s *PushService) ListPushes( + ctx context.Context, + req *pushproto.ListPushesRequest, +) (*pushproto.ListPushesResponse, error) { + _, err := s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + whereParts := []mysql.WherePart{ + mysql.NewFilter("deleted", "=", false), + mysql.NewFilter("environment_namespace", "=", req.EnvironmentNamespace), + } + if req.SearchKeyword != "" { + whereParts = append(whereParts, mysql.NewSearchQuery([]string{"name"}, req.SearchKeyword)) + } + orders, err := s.newListOrders(req.OrderBy, req.OrderDirection) + if err != nil { + s.logger.Error( + "Invalid argument", + log.FieldsFromImcomingContext(ctx).AddFields(zap.Error(err))..., + ) + return nil, err + } + pushes, cursor, totalCount, err := s.listPushes( + ctx, + req.PageSize, + req.Cursor, + req.EnvironmentNamespace, + whereParts, + orders, + ) + if err != nil { + return nil, err + } + return &pushproto.ListPushesResponse{ + Pushes: pushes, + Cursor: cursor, + TotalCount: totalCount, + }, nil +} + +func (s 
*PushService) newListOrders( + orderBy pushproto.ListPushesRequest_OrderBy, + orderDirection pushproto.ListPushesRequest_OrderDirection, +) ([]*mysql.Order, error) { + var column string + switch orderBy { + case pushproto.ListPushesRequest_DEFAULT, + pushproto.ListPushesRequest_NAME: + column = "name" + case pushproto.ListPushesRequest_CREATED_AT: + column = "created_at" + case pushproto.ListPushesRequest_UPDATED_AT: + column = "updated_at" + default: + return nil, localizedError(statusInvalidOrderBy, locale.JaJP) + } + direction := mysql.OrderDirectionAsc + if orderDirection == pushproto.ListPushesRequest_DESC { + direction = mysql.OrderDirectionDesc + } + return []*mysql.Order{mysql.NewOrder(column, direction)}, nil +} + +func (s *PushService) listPushes( + ctx context.Context, + pageSize int64, + cursor string, + environmentNamespace string, + whereParts []mysql.WherePart, + orders []*mysql.Order, +) ([]*pushproto.Push, string, int64, error) { + limit := int(pageSize) + if cursor == "" { + cursor = "0" + } + offset, err := strconv.Atoi(cursor) + if err != nil { + return nil, "", 0, localizedError(statusInvalidCursor, locale.JaJP) + } + pushStorage := v2ps.NewPushStorage(s.mysqlClient) + pushes, nextCursor, totalCount, err := pushStorage.ListPushes( + ctx, + whereParts, + orders, + limit, + offset, + ) + if err != nil { + s.logger.Error( + "Failed to list pushes", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, "", 0, localizedError(statusInternal, locale.JaJP) + } + return pushes, strconv.Itoa(nextCursor), totalCount, nil +} + +func (s *PushService) checkRole( + ctx context.Context, + requiredRole accountproto.Account_Role, + environmentNamespace string, +) (*eventproto.Editor, error) { + editor, err := role.CheckRole(ctx, requiredRole, func(email string) (*accountproto.GetAccountResponse, error) { + return s.accountClient.GetAccount(ctx, 
&accountproto.GetAccountRequest{ + Email: email, + EnvironmentNamespace: environmentNamespace, + }) + }) + if err != nil { + switch status.Code(err) { + case codes.Unauthenticated: + s.logger.Info( + "Unauthenticated", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, localizedError(statusUnauthenticated, locale.JaJP) + case codes.PermissionDenied: + s.logger.Info( + "Permission denied", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, localizedError(statusPermissionDenied, locale.JaJP) + default: + s.logger.Error( + "Failed to check role", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + } + return editor, nil +} diff --git a/pkg/push/api/api_test.go b/pkg/push/api/api_test.go new file mode 100644 index 000000000..5ce8b29f6 --- /dev/null +++ b/pkg/push/api/api_test.go @@ -0,0 +1,488 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + + accountclientmock "github.com/bucketeer-io/bucketeer/pkg/account/client/mock" + experimentclientmock "github.com/bucketeer-io/bucketeer/pkg/experiment/client/mock" + featureclientmock "github.com/bucketeer-io/bucketeer/pkg/feature/client/mock" + "github.com/bucketeer-io/bucketeer/pkg/locale" + publishermock "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher/mock" + v2ps "github.com/bucketeer-io/bucketeer/pkg/push/storage/v2" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/storage" + storagetesting "github.com/bucketeer-io/bucketeer/pkg/storage/testing" + mysqlmock "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + "github.com/bucketeer-io/bucketeer/pkg/token" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + pushproto "github.com/bucketeer-io/bucketeer/proto/push" +) + +func TestNewPushService(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + mysqlClient := mysqlmock.NewMockClient(mockController) + featureClientMock := featureclientmock.NewMockClient(mockController) + experimentClientMock := experimentclientmock.NewMockClient(mockController) + accountClientMock := accountclientmock.NewMockClient(mockController) + pm := publishermock.NewMockPublisher(mockController) + logger := zap.NewNop() + s := NewPushService( + mysqlClient, + featureClientMock, + experimentClientMock, + accountClientMock, + pm, + WithLogger(logger), + ) + assert.IsType(t, &PushService{}, s) +} + +func TestCreatePushMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*PushService) + req *pushproto.CreatePushRequest + expectedErr error + }{ + "err: ErrNoCommand": { + setup: nil, + req: 
&pushproto.CreatePushRequest{ + Command: nil, + }, + expectedErr: localizedError(statusNoCommand, locale.JaJP), + }, + "err: ErrFCMAPIKeyRequired": { + setup: nil, + req: &pushproto.CreatePushRequest{ + Command: &pushproto.CreatePushCommand{}, + }, + expectedErr: localizedError(statusFCMAPIKeyRequired, locale.JaJP), + }, + "err: ErrTagsRequired": { + setup: nil, + req: &pushproto.CreatePushRequest{ + Command: &pushproto.CreatePushCommand{ + FcmApiKey: "key-0", + }, + }, + expectedErr: localizedError(statusTagsRequired, locale.JaJP), + }, + "err: ErrNameRequired": { + setup: nil, + req: &pushproto.CreatePushRequest{ + Command: &pushproto.CreatePushCommand{ + FcmApiKey: "key-1", + Tags: []string{"tag-0"}, + }, + }, + expectedErr: localizedError(statusNameRequired, locale.JaJP), + }, + "err: ErrAlreadyExists": { + setup: func(s *PushService) { + rows := mysqlmock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2ps.ErrPushAlreadyExists) + }, + req: &pushproto.CreatePushRequest{ + EnvironmentNamespace: "ns0", + Command: &pushproto.CreatePushCommand{ + FcmApiKey: "key-0", + Tags: []string{"tag-0"}, + Name: "name-1", + }, + }, + expectedErr: localizedError(statusAlreadyExists, locale.JaJP), + }, + "success": { + setup: func(s *PushService) { + rows := mysqlmock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + 
rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + req: &pushproto.CreatePushRequest{ + EnvironmentNamespace: "ns0", + Command: &pushproto.CreatePushCommand{ + FcmApiKey: "key-1", + Tags: []string{"tag-0"}, + Name: "name-1", + }, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithToken(t) + service := newPushServiceWithMock(t, mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.CreatePush(ctx, p.req) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestUpdatePushMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*PushService) + req *pushproto.UpdatePushRequest + expectedErr error + }{ + "err: ErrIDRequired": { + req: &pushproto.UpdatePushRequest{}, + expectedErr: localizedError(statusIDRequired, locale.JaJP), + }, + "err: ErrNoCommand": { + req: &pushproto.UpdatePushRequest{ + Id: "key-0", + }, + expectedErr: localizedError(statusNoCommand, locale.JaJP), + }, + "err: ErrTagsRequired: delete": { + req: &pushproto.UpdatePushRequest{ + Id: "key-0", + DeletePushTagsCommand: &pushproto.DeletePushTagsCommand{Tags: []string{}}, + }, + expectedErr: localizedError(statusTagsRequired, locale.JaJP), + }, + "err: ErrTagsRequired: add": { + req: &pushproto.UpdatePushRequest{ + 
Id: "key-0", + AddPushTagsCommand: &pushproto.AddPushTagsCommand{Tags: []string{}}, + }, + expectedErr: localizedError(statusTagsRequired, locale.JaJP), + }, + "err: ErrNameRequired: add": { + req: &pushproto.UpdatePushRequest{ + Id: "key-0", + RenamePushCommand: &pushproto.RenamePushCommand{Name: ""}, + }, + expectedErr: localizedError(statusNameRequired, locale.JaJP), + }, + "err: ErrNotFound": { + setup: func(s *PushService) { + rows := mysqlmock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2ps.ErrPushNotFound) + }, + req: &pushproto.UpdatePushRequest{ + Id: "key-1", + AddPushTagsCommand: &pushproto.AddPushTagsCommand{Tags: []string{"tag-1"}}, + }, + expectedErr: localizedError(statusNotFound, locale.JaJP), + }, + "success: rename": { + setup: func(s *PushService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + req: &pushproto.UpdatePushRequest{ + EnvironmentNamespace: "ns0", + Id: "key-0", + RenamePushCommand: &pushproto.RenamePushCommand{Name: "name-1"}, + }, + expectedErr: nil, + }, + "success: deletePushTags": { + setup: func(s *PushService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + 
s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + req: &pushproto.UpdatePushRequest{ + EnvironmentNamespace: "ns0", + Id: "key-0", + DeletePushTagsCommand: &pushproto.DeletePushTagsCommand{Tags: []string{"tag-0"}}, + }, + expectedErr: nil, + }, + "success: addPushTags": { + setup: func(s *PushService) { + rows := mysqlmock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + req: &pushproto.UpdatePushRequest{ + EnvironmentNamespace: "ns0", + Id: "key-0", + AddPushTagsCommand: &pushproto.AddPushTagsCommand{Tags: []string{"tag-2"}}, + }, + expectedErr: nil, + }, + "success": { + setup: func(s *PushService) { + rows := mysqlmock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + 
s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + req: &pushproto.UpdatePushRequest{ + EnvironmentNamespace: "ns0", + Id: "key-0", + AddPushTagsCommand: &pushproto.AddPushTagsCommand{Tags: []string{"tag-2"}}, + DeletePushTagsCommand: &pushproto.DeletePushTagsCommand{Tags: []string{"tag-0"}}, + RenamePushCommand: &pushproto.RenamePushCommand{Name: "name-1"}, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithToken(t) + service := newPushServiceWithMock(t, mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.UpdatePush(ctx, p.req) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestDeletePushMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*PushService) + req *pushproto.DeletePushRequest + expectedErr error + }{ + "err: ErrIDRequired": { + req: &pushproto.DeletePushRequest{}, + expectedErr: localizedError(statusIDRequired, locale.JaJP), + }, + "err: ErrNoCommand": { + req: &pushproto.DeletePushRequest{ + Id: "key-0", + }, + expectedErr: localizedError(statusNoCommand, locale.JaJP), + }, + "err: ErrNotFound": { + setup: func(s *PushService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(v2ps.ErrPushNotFound) + }, + req: &pushproto.DeletePushRequest{ + EnvironmentNamespace: "ns0", + Id: "key-1", + Command: &pushproto.DeletePushCommand{}, + }, + expectedErr: localizedError(statusNotFound, locale.JaJP), + }, + "success": { + setup: func(s *PushService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().BeginTx(gomock.Any()).Return(nil, nil) + 
s.mysqlClient.(*mysqlmock.MockClient).EXPECT().RunInTransaction( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil) + }, + req: &pushproto.DeletePushRequest{ + EnvironmentNamespace: "ns0", + Id: "key-0", + Command: &pushproto.DeletePushCommand{}, + }, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + ctx := createContextWithToken(t) + service := newPushServiceWithMock(t, mockController, nil) + if p.setup != nil { + p.setup(service) + } + _, err := service.DeletePush(ctx, p.req) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestListPushesMySQL(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := map[string]struct { + setup func(*PushService) + input *pushproto.ListPushesRequest + expected *pushproto.ListPushesResponse + expectedErr error + }{ + "err: ErrInvalidCursor": { + setup: nil, + input: &pushproto.ListPushesRequest{Cursor: "XXX"}, + expected: nil, + expectedErr: localizedError(statusInvalidCursor, locale.JaJP), + }, + "err: ErrInternal": { + setup: func(s *PushService) { + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + }, + input: &pushproto.ListPushesRequest{}, + expected: nil, + expectedErr: localizedError(statusInternal, locale.JaJP), + }, + "success": { + setup: func(s *PushService) { + rows := mysqlmock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + input: 
&pushproto.ListPushesRequest{PageSize: 2, Cursor: ""}, + expected: &pushproto.ListPushesResponse{Pushes: []*pushproto.Push{}, Cursor: "0"}, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + s := newPushServiceWithMock(t, mockController, storagetesting.NewInMemoryStorage()) + if p.setup != nil { + p.setup(s) + } + actual, err := s.ListPushes(createContextWithToken(t), p.input) + assert.Equal(t, p.expectedErr, err) + assert.Equal(t, p.expected, actual) + }) + } +} + +func newPushServiceWithMock(t *testing.T, c *gomock.Controller, s storage.Client) *PushService { + t.Helper() + return &PushService{ + mysqlClient: mysqlmock.NewMockClient(c), + featureClient: featureclientmock.NewMockClient(c), + experimentClient: experimentclientmock.NewMockClient(c), + accountClient: accountclientmock.NewMockClient(c), + publisher: publishermock.NewMockPublisher(c), + logger: zap.NewNop(), + } +} + +func createContextWithToken(t *testing.T) context.Context { + t.Helper() + token := &token.IDToken{ + Issuer: "issuer", + Subject: "sub", + Audience: "audience", + Expiry: time.Now().AddDate(100, 0, 0), + IssuedAt: time.Now(), + Email: "email", + AdminRole: accountproto.Account_OWNER, + } + ctx := context.TODO() + return context.WithValue(ctx, rpc.Key, token) +} diff --git a/pkg/push/api/error.go b/pkg/push/api/error.go new file mode 100644 index 000000000..9c87fb247 --- /dev/null +++ b/pkg/push/api/error.go @@ -0,0 +1,186 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc/codes" + gstatus "google.golang.org/grpc/status" + + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/rpc/status" +) + +var ( + statusInternal = gstatus.New(codes.Internal, "push: internal") + statusIDRequired = gstatus.New(codes.InvalidArgument, "push: id must be specified") + statusNameRequired = gstatus.New(codes.InvalidArgument, "push: name must be specified") + statusFCMAPIKeyRequired = gstatus.New(codes.InvalidArgument, "push: fcm api key must be specified") + statusTagsRequired = gstatus.New(codes.InvalidArgument, "push: tags must be specified") + statusInvalidCursor = gstatus.New(codes.InvalidArgument, "push: cursor is invalid") + statusNoCommand = gstatus.New(codes.InvalidArgument, "push: no command") + statusInvalidOrderBy = gstatus.New(codes.InvalidArgument, "push: order_by is invalid") + statusNotFound = gstatus.New(codes.NotFound, "push: not found") + statusAlreadyDeleted = gstatus.New(codes.NotFound, "push: already deleted") + statusAlreadyExists = gstatus.New(codes.AlreadyExists, "push: already exists") + statusFCMKeyAlreadyExists = gstatus.New(codes.AlreadyExists, "push: fcm key already exists") + statusTagAlreadyExists = gstatus.New(codes.AlreadyExists, "push: tag already exists") + statusUnauthenticated = gstatus.New(codes.Unauthenticated, "push: unauthenticated") + statusPermissionDenied = gstatus.New(codes.PermissionDenied, "push: permission denied") + + errInternalJaJP = status.MustWithDetails( + statusInternal, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "内部エラーが発生しました", + }, + ) + errIDRequiredJaJP = status.MustWithDetails( + statusIDRequired, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "idは必須です", + }, + ) + errNameRequiredJaJP = 
status.MustWithDetails( + statusNameRequired, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "nameは必須です", + }, + ) + errFCMAPIKeyRequiredJaJP = status.MustWithDetails( + statusFCMAPIKeyRequired, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "fcm api keyは必須です", + }, + ) + errTagsRequiredJaJP = status.MustWithDetails( + statusTagsRequired, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "tagsは必須です", + }, + ) + errInvalidCursorJaJP = status.MustWithDetails( + statusInvalidCursor, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不正なcursorです", + }, + ) + errNoCommandJaJP = status.MustWithDetails( + statusNoCommand, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "commandは必須です", + }, + ) + errInvalidOrderByJaJP = status.MustWithDetails( + statusInvalidOrderBy, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不正なソート順の指定です", + }, + ) + errNotFoundJaJP = status.MustWithDetails( + statusNotFound, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "データが存在しません", + }, + ) + errAlreadyDeletedJaJP = status.MustWithDetails( + statusAlreadyDeleted, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "データがすでに削除済みです", + }, + ) + errAlreadyExistsJaJP = status.MustWithDetails( + statusAlreadyExists, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "同じidのデータがすでに存在します", + }, + ) + errFCMKeyAlreadyExistsJaJP = status.MustWithDetails( + statusFCMKeyAlreadyExists, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "同じfcm keyがすでに存在します", + }, + ) + errTagAlreadyExistsJaJP = status.MustWithDetails( + statusTagAlreadyExists, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "同じtagがすでに存在します", + }, + ) + errUnauthenticatedJaJP = status.MustWithDetails( + statusUnauthenticated, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "認証されていません", + }, + ) + errPermissionDeniedJaJP = 
status.MustWithDetails( + statusPermissionDenied, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "権限がありません", + }, + ) +) + +func localizedError(s *gstatus.Status, loc string) error { + // handle loc if multi-lang is necessary + switch s { + case statusInternal: + return errInternalJaJP + case statusIDRequired: + return errIDRequiredJaJP + case statusNameRequired: + return errNameRequiredJaJP + case statusFCMAPIKeyRequired: + return errFCMAPIKeyRequiredJaJP + case statusTagsRequired: + return errTagsRequiredJaJP + case statusInvalidCursor: + return errInvalidCursorJaJP + case statusNoCommand: + return errNoCommandJaJP + case statusInvalidOrderBy: + return errInvalidOrderByJaJP + case statusNotFound: + return errNotFoundJaJP + case statusAlreadyDeleted: + return errAlreadyDeletedJaJP + case statusAlreadyExists: + return errAlreadyExistsJaJP + case statusFCMKeyAlreadyExists: + return errFCMKeyAlreadyExistsJaJP + case statusTagAlreadyExists: + return errTagAlreadyExistsJaJP + case statusUnauthenticated: + return errUnauthenticatedJaJP + case statusPermissionDenied: + return errPermissionDeniedJaJP + default: + return errInternalJaJP + } +} diff --git a/pkg/push/client/BUILD.bazel b/pkg/push/client/BUILD.bazel new file mode 100644 index 000000000..a540cfd1d --- /dev/null +++ b/pkg/push/client/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["client.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/push/client", + visibility = ["//visibility:public"], + deps = [ + "//pkg/rpc/client:go_default_library", + "//proto/push:go_default_library", + "@org_golang_google_grpc//:go_default_library", + ], +) diff --git a/pkg/push/client/client.go b/pkg/push/client/client.go new file mode 100644 index 000000000..e85785f1d --- /dev/null +++ b/pkg/push/client/client.go @@ -0,0 +1,50 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package client + +import ( + "google.golang.org/grpc" + + rpcclient "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + proto "github.com/bucketeer-io/bucketeer/proto/push" +) + +type Client interface { + proto.PushServiceClient + Close() +} + +type client struct { + proto.PushServiceClient + address string + connection *grpc.ClientConn +} + +func NewClient(addr, certPath string, opts ...rpcclient.Option) (Client, error) { + conn, err := rpcclient.NewClientConn(addr, certPath, opts...) 
+ if err != nil { + return nil, err + } + return &client{ + PushServiceClient: proto.NewPushServiceClient(conn), + address: addr, + connection: conn, + }, nil +} + +func (c *client) Close() { + c.connection.Close() +} diff --git a/pkg/push/client/mock/BUILD.bazel b/pkg/push/client/mock/BUILD.bazel new file mode 100644 index 000000000..e981ce312 --- /dev/null +++ b/pkg/push/client/mock/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["client.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/push/client/mock", + visibility = ["//visibility:public"], + deps = [ + "//proto/push:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@org_golang_google_grpc//:go_default_library", + ], +) diff --git a/pkg/push/client/mock/client.go b/pkg/push/client/mock/client.go new file mode 100644 index 000000000..c0c9e322a --- /dev/null +++ b/pkg/push/client/mock/client.go @@ -0,0 +1,130 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: client.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + grpc "google.golang.org/grpc" + + push "github.com/bucketeer-io/bucketeer/proto/push" +) + +// MockClient is a mock of Client interface. +type MockClient struct { + ctrl *gomock.Controller + recorder *MockClientMockRecorder +} + +// MockClientMockRecorder is the mock recorder for MockClient. +type MockClientMockRecorder struct { + mock *MockClient +} + +// NewMockClient creates a new mock instance. +func NewMockClient(ctrl *gomock.Controller) *MockClient { + mock := &MockClient{ctrl: ctrl} + mock.recorder = &MockClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockClient) EXPECT() *MockClientMockRecorder { + return m.recorder +} + +// Close mocks base method. 
+func (m *MockClient) Close() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Close") +} + +// Close indicates an expected call of Close. +func (mr *MockClientMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockClient)(nil).Close)) +} + +// CreatePush mocks base method. +func (m *MockClient) CreatePush(ctx context.Context, in *push.CreatePushRequest, opts ...grpc.CallOption) (*push.CreatePushResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreatePush", varargs...) + ret0, _ := ret[0].(*push.CreatePushResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreatePush indicates an expected call of CreatePush. +func (mr *MockClientMockRecorder) CreatePush(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreatePush", reflect.TypeOf((*MockClient)(nil).CreatePush), varargs...) +} + +// DeletePush mocks base method. +func (m *MockClient) DeletePush(ctx context.Context, in *push.DeletePushRequest, opts ...grpc.CallOption) (*push.DeletePushResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeletePush", varargs...) + ret0, _ := ret[0].(*push.DeletePushResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeletePush indicates an expected call of DeletePush. +func (mr *MockClientMockRecorder) DeletePush(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePush", reflect.TypeOf((*MockClient)(nil).DeletePush), varargs...) +} + +// ListPushes mocks base method. 
+func (m *MockClient) ListPushes(ctx context.Context, in *push.ListPushesRequest, opts ...grpc.CallOption) (*push.ListPushesResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListPushes", varargs...) + ret0, _ := ret[0].(*push.ListPushesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListPushes indicates an expected call of ListPushes. +func (mr *MockClientMockRecorder) ListPushes(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPushes", reflect.TypeOf((*MockClient)(nil).ListPushes), varargs...) +} + +// UpdatePush mocks base method. +func (m *MockClient) UpdatePush(ctx context.Context, in *push.UpdatePushRequest, opts ...grpc.CallOption) (*push.UpdatePushResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdatePush", varargs...) + ret0, _ := ret[0].(*push.UpdatePushResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdatePush indicates an expected call of UpdatePush. +func (mr *MockClientMockRecorder) UpdatePush(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePush", reflect.TypeOf((*MockClient)(nil).UpdatePush), varargs...) 
+} diff --git a/pkg/push/cmd/sender/BUILD.bazel b/pkg/push/cmd/sender/BUILD.bazel new file mode 100644 index 000000000..1440bdc0c --- /dev/null +++ b/pkg/push/cmd/sender/BUILD.bazel @@ -0,0 +1,24 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["sender.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/push/cmd/sender", + visibility = ["//visibility:public"], + deps = [ + "//pkg/cache/v3:go_default_library", + "//pkg/cli:go_default_library", + "//pkg/feature/client:go_default_library", + "//pkg/health:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/pubsub:go_default_library", + "//pkg/pubsub/puller:go_default_library", + "//pkg/push/client:go_default_library", + "//pkg/push/sender:go_default_library", + "//pkg/redis/v3:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/rpc/client:go_default_library", + "@in_gopkg_alecthomas_kingpin_v2//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/push/cmd/sender/sender.go b/pkg/push/cmd/sender/sender.go new file mode 100644 index 000000000..998e3e878 --- /dev/null +++ b/pkg/push/cmd/sender/sender.go @@ -0,0 +1,234 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package sender
+
+import (
+ "context"
+ "os"
+ "time"
+
+ "go.uber.org/zap"
+ kingpin "gopkg.in/alecthomas/kingpin.v2"
+
+ cachev3 "github.com/bucketeer-io/bucketeer/pkg/cache/v3"
+ "github.com/bucketeer-io/bucketeer/pkg/cli"
+ featureclient "github.com/bucketeer-io/bucketeer/pkg/feature/client"
+ "github.com/bucketeer-io/bucketeer/pkg/health"
+ "github.com/bucketeer-io/bucketeer/pkg/metrics"
+ "github.com/bucketeer-io/bucketeer/pkg/pubsub"
+ "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller"
+ pushclient "github.com/bucketeer-io/bucketeer/pkg/push/client"
+ tf "github.com/bucketeer-io/bucketeer/pkg/push/sender"
+ redisv3 "github.com/bucketeer-io/bucketeer/pkg/redis/v3"
+ "github.com/bucketeer-io/bucketeer/pkg/rpc"
+ "github.com/bucketeer-io/bucketeer/pkg/rpc/client"
+)
+
+const command = "sender"
+
+type server struct {
+ *kingpin.CmdClause
+ port *int
+ project *string
+ domainTopic *string
+ domainSubscription *string
+ pushService *string
+ featureService *string
+ maxMPS *int
+ numWorkers *int
+ certPath *string
+ keyPath *string
+ serviceTokenPath *string
+ pullerNumGoroutines *int
+ pullerMaxOutstandingMessages *int
+ pullerMaxOutstandingBytes *int
+ redisServerName *string
+ redisAddr *string
+ redisPoolMaxIdle *int
+ redisPoolMaxActive *int
+}
+
+func RegisterCommand(r cli.CommandRegistry, p cli.ParentCommand) cli.Command {
+ cmd := p.Command(command, "Start push sender")
+ s := &server{
+ CmdClause: cmd,
+ port: cmd.Flag("port", "Port to bind to.").Default("9090").Int(),
+ project: cmd.Flag("project", "Google Cloud project name.").String(),
+ domainTopic: cmd.Flag(
+ "domain-topic",
+ "Google PubSub topic name of incoming domain events.",
+ ).String(),
+ domainSubscription: cmd.Flag(
+ "domain-subscription",
+ "Google PubSub subscription name of incoming domain event.",
+ ).String(),
+ pushService: cmd.Flag(
+ "push-service",
+ "bucketeer-push-service address.",
+ ).Default("push:9090").String(),
+ featureService: cmd.Flag(
+ 
"feature-service",
+ "bucketeer-feature-service address.",
+ ).Default("feature:9090").String(),
+ maxMPS: cmd.Flag(
+ "max-mps",
+ "Maximum messages should be handled in a second.",
+ ).Default("5000").Int(),
+ numWorkers: cmd.Flag("num-workers", "Number of workers.").Default("1").Int(),
+ certPath: cmd.Flag("cert", "Path to TLS certificate.").Required().String(),
+ keyPath: cmd.Flag("key", "Path to TLS key.").Required().String(),
+ serviceTokenPath: cmd.Flag("service-token", "Path to service token.").Required().String(),
+ pullerNumGoroutines: cmd.Flag(
+ "puller-num-goroutines",
+ "Number of goroutines will be spawned to pull messages.",
+ ).Int(),
+ pullerMaxOutstandingMessages: cmd.Flag(
+ "puller-max-outstanding-messages",
+ "Maximum number of unprocessed messages.",
+ ).Int(),
+ pullerMaxOutstandingBytes: cmd.Flag("puller-max-outstanding-bytes", "Maximum size of unprocessed messages.").Int(),
+ redisServerName: cmd.Flag("redis-server-name", "Name of the redis.").Required().String(),
+ redisAddr: cmd.Flag("redis-addr", "Address of the redis.").Required().String(),
+ redisPoolMaxIdle: cmd.Flag(
+ "redis-pool-max-idle",
+ "Maximum number of idle connections in the pool.",
+ ).Default("5").Int(),
+ redisPoolMaxActive: cmd.Flag(
+ "redis-pool-max-active",
+ "Maximum number of connections allocated by the pool at a given time.",
+ ).Default("10").Int(),
+ }
+ r.RegisterCommand(s)
+ return s
+}
+
+func (s *server) Run(ctx context.Context, metrics metrics.Metrics, logger *zap.Logger) error {
+ *s.serviceTokenPath = s.insertTelepresenceMountRoot(*s.serviceTokenPath)
+ *s.keyPath = s.insertTelepresenceMountRoot(*s.keyPath)
+ *s.certPath = s.insertTelepresenceMountRoot(*s.certPath)
+
+ registerer := metrics.DefaultRegisterer()
+
+ domainPuller, err := s.createPuller(ctx, registerer, logger)
+ if err != nil {
+ return err
+ }
+
+ creds, err := client.NewPerRPCCredentials(*s.serviceTokenPath)
+ if err != nil {
+ return err
+ }
+
+ clientOptions := []client.Option{
+ 
client.WithPerRPCCredentials(creds), + client.WithDialTimeout(30 * time.Second), + client.WithBlock(), + client.WithMetrics(registerer), + client.WithLogger(logger), + } + pushClient, err := pushclient.NewClient(*s.pushService, *s.certPath, clientOptions...) + if err != nil { + return err + } + defer pushClient.Close() + + featureClient, err := featureclient.NewClient(*s.featureService, *s.certPath, clientOptions...) + if err != nil { + return err + } + defer featureClient.Close() + + redisV3Client, err := redisv3.NewClient( + *s.redisAddr, + redisv3.WithPoolSize(*s.redisPoolMaxActive), + redisv3.WithMinIdleConns(*s.redisPoolMaxIdle), + redisv3.WithServerName(*s.redisServerName), + redisv3.WithMetrics(registerer), + redisv3.WithLogger(logger), + ) + if err != nil { + return err + } + defer redisV3Client.Close() + redisV3Cache := cachev3.NewRedisCache(redisV3Client) + + t := tf.NewSender( + domainPuller, + pushClient, + featureClient, + redisV3Cache, + tf.WithMaxMPS(*s.maxMPS), + tf.WithNumWorkers(*s.numWorkers), + tf.WithMetrics(registerer), + tf.WithLogger(logger), + ) + defer t.Stop() + go t.Run() // nolint:errcheck + + healthChecker := health.NewGrpcChecker( + health.WithTimeout(time.Second), + health.WithCheck("metrics", metrics.Check), + health.WithCheck("sender", t.Check), + ) + go healthChecker.Run(ctx) + + server := rpc.NewServer(healthChecker, *s.certPath, *s.keyPath, + rpc.WithPort(*s.port), + rpc.WithMetrics(registerer), + rpc.WithLogger(logger), + rpc.WithHandler("/health", healthChecker), + ) + defer server.Stop(10 * time.Second) + go server.Run() + + <-ctx.Done() + return nil +} + +func (s *server) createPuller( + ctx context.Context, + registerer metrics.Registerer, + logger *zap.Logger, +) (puller.Puller, error) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + client, err := pubsub.NewClient( + ctx, + *s.project, + pubsub.WithMetrics(registerer), + pubsub.WithLogger(logger), + ) + if err != nil { + return nil, err + } + 
puller, err := client.CreatePuller(*s.domainSubscription, *s.domainTopic, + pubsub.WithNumGoroutines(*s.pullerNumGoroutines), + pubsub.WithMaxOutstandingMessages(*s.pullerMaxOutstandingMessages), + pubsub.WithMaxOutstandingBytes(*s.pullerMaxOutstandingBytes), + ) + if err != nil { + return nil, err + } + return puller, nil +} + +// for telepresence --swap-deployment +func (s *server) insertTelepresenceMountRoot(path string) string { + volumeRoot := os.Getenv("TELEPRESENCE_ROOT") + if volumeRoot == "" { + return path + } + return volumeRoot + path +} diff --git a/pkg/push/cmd/server/BUILD.bazel b/pkg/push/cmd/server/BUILD.bazel new file mode 100644 index 000000000..9fae510e4 --- /dev/null +++ b/pkg/push/cmd/server/BUILD.bazel @@ -0,0 +1,25 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["server.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/push/cmd/server", + visibility = ["//visibility:public"], + deps = [ + "//pkg/account/client:go_default_library", + "//pkg/cli:go_default_library", + "//pkg/experiment/client:go_default_library", + "//pkg/feature/client:go_default_library", + "//pkg/health:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/pubsub:go_default_library", + "//pkg/pubsub/publisher:go_default_library", + "//pkg/push/api:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/rpc/client:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//pkg/token:go_default_library", + "@in_gopkg_alecthomas_kingpin_v2//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/push/cmd/server/server.go b/pkg/push/cmd/server/server.go new file mode 100644 index 000000000..489589f4d --- /dev/null +++ b/pkg/push/cmd/server/server.go @@ -0,0 +1,235 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + "context" + "os" + "time" + + "go.uber.org/zap" + kingpin "gopkg.in/alecthomas/kingpin.v2" + + accountclient "github.com/bucketeer-io/bucketeer/pkg/account/client" + "github.com/bucketeer-io/bucketeer/pkg/cli" + experimentclient "github.com/bucketeer-io/bucketeer/pkg/experiment/client" + featureclient "github.com/bucketeer-io/bucketeer/pkg/feature/client" + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/pubsub" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + "github.com/bucketeer-io/bucketeer/pkg/push/api" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + "github.com/bucketeer-io/bucketeer/pkg/token" +) + +const command = "server" + +type server struct { + *kingpin.CmdClause + port *int + project *string + mysqlUser *string + mysqlPass *string + mysqlHost *string + mysqlPort *int + mysqlDBName *string + domainEventTopic *string + accountService *string + featureService *string + experimentService *string + certPath *string + keyPath *string + serviceTokenPath *string + + oauthKeyPath *string + oauthClientID *string + oauthIssuer *string +} + +func RegisterCommand(r cli.CommandRegistry, p cli.ParentCommand) cli.Command { + cmd := p.Command(command, "Start the gRPC 
server") + server := &server{ + CmdClause: cmd, + port: cmd.Flag("port", "Port to bind to.").Default("9090").Int(), + project: cmd.Flag("project", "Google Cloud project name.").Required().String(), + mysqlUser: cmd.Flag("mysql-user", "MySQL user.").Required().String(), + mysqlPass: cmd.Flag("mysql-pass", "MySQL password.").Required().String(), + mysqlHost: cmd.Flag("mysql-host", "MySQL host.").Required().String(), + mysqlPort: cmd.Flag("mysql-port", "MySQL port.").Required().Int(), + mysqlDBName: cmd.Flag("mysql-db-name", "MySQL database name.").Required().String(), + domainEventTopic: cmd.Flag("domain-event-topic", "PubSub topic to publish domain events.").Required().String(), + accountService: cmd.Flag("account-service", "bucketeer-account-service address.").Default("account:9090").String(), + featureService: cmd.Flag("feature-service", "bucketeer-feature-service address.").Default("feature:9090").String(), + experimentService: cmd.Flag( + "experiment-service", + "bucketeer-experiment-service address.", + ).Default("experiment:9090").String(), + certPath: cmd.Flag("cert", "Path to TLS certificate.").Required().String(), + keyPath: cmd.Flag("key", "Path to TLS key.").Required().String(), + serviceTokenPath: cmd.Flag("service-token", "Path to service token.").Required().String(), + oauthKeyPath: cmd.Flag("oauth-key", "Path to public key used to verify oauth token.").Required().String(), + oauthClientID: cmd.Flag("oauth-client-id", "The oauth clientID registered at dex.").Required().String(), + oauthIssuer: cmd.Flag("oauth-issuer", "The url of dex issuer.").Required().String(), + } + r.RegisterCommand(server) + return server +} + +func (s *server) Run(ctx context.Context, metrics metrics.Metrics, logger *zap.Logger) error { + registerer := metrics.DefaultRegisterer() + + *s.serviceTokenPath = s.insertTelepresenceMoutRoot(*s.serviceTokenPath) + *s.oauthKeyPath = s.insertTelepresenceMoutRoot(*s.oauthKeyPath) + *s.keyPath = s.insertTelepresenceMoutRoot(*s.keyPath) + 
*s.certPath = s.insertTelepresenceMoutRoot(*s.certPath) + + mysqlClient, err := s.createMySQLClient(ctx, registerer, logger) + if err != nil { + return err + } + defer mysqlClient.Close() + + publisher, err := s.createDomainEventPublisher(ctx, registerer, logger) + if err != nil { + return err + } + defer publisher.Stop() + + creds, err := client.NewPerRPCCredentials(*s.serviceTokenPath) + if err != nil { + return err + } + + featureClient, err := featureclient.NewClient(*s.featureService, *s.certPath, + client.WithPerRPCCredentials(creds), + client.WithDialTimeout(30*time.Second), + client.WithBlock(), + client.WithMetrics(registerer), + client.WithLogger(logger), + ) + if err != nil { + return err + } + defer featureClient.Close() + + experimentClient, err := experimentclient.NewClient(*s.experimentService, *s.certPath, + client.WithPerRPCCredentials(creds), + client.WithDialTimeout(30*time.Second), + client.WithBlock(), + client.WithMetrics(registerer), + client.WithLogger(logger), + ) + if err != nil { + return err + } + defer experimentClient.Close() + + accountClient, err := accountclient.NewClient(*s.accountService, *s.certPath, + client.WithPerRPCCredentials(creds), + client.WithDialTimeout(30*time.Second), + client.WithBlock(), + client.WithMetrics(registerer), + client.WithLogger(logger), + ) + if err != nil { + return err + } + defer accountClient.Close() + + service := api.NewPushService( + mysqlClient, + featureClient, + experimentClient, + accountClient, + publisher, + api.WithLogger(logger), + ) + + verifier, err := token.NewVerifier(*s.oauthKeyPath, *s.oauthIssuer, *s.oauthClientID) + if err != nil { + return err + } + + healthChecker := health.NewGrpcChecker( + health.WithTimeout(time.Second), + health.WithCheck("metrics", metrics.Check), + ) + go healthChecker.Run(ctx) + + server := rpc.NewServer(service, *s.certPath, *s.keyPath, + rpc.WithPort(*s.port), + rpc.WithVerifier(verifier), + rpc.WithMetrics(registerer), + rpc.WithLogger(logger), + 
rpc.WithService(healthChecker), + rpc.WithHandler("/health", healthChecker), + ) + defer server.Stop(10 * time.Second) + go server.Run() + + <-ctx.Done() + return nil +} + +func (s *server) createMySQLClient( + ctx context.Context, + registerer metrics.Registerer, + logger *zap.Logger, +) (mysql.Client, error) { + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + return mysql.NewClient( + ctx, + *s.mysqlUser, *s.mysqlPass, *s.mysqlHost, + *s.mysqlPort, + *s.mysqlDBName, + mysql.WithLogger(logger), + mysql.WithMetrics(registerer), + ) +} + +func (s *server) createDomainEventPublisher( + ctx context.Context, + registerer metrics.Registerer, + logger *zap.Logger, +) (publisher.Publisher, error) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + client, err := pubsub.NewClient( + ctx, + *s.project, + pubsub.WithMetrics(registerer), + pubsub.WithLogger(logger), + ) + if err != nil { + return nil, err + } + domainPublisher, err := client.CreatePublisher(*s.domainEventTopic) + if err != nil { + return nil, err + } + return domainPublisher, nil +} + +func (s *server) insertTelepresenceMoutRoot(path string) string { + volumeRoot := os.Getenv("TELEPRESENCE_ROOT") + if volumeRoot == "" { + return path + } + return volumeRoot + path +} diff --git a/pkg/push/command/BUILD.bazel b/pkg/push/command/BUILD.bazel new file mode 100644 index 000000000..d7a8cf3f5 --- /dev/null +++ b/pkg/push/command/BUILD.bazel @@ -0,0 +1,36 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "command.go", + "push.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/push/command", + visibility = ["//visibility:public"], + deps = [ + "//pkg/domainevent/domain:go_default_library", + "//pkg/pubsub/publisher:go_default_library", + "//pkg/push/domain:go_default_library", + "//proto/event/domain:go_default_library", + "//proto/push:go_default_library", + 
"@com_github_golang_protobuf//proto:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["push_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/pubsub/publisher:go_default_library", + "//pkg/pubsub/publisher/mock:go_default_library", + "//pkg/push/domain:go_default_library", + "//proto/account:go_default_library", + "//proto/event/domain:go_default_library", + "//proto/push:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + ], +) diff --git a/pkg/push/command/command.go b/pkg/push/command/command.go new file mode 100644 index 000000000..352f9f6d9 --- /dev/null +++ b/pkg/push/command/command.go @@ -0,0 +1,30 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package command + +import ( + "context" + "errors" +) + +var ( + errUnknownCommand = errors.New("command: unknown command") +) + +type Command interface{} + +type Handler interface { + Handle(ctx context.Context, cmd Command) error +} diff --git a/pkg/push/command/push.go b/pkg/push/command/push.go new file mode 100644 index 000000000..b8b5d316e --- /dev/null +++ b/pkg/push/command/push.go @@ -0,0 +1,117 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package command + +import ( + "context" + + pb "github.com/golang/protobuf/proto" // nolint:staticcheck + + domainevent "github.com/bucketeer-io/bucketeer/pkg/domainevent/domain" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + "github.com/bucketeer-io/bucketeer/pkg/push/domain" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" + proto "github.com/bucketeer-io/bucketeer/proto/push" +) + +type pushCommandHandler struct { + editor *eventproto.Editor + push *domain.Push + publisher publisher.Publisher + environmentNamespace string +} + +func NewPushCommandHandler( + editor *eventproto.Editor, + push *domain.Push, + p publisher.Publisher, + environmentNamespace string, +) Handler { + return &pushCommandHandler{ + editor: editor, + push: push, + publisher: p, + environmentNamespace: environmentNamespace, + } +} + +func (h *pushCommandHandler) Handle(ctx context.Context, cmd Command) error { + switch c := cmd.(type) { + case *proto.CreatePushCommand: + return h.create(ctx, c) + case *proto.DeletePushCommand: + return h.delete(ctx, c) + case *proto.AddPushTagsCommand: + return h.addTags(ctx, c) + case *proto.DeletePushTagsCommand: + return h.deleteTags(ctx, c) + case *proto.RenamePushCommand: + return h.rename(ctx, c) + } + return errUnknownCommand +} + +func (h *pushCommandHandler) create(ctx context.Context, cmd *proto.CreatePushCommand) error { + return h.send(ctx, 
eventproto.Event_PUSH_CREATED, &eventproto.PushCreatedEvent{
+ Name: h.push.Name,
+ FcmApiKey: h.push.FcmApiKey,
+ Tags: h.push.Tags,
+ })
+}
+
+func (h *pushCommandHandler) delete(ctx context.Context, cmd *proto.DeletePushCommand) error {
+ h.push.SetDeleted()
+ return h.send(ctx, eventproto.Event_PUSH_DELETED, &eventproto.PushDeletedEvent{})
+}
+
+func (h *pushCommandHandler) addTags(ctx context.Context, cmd *proto.AddPushTagsCommand) error {
+ err := h.push.AddTags(cmd.Tags)
+ if err != nil {
+ return err
+ }
+ return h.send(ctx, eventproto.Event_PUSH_TAGS_ADDED, &eventproto.PushTagsAddedEvent{
+ Tags: cmd.Tags,
+ })
+}
+
+func (h *pushCommandHandler) deleteTags(ctx context.Context, cmd *proto.DeletePushTagsCommand) error {
+ err := h.push.DeleteTags(cmd.Tags)
+ if err != nil {
+ return err
+ }
+ return h.send(ctx, eventproto.Event_PUSH_TAGS_DELETED, &eventproto.PushTagsDeletedEvent{
+ Tags: cmd.Tags,
+ })
+}
+
+func (h *pushCommandHandler) rename(ctx context.Context, cmd *proto.RenamePushCommand) error {
+ if err := h.push.Rename(cmd.Name); err != nil {
+ return err
+ }
+ return h.send(ctx, eventproto.Event_PUSH_RENAMED, &eventproto.PushRenamedEvent{
+ Name: cmd.Name,
+ })
+}
+
+func (h *pushCommandHandler) send(ctx context.Context, eventType eventproto.Event_Type, event pb.Message) error {
+ e, err := domainevent.NewEvent(h.editor, eventproto.Event_PUSH, h.push.Id, eventType, event, h.environmentNamespace)
+ if err != nil {
+ return err
+ }
+ if err := h.publisher.Publish(ctx, e); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/pkg/push/command/push_test.go b/pkg/push/command/push_test.go
new file mode 100644
index 000000000..75b90a1ed
--- /dev/null
+++ b/pkg/push/command/push_test.go
@@ -0,0 +1,165 @@
+// Copyright 2022 The Bucketeer Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package command + +import ( + "context" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher" + publishermock "github.com/bucketeer-io/bucketeer/pkg/pubsub/publisher/mock" + domain "github.com/bucketeer-io/bucketeer/pkg/push/domain" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" + proto "github.com/bucketeer-io/bucketeer/proto/push" +) + +func TestCreate(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := []*struct { + expected error + }{ + { + expected: nil, + }, + } + for _, p := range patterns { + pm := publishermock.NewMockPublisher(mockController) + pd := newPush(t) + ch := newPushCommandHandler(t, pm, pd) + if p.expected == nil { + pm.EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil).Times(1) + } + cmd := &proto.CreatePushCommand{Name: "name-1", FcmApiKey: "key-0", Tags: []string{"tag-0", "tag-1"}} + err := ch.Handle(context.Background(), cmd) + assert.Equal(t, p.expected, err) + } +} + +func TestDelete(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := []*struct { + expected error + }{ + { + expected: nil, + }, + } + for _, p := range patterns { + pm := publishermock.NewMockPublisher(mockController) + pd := newPush(t) + ch := newPushCommandHandler(t, pm, pd) + if p.expected == nil { + 
pm.EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil).Times(1) + } + cmd := &proto.DeletePushCommand{} + err := ch.Handle(context.Background(), cmd) + assert.Equal(t, p.expected, err) + } +} + +func TestAddTags(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := []*struct { + expected error + }{ + { + expected: nil, + }, + } + for _, p := range patterns { + pm := publishermock.NewMockPublisher(mockController) + pd := newPush(t) + ch := newPushCommandHandler(t, pm, pd) + if p.expected == nil { + pm.EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil).Times(1) + } + cmd := &proto.AddPushTagsCommand{Tags: []string{"tag-2"}} + err := ch.Handle(context.Background(), cmd) + assert.Equal(t, p.expected, err) + } +} + +func TestDeleteTags(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := []*struct { + expected error + }{ + { + expected: nil, + }, + } + for _, p := range patterns { + pm := publishermock.NewMockPublisher(mockController) + pd := newPush(t) + ch := newPushCommandHandler(t, pm, pd) + if p.expected == nil { + pm.EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil).Times(1) + } + cmd := &proto.DeletePushTagsCommand{Tags: []string{"tag-0"}} + err := ch.Handle(context.Background(), cmd) + assert.Equal(t, p.expected, err) + } +} + +func TestRename(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := []*struct { + expected error + }{ + { + expected: nil, + }, + } + for _, p := range patterns { + pm := publishermock.NewMockPublisher(mockController) + pd := newPush(t) + ch := newPushCommandHandler(t, pm, pd) + if p.expected == nil { + pm.EXPECT().Publish(gomock.Any(), gomock.Any()).Return(nil).Times(1) + } + cmd := &proto.RenamePushCommand{Name: "name-2"} + err := ch.Handle(context.Background(), cmd) + assert.Equal(t, p.expected, err) + } +} + +func newPush(t *testing.T) *domain.Push { + d, err 
:= domain.NewPush("name-1", "key-0", []string{"tag-0", "tag-1"}) + require.NoError(t, err) + return d +} + +func newPushCommandHandler(t *testing.T, publisher publisher.Publisher, push *domain.Push) Handler { + t.Helper() + return NewPushCommandHandler( + &eventproto.Editor{ + Email: "email", + Role: accountproto.Account_EDITOR, + }, + push, + publisher, + "ns0", + ) +} diff --git a/pkg/push/domain/BUILD.bazel b/pkg/push/domain/BUILD.bazel new file mode 100644 index 000000000..b5aeed5f8 --- /dev/null +++ b/pkg/push/domain/BUILD.bazel @@ -0,0 +1,22 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["push.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/push/domain", + visibility = ["//visibility:public"], + deps = [ + "//pkg/uuid:go_default_library", + "//proto/push:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["push_test.go"], + embed = [":go_default_library"], + deps = [ + "//proto/push:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + ], +) diff --git a/pkg/push/domain/push.go b/pkg/push/domain/push.go new file mode 100644 index 000000000..c4d4841c6 --- /dev/null +++ b/pkg/push/domain/push.go @@ -0,0 +1,125 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package domain + +import ( + "errors" + "time" + + "github.com/bucketeer-io/bucketeer/pkg/uuid" + proto "github.com/bucketeer-io/bucketeer/proto/push" +) + +var ( + ErrTagDuplicated = errors.New("push: tag is duplicated") + ErrTagAlreadyExists = errors.New("push: tag already exists") + ErrTagNotFound = errors.New("push: tag not found") +) + +type Push struct { + *proto.Push +} + +func NewPush(name, fcmAPIKey string, tags []string) (*Push, error) { + _, err := convMap(tags) + if err != nil { + return nil, err + } + id, err := uuid.NewUUID() + if err != nil { + return nil, err + } + now := time.Now().Unix() + p := &Push{&proto.Push{ + Name: name, + Id: id.String(), + FcmApiKey: fcmAPIKey, + Tags: tags, + CreatedAt: now, + UpdatedAt: now, + }} + return p, nil +} + +func (p *Push) Rename(name string) error { + p.Name = name + p.UpdatedAt = time.Now().Unix() + return nil +} + +func convMap(ss []string) (map[string]int, error) { + m := make(map[string]int, len(ss)) + for idx, t := range ss { + if _, ok := m[t]; ok { + return nil, ErrTagDuplicated + } + m[t] = idx + } + return m, nil +} + +func (p *Push) AddTags(newTags []string) error { + mtag, err := convMap(p.Tags) + if err != nil { + return err + } + for _, t := range newTags { + if _, ok := mtag[t]; ok { + return ErrTagAlreadyExists + } + p.Tags = append(p.Tags, t) + } + p.UpdatedAt = time.Now().Unix() + return nil +} + +func (p *Push) DeleteTags(tags []string) error { + tagMap, err := convMap(tags) + if err != nil { + return err + } + existMap, err := convMap(p.Tags) + if err != nil { + return err + } + for _, t := range tags { + if _, ok := existMap[t]; !ok { + return ErrTagNotFound + } + } + newTags := []string{} + for _, t := range p.Tags { + if _, ok := tagMap[t]; !ok { + newTags = append(newTags, t) + } + } + p.Tags = newTags + p.UpdatedAt = time.Now().Unix() + return nil +} + +func (p *Push) SetDeleted() { + p.Deleted = true + p.UpdatedAt = time.Now().Unix() +} + +func (p *Push) ExistTag(findTag string) 
bool { + for _, t := range p.Tags { + if t == findTag { + return true + } + } + return false +} diff --git a/pkg/push/domain/push_test.go b/pkg/push/domain/push_test.go new file mode 100644 index 000000000..61fd55ddb --- /dev/null +++ b/pkg/push/domain/push_test.go @@ -0,0 +1,181 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package domain + +import ( + "sort" + "testing" + + "github.com/stretchr/testify/assert" + + pushproto "github.com/bucketeer-io/bucketeer/proto/push" +) + +func TestNewPush(t *testing.T) { + t.Parallel() + name := "name-1" + key := "key-1" + tags := []string{"tag-1", "tag-2"} + actual, err := NewPush(name, key, tags) + assert.NoError(t, err) + assert.IsType(t, &Push{}, actual) + assert.NotEqual(t, "", actual.Id) + assert.NotEqual(t, key, actual.Id) + assert.Equal(t, key, actual.FcmApiKey) + assert.Equal(t, tags, actual.Tags) +} + +func TestSetDeleted(t *testing.T) { + t.Parallel() + name := "name-1" + key := "key-1" + tags := []string{"tag-1", "tag-2"} + actual, err := NewPush(name, key, tags) + assert.NoError(t, err) + assert.Equal(t, false, actual.Deleted) + actual.SetDeleted() + assert.Equal(t, true, actual.Deleted) +} + +func TestAddTags(t *testing.T) { + t.Parallel() + patterns := map[string]struct { + origin *Push + input []string + expectedErr error + expected []string + }{ + "success: one": { + origin: &Push{&pushproto.Push{Tags: []string{"tag-0", "tag-1"}}}, + input: 
[]string{"tag-2"}, + expectedErr: nil, + expected: []string{"tag-0", "tag-1", "tag-2"}, + }, + "success: two": { + origin: &Push{&pushproto.Push{Tags: []string{"tag-0", "tag-1"}}}, + input: []string{"tag-2", "tag-3"}, + expectedErr: nil, + expected: []string{"tag-0", "tag-1", "tag-2", "tag-3"}, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + err := p.origin.AddTags(p.input) + assert.Equal(t, p.expectedErr, err) + sort.Strings(p.expected) + sort.Strings(p.origin.Tags) + assert.Equal(t, p.expected, p.origin.Tags) + }) + } +} + +func TestDeleteTags(t *testing.T) { + t.Parallel() + patterns := map[string]struct { + origin *Push + input []string + expectedErr error + expected []string + }{ + "success: one": { + origin: &Push{&pushproto.Push{Tags: []string{"tag-0", "tag-1"}}}, + input: []string{"tag-1"}, + expectedErr: nil, + expected: []string{"tag-0"}, + }, + "success: two": { + origin: &Push{&pushproto.Push{Tags: []string{"tag-0", "tag-1"}}}, + input: []string{"tag-0", "tag-1"}, + expectedErr: nil, + expected: []string{}, + }, + "fail: not found: one": { + origin: &Push{&pushproto.Push{Tags: []string{"tag-0", "tag-1"}}}, + input: []string{"tag-2"}, + expectedErr: ErrTagNotFound, + expected: []string{"tag-0", "tag-1"}, + }, + "fail: not found: two": { + origin: &Push{&pushproto.Push{Tags: []string{"tag-0", "tag-1"}}}, + input: []string{"tag-0", "tag-2"}, + expectedErr: ErrTagNotFound, + expected: []string{"tag-0", "tag-1"}, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + err := p.origin.DeleteTags(p.input) + assert.Equal(t, p.expectedErr, err) + sort.Strings(p.expected) + sort.Strings(p.origin.Tags) + assert.Equal(t, p.expected, p.origin.Tags) + }) + } +} + +func TestExistTag(t *testing.T) { + t.Parallel() + patterns := map[string]struct { + origin *Push + input string + expected bool + }{ + "true": { + origin: &Push{&pushproto.Push{Tags: []string{"tag-0", "tag-1"}}}, + input: "tag-1", + expected: true, + }, + 
"false: no tags": { + origin: &Push{&pushproto.Push{}}, + input: "tag-1", + expected: false, + }, + "false: not found": { + origin: &Push{&pushproto.Push{Tags: []string{"tag-0", "tag-1"}}}, + input: "tag-2", + expected: false, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + actual := p.origin.ExistTag(p.input) + assert.Equal(t, p.expected, actual) + }) + } +} + +func TestRename(t *testing.T) { + t.Parallel() + patterns := map[string]struct { + origin *Push + input string + expectedErr error + expected string + }{ + "success": { + origin: &Push{&pushproto.Push{Name: "a"}}, + input: "b", + expectedErr: nil, + expected: "b", + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + err := p.origin.Rename(p.input) + assert.Equal(t, p.expectedErr, err) + assert.Equal(t, p.expected, p.origin.Name) + }) + } +} diff --git a/pkg/push/sender/BUILD.bazel b/pkg/push/sender/BUILD.bazel new file mode 100644 index 000000000..df62492d1 --- /dev/null +++ b/pkg/push/sender/BUILD.bazel @@ -0,0 +1,41 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "metrics.go", + "sender.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/push/sender", + visibility = ["//visibility:public"], + deps = [ + "//pkg/cache:go_default_library", + "//pkg/cache/v3:go_default_library", + "//pkg/errgroup:go_default_library", + "//pkg/feature/client:go_default_library", + "//pkg/health:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/pubsub/puller:go_default_library", + "//pkg/pubsub/puller/codes:go_default_library", + "//pkg/push/client:go_default_library", + "//pkg/push/domain:go_default_library", + "//proto/event/domain:go_default_library", + "//proto/feature:go_default_library", + "//proto/push:go_default_library", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_prometheus_client_golang//prometheus:go_default_library", + 
"@io_bazel_rules_go//proto/wkt:wrappers_go_proto", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["sender_test.go"], + embed = [":go_default_library"], + deps = [ + "//proto/event/domain:go_default_library", + "//proto/feature:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + ], +) diff --git a/pkg/push/sender/metrics.go b/pkg/push/sender/metrics.go new file mode 100644 index 000000000..6d262bae9 --- /dev/null +++ b/pkg/push/sender/metrics.go @@ -0,0 +1,56 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package sender + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/bucketeer-io/bucketeer/pkg/metrics" +) + +var ( + receivedCounter = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "push", + Name: "sender_received_total", + Help: "Total number of received messages", + }) + + handledCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "push", + Name: "sender_handled_total", + Help: "Total number of handled messages", + }, []string{"code"}) + + handledHistogram = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "bucketeer", + Subsystem: "push", + Name: "sender_handled_seconds", + Help: "Histogram of message handling duration (seconds)", + Buckets: prometheus.DefBuckets, + }, []string{"code"}) +) + +func registerMetrics(r metrics.Registerer) { + r.MustRegister( + receivedCounter, + handledCounter, + handledHistogram, + ) +} diff --git a/pkg/push/sender/sender.go b/pkg/push/sender/sender.go new file mode 100644 index 000000000..332fa59e0 --- /dev/null +++ b/pkg/push/sender/sender.go @@ -0,0 +1,433 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package sender + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/http" + "time" + + "github.com/golang/protobuf/proto" // nolint:staticcheck + "github.com/golang/protobuf/ptypes/wrappers" + "go.uber.org/zap" + + "github.com/bucketeer-io/bucketeer/pkg/cache" + cachev3 "github.com/bucketeer-io/bucketeer/pkg/cache/v3" + "github.com/bucketeer-io/bucketeer/pkg/errgroup" + featureclient "github.com/bucketeer-io/bucketeer/pkg/feature/client" + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller/codes" + pushclient "github.com/bucketeer-io/bucketeer/pkg/push/client" + pushdomain "github.com/bucketeer-io/bucketeer/pkg/push/domain" + domaineventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" + pushproto "github.com/bucketeer-io/bucketeer/proto/push" +) + +type options struct { + maxMPS int + numWorkers int + metrics metrics.Registerer + logger *zap.Logger +} + +const ( + listRequestSize = 500 + fcmSendURL = "https://fcm.googleapis.com/fcm/send" + topicPrefix = "bucketeer-" +) + +var defaultOptions = options{ + maxMPS: 1000, + numWorkers: 1, + logger: zap.NewNop(), +} + +type Option func(*options) + +func WithMaxMPS(mps int) Option { + return func(opts *options) { + opts.maxMPS = mps + } +} + +func WithNumWorkers(n int) Option { + return func(opts *options) { + opts.numWorkers = n + } +} + +func WithMetrics(r metrics.Registerer) Option { + return func(opts *options) { + opts.metrics = r + } +} + +func WithLogger(logger *zap.Logger) Option { + return func(opts *options) { + opts.logger = logger + } +} + +type Sender interface { + Check(context.Context) health.Status + Run() error + Stop() +} + +type sender struct { + puller puller.RateLimitedPuller + pushClient pushclient.Client + featureClient featureclient.Client + 
featuresCache cachev3.FeaturesCache + group errgroup.Group + opts *options + logger *zap.Logger + ctx context.Context + cancel func() + doneCh chan struct{} +} + +func NewSender( + p puller.Puller, + pushClient pushclient.Client, + featureClient featureclient.Client, + v3Cache cache.MultiGetCache, + opts ...Option) Sender { + + ctx, cancel := context.WithCancel(context.Background()) + options := defaultOptions + for _, opt := range opts { + opt(&options) + } + if options.metrics != nil { + registerMetrics(options.metrics) + } + return &sender{ + puller: puller.NewRateLimitedPuller(p, options.maxMPS), + pushClient: pushClient, + featureClient: featureClient, + featuresCache: cachev3.NewFeaturesCache(v3Cache), + opts: &options, + logger: options.logger.Named("sender"), + ctx: ctx, + cancel: cancel, + doneCh: make(chan struct{}), + } +} + +func (s *sender) Run() error { + defer close(s.doneCh) + s.group.Go(func() error { + return s.puller.Run(s.ctx) + }) + for i := 0; i < s.opts.numWorkers; i++ { + s.group.Go(s.runWorker) + } + return s.group.Wait() +} + +func (s *sender) Stop() { + s.cancel() + <-s.doneCh +} + +func (s *sender) Check(ctx context.Context) health.Status { + select { + case <-s.ctx.Done(): + s.logger.Error("Unhealthy due to context Done is closed", zap.Error(s.ctx.Err())) + return health.Unhealthy + default: + if s.group.FinishedCount() > 0 { + s.logger.Error("Unhealthy", zap.Int32("FinishedCount", s.group.FinishedCount())) + return health.Unhealthy + } + return health.Healthy + } +} + +func (s *sender) runWorker() error { + record := func(code codes.Code, startTime time.Time) { + handledCounter.WithLabelValues(code.String()).Inc() + handledHistogram.WithLabelValues(code.String()).Observe(time.Since(startTime).Seconds()) + } + for { + select { + case msg, ok := <-s.puller.MessageCh(): + if !ok { + return nil + } + receivedCounter.Inc() + startTime := time.Now() + if id := msg.Attributes["id"]; id == "" { + msg.Ack() + record(codes.MissingID, startTime) 
+ continue + } + s.handle(msg) + msg.Ack() + record(codes.OK, startTime) + case <-s.ctx.Done(): + return nil + } + } +} + +func (s *sender) handle(msg *puller.Message) { + event, err := s.unmarshalMessage(msg) + if err != nil { + msg.Ack() + handledCounter.WithLabelValues(codes.BadMessage.String()).Inc() + return + } + featureID, isTarget := s.extractFeatureID(event) + if !isTarget { + msg.Ack() + handledCounter.WithLabelValues(codes.OK.String()).Inc() + return + } + if featureID == "" { + msg.Ack() + handledCounter.WithLabelValues(codes.BadMessage.String()).Inc() + s.logger.Warn("Message contains an empty FeatureID", zap.Any("event", event)) + return + } + if err := s.send(featureID, event.EnvironmentNamespace); err != nil { + msg.Ack() + handledCounter.WithLabelValues(codes.NonRepeatableError.String()).Inc() + return + } + msg.Ack() + handledCounter.WithLabelValues(codes.OK.String()).Inc() +} + +func (s *sender) send(featureID, environmentNamespace string) error { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + resp, err := s.featureClient.GetFeature(ctx, &featureproto.GetFeatureRequest{ + Id: featureID, + EnvironmentNamespace: environmentNamespace, + }) + if err != nil { + return err + } + pushes, err := s.listPushes(ctx, environmentNamespace) + if err != nil { + return err + } + if len(pushes) == 0 { + s.logger.Info("No pushes", + zap.String("featureId", featureID), + zap.String("environmentNamespace", environmentNamespace), + ) + return nil + } + var lastErr error + for _, p := range pushes { + d := pushdomain.Push{Push: p} + for _, t := range resp.Feature.Tags { + if !d.ExistTag(t) { + continue + } + if !s.isFeaturesCacheLatest(ctx, environmentNamespace, t, resp.Feature.Id, resp.Feature.Version) { + if err = s.updateFeatures(ctx, environmentNamespace, t); err != nil { + s.logger.Error("Failed to update features", zap.Error(err), + zap.String("featureId", featureID), + zap.String("tag", t), + zap.String("pushId", 
d.Push.Id), + zap.String("environmentNamespace", environmentNamespace), + ) + } + } + topic := topicPrefix + t + if err = s.pushFCM(ctx, d.FcmApiKey, topic); err != nil { + s.logger.Error("Failed to push notification", zap.Error(err), + zap.String("featureId", featureID), + zap.String("tag", t), + zap.String("topic", topic), + zap.String("pushId", d.Push.Id), + zap.String("environmentNamespace", environmentNamespace), + ) + lastErr = err + continue + } + s.logger.Info("Succeeded to push notification", + zap.String("featureId", featureID), + zap.String("tag", t), + zap.String("topic", topic), + zap.String("pushId", d.Push.Id), + zap.String("environmentNamespace", environmentNamespace), + ) + } + } + return lastErr +} + +func (s *sender) pushFCM(ctx context.Context, fcmAPIKey, topic string) error { + requestBody, err := json.Marshal(map[string]interface{}{ + "to": "/topics/" + topic, + "data": map[string]interface{}{ + "bucketeer_feature_flag_updated": true, + }, + }) + if err != nil { + return err + } + req, err := http.NewRequest("POST", fcmSendURL, bytes.NewBuffer(requestBody)) + if err != nil { + return err + } + req.Header.Set("Authorization", fmt.Sprintf("key=%s", fcmAPIKey)) + req.Header.Set("Content-Type", "application/json") + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + return nil +} + +func (s *sender) listPushes(ctx context.Context, environmentNamespace string) ([]*pushproto.Push, error) { + pushes := []*pushproto.Push{} + cursor := "" + for { + resp, err := s.pushClient.ListPushes(ctx, &pushproto.ListPushesRequest{ + PageSize: listRequestSize, + Cursor: cursor, + EnvironmentNamespace: environmentNamespace, + }) + if err != nil { + return nil, err + } + pushes = append(pushes, resp.Pushes...) 
+ pushSize := len(resp.Pushes) + if pushSize == 0 || pushSize < listRequestSize { + return pushes, nil + } + cursor = resp.Cursor + } +} + +func (s *sender) unmarshalMessage(msg *puller.Message) (*domaineventproto.Event, error) { + event := &domaineventproto.Event{} + err := proto.Unmarshal(msg.Data, event) + if err != nil { + s.logger.Error("Failed to unmarshal message", zap.Error(err), zap.String("msgID", msg.ID)) + return nil, err + } + return event, nil +} + +func (s *sender) extractFeatureID(event *domaineventproto.Event) (string, bool) { + if event.EntityType != domaineventproto.Event_FEATURE { + return "", false + } + if event.Type != domaineventproto.Event_FEATURE_VERSION_INCREMENTED { + return "", false + } + return event.EntityId, true +} + +func (s *sender) isFeaturesCacheLatest( + ctx context.Context, + environmentNamespace, + tag, featureID string, + featureVersion int32, +) bool { + features, err := s.featuresCache.Get(environmentNamespace) + if err != nil { + s.logger.Info( + "Failed to get Features", + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + zap.String("tag", tag), + zap.String("featureId", featureID), + zap.Int32("featureVersion", featureVersion), + ) + return false + } + return s.isFeaturesLatest(features, featureID, featureVersion) +} + +func (s *sender) updateFeatures(ctx context.Context, environmentNamespace, tag string) error { + fs, err := s.listFeatures(ctx, environmentNamespace) + if err != nil { + s.logger.Error( + "Failed to retrieve features from storage", + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + zap.String("tag", tag), + ) + return err + } + features := &featureproto.Features{ + Features: fs, + } + if err := s.featuresCache.Put(features, environmentNamespace); err != nil { + s.logger.Error( + "Failed to cache features", + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + ) + return err + } + return nil +} + +func (s *sender) 
isFeaturesLatest( + features *featureproto.Features, + featureID string, + featureVersion int32, +) bool { + for _, f := range features.Features { + if f.Id == featureID { + return f.Version >= featureVersion + } + } + return false +} + +func (s *sender) listFeatures(ctx context.Context, environmentNamespace string) ([]*featureproto.Feature, error) { + features := []*featureproto.Feature{} + cursor := "" + for { + resp, err := s.featureClient.ListFeatures(ctx, &featureproto.ListFeaturesRequest{ + PageSize: listRequestSize, + Cursor: cursor, + EnvironmentNamespace: environmentNamespace, + Archived: &wrappers.BoolValue{Value: false}, + }) + if err != nil { + return nil, err + } + for _, f := range resp.Features { + if !f.Enabled && f.OffVariation == "" { + continue + } + features = append(features, f) + } + featureSize := len(resp.Features) + if featureSize == 0 || featureSize < listRequestSize { + return features, nil + } + cursor = resp.Cursor + } +} diff --git a/pkg/push/sender/sender_test.go b/pkg/push/sender/sender_test.go new file mode 100644 index 000000000..7886fa68f --- /dev/null +++ b/pkg/push/sender/sender_test.go @@ -0,0 +1,111 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package sender + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + domaineventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +func TestIsFeaturesLatest(t *testing.T) { + t.Parallel() + patterns := map[string]struct { + features *featureproto.Features + featureID string + featureVersion int32 + expected bool + }{ + "no feature": { + features: &featureproto.Features{ + Features: []*featureproto.Feature{{Id: "wrong", Version: int32(1)}}, + }, + featureID: "fid", + featureVersion: int32(1), + expected: false, + }, + "not the latest version": { + features: &featureproto.Features{ + Features: []*featureproto.Feature{{Id: "fid", Version: int32(1)}}, + }, + featureID: "fid", + featureVersion: int32(2), + expected: false, + }, + "the latest version": { + features: &featureproto.Features{ + Features: []*featureproto.Feature{{Id: "fid", Version: int32(2)}}, + }, + featureID: "fid", + featureVersion: int32(2), + expected: true, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + s := &sender{} + actual := s.isFeaturesLatest(p.features, p.featureID, p.featureVersion) + assert.Equal(t, p.expected, actual) + }) + } +} + +func TestExtractFeatureID(t *testing.T) { + t.Parallel() + patterns := map[string]struct { + input *domaineventproto.Event + expectedID string + expectedIsTarget bool + }{ + "not feature entity": { + input: &domaineventproto.Event{ + EntityType: domaineventproto.Event_EXPERIMENT, + EntityId: "fid", + Type: domaineventproto.Event_FEATURE_VERSION_INCREMENTED, + }, + expectedID: "", + expectedIsTarget: false, + }, + "not version incremented": { + input: &domaineventproto.Event{ + EntityType: domaineventproto.Event_EXPERIMENT, + EntityId: "fid", + Type: domaineventproto.Event_FEATURE_DESCRIPTION_CHANGED, + }, + expectedID: "", + expectedIsTarget: false, + }, + "is target": { + input: &domaineventproto.Event{ + EntityType: 
domaineventproto.Event_FEATURE, + EntityId: "fid", + Type: domaineventproto.Event_FEATURE_VERSION_INCREMENTED, + }, + expectedID: "fid", + expectedIsTarget: true, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + s := &sender{} + actualID, actualIsTarget := s.extractFeatureID(p.input) + assert.Equal(t, p.expectedID, actualID) + assert.Equal(t, p.expectedIsTarget, actualIsTarget) + }) + } +} diff --git a/pkg/push/storage/v2/BUILD.bazel b/pkg/push/storage/v2/BUILD.bazel new file mode 100644 index 000000000..ad6e18264 --- /dev/null +++ b/pkg/push/storage/v2/BUILD.bazel @@ -0,0 +1,27 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["push.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/push/storage/v2", + visibility = ["//visibility:public"], + deps = [ + "//pkg/push/domain:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//proto/push:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["push_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/push/domain:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//pkg/storage/v2/mysql/mock:go_default_library", + "//proto/push:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + ], +) diff --git a/pkg/push/storage/v2/mock/BUILD.bazel b/pkg/push/storage/v2/mock/BUILD.bazel new file mode 100644 index 000000000..6c8f85764 --- /dev/null +++ b/pkg/push/storage/v2/mock/BUILD.bazel @@ -0,0 +1,14 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["push.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/push/storage/v2/mock", + visibility = ["//visibility:public"], + deps = [ + "//pkg/push/domain:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + 
"//proto/push:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + ], +) diff --git a/pkg/push/storage/v2/mock/push.go b/pkg/push/storage/v2/mock/push.go new file mode 100644 index 000000000..dfa048ef2 --- /dev/null +++ b/pkg/push/storage/v2/mock/push.go @@ -0,0 +1,99 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: push.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + domain "github.com/bucketeer-io/bucketeer/pkg/push/domain" + mysql "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + push "github.com/bucketeer-io/bucketeer/proto/push" +) + +// MockPushStorage is a mock of PushStorage interface. +type MockPushStorage struct { + ctrl *gomock.Controller + recorder *MockPushStorageMockRecorder +} + +// MockPushStorageMockRecorder is the mock recorder for MockPushStorage. +type MockPushStorageMockRecorder struct { + mock *MockPushStorage +} + +// NewMockPushStorage creates a new mock instance. +func NewMockPushStorage(ctrl *gomock.Controller) *MockPushStorage { + mock := &MockPushStorage{ctrl: ctrl} + mock.recorder = &MockPushStorageMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockPushStorage) EXPECT() *MockPushStorageMockRecorder { + return m.recorder +} + +// CreatePush mocks base method. +func (m *MockPushStorage) CreatePush(ctx context.Context, e *domain.Push, environmentNamespace string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreatePush", ctx, e, environmentNamespace) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreatePush indicates an expected call of CreatePush. 
+func (mr *MockPushStorageMockRecorder) CreatePush(ctx, e, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreatePush", reflect.TypeOf((*MockPushStorage)(nil).CreatePush), ctx, e, environmentNamespace) +} + +// GetPush mocks base method. +func (m *MockPushStorage) GetPush(ctx context.Context, id, environmentNamespace string) (*domain.Push, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPush", ctx, id, environmentNamespace) + ret0, _ := ret[0].(*domain.Push) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPush indicates an expected call of GetPush. +func (mr *MockPushStorageMockRecorder) GetPush(ctx, id, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPush", reflect.TypeOf((*MockPushStorage)(nil).GetPush), ctx, id, environmentNamespace) +} + +// ListPushes mocks base method. +func (m *MockPushStorage) ListPushes(ctx context.Context, whereParts []mysql.WherePart, orders []*mysql.Order, limit, offset int) ([]*push.Push, int, int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListPushes", ctx, whereParts, orders, limit, offset) + ret0, _ := ret[0].([]*push.Push) + ret1, _ := ret[1].(int) + ret2, _ := ret[2].(int64) + ret3, _ := ret[3].(error) + return ret0, ret1, ret2, ret3 +} + +// ListPushes indicates an expected call of ListPushes. +func (mr *MockPushStorageMockRecorder) ListPushes(ctx, whereParts, orders, limit, offset interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPushes", reflect.TypeOf((*MockPushStorage)(nil).ListPushes), ctx, whereParts, orders, limit, offset) +} + +// UpdatePush mocks base method. 
+func (m *MockPushStorage) UpdatePush(ctx context.Context, e *domain.Push, environmentNamespace string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdatePush", ctx, e, environmentNamespace) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdatePush indicates an expected call of UpdatePush. +func (mr *MockPushStorageMockRecorder) UpdatePush(ctx, e, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePush", reflect.TypeOf((*MockPushStorage)(nil).UpdatePush), ctx, e, environmentNamespace) +} diff --git a/pkg/push/storage/v2/push.go b/pkg/push/storage/v2/push.go new file mode 100644 index 000000000..231d833d0 --- /dev/null +++ b/pkg/push/storage/v2/push.go @@ -0,0 +1,233 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package v2 + +import ( + "context" + "errors" + "fmt" + + "github.com/bucketeer-io/bucketeer/pkg/push/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + proto "github.com/bucketeer-io/bucketeer/proto/push" +) + +var ( + ErrPushAlreadyExists = errors.New("push: push already exists") + ErrPushNotFound = errors.New("push: push not found") + ErrPushUnexpectedAffectedRows = errors.New("push: push unexpected affected rows") +) + +type PushStorage interface { + CreatePush(ctx context.Context, e *domain.Push, environmentNamespace string) error + UpdatePush(ctx context.Context, e *domain.Push, environmentNamespace string) error + GetPush(ctx context.Context, id, environmentNamespace string) (*domain.Push, error) + ListPushes( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, + ) ([]*proto.Push, int, int64, error) +} + +type pushStorage struct { + qe mysql.QueryExecer +} + +func NewPushStorage(qe mysql.QueryExecer) PushStorage { + return &pushStorage{qe: qe} +} + +func (s *pushStorage) CreatePush(ctx context.Context, e *domain.Push, environmentNamespace string) error { + query := ` + INSERT INTO push ( + id, + fcm_api_key, + tags, + deleted, + name, + created_at, + updated_at, + environment_namespace + ) VALUES ( + ?, ?, ?, ?, ?, ?, ?, ? + ) + ` + _, err := s.qe.ExecContext( + ctx, + query, + e.Id, + e.FcmApiKey, + mysql.JSONObject{Val: e.Tags}, + e.Deleted, + e.Name, + e.CreatedAt, + e.UpdatedAt, + environmentNamespace, + ) + if err != nil { + if err == mysql.ErrDuplicateEntry { + return ErrPushAlreadyExists + } + return err + } + return nil +} + +func (s *pushStorage) UpdatePush(ctx context.Context, e *domain.Push, environmentNamespace string) error { + query := ` + UPDATE + push + SET + fcm_api_key = ?, + tags = ?, + deleted = ?, + name = ?, + created_at = ?, + updated_at = ? + WHERE + id = ? 
AND + environment_namespace = ? + ` + result, err := s.qe.ExecContext( + ctx, + query, + e.FcmApiKey, + mysql.JSONObject{Val: e.Tags}, + e.Deleted, + e.Name, + e.CreatedAt, + e.UpdatedAt, + e.Id, + environmentNamespace, + ) + if err != nil { + return err + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return err + } + if rowsAffected != 1 { + return ErrPushUnexpectedAffectedRows + } + return nil +} + +func (s *pushStorage) GetPush(ctx context.Context, id, environmentNamespace string) (*domain.Push, error) { + push := proto.Push{} + query := ` + SELECT + id, + fcm_api_key, + tags, + deleted, + name, + created_at, + updated_at + FROM + push + WHERE + id = ? AND + environment_namespace = ? + ` + err := s.qe.QueryRowContext( + ctx, + query, + id, + environmentNamespace, + ).Scan( + &push.Id, + &push.FcmApiKey, + &mysql.JSONObject{Val: &push.Tags}, + &push.Deleted, + &push.Name, + &push.CreatedAt, + &push.UpdatedAt, + ) + if err != nil { + if err == mysql.ErrNoRows { + return nil, ErrPushNotFound + } + return nil, err + } + return &domain.Push{Push: &push}, nil +} + +func (s *pushStorage) ListPushes( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, +) ([]*proto.Push, int, int64, error) { + whereSQL, whereArgs := mysql.ConstructWhereSQLString(whereParts) + orderBySQL := mysql.ConstructOrderBySQLString(orders) + limitOffsetSQL := mysql.ConstructLimitOffsetSQLString(limit, offset) + query := fmt.Sprintf(` + SELECT + id, + fcm_api_key, + tags, + deleted, + name, + created_at, + updated_at + FROM + push + %s %s %s + `, whereSQL, orderBySQL, limitOffsetSQL, + ) + rows, err := s.qe.QueryContext(ctx, query, whereArgs...) 
+	if err != nil {
+		return nil, 0, 0, err
+	}
+	defer rows.Close()
+	pushes := make([]*proto.Push, 0, limit)
+	for rows.Next() {
+		push := proto.Push{}
+		err := rows.Scan(
+			&push.Id,
+			&push.FcmApiKey,
+			&mysql.JSONObject{Val: &push.Tags},
+			&push.Deleted,
+			&push.Name,
+			&push.CreatedAt,
+			&push.UpdatedAt,
+		)
+		if err != nil {
+			return nil, 0, 0, err
+		}
+		pushes = append(pushes, &push)
+	}
+	// BUG FIX: the original checked rows.Err() but returned the stale outer
+	// `err` variable, which is always nil at this point — iteration errors
+	// (e.g. a connection dropped mid-scan) were silently swallowed and a
+	// truncated result was returned as success. Return rows.Err() itself.
+	if err := rows.Err(); err != nil {
+		return nil, 0, 0, err
+	}
+	nextOffset := offset + len(pushes)
+	var totalCount int64
+	// Count rows matching the same filter, without limit/offset, so callers
+	// can page through the full result set.
+	countQuery := fmt.Sprintf(`
+		SELECT
+			COUNT(1)
+		FROM
+			push
+		%s %s
+		`, whereSQL, orderBySQL,
+	)
+	err = s.qe.QueryRowContext(ctx, countQuery, whereArgs...).Scan(&totalCount)
+	if err != nil {
+		return nil, 0, 0, err
+	}
+	return pushes, nextOffset, totalCount, nil
+}
diff --git a/pkg/push/storage/v2/push_test.go b/pkg/push/storage/v2/push_test.go
new file mode 100644
index 000000000..ffd96eda7
--- /dev/null
+++ b/pkg/push/storage/v2/push_test.go
@@ -0,0 +1,302 @@
+// Copyright 2022 The Bucketeer Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package v2 + +import ( + "context" + "errors" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/push/domain" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + proto "github.com/bucketeer-io/bucketeer/proto/push" +) + +func TestNewPushStorage(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + storage := NewPushStorage(mock.NewMockQueryExecer(mockController)) + assert.IsType(t, &pushStorage{}, storage) +} + +func TestCreatePush(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*pushStorage) + input *domain.Push + environmentNamespace string + expectedErr error + }{ + "ErrPushAlreadyExists": { + setup: func(s *pushStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, mysql.ErrDuplicateEntry) + }, + input: &domain.Push{ + Push: &proto.Push{Id: "id-0"}, + }, + environmentNamespace: "ns", + expectedErr: ErrPushAlreadyExists, + }, + "Error": { + setup: func(s *pushStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + + }, + input: &domain.Push{ + Push: &proto.Push{Id: "id-0"}, + }, + environmentNamespace: "ns", + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *pushStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, nil) + }, + input: &domain.Push{ + Push: &proto.Push{Id: "id-0"}, + }, + environmentNamespace: "ns", + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newpushStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + 
} + err := storage.CreatePush(context.Background(), p.input, p.environmentNamespace) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestUpdatePush(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*pushStorage) + input *domain.Push + environmentNamespace string + expectedErr error + }{ + "ErrPushUnexpectedAffectedRows": { + setup: func(s *pushStorage) { + result := mock.NewMockResult(mockController) + result.EXPECT().RowsAffected().Return(int64(0), nil) + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(result, nil) + }, + input: &domain.Push{ + Push: &proto.Push{Id: "id-0"}, + }, + environmentNamespace: "ns", + expectedErr: ErrPushUnexpectedAffectedRows, + }, + "Error": { + setup: func(s *pushStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + + }, + input: &domain.Push{ + Push: &proto.Push{Id: "id-0"}, + }, + environmentNamespace: "ns", + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *pushStorage) { + result := mock.NewMockResult(mockController) + result.EXPECT().RowsAffected().Return(int64(1), nil) + s.qe.(*mock.MockQueryExecer).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(result, nil) + }, + input: &domain.Push{ + Push: &proto.Push{Id: "id-0"}, + }, + environmentNamespace: "ns", + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newpushStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + err := storage.UpdatePush(context.Background(), p.input, p.environmentNamespace) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestGetPush(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := 
map[string]struct { + setup func(*pushStorage) + id string + environmentNamespace string + expectedErr error + }{ + "ErrPushNotFound": { + setup: func(s *pushStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + id: "id-0", + environmentNamespace: "ns", + expectedErr: ErrPushNotFound, + }, + "Error": { + setup: func(s *pushStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(errors.New("error")) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + + }, + id: "id-0", + environmentNamespace: "ns", + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *pushStorage) { + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + id: "id-0", + environmentNamespace: "ns", + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newpushStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + _, err := storage.GetPush(context.Background(), p.id, p.environmentNamespace) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestListPushs(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := map[string]struct { + setup func(*pushStorage) + whereParts []mysql.WherePart + orders []*mysql.Order + limit int + offset int + expected []*proto.Push + expectedCursor int + expectedErr error + }{ + "Error": { + setup: func(s *pushStorage) { + s.qe.(*mock.MockQueryExecer).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("error")) + }, + whereParts: nil, 
+ orders: nil, + limit: 0, + offset: 0, + expected: nil, + expectedCursor: 0, + expectedErr: errors.New("error"), + }, + "Success": { + setup: func(s *pushStorage) { + rows := mock.NewMockRows(mockController) + rows.EXPECT().Close().Return(nil) + rows.EXPECT().Next().Return(false) + rows.EXPECT().Err().Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(rows, nil) + row := mock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.qe.(*mock.MockQueryExecer).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + whereParts: []mysql.WherePart{ + mysql.NewFilter("num", ">=", 5), + }, + orders: []*mysql.Order{ + mysql.NewOrder("id", mysql.OrderDirectionAsc), + }, + limit: 10, + offset: 5, + expected: []*proto.Push{}, + expectedCursor: 5, + expectedErr: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + storage := newpushStorageWithMock(t, mockController) + if p.setup != nil { + p.setup(storage) + } + pushs, cursor, _, err := storage.ListPushes( + context.Background(), + p.whereParts, + p.orders, + p.limit, + p.offset, + ) + assert.Equal(t, p.expected, pushs) + assert.Equal(t, p.expectedCursor, cursor) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func newpushStorageWithMock(t *testing.T, mockController *gomock.Controller) *pushStorage { + t.Helper() + return &pushStorage{mock.NewMockQueryExecer(mockController)} +} diff --git a/pkg/redis/BUILD.bazel b/pkg/redis/BUILD.bazel new file mode 100644 index 000000000..ab1853b85 --- /dev/null +++ b/pkg/redis/BUILD.bazel @@ -0,0 +1,33 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "conn.go", + "metrics.go", + "redis.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/redis", + visibility = ["//visibility:public"], + deps = [ + "//pkg/health:go_default_library", + 
"//pkg/metrics:go_default_library", + "@com_github_gomodule_redigo//redis:go_default_library", + "@com_github_mna_redisc//:go_default_library", + "@com_github_prometheus_client_golang//prometheus:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["redis_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/health:go_default_library", + "@com_github_gomodule_redigo//redis:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/redis/conn.go b/pkg/redis/conn.go new file mode 100644 index 000000000..4b9822a90 --- /dev/null +++ b/pkg/redis/conn.go @@ -0,0 +1,44 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package redis + +import ( + "time" + + "github.com/gomodule/redigo/redis" +) + +type conn struct { + redis.Conn + clientVersion string + serverName string +} + +func (c *conn) Do(commandName string, args ...interface{}) (interface{}, error) { + startTime := time.Now() + ReceivedCounter.WithLabelValues(c.clientVersion, c.serverName, commandName).Inc() + reply, err := c.Conn.Do(commandName, args...) 
+ code := CodeFail + switch err { + case nil: + code = CodeSuccess + case ErrNil: + code = CodeNotFound + } + HandledCounter.WithLabelValues(c.clientVersion, c.serverName, commandName, code).Inc() + HandledHistogram.WithLabelValues(c.clientVersion, c.serverName, commandName, code).Observe( + time.Since(startTime).Seconds()) + return reply, err +} diff --git a/pkg/redis/metrics.go b/pkg/redis/metrics.go new file mode 100644 index 000000000..36dceddd9 --- /dev/null +++ b/pkg/redis/metrics.go @@ -0,0 +1,131 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package redis + +import ( + "sync" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/bucketeer-io/bucketeer/pkg/metrics" +) + +const ( + CodeSuccess = "Success" + CodeFail = "Fail" + CodeNotFound = "NotFound" + CodeInvalidType = "InvalidType" +) + +var ( + ReceivedCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "redis", + Name: "received_total", + Help: "Total number of received commands.", + }, []string{"version", "server", "command"}) + + HandledCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "redis", + Name: "handled_total", + Help: "Total number of completed commands.", + }, []string{"version", "server", "command", "code"}) + + HandledHistogram = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "bucketeer", + Subsystem: "redis", + Name: "handling_seconds", + Help: "Histogram of command response latency (seconds).", + Buckets: prometheus.DefBuckets, + }, []string{"version", "server", "command", "code"}) + + poolActiveConnectionsDesc = prometheus.NewDesc( + "bucketeer_redis_pool_active_connections", + "Number of connections in the pool.", + []string{"version", "server"}, + nil, + ) + + poolIdleConnectionsDesc = prometheus.NewDesc( + "bucketeer_redis_pool_idle_connections", + "Number of idle connections in the pool.", + []string{"version", "server"}, + nil, + ) + + registerOnce sync.Once + clients sync.Map +) + +type PoolStater interface { + Stats() PoolStats +} + +type PoolStats interface { + ActiveCount() int + IdleCount() int +} + +type metricsKey struct { + version string + server string +} + +func RegisterMetrics(r metrics.Registerer, version, server string, stater PoolStater) { + clients.Store(metricsKey{version: version, server: server}, stater) + registerOnce.Do(func() { + r.MustRegister( + ReceivedCounter, + HandledCounter, + HandledHistogram, + &poolCollector{}, + ) + }) +} + +type poolCollector struct { +} + 
+func (c *poolCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- poolActiveConnectionsDesc + ch <- poolIdleConnectionsDesc +} + +func (c *poolCollector) Collect(ch chan<- prometheus.Metric) { + clients.Range(func(key, value interface{}) bool { + mKey := key.(metricsKey) + stater := value.(PoolStater) + stats := stater.Stats() + ch <- prometheus.MustNewConstMetric( + poolActiveConnectionsDesc, + prometheus.GaugeValue, + float64(stats.ActiveCount()), + mKey.version, + mKey.server, + ) + ch <- prometheus.MustNewConstMetric( + poolIdleConnectionsDesc, + prometheus.GaugeValue, + float64(stats.IdleCount()), + mKey.version, + mKey.server, + ) + return true + }) +} diff --git a/pkg/redis/redis.go b/pkg/redis/redis.go new file mode 100644 index 000000000..e569463b9 --- /dev/null +++ b/pkg/redis/redis.go @@ -0,0 +1,273 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package redis + +import ( + "context" + "time" + + "github.com/gomodule/redigo/redis" + "github.com/mna/redisc" + "go.uber.org/zap" + + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/metrics" +) + +const ( + clientVersion = "v1" +) + +var ErrNil = redis.ErrNil + +type poolStats map[string]redis.PoolStats + +func (ps poolStats) ActiveCount() int { + conns := 0 + for _, stat := range ps { + conns += stat.ActiveCount + } + return conns +} + +func (ps poolStats) IdleCount() int { + conns := 0 + for _, stat := range ps { + conns += stat.IdleCount + } + return conns +} + +type Cluster interface { + Get(opts ...ConnectionOptions) redis.Conn + Check(context.Context) health.Status + Stats() PoolStats + Close() error +} + +type cluster struct { + redisCluster + opts *options + logger *zap.Logger +} + +type redisCluster interface { + Get() redis.Conn + Stats() map[string]redis.PoolStats + Close() error +} + +type options struct { + dialPassword string + dialConnectTimeout time.Duration + poolMaxIdle int + poolMaxActive int + poolIdleTimeout time.Duration + serverName string + metrics metrics.Registerer + logger *zap.Logger +} + +func defaultOptions() *options { + return &options{ + dialConnectTimeout: 5 * time.Second, + poolMaxIdle: 5, + poolMaxActive: 10, + poolIdleTimeout: time.Minute, + logger: zap.NewNop(), + } +} + +type Option func(*options) + +func WithDialPassword(password string) Option { + return func(opts *options) { + opts.dialPassword = password + } +} + +func WithDialConnectTimeout(timeout time.Duration) Option { + return func(opts *options) { + opts.dialConnectTimeout = timeout + } +} + +func WithPoolMaxIdle(num int) Option { + return func(opts *options) { + opts.poolMaxIdle = num + } +} + +func WithPoolMaxActive(num int) Option { + return func(opts *options) { + opts.poolMaxActive = num + } +} + +func WithPoolIdleTimeout(timeout time.Duration) Option { + return func(opts *options) { + opts.poolIdleTimeout = timeout + 
} +} + +func WithServerName(serverName string) Option { + return func(opts *options) { + opts.serverName = serverName + } +} + +func WithMetrics(r metrics.Registerer) Option { + return func(opts *options) { + opts.metrics = r + } +} + +func WithLogger(logger *zap.Logger) Option { + return func(opts *options) { + opts.logger = logger + } +} + +type connectionOptions struct { + readOnly bool + retries int + tryAgainDelay time.Duration +} + +type ConnectionOptions func(*connectionOptions) + +var defaultConnectionOptions = connectionOptions{ + retries: 3, + tryAgainDelay: 250 * time.Millisecond, +} + +func WithReadOnly() ConnectionOptions { + return func(opts *connectionOptions) { + opts.readOnly = true + } +} + +func WithRetry(retries int, tryAgainDelay time.Duration) ConnectionOptions { + return func(opts *connectionOptions) { + opts.retries = retries + opts.tryAgainDelay = tryAgainDelay + } +} + +func WithoutRetry() ConnectionOptions { + return func(opts *connectionOptions) { + opts.retries = 0 + } +} + +func NewCluster(nodes []string, opts ...Option) (Cluster, error) { + options := defaultOptions() + for _, opt := range opts { + opt(options) + } + logger := options.logger.Named("redis-v1") + c := &redisc.Cluster{ + StartupNodes: nodes, + DialOptions: []redis.DialOption{ + redis.DialConnectTimeout(options.dialConnectTimeout), + redis.DialPassword(options.dialPassword), + }, + CreatePool: createPool(options), + } + if err := c.Refresh(); err != nil { + logger.Error("Failed to refresh", zap.Error(err)) + return nil, err + } + cluster := &cluster{ + redisCluster: c, + opts: options, + logger: logger, + } + if options.metrics != nil { + RegisterMetrics(options.metrics, clientVersion, options.serverName, cluster) + } + return cluster, nil +} + +func (c *cluster) Check(ctx context.Context) health.Status { + resultCh := make(chan health.Status, 1) + go func() { + conn := c.Get(WithoutRetry()) + defer conn.Close() + _, err := conn.Do("PING") + if err != nil { + 
c.logger.Error("Unhealthy", zap.Error(err)) + resultCh <- health.Unhealthy + return + } + resultCh <- health.Healthy + }() + select { + case <-ctx.Done(): + c.logger.Error("Unhealthy due to context Done is closed", zap.Error(ctx.Err())) + return health.Unhealthy + case status := <-resultCh: + return status + } +} + +// This function does not return the error, that occurs while altering the original connection. +func (c *cluster) Get(opts ...ConnectionOptions) redis.Conn { + options := defaultConnectionOptions + for _, opt := range opts { + opt(&options) + } + connection := c.redisCluster.Get() + if options.readOnly { + if err := redisc.ReadOnlyConn(connection); err != nil { + c.logger.Error("Failed to create read-only connection", + zap.Error(err), + zap.Any("options", options)) + } + } + if options.retries > 0 { + retryConnection, err := redisc.RetryConn(connection, options.retries, options.tryAgainDelay) + if err != nil { + c.logger.Error("Failed to create retry connection", + zap.Error(err), + zap.Any("options", options)) + } else { + connection = retryConnection + } + } + return &conn{Conn: connection, clientVersion: clientVersion, serverName: c.opts.serverName} +} + +func (c *cluster) Stats() PoolStats { + return poolStats(c.redisCluster.Stats()) +} + +func createPool(opts *options) func(string, ...redis.DialOption) (*redis.Pool, error) { + return func(address string, options ...redis.DialOption) (*redis.Pool, error) { + p := &redis.Pool{ + MaxIdle: opts.poolMaxIdle, + MaxActive: opts.poolMaxActive, + IdleTimeout: opts.poolIdleTimeout, + Dial: func() (redis.Conn, error) { + return redis.Dial("tcp", address, options...) + }, + TestOnBorrow: func(c redis.Conn, t time.Time) error { + _, err := c.Do("PING") + return err + }, + } + return p, nil + } +} diff --git a/pkg/redis/redis_test.go b/pkg/redis/redis_test.go new file mode 100644 index 000000000..1dbb4d1d6 --- /dev/null +++ b/pkg/redis/redis_test.go @@ -0,0 +1,163 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package redis + +import ( + "context" + "errors" + "testing" + "time" + + "go.uber.org/zap" + + "github.com/bucketeer-io/bucketeer/pkg/health" + + "github.com/gomodule/redigo/redis" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type dummyCluster struct { + healthy bool +} + +func (c *dummyCluster) Get() redis.Conn { + return &dummyConn{healthy: c.healthy} +} + +func (c *dummyCluster) Stats() map[string]redis.PoolStats { + return nil +} + +func (c *dummyCluster) Close() error { + return nil +} + +type dummyConn struct { + healthy bool +} + +func (c *dummyConn) Close() error { + return nil +} + +func (c *dummyConn) Err() error { + return nil +} + +func (c *dummyConn) Do(commandName string, args ...interface{}) (reply interface{}, err error) { + if c.healthy { + return nil, nil + } + return nil, errors.New("error") +} + +func (c *dummyConn) Send(commandName string, args ...interface{}) error { + return nil +} + +func (c *dummyConn) Flush() error { + return nil +} + +func (c *dummyConn) Receive() (reply interface{}, err error) { + return nil, nil +} + +func TestRedisCheckHealthy(t *testing.T) { + cluster := &cluster{ + redisCluster: &dummyCluster{healthy: true}, + opts: &options{}, + logger: zap.NewNop(), + } + status := cluster.Check(context.TODO()) + if status != health.Healthy { + t.Fail() + } +} + +func TestRedisCheckUnealthy(t *testing.T) { + cluster := &cluster{ + 
redisCluster: &dummyCluster{healthy: false}, + opts: &options{}, + logger: zap.NewNop(), + } + status := cluster.Check(context.TODO()) + if status != health.Unhealthy { + t.Fail() + } +} + +func TestRedisCheckTimeout(t *testing.T) { + cluster := &cluster{ + redisCluster: &dummyCluster{healthy: true}, + opts: &options{}, + logger: zap.NewNop(), + } + ctx, cancel := context.WithCancel(context.Background()) + cancel() + status := cluster.Check(ctx) + if status != health.Unhealthy { + t.Fail() + } +} + +func TestWithDialPassword(t *testing.T) { + t.Parallel() + opts := &options{} + require.Equal(t, "", opts.dialPassword) + WithDialPassword("test-password")(opts) + assert.Equal(t, "test-password", opts.dialPassword) +} + +func TestWithDialConnectTimeout(t *testing.T) { + t.Parallel() + opts := &options{} + require.Equal(t, time.Duration(0), opts.dialConnectTimeout) + WithDialConnectTimeout(time.Minute)(opts) + assert.Equal(t, time.Minute, opts.dialConnectTimeout) +} + +func TestWithPoolMaxIdle(t *testing.T) { + t.Parallel() + opts := &options{} + require.Equal(t, 0, opts.poolMaxIdle) + WithPoolMaxIdle(1)(opts) + assert.Equal(t, 1, opts.poolMaxIdle) +} + +func TestWithPoolMaxActive(t *testing.T) { + t.Parallel() + opts := &options{} + require.Equal(t, 0, opts.poolMaxActive) + WithPoolMaxActive(1)(opts) + assert.Equal(t, 1, opts.poolMaxActive) +} + +func TestWithPoolIdleTimeout(t *testing.T) { + t.Parallel() + opts := &options{} + require.Equal(t, time.Duration(0), opts.poolIdleTimeout) + WithPoolIdleTimeout(time.Second)(opts) + assert.Equal(t, time.Second, opts.poolIdleTimeout) +} + +func TestWithServerName(t *testing.T) { + t.Parallel() + opts := &options{} + require.Equal(t, "", opts.serverName) + WithServerName("non-persistent-redis")(opts) + assert.Equal(t, "non-persistent-redis", opts.serverName) +} diff --git a/pkg/redis/v2/BUILD.bazel b/pkg/redis/v2/BUILD.bazel new file mode 100644 index 000000000..87fdf4bc7 --- /dev/null +++ b/pkg/redis/v2/BUILD.bazel @@ -0,0 
+1,15 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["redis.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/redis/v2", + visibility = ["//visibility:public"], + deps = [ + "//pkg/health:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/redis:go_default_library", + "@com_github_go_redis_redis//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/redis/v2/redis.go b/pkg/redis/v2/redis.go new file mode 100644 index 000000000..735c162c5 --- /dev/null +++ b/pkg/redis/v2/redis.go @@ -0,0 +1,248 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v2 + +import ( + "context" + "time" + + goredis "github.com/go-redis/redis" + "go.uber.org/zap" + + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/redis" +) + +const ( + clientVersion = "v2" + + getCmdName = "GET" + setCmdName = "SET" + delCmdName = "DEL" + forEachMasterCmdName = "FOR_EACH_MASTER" +) + +var ErrNil = goredis.Nil + +type poolStats goredis.PoolStats + +func (ps *poolStats) ActiveCount() int { + return int(ps.TotalConns) +} + +func (ps *poolStats) IdleCount() int { + return int(ps.IdleConns) +} + +type Cluster interface { + Get(key string) ([]byte, error) + Set(key string, val interface{}, expiration time.Duration) error + Del(key string) error + ForEachMaster(fn func(client *goredis.Client) error) error + Check(context.Context) health.Status + Stats() redis.PoolStats + Close() error +} + +type cluster struct { + cc *goredis.ClusterClient + opts *options + logger *zap.Logger +} + +type options struct { + dialPassword string + dialConnectTimeout time.Duration + poolMaxIdle int + poolIdleTimeout time.Duration + routeByLatency bool + serverName string + metrics metrics.Registerer + logger *zap.Logger +} + +func defaultOptions() *options { + return &options{ + dialConnectTimeout: 5 * time.Second, + poolMaxIdle: 5, + poolIdleTimeout: time.Minute, + routeByLatency: true, + logger: zap.NewNop(), + } +} + +type Option func(*options) + +func WithDialPassword(password string) Option { + return func(opts *options) { + opts.dialPassword = password + } +} + +func WithDialConnectTimeout(timeout time.Duration) Option { + return func(opts *options) { + opts.dialConnectTimeout = timeout + } +} + +func WithPoolIdleTimeout(timeout time.Duration) Option { + return func(opts *options) { + opts.poolIdleTimeout = timeout + } +} + +func WithServerName(serverName string) Option { + return func(opts *options) { + opts.serverName = serverName + } +} + +func WithMetrics(r 
metrics.Registerer) Option { + return func(opts *options) { + opts.metrics = r + } +} + +func WithLogger(logger *zap.Logger) Option { + return func(opts *options) { + opts.logger = logger + } +} + +func NewCluster(nodes []string, opts ...Option) (Cluster, error) { + options := defaultOptions() + for _, opt := range opts { + opt(options) + } + logger := options.logger.Named("redis-v2") + cc := goredis.NewClusterClient(&goredis.ClusterOptions{ + Password: options.dialPassword, + Addrs: nodes, + IdleTimeout: options.poolIdleTimeout, + DialTimeout: options.dialConnectTimeout, + RouteByLatency: options.routeByLatency, + }) + if err := cc.ReloadState(); err != nil { + logger.Error("Failed to refresh", zap.Error(err)) + return nil, err + } + cluster := &cluster{ + cc: cc, + opts: options, + logger: logger, + } + if options.metrics != nil { + redis.RegisterMetrics(options.metrics, clientVersion, options.serverName, cluster) + } + cluster.logger.Debug("redis/v2 client was initialized") + return cluster, nil +} + +func (c *cluster) Get(key string) ([]byte, error) { + startTime := time.Now() + redis.ReceivedCounter.WithLabelValues(clientVersion, c.opts.serverName, getCmdName).Inc() + reply, err := c.cc.Get(key).Bytes() + code := redis.CodeFail + switch err { + case nil: + code = redis.CodeSuccess + case ErrNil: + code = redis.CodeNotFound + } + redis.HandledCounter.WithLabelValues(clientVersion, c.opts.serverName, getCmdName, code).Inc() + redis.HandledHistogram.WithLabelValues(clientVersion, c.opts.serverName, getCmdName, code).Observe( + time.Since(startTime).Seconds()) + return reply, err +} + +func (c *cluster) Set(key string, val interface{}, expiration time.Duration) error { + startTime := time.Now() + redis.ReceivedCounter.WithLabelValues(clientVersion, c.opts.serverName, setCmdName).Inc() + _, err := c.cc.Set(key, val, expiration).Result() + code := redis.CodeFail + switch err { + case nil: + code = redis.CodeSuccess + case ErrNil: + code = redis.CodeNotFound + } + 
redis.HandledCounter.WithLabelValues(clientVersion, c.opts.serverName, setCmdName, code).Inc() + redis.HandledHistogram.WithLabelValues(clientVersion, c.opts.serverName, setCmdName, code).Observe( + time.Since(startTime).Seconds()) + return err +} + +func (c *cluster) Del(key string) error { + startTime := time.Now() + redis.ReceivedCounter.WithLabelValues(clientVersion, c.opts.serverName, delCmdName).Inc() + _, err := c.cc.Del(key).Result() + code := redis.CodeFail + switch err { + case nil: + code = redis.CodeSuccess + case ErrNil: + code = redis.CodeNotFound + } + redis.HandledCounter.WithLabelValues(clientVersion, c.opts.serverName, delCmdName, code).Inc() + redis.HandledHistogram.WithLabelValues(clientVersion, c.opts.serverName, delCmdName, code).Observe( + time.Since(startTime).Seconds()) + return err +} + +func (c *cluster) ForEachMaster(fn func(client *goredis.Client) error) error { + startTime := time.Now() + redis.ReceivedCounter.WithLabelValues(clientVersion, c.opts.serverName, forEachMasterCmdName).Inc() + err := c.cc.ForEachMaster(fn) + code := redis.CodeFail + switch err { + case nil: + code = redis.CodeSuccess + case ErrNil: + code = redis.CodeNotFound + } + redis.HandledCounter.WithLabelValues(clientVersion, c.opts.serverName, forEachMasterCmdName, code).Inc() + redis.HandledHistogram.WithLabelValues(clientVersion, c.opts.serverName, forEachMasterCmdName, code).Observe( + time.Since(startTime).Seconds()) + return err +} + +func (c *cluster) Check(ctx context.Context) health.Status { + resultCh := make(chan health.Status, 1) + go func() { + _, err := c.cc.Ping().Result() + if err != nil { + c.logger.Error("Unhealthy", zap.Error(err)) + resultCh <- health.Unhealthy + return + } + resultCh <- health.Healthy + }() + select { + case <-ctx.Done(): + c.logger.Error("Unhealthy due to context Done is closed", zap.Error(ctx.Err())) + return health.Unhealthy + case status := <-resultCh: + return status + } +} + +func (c *cluster) Stats() redis.PoolStats { + 
return (*poolStats)(c.cc.PoolStats()) +} + +func (c *cluster) Close() error { + return c.cc.Close() +} diff --git a/pkg/redis/v3/BUILD.bazel b/pkg/redis/v3/BUILD.bazel new file mode 100644 index 000000000..64012b492 --- /dev/null +++ b/pkg/redis/v3/BUILD.bazel @@ -0,0 +1,15 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["redis.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/redis/v3", + visibility = ["//visibility:public"], + deps = [ + "//pkg/health:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/redis:go_default_library", + "@com_github_go_redis_redis//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/redis/v3/redis.go b/pkg/redis/v3/redis.go new file mode 100644 index 000000000..751a56c8f --- /dev/null +++ b/pkg/redis/v3/redis.go @@ -0,0 +1,355 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v3 + +import ( + "context" + "errors" + "time" + + goredis "github.com/go-redis/redis" + "go.uber.org/zap" + + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/redis" +) + +const ( + clientVersion = "v3" + + scanCmdName = "SCAN" + getCmdName = "GET" + getMultiCmdName = "GET_MULTI" + setCmdName = "SET" + pfAddCmdName = "PFADD" + pfCountCmdName = "PFCOUNT" + incrByFloatCmdName = "INCR_BY_FLOAT" + delCmdName = "DEL" +) + +var ( + ErrNil = goredis.Nil + ErrInvalidType = errors.New("redis: invalid type") +) + +type poolStats goredis.PoolStats + +func (ps *poolStats) ActiveCount() int { + return int(ps.TotalConns) +} + +func (ps *poolStats) IdleCount() int { + return int(ps.IdleConns) +} + +type Client interface { + Close() error + Check(context.Context) health.Status + Stats() redis.PoolStats + Scan(cursor uint64, key string, count int64) (uint64, []string, error) + Get(key string) ([]byte, error) + GetMulti(keys []string) ([]interface{}, error) + Set(key string, val interface{}, expiration time.Duration) error + PFAdd(key string, els []string) (int64, error) + PFCount(keys ...string) (int64, error) + IncrByFloat(key string, value float64) (float64, error) + Del(key string) error +} + +type client struct { + rc *goredis.Client + opts *options + logger *zap.Logger +} + +type options struct { + password string + maxRetries int + dialTimeout time.Duration + poolSize int + minIdleConns int + poolTimeout time.Duration + serverName string + metrics metrics.Registerer + logger *zap.Logger +} + +func defaultOptions() *options { + return &options{ + maxRetries: 5, + dialTimeout: 5 * time.Second, + poolSize: 10, + minIdleConns: 5, + poolTimeout: 5 * time.Second, + logger: zap.NewNop(), + } +} + +type Option func(*options) + +func WithPassword(password string) Option { + return func(opts *options) { + opts.password = password + } +} + +func WithMaxRetries(maxRetries int) Option { + 
return func(opts *options) { + opts.maxRetries = maxRetries + } +} + +func WithDialTimeout(dialTimeout time.Duration) Option { + return func(opts *options) { + opts.dialTimeout = dialTimeout + } +} + +func WithPoolSize(poolSize int) Option { + return func(opts *options) { + opts.poolSize = poolSize + } +} + +func WithMinIdleConns(minIdleConns int) Option { + return func(opts *options) { + opts.minIdleConns = minIdleConns + } +} + +func WithPoolTimeout(poolTimeout time.Duration) Option { + return func(opts *options) { + opts.poolTimeout = poolTimeout + } +} + +func WithServerName(serverName string) Option { + return func(opts *options) { + opts.serverName = serverName + } +} + +func WithMetrics(r metrics.Registerer) Option { + return func(opts *options) { + opts.metrics = r + } +} + +func WithLogger(logger *zap.Logger) Option { + return func(opts *options) { + opts.logger = logger + } +} + +func NewClient(addr string, opts ...Option) (Client, error) { + options := defaultOptions() + for _, opt := range opts { + opt(options) + } + logger := options.logger.Named("redis-v3") + rc := goredis.NewClient(&goredis.Options{ + Addr: addr, + Password: options.password, + MaxRetries: options.maxRetries, + DialTimeout: options.dialTimeout, + PoolSize: options.poolSize, + MinIdleConns: options.minIdleConns, + PoolTimeout: options.poolTimeout, + }) + _, err := rc.Ping().Result() + if err != nil { + logger.Error("Failed to ping", zap.Error(err)) + return nil, err + } + client := &client{ + rc: rc, + opts: options, + logger: logger, + } + if options.metrics != nil { + redis.RegisterMetrics(options.metrics, clientVersion, options.serverName, client) + } + return client, nil +} + +func (c *client) Close() error { + return c.rc.Close() +} + +func (c *client) Check(ctx context.Context) health.Status { + resultCh := make(chan health.Status, 1) + go func() { + _, err := c.rc.Ping().Result() + if err != nil { + c.logger.Error("Unhealthy", zap.Error(err)) + resultCh <- health.Unhealthy + 
return + } + resultCh <- health.Healthy + }() + select { + case <-ctx.Done(): + c.logger.Error("Unhealthy due to context Done is closed", zap.Error(ctx.Err())) + return health.Unhealthy + case status := <-resultCh: + return status + } +} + +func (c *client) Stats() redis.PoolStats { + return (*poolStats)(c.rc.PoolStats()) +} + +func (c *client) Scan(cursor uint64, key string, count int64) (uint64, []string, error) { + startTime := time.Now() + redis.ReceivedCounter.WithLabelValues(clientVersion, c.opts.serverName, scanCmdName).Inc() + keys, cursor, err := c.rc.Scan(cursor, key, count).Result() + code := redis.CodeFail + switch err { + case nil: + code = redis.CodeSuccess + case ErrNil: + code = redis.CodeNotFound + } + redis.HandledCounter.WithLabelValues(clientVersion, c.opts.serverName, scanCmdName, code).Inc() + redis.HandledHistogram.WithLabelValues(clientVersion, c.opts.serverName, scanCmdName, code).Observe( + time.Since(startTime).Seconds()) + return cursor, keys, err +} + +func (c *client) Get(key string) ([]byte, error) { + startTime := time.Now() + redis.ReceivedCounter.WithLabelValues(clientVersion, c.opts.serverName, getCmdName).Inc() + reply, err := c.rc.Get(key).Bytes() + code := redis.CodeFail + switch err { + case nil: + code = redis.CodeSuccess + case ErrNil: + code = redis.CodeNotFound + } + redis.HandledCounter.WithLabelValues(clientVersion, c.opts.serverName, getCmdName, code).Inc() + redis.HandledHistogram.WithLabelValues(clientVersion, c.opts.serverName, getCmdName, code).Observe( + time.Since(startTime).Seconds()) + return reply, err +} + +func (c *client) GetMulti(keys []string) ([]interface{}, error) { + startTime := time.Now() + redis.ReceivedCounter.WithLabelValues(clientVersion, c.opts.serverName, getMultiCmdName).Inc() + reply, err := c.rc.MGet(keys...).Result() + code := redis.CodeFail + values := make([]interface{}, 0, len(reply)) + switch err { + case nil: + code = redis.CodeSuccess + for _, r := range reply { + s, ok := r.(string) + 
if !ok { + code = redis.CodeInvalidType + values = nil + err = ErrInvalidType + break + } + values = append(values, []byte(s)) + } + case ErrNil: + code = redis.CodeNotFound + } + redis.HandledCounter.WithLabelValues(clientVersion, c.opts.serverName, getMultiCmdName, code).Inc() + redis.HandledHistogram.WithLabelValues(clientVersion, c.opts.serverName, getMultiCmdName, code).Observe( + time.Since(startTime).Seconds()) + return values, err +} + +func (c *client) Set(key string, val interface{}, expiration time.Duration) error { + startTime := time.Now() + redis.ReceivedCounter.WithLabelValues(clientVersion, c.opts.serverName, setCmdName).Inc() + err := c.rc.Set(key, val, expiration).Err() + code := redis.CodeFail + switch err { + case nil: + code = redis.CodeSuccess + case ErrNil: + code = redis.CodeNotFound + } + redis.HandledCounter.WithLabelValues(clientVersion, c.opts.serverName, setCmdName, code).Inc() + redis.HandledHistogram.WithLabelValues(clientVersion, c.opts.serverName, setCmdName, code).Observe( + time.Since(startTime).Seconds()) + return err +} + +func (c *client) PFAdd(key string, els []string) (int64, error) { + startTime := time.Now() + redis.ReceivedCounter.WithLabelValues(clientVersion, c.opts.serverName, pfAddCmdName).Inc() + result, err := c.rc.PFAdd(key, els).Result() + code := redis.CodeFail + switch err { + case nil: + code = redis.CodeSuccess + } + redis.HandledCounter.WithLabelValues(clientVersion, c.opts.serverName, pfAddCmdName, code).Inc() + redis.HandledHistogram.WithLabelValues(clientVersion, c.opts.serverName, pfAddCmdName, code).Observe( + time.Since(startTime).Seconds()) + return result, err +} + +func (c *client) PFCount(keys ...string) (int64, error) { + startTime := time.Now() + redis.ReceivedCounter.WithLabelValues(clientVersion, c.opts.serverName, pfCountCmdName).Inc() + count, err := c.rc.PFCount(keys...).Result() + code := redis.CodeFail + switch err { + case nil: + code = redis.CodeSuccess + } + 
redis.HandledCounter.WithLabelValues(clientVersion, c.opts.serverName, pfCountCmdName, code).Inc() + redis.HandledHistogram.WithLabelValues(clientVersion, c.opts.serverName, pfCountCmdName, code).Observe( + time.Since(startTime).Seconds()) + return count, err +} + +func (c *client) IncrByFloat(key string, value float64) (float64, error) { + startTime := time.Now() + redis.ReceivedCounter.WithLabelValues(clientVersion, c.opts.serverName, incrByFloatCmdName).Inc() + v, err := c.rc.IncrByFloat(key, value).Result() + code := redis.CodeFail + switch err { + case nil: + code = redis.CodeSuccess + } + redis.HandledCounter.WithLabelValues(clientVersion, c.opts.serverName, incrByFloatCmdName, code).Inc() + redis.HandledHistogram.WithLabelValues(clientVersion, c.opts.serverName, incrByFloatCmdName, code).Observe( + time.Since(startTime).Seconds()) + return v, err +} + +func (c *client) Del(key string) error { + startTime := time.Now() + redis.ReceivedCounter.WithLabelValues(clientVersion, c.opts.serverName, delCmdName).Inc() + _, err := c.rc.Del(key).Result() + code := redis.CodeFail + switch err { + case nil: + code = redis.CodeSuccess + case ErrNil: + code = redis.CodeNotFound + } + redis.HandledCounter.WithLabelValues(clientVersion, c.opts.serverName, delCmdName, code).Inc() + redis.HandledHistogram.WithLabelValues(clientVersion, c.opts.serverName, delCmdName, code).Observe( + time.Since(startTime).Seconds()) + return err +} diff --git a/pkg/rest/BUILD.bazel b/pkg/rest/BUILD.bazel new file mode 100644 index 000000000..f553c10fa --- /dev/null +++ b/pkg/rest/BUILD.bazel @@ -0,0 +1,39 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "error.go", + "handler.go", + "log.go", + "metrics.go", + "middleware.go", + "response.go", + "server.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/rest", + visibility = ["//visibility:public"], + deps = [ + "//pkg/log:go_default_library", + 
"//pkg/metrics:go_default_library", + "@com_github_prometheus_client_golang//prometheus:go_default_library", + "@org_uber_go_zap//:go_default_library", + "@org_uber_go_zap//zapcore:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "log_test.go", + "middleware_test.go", + "server_test.go", + ], + data = glob(["testdata/**"]), + embed = [":go_default_library"], + deps = [ + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/rest/error.go b/pkg/rest/error.go new file mode 100644 index 000000000..29c670ee1 --- /dev/null +++ b/pkg/rest/error.go @@ -0,0 +1,57 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package rest + +import ( + "errors" +) + +type status struct { + code int + err error +} + +type errStatus interface { + GetErrMessage() string + GetStatusCode() int +} + +func NewErrStatus(code int, msg string) error { + s := &status{ + code: code, + err: errors.New(msg), + } + return s +} + +func (s *status) Error() string { + return s.err.Error() +} + +func (s *status) GetErrMessage() string { + return s.err.Error() +} + +func (s *status) GetStatusCode() int { + return s.code +} + +func convertToErrStatus(err error) (errStatus, bool) { + s, ok := err.(errStatus) + if !ok { + return nil, false + } + return s, true +} diff --git a/pkg/rest/handler.go b/pkg/rest/handler.go new file mode 100644 index 000000000..bf4a31781 --- /dev/null +++ b/pkg/rest/handler.go @@ -0,0 +1,21 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rest + +import "net/http" + +type Service interface { + Register(mux *http.ServeMux) +} diff --git a/pkg/rest/log.go b/pkg/rest/log.go new file mode 100644 index 000000000..b044e6a54 --- /dev/null +++ b/pkg/rest/log.go @@ -0,0 +1,86 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rest + +import ( + "bytes" + "encoding/json" + "io" + "net/http" + "time" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + + "github.com/bucketeer-io/bucketeer/pkg/log" +) + +func LogServerMiddleware(logger *zap.Logger) middleware { + return middleware( + func(next http.Handler) http.Handler { + logger = logger.Named("http_server") + return http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + startTime := time.Now() + rr := &responseRecorder{ + ResponseWriter: w, + statusCode: 200, + body: new(bytes.Buffer), + } + next.ServeHTTP(rr, r) + if rr.statusCode == http.StatusOK { + return + } + var level zapcore.Level + switch rr.statusCode { + case http.StatusBadRequest, http.StatusNotFound, http.StatusUnauthorized: + level = zap.WarnLevel + default: + level = zap.ErrorLevel + } + apiVersion, serviceName, apiName := splitURLPath(r.URL.Path) + reqBody, err := decodeBody(r.Body) + if err != nil { + logger.Error("Failed to parse request body", zap.Error(err)) + } + logger.Check(level, "").Write( + log.FieldsFromImcomingContext(r.Context()).AddFields( + zap.String("requestURI", r.RequestURI), + zap.String("apiVersion", apiVersion), + zap.String("serviceName", serviceName), + zap.String("apiName", apiName), + zap.String("httpMethod", r.Method), + zap.Int("statusCode", rr.statusCode), + zap.Duration("duration", time.Since(startTime)), + zap.Any("request", reqBody), + zap.String("response", rr.body.String()), + )..., + ) + }, + ) + }, + ) +} + +func decodeBody(body io.Reader) (interface{}, error) { + var decoded interface{} + if 
err := json.NewDecoder(body).Decode(&decoded); err != nil { + if err == io.EOF { + return decoded, nil + } + return nil, err + } + return decoded, nil +} diff --git a/pkg/rest/log_test.go b/pkg/rest/log_test.go new file mode 100644 index 000000000..fc8e836b6 --- /dev/null +++ b/pkg/rest/log_test.go @@ -0,0 +1,64 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rest + +import ( + "bytes" + "io" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDecodeBody(t *testing.T) { + t.Parallel() + patterns := map[string]struct { + body io.Reader + expected interface{} + expectedErr bool + }{ + "err: not json": { + body: strings.NewReader(`{tag: "ios", user: {id: "pingdom", data: {foo: "bar"}}}`), + expected: nil, + expectedErr: true, + }, + "success: nil": { + body: bytes.NewReader(nil), + expected: nil, + expectedErr: false, + }, + "success: json": { + body: strings.NewReader(`{"tag":"ios","user":{"id":"pingdom","data":{"foo":"bar"}}}`), + expected: map[string]interface{}{ + "tag": "ios", + "user": map[string]interface{}{ + "id": "pingdom", + "data": map[string]interface{}{ + "foo": "bar", + }, + }, + }, + expectedErr: false, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + decoded, err := decodeBody(p.body) + assert.Equal(t, p.expected, decoded) + assert.Equal(t, p.expectedErr, err != nil) + }) + } +} diff --git a/pkg/rest/metrics.go b/pkg/rest/metrics.go 
new file mode 100644 index 000000000..f15bf30e0 --- /dev/null +++ b/pkg/rest/metrics.go @@ -0,0 +1,80 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rest + +import ( + "bytes" + "net/http" + "strconv" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/bucketeer-io/bucketeer/pkg/metrics" +) + +var ( + registerOnce sync.Once + + serverStartedCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "rest", + Name: "server_started_total", + Help: "Total number of REST started on the server.", + }, []string{"version", "service", "method"}) + + serverHandledCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "rest", + Name: "server_handled_total", + Help: "Total number of REST completed on the server, regardless of success or failure.", + }, []string{"version", "service", "method", "code"}) + + serverHandledHistogram = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "bucketeer", + Subsystem: "rest", + Name: "server_handling_seconds", + Help: "Histogram of response latency (seconds) of REST that had been application-level handled by the server.", + Buckets: prometheus.DefBuckets, + }, []string{"version", "service", "method"}) +) + +func registerMetrics(r metrics.Registerer) { + registerOnce.Do(func() { + r.MustRegister( + serverStartedCounter, + 
serverHandledCounter, + serverHandledHistogram, + ) + }) +} + +func MetricsServerMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + startTime := time.Now() + rr := &responseRecorder{ResponseWriter: w, body: new(bytes.Buffer)} + apiVersion, serviceName, apiName := splitURLPath(r.URL.Path) + serverStartedCounter.WithLabelValues(apiVersion, serviceName, apiName).Inc() + next.ServeHTTP(rr, r) + serverHandledCounter.WithLabelValues(apiVersion, serviceName, apiName, strconv.Itoa(rr.statusCode)).Inc() + serverHandledHistogram.WithLabelValues(apiVersion, serviceName, apiName).Observe(time.Since(startTime).Seconds()) + }, + ) +} diff --git a/pkg/rest/middleware.go b/pkg/rest/middleware.go new file mode 100644 index 000000000..5b85c7b83 --- /dev/null +++ b/pkg/rest/middleware.go @@ -0,0 +1,69 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package rest + +import ( + "bytes" + "net/http" + "strings" +) + +type middleware func(http.Handler) http.Handler + +type middlewares struct { + mw []middleware +} + +func newMiddleWares() *middlewares { + return &middlewares{} +} + +func (ms *middlewares) Append(mw middleware) *middlewares { + ms.mw = append(ms.mw, mw) + return ms +} + +func (ms *middlewares) Handle(handler http.Handler) http.Handler { + next := handler + for i := len(ms.mw) - 1; i >= 0; i-- { + next = ms.mw[i](next) + } + return next +} + +type responseRecorder struct { + http.ResponseWriter + statusCode int + body *bytes.Buffer +} + +func (rr *responseRecorder) WriteHeader(statusCode int) { + rr.statusCode = statusCode + rr.ResponseWriter.WriteHeader(statusCode) +} + +func (rr *responseRecorder) Write(b []byte) (int, error) { + rr.body.Write(b) + return rr.ResponseWriter.Write(b) +} + +func splitURLPath(path string) (string, string, string) { + // format: /api_version/service_name/api_name + parts := strings.Split(path, "/") + if len(parts) != 4 { + return "unknown", "unknown", "unknown" + } + return parts[1], parts[2], parts[3] +} diff --git a/pkg/rest/middleware_test.go b/pkg/rest/middleware_test.go new file mode 100644 index 000000000..8701aaaca --- /dev/null +++ b/pkg/rest/middleware_test.go @@ -0,0 +1,109 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package rest + +import ( + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const firstKey = "first" +const secondKey = "second" +const dummyURL = "http://example.com" + +func TestHandle(t *testing.T) { + t.Parallel() + var firstRun, secondRun, handlerRun bool + first := func(next http.Handler) http.Handler { + return http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.Header().Add(firstKey, firstKey) + firstRun = true + next.ServeHTTP(w, r) + }, + ) + } + second := func(next http.Handler) http.Handler { + return http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, w.Header().Get(firstKey), firstKey) + w.Header().Add(secondKey, secondKey) + secondRun = true + next.ServeHTTP(w, r) + }, + ) + } + handler := func(w http.ResponseWriter, r *http.Request) { + handlerRun = true + require.Equal(t, w.Header().Get(firstKey), firstKey) + require.Equal(t, w.Header().Get(secondKey), secondKey) + } + mws := newMiddleWares() + mws.Append(first) + mws.Append(second) + handlers := mws.Handle(http.HandlerFunc(handler)) + req := httptest.NewRequest(http.MethodGet, dummyURL, nil) + w := httptest.NewRecorder() + handlers.ServeHTTP(w, req) + assert.True(t, firstRun) + assert.True(t, secondRun) + assert.True(t, handlerRun) +} + +func TestSplitURLPath(t *testing.T) { + t.Parallel() + patterns := []struct { + msg, + input, + expectedApiVersion, + expectedServiceName, + expectedApiName string + }{ + { + msg: "error: wrong path format", + input: "scheme://host/api_version/service_name/api_name/api/", + expectedApiVersion: "unknown", + expectedServiceName: "unknown", + expectedApiName: "unknown", + }, + { + msg: "error: using slash in the end of the path", + input: "scheme://host/api_version/service_name/api_name/", + expectedApiVersion: "unknown", + expectedServiceName: "unknown", + expectedApiName: "unknown", + }, + { + msg: "sucess", + input: 
"scheme://host/api_version/service_name/api_name", + expectedApiVersion: "api_version", + expectedServiceName: "service_name", + expectedApiName: "api_name", + }, + } + for _, p := range patterns { + url, err := url.Parse(p.input) + assert.NoError(t, err) + apiVersion, serviceName, apiName := splitURLPath(url.Path) + assert.Equal(t, apiVersion, p.expectedApiVersion, p.msg) + assert.Equal(t, serviceName, p.expectedServiceName, p.msg) + assert.Equal(t, apiName, p.expectedApiName, p.msg) + } +} diff --git a/pkg/rest/response.go b/pkg/rest/response.go new file mode 100644 index 000000000..63e8455a0 --- /dev/null +++ b/pkg/rest/response.go @@ -0,0 +1,70 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rest + +import ( + "encoding/json" + "net/http" +) + +type successResponse struct { + Data interface{} `json:"data"` +} + +// This response is based on https://google.github.io/styleguide/jsoncstyleguide.xml?showone=error#error. 
+type failureResponse struct { + Error errorResponse `json:"error"` +} + +type errorResponse struct { + Code int `json:"code"` + Message string `json:"message"` +} + +func ReturnFailureResponse(w http.ResponseWriter, err error) { + status, ok := convertToErrStatus(err) + if !ok { + w.WriteHeader(http.StatusInternalServerError) + return + } + w.WriteHeader(status.GetStatusCode()) + returnResponse( + w, + &failureResponse{ + Error: errorResponse{ + Code: status.GetStatusCode(), + Message: status.GetErrMessage(), + }, + }, + ) +} + +func ReturnSuccessResponse(w http.ResponseWriter, resp interface{}) { + returnResponse(w, successResponse{Data: resp}) +} + +func returnResponse(w http.ResponseWriter, resp interface{}) { + w.Header().Set("Content-Type", "application/json") + encoded, err := json.Marshal(resp) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return + } + _, err = w.Write(encoded) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return + } +} diff --git a/pkg/rest/server.go b/pkg/rest/server.go new file mode 100644 index 000000000..aae3b0d7e --- /dev/null +++ b/pkg/rest/server.go @@ -0,0 +1,134 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package rest + +import ( + "context" + "fmt" + "net/http" + "time" + + "go.uber.org/zap" + + "github.com/bucketeer-io/bucketeer/pkg/metrics" +) + +type Server struct { + certPath string + keyPath string + port int + metrics metrics.Registerer + httpServer *http.Server + mux *http.ServeMux + logger *zap.Logger + services []Service +} + +type Option func(*Server) + +const httpName = "http" + +func WithLogger(logger *zap.Logger) Option { + return func(s *Server) { + s.logger = logger + } +} + +func WithPort(port int) Option { + return func(s *Server) { + s.port = port + } +} + +func WithMetrics(registerer metrics.Registerer) Option { + return func(s *Server) { + s.metrics = registerer + } +} + +func WithService(service Service) Option { + return func(s *Server) { + s.services = append(s.services, service) + } +} + +func NewServer(certPath, keyPath string, opt ...Option) *Server { + server := &Server{ + port: 8000, + logger: zap.NewNop(), + mux: http.NewServeMux(), + } + for _, o := range opt { + o(server) + } + server.logger = server.logger.Named(httpName) + if len(certPath) == 0 { + server.logger.Fatal("CertPath must not be empty") + } + server.certPath = certPath + if len(keyPath) == 0 { + server.logger.Fatal("KeyPath must not be empty") + } + server.keyPath = keyPath + if len(server.services) == 0 { + server.logger.Fatal("Service must not be nil") + } + return server +} + +func (s *Server) Run() { + if s.metrics != nil { + registerMetrics(s.metrics) + } + s.setup() + s.logger.Info(fmt.Sprintf("Running on %d", s.port)) + s.runServer() +} + +func (s *Server) Stop(timeout time.Duration) { + s.logger.Info("Server is going to sleep 10 seconds before shutting down") + // When the sigterm signal is sent, sometimes the app could get the signal before envoy, + // when it does, the requests will fail because the app cannot receive any request after the shutdown. 
+ // So we wait a bit in case there are still requests to be processed + // between the envoy and app after the signal. + time.Sleep(time.Second) + s.logger.Info("Server is awakening from sleep, and going to shutdown") + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + err := s.httpServer.Shutdown(ctx) + if err != nil { + s.logger.Error("Failed to shutdown", zap.Error(err)) + } +} + +func (s *Server) setup() { + mws := newMiddleWares() + mws.Append(LogServerMiddleware(s.logger)) + mws.Append(MetricsServerMiddleware) + for _, service := range s.services { + service.Register(s.mux) + } + s.httpServer = &http.Server{ + Addr: fmt.Sprintf(":%d", s.port), + Handler: mws.Handle(s.mux), + } +} + +func (s *Server) runServer() { + err := s.httpServer.ListenAndServeTLS(s.certPath, s.keyPath) + if err != nil && err != http.ErrServerClosed { + s.logger.Fatal("Failed to serve", zap.Error(err)) + } +} diff --git a/pkg/rest/server_test.go b/pkg/rest/server_test.go new file mode 100644 index 000000000..411e71d5a --- /dev/null +++ b/pkg/rest/server_test.go @@ -0,0 +1,98 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package rest + +import ( + "crypto/tls" + "fmt" + "net/http" + "os" + "testing" + "time" + + "go.uber.org/zap" +) + +const ( + certPath = "testdata/server.crt" + keyPath = "testdata/server.key" + timeout = 10 * time.Second + port = 9222 +) + +type dummyService struct{} + +func (*dummyService) Register(mux *http.ServeMux) {} + +func newServer() *Server { + logger := zap.NewExample() + server := NewServer( + certPath, + keyPath, + WithLogger(logger), + WithPort(port), + WithService(&dummyService{}), + ) + return server +} + +func newHTTPClient() *http.Client { + /* + ** Better to do it like net/http/httptest: + ** https://golang.org/pkg/crypto/tls/#example_Dial + ** certpool := x509.NewCertPool() + ** certpool.AddCert(s.certificate) + ** s.client.Transport = &http.Transport{ + ** TLSClientConfig: &tls.Config{ + ** RootCAs: certpool, + ** }} + */ + return &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + }, + Timeout: 5 * time.Second, + } +} + +func TestMain(m *testing.M) { + // Because os.Exit doesn't return, we need to call defer in separated function. 
+ code := testMain(m) + os.Exit(code) +} + +func testMain(m *testing.M) int { + server := newServer() + defer server.Stop(time.Second) + go server.Run() + waitForServer() + return m.Run() +} + +func waitForServer() { + client := newHTTPClient() + go func() { + <-time.After(timeout) + fmt.Fprintln(os.Stderr, "failed to get response") + os.Exit(1) + }() + for { + resp, err := client.Get(fmt.Sprintf("https://localhost:%d", port)) + if err == nil { + resp.Body.Close() + return + } + } +} diff --git a/pkg/rest/testdata/server.crt b/pkg/rest/testdata/server.crt new file mode 100644 index 000000000..375ebd09f --- /dev/null +++ b/pkg/rest/testdata/server.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDPjCCAiagAwIBAgIJALaUc+KZlzMUMA0GCSqGSIb3DQEBCwUAMBQxEjAQBgNV +BAMMCWJ1Y2tldGVlcjAgFw0xOTA4MDcwODE4MTZaGA8zMDE4MTIwODA4MTgxNlow +FDESMBAGA1UEAwwJYnVja2V0ZWVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAuK0ZNyr9nf1MF9D9xsvnkqrXdNibutfuaqE0tF44rRC0/OLwGPGY9kXT +cretIZF8aO+eIeZ/a+UhRoihSrD4js1bpsrUZr7CXYf/YjqSQlM9mUkzqBAwDYLj +XJ6A1s6kxqdDgEEtTRWT30vBibkRF7Fs6gzYGAcwDOrosL7vVTweXcndfPNybFlN +kub2cKQsJSEn5mLgTbdPmHe6ewEpeE7flHpz9IT+PNAObAyDdpjViESAzUGdnwDJ +tIBx8FigcH4obcLMw8yY/VqvY40U6zdXuGK3SBQKtJIOFcRwhkbqSry8iX6SDTGL +UszoaIvxLvmJ9Y2HXi1Y+HjegYNhTQIDAQABo4GQMIGNMAkGA1UdEwQCMAAwCwYD +VR0PBAQDAgKkMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4E +FgQUEJ31pVNCN33i5qDU1UIwrxCjkuYwHwYDVR0jBBgwFoAUEJ31pVNCN33i5qDU +1UIwrxCjkuYwFAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBCwUAA4IB +AQAQS/ntHZmMR/UpPv3GHn7az9kyuVA+U4PDOJLQbdG4mpnCzJZs6N8jbB9k9h6m +SYaXxMoYhgIEP757vWsQtuJGEhDet/SAHAi1MrPpzqBilR75oN9AzBC/dKWMXHaT +CP7qp4z3SkVWqNFOieD3O67Qj6fXn38RsAq3vaOAXqLaV+sPjndzkqV2MxfDlBC0 +eBunuQCS6kVckRAYeMjvRY6GKdhueaf3sDq8els+vE+LNx2QnN/l01Mta4vvMrNe +kKR7UoFoWjCjo6IDRzcHWI1+9rXQB4ELxCpjyWJTQ/dSnxwlnI+pQ6jMBSYg4Nw/ +cwopwZcyOlmL/mxK7dy+o8Qu +-----END CERTIFICATE----- diff --git a/pkg/rest/testdata/server.key b/pkg/rest/testdata/server.key new file mode 100644 index 
000000000..21223d63f --- /dev/null +++ b/pkg/rest/testdata/server.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC4rRk3Kv2d/UwX +0P3Gy+eSqtd02Ju61+5qoTS0XjitELT84vAY8Zj2RdNyt60hkXxo754h5n9r5SFG +iKFKsPiOzVumytRmvsJdh/9iOpJCUz2ZSTOoEDANguNcnoDWzqTGp0OAQS1NFZPf +S8GJuREXsWzqDNgYBzAM6uiwvu9VPB5dyd1883JsWU2S5vZwpCwlISfmYuBNt0+Y +d7p7ASl4Tt+UenP0hP480A5sDIN2mNWIRIDNQZ2fAMm0gHHwWKBwfihtwszDzJj9 +Wq9jjRTrN1e4YrdIFAq0kg4VxHCGRupKvLyJfpINMYtSzOhoi/Eu+Yn1jYdeLVj4 +eN6Bg2FNAgMBAAECggEAU+IMOgrE+CY9kfPT0aB8pxoCk4hv9AZwvO5MSkEh7TpR +eyx5clsK55H/4XOcqEq0/9UXNr7D0fZZjvgwiwSnYfXVU4V9xM2Q63sCfVOta4Lz +z9R4KjZwHTL+ous8ClYCclk3R+JS+Vh7uklmt2/gW6qzlfwPi5p4MOXim6WegE/w +P+Q3po2WyC/ggo4Givt/NQpudqQ/1da5wInr+bBgqfB75KQiSbKbW2CBp3wKr78A +jN7PnS2qMuveRZA8xwG/FH+g80mi+N7/bKlfyX2d0jq8p6+iCyLxhzGBGJua4Bl5 +8hB7/kaDc7648aHyLXxuXn7ovTf6R+Dh/ZQhmoTZ4QKBgQDqvppmulmXCgbw0hS+ +QQb4mds7dN3tYBp0LrZGij2W8VCSyUzxdPhEUi4pixT9zZPxnv5kvif9XpWVUy+7 +nGfA7/tRu2oOVvuYW7v7wM8TNNvlOw6g0Vr9GK3Y6Z2V97p77xOdUHrZNrPcGzFy +2WefwXZ1cUYfak6YsEwxq6V4YwKBgQDJZegOBlKau38TUvZUUOUlXeuyx+g9E0IL +2o3QpmyFkr+BYW8ViiviCyQWNSClIzDrPUhXZeDw8o4k41uP8di9vKb63BXRUrmK +IOQDNhAFSot6cwpjs23ihcftz17lDRnLeyZIlNbZtEd/4XJI6eY6fh2ta7YtIYZ3 +XlgGfeL2jwKBgQDqkpbVyqeV77Yp1bRWvcJKj+xsSIcwGlW0/ay1ZpTPDcs3MyLJ +MdqY4wowB0Rtro2E6B1L7F0nqemN8zeCoXNocCbScJY1SCRYBmsd0njat3p5YX9n +omzq90tZs3D+mDNofuo5zF9GSYyHiUDrhGYxVPCbwRqSb/ekSp+JDv3mAwKBgCN4 +3nlx5aS5N3WY3CgXo3SEaVow2ZfAR5a85NOGLIEOSsqn9Z/OmVIT8kQzEU/ktd4J +Ci/Skt79acnXfa4Jw8oPaz2t++3Fa0aH6oEiSYoVCiIEFyVeWhFWzNL2/ljLiOqN +Az4vcPQhS6Kbe7yZ7eIJioKcARdL21o09L4X+BzDAoGARuTsGTiZt75aEygQex6E +b6FdjfaAzPkN3q3Vm3Bg3yuLeza1lXy4VsIojmm9ssIAg6mbfYRVbBP+Y2f45M9Y +Ll4WNaIYcK+cqJGkvSlr5yhH1sBQBXLXU4UX2YZlJXbFkQQZzVfF1OdqUUtTBwVu +xoVRHpUPKTezZ+m78njC4nM= +-----END PRIVATE KEY----- diff --git a/pkg/rest/testdata/service.config b/pkg/rest/testdata/service.config new file mode 100644 index 000000000..7c1ad9283 --- /dev/null +++ 
b/pkg/rest/testdata/service.config @@ -0,0 +1,16 @@ +[req] +x509_extensions = v3_req +distinguished_name = req_distinguished_name + +[req_distinguished_name] + +[v3_req] +basicConstraints = CA:FALSE +keyUsage = digitalSignature, keyEncipherment, keyCertSign +extendedKeyUsage = serverAuth, clientAuth +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid, issuer +subjectAltName = @alt_names + +[alt_names] +DNS.1 = localhost \ No newline at end of file diff --git a/pkg/role/BUILD.bazel b/pkg/role/BUILD.bazel new file mode 100644 index 000000000..e9418af2d --- /dev/null +++ b/pkg/role/BUILD.bazel @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["role.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/role", + visibility = ["//visibility:public"], + deps = [ + "//pkg/rpc:go_default_library", + "//proto/account:go_default_library", + "//proto/event/domain:go_default_library", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["role_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/rpc:go_default_library", + "//pkg/token:go_default_library", + "//proto/account:go_default_library", + "//proto/event/domain:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + ], +) diff --git a/pkg/role/role.go b/pkg/role/role.go new file mode 100644 index 000000000..69b06c303 --- /dev/null +++ b/pkg/role/role.go @@ -0,0 +1,77 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package role + +import ( + "context" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/bucketeer-io/bucketeer/pkg/rpc" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +var ( + ErrUnauthenticated = status.Error(codes.Unauthenticated, "Unauthenticated user") + ErrPermissionDenied = status.Error(codes.PermissionDenied, "Permission denied") + ErrInternal = status.Error(codes.Internal, "Internal") +) + +func CheckAdminRole(ctx context.Context) (*eventproto.Editor, error) { + token, ok := rpc.GetIDToken(ctx) + if !ok { + return nil, ErrUnauthenticated + } + if !token.IsAdmin() { + return nil, ErrPermissionDenied + } + return checkRole(token.Email, accountproto.Account_OWNER, accountproto.Account_OWNER, true) +} + +func CheckRole( + ctx context.Context, + requiredRole accountproto.Account_Role, + getAccountFunc func(email string) (*accountproto.GetAccountResponse, error), +) (*eventproto.Editor, error) { + token, ok := rpc.GetIDToken(ctx) + if !ok { + return nil, ErrUnauthenticated + } + if !token.IsAdmin() { + // get account for the environment namespace + resp, err := getAccountFunc(token.Email) + if err != nil { + if code := status.Code(err); code == codes.NotFound { + return nil, ErrUnauthenticated + } + return nil, ErrInternal + } + return checkRole(resp.Account.Email, resp.Account.Role, requiredRole, false) + } + return checkRole(token.Email, accountproto.Account_OWNER, requiredRole, true) +} + +func checkRole(email string, 
role, requiredRole accountproto.Account_Role, isAdmin bool) (*eventproto.Editor, error) { + if role == accountproto.Account_UNASSIGNED || role < requiredRole { + return nil, ErrPermissionDenied + } + return &eventproto.Editor{ + Email: email, + Role: role, + IsAdmin: isAdmin, + }, nil +} diff --git a/pkg/role/role_test.go b/pkg/role/role_test.go new file mode 100644 index 000000000..cb775f33f --- /dev/null +++ b/pkg/role/role_test.go @@ -0,0 +1,137 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package role + +import ( + "context" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/token" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" +) + +func TestCheckAdminRole(t *testing.T) { + t.Parallel() + patterns := []struct { + inputCtx context.Context + expected *eventproto.Editor + expectedErr error + }{ + { + inputCtx: context.Background(), + expected: nil, + expectedErr: ErrUnauthenticated, + }, + { + inputCtx: getContextWithToken(t, &token.IDToken{Email: "test@example.com", AdminRole: accountproto.Account_UNASSIGNED}), + expected: nil, + expectedErr: ErrPermissionDenied, + }, + { + inputCtx: getContextWithToken(t, &token.IDToken{Email: "test@example.com", AdminRole: accountproto.Account_OWNER}), + expected: &eventproto.Editor{Email: "test@example.com", Role: accountproto.Account_OWNER, IsAdmin: true}, + expectedErr: nil, + }, + } + for _, p := range patterns { + editor, err := CheckAdminRole(p.inputCtx) + assert.Equal(t, p.expectedErr, err) + assert.Equal(t, p.expected, editor) + } +} + +func TestCheckRole(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + + patterns := []struct { + inputCtx context.Context + inputRequiredRole accountproto.Account_Role + inputGetAccountFunc func(email string) (*accountproto.GetAccountResponse, error) + expected *eventproto.Editor + expectedErr error + }{ + { + inputCtx: context.Background(), + inputRequiredRole: accountproto.Account_EDITOR, + expected: nil, + expectedErr: ErrUnauthenticated, + }, + { + inputCtx: getContextWithToken(t, &token.IDToken{Email: "test@example.com", AdminRole: accountproto.Account_UNASSIGNED}), + inputRequiredRole: accountproto.Account_EDITOR, + inputGetAccountFunc: 
func(email string) (*accountproto.GetAccountResponse, error) { + return nil, status.Error(codes.NotFound, "") + }, + expected: nil, + expectedErr: ErrUnauthenticated, + }, + { + inputCtx: getContextWithToken(t, &token.IDToken{Email: "test@example.com", AdminRole: accountproto.Account_UNASSIGNED}), + inputRequiredRole: accountproto.Account_EDITOR, + inputGetAccountFunc: func(email string) (*accountproto.GetAccountResponse, error) { + return nil, status.Error(codes.Internal, "") + }, + expected: nil, + expectedErr: ErrInternal, + }, + { + inputCtx: getContextWithToken(t, &token.IDToken{Email: "test@example.com", AdminRole: accountproto.Account_UNASSIGNED}), + inputRequiredRole: accountproto.Account_EDITOR, + inputGetAccountFunc: func(email string) (*accountproto.GetAccountResponse, error) { + return &accountproto.GetAccountResponse{ + Account: &accountproto.Account{Email: "test@example.com", Role: accountproto.Account_VIEWER}, + }, nil + }, + expected: nil, + expectedErr: ErrPermissionDenied, + }, + { + inputCtx: getContextWithToken(t, &token.IDToken{Email: "test@example.com", AdminRole: accountproto.Account_UNASSIGNED}), + inputRequiredRole: accountproto.Account_EDITOR, + inputGetAccountFunc: func(email string) (*accountproto.GetAccountResponse, error) { + return &accountproto.GetAccountResponse{ + Account: &accountproto.Account{Email: "test@example.com", Role: accountproto.Account_EDITOR}, + }, nil + }, + expected: &eventproto.Editor{Email: "test@example.com", Role: accountproto.Account_EDITOR, IsAdmin: false}, + expectedErr: nil, + }, + { + inputCtx: getContextWithToken(t, &token.IDToken{Email: "test@example.com", AdminRole: accountproto.Account_OWNER}), + inputRequiredRole: accountproto.Account_OWNER, + expected: &eventproto.Editor{Email: "test@example.com", Role: accountproto.Account_OWNER, IsAdmin: true}, + expectedErr: nil, + }, + } + for _, p := range patterns { + editor, err := CheckRole(p.inputCtx, p.inputRequiredRole, p.inputGetAccountFunc) + 
assert.Equal(t, p.expectedErr, err) + assert.Equal(t, p.expected, editor) + } +} + +func getContextWithToken(t *testing.T, token *token.IDToken) context.Context { + t.Helper() + return context.WithValue(context.Background(), rpc.Key, token) +} diff --git a/pkg/rpc/BUILD.bazel b/pkg/rpc/BUILD.bazel new file mode 100644 index 000000000..53a88cf7d --- /dev/null +++ b/pkg/rpc/BUILD.bazel @@ -0,0 +1,58 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "auth.go", + "interceptor.go", + "log.go", + "metrics.go", + "server.go", + "service.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/rpc", + visibility = ["//visibility:public"], + deps = [ + "//pkg/log:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/token:go_default_library", + "@com_github_golang_protobuf//jsonpb:go_default_library_gen", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_prometheus_client_golang//prometheus:go_default_library", + "@io_opencensus_go//plugin/ocgrpc:go_default_library", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//credentials:go_default_library", + "@org_golang_google_grpc//metadata:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "interceptor_test.go", + "log_test.go", + "server_test.go", + ], + data = glob(["testdata/**"]), + embed = [":go_default_library"], + deps = [ + "//pkg/health:go_default_library", + "//pkg/token:go_default_library", + "//proto/feature:go_default_library", + "//proto/gateway:go_default_library", + "//proto/test:go_default_library", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_prometheus_client_golang//prometheus:go_default_library", + 
"@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//credentials:go_default_library", + "@org_golang_google_grpc//health/grpc_health_v1:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/rpc/auth.go b/pkg/rpc/auth.go new file mode 100644 index 000000000..a2b9c31be --- /dev/null +++ b/pkg/rpc/auth.go @@ -0,0 +1,69 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package rpc + +import ( + "context" + "strings" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + + "github.com/bucketeer-io/bucketeer/pkg/token" +) + +type tokenKey struct{} + +var Key = tokenKey{} + +const healthServiceName = "/grpc.health.v1.Health/" + +func AuthUnaryServerInterceptor(verifier token.Verifier) grpc.UnaryServerInterceptor { + return func( + ctx context.Context, + req interface{}, + info *grpc.UnaryServerInfo, + handler grpc.UnaryHandler, + ) (interface{}, error) { + if strings.HasPrefix(info.FullMethod, healthServiceName) { + return handler(ctx, req) + } + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, status.Error(codes.Unauthenticated, "token is required") + } + rawTokens, ok := md["authorization"] + if !ok || len(rawTokens) == 0 { + return nil, status.Error(codes.Unauthenticated, "token is required") + } + subs := strings.Split(rawTokens[0], " ") + if len(subs) != 2 { + return nil, status.Error(codes.Unauthenticated, "token is malformed") + } + token, err := verifier.Verify(subs[1]) + if err != nil { + return nil, status.Errorf(codes.Unauthenticated, "token is invalid: %s", err.Error()) + } + ctx = context.WithValue(ctx, Key, token) + return handler(ctx, req) + } +} + +func GetIDToken(ctx context.Context) (*token.IDToken, bool) { + t, ok := ctx.Value(Key).(*token.IDToken) + return t, ok +} diff --git a/pkg/rpc/client/BUILD.bazel b/pkg/rpc/client/BUILD.bazel new file mode 100644 index 000000000..fe0165caa --- /dev/null +++ b/pkg/rpc/client/BUILD.bazel @@ -0,0 +1,40 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "client.go", + "credentials.go", + "interceptor.go", + "log.go", + "metrics.go", + "request_id.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/rpc/client", + visibility = ["//visibility:public"], + deps = [ + 
"//pkg/log:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/rpc/metadata:go_default_library", + "@com_github_golang_protobuf//jsonpb:go_default_library_gen", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_prometheus_client_golang//prometheus:go_default_library", + "@io_opencensus_go//plugin/ocgrpc:go_default_library", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//credentials:go_default_library", + "@org_golang_google_grpc//stats:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["interceptor_test.go"], + embed = [":go_default_library"], + deps = [ + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + "@org_golang_google_grpc//:go_default_library", + ], +) diff --git a/pkg/rpc/client/client.go b/pkg/rpc/client/client.go new file mode 100644 index 000000000..1a4526481 --- /dev/null +++ b/pkg/rpc/client/client.go @@ -0,0 +1,125 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package client + +import ( + "context" + "time" + + "go.opencensus.io/plugin/ocgrpc" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/stats" + + "github.com/bucketeer-io/bucketeer/pkg/metrics" +) + +type options struct { + dialTimeout time.Duration + perRPCCredentials credentials.PerRPCCredentials + block bool + logger *zap.Logger + metrics metrics.Registerer + statsHandler stats.Handler +} + +var defaultOptions = options{ + block: false, + logger: zap.NewNop(), + statsHandler: &ocgrpc.ClientHandler{}, +} + +type Option func(*options) + +func WithDialTimeout(d time.Duration) Option { + return func(o *options) { + o.dialTimeout = d + } +} + +func WithPerRPCCredentials(creds credentials.PerRPCCredentials) Option { + return func(o *options) { + o.perRPCCredentials = creds + } +} + +func WithBlock() Option { + return func(o *options) { + o.block = true + } +} + +func WithLogger(logger *zap.Logger) Option { + return func(o *options) { + o.logger = logger + } +} + +func WithMetrics(registerer metrics.Registerer) Option { + return func(o *options) { + o.metrics = registerer + } +} + +func WithStatsHandler(handler stats.Handler) Option { + return func(o *options) { + o.statsHandler = handler + } +} + +func NewClientConn(addr string, certPath string, opts ...Option) (*grpc.ClientConn, error) { + options := defaultOptions + for _, o := range opts { + o(&options) + } + ctx := context.Background() + if options.dialTimeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, options.dialTimeout) + defer cancel() + } + cred, err := credentials.NewClientTLSFromFile(certPath, "") + if err != nil { + return nil, err + } + dialOptions := []grpc.DialOption{ + grpc.WithTransportCredentials(cred), + grpc.WithUnaryInterceptor(options.unaryInterceptor()), + grpc.WithStatsHandler(options.statsHandler), + } + if options.perRPCCredentials != nil { + dialOptions = append(dialOptions, 
grpc.WithPerRPCCredentials(options.perRPCCredentials)) + } + if options.block { + dialOptions = append(dialOptions, grpc.WithBlock()) + } + return grpc.DialContext(ctx, addr, dialOptions...) +} + +func (o *options) unaryInterceptor() grpc.UnaryClientInterceptor { + if o.metrics == nil { + return ChainUnaryClientInterceptors( + XRequestIDUnaryClientInterceptor(), + LogUnaryClientInterceptor(o.logger), + ) + } + registerMetrics(o.metrics) + return ChainUnaryClientInterceptors( + XRequestIDUnaryClientInterceptor(), + LogUnaryClientInterceptor(o.logger), + MetricsUnaryClientInterceptor(), + ) +} diff --git a/pkg/rpc/client/credentials.go b/pkg/rpc/client/credentials.go new file mode 100644 index 000000000..0c92166c0 --- /dev/null +++ b/pkg/rpc/client/credentials.go @@ -0,0 +1,48 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package client + +import ( + "context" + "fmt" + "io/ioutil" + "strings" + + "google.golang.org/grpc/credentials" +) + +type perRPCCredentials struct { + token string +} + +func NewPerRPCCredentials(tokenPath string) (credentials.PerRPCCredentials, error) { + data, err := ioutil.ReadFile(tokenPath) + if err != nil { + return nil, err + } + return perRPCCredentials{ + token: strings.TrimSpace(string(data)), + }, nil +} + +func (c perRPCCredentials) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + return map[string]string{ + "authorization": fmt.Sprintf("bearer %s", c.token), + }, nil +} + +func (c perRPCCredentials) RequireTransportSecurity() bool { + return true +} diff --git a/pkg/rpc/client/interceptor.go b/pkg/rpc/client/interceptor.go new file mode 100644 index 000000000..5fcaaf007 --- /dev/null +++ b/pkg/rpc/client/interceptor.go @@ -0,0 +1,49 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package client + +import ( + "context" + + "google.golang.org/grpc" +) + +func ChainUnaryClientInterceptors(is ...grpc.UnaryClientInterceptor) grpc.UnaryClientInterceptor { + return func( + ctx context.Context, + method string, + req, reply interface{}, + cc *grpc.ClientConn, + invoker grpc.UnaryInvoker, + opts ...grpc.CallOption, + ) error { + chain := func(interceptor grpc.UnaryClientInterceptor, next grpc.UnaryInvoker) grpc.UnaryInvoker { + return func( + ctx context.Context, + method string, + req, reply interface{}, + cc *grpc.ClientConn, + opts ...grpc.CallOption, + ) error { + return interceptor(ctx, method, req, reply, cc, next, opts...) + } + } + next := invoker + for i := len(is) - 1; i >= 0; i-- { + next = chain(is[i], next) + } + return next(ctx, method, req, reply, cc, opts...) + } +} diff --git a/pkg/rpc/client/interceptor_test.go b/pkg/rpc/client/interceptor_test.go new file mode 100644 index 000000000..3b7cd27a9 --- /dev/null +++ b/pkg/rpc/client/interceptor_test.go @@ -0,0 +1,57 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package client + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" +) + +func TestChainUnaryClientInterceptors(t *testing.T) { + type parentKey string + parent := parentKey("parent") + ctx := context.WithValue(context.Background(), parent, "") + serviceMethod := "test-method" + var firstRun, secondRun, invokerRun bool + first := func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + require.Equal(t, serviceMethod, method) + require.Equal(t, "", ctx.Value(parent).(string)) + ctx = context.WithValue(ctx, parent, "first") + firstRun = true + return invoker(ctx, method, req, reply, cc, opts...) + } + second := func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + require.Equal(t, serviceMethod, method) + require.Equal(t, "first", ctx.Value(parent).(string)) + ctx = context.WithValue(ctx, parent, "second") + secondRun = true + return invoker(ctx, method, req, reply, cc, opts...) + } + invoker := func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, opts ...grpc.CallOption) error { + require.Equal(t, serviceMethod, method) + require.Equal(t, "second", ctx.Value(parent).(string)) + invokerRun = true + return nil + } + interceptors := ChainUnaryClientInterceptors(first, second) + interceptors(ctx, serviceMethod, "req", "reply", nil, invoker, nil) + assert.True(t, firstRun) + assert.True(t, secondRun) + assert.True(t, invokerRun) +} diff --git a/pkg/rpc/client/log.go b/pkg/rpc/client/log.go new file mode 100644 index 000000000..75f26765a --- /dev/null +++ b/pkg/rpc/client/log.go @@ -0,0 +1,78 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "bytes" + "context" + "fmt" + "time" + + "github.com/golang/protobuf/jsonpb" // nolint:staticcheck + "github.com/golang/protobuf/proto" // nolint:staticcheck + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/status" + + "github.com/bucketeer-io/bucketeer/pkg/log" +) + +var marshaler = &jsonpb.Marshaler{} + +func LogUnaryClientInterceptor(logger *zap.Logger) grpc.UnaryClientInterceptor { + logger = logger.Named("grpc_client") + return func( + ctx context.Context, + method string, + req, reply interface{}, + cc *grpc.ClientConn, + invoker grpc.UnaryInvoker, + opts ...grpc.CallOption, + ) error { + serviceName, methodName := splitFullMethodName(method) + startTime := time.Now() + err := invoker(ctx, method, req, reply, cc, opts...) 
+ logger.Check(zap.DebugLevel, "").Write( + log.FieldsFromOutgoingContext(ctx).AddFields( + zap.Error(err), + zap.String("gprcService", serviceName), + zap.String("grpcMethod", methodName), + zap.String("grpcCode", status.Code(err).String()), + zap.Duration("duration", time.Since(startTime)), + zap.Reflect("request", makeUnmarshallable(req)), + )..., + ) + return err + } +} + +type unmarshallable struct { + proto.Message +} + +func makeUnmarshallable(msg interface{}) *unmarshallable { + if m, ok := msg.(proto.Message); ok { + return &unmarshallable{m} + } + return nil +} + +func (m *unmarshallable) MarshalJSON() ([]byte, error) { + b := &bytes.Buffer{} + if err := marshaler.Marshal(b, m); err != nil { + return nil, fmt.Errorf("jsonpb serializer failed: %v", err) + } + return b.Bytes(), nil +} diff --git a/pkg/rpc/client/metrics.go b/pkg/rpc/client/metrics.go new file mode 100644 index 000000000..e7902cfef --- /dev/null +++ b/pkg/rpc/client/metrics.go @@ -0,0 +1,81 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package client + +import ( + "context" + "strings" + "sync" + + "github.com/prometheus/client_golang/prometheus" + "google.golang.org/grpc" + "google.golang.org/grpc/status" + + "github.com/bucketeer-io/bucketeer/pkg/metrics" +) + +const rpcTypeUnary = "Unary" + +var ( + registerOnce sync.Once + + startedCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "grpc", + Name: "client_started_total", + Help: "Total number of RPCs started on the client.", + }, []string{"type", "service", "method"}) + + handledCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "grpc", + Name: "client_handled_total", + Help: "Total number of RPCs completed by the client, regardless of success or failure.", + }, []string{"type", "service", "method", "code"}) +) + +func registerMetrics(r metrics.Registerer) { + registerOnce.Do(func() { + r.MustRegister(startedCounter, handledCounter) + }) +} + +func MetricsUnaryClientInterceptor() grpc.UnaryClientInterceptor { + return func( + ctx context.Context, + method string, + req, reply interface{}, + cc *grpc.ClientConn, + invoker grpc.UnaryInvoker, + opts ...grpc.CallOption, + ) error { + serviceName, methodName := splitFullMethodName(method) + startedCounter.WithLabelValues(rpcTypeUnary, serviceName, methodName).Inc() + err := invoker(ctx, method, req, reply, cc, opts...) 
+ handledCounter.WithLabelValues(rpcTypeUnary, serviceName, methodName, status.Code(err).String()).Inc() + return err + } +} + +func splitFullMethodName(fullMethodName string) (string, string) { + // format: /package.service/method + parts := strings.Split(fullMethodName, "/") + if len(parts) != 3 { + return "unknown", "unknown" + } + return parts[1], parts[2] +} diff --git a/pkg/rpc/client/request_id.go b/pkg/rpc/client/request_id.go new file mode 100644 index 000000000..629fea44a --- /dev/null +++ b/pkg/rpc/client/request_id.go @@ -0,0 +1,42 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/bucketeer-io/bucketeer/pkg/rpc/metadata" +) + +func XRequestIDUnaryClientInterceptor() grpc.UnaryClientInterceptor { + return func( + ctx context.Context, + method string, + req, reply interface{}, + cc *grpc.ClientConn, + invoker grpc.UnaryInvoker, + opts ...grpc.CallOption, + ) error { + reqID := metadata.GetXRequestIDFromIncomingContext(ctx) + if reqID == "" { + reqID = metadata.GenerateXRequestID() + } + ctx = metadata.AppendXRequestIDToOutgoingContext(ctx, reqID) + err := invoker(ctx, method, req, reply, cc, opts...) 
+ return err + } +} diff --git a/pkg/rpc/interceptor.go b/pkg/rpc/interceptor.go new file mode 100644 index 000000000..a14327cdb --- /dev/null +++ b/pkg/rpc/interceptor.go @@ -0,0 +1,54 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rpc + +import ( + "context" + + "google.golang.org/grpc" +) + +// TODO: change in case this (lambda etc) becomes a performance bottleneck. +func chainUnaryServerInterceptors(is ...grpc.UnaryServerInterceptor) grpc.UnaryServerInterceptor { + // each interceptor should get the context and request from the previous one + // real grpc handler should be called in the end and passed back up + // interceptor() + // -- add some before stuff + // -- res, err := handler() + // -- interceptor() <------ this means handler should be interceptor and the last handler should be the real handler + // ---- add more to context + // ---- res, err := handler() + // ---- do some after stuff + // ---- return res, err + // -- do some after stuff + // return res, err + return func( + ctx context.Context, + req interface{}, + info *grpc.UnaryServerInfo, + handler grpc.UnaryHandler, + ) (interface{}, error) { + chain := func(interceptor grpc.UnaryServerInterceptor, next grpc.UnaryHandler) grpc.UnaryHandler { + return func(ctx context.Context, req interface{}) (interface{}, error) { + return interceptor(ctx, req, info, next) + } + } + next := handler + for i := len(is) - 1; i >= 0; i-- { + next = 
chain(is[i], next) + } + return next(ctx, req) + } +} diff --git a/pkg/rpc/interceptor_test.go b/pkg/rpc/interceptor_test.go new file mode 100644 index 000000000..820266df1 --- /dev/null +++ b/pkg/rpc/interceptor_test.go @@ -0,0 +1,60 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rpc + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" +) + +func TestChainUnaryServerInterceptors(t *testing.T) { + type parentKey string + parent := parentKey("parent") + ctx := context.WithValue(context.Background(), parent, "") + serverInfo := &grpc.UnaryServerInfo{ + FullMethod: "service.test", + } + out := "out" + var firstRun, secondRun, handlerRun bool + first := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { + require.Equal(t, serverInfo, info) + require.Equal(t, "", ctx.Value(parent).(string)) + ctx = context.WithValue(ctx, parent, "first") + firstRun = true + return handler(ctx, req) + } + second := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { + require.Equal(t, serverInfo, info) + require.Equal(t, "first", ctx.Value(parent).(string)) + ctx = context.WithValue(ctx, parent, "second") + secondRun = true + return handler(ctx, req) + } + handler := func(ctx 
context.Context, req interface{}) (interface{}, error) { + require.Equal(t, "second", ctx.Value(parent).(string)) + handlerRun = true + return out, nil + } + interceptors := chainUnaryServerInterceptors(first, second) + result, _ := interceptors(ctx, "req", serverInfo, handler) + assert.Equal(t, out, result) + assert.True(t, firstRun) + assert.True(t, secondRun) + assert.True(t, handlerRun) +} diff --git a/pkg/rpc/log.go b/pkg/rpc/log.go new file mode 100644 index 000000000..f5ff51800 --- /dev/null +++ b/pkg/rpc/log.go @@ -0,0 +1,103 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package rpc + +import ( + "bytes" + "context" + "fmt" + "strings" + "time" + + "github.com/golang/protobuf/jsonpb" // nolint:staticcheck + "github.com/golang/protobuf/proto" // nolint:staticcheck + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/bucketeer-io/bucketeer/pkg/log" +) + +var marshaler = jsonpb.Marshaler{EmitDefaults: true} + +func LogUnaryServerInterceptor(logger *zap.Logger) grpc.UnaryServerInterceptor { + logger = logger.Named("grpc_server") + return func( + ctx context.Context, + req interface{}, + info *grpc.UnaryServerInfo, + handler grpc.UnaryHandler, + ) (interface{}, error) { + startTime := time.Now() + resp, err := handler(ctx, req) + level := zap.DebugLevel + code := status.Code(err) + if err != nil { + switch code { + case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.PermissionDenied, codes.Unauthenticated: + level = zap.WarnLevel + default: + level = zap.ErrorLevel + } + } + if level == zap.DebugLevel && strings.HasPrefix(info.FullMethod, healthServiceName) { + return resp, err + } + serviceName, methodName := splitFullMethodName(info.FullMethod) + logger.Check(level, "").Write( + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("grpcService", serviceName), + zap.String("grpcMethod", methodName), + zap.String("grpcCode", code.String()), + zap.Duration("duration", time.Since(startTime)), + zap.Reflect("request", makeMarshallable(req)), + zap.Reflect("response", makeMarshallable(resp)), + )..., + ) + return resp, err + } +} + +type marshallable struct { + proto.Message +} + +func makeMarshallable(msg interface{}) *marshallable { + if m, ok := msg.(proto.Message); ok { + return &marshallable{m} + } + return nil +} + +/* zap json encoder calls json.Marshal when doing zap.Reflected() +func (enc *jsonEncoder) AppendReflected(val interface{}) error { + marshaled, err := json.Marshal(val) + if err != nil { + return err 
+ } + enc.addElementSeparator() + _, err = enc.buf.Write(marshaled) + return err +} +*/ +func (m *marshallable) MarshalJSON() ([]byte, error) { + b := &bytes.Buffer{} + if err := marshaler.Marshal(b, m.Message); err != nil { + return nil, fmt.Errorf("jsonpb serializer failed: %v", err) + } + return b.Bytes(), nil +} diff --git a/pkg/rpc/log_test.go b/pkg/rpc/log_test.go new file mode 100644 index 000000000..712970950 --- /dev/null +++ b/pkg/rpc/log_test.go @@ -0,0 +1,52 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package rpc + +import ( + "encoding/json" + "testing" + + "github.com/golang/protobuf/proto" + + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" + gatewayproto "github.com/bucketeer-io/bucketeer/proto/gateway" +) + +func TestJSONPMarshaller(t *testing.T) { + var marshalingTests = []struct { + desc string + pb proto.Message + json string + }{ + { + "default values", + &gatewayproto.GetEvaluationsResponse{State: featureproto.UserEvaluations_QUEUED}, + `{"state":"QUEUED","evaluations":null,"userEvaluationsId":""}`, + }, + { + "non-default values", + &gatewayproto.GetEvaluationsResponse{State: featureproto.UserEvaluations_FULL}, + `{"state":"FULL","evaluations":null,"userEvaluationsId":""}`, + }, + } + for _, tt := range marshalingTests { + json, err := json.Marshal(makeMarshallable(tt.pb)) + if err != nil { + t.Errorf("%s: marshaling error: %v", tt.desc, err) + } else if tt.json != string(json) { + t.Errorf("%s: got [%v] want [%v]", tt.desc, string(json), tt.json) + } + } +} diff --git a/pkg/rpc/metadata/BUILD.bazel b/pkg/rpc/metadata/BUILD.bazel new file mode 100644 index 000000000..c738fff54 --- /dev/null +++ b/pkg/rpc/metadata/BUILD.bazel @@ -0,0 +1,22 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["request_id.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/rpc/metadata", + visibility = ["//visibility:public"], + deps = [ + "//pkg/uuid:go_default_library", + "@org_golang_google_grpc//metadata:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["request_id_test.go"], + embed = [":go_default_library"], + deps = [ + "@com_github_stretchr_testify//assert:go_default_library", + "@org_golang_google_grpc//metadata:go_default_library", + ], +) diff --git a/pkg/rpc/metadata/request_id.go b/pkg/rpc/metadata/request_id.go new file mode 100644 index 000000000..374fa4613 --- /dev/null +++ b/pkg/rpc/metadata/request_id.go @@ -0,0 +1,61 @@ +// 
Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metadata + +import ( + "context" + + gmetadata "google.golang.org/grpc/metadata" + + "github.com/bucketeer-io/bucketeer/pkg/uuid" +) + +const xRequestIDKey = "x-request-id" + +func GetXRequestIDFromIncomingContext(ctx context.Context) string { + md, ok := gmetadata.FromIncomingContext(ctx) + if !ok { + return "" + } + reqIDs, ok := md[xRequestIDKey] + if !ok || len(reqIDs) == 0 { + return "" + } + return reqIDs[0] +} + +func GetXRequestIDFromOutgoingContext(ctx context.Context) string { + md, ok := gmetadata.FromOutgoingContext(ctx) + if !ok { + return "" + } + reqIDs, ok := md[xRequestIDKey] + if !ok || len(reqIDs) == 0 { + return "" + } + return reqIDs[0] +} + +func AppendXRequestIDToOutgoingContext(ctx context.Context, xRequestID string) context.Context { + return gmetadata.AppendToOutgoingContext(ctx, xRequestIDKey, xRequestID) +} + +func GenerateXRequestID() string { + id, err := uuid.NewUUID() + if err != nil { + return "" + } + return id.String() +} diff --git a/pkg/rpc/metadata/request_id_test.go b/pkg/rpc/metadata/request_id_test.go new file mode 100644 index 000000000..bf42e5edb --- /dev/null +++ b/pkg/rpc/metadata/request_id_test.go @@ -0,0 +1,105 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metadata + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + gmetadata "google.golang.org/grpc/metadata" +) + +func TestXGetRequestIDFromIncomingContext(t *testing.T) { + t.Parallel() + patterns := map[string]struct { + ctx context.Context + expected string + }{ + "metadata doesn't exist": { + ctx: context.Background(), + expected: "", + }, + "xRequestIDKey doesn't exist": { + ctx: gmetadata.NewIncomingContext( + context.Background(), + gmetadata.Pairs(), + ), + expected: "", + }, + "success": { + ctx: gmetadata.NewIncomingContext( + context.Background(), + gmetadata.Pairs(xRequestIDKey, "request-id-1"), + ), + expected: "request-id-1", + }, + } + + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + actual := GetXRequestIDFromIncomingContext(p.ctx) + assert.Equal(t, p.expected, actual) + }) + } +} + +func TestXGetRequestIDFromOutgoingContext(t *testing.T) { + t.Parallel() + patterns := map[string]struct { + ctx context.Context + expected string + }{ + "metadata doesn't exist": { + ctx: context.Background(), + expected: "", + }, + "xRequestIDKey doesn't exist": { + ctx: gmetadata.NewOutgoingContext( + context.Background(), + gmetadata.Pairs(), + ), + expected: "", + }, + "success": { + ctx: gmetadata.NewOutgoingContext( + context.Background(), + gmetadata.Pairs(xRequestIDKey, "request-id-1"), + ), + expected: "request-id-1", + }, + } + + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + actual := GetXRequestIDFromOutgoingContext(p.ctx) + assert.Equal(t, p.expected, actual) + }) + 
} +} + +func TestAppendXRequestIDToOutgoingContext(t *testing.T) { + t.Parallel() + ctx := gmetadata.NewOutgoingContext( + context.Background(), + gmetadata.Pairs(), + ) + actualReqID := GetXRequestIDFromOutgoingContext(ctx) + assert.Equal(t, "", actualReqID) + expectedReqID := "request-id-1" + ctx = AppendXRequestIDToOutgoingContext(ctx, expectedReqID) + actualReqID = GetXRequestIDFromOutgoingContext(ctx) + assert.Equal(t, expectedReqID, actualReqID) +} diff --git a/pkg/rpc/metrics.go b/pkg/rpc/metrics.go new file mode 100644 index 000000000..8766cfc1b --- /dev/null +++ b/pkg/rpc/metrics.go @@ -0,0 +1,95 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package rpc + +import ( + "context" + "strings" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + "google.golang.org/grpc" + "google.golang.org/grpc/status" + + "github.com/bucketeer-io/bucketeer/pkg/metrics" +) + +const rpcTypeUnary = "Unary" + +var ( + registerOnce sync.Once + + serverStartedCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "grpc", + Name: "server_started_total", + Help: "Total number of RPCs started on the server.", + }, []string{"type", "service", "method"}) + + serverHandledCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "grpc", + Name: "server_handled_total", + Help: "Total number of RPCs completed on the server, regardless of success or failure.", + }, []string{"type", "service", "method", "code"}) + + serverHandledHistogram = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "bucketeer", + Subsystem: "grpc", + Name: "server_handling_seconds", + Help: "Histogram of response latency (seconds) of gRPC that had been application-level handled by the server.", + Buckets: prometheus.DefBuckets, + }, []string{"type", "service", "method"}) +) + +func registerMetrics(r metrics.Registerer) { + registerOnce.Do(func() { + r.MustRegister( + serverStartedCounter, + serverHandledCounter, + serverHandledHistogram, + ) + }) +} + +func MetricsUnaryServerInterceptor() grpc.UnaryServerInterceptor { + return func( + ctx context.Context, + req interface{}, + info *grpc.UnaryServerInfo, + handler grpc.UnaryHandler, + ) (interface{}, error) { + startTime := time.Now() + serviceName, methodName := splitFullMethodName(info.FullMethod) + serverStartedCounter.WithLabelValues(rpcTypeUnary, serviceName, methodName).Inc() + resp, err := handler(ctx, req) + serverHandledCounter.WithLabelValues(rpcTypeUnary, serviceName, methodName, status.Code(err).String()).Inc() + serverHandledHistogram.WithLabelValues(rpcTypeUnary, 
serviceName, methodName).Observe(time.Since(startTime).Seconds()) + return resp, err + } +} + +func splitFullMethodName(fullMethodName string) (string, string) { + // format: /package.service/method + parts := strings.Split(fullMethodName, "/") + if len(parts) != 3 { + return "unknown", "unknown" + } + return parts[1], parts[2] +} diff --git a/pkg/rpc/server.go b/pkg/rpc/server.go new file mode 100644 index 000000000..0eac7c913 --- /dev/null +++ b/pkg/rpc/server.go @@ -0,0 +1,193 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package rpc + +import ( + "context" + "fmt" + "net/http" + "strings" + "time" + + "go.opencensus.io/plugin/ocgrpc" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/token" +) + +type Server struct { + certPath string + keyPath string + logger *zap.Logger + port int + metrics metrics.Registerer + verifier token.Verifier + services []Service + handlers []httpHandler + rpcServer *grpc.Server + httpServer *http.Server +} + +type httpHandler struct { + http.Handler + path string +} + +type Option func(*Server) + +func WithPort(port int) Option { + return func(s *Server) { + s.port = port + } +} + +func WithVerifier(verifier token.Verifier) Option { + return func(s *Server) { + s.verifier = verifier + } +} + +func WithLogger(logger *zap.Logger) Option { + return func(s *Server) { + s.logger = logger + } +} + +func WithService(service Service) Option { + return func(s *Server) { + s.services = append(s.services, service) + } +} + +func WithMetrics(registerer metrics.Registerer) Option { + return func(s *Server) { + s.metrics = registerer + } +} + +func WithHandler(path string, handler http.Handler) Option { + return func(s *Server) { + s.handlers = append(s.handlers, httpHandler{Handler: handler, path: path}) + } +} + +func NewServer(service Service, certPath, keyPath string, opt ...Option) *Server { + server := &Server{ + port: 9000, + logger: zap.NewNop(), + } + for _, o := range opt { + o(server) + } + server.logger = server.logger.Named("rpc-server") + if len(certPath) == 0 { + server.logger.Fatal("CertPath must not be empty") + } + server.certPath = certPath + if len(keyPath) == 0 { + server.logger.Fatal("KeyPath must not be empty") + } + server.keyPath = keyPath + if service == nil { + server.logger.Fatal("Service must not be nil") + } + server.services = append(server.services, service) + return server +} + +func (s *Server) Run() { + 
if s.metrics != nil { + registerMetrics(s.metrics) + } + s.setupRPC() + s.setupHTTP() + s.logger.Info(fmt.Sprintf("Running on %d", s.port)) + s.runServer() +} + +func (s *Server) Stop(timeout time.Duration) { + s.logger.Info("Server is going to sleep 10 seconds before shutting down") + // When the sigterm signal is sent, sometimes the app could get the signal before envoy, + // when it does, the requests will fail because the app cannot receive any request after the shutdown. + // So we wait a bit in case there are still requests to be processed + // between the envoy and app after the signal. + time.Sleep(10 * time.Second) + s.logger.Info("Server is awakening from sleep, and going to shutdown") + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + err := s.httpServer.Shutdown(ctx) + if err != nil { + s.logger.Error("Failed to shutdown", zap.Error(err)) + } +} + +func (s *Server) setupRPC() { + creds, err := credentials.NewServerTLSFromFile(s.certPath, s.keyPath) + if err != nil { + s.logger.Fatal("Failed to read credentials: %v", zap.Error(err)) + } + interceptor := chainUnaryServerInterceptors( + LogUnaryServerInterceptor(s.logger), + MetricsUnaryServerInterceptor(), + ) + if s.verifier != nil { + interceptor = chainUnaryServerInterceptors( + interceptor, + AuthUnaryServerInterceptor(s.verifier)) + } + s.rpcServer = grpc.NewServer( + grpc.Creds(creds), + grpc.UnaryInterceptor(interceptor), + grpc.StatsHandler(&ocgrpc.ServerHandler{}), + ) + for _, service := range s.services { + service.Register(s.rpcServer) + } +} + +func (s *Server) setupHTTP() { + mux := http.NewServeMux() + for _, handler := range s.handlers { + mux.Handle(handler.path, handler) + } + s.httpServer = &http.Server{ + Addr: fmt.Sprintf(":%d", s.port), + Handler: http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { + if isRPC(req) { + s.rpcServer.ServeHTTP(resp, req) + } else { + mux.ServeHTTP(resp, req) + } + }), + } +} + +func (s *Server) 
runServer() { + err := s.httpServer.ListenAndServeTLS(s.certPath, s.keyPath) + if err != nil && err != http.ErrServerClosed { + s.logger.Fatal("Failed to serve", zap.Error(err)) + } +} + +func isRPC(req *http.Request) bool { + if req.ProtoMajor == 2 && + strings.HasPrefix(req.Header.Get("Content-Type"), "application/grpc") { + return true + } + return false +} diff --git a/pkg/rpc/server_test.go b/pkg/rpc/server_test.go new file mode 100644 index 000000000..93e9f8da4 --- /dev/null +++ b/pkg/rpc/server_test.go @@ -0,0 +1,236 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package rpc + +import ( + "context" + "crypto/tls" + "net/http" + "os" + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + pb "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/status" + + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/token" + proto "github.com/bucketeer-io/bucketeer/proto/test" +) + +const certPath = "testdata/server.crt" +const keyPath = "testdata/server.key" + +type testService struct { +} + +func (s *testService) Register(server *grpc.Server) { + proto.RegisterTestServiceServer(server, s) +} + +func (s *testService) Test(ctx context.Context, req *proto.TestRequest) (*proto.TestResponse, error) { + return &proto.TestResponse{Message: "test"}, nil +} + +type dummyService struct { +} + +func (s *dummyService) Register(server *grpc.Server) { +} + +type dummyRegisterer struct { +} + +func (s *dummyRegisterer) MustRegister(...prometheus.Collector) { + +} + +func (s *dummyRegisterer) Unregister(prometheus.Collector) bool { + return true +} + +type dummyVerifier struct { +} + +func (v *dummyVerifier) Verify(rawIDToken string) (*token.IDToken, error) { + return &token.IDToken{ + Email: "test@email", + }, nil +} + +type dummyPerRPCCredentials struct { + Metadata map[string]string +} + +func (c dummyPerRPCCredentials) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + return c.Metadata, nil +} + +func (c dummyPerRPCCredentials) RequireTransportSecurity() bool { + return true +} + +func newServer(ctx context.Context) *Server { + logger := zap.NewExample() + health := health.NewGrpcChecker() + server := NewServer( + &testService{}, + certPath, + keyPath, + WithService(health), + WithVerifier(&dummyVerifier{}), + WithMetrics(&dummyRegisterer{}), + WithLogger(logger), + WithPort(4443), + WithHandler("/health", health), 
+ ) + return server +} + +func newRPCClient(t *testing.T, rpcCreds credentials.PerRPCCredentials) proto.TestServiceClient { + creds, err := credentials.NewClientTLSFromFile(certPath, "localhost") + if err != nil { + t.Fatal(err) + } + conn, err := grpc.Dial("localhost:4443", + grpc.WithBlock(), + grpc.WithTransportCredentials(creds), + grpc.WithTimeout(5*time.Second), + grpc.WithPerRPCCredentials(rpcCreds), + ) + if err != nil { + t.Fatal(err) + } + return proto.NewTestServiceClient(conn) +} + +func newHealthClient(t *testing.T) pb.HealthClient { + creds, err := credentials.NewClientTLSFromFile(certPath, "localhost") + if err != nil { + t.Fatal(err) + } + conn, err := grpc.Dial("localhost:4443", + grpc.WithBlock(), + grpc.WithTransportCredentials(creds), + grpc.WithTimeout(5*time.Second), + ) + if err != nil { + t.Fatal(err) + } + return pb.NewHealthClient(conn) +} + +func TestGRPCHealthHandler(t *testing.T) { + client := newHealthClient(t) + resp, err := client.Check(context.TODO(), &pb.HealthCheckRequest{}) + if err != nil { + t.Fatal(err) + } + if resp.Status != pb.HealthCheckResponse_NOT_SERVING { + t.Fatal(resp) + } +} + +func newHTTPClient() *http.Client { + /* + ** Better to do it like net/http/httptest: + ** https://golang.org/pkg/crypto/tls/#example_Dial + ** certpool := x509.NewCertPool() + ** certpool.AddCert(s.certificate) + ** s.client.Transport = &http.Transport{ + ** TLSClientConfig: &tls.Config{ + ** RootCAs: certpool, + ** }} + */ + return &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + }, + Timeout: 5 * time.Second, + } +} + +func TestHTTPHealthHandler(t *testing.T) { + client := newHTTPClient() + resp, err := client.Get("https://localhost:4443/health") + if err != nil { + t.Fatal(err) + } + if resp.StatusCode != http.StatusServiceUnavailable { + t.Fatal(resp) + } +} + +func TestRPCHandlerOK(t *testing.T) { + client := newRPCClient(t, &dummyPerRPCCredentials{ + Metadata: map[string]string{ + 
"authorization": "bearer dummy-token", + }, + }) + resp, err := client.Test(context.TODO(), &proto.TestRequest{}) + if err != nil { + t.Fatal(err) + } + if resp.Message != "test" { + t.Fatal(resp) + } +} + +func TestRPCHandlerUnauthenticated(t *testing.T) { + client := newRPCClient(t, &dummyPerRPCCredentials{}) + _, err := client.Test(context.TODO(), &proto.TestRequest{}) + if err == nil { + t.Fatal("expected an error") + } + status, ok := status.FromError(err) + if !ok { + t.FailNow() + } + if status.Code() != codes.Unauthenticated { + t.Fatal("code should be Unauthenticated") + } +} + +func TestMain(m *testing.M) { + // Because os.Exit doesn't return, we need to call defer in separated function. + code := testMain(m) + os.Exit(code) +} + +func testMain(m *testing.M) int { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + server := newServer(ctx) + defer server.Stop(time.Second) + go server.Run() + waitForServer() + return m.Run() +} + +func waitForServer() { + client := newHTTPClient() + for { + resp, err := client.Get("https://localhost:4443") + if err == nil { + resp.Body.Close() + return + } + } +} diff --git a/pkg/rpc/service.go b/pkg/rpc/service.go new file mode 100644 index 000000000..cd582e2ea --- /dev/null +++ b/pkg/rpc/service.go @@ -0,0 +1,21 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package rpc + +import "google.golang.org/grpc" + +type Service interface { + Register(server *grpc.Server) +} diff --git a/pkg/rpc/status/BUILD.bazel b/pkg/rpc/status/BUILD.bazel new file mode 100644 index 000000000..b113f939f --- /dev/null +++ b/pkg/rpc/status/BUILD.bazel @@ -0,0 +1,12 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["status.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/rpc/status", + visibility = ["//visibility:public"], + deps = [ + "@com_github_golang_protobuf//proto:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + ], +) diff --git a/pkg/rpc/status/status.go b/pkg/rpc/status/status.go new file mode 100644 index 000000000..df994cf79 --- /dev/null +++ b/pkg/rpc/status/status.go @@ -0,0 +1,28 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package status + +import ( + "github.com/golang/protobuf/proto" // nolint:staticcheck + "google.golang.org/grpc/status" +) + +func MustWithDetails(s *status.Status, details ...proto.Message) error { + dt, err := s.WithDetails(details...) 
+ if err != nil { + panic(err) + } + return dt.Err() +} diff --git a/pkg/rpc/testdata/server.crt b/pkg/rpc/testdata/server.crt new file mode 100644 index 000000000..375ebd09f --- /dev/null +++ b/pkg/rpc/testdata/server.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDPjCCAiagAwIBAgIJALaUc+KZlzMUMA0GCSqGSIb3DQEBCwUAMBQxEjAQBgNV +BAMMCWJ1Y2tldGVlcjAgFw0xOTA4MDcwODE4MTZaGA8zMDE4MTIwODA4MTgxNlow +FDESMBAGA1UEAwwJYnVja2V0ZWVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAuK0ZNyr9nf1MF9D9xsvnkqrXdNibutfuaqE0tF44rRC0/OLwGPGY9kXT +cretIZF8aO+eIeZ/a+UhRoihSrD4js1bpsrUZr7CXYf/YjqSQlM9mUkzqBAwDYLj +XJ6A1s6kxqdDgEEtTRWT30vBibkRF7Fs6gzYGAcwDOrosL7vVTweXcndfPNybFlN +kub2cKQsJSEn5mLgTbdPmHe6ewEpeE7flHpz9IT+PNAObAyDdpjViESAzUGdnwDJ +tIBx8FigcH4obcLMw8yY/VqvY40U6zdXuGK3SBQKtJIOFcRwhkbqSry8iX6SDTGL +UszoaIvxLvmJ9Y2HXi1Y+HjegYNhTQIDAQABo4GQMIGNMAkGA1UdEwQCMAAwCwYD +VR0PBAQDAgKkMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4E +FgQUEJ31pVNCN33i5qDU1UIwrxCjkuYwHwYDVR0jBBgwFoAUEJ31pVNCN33i5qDU +1UIwrxCjkuYwFAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBCwUAA4IB +AQAQS/ntHZmMR/UpPv3GHn7az9kyuVA+U4PDOJLQbdG4mpnCzJZs6N8jbB9k9h6m +SYaXxMoYhgIEP757vWsQtuJGEhDet/SAHAi1MrPpzqBilR75oN9AzBC/dKWMXHaT +CP7qp4z3SkVWqNFOieD3O67Qj6fXn38RsAq3vaOAXqLaV+sPjndzkqV2MxfDlBC0 +eBunuQCS6kVckRAYeMjvRY6GKdhueaf3sDq8els+vE+LNx2QnN/l01Mta4vvMrNe +kKR7UoFoWjCjo6IDRzcHWI1+9rXQB4ELxCpjyWJTQ/dSnxwlnI+pQ6jMBSYg4Nw/ +cwopwZcyOlmL/mxK7dy+o8Qu +-----END CERTIFICATE----- diff --git a/pkg/rpc/testdata/server.key b/pkg/rpc/testdata/server.key new file mode 100644 index 000000000..21223d63f --- /dev/null +++ b/pkg/rpc/testdata/server.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC4rRk3Kv2d/UwX +0P3Gy+eSqtd02Ju61+5qoTS0XjitELT84vAY8Zj2RdNyt60hkXxo754h5n9r5SFG +iKFKsPiOzVumytRmvsJdh/9iOpJCUz2ZSTOoEDANguNcnoDWzqTGp0OAQS1NFZPf +S8GJuREXsWzqDNgYBzAM6uiwvu9VPB5dyd1883JsWU2S5vZwpCwlISfmYuBNt0+Y +d7p7ASl4Tt+UenP0hP480A5sDIN2mNWIRIDNQZ2fAMm0gHHwWKBwfihtwszDzJj9 
+Wq9jjRTrN1e4YrdIFAq0kg4VxHCGRupKvLyJfpINMYtSzOhoi/Eu+Yn1jYdeLVj4 +eN6Bg2FNAgMBAAECggEAU+IMOgrE+CY9kfPT0aB8pxoCk4hv9AZwvO5MSkEh7TpR +eyx5clsK55H/4XOcqEq0/9UXNr7D0fZZjvgwiwSnYfXVU4V9xM2Q63sCfVOta4Lz +z9R4KjZwHTL+ous8ClYCclk3R+JS+Vh7uklmt2/gW6qzlfwPi5p4MOXim6WegE/w +P+Q3po2WyC/ggo4Givt/NQpudqQ/1da5wInr+bBgqfB75KQiSbKbW2CBp3wKr78A +jN7PnS2qMuveRZA8xwG/FH+g80mi+N7/bKlfyX2d0jq8p6+iCyLxhzGBGJua4Bl5 +8hB7/kaDc7648aHyLXxuXn7ovTf6R+Dh/ZQhmoTZ4QKBgQDqvppmulmXCgbw0hS+ +QQb4mds7dN3tYBp0LrZGij2W8VCSyUzxdPhEUi4pixT9zZPxnv5kvif9XpWVUy+7 +nGfA7/tRu2oOVvuYW7v7wM8TNNvlOw6g0Vr9GK3Y6Z2V97p77xOdUHrZNrPcGzFy +2WefwXZ1cUYfak6YsEwxq6V4YwKBgQDJZegOBlKau38TUvZUUOUlXeuyx+g9E0IL +2o3QpmyFkr+BYW8ViiviCyQWNSClIzDrPUhXZeDw8o4k41uP8di9vKb63BXRUrmK +IOQDNhAFSot6cwpjs23ihcftz17lDRnLeyZIlNbZtEd/4XJI6eY6fh2ta7YtIYZ3 +XlgGfeL2jwKBgQDqkpbVyqeV77Yp1bRWvcJKj+xsSIcwGlW0/ay1ZpTPDcs3MyLJ +MdqY4wowB0Rtro2E6B1L7F0nqemN8zeCoXNocCbScJY1SCRYBmsd0njat3p5YX9n +omzq90tZs3D+mDNofuo5zF9GSYyHiUDrhGYxVPCbwRqSb/ekSp+JDv3mAwKBgCN4 +3nlx5aS5N3WY3CgXo3SEaVow2ZfAR5a85NOGLIEOSsqn9Z/OmVIT8kQzEU/ktd4J +Ci/Skt79acnXfa4Jw8oPaz2t++3Fa0aH6oEiSYoVCiIEFyVeWhFWzNL2/ljLiOqN +Az4vcPQhS6Kbe7yZ7eIJioKcARdL21o09L4X+BzDAoGARuTsGTiZt75aEygQex6E +b6FdjfaAzPkN3q3Vm3Bg3yuLeza1lXy4VsIojmm9ssIAg6mbfYRVbBP+Y2f45M9Y +Ll4WNaIYcK+cqJGkvSlr5yhH1sBQBXLXU4UX2YZlJXbFkQQZzVfF1OdqUUtTBwVu +xoVRHpUPKTezZ+m78njC4nM= +-----END PRIVATE KEY----- diff --git a/pkg/rpc/testdata/service.config b/pkg/rpc/testdata/service.config new file mode 100644 index 000000000..7c1ad9283 --- /dev/null +++ b/pkg/rpc/testdata/service.config @@ -0,0 +1,16 @@ +[req] +x509_extensions = v3_req +distinguished_name = req_distinguished_name + +[req_distinguished_name] + +[v3_req] +basicConstraints = CA:FALSE +keyUsage = digitalSignature, keyEncipherment, keyCertSign +extendedKeyUsage = serverAuth, clientAuth +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid, issuer +subjectAltName = @alt_names + +[alt_names] +DNS.1 = localhost \ No newline at end of file diff --git 
a/pkg/storage/BUILD.bazel b/pkg/storage/BUILD.bazel new file mode 100644 index 000000000..5d1971df6 --- /dev/null +++ b/pkg/storage/BUILD.bazel @@ -0,0 +1,8 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["storage.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/storage", + visibility = ["//visibility:public"], +) diff --git a/pkg/storage/druid/BUILD.bazel b/pkg/storage/druid/BUILD.bazel new file mode 100644 index 000000000..889d50569 --- /dev/null +++ b/pkg/storage/druid/BUILD.bazel @@ -0,0 +1,14 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "broker_client.go", + "coordinator_client.go", + "druid.go", + "overlord_client.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/storage/druid", + visibility = ["//visibility:public"], + deps = ["@com_github_ca_dp_godruid//:go_default_library"], +) diff --git a/pkg/storage/druid/broker_client.go b/pkg/storage/druid/broker_client.go new file mode 100644 index 000000000..b6a2441b4 --- /dev/null +++ b/pkg/storage/druid/broker_client.go @@ -0,0 +1,67 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package druid + +import ( + "context" + "fmt" + "net" + "net/http" + "time" + + "github.com/ca-dp/godruid" +) + +type BrokerClient struct { + *godruid.Client +} + +func NewBrokerClient( + ctx context.Context, + url, + username, + password string) (*BrokerClient, error) { + return &BrokerClient{ + Client: &godruid.Client{ + Url: fmt.Sprintf("http://%s:%s@%s", username, password, url), + HttpClient: createHTTPClient(), + }, + }, nil +} + +func (c *BrokerClient) Close() {} + +// Depending on the request timing for long requests, it could get an EOF error +// because the HTTP client reuses the connection for concurrent requests. +// We set the client transport manually, so the keep-alive setting can be disabled. +func createHTTPClient() *http.Client { + t := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + ForceAttemptHTTP2: true, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + DisableKeepAlives: true, + } + return &http.Client{ + Transport: t, + } +} diff --git a/pkg/storage/druid/coordinator_client.go b/pkg/storage/druid/coordinator_client.go new file mode 100644 index 000000000..29cdd5ded --- /dev/null +++ b/pkg/storage/druid/coordinator_client.go @@ -0,0 +1,43 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package druid + +import ( + "context" + "fmt" + "net/http" + + "github.com/ca-dp/godruid" +) + +type CoordinatorClient struct { + *godruid.CoordinatorClient +} + +func NewCoordinatorClient( + ctx context.Context, + url, + username, + password string) (*CoordinatorClient, error) { + + return &CoordinatorClient{ + CoordinatorClient: &godruid.CoordinatorClient{ + Url: fmt.Sprintf("http://%s:%s@%s", username, password, url), + HttpClient: &http.Client{}, + }, + }, nil +} + +func (c *CoordinatorClient) Close() {} diff --git a/pkg/storage/druid/druid.go b/pkg/storage/druid/druid.go new file mode 100644 index 000000000..ac39cb174 --- /dev/null +++ b/pkg/storage/druid/druid.go @@ -0,0 +1,23 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package druid + +import ( + "fmt" +) + +func Datasource(prefix, dataType string) string { + return fmt.Sprintf("%s.%s", prefix, dataType) +} diff --git a/pkg/storage/druid/overlord_client.go b/pkg/storage/druid/overlord_client.go new file mode 100644 index 000000000..37cb496cd --- /dev/null +++ b/pkg/storage/druid/overlord_client.go @@ -0,0 +1,43 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package druid + +import ( + "context" + "fmt" + "net/http" + + "github.com/ca-dp/godruid" +) + +type OverlordClient struct { + *godruid.OverlordClient +} + +func NewOverlordClient( + ctx context.Context, + url, + username, + password string) (*OverlordClient, error) { + + return &OverlordClient{ + OverlordClient: &godruid.OverlordClient{ + Url: fmt.Sprintf("http://%s:%s@%s", username, password, url), + HttpClient: &http.Client{}, + }, + }, nil +} + +func (c *OverlordClient) Close() {} diff --git a/pkg/storage/kafka/BUILD.bazel b/pkg/storage/kafka/BUILD.bazel new file mode 100644 index 000000000..c471df441 --- /dev/null +++ b/pkg/storage/kafka/BUILD.bazel @@ -0,0 +1,17 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "cluster_admin.go", + "kafka.go", + "producer.go", + "scram_client.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/storage/kafka", + visibility = ["//visibility:public"], + deps = [ + "@com_github_shopify_sarama//:go_default_library", + "@com_github_xdg_scram//:go_default_library", + ], +) diff --git a/pkg/storage/kafka/cluster_admin.go b/pkg/storage/kafka/cluster_admin.go new file mode 100644 index 000000000..19246ad32 --- /dev/null +++ b/pkg/storage/kafka/cluster_admin.go @@ -0,0 +1,61 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kafka + +import ( + "context" + + "github.com/Shopify/sarama" +) + +type ClusterAdmin struct { + url string + config *sarama.Config +} + +func NewClusterAdmin( + ctx context.Context, + url, + username, + password string, +) (*ClusterAdmin, error) { + config := sarama.NewConfig() + config.Metadata.Full = true + config.Version = sarama.V2_6_0_0 + config.ClientID = "sasl_scram_client" + + config.Net.SASL.Enable = true + config.Net.SASL.User = username + config.Net.SASL.Password = password + config.Net.SASL.Handshake = true + config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { + return &XDGSCRAMClient{HashGeneratorFcn: SHA512} + } + config.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA512 + + return &ClusterAdmin{ + url: url, + config: config, + }, nil +} + +func (c *ClusterAdmin) CreateTopic(topicName string, detail *sarama.TopicDetail) error { + ca, err := sarama.NewClusterAdmin([]string{c.url}, c.config) + if err != nil { + return err + } + defer ca.Close() + return ca.CreateTopic(topicName, detail, false) +} diff --git a/pkg/storage/kafka/kafka.go b/pkg/storage/kafka/kafka.go new file mode 100644 index 000000000..3d004cdb7 --- /dev/null +++ b/pkg/storage/kafka/kafka.go @@ -0,0 +1,23 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kafka + +import ( + "fmt" +) + +func TopicName(topicPrefix, topicDataType string) string { + return fmt.Sprintf("%s.%s", topicPrefix, topicDataType) +} diff --git a/pkg/storage/kafka/producer.go b/pkg/storage/kafka/producer.go new file mode 100644 index 000000000..dc228ac91 --- /dev/null +++ b/pkg/storage/kafka/producer.go @@ -0,0 +1,59 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kafka + +import ( + "context" + + "github.com/Shopify/sarama" +) + +type Producer struct { + sarama.SyncProducer +} + +func NewProducer( + ctx context.Context, + project, + url, + userName, + password string, +) (*Producer, error) { + config := sarama.NewConfig() + config.Producer.Retry.Max = 5 + config.Producer.RequiredAcks = sarama.WaitForAll + config.Producer.Return.Successes = true + + config.Metadata.Full = true + config.Version = sarama.V2_6_0_0 + config.ClientID = "sasl_scram_client" + + config.Net.SASL.Enable = true + config.Net.SASL.User = userName + config.Net.SASL.Password = password + config.Net.SASL.Handshake = true + config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { + return &XDGSCRAMClient{HashGeneratorFcn: SHA512} + } + config.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA512 + + prd, err := sarama.NewSyncProducer([]string{url}, config) + if err != nil { + return nil, err + } + return &Producer{ + SyncProducer: prd, + }, nil +} diff --git a/pkg/storage/kafka/scram_client.go b/pkg/storage/kafka/scram_client.go new file mode 100644 index 000000000..b272f1e45 --- /dev/null +++ b/pkg/storage/kafka/scram_client.go @@ -0,0 +1,50 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kafka + +import ( + "crypto/sha256" + "crypto/sha512" + "hash" + + "github.com/xdg/scram" +) + +var SHA256 scram.HashGeneratorFcn = func() hash.Hash { return sha256.New() } +var SHA512 scram.HashGeneratorFcn = func() hash.Hash { return sha512.New() } + +type XDGSCRAMClient struct { + *scram.Client + *scram.ClientConversation + scram.HashGeneratorFcn +} + +func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) { + x.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID) + if err != nil { + return err + } + x.ClientConversation = x.Client.NewConversation() + return nil +} + +func (x *XDGSCRAMClient) Step(challenge string) (response string, err error) { + response, err = x.ClientConversation.Step(challenge) + return +} + +func (x *XDGSCRAMClient) Done() bool { + return x.ClientConversation.Done() +} diff --git a/pkg/storage/mock/BUILD.bazel b/pkg/storage/mock/BUILD.bazel new file mode 100644 index 000000000..16327180e --- /dev/null +++ b/pkg/storage/mock/BUILD.bazel @@ -0,0 +1,12 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["storage.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/storage/mock", + visibility = ["//visibility:public"], + deps = [ + "//pkg/storage:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + ], +) diff --git a/pkg/storage/mock/storage.go b/pkg/storage/mock/storage.go new file mode 100644 index 000000000..a3fa08d82 --- /dev/null +++ b/pkg/storage/mock/storage.go @@ -0,0 +1,837 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: storage.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + storage "github.com/bucketeer-io/bucketeer/pkg/storage" +) + +// MockIterator is a mock of Iterator interface. 
+type MockIterator struct { + ctrl *gomock.Controller + recorder *MockIteratorMockRecorder +} + +// MockIteratorMockRecorder is the mock recorder for MockIterator. +type MockIteratorMockRecorder struct { + mock *MockIterator +} + +// NewMockIterator creates a new mock instance. +func NewMockIterator(ctrl *gomock.Controller) *MockIterator { + mock := &MockIterator{ctrl: ctrl} + mock.recorder = &MockIteratorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockIterator) EXPECT() *MockIteratorMockRecorder { + return m.recorder +} + +// Cursor mocks base method. +func (m *MockIterator) Cursor() (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Cursor") + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Cursor indicates an expected call of Cursor. +func (mr *MockIteratorMockRecorder) Cursor() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Cursor", reflect.TypeOf((*MockIterator)(nil).Cursor)) +} + +// Next mocks base method. +func (m *MockIterator) Next(dst interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Next", dst) + ret0, _ := ret[0].(error) + return ret0 +} + +// Next indicates an expected call of Next. +func (mr *MockIteratorMockRecorder) Next(dst interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Next", reflect.TypeOf((*MockIterator)(nil).Next), dst) +} + +// MockGetter is a mock of Getter interface. +type MockGetter struct { + ctrl *gomock.Controller + recorder *MockGetterMockRecorder +} + +// MockGetterMockRecorder is the mock recorder for MockGetter. +type MockGetterMockRecorder struct { + mock *MockGetter +} + +// NewMockGetter creates a new mock instance. 
+func NewMockGetter(ctrl *gomock.Controller) *MockGetter { + mock := &MockGetter{ctrl: ctrl} + mock.recorder = &MockGetterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockGetter) EXPECT() *MockGetterMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockGetter) Get(ctx context.Context, key *storage.Key, dst interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, key, dst) + ret0, _ := ret[0].(error) + return ret0 +} + +// Get indicates an expected call of Get. +func (mr *MockGetterMockRecorder) Get(ctx, key, dst interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockGetter)(nil).Get), ctx, key, dst) +} + +// GetMulti mocks base method. +func (m *MockGetter) GetMulti(ctx context.Context, keys []*storage.Key, dst interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMulti", ctx, keys, dst) + ret0, _ := ret[0].(error) + return ret0 +} + +// GetMulti indicates an expected call of GetMulti. +func (mr *MockGetterMockRecorder) GetMulti(ctx, keys, dst interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMulti", reflect.TypeOf((*MockGetter)(nil).GetMulti), ctx, keys, dst) +} + +// MockPutter is a mock of Putter interface. +type MockPutter struct { + ctrl *gomock.Controller + recorder *MockPutterMockRecorder +} + +// MockPutterMockRecorder is the mock recorder for MockPutter. +type MockPutterMockRecorder struct { + mock *MockPutter +} + +// NewMockPutter creates a new mock instance. +func NewMockPutter(ctrl *gomock.Controller) *MockPutter { + mock := &MockPutter{ctrl: ctrl} + mock.recorder = &MockPutterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. 
+func (m *MockPutter) EXPECT() *MockPutterMockRecorder { + return m.recorder +} + +// Put mocks base method. +func (m *MockPutter) Put(ctx context.Context, key *storage.Key, src interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Put", ctx, key, src) + ret0, _ := ret[0].(error) + return ret0 +} + +// Put indicates an expected call of Put. +func (mr *MockPutterMockRecorder) Put(ctx, key, src interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockPutter)(nil).Put), ctx, key, src) +} + +// PutMulti mocks base method. +func (m *MockPutter) PutMulti(ctx context.Context, keys []*storage.Key, src interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutMulti", ctx, keys, src) + ret0, _ := ret[0].(error) + return ret0 +} + +// PutMulti indicates an expected call of PutMulti. +func (mr *MockPutterMockRecorder) PutMulti(ctx, keys, src interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutMulti", reflect.TypeOf((*MockPutter)(nil).PutMulti), ctx, keys, src) +} + +// MockGetPutter is a mock of GetPutter interface. +type MockGetPutter struct { + ctrl *gomock.Controller + recorder *MockGetPutterMockRecorder +} + +// MockGetPutterMockRecorder is the mock recorder for MockGetPutter. +type MockGetPutterMockRecorder struct { + mock *MockGetPutter +} + +// NewMockGetPutter creates a new mock instance. +func NewMockGetPutter(ctrl *gomock.Controller) *MockGetPutter { + mock := &MockGetPutter{ctrl: ctrl} + mock.recorder = &MockGetPutterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockGetPutter) EXPECT() *MockGetPutterMockRecorder { + return m.recorder +} + +// Get mocks base method. 
+func (m *MockGetPutter) Get(ctx context.Context, key *storage.Key, dst interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, key, dst) + ret0, _ := ret[0].(error) + return ret0 +} + +// Get indicates an expected call of Get. +func (mr *MockGetPutterMockRecorder) Get(ctx, key, dst interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockGetPutter)(nil).Get), ctx, key, dst) +} + +// GetMulti mocks base method. +func (m *MockGetPutter) GetMulti(ctx context.Context, keys []*storage.Key, dst interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMulti", ctx, keys, dst) + ret0, _ := ret[0].(error) + return ret0 +} + +// GetMulti indicates an expected call of GetMulti. +func (mr *MockGetPutterMockRecorder) GetMulti(ctx, keys, dst interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMulti", reflect.TypeOf((*MockGetPutter)(nil).GetMulti), ctx, keys, dst) +} + +// Put mocks base method. +func (m *MockGetPutter) Put(ctx context.Context, key *storage.Key, src interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Put", ctx, key, src) + ret0, _ := ret[0].(error) + return ret0 +} + +// Put indicates an expected call of Put. +func (mr *MockGetPutterMockRecorder) Put(ctx, key, src interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockGetPutter)(nil).Put), ctx, key, src) +} + +// PutMulti mocks base method. +func (m *MockGetPutter) PutMulti(ctx context.Context, keys []*storage.Key, src interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutMulti", ctx, keys, src) + ret0, _ := ret[0].(error) + return ret0 +} + +// PutMulti indicates an expected call of PutMulti. 
+func (mr *MockGetPutterMockRecorder) PutMulti(ctx, keys, src interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutMulti", reflect.TypeOf((*MockGetPutter)(nil).PutMulti), ctx, keys, src) +} + +// MockQuerier is a mock of Querier interface. +type MockQuerier struct { + ctrl *gomock.Controller + recorder *MockQuerierMockRecorder +} + +// MockQuerierMockRecorder is the mock recorder for MockQuerier. +type MockQuerierMockRecorder struct { + mock *MockQuerier +} + +// NewMockQuerier creates a new mock instance. +func NewMockQuerier(ctrl *gomock.Controller) *MockQuerier { + mock := &MockQuerier{ctrl: ctrl} + mock.recorder = &MockQuerierMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockQuerier) EXPECT() *MockQuerierMockRecorder { + return m.recorder +} + +// RunQuery mocks base method. +func (m *MockQuerier) RunQuery(ctx context.Context, query storage.Query) (storage.Iterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RunQuery", ctx, query) + ret0, _ := ret[0].(storage.Iterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RunQuery indicates an expected call of RunQuery. +func (mr *MockQuerierMockRecorder) RunQuery(ctx, query interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunQuery", reflect.TypeOf((*MockQuerier)(nil).RunQuery), ctx, query) +} + +// MockTransaction is a mock of Transaction interface. +type MockTransaction struct { + ctrl *gomock.Controller + recorder *MockTransactionMockRecorder +} + +// MockTransactionMockRecorder is the mock recorder for MockTransaction. +type MockTransactionMockRecorder struct { + mock *MockTransaction +} + +// NewMockTransaction creates a new mock instance. 
+func NewMockTransaction(ctrl *gomock.Controller) *MockTransaction { + mock := &MockTransaction{ctrl: ctrl} + mock.recorder = &MockTransactionMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockTransaction) EXPECT() *MockTransactionMockRecorder { + return m.recorder +} + +// Delete mocks base method. +func (m *MockTransaction) Delete(ctx context.Context, key *storage.Key) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", ctx, key) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete. +func (mr *MockTransactionMockRecorder) Delete(ctx, key interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockTransaction)(nil).Delete), ctx, key) +} + +// Get mocks base method. +func (m *MockTransaction) Get(ctx context.Context, key *storage.Key, dst interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, key, dst) + ret0, _ := ret[0].(error) + return ret0 +} + +// Get indicates an expected call of Get. +func (mr *MockTransactionMockRecorder) Get(ctx, key, dst interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockTransaction)(nil).Get), ctx, key, dst) +} + +// GetMulti mocks base method. +func (m *MockTransaction) GetMulti(ctx context.Context, keys []*storage.Key, dst interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMulti", ctx, keys, dst) + ret0, _ := ret[0].(error) + return ret0 +} + +// GetMulti indicates an expected call of GetMulti. +func (mr *MockTransactionMockRecorder) GetMulti(ctx, keys, dst interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMulti", reflect.TypeOf((*MockTransaction)(nil).GetMulti), ctx, keys, dst) +} + +// Put mocks base method. 
+func (m *MockTransaction) Put(ctx context.Context, key *storage.Key, src interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Put", ctx, key, src) + ret0, _ := ret[0].(error) + return ret0 +} + +// Put indicates an expected call of Put. +func (mr *MockTransactionMockRecorder) Put(ctx, key, src interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockTransaction)(nil).Put), ctx, key, src) +} + +// PutMulti mocks base method. +func (m *MockTransaction) PutMulti(ctx context.Context, keys []*storage.Key, src interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutMulti", ctx, keys, src) + ret0, _ := ret[0].(error) + return ret0 +} + +// PutMulti indicates an expected call of PutMulti. +func (mr *MockTransactionMockRecorder) PutMulti(ctx, keys, src interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutMulti", reflect.TypeOf((*MockTransaction)(nil).PutMulti), ctx, keys, src) +} + +// MockClient is a mock of Client interface. +type MockClient struct { + ctrl *gomock.Controller + recorder *MockClientMockRecorder +} + +// MockClientMockRecorder is the mock recorder for MockClient. +type MockClientMockRecorder struct { + mock *MockClient +} + +// NewMockClient creates a new mock instance. +func NewMockClient(ctrl *gomock.Controller) *MockClient { + mock := &MockClient{ctrl: ctrl} + mock.recorder = &MockClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockClient) EXPECT() *MockClientMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockClient) Close() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Close") +} + +// Close indicates an expected call of Close. 
+func (mr *MockClientMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockClient)(nil).Close)) +} + +// Delete mocks base method. +func (m *MockClient) Delete(ctx context.Context, key *storage.Key) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", ctx, key) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete. +func (mr *MockClientMockRecorder) Delete(ctx, key interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockClient)(nil).Delete), ctx, key) +} + +// Get mocks base method. +func (m *MockClient) Get(ctx context.Context, key *storage.Key, dst interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, key, dst) + ret0, _ := ret[0].(error) + return ret0 +} + +// Get indicates an expected call of Get. +func (mr *MockClientMockRecorder) Get(ctx, key, dst interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockClient)(nil).Get), ctx, key, dst) +} + +// GetMulti mocks base method. +func (m *MockClient) GetMulti(ctx context.Context, keys []*storage.Key, dst interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMulti", ctx, keys, dst) + ret0, _ := ret[0].(error) + return ret0 +} + +// GetMulti indicates an expected call of GetMulti. +func (mr *MockClientMockRecorder) GetMulti(ctx, keys, dst interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMulti", reflect.TypeOf((*MockClient)(nil).GetMulti), ctx, keys, dst) +} + +// Put mocks base method. 
+func (m *MockClient) Put(ctx context.Context, key *storage.Key, src interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Put", ctx, key, src) + ret0, _ := ret[0].(error) + return ret0 +} + +// Put indicates an expected call of Put. +func (mr *MockClientMockRecorder) Put(ctx, key, src interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockClient)(nil).Put), ctx, key, src) +} + +// PutMulti mocks base method. +func (m *MockClient) PutMulti(ctx context.Context, keys []*storage.Key, src interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutMulti", ctx, keys, src) + ret0, _ := ret[0].(error) + return ret0 +} + +// PutMulti indicates an expected call of PutMulti. +func (mr *MockClientMockRecorder) PutMulti(ctx, keys, src interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutMulti", reflect.TypeOf((*MockClient)(nil).PutMulti), ctx, keys, src) +} + +// RunInTransaction mocks base method. +func (m *MockClient) RunInTransaction(ctx context.Context, f func(storage.Transaction) error) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RunInTransaction", ctx, f) + ret0, _ := ret[0].(error) + return ret0 +} + +// RunInTransaction indicates an expected call of RunInTransaction. +func (mr *MockClientMockRecorder) RunInTransaction(ctx, f interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunInTransaction", reflect.TypeOf((*MockClient)(nil).RunInTransaction), ctx, f) +} + +// RunQuery mocks base method. +func (m *MockClient) RunQuery(ctx context.Context, query storage.Query) (storage.Iterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RunQuery", ctx, query) + ret0, _ := ret[0].(storage.Iterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RunQuery indicates an expected call of RunQuery. 
+func (mr *MockClientMockRecorder) RunQuery(ctx, query interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunQuery", reflect.TypeOf((*MockClient)(nil).RunQuery), ctx, query) +} + +// MockWriter is a mock of Writer interface. +type MockWriter struct { + ctrl *gomock.Controller + recorder *MockWriterMockRecorder +} + +// MockWriterMockRecorder is the mock recorder for MockWriter. +type MockWriterMockRecorder struct { + mock *MockWriter +} + +// NewMockWriter creates a new mock instance. +func NewMockWriter(ctrl *gomock.Controller) *MockWriter { + mock := &MockWriter{ctrl: ctrl} + mock.recorder = &MockWriterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockWriter) EXPECT() *MockWriterMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockWriter) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. +func (mr *MockWriterMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockWriter)(nil).Close)) +} + +// Write mocks base method. +func (m *MockWriter) Write(p []byte) (int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Write", p) + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Write indicates an expected call of Write. +func (mr *MockWriterMockRecorder) Write(p interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockWriter)(nil).Write), p) +} + +// MockReader is a mock of Reader interface. +type MockReader struct { + ctrl *gomock.Controller + recorder *MockReaderMockRecorder +} + +// MockReaderMockRecorder is the mock recorder for MockReader. 
+type MockReaderMockRecorder struct { + mock *MockReader +} + +// NewMockReader creates a new mock instance. +func NewMockReader(ctrl *gomock.Controller) *MockReader { + mock := &MockReader{ctrl: ctrl} + mock.recorder = &MockReaderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockReader) EXPECT() *MockReaderMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockReader) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. +func (mr *MockReaderMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockReader)(nil).Close)) +} + +// Read mocks base method. +func (m *MockReader) Read(p []byte) (int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Read", p) + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Read indicates an expected call of Read. +func (mr *MockReaderMockRecorder) Read(p interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Read", reflect.TypeOf((*MockReader)(nil).Read), p) +} + +// MockDeleter is a mock of Deleter interface. +type MockDeleter struct { + ctrl *gomock.Controller + recorder *MockDeleterMockRecorder +} + +// MockDeleterMockRecorder is the mock recorder for MockDeleter. +type MockDeleterMockRecorder struct { + mock *MockDeleter +} + +// NewMockDeleter creates a new mock instance. +func NewMockDeleter(ctrl *gomock.Controller) *MockDeleter { + mock := &MockDeleter{ctrl: ctrl} + mock.recorder = &MockDeleterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDeleter) EXPECT() *MockDeleterMockRecorder { + return m.recorder +} + +// Delete mocks base method. 
+func (m *MockDeleter) Delete(ctx context.Context, key *storage.Key) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", ctx, key) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete. +func (mr *MockDeleterMockRecorder) Delete(ctx, key interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockDeleter)(nil).Delete), ctx, key) +} + +// MockObjectStorageClient is a mock of ObjectStorageClient interface. +type MockObjectStorageClient struct { + ctrl *gomock.Controller + recorder *MockObjectStorageClientMockRecorder +} + +// MockObjectStorageClientMockRecorder is the mock recorder for MockObjectStorageClient. +type MockObjectStorageClientMockRecorder struct { + mock *MockObjectStorageClient +} + +// NewMockObjectStorageClient creates a new mock instance. +func NewMockObjectStorageClient(ctrl *gomock.Controller) *MockObjectStorageClient { + mock := &MockObjectStorageClient{ctrl: ctrl} + mock.recorder = &MockObjectStorageClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockObjectStorageClient) EXPECT() *MockObjectStorageClientMockRecorder { + return m.recorder +} + +// Bucket mocks base method. +func (m *MockObjectStorageClient) Bucket(ctx context.Context, bucket string) (storage.Bucket, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Bucket", ctx, bucket) + ret0, _ := ret[0].(storage.Bucket) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Bucket indicates an expected call of Bucket. +func (mr *MockObjectStorageClientMockRecorder) Bucket(ctx, bucket interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Bucket", reflect.TypeOf((*MockObjectStorageClient)(nil).Bucket), ctx, bucket) +} + +// Close mocks base method. 
+func (m *MockObjectStorageClient) Close() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Close") +} + +// Close indicates an expected call of Close. +func (mr *MockObjectStorageClientMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockObjectStorageClient)(nil).Close)) +} + +// MockBucket is a mock of Bucket interface. +type MockBucket struct { + ctrl *gomock.Controller + recorder *MockBucketMockRecorder +} + +// MockBucketMockRecorder is the mock recorder for MockBucket. +type MockBucketMockRecorder struct { + mock *MockBucket +} + +// NewMockBucket creates a new mock instance. +func NewMockBucket(ctrl *gomock.Controller) *MockBucket { + mock := &MockBucket{ctrl: ctrl} + mock.recorder = &MockBucketMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockBucket) EXPECT() *MockBucketMockRecorder { + return m.recorder +} + +// Delete mocks base method. +func (m *MockBucket) Delete(ctx context.Context, key *storage.Key) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", ctx, key) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete. +func (mr *MockBucketMockRecorder) Delete(ctx, key interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockBucket)(nil).Delete), ctx, key) +} + +// Reader mocks base method. +func (m *MockBucket) Reader(ctx context.Context, environmentNamespace, filename string) (storage.Reader, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Reader", ctx, environmentNamespace, filename) + ret0, _ := ret[0].(storage.Reader) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Reader indicates an expected call of Reader. 
+func (mr *MockBucketMockRecorder) Reader(ctx, environmentNamespace, filename interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reader", reflect.TypeOf((*MockBucket)(nil).Reader), ctx, environmentNamespace, filename) +} + +// Writer mocks base method. +func (m *MockBucket) Writer(ctx context.Context, environmentNamespace, filename string, CRC32C uint32) (storage.Writer, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Writer", ctx, environmentNamespace, filename, CRC32C) + ret0, _ := ret[0].(storage.Writer) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Writer indicates an expected call of Writer. +func (mr *MockBucketMockRecorder) Writer(ctx, environmentNamespace, filename, CRC32C interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Writer", reflect.TypeOf((*MockBucket)(nil).Writer), ctx, environmentNamespace, filename, CRC32C) +} + +// MockObject is a mock of Object interface. +type MockObject struct { + ctrl *gomock.Controller + recorder *MockObjectMockRecorder +} + +// MockObjectMockRecorder is the mock recorder for MockObject. +type MockObjectMockRecorder struct { + mock *MockObject +} + +// NewMockObject creates a new mock instance. +func NewMockObject(ctrl *gomock.Controller) *MockObject { + mock := &MockObject{ctrl: ctrl} + mock.recorder = &MockObjectMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockObject) EXPECT() *MockObjectMockRecorder { + return m.recorder +} + +// Delete mocks base method. +func (m *MockObject) Delete(ctx context.Context, key *storage.Key) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", ctx, key) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete. 
+func (mr *MockObjectMockRecorder) Delete(ctx, key interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockObject)(nil).Delete), ctx, key) +} + +// Reader mocks base method. +func (m *MockObject) Reader(ctx context.Context, environmentNamespace, filename string) (storage.Reader, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Reader", ctx, environmentNamespace, filename) + ret0, _ := ret[0].(storage.Reader) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Reader indicates an expected call of Reader. +func (mr *MockObjectMockRecorder) Reader(ctx, environmentNamespace, filename interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reader", reflect.TypeOf((*MockObject)(nil).Reader), ctx, environmentNamespace, filename) +} + +// Writer mocks base method. +func (m *MockObject) Writer(ctx context.Context, environmentNamespace, filename string, CRC32C uint32) (storage.Writer, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Writer", ctx, environmentNamespace, filename, CRC32C) + ret0, _ := ret[0].(storage.Writer) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Writer indicates an expected call of Writer. +func (mr *MockObjectMockRecorder) Writer(ctx, environmentNamespace, filename, CRC32C interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Writer", reflect.TypeOf((*MockObject)(nil).Writer), ctx, environmentNamespace, filename, CRC32C) +} diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go new file mode 100644 index 000000000..b59a67e01 --- /dev/null +++ b/pkg/storage/storage.go @@ -0,0 +1,186 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package storage + +import ( + "context" + "errors" + "fmt" + "io" +) + +const ( + AdminEnvironmentNamespace = "" + + OrderDirectionAsc OrderDirection = 0 + OrderDirectionDesc OrderDirection = 1 + + // use 0 instead of -1 because it's used for cap to make slice in storage lister. + QueryUnlimited = 0 +) + +var ( + ErrConcurrentTransaction = errors.New("storage: concurrent transaction in progress") + ErrKeyAlreadyExists = errors.New("storage: key already exists") + ErrKeyNotFound = errors.New("storage: key not found") + ErrIteratorDone = errors.New("storage: iterator is done") + ErrInvalidCursor = errors.New("storage: cursor is invalid") + ErrBucketNotExist = errors.New("storage: bucket doesn't exist") + ErrObjectNotExist = errors.New("storage: object doesn't exist") + ErrEmptyName = errors.New("storage: name is empty") + ErrInvalidName = errors.New("storage: invalid name") +) + +// MultiError is returned by batch operations when there are errors with +// particular elements. Errors will be in a one-to-one correspondence with +// the input elements; successful elements will have a nil entry. 
+type MultiError []error + +func (m MultiError) Error() string { + s, n := "", 0 + for _, e := range m { + if e != nil { + if n == 0 { + s = e.Error() + } + n++ + } + } + switch n { + case 0: + return "(0 errors)" + case 1: + return s + case 2: + return s + " (and 1 other error)" + } + return fmt.Sprintf("%s (and %d other errors)", s, n-1) +} + +type Key struct { + ID string + Kind string + // If it is empty string, query will be executed in admin namespace. + // If not, query will be executed in namespace for target environment. + EnvironmentNamespace string +} + +func NewKey(id, kind, environmentNamespace string) *Key { + return &Key{ID: id, Kind: kind, EnvironmentNamespace: environmentNamespace} +} + +type Iterator interface { + Next(dst interface{}) error + Cursor() (string, error) +} + +type Getter interface { + Get(ctx context.Context, key *Key, dst interface{}) error + GetMulti(ctx context.Context, keys []*Key, dst interface{}) error +} + +type Putter interface { + Put(ctx context.Context, key *Key, src interface{}) error + PutMulti(ctx context.Context, keys []*Key, src interface{}) error +} + +type GetPutter interface { + Getter + Putter +} + +type Query struct { + Kind string + Limit int + StartCursor string + Orders []*Order + Filters []*Filter + // If it is empty string, query will be executed in admin namespace. + // If not, query will be executed in namespace for target environment. 
+ EnvironmentNamespace string +} + +type Filter struct { + Property string + Operator string + Value interface{} +} + +func NewFilter(property, operator string, value interface{}) *Filter { + return &Filter{ + Property: property, + Operator: operator, + Value: value, + } +} + +type OrderDirection int + +type Order struct { + Property string + Direction OrderDirection +} + +func NewOrder(property string, direction OrderDirection) *Order { + return &Order{ + Property: property, + Direction: direction, + } +} + +type Querier interface { + RunQuery(ctx context.Context, query Query) (Iterator, error) +} + +type Transaction interface { + GetPutter + Deleter +} + +type Client interface { + GetPutter + Deleter + Querier + RunInTransaction(ctx context.Context, f func(t Transaction) error) error + Close() +} + +type Writer interface { + io.WriteCloser +} + +type Reader interface { + io.ReadCloser +} + +type Deleter interface { + Delete(ctx context.Context, key *Key) error +} + +type ObjectStorageClient interface { + Bucket(ctx context.Context, bucket string) (Bucket, error) + Close() +} + +type Bucket interface { + Object +} + +type Object interface { + Writer(ctx context.Context, environmentNamespace, filename string, CRC32C uint32) (Writer, error) + Reader(ctx context.Context, environmentNamespace, filename string) (Reader, error) + Deleter +} diff --git a/pkg/storage/testing/BUILD.bazel b/pkg/storage/testing/BUILD.bazel new file mode 100644 index 000000000..5d0c88e7d --- /dev/null +++ b/pkg/storage/testing/BUILD.bazel @@ -0,0 +1,15 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "file.go", + "storage.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/storage/testing", + visibility = ["//visibility:public"], + deps = [ + "//pkg/storage:go_default_library", + "@com_github_golang_protobuf//proto:go_default_library", + ], +) diff --git a/pkg/storage/testing/file.go b/pkg/storage/testing/file.go 
new file mode 100644 index 000000000..8c3028967 --- /dev/null +++ b/pkg/storage/testing/file.go @@ -0,0 +1,52 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testing + +import ( + "context" + + "github.com/bucketeer-io/bucketeer/pkg/storage" +) + +type inMemoryStorageBucket struct { +} + +func NewInMemoryStorageBucket() storage.Bucket { + return &inMemoryStorageBucket{} +} + +func (f *inMemoryStorageBucket) Delete(ctx context.Context, key *storage.Key) error { + // TODO + return nil +} + +func (f *inMemoryStorageBucket) Writer( + ctx context.Context, + environmentNamespace, + filename string, + CRC32C uint32, +) (storage.Writer, error) { + // TODO + return nil, nil +} + +func (f *inMemoryStorageBucket) Reader( + ctx context.Context, + environmentNamespace, + filename string, +) (storage.Reader, error) { + // TODO + return nil, nil +} diff --git a/pkg/storage/testing/storage.go b/pkg/storage/testing/storage.go new file mode 100644 index 000000000..a9669b119 --- /dev/null +++ b/pkg/storage/testing/storage.go @@ -0,0 +1,173 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package testing
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"reflect"
+	"sync"
+
+	"github.com/golang/protobuf/proto" // nolint:staticcheck
+
+	"github.com/bucketeer-io/bucketeer/pkg/storage"
+)
+
+var (
+	errSourceMustBeProtoMessage = errors.New("storage: source is not a proto message")
+	errMultiArgTypeInvalid      = errors.New("storage: src has invalid type")
+	errDifferentLength          = errors.New("storage: keys and src slices have different length")
+)
+
+type iterator struct {
+}
+
+func (i *iterator) Next(dst interface{}) error {
+	return storage.ErrIteratorDone
+}
+
+func (i *iterator) Cursor() (string, error) {
+	return "", nil
+}
+
+type inMemoryStorage struct {
+	data  map[string]interface{}
+	mutex sync.Mutex
+}
+
+func NewInMemoryStorage() storage.Client {
+	return &inMemoryStorage{
+		data: make(map[string]interface{}),
+	}
+}
+
+func (s *inMemoryStorage) Get(ctx context.Context, key *storage.Key, dst interface{}) error {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	if val, ok := s.data[s.key(key)]; ok {
+		err := proto.Unmarshal(val.([]byte), dst.(proto.Message))
+		if err != nil {
+			return err
+		}
+		return nil
+	}
+	return storage.ErrKeyNotFound
+}
+
+func (s *inMemoryStorage) GetMulti(ctx context.Context, keys []*storage.Key, dst interface{}) error {
+	values := reflect.ValueOf(dst)
+	if values.Kind() != reflect.Slice {
+		return errMultiArgTypeInvalid
+	}
+	if len(keys) != values.Len() {
+		return errDifferentLength
+	}
+	if len(keys) == 0 {
+		return nil
+	}
+	s.mutex.Lock(); defer s.mutex.Unlock(); multiErr, any := make(storage.MultiError, len(keys)), false // lock guards s.data like Get/Put do (race fix)
+	for i, key := range keys {
+		if val, ok := s.data[s.key(key)]; ok {
+			if !values.Index(i).CanInterface() {
+				return errMultiArgTypeInvalid
+			}
+			msg, ok := values.Index(i).Interface().(proto.Message)
+			if !ok {
+				return errSourceMustBeProtoMessage
+			}
+			err := proto.Unmarshal(val.([]byte), msg)
+			if err != nil {
+				return err
+			}
+		} else {
+			multiErr[i], any = storage.ErrKeyNotFound, true
+		}
+	}
+	if any {
+		return multiErr
+	}
+	return nil
+}
+
+func (s *inMemoryStorage) Put(ctx context.Context, key *storage.Key, src interface{}) error {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	msg, ok := src.(proto.Message)
+	if !ok {
+		return errSourceMustBeProtoMessage
+	}
+	buffer, err := proto.Marshal(msg)
+	if err != nil {
+		return err
+	}
+	s.data[s.key(key)] = buffer
+	return nil
+}
+
+func (s *inMemoryStorage) PutMulti(ctx context.Context, keys []*storage.Key, src interface{}) error {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	values := reflect.ValueOf(src)
+	if values.Kind() != reflect.Slice {
+		return errMultiArgTypeInvalid
+	}
+	if len(keys) != values.Len() {
+		return errDifferentLength
+	}
+	if len(keys) == 0 {
+		return nil
+	}
+	for i, key := range keys {
+		if !values.Index(i).CanInterface() {
+			return errMultiArgTypeInvalid
+		}
+		msg, ok := values.Index(i).Interface().(proto.Message)
+		if !ok {
+			return errSourceMustBeProtoMessage
+		}
+		buffer, err := proto.Marshal(msg)
+		if err != nil {
+			return err
+		}
+		s.data[s.key(key)] = buffer
+	}
+	return nil
+}
+
+func (s *inMemoryStorage) RunQuery(ctx context.Context, query storage.Query) (storage.Iterator, error) {
+	return &iterator{}, nil
+}
+
+func (s *inMemoryStorage) RunInTransaction(ctx context.Context, f func(t storage.Transaction) error) error {
+	return f(s)
+}
+
+func (s *inMemoryStorage) Delete(ctx context.Context, key *storage.Key) error {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	delete(s.data, s.key(key))
+	return nil
+}
+
+func (s *inMemoryStorage) Close() {
+}
+
+func (s *inMemoryStorage) key(key *storage.Key) string {
+ if key.EnvironmentNamespace == storage.AdminEnvironmentNamespace { + return fmt.Sprintf("%s:%s", key.Kind, key.ID) + } + return fmt.Sprintf("%s:%s:%s", key.EnvironmentNamespace, key.Kind, key.ID) +} diff --git a/pkg/storage/v2/bigtable/BUILD.bazel b/pkg/storage/v2/bigtable/BUILD.bazel new file mode 100644 index 000000000..50d2cd81f --- /dev/null +++ b/pkg/storage/v2/bigtable/BUILD.bazel @@ -0,0 +1,33 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "client.go", + "metrics.go", + "request.go", + "rows.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/storage/v2/bigtable", + visibility = ["//visibility:public"], + deps = [ + "//pkg/metrics:go_default_library", + "@com_github_prometheus_client_golang//prometheus:go_default_library", + "@com_google_cloud_go_bigtable//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "request_test.go", + "rows_test.go", + ], + embed = [":go_default_library"], + deps = [ + "@com_github_stretchr_testify//assert:go_default_library", + "@com_google_cloud_go_bigtable//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/storage/v2/bigtable/client.go b/pkg/storage/v2/bigtable/client.go new file mode 100644 index 000000000..0255bafc8 --- /dev/null +++ b/pkg/storage/v2/bigtable/client.go @@ -0,0 +1,248 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package bigtable + +import ( + "context" + "errors" + + "cloud.google.com/go/bigtable" + "go.uber.org/zap" + + "github.com/bucketeer-io/bucketeer/pkg/metrics" +) + +var ( + ErrKeyNotFound = errors.New("storage: key not found") + ErrColumnFamilyNotFound = errors.New("storage: column family not found") + ErrColumnNotFound = errors.New("storage: column not found") + ErrInternal = errors.New("storage: internal") + errFailedToWritePartialRows = errors.New("storage: failed to write partial rows") +) + +type Reader interface { + ReadRows(ctx context.Context, request *ReadRequest) (Rows, error) +} + +type Writer interface { + WriteRow(ctx context.Context, request *WriteRequest) error + WriteRows(ctx context.Context, request *WriteRequest) error +} + +type ReadWriter interface { + Reader + Writer +} + +type Client interface { + Reader + Writer + Close() error +} + +type client struct { + client *bigtable.Client + opts *options + logger *zap.Logger +} + +type options struct { + metrics metrics.Registerer + logger *zap.Logger +} + +type Option func(*options) + +func WithMetrics(r metrics.Registerer) Option { + return func(opts *options) { + opts.metrics = r + } +} + +func WithLogger(logger *zap.Logger) Option { + return func(opts *options) { + opts.logger = logger + } +} + +func NewBigtableClient( + ctx context.Context, + projectID, instance string, + opts ...Option, +) (Client, error) { + dopts := &options{ + logger: zap.NewNop(), + } + for _, opt := range opts { + opt(dopts) + } + if dopts.metrics != nil { + registerMetrics(dopts.metrics) + } + logger := dopts.logger.Named("bigtable") + c, err := bigtable.NewClient(ctx, projectID, instance) + if err != nil { + logger.Error("Failed to create bigtable client", zap.Error(err)) + return nil, err + } + return &client{ + client: c, + opts: 
dopts, + logger: logger, + }, nil +} + +func (c *client) ReadRows(ctx context.Context, req *ReadRequest) (Rows, error) { + var err error + defer record()(operationReadRows, &err) + // Row set + var rowSet bigtable.RowSet + if req.RowSet.get() != nil { + rowSet = req.RowSet.get() + } else { + rowSet = bigtable.RowRange{} // Read all keys + } + var rs []bigtable.Row + if len(req.RowFilters) == 0 { + rs, err = c.readRows(ctx, req.TableName, rowSet) + } else { + rs, err = c.readRowsWithFilter(ctx, req.TableName, rowSet, req.RowFilters) + } + if err != nil { + c.logger.Error("Failed to read rows", zap.Error(err)) + return nil, ErrInternal + } + if len(rs) == 0 { + err = ErrKeyNotFound + return nil, ErrKeyNotFound + } + return &rows{ + rows: rs, + columnFamily: req.ColumnFamily, + logger: c.logger, + }, nil +} + +func (c *client) readRows( + ctx context.Context, + tableName string, + rowSet bigtable.RowSet, +) ([]bigtable.Row, error) { + tbl := c.client.Open(tableName) + var rs []bigtable.Row + err := tbl.ReadRows( + ctx, + rowSet, + func(row bigtable.Row) bool { + rs = append(rs, row) + return true + }, + ) + if err != nil { + return nil, err + } + return rs, nil +} + +func (c *client) readRowsWithFilter( + ctx context.Context, + tableName string, + rowSet bigtable.RowSet, + rowFilters []RowFilter, +) ([]bigtable.Row, error) { + // Read filters + rf := makeFilters(rowFilters) + tbl := c.client.Open(tableName) + var rs []bigtable.Row + err := tbl.ReadRows( + ctx, + rowSet, + func(row bigtable.Row) bool { + rs = append(rs, row) + return true + }, + bigtable.RowFilter(rf), + ) + if err != nil { + return nil, err + } + return rs, nil +} + +func (c *client) WriteRow(ctx context.Context, req *WriteRequest) error { + var err error + defer record()(operationWriteRow, &err) + mut := bigtable.NewMutation() + mut.Set(req.ColumnFamily, req.ColumnName, bigtable.Now(), req.Items[0].Value) + tbl := c.client.Open(req.TableName) + if err = tbl.Apply(ctx, req.Items[0].Key, mut); err != 
nil { + c.logger.Error("Failed to write row", zap.Error(err), zap.String("rowKey", req.Items[0].Key)) + return ErrInternal + } + return nil +} + +func (c *client) WriteRows(ctx context.Context, req *WriteRequest) error { + var err error + var errs []error + defer record()(operationWriteRows, &err) + muts := make([]*bigtable.Mutation, 0, len(req.Items)) + rowKeys := make([]string, 0, len(req.Items)) + for _, item := range req.Items { + mut := bigtable.NewMutation() + mut.Set(req.ColumnFamily, req.ColumnName, bigtable.Now(), item.Value) + muts = append(muts, mut) + rowKeys = append(rowKeys, item.Key) + } + tbl := c.client.Open(req.TableName) + errs, err = tbl.ApplyBulk(ctx, rowKeys, muts) + if err != nil { + c.logger.Error("Failed to write rows", + zap.Error(err), + zap.Strings("rowKeys", rowKeys)) + return ErrInternal + } + if errs != nil { + err = errFailedToWritePartialRows + c.logger.Error("Failed to write partial rows", + zap.Int("errs size", len(errs)), + zap.Errors("errs", errs), + zap.Strings("rowKeys", rowKeys)) + return ErrInternal + } + return nil +} + +func (c *client) Close() error { + var err error + defer record()(operationClose, &err) + if err = c.client.Close(); err != nil { + c.logger.Error("Failed to close bigtable client", zap.Error(err)) + return ErrInternal + } + return nil +} + +func makeFilters(filters []RowFilter) bigtable.Filter { + if len(filters) == 1 { + return filters[0].get() + } + chainFilters := make([]bigtable.Filter, 0, len(filters)) + for _, filter := range filters { + chainFilters = append(chainFilters, filter.get()) + } + return bigtable.ChainFilters(chainFilters...) +} diff --git a/pkg/storage/v2/bigtable/metrics.go b/pkg/storage/v2/bigtable/metrics.go new file mode 100644 index 000000000..94b7057eb --- /dev/null +++ b/pkg/storage/v2/bigtable/metrics.go @@ -0,0 +1,94 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigtable + +import ( + "context" + "time" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/bucketeer-io/bucketeer/pkg/metrics" +) + +const ( + operationReadItems = "ReadItems" + operationReadRows = "ReadRows" + operationWriteRow = "WriteRow" + operationWriteRows = "WriteRows" + operationClose = "Close" + + codeOK = "OK" + codeKeyNotFound = "KeyNotFound" + codeColumnFamilyNotFound = "ColumnFamilyNotFound" + codeColumnNotFound = "ColumnNotFound" + codeFailedToWritePartialRows = "FailedToWritePartialRows" + codeDeadlineExceeded = "DeadlineExceeded" + codeCanceled = "Canceled" + codeUnknown = "Unknown" +) + +var ( + handledCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "bucketeer", + Subsystem: "bigtable", + Name: "handled_total", + Help: "Total number of completed operations.", + }, []string{"operation", "code"}) + + handledHistogram = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "bucketeer", + Subsystem: "bigtable", + Name: "handling_seconds", + Help: "Histogram of operation response latency (seconds).", + Buckets: prometheus.DefBuckets, + }, []string{"operation", "code"}) +) + +func record() func(operation string, err *error) { + startTime := time.Now() + return func(operation string, err *error) { + var code string + switch *err { + case nil: + code = codeOK + case ErrKeyNotFound: + code = codeKeyNotFound + case 
ErrColumnFamilyNotFound: + code = codeColumnFamilyNotFound + case ErrColumnNotFound: + code = codeColumnNotFound + case errFailedToWritePartialRows: + code = codeFailedToWritePartialRows + case context.DeadlineExceeded: + code = codeDeadlineExceeded + case context.Canceled: + code = codeCanceled + default: + code = codeUnknown + } + handledCounter.WithLabelValues(operation, code).Inc() + handledHistogram.WithLabelValues(operation, code).Observe(time.Since(startTime).Seconds()) + } +} + +func registerMetrics(r metrics.Registerer) { + r.MustRegister( + handledCounter, + handledHistogram, + ) +} diff --git a/pkg/storage/v2/bigtable/mock/BUILD.bazel b/pkg/storage/v2/bigtable/mock/BUILD.bazel new file mode 100644 index 000000000..f57ce1f43 --- /dev/null +++ b/pkg/storage/v2/bigtable/mock/BUILD.bazel @@ -0,0 +1,12 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["client.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/storage/v2/bigtable/mock", + visibility = ["//visibility:public"], + deps = [ + "//pkg/storage/v2/bigtable:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + ], +) diff --git a/pkg/storage/v2/bigtable/mock/client.go b/pkg/storage/v2/bigtable/mock/client.go new file mode 100644 index 000000000..5338d4233 --- /dev/null +++ b/pkg/storage/v2/bigtable/mock/client.go @@ -0,0 +1,249 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: client.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + bigtable "github.com/bucketeer-io/bucketeer/pkg/storage/v2/bigtable" +) + +// MockReader is a mock of Reader interface. +type MockReader struct { + ctrl *gomock.Controller + recorder *MockReaderMockRecorder +} + +// MockReaderMockRecorder is the mock recorder for MockReader. 
+type MockReaderMockRecorder struct { + mock *MockReader +} + +// NewMockReader creates a new mock instance. +func NewMockReader(ctrl *gomock.Controller) *MockReader { + mock := &MockReader{ctrl: ctrl} + mock.recorder = &MockReaderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockReader) EXPECT() *MockReaderMockRecorder { + return m.recorder +} + +// ReadRows mocks base method. +func (m *MockReader) ReadRows(ctx context.Context, request *bigtable.ReadRequest) (bigtable.Rows, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReadRows", ctx, request) + ret0, _ := ret[0].(bigtable.Rows) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReadRows indicates an expected call of ReadRows. +func (mr *MockReaderMockRecorder) ReadRows(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadRows", reflect.TypeOf((*MockReader)(nil).ReadRows), ctx, request) +} + +// MockWriter is a mock of Writer interface. +type MockWriter struct { + ctrl *gomock.Controller + recorder *MockWriterMockRecorder +} + +// MockWriterMockRecorder is the mock recorder for MockWriter. +type MockWriterMockRecorder struct { + mock *MockWriter +} + +// NewMockWriter creates a new mock instance. +func NewMockWriter(ctrl *gomock.Controller) *MockWriter { + mock := &MockWriter{ctrl: ctrl} + mock.recorder = &MockWriterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockWriter) EXPECT() *MockWriterMockRecorder { + return m.recorder +} + +// WriteRow mocks base method. +func (m *MockWriter) WriteRow(ctx context.Context, request *bigtable.WriteRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteRow", ctx, request) + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteRow indicates an expected call of WriteRow. 
+func (mr *MockWriterMockRecorder) WriteRow(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteRow", reflect.TypeOf((*MockWriter)(nil).WriteRow), ctx, request) +} + +// WriteRows mocks base method. +func (m *MockWriter) WriteRows(ctx context.Context, request *bigtable.WriteRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteRows", ctx, request) + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteRows indicates an expected call of WriteRows. +func (mr *MockWriterMockRecorder) WriteRows(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteRows", reflect.TypeOf((*MockWriter)(nil).WriteRows), ctx, request) +} + +// MockReadWriter is a mock of ReadWriter interface. +type MockReadWriter struct { + ctrl *gomock.Controller + recorder *MockReadWriterMockRecorder +} + +// MockReadWriterMockRecorder is the mock recorder for MockReadWriter. +type MockReadWriterMockRecorder struct { + mock *MockReadWriter +} + +// NewMockReadWriter creates a new mock instance. +func NewMockReadWriter(ctrl *gomock.Controller) *MockReadWriter { + mock := &MockReadWriter{ctrl: ctrl} + mock.recorder = &MockReadWriterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockReadWriter) EXPECT() *MockReadWriterMockRecorder { + return m.recorder +} + +// ReadRows mocks base method. +func (m *MockReadWriter) ReadRows(ctx context.Context, request *bigtable.ReadRequest) (bigtable.Rows, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReadRows", ctx, request) + ret0, _ := ret[0].(bigtable.Rows) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReadRows indicates an expected call of ReadRows. 
+func (mr *MockReadWriterMockRecorder) ReadRows(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadRows", reflect.TypeOf((*MockReadWriter)(nil).ReadRows), ctx, request) +} + +// WriteRow mocks base method. +func (m *MockReadWriter) WriteRow(ctx context.Context, request *bigtable.WriteRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteRow", ctx, request) + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteRow indicates an expected call of WriteRow. +func (mr *MockReadWriterMockRecorder) WriteRow(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteRow", reflect.TypeOf((*MockReadWriter)(nil).WriteRow), ctx, request) +} + +// WriteRows mocks base method. +func (m *MockReadWriter) WriteRows(ctx context.Context, request *bigtable.WriteRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteRows", ctx, request) + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteRows indicates an expected call of WriteRows. +func (mr *MockReadWriterMockRecorder) WriteRows(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteRows", reflect.TypeOf((*MockReadWriter)(nil).WriteRows), ctx, request) +} + +// MockClient is a mock of Client interface. +type MockClient struct { + ctrl *gomock.Controller + recorder *MockClientMockRecorder +} + +// MockClientMockRecorder is the mock recorder for MockClient. +type MockClientMockRecorder struct { + mock *MockClient +} + +// NewMockClient creates a new mock instance. +func NewMockClient(ctrl *gomock.Controller) *MockClient { + mock := &MockClient{ctrl: ctrl} + mock.recorder = &MockClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. 
+func (m *MockClient) EXPECT() *MockClientMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockClient) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. +func (mr *MockClientMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockClient)(nil).Close)) +} + +// ReadRows mocks base method. +func (m *MockClient) ReadRows(ctx context.Context, request *bigtable.ReadRequest) (bigtable.Rows, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReadRows", ctx, request) + ret0, _ := ret[0].(bigtable.Rows) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReadRows indicates an expected call of ReadRows. +func (mr *MockClientMockRecorder) ReadRows(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadRows", reflect.TypeOf((*MockClient)(nil).ReadRows), ctx, request) +} + +// WriteRow mocks base method. +func (m *MockClient) WriteRow(ctx context.Context, request *bigtable.WriteRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteRow", ctx, request) + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteRow indicates an expected call of WriteRow. +func (mr *MockClientMockRecorder) WriteRow(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteRow", reflect.TypeOf((*MockClient)(nil).WriteRow), ctx, request) +} + +// WriteRows mocks base method. +func (m *MockClient) WriteRows(ctx context.Context, request *bigtable.WriteRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteRows", ctx, request) + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteRows indicates an expected call of WriteRows. 
+func (mr *MockClientMockRecorder) WriteRows(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteRows", reflect.TypeOf((*MockClient)(nil).WriteRows), ctx, request) +} diff --git a/pkg/storage/v2/bigtable/request.go b/pkg/storage/v2/bigtable/request.go new file mode 100644 index 000000000..2e193598a --- /dev/null +++ b/pkg/storage/v2/bigtable/request.go @@ -0,0 +1,102 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigtable + +import ( + "fmt" + + "cloud.google.com/go/bigtable" +) + +func NewKey(environmentNamespace, key string) string { + if environmentNamespace == "" { + return fmt.Sprintf("default#%s", key) + } + return fmt.Sprintf("%s#%s", environmentNamespace, key) +} + +// All fields are required +type ReadRequest struct { + TableName string + ColumnFamily string + RowSet RowSet + RowFilters []RowFilter // Optional +} + +// All fields are required +type WriteRequest struct { + TableName string + ColumnFamily string + ColumnName string + Items []*WriteItem +} + +type RowFilter interface { + get() bigtable.Filter +} + +// LatestNFilter returns a filter that matches the most recent N cells in each column. 
+type LatestNFilter int + +func (r LatestNFilter) get() bigtable.Filter { + return bigtable.LatestNFilter(int(r)) +} + +// ColumnFilter returns a filter that matches cells whose column name +type ColumnFilter string + +func (r ColumnFilter) get() bigtable.Filter { + return bigtable.ColumnFilter(string(r)) +} + +// RowSet is a set of rows to be read. +// It is satisfied by RowKey, RowList, RowPrefix, and RowPrefixRange. +type RowSet interface { + get() bigtable.RowSet +} + +// The row key. +type RowKey string + +func (r RowKey) get() bigtable.RowSet { + // SingleRow returns a RowSet for reading a single row. + return bigtable.SingleRow(string(r)) +} + +// RowList is a sequence of row keys. +type RowList []string + +func (r RowList) get() bigtable.RowSet { + keys := make(bigtable.RowList, 0, len(r)) + return append(keys, r...) +} + +// PrefixRange returns a RowRange consisting of all keys starting with the prefix. +type RowPrefix string + +func (r RowPrefix) get() bigtable.RowSet { + return bigtable.PrefixRange(string(r)) +} + +// A RowRange is a half-open interval (start, limit) encompassing +// all the rows with keys at least as large as Start, and less than Limit. +type RowPrefixRange struct { + Start string + Limit string +} + +func (r RowPrefixRange) get() bigtable.RowSet { + return bigtable.NewRange(r.Start, r.Limit) +} diff --git a/pkg/storage/v2/bigtable/request_test.go b/pkg/storage/v2/bigtable/request_test.go new file mode 100644 index 000000000..28b2c50d2 --- /dev/null +++ b/pkg/storage/v2/bigtable/request_test.go @@ -0,0 +1,46 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigtable + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewKey(t *testing.T) { + t.Parallel() + patterns := []struct { + desc string + key, environmentNamespace, columnFamily string + expected string + }{ + { + desc: "Valid without environmentNamespace", + key: "user-id#tag", + environmentNamespace: "", + expected: "default#user-id#tag", + }, + { + desc: "Valid with environmentNamespace", + key: "user-id#tag", + environmentNamespace: "environmentNamespace", + expected: "environmentNamespace#user-id#tag", + }, + } + for _, p := range patterns { + assert.Equal(t, p.expected, NewKey(p.environmentNamespace, p.key)) + } +} diff --git a/pkg/storage/v2/bigtable/rows.go b/pkg/storage/v2/bigtable/rows.go new file mode 100644 index 000000000..1a97fb1f9 --- /dev/null +++ b/pkg/storage/v2/bigtable/rows.go @@ -0,0 +1,87 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bigtable + +import ( + "fmt" + + "cloud.google.com/go/bigtable" + "go.uber.org/zap" +) + +// A ReadItem is returned by ReadItems. A ReadItem contains data from a specific row and column. +type ReadItem struct { + RowKey, Column string + Timestamp int64 + Value []byte +} + +// A WriteItem contains the key and value to write an item to a specific row +type WriteItem struct { + Key string + Value []byte +} + +type Rows interface { + ReadItems(column string) ([]*ReadItem, error) +} + +type rows struct { + rows []bigtable.Row + columnFamily string + logger *zap.Logger +} + +func (r *rows) ReadItems(column string) (readItems []*ReadItem, err error) { + defer record()(operationReadItems, &err) + for _, row := range r.rows { + var items []*ReadItem + items, err = r.getColumnItems(row, column) + if err != nil { + r.logger.Error("Failed to read items by column", + zap.Error(err), + zap.String("columnFamily", r.columnFamily), + zap.String("column", column), + ) + return nil, err + } + readItems = append(readItems, items...) + } + return readItems, nil +} + +func (r *rows) getColumnItems(row bigtable.Row, column string) ([]*ReadItem, error) { + items, ok := row[r.columnFamily] + if !ok { + return nil, ErrColumnFamilyNotFound + } + var readItems []*ReadItem + col := fmt.Sprintf("%s:%s", r.columnFamily, column) + for _, item := range items { + if item.Column == col { + i := &ReadItem{ + RowKey: item.Row, + Column: item.Column, + Timestamp: item.Timestamp.Time().Unix(), + Value: item.Value, + } + readItems = append(readItems, i) + } + } + if len(readItems) == 0 { + return nil, ErrColumnNotFound + } + return readItems, nil +} diff --git a/pkg/storage/v2/bigtable/rows_test.go b/pkg/storage/v2/bigtable/rows_test.go new file mode 100644 index 000000000..2e910958e --- /dev/null +++ b/pkg/storage/v2/bigtable/rows_test.go @@ -0,0 +1,141 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigtable + +import ( + "testing" + + "cloud.google.com/go/bigtable" + "github.com/stretchr/testify/assert" + "go.uber.org/zap" +) + +func TestItems(t *testing.T) { + t.Parallel() + patterns := []struct { + desc string + rows *rows + expected []*ReadItem + expectedError error + }{ + { + desc: "ErrColumnFamilyNotFound", + rows: &rows{ + rows: []bigtable.Row{ + map[string][]bigtable.ReadItem{"columnFamily-2": { + { + Row: "Row-1", + Column: "columnFamily:Column", + Timestamp: 0, + Value: []byte("Value-1"), + }, + }}, + }, + logger: zap.NewNop(), + }, + expected: nil, + expectedError: ErrColumnFamilyNotFound, + }, + { + desc: "Valid", + rows: &rows{ + rows: []bigtable.Row{ + map[string][]bigtable.ReadItem{"columnFamily": { + { + Row: "Row-1", + Column: "columnFamily:Column", + Timestamp: 0, + Value: []byte("Value-1"), + }, + }}, + map[string][]bigtable.ReadItem{"columnFamily": { + { + Row: "Row-2", + Column: "columnFamily:Column-1", + Timestamp: 0, + Value: []byte("Value-1"), + }, + { + Row: "Row-2", + Column: "columnFamily:Column", + Timestamp: 0, + Value: []byte("Value-2"), + }, + }}, + map[string][]bigtable.ReadItem{"columnFamily": { + { + Row: "Row-3", + Column: "columnFamily:Column-1", + Timestamp: 0, + Value: []byte("Value-1"), + }, + { + Row: "Row-3", + Column: "columnFamily:Column-2", + Timestamp: 0, + Value: []byte("Value-2"), + }, + { + Row: "Row-3", + Column: "columnFamily:Column", + 
Timestamp: 0, + Value: []byte("Value-4"), + }, + { + Row: "Row-3", + Column: "columnFamily:Column", + Timestamp: 0, + Value: []byte("Value-3"), + }, + }}, + }, + columnFamily: "columnFamily", + logger: zap.NewNop(), + }, + expected: []*ReadItem{ + { + RowKey: "Row-1", + Column: "columnFamily:Column", + Timestamp: 0, + Value: []byte("Value-1"), + }, + { + RowKey: "Row-2", + Column: "columnFamily:Column", + Timestamp: 0, + Value: []byte("Value-2"), + }, + { + RowKey: "Row-3", + Column: "columnFamily:Column", + Timestamp: 0, + Value: []byte("Value-4"), + }, + { + RowKey: "Row-3", + Column: "columnFamily:Column", + Timestamp: 0, + Value: []byte("Value-3"), + }, + }, + expectedError: nil, + }, + } + for _, p := range patterns { + items, err := p.rows.ReadItems("Column") + assert.Equal(t, p.expected, items) + assert.Equal(t, p.expectedError, err) + } +} diff --git a/pkg/storage/v2/mysql/BUILD.bazel b/pkg/storage/v2/mysql/BUILD.bazel new file mode 100644 index 000000000..a6838c4d2 --- /dev/null +++ b/pkg/storage/v2/mysql/BUILD.bazel @@ -0,0 +1,40 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "client.go", + "error.go", + "json.go", + "jsonpb.go", + "metrics.go", + "query.go", + "result.go", + "transaction.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql", + visibility = ["//visibility:public"], + deps = [ + "//pkg/metrics:go_default_library", + "@com_github_go_sql_driver_mysql//:go_default_library", + "@com_github_golang_protobuf//jsonpb:go_default_library_gen", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_prometheus_client_golang//prometheus:go_default_library", + "@com_github_vividcortex_mysqlerr//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "error_test.go", + "query_test.go", + ], + embed = [":go_default_library"], + deps = [ + 
"@com_github_go_sql_driver_mysql//:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_vividcortex_mysqlerr//:go_default_library", + ], +) diff --git a/pkg/storage/v2/mysql/client.go b/pkg/storage/v2/mysql/client.go new file mode 100644 index 000000000..d3576b9de --- /dev/null +++ b/pkg/storage/v2/mysql/client.go @@ -0,0 +1,191 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package mysql + +import ( + "context" + "database/sql" + "fmt" + "time" + + "go.uber.org/zap" + + "github.com/bucketeer-io/bucketeer/pkg/metrics" +) + +const dsnParams = "collation=utf8mb4_bin" + +type options struct { + connMaxLifetime time.Duration + maxOpenConns int + maxIdleConns int + logger *zap.Logger + metrics metrics.Registerer +} + +func defaultOptions() *options { + return &options{ + connMaxLifetime: 300 * time.Second, + maxOpenConns: 10, + maxIdleConns: 5, + logger: zap.NewNop(), + } +} + +type Option func(*options) + +func WithConnMaxLifetime(cml time.Duration) Option { + return func(opts *options) { + opts.connMaxLifetime = cml + } +} + +func WithMaxOpenConns(moc int) Option { + return func(opts *options) { + opts.maxOpenConns = moc + } +} + +func WithMaxIdleConns(mic int) Option { + return func(opts *options) { + opts.maxIdleConns = mic + } +} + +func WithLogger(logger *zap.Logger) Option { + return 
func(opts *options) { + opts.logger = logger + } +} + +func WithMetrics(r metrics.Registerer) Option { + return func(opts *options) { + opts.metrics = r + } +} + +type Queryer interface { + QueryContext(ctx context.Context, query string, args ...interface{}) (Rows, error) + QueryRowContext(ctx context.Context, query string, args ...interface{}) Row +} + +type Execer interface { + ExecContext(ctx context.Context, query string, args ...interface{}) (Result, error) +} + +type QueryExecer interface { + Queryer + Execer +} + +type Client interface { + QueryExecer + Close() error + BeginTx(ctx context.Context) (Transaction, error) + RunInTransaction(ctx context.Context, tx Transaction, f func() error) error +} + +type client struct { + db *sql.DB + opts *options + logger *zap.Logger +} + +func NewClient( + ctx context.Context, + dbUser, dbPass, dbHost string, + dbPort int, + dbName string, + opts ...Option, +) (Client, error) { + dopts := defaultOptions() + for _, opt := range opts { + opt(dopts) + } + if dopts.metrics != nil { + registerMetrics(dopts.metrics) + } + logger := dopts.logger.Named("mysql") + dsn := fmt.Sprintf( + "%s:%s@tcp(%s:%d)/%s?%s", + dbUser, dbPass, dbHost, dbPort, dbName, dsnParams, + ) + db, err := sql.Open("mysql", dsn) + if err != nil { + logger.Error("Failed to open db", zap.Error(err)) + return nil, err + } + db.SetConnMaxLifetime(dopts.connMaxLifetime) + db.SetMaxOpenConns(dopts.maxOpenConns) + db.SetMaxIdleConns(dopts.maxIdleConns) + if err := db.PingContext(ctx); err != nil { + logger.Error("Failed to ping db", zap.Error(err)) + return nil, err + } + return &client{ + db: db, + opts: dopts, + logger: logger, + }, nil +} + +func (c *client) Close() error { + return c.db.Close() +} + +func (c *client) ExecContext(ctx context.Context, query string, args ...interface{}) (Result, error) { + var err error + defer record()(operationExec, &err) + sret, err := c.db.ExecContext(ctx, query, args...) 
+ err = convertMySQLError(err) + return &result{sret}, err +} + +func (c *client) QueryContext(ctx context.Context, query string, args ...interface{}) (Rows, error) { + var err error + defer record()(operationQuery, &err) + srows, err := c.db.QueryContext(ctx, query, args...) + return &rows{srows}, err +} + +func (c *client) QueryRowContext(ctx context.Context, query string, args ...interface{}) Row { + var err error + defer record()(operationQueryRow, &err) + r := &row{c.db.QueryRowContext(ctx, query, args...)} + err = r.Err() + return r +} + +func (c *client) BeginTx(ctx context.Context) (Transaction, error) { + var err error + defer record()(operationBeginTx, &err) + stx, err := c.db.BeginTx(ctx, nil) + return &transaction{stx}, err +} + +func (c *client) RunInTransaction(ctx context.Context, tx Transaction, f func() error) error { + var err error + defer record()(operationRunInTransaction, &err) + defer func() { + if err != nil { + tx.Rollback() // nolint:errcheck + } + }() + if err = f(); err == nil { + err = tx.Commit() + } + return err +} diff --git a/pkg/storage/v2/mysql/error.go b/pkg/storage/v2/mysql/error.go new file mode 100644 index 000000000..e930e5457 --- /dev/null +++ b/pkg/storage/v2/mysql/error.go @@ -0,0 +1,43 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mysql + +import ( + "errors" + + "github.com/VividCortex/mysqlerr" + libmysql "github.com/go-sql-driver/mysql" +) + +var ( + ErrNoRows = errors.New("mysql: no rows") + ErrTxDone = errors.New("mysql: tx done") + + // errors converted from MySQLError + ErrDuplicateEntry = errors.New("mysql: duplicate entry") +) + +func convertMySQLError(err error) error { + if err == nil { + return nil + } + if mysqlErr, ok := err.(*libmysql.MySQLError); ok { + switch mysqlErr.Number { + case mysqlerr.ER_DUP_ENTRY: + return ErrDuplicateEntry + } + } + return err +} diff --git a/pkg/storage/v2/mysql/error_test.go b/pkg/storage/v2/mysql/error_test.go new file mode 100644 index 000000000..ca07e1fe8 --- /dev/null +++ b/pkg/storage/v2/mysql/error_test.go @@ -0,0 +1,51 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mysql + +import ( + "errors" + "testing" + + "github.com/VividCortex/mysqlerr" + libmysql "github.com/go-sql-driver/mysql" + "github.com/stretchr/testify/assert" +) + +func TestConvertMySQLError(t *testing.T) { + t.Parallel() + patterns := map[string]struct { + input error + expected error + }{ + "nil": { + input: nil, + expected: nil, + }, + "mysql error: ErrDuplicateEntry": { + input: &libmysql.MySQLError{Number: mysqlerr.ER_DUP_ENTRY}, + expected: ErrDuplicateEntry, + }, + "non mysql error": { + input: errors.New("non mysql error"), + expected: errors.New("non mysql error"), + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + actual := convertMySQLError(p.input) + assert.Equal(t, p.expected, actual) + }) + } +} diff --git a/pkg/storage/v2/mysql/json.go b/pkg/storage/v2/mysql/json.go new file mode 100644 index 000000000..d508bf2c8 --- /dev/null +++ b/pkg/storage/v2/mysql/json.go @@ -0,0 +1,42 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package mysql
+
+import (
+	"database/sql/driver"
+	"encoding/json"
+	"errors"
+)
+
+// JSONObject wraps an arbitrary Go value so it can be written to and read
+// from a MySQL JSON column; it implements driver.Valuer and sql.Scanner.
+type JSONObject struct {
+	Val interface{}
+}
+
+// Value marshals Val to its JSON encoding for storage.
+func (o JSONObject) Value() (driver.Value, error) {
+	return json.Marshal(o.Val)
+}
+
+// Scan decodes a JSON column value ([]byte) into Val.
+// A nil source (SQL NULL) is accepted and leaves Val unchanged; any
+// source type other than []byte or nil is rejected with an error.
+func (o *JSONObject) Scan(src interface{}) error {
+	var _src []byte
+	switch s := src.(type) {
+	case []byte:
+		_src = s
+	case nil:
+		return nil
+	default:
+		return errors.New("incompatible type for JSONObject")
+	}
+	return json.Unmarshal(_src, &o.Val)
+}
diff --git a/pkg/storage/v2/mysql/jsonpb.go b/pkg/storage/v2/mysql/jsonpb.go
new file mode 100644
index 000000000..2714483b2
--- /dev/null
+++ b/pkg/storage/v2/mysql/jsonpb.go
@@ -0,0 +1,55 @@
+// Copyright 2022 The Bucketeer Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mysql
+
+import (
+	"bytes"
+	"database/sql/driver"
+	"errors"
+
+	"github.com/golang/protobuf/jsonpb" // nolint:staticcheck
+	"github.com/golang/protobuf/proto" // nolint:staticcheck
+)
+
+var (
+	// OrigName keeps the original (snake_case) proto field names in the
+	// stored JSON; AllowUnknownFields ignores JSON fields with no matching
+	// proto field when reading back.
+	marshaler    = jsonpb.Marshaler{OrigName: true}
+	unmarshaller = jsonpb.Unmarshaler{AllowUnknownFields: true}
+)
+
+// JSONPBObject wraps a proto.Message stored as JSON in a MySQL column;
+// it implements driver.Valuer and sql.Scanner.
+type JSONPBObject struct {
+	Val proto.Message
+}
+
+// Value marshals Val with the package marshaler and returns the JSON bytes.
+func (o JSONPBObject) Value() (driver.Value, error) {
+	var buf bytes.Buffer
+	err := marshaler.Marshal(&buf, o.Val)
+	if err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+// Scan decodes a JSON column value ([]byte) into Val.
+// A nil source (SQL NULL) leaves Val unchanged; any source type other
+// than []byte or nil is rejected with an error.
+// NOTE(review): looks like Val must already point at an allocated message
+// before Scan is called — confirm callers initialize it.
+func (o *JSONPBObject) Scan(src interface{}) error {
+	var _src []byte
+	switch s := src.(type) {
+	case []byte:
+		_src = s
+	case nil:
+		return nil
+	default:
+		return errors.New("incompatible type for JSONPBObject")
+	}
+	return unmarshaller.Unmarshal(bytes.NewReader(_src), o.Val)
+}
diff --git a/pkg/storage/v2/mysql/metrics.go b/pkg/storage/v2/mysql/metrics.go
new file mode 100644
index 000000000..3c5920f4d
--- /dev/null
+++ b/pkg/storage/v2/mysql/metrics.go
@@ -0,0 +1,86 @@
+// Copyright 2022 The Bucketeer Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mysql
+
+import (
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+
+	"github.com/bucketeer-io/bucketeer/pkg/metrics"
+)
+
+const (
+	// "operation" label values.
+	operationExec             = "Exec"
+	operationQuery            = "Query"
+	operationQueryRow         = "QueryRow"
+	operationBeginTx          = "BeginTx"
+	operationRunInTransaction = "RunInTransaction"
+	operationCommit           = "Commit"
+	operationRollback         = "Rollback"
+
+	// "code" label values, derived from this package's sentinel errors.
+	codeOK             = "OK"
+	codeNoRows         = "NoRows"
+	codeTxDone         = "TxDone"
+	codeDuplicateEntry = "DuplicateEntry"
+	codeUnknown        = "Unknown"
+)
+
+var (
+	handledCounter = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Namespace: "bucketeer",
+			Subsystem: "mysql",
+			Name:      "handled_total",
+			Help:      "Total number of completed operations.",
+		}, []string{"operation", "code"})
+
+	handledHistogram = prometheus.NewHistogramVec(
+		prometheus.HistogramOpts{
+			Namespace: "bucketeer",
+			Subsystem: "mysql",
+			Name:      "handling_seconds",
+			Help:      "Histogram of operation response latency (seconds).",
+			Buckets:   prometheus.DefBuckets,
+		}, []string{"operation", "code"})
+)
+
+// record starts a latency measurement and returns a completion callback.
+// The callback takes a POINTER to the caller's error so that, when
+// deferred, it observes the error value as set at the end of the
+// operation rather than at defer time.
+func record() func(operation string, err *error) {
+	startTime := time.Now()
+	return func(operation string, err *error) {
+		var code string
+		// Plain equality is sufficient here: convertMySQLError returns
+		// exactly these sentinel values.
+		switch *err {
+		case nil:
+			code = codeOK
+		case ErrNoRows:
+			code = codeNoRows
+		case ErrTxDone:
+			code = codeTxDone
+		case ErrDuplicateEntry:
+			code = codeDuplicateEntry
+		default:
+			code = codeUnknown
+		}
+		handledCounter.WithLabelValues(operation, code).Inc()
+		handledHistogram.WithLabelValues(operation, code).Observe(time.Since(startTime).Seconds())
+	}
+}
+
+// registerMetrics registers this package's collectors on the given
+// registerer; MustRegister panics on duplicate registration.
+func registerMetrics(r metrics.Registerer) {
+	r.MustRegister(
+		handledCounter,
+		handledHistogram,
+	)
+}
diff --git a/pkg/storage/v2/mysql/mock/BUILD.bazel b/pkg/storage/v2/mysql/mock/BUILD.bazel
new file mode 100644
index 000000000..07e118a3d
--- /dev/null
+++ b/pkg/storage/v2/mysql/mock/BUILD.bazel
@@ -0,0 +1,17 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+
"client.go", + "query.go", + "result.go", + "transaction.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock", + visibility = ["//visibility:public"], + deps = [ + "//pkg/storage/v2/mysql:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + ], +) diff --git a/pkg/storage/v2/mysql/mock/client.go b/pkg/storage/v2/mysql/mock/client.go new file mode 100644 index 000000000..8da76b032 --- /dev/null +++ b/pkg/storage/v2/mysql/mock/client.go @@ -0,0 +1,326 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: client.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + mysql "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" +) + +// MockQueryer is a mock of Queryer interface. +type MockQueryer struct { + ctrl *gomock.Controller + recorder *MockQueryerMockRecorder +} + +// MockQueryerMockRecorder is the mock recorder for MockQueryer. +type MockQueryerMockRecorder struct { + mock *MockQueryer +} + +// NewMockQueryer creates a new mock instance. +func NewMockQueryer(ctrl *gomock.Controller) *MockQueryer { + mock := &MockQueryer{ctrl: ctrl} + mock.recorder = &MockQueryerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockQueryer) EXPECT() *MockQueryerMockRecorder { + return m.recorder +} + +// QueryContext mocks base method. +func (m *MockQueryer) QueryContext(ctx context.Context, query string, args ...interface{}) (mysql.Rows, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, query} + for _, a := range args { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "QueryContext", varargs...) + ret0, _ := ret[0].(mysql.Rows) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// QueryContext indicates an expected call of QueryContext. 
+func (mr *MockQueryerMockRecorder) QueryContext(ctx, query interface{}, args ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, query}, args...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "QueryContext", reflect.TypeOf((*MockQueryer)(nil).QueryContext), varargs...) +} + +// QueryRowContext mocks base method. +func (m *MockQueryer) QueryRowContext(ctx context.Context, query string, args ...interface{}) mysql.Row { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, query} + for _, a := range args { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "QueryRowContext", varargs...) + ret0, _ := ret[0].(mysql.Row) + return ret0 +} + +// QueryRowContext indicates an expected call of QueryRowContext. +func (mr *MockQueryerMockRecorder) QueryRowContext(ctx, query interface{}, args ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, query}, args...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "QueryRowContext", reflect.TypeOf((*MockQueryer)(nil).QueryRowContext), varargs...) +} + +// MockExecer is a mock of Execer interface. +type MockExecer struct { + ctrl *gomock.Controller + recorder *MockExecerMockRecorder +} + +// MockExecerMockRecorder is the mock recorder for MockExecer. +type MockExecerMockRecorder struct { + mock *MockExecer +} + +// NewMockExecer creates a new mock instance. +func NewMockExecer(ctrl *gomock.Controller) *MockExecer { + mock := &MockExecer{ctrl: ctrl} + mock.recorder = &MockExecerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockExecer) EXPECT() *MockExecerMockRecorder { + return m.recorder +} + +// ExecContext mocks base method. 
+func (m *MockExecer) ExecContext(ctx context.Context, query string, args ...interface{}) (mysql.Result, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, query} + for _, a := range args { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ExecContext", varargs...) + ret0, _ := ret[0].(mysql.Result) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ExecContext indicates an expected call of ExecContext. +func (mr *MockExecerMockRecorder) ExecContext(ctx, query interface{}, args ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, query}, args...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecContext", reflect.TypeOf((*MockExecer)(nil).ExecContext), varargs...) +} + +// MockQueryExecer is a mock of QueryExecer interface. +type MockQueryExecer struct { + ctrl *gomock.Controller + recorder *MockQueryExecerMockRecorder +} + +// MockQueryExecerMockRecorder is the mock recorder for MockQueryExecer. +type MockQueryExecerMockRecorder struct { + mock *MockQueryExecer +} + +// NewMockQueryExecer creates a new mock instance. +func NewMockQueryExecer(ctrl *gomock.Controller) *MockQueryExecer { + mock := &MockQueryExecer{ctrl: ctrl} + mock.recorder = &MockQueryExecerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockQueryExecer) EXPECT() *MockQueryExecerMockRecorder { + return m.recorder +} + +// ExecContext mocks base method. +func (m *MockQueryExecer) ExecContext(ctx context.Context, query string, args ...interface{}) (mysql.Result, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, query} + for _, a := range args { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ExecContext", varargs...) + ret0, _ := ret[0].(mysql.Result) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ExecContext indicates an expected call of ExecContext. 
+func (mr *MockQueryExecerMockRecorder) ExecContext(ctx, query interface{}, args ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, query}, args...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecContext", reflect.TypeOf((*MockQueryExecer)(nil).ExecContext), varargs...) +} + +// QueryContext mocks base method. +func (m *MockQueryExecer) QueryContext(ctx context.Context, query string, args ...interface{}) (mysql.Rows, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, query} + for _, a := range args { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "QueryContext", varargs...) + ret0, _ := ret[0].(mysql.Rows) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// QueryContext indicates an expected call of QueryContext. +func (mr *MockQueryExecerMockRecorder) QueryContext(ctx, query interface{}, args ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, query}, args...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "QueryContext", reflect.TypeOf((*MockQueryExecer)(nil).QueryContext), varargs...) +} + +// QueryRowContext mocks base method. +func (m *MockQueryExecer) QueryRowContext(ctx context.Context, query string, args ...interface{}) mysql.Row { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, query} + for _, a := range args { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "QueryRowContext", varargs...) + ret0, _ := ret[0].(mysql.Row) + return ret0 +} + +// QueryRowContext indicates an expected call of QueryRowContext. +func (mr *MockQueryExecerMockRecorder) QueryRowContext(ctx, query interface{}, args ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, query}, args...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "QueryRowContext", reflect.TypeOf((*MockQueryExecer)(nil).QueryRowContext), varargs...) +} + +// MockClient is a mock of Client interface. 
+type MockClient struct { + ctrl *gomock.Controller + recorder *MockClientMockRecorder +} + +// MockClientMockRecorder is the mock recorder for MockClient. +type MockClientMockRecorder struct { + mock *MockClient +} + +// NewMockClient creates a new mock instance. +func NewMockClient(ctrl *gomock.Controller) *MockClient { + mock := &MockClient{ctrl: ctrl} + mock.recorder = &MockClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockClient) EXPECT() *MockClientMockRecorder { + return m.recorder +} + +// BeginTx mocks base method. +func (m *MockClient) BeginTx(ctx context.Context) (mysql.Transaction, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BeginTx", ctx) + ret0, _ := ret[0].(mysql.Transaction) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BeginTx indicates an expected call of BeginTx. +func (mr *MockClientMockRecorder) BeginTx(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginTx", reflect.TypeOf((*MockClient)(nil).BeginTx), ctx) +} + +// Close mocks base method. +func (m *MockClient) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. +func (mr *MockClientMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockClient)(nil).Close)) +} + +// ExecContext mocks base method. +func (m *MockClient) ExecContext(ctx context.Context, query string, args ...interface{}) (mysql.Result, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, query} + for _, a := range args { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ExecContext", varargs...) + ret0, _ := ret[0].(mysql.Result) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ExecContext indicates an expected call of ExecContext. 
+func (mr *MockClientMockRecorder) ExecContext(ctx, query interface{}, args ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, query}, args...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecContext", reflect.TypeOf((*MockClient)(nil).ExecContext), varargs...) +} + +// QueryContext mocks base method. +func (m *MockClient) QueryContext(ctx context.Context, query string, args ...interface{}) (mysql.Rows, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, query} + for _, a := range args { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "QueryContext", varargs...) + ret0, _ := ret[0].(mysql.Rows) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// QueryContext indicates an expected call of QueryContext. +func (mr *MockClientMockRecorder) QueryContext(ctx, query interface{}, args ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, query}, args...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "QueryContext", reflect.TypeOf((*MockClient)(nil).QueryContext), varargs...) +} + +// QueryRowContext mocks base method. +func (m *MockClient) QueryRowContext(ctx context.Context, query string, args ...interface{}) mysql.Row { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, query} + for _, a := range args { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "QueryRowContext", varargs...) + ret0, _ := ret[0].(mysql.Row) + return ret0 +} + +// QueryRowContext indicates an expected call of QueryRowContext. +func (mr *MockClientMockRecorder) QueryRowContext(ctx, query interface{}, args ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, query}, args...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "QueryRowContext", reflect.TypeOf((*MockClient)(nil).QueryRowContext), varargs...) +} + +// RunInTransaction mocks base method. 
+func (m *MockClient) RunInTransaction(ctx context.Context, tx mysql.Transaction, f func() error) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RunInTransaction", ctx, tx, f) + ret0, _ := ret[0].(error) + return ret0 +} + +// RunInTransaction indicates an expected call of RunInTransaction. +func (mr *MockClientMockRecorder) RunInTransaction(ctx, tx, f interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunInTransaction", reflect.TypeOf((*MockClient)(nil).RunInTransaction), ctx, tx, f) +} diff --git a/pkg/storage/v2/mysql/mock/query.go b/pkg/storage/v2/mysql/mock/query.go new file mode 100644 index 000000000..ffe1aafe6 --- /dev/null +++ b/pkg/storage/v2/mysql/mock/query.go @@ -0,0 +1,49 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: query.go + +// Package mock is a generated GoMock package. +package mock + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockWherePart is a mock of WherePart interface. +type MockWherePart struct { + ctrl *gomock.Controller + recorder *MockWherePartMockRecorder +} + +// MockWherePartMockRecorder is the mock recorder for MockWherePart. +type MockWherePartMockRecorder struct { + mock *MockWherePart +} + +// NewMockWherePart creates a new mock instance. +func NewMockWherePart(ctrl *gomock.Controller) *MockWherePart { + mock := &MockWherePart{ctrl: ctrl} + mock.recorder = &MockWherePartMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockWherePart) EXPECT() *MockWherePartMockRecorder { + return m.recorder +} + +// SQLString mocks base method. +func (m *MockWherePart) SQLString() (string, []interface{}) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SQLString") + ret0, _ := ret[0].(string) + ret1, _ := ret[1].([]interface{}) + return ret0, ret1 +} + +// SQLString indicates an expected call of SQLString. 
+func (mr *MockWherePartMockRecorder) SQLString() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SQLString", reflect.TypeOf((*MockWherePart)(nil).SQLString)) +} diff --git a/pkg/storage/v2/mysql/mock/result.go b/pkg/storage/v2/mysql/mock/result.go new file mode 100644 index 000000000..492e074fd --- /dev/null +++ b/pkg/storage/v2/mysql/mock/result.go @@ -0,0 +1,202 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: result.go + +// Package mock is a generated GoMock package. +package mock + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockResult is a mock of Result interface. +type MockResult struct { + ctrl *gomock.Controller + recorder *MockResultMockRecorder +} + +// MockResultMockRecorder is the mock recorder for MockResult. +type MockResultMockRecorder struct { + mock *MockResult +} + +// NewMockResult creates a new mock instance. +func NewMockResult(ctrl *gomock.Controller) *MockResult { + mock := &MockResult{ctrl: ctrl} + mock.recorder = &MockResultMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockResult) EXPECT() *MockResultMockRecorder { + return m.recorder +} + +// LastInsertId mocks base method. +func (m *MockResult) LastInsertId() (int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LastInsertId") + ret0, _ := ret[0].(int64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// LastInsertId indicates an expected call of LastInsertId. +func (mr *MockResultMockRecorder) LastInsertId() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastInsertId", reflect.TypeOf((*MockResult)(nil).LastInsertId)) +} + +// RowsAffected mocks base method. 
+func (m *MockResult) RowsAffected() (int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RowsAffected") + ret0, _ := ret[0].(int64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RowsAffected indicates an expected call of RowsAffected. +func (mr *MockResultMockRecorder) RowsAffected() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RowsAffected", reflect.TypeOf((*MockResult)(nil).RowsAffected)) +} + +// MockRow is a mock of Row interface. +type MockRow struct { + ctrl *gomock.Controller + recorder *MockRowMockRecorder +} + +// MockRowMockRecorder is the mock recorder for MockRow. +type MockRowMockRecorder struct { + mock *MockRow +} + +// NewMockRow creates a new mock instance. +func NewMockRow(ctrl *gomock.Controller) *MockRow { + mock := &MockRow{ctrl: ctrl} + mock.recorder = &MockRowMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockRow) EXPECT() *MockRowMockRecorder { + return m.recorder +} + +// Err mocks base method. +func (m *MockRow) Err() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Err") + ret0, _ := ret[0].(error) + return ret0 +} + +// Err indicates an expected call of Err. +func (mr *MockRowMockRecorder) Err() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Err", reflect.TypeOf((*MockRow)(nil).Err)) +} + +// Scan mocks base method. +func (m *MockRow) Scan(dest ...interface{}) error { + m.ctrl.T.Helper() + varargs := []interface{}{} + for _, a := range dest { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Scan", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// Scan indicates an expected call of Scan. +func (mr *MockRowMockRecorder) Scan(dest ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Scan", reflect.TypeOf((*MockRow)(nil).Scan), dest...) 
+} + +// MockRows is a mock of Rows interface. +type MockRows struct { + ctrl *gomock.Controller + recorder *MockRowsMockRecorder +} + +// MockRowsMockRecorder is the mock recorder for MockRows. +type MockRowsMockRecorder struct { + mock *MockRows +} + +// NewMockRows creates a new mock instance. +func NewMockRows(ctrl *gomock.Controller) *MockRows { + mock := &MockRows{ctrl: ctrl} + mock.recorder = &MockRowsMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockRows) EXPECT() *MockRowsMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockRows) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. +func (mr *MockRowsMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockRows)(nil).Close)) +} + +// Err mocks base method. +func (m *MockRows) Err() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Err") + ret0, _ := ret[0].(error) + return ret0 +} + +// Err indicates an expected call of Err. +func (mr *MockRowsMockRecorder) Err() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Err", reflect.TypeOf((*MockRows)(nil).Err)) +} + +// Next mocks base method. +func (m *MockRows) Next() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Next") + ret0, _ := ret[0].(bool) + return ret0 +} + +// Next indicates an expected call of Next. +func (mr *MockRowsMockRecorder) Next() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Next", reflect.TypeOf((*MockRows)(nil).Next)) +} + +// Scan mocks base method. 
+func (m *MockRows) Scan(dest ...interface{}) error { + m.ctrl.T.Helper() + varargs := []interface{}{} + for _, a := range dest { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Scan", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// Scan indicates an expected call of Scan. +func (mr *MockRowsMockRecorder) Scan(dest ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Scan", reflect.TypeOf((*MockRows)(nil).Scan), dest...) +} diff --git a/pkg/storage/v2/mysql/mock/transaction.go b/pkg/storage/v2/mysql/mock/transaction.go new file mode 100644 index 000000000..58cf9cc39 --- /dev/null +++ b/pkg/storage/v2/mysql/mock/transaction.go @@ -0,0 +1,124 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: transaction.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + mysql "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" +) + +// MockTransaction is a mock of Transaction interface. +type MockTransaction struct { + ctrl *gomock.Controller + recorder *MockTransactionMockRecorder +} + +// MockTransactionMockRecorder is the mock recorder for MockTransaction. +type MockTransactionMockRecorder struct { + mock *MockTransaction +} + +// NewMockTransaction creates a new mock instance. +func NewMockTransaction(ctrl *gomock.Controller) *MockTransaction { + mock := &MockTransaction{ctrl: ctrl} + mock.recorder = &MockTransactionMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockTransaction) EXPECT() *MockTransactionMockRecorder { + return m.recorder +} + +// Commit mocks base method. +func (m *MockTransaction) Commit() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Commit") + ret0, _ := ret[0].(error) + return ret0 +} + +// Commit indicates an expected call of Commit. 
+func (mr *MockTransactionMockRecorder) Commit() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Commit", reflect.TypeOf((*MockTransaction)(nil).Commit)) +} + +// ExecContext mocks base method. +func (m *MockTransaction) ExecContext(ctx context.Context, query string, args ...interface{}) (mysql.Result, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, query} + for _, a := range args { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ExecContext", varargs...) + ret0, _ := ret[0].(mysql.Result) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ExecContext indicates an expected call of ExecContext. +func (mr *MockTransactionMockRecorder) ExecContext(ctx, query interface{}, args ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, query}, args...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecContext", reflect.TypeOf((*MockTransaction)(nil).ExecContext), varargs...) +} + +// QueryContext mocks base method. +func (m *MockTransaction) QueryContext(ctx context.Context, query string, args ...interface{}) (mysql.Rows, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, query} + for _, a := range args { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "QueryContext", varargs...) + ret0, _ := ret[0].(mysql.Rows) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// QueryContext indicates an expected call of QueryContext. +func (mr *MockTransactionMockRecorder) QueryContext(ctx, query interface{}, args ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, query}, args...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "QueryContext", reflect.TypeOf((*MockTransaction)(nil).QueryContext), varargs...) +} + +// QueryRowContext mocks base method. 
+func (m *MockTransaction) QueryRowContext(ctx context.Context, query string, args ...interface{}) mysql.Row { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, query} + for _, a := range args { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "QueryRowContext", varargs...) + ret0, _ := ret[0].(mysql.Row) + return ret0 +} + +// QueryRowContext indicates an expected call of QueryRowContext. +func (mr *MockTransactionMockRecorder) QueryRowContext(ctx, query interface{}, args ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, query}, args...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "QueryRowContext", reflect.TypeOf((*MockTransaction)(nil).QueryRowContext), varargs...) +} + +// Rollback mocks base method. +func (m *MockTransaction) Rollback() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Rollback") + ret0, _ := ret[0].(error) + return ret0 +} + +// Rollback indicates an expected call of Rollback. +func (mr *MockTransactionMockRecorder) Rollback() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Rollback", reflect.TypeOf((*MockTransaction)(nil).Rollback)) +} diff --git a/pkg/storage/v2/mysql/query.go b/pkg/storage/v2/mysql/query.go new file mode 100644 index 000000000..31e8e09f1 --- /dev/null +++ b/pkg/storage/v2/mysql/query.go @@ -0,0 +1,283 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE
+package mysql
+
+import (
+	"fmt"
+	"math"
+	"strings"
+)
+
+const placeHolder = "?"
+
+// WherePart is one fragment of a WHERE clause: an SQL snippet plus the
+// placeholder arguments it binds.
+type WherePart interface {
+	SQLString() (sql string, args []interface{})
+}
+
+// Filter renders "column operator ?".
+type Filter struct {
+	Column   string
+	Operator string
+	Value    interface{}
+}
+
+func NewFilter(column, operator string, value interface{}) WherePart {
+	return &Filter{
+		Column:   column,
+		Operator: operator,
+		Value:    value,
+	}
+}
+
+// SQLString returns an empty fragment when Column or Operator is missing,
+// so an incomplete filter never emits malformed SQL.
+func (f *Filter) SQLString() (sql string, args []interface{}) {
+	if f.Column == "" || f.Operator == "" {
+		return "", nil
+	}
+	sql = fmt.Sprintf("%s %s %s", f.Column, f.Operator, placeHolder)
+	args = append(args, f.Value)
+	return
+}
+
+// InFilter renders "column IN (?, ?, ...)".
+type InFilter struct {
+	Column string
+	Values []interface{}
+}
+
+func NewInFilter(column string, values []interface{}) WherePart {
+	return &InFilter{
+		Column: column,
+		Values: values,
+	}
+}
+
+// SQLString returns an empty fragment when Column is missing OR Values is
+// empty: "col IN ()" is a MySQL syntax error, so an empty value list must
+// not produce SQL.
+func (f *InFilter) SQLString() (sql string, args []interface{}) {
+	if f.Column == "" || len(f.Values) == 0 {
+		return "", nil
+	}
+	var sb strings.Builder
+	sb.WriteString(fmt.Sprintf("%s IN (", f.Column))
+	for i := range f.Values {
+		if i != 0 {
+			sb.WriteString(", ")
+		}
+		sb.WriteString("?")
+	}
+	sb.WriteString(")")
+	sql = sb.String()
+	args = f.Values
+	return
+}
+
+// NullFilter renders "column IS [NOT] NULL".
+type NullFilter struct {
+	Column string
+	IsNull bool
+}
+
+func NewNullFilter(column string, isNull bool) WherePart {
+	return &NullFilter{
+		Column: column,
+		IsNull: isNull,
+	}
+}
+
+func (f *NullFilter) SQLString() (sql string, args []interface{}) {
+	if f.Column == "" {
+		return "", nil
+	}
+	if f.IsNull {
+		sql = fmt.Sprintf("%s IS NULL", f.Column)
+	} else {
+		sql = fmt.Sprintf("%s IS NOT NULL", f.Column)
+	}
+	return
+}
+
+// JSONFilterFunc selects how a JSONFilter is rendered.
+type JSONFilterFunc int
+
+const (
+	_ JSONFilterFunc = iota
+	JSONContainsNumber
+	JSONContainsString
+)
+
+// JSONFilter renders "JSON_CONTAINS(column, ?)" with Values encoded as a
+// JSON array passed through the single placeholder argument.
+type JSONFilter struct {
+	Column string
+	Func   JSONFilterFunc
+	Values []interface{}
+}
+
+func NewJSONFilter(column string, f JSONFilterFunc, values []interface{}) WherePart {
+	return &JSONFilter{
+		Column: column,
+		Func:   f,
+		Values: values,
+	}
+}
+
+func (f *JSONFilter) SQLString() (sql string, args []interface{}) {
+	if f.Column == "" {
+		return "", nil
+	}
+	switch f.Func {
+	case JSONContainsNumber:
+		sql = fmt.Sprintf("JSON_CONTAINS(%s, ?)", f.Column)
+		args = append(args, jsonArray(f.Values, false))
+		return
+	case JSONContainsString:
+		// NOTE(review): values are quoted verbatim; a value containing
+		// '"' or '\' would yield invalid JSON. Confirm callers only pass
+		// plain identifiers.
+		sql = fmt.Sprintf("JSON_CONTAINS(%s, ?)", f.Column)
+		args = append(args, jsonArray(f.Values, true))
+		return
+	default:
+		// Unknown function: emit nothing rather than broken SQL.
+		return "", nil
+	}
+}
+
+// jsonArray encodes values as a JSON array literal, optionally quoting
+// each element as a JSON string.
+func jsonArray(values []interface{}, quoted bool) string {
+	var sb strings.Builder
+	sb.WriteString("[")
+	for i, v := range values {
+		if i != 0 {
+			sb.WriteString(", ")
+		}
+		if quoted {
+			sb.WriteString(fmt.Sprintf(`"%s"`, v))
+		} else {
+			sb.WriteString(fmt.Sprint(v))
+		}
+	}
+	sb.WriteString("]")
+	return sb.String()
+}
+
+// SearchQuery renders "(col1 LIKE ? OR col2 LIKE ? ...)" with the keyword
+// wrapped in '%' wildcards. An empty Keyword yields "%%", matching rows
+// with any non-NULL value.
+type SearchQuery struct {
+	Columns []string
+	Keyword string
+}
+
+func NewSearchQuery(columns []string, keyword string) WherePart {
+	return &SearchQuery{
+		Columns: columns,
+		Keyword: keyword,
+	}
+}
+
+func (q *SearchQuery) SQLString() (sql string, args []interface{}) {
+	if len(q.Columns) == 0 {
+		return "", nil
+	}
+	var sb strings.Builder
+	sb.WriteString("(")
+	for i, col := range q.Columns {
+		if i != 0 {
+			sb.WriteString(" OR ")
+		}
+		sb.WriteString(fmt.Sprintf("%s LIKE ?", col))
+		args = append(args, "%"+q.Keyword+"%")
+	}
+	sb.WriteString(")")
+	sql = sb.String()
+	return
+}
+
+// ConstructWhereSQLString joins the parts with AND into a "WHERE ..."
+// clause. Parts whose SQLString is empty (incomplete filters) are skipped
+// so they cannot corrupt the clause; if nothing remains, the result is "".
+func ConstructWhereSQLString(wps []WherePart) (sql string, args []interface{}) {
+	var sb strings.Builder
+	for _, wp := range wps {
+		wpSQL, wpArgs := wp.SQLString()
+		if wpSQL == "" {
+			continue
+		}
+		if sb.Len() == 0 {
+			sb.WriteString("WHERE ")
+		} else {
+			sb.WriteString(" AND ")
+		}
+		sb.WriteString(wpSQL)
+		args = append(args, wpArgs...)
+	}
+	sql = sb.String()
+	return
+}
+
+// OrderDirection is the sort direction of an ORDER BY column.
+type OrderDirection int
+
+const (
+	// default asc
+	OrderDirectionAsc OrderDirection = iota
+	OrderDirectionDesc
+)
+
+func (o OrderDirection) String() string {
+	switch o {
+	case OrderDirectionAsc:
+		return "ASC"
+	case OrderDirectionDesc:
+		return "DESC"
+	default:
+		return ""
+	}
+}
+
+// Order pairs a column with its sort direction.
+type Order struct {
+	Column    string
+	Direction OrderDirection
+}
+
+func NewOrder(column string, direction OrderDirection) *Order {
+	return &Order{
+		Column:    column,
+		Direction: direction,
+	}
+}
+
+// ConstructOrderBySQLString renders "ORDER BY col DIR, col DIR, ..." or
+// "" when no orders are given.
+func ConstructOrderBySQLString(orders []*Order) string {
+	if len(orders) == 0 {
+		return ""
+	}
+	var sb strings.Builder
+	sb.WriteString("ORDER BY ")
+	for i, o := range orders {
+		if i != 0 {
+			sb.WriteString(", ")
+		}
+		sb.WriteString(o.Column)
+		sb.WriteString(" ")
+		sb.WriteString(o.Direction.String())
+	}
+	return sb.String()
+}
+
+const (
+	QueryNoLimit  = 0
+	QueryNoOffset = 0
+
+	// Workaround for MySQL not support offset without limit
+	// ref: https://dev.mysql.com/doc/refman/8.0/en/select.html
+	queryLimitAllRows = math.MaxInt64
+)
+
+// ConstructLimitOffsetSQLString renders the LIMIT/OFFSET tail of a query.
+// QueryNoLimit/QueryNoOffset (0) mean "unbounded"; an offset without a
+// limit uses the documented MySQL workaround of a huge LIMIT.
+func ConstructLimitOffsetSQLString(limit, offset int) string {
+	if limit == QueryNoLimit && offset == QueryNoOffset {
+		return ""
+	}
+	if limit == QueryNoLimit && offset != QueryNoOffset {
+		return fmt.Sprintf("LIMIT %d OFFSET %d", queryLimitAllRows, offset)
+	}
+	if limit != QueryNoLimit && offset == QueryNoOffset {
+		return fmt.Sprintf("LIMIT %d", limit)
+	}
+	return fmt.Sprintf("LIMIT %d OFFSET %d", limit, offset)
+}
diff --git a/pkg/storage/v2/mysql/query_test.go b/pkg/storage/v2/mysql/query_test.go
new file mode 100644
index 000000000..6371a73c4
--- /dev/null
+++ b/pkg/storage/v2/mysql/query_test.go
@@ -0,0 +1,282 @@
+// Copyright 2022 The Bucketeer Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mysql + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestFilterSQLString(t *testing.T) { + t.Parallel() + patterns := map[string]struct { + input *Filter + expectedSQL string + expectedArgs []interface{} + }{ + "Empty": { + input: &Filter{}, + expectedSQL: "", + expectedArgs: nil, + }, + "Success": { + input: &Filter{ + Column: "name", + Operator: "=", + Value: "feature", + }, + expectedSQL: "name = ?", + expectedArgs: []interface{}{"feature"}, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + sql, args := p.input.SQLString() + assert.Equal(t, p.expectedSQL, sql) + assert.Equal(t, p.expectedArgs, args) + }) + } +} + +func TestInFilterSQLString(t *testing.T) { + t.Parallel() + patterns := map[string]struct { + input *InFilter + expectedSQL string + expectedArgs []interface{} + }{ + "Empty": { + input: &InFilter{}, + expectedSQL: "", + expectedArgs: nil, + }, + "Success": { + input: &InFilter{ + Column: "name", + Values: []interface{}{"v1", "v2"}, + }, + expectedSQL: "name IN (?, ?)", + expectedArgs: []interface{}{"v1", "v2"}, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + sql, args := p.input.SQLString() + assert.Equal(t, p.expectedSQL, sql) + assert.Equal(t, p.expectedArgs, args) + }) + } +} + +func TestNullFilterSQLString(t *testing.T) { + t.Parallel() + patterns := map[string]struct { + input *NullFilter + expectedSQL string + expectedArgs []interface{} + }{ + "Empty": { + input: &NullFilter{}, + expectedSQL: "", + expectedArgs: nil, + }, + 
"Success: null": { + input: &NullFilter{ + Column: "name", + IsNull: true, + }, + expectedSQL: "name IS NULL", + expectedArgs: nil, + }, + "Success: not null": { + input: &NullFilter{ + Column: "name", + IsNull: false, + }, + expectedSQL: "name IS NOT NULL", + expectedArgs: nil, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + sql, args := p.input.SQLString() + assert.Equal(t, p.expectedSQL, sql) + assert.Equal(t, p.expectedArgs, args) + }) + } +} + +func TestJSONFilterSQLString(t *testing.T) { + t.Parallel() + patterns := map[string]struct { + input *JSONFilter + expectedSQL string + expectedArgs []interface{} + }{ + "Empty": { + input: &JSONFilter{}, + expectedSQL: "", + expectedArgs: nil, + }, + "Success: JSONContainsNumber": { + input: &JSONFilter{ + Column: "enums", + Func: JSONContainsNumber, + Values: []interface{}{1, 3}, + }, + expectedSQL: "JSON_CONTAINS(enums, ?)", + expectedArgs: []interface{}{"[1, 3]"}, + }, + "Success: JSONContainsString": { + input: &JSONFilter{ + Column: "enums", + Func: JSONContainsString, + Values: []interface{}{"abc", "xyz"}, + }, + expectedSQL: "JSON_CONTAINS(enums, ?)", + expectedArgs: []interface{}{`["abc", "xyz"]`}, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + sql, args := p.input.SQLString() + assert.Equal(t, p.expectedSQL, sql) + assert.Equal(t, p.expectedArgs, args) + }) + } +} + +func TestSearchQuerySQLString(t *testing.T) { + t.Parallel() + patterns := map[string]struct { + input *SearchQuery + expectedSQL string + expectedArgs []interface{} + }{ + "Empty": { + input: &SearchQuery{}, + expectedSQL: "", + expectedArgs: nil, + }, + "Success": { + input: &SearchQuery{ + Columns: []string{"id", "name"}, + Keyword: "test", + }, + expectedSQL: "(id LIKE ? 
OR name LIKE ?)", + expectedArgs: []interface{}{"%test%", "%test%"}, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + sql, args := p.input.SQLString() + assert.Equal(t, p.expectedSQL, sql) + assert.Equal(t, p.expectedArgs, args) + }) + } +} + +func TestConstructWhereSQLString(t *testing.T) { + t.Parallel() + patterns := map[string]struct { + input []WherePart + expectedSQL string + expectedArgs []interface{} + }{ + "Empty": { + input: nil, + expectedSQL: "", + expectedArgs: nil, + }, + "Success": { + input: []WherePart{ + NewFilter("name", "=", "feature"), + NewJSONFilter("enums", JSONContainsNumber, []interface{}{1, 3}), + }, + expectedSQL: "WHERE name = ? AND JSON_CONTAINS(enums, ?)", + expectedArgs: []interface{}{"feature", "[1, 3]"}, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + sql, args := ConstructWhereSQLString(p.input) + assert.Equal(t, p.expectedSQL, sql) + assert.Equal(t, p.expectedArgs, args) + }) + } +} + +func TestConstructOrderBySQLString(t *testing.T) { + t.Parallel() + patterns := map[string]struct { + input []*Order + expectedSQL string + }{ + "Empty": { + input: nil, + expectedSQL: "", + }, + "Success": { + input: []*Order{ + NewOrder("created_at", OrderDirectionDesc), + NewOrder("id", OrderDirectionAsc), + }, + expectedSQL: "ORDER BY created_at DESC, id ASC", + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + sql := ConstructOrderBySQLString(p.input) + assert.Equal(t, p.expectedSQL, sql) + }) + } +} + +func TestConstructLimitOffsetSQLString(t *testing.T) { + t.Parallel() + patterns := map[string]struct { + limit int + offset int + expectedSQL string + }{ + "no limit & no offset": { + limit: 0, + offset: 0, + expectedSQL: "", + }, + "no limit & offset": { + limit: 0, + offset: 5, + expectedSQL: "LIMIT 9223372036854775807 OFFSET 5", + }, + "limit & no offset": { + limit: 10, + offset: 0, + expectedSQL: "LIMIT 10", + }, + "limit & offset": { + limit: 10, + 
offset: 5, + expectedSQL: "LIMIT 10 OFFSET 5", + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + sql := ConstructLimitOffsetSQLString(p.limit, p.offset) + assert.Equal(t, p.expectedSQL, sql) + }) + } +} diff --git a/pkg/storage/v2/mysql/result.go b/pkg/storage/v2/mysql/result.go new file mode 100644 index 000000000..581f4d7c7 --- /dev/null +++ b/pkg/storage/v2/mysql/result.go @@ -0,0 +1,63 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package mysql + +import "database/sql" + +type Result interface { + LastInsertId() (int64, error) + RowsAffected() (int64, error) +} + +type result struct { + sql.Result +} + +type Row interface { + Err() error + Scan(dest ...interface{}) error +} + +type row struct { + srow *sql.Row +} + +func (r *row) Err() error { + err := r.srow.Err() + if err == sql.ErrNoRows { + return ErrNoRows + } + return err +} + +func (r *row) Scan(dest ...interface{}) error { + err := r.srow.Scan(dest...) 
+ if err == sql.ErrNoRows { + return ErrNoRows + } + return err +} + +type Rows interface { + Close() error + Err() error + Next() bool + Scan(dest ...interface{}) error +} + +type rows struct { + *sql.Rows +} diff --git a/pkg/storage/v2/mysql/transaction.go b/pkg/storage/v2/mysql/transaction.go new file mode 100644 index 000000000..ac8d69f4f --- /dev/null +++ b/pkg/storage/v2/mysql/transaction.go @@ -0,0 +1,75 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// nolint:lll +//go:generate mockgen -source=$GOFILE -aux_files=github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql=client.go -package=mock -destination=./mock/$GOFILE +package mysql + +import ( + "context" + "database/sql" +) + +type Transaction interface { + QueryExecer + Commit() error + Rollback() error +} + +type transaction struct { + stx *sql.Tx +} + +func (tx *transaction) ExecContext(ctx context.Context, query string, args ...interface{}) (Result, error) { + var err error + defer record()(operationExec, &err) + sret, err := tx.stx.ExecContext(ctx, query, args...) + err = convertMySQLError(err) + return &result{sret}, err +} + +func (tx *transaction) QueryContext(ctx context.Context, query string, args ...interface{}) (Rows, error) { + var err error + defer record()(operationQuery, &err) + srows, err := tx.stx.QueryContext(ctx, query, args...) 
+ return &rows{srows}, err +} + +func (tx *transaction) QueryRowContext(ctx context.Context, query string, args ...interface{}) Row { + var err error + defer record()(operationQueryRow, &err) + r := &row{tx.stx.QueryRowContext(ctx, query, args...)} + err = r.Err() + return r +} + +func (tx *transaction) Commit() error { + var err error + defer record()(operationCommit, &err) + err = tx.stx.Commit() + if err == sql.ErrTxDone { + err = ErrTxDone + } + return err +} + +func (tx *transaction) Rollback() error { + var err error + defer record()(operationRollback, &err) + err = tx.stx.Rollback() + if err == sql.ErrTxDone { + err = ErrTxDone + } + return err +} diff --git a/pkg/storage/v2/postgres/BUILD.bazel b/pkg/storage/v2/postgres/BUILD.bazel new file mode 100644 index 000000000..2f4d3634b --- /dev/null +++ b/pkg/storage/v2/postgres/BUILD.bazel @@ -0,0 +1,28 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "client.go", + "error.go", + "json.go", + "result.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/storage/v2/postgres", + visibility = ["//visibility:public"], + deps = [ + "@com_github_lib_pq//:go_default_library", + "@com_google_cloud_go_alloydbconn//driver/pgxv4:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["error_test.go"], + embed = [":go_default_library"], + deps = [ + "@com_github_lib_pq//:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + ], +) diff --git a/pkg/storage/v2/postgres/client.go b/pkg/storage/v2/postgres/client.go new file mode 100644 index 000000000..ac063664a --- /dev/null +++ b/pkg/storage/v2/postgres/client.go @@ -0,0 +1,141 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package postgres + +import ( + "context" + "database/sql" + "fmt" + "log" + "time" + + "cloud.google.com/go/alloydbconn/driver/pgxv4" + "go.uber.org/zap" +) + +const ( + alloydb = "alloydb" +) + +type options struct { + connMaxLifetime time.Duration + maxOpenConns int + maxIdleConns int + logger *zap.Logger +} + +func defaultOptions() *options { + return &options{ + connMaxLifetime: 300 * time.Second, + maxOpenConns: 10, + maxIdleConns: 5, + logger: zap.NewNop(), + } +} + +type Execer interface { + ExecContext(ctx context.Context, query string, args ...interface{}) (Result, error) +} + +type Client interface { + Execer + Close() error +} + +type client struct { + db *sql.DB + opts *options + logger *zap.Logger +} + +type Option func(*options) + +func WithConnMaxLifetime(cml time.Duration) Option { + return func(opts *options) { + opts.connMaxLifetime = cml + } +} + +func WithMaxOpenConns(moc int) Option { + return func(opts *options) { + opts.maxOpenConns = moc + } +} + +func WithMaxIdleConns(mic int) Option { + return func(opts *options) { + opts.maxIdleConns = mic + } +} + +func WithLogger(logger *zap.Logger) Option { + return func(opts *options) { + opts.logger = logger + } +} + +func NewClient( + ctx context.Context, + project, region, cluster, instance string, + dbUser, dbPass, dbName string, + opts ...Option, +) (Client, error) { + dopts := defaultOptions() + for _, opt := range opts { + opt(dopts) + } + logger := dopts.logger.Named("postgres") + cleanup, err := pgxv4.RegisterDriver(alloydb) + if err != nil { + return nil, err + } + defer 
func() { + if err := cleanup(); err != nil { + log.Fatal(err) + } + }() + dsn := fmt.Sprintf( + "host=projects/%s/locations/%s/clusters/%s/instances/%s user=%s password=%s dbname=%s sslmode=disable", + project, region, cluster, instance, dbUser, dbPass, dbName, + ) + db, err := sql.Open(alloydb, dsn) + if err != nil { + logger.Error("Failed to open db", zap.Error(err)) + return nil, err + } + db.SetConnMaxLifetime(dopts.connMaxLifetime) + db.SetMaxOpenConns(dopts.maxOpenConns) + db.SetMaxIdleConns(dopts.maxIdleConns) + if err := db.PingContext(ctx); err != nil { + logger.Error("Failed to ping db", zap.Error(err)) + return nil, err + } + return &client{ + db: db, + opts: dopts, + logger: logger, + }, nil +} + +func (c *client) ExecContext(ctx context.Context, query string, args ...interface{}) (Result, error) { + var err error + sret, err := c.db.ExecContext(ctx, query, args...) + err = convertPostgresError(err) + return &result{sret}, err +} + +func (c *client) Close() error { + return c.db.Close() +} diff --git a/pkg/storage/v2/postgres/error.go b/pkg/storage/v2/postgres/error.go new file mode 100644 index 000000000..4ff5135e0 --- /dev/null +++ b/pkg/storage/v2/postgres/error.go @@ -0,0 +1,40 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package postgres + +import ( + "errors" + + "github.com/lib/pq" +) + +var ( + ErrDuplicateEntry = errors.New("postgres: duplicate entry") +) + +const uniqueViolation pq.ErrorCode = "23505" + +func convertPostgresError(err error) error { + if err == nil { + return nil + } + if postgresErr, ok := err.(*pq.Error); ok { + switch postgresErr.Code { + case uniqueViolation: + return ErrDuplicateEntry + } + } + return err +} diff --git a/pkg/storage/v2/postgres/error_test.go b/pkg/storage/v2/postgres/error_test.go new file mode 100644 index 000000000..e14c47d86 --- /dev/null +++ b/pkg/storage/v2/postgres/error_test.go @@ -0,0 +1,50 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package postgres + +import ( + "errors" + "testing" + + "github.com/lib/pq" + "github.com/stretchr/testify/assert" +) + +func TestConvertPostgresError(t *testing.T) { + t.Parallel() + patterns := map[string]struct { + input error + expected error + }{ + "nil": { + input: nil, + expected: nil, + }, + "mysql error: ErrDuplicateEntry": { + input: &pq.Error{Code: uniqueViolation}, + expected: ErrDuplicateEntry, + }, + "non mysql error": { + input: errors.New("non postgres error"), + expected: errors.New("non postgres error"), + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + actual := convertPostgresError(p.input) + assert.Equal(t, p.expected, actual) + }) + } +} diff --git a/pkg/storage/v2/postgres/json.go b/pkg/storage/v2/postgres/json.go new file mode 100644 index 000000000..ce6165969 --- /dev/null +++ b/pkg/storage/v2/postgres/json.go @@ -0,0 +1,42 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
// JSONObject adapts an arbitrary Go value to a JSON database column:
// it implements driver.Valuer (marshal on write) and the sql.Scanner
// contract (unmarshal on read).
type JSONObject struct {
	Val interface{}
}

// Value marshals Val to JSON so the driver can store it.
func (o JSONObject) Value() (driver.Value, error) {
	return json.Marshal(o.Val)
}

// Scan unmarshals a JSON column into Val.
// A nil source (SQL NULL) leaves Val untouched and returns nil.
// Both []byte and string sources are accepted — some drivers return
// JSON/JSONB columns as string — and any other type is rejected.
func (o *JSONObject) Scan(src interface{}) error {
	var data []byte
	switch s := src.(type) {
	case nil:
		return nil
	case []byte:
		data = s
	case string:
		data = []byte(s)
	default:
		return errors.New("incompatible type for JSONObject")
	}
	return json.Unmarshal(data, &o.Val)
}
+ +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package postgres + +import "database/sql" + +type Result interface { + LastInsertId() (int64, error) + RowsAffected() (int64, error) +} + +type result struct { + sql.Result +} diff --git a/pkg/token/BUILD.bazel b/pkg/token/BUILD.bazel new file mode 100644 index 000000000..e63661396 --- /dev/null +++ b/pkg/token/BUILD.bazel @@ -0,0 +1,36 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "idtoken.go", + "signer.go", + "verifier.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/token", + visibility = ["//visibility:public"], + deps = [ + "//proto/account:go_default_library", + "//proto/auth:go_default_library", + "@com_github_golang_protobuf//proto:go_default_library", + "@in_gopkg_square_go_jose_v2//:go_default_library", + "@in_gopkg_square_go_jose_v2//jwt:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "idtoken_test.go", + "signer_test.go", + "verifier_test.go", + ], + data = glob(["testdata/**"]), + embed = [":go_default_library"], + deps = [ + "//proto/auth:go_default_library", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + ], +) diff --git a/pkg/token/idtoken.go b/pkg/token/idtoken.go new file mode 100644 index 000000000..090f0d6b5 --- /dev/null +++ b/pkg/token/idtoken.go @@ -0,0 +1,57 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package token + +import ( + "encoding/base64" + "time" + + "github.com/golang/protobuf/proto" // nolint:staticcheck + + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + authproto "github.com/bucketeer-io/bucketeer/proto/auth" +) + +type IDToken struct { + Issuer string `json:"iss"` + Subject string `json:"sub"` + Audience string `json:"aud"` + Expiry time.Time `json:"exp"` + IssuedAt time.Time `json:"iat"` + Email string `json:"email"` + // Use "role" as json tag to keep compatibility. + // AdminRole is accountproto.Account_OWNER in the case of AdminAccount. + // AdminRole is accountproto.Account_UNASSIGNED in the case of Account. + AdminRole accountproto.Account_Role `json:"role"` +} + +func (t *IDToken) IsAdmin() bool { + return t.AdminRole != accountproto.Account_UNASSIGNED +} + +func ExtractUserID(subject string) (string, error) { + tokenSubject := &authproto.IDTokenSubject{} + // Q: Why do we need to decode the sub string + // A: https://github.com/coreos/dex/blob/master/server/internal/codec.go#L20 + data, err := base64.RawURLEncoding.DecodeString(subject) + if err != nil { + return "", err + } + err = proto.Unmarshal(data, tokenSubject) + if err != nil { + return "", err + } + return tokenSubject.UserId, nil +} diff --git a/pkg/token/idtoken_test.go b/pkg/token/idtoken_test.go new file mode 100644 index 000000000..e66c064b6 --- /dev/null +++ b/pkg/token/idtoken_test.go @@ -0,0 +1,60 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package token + +import ( + "encoding/base64" + "fmt" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + authproto "github.com/bucketeer-io/bucketeer/proto/auth" +) + +func TestExtractUserID(t *testing.T) { + userID := "test-id" + sub := &authproto.IDTokenSubject{ + UserId: userID, + ConnId: "test-connector-id", + } + data, err := proto.Marshal(sub) + require.NoError(t, err) + encodedSub := base64.RawURLEncoding.EncodeToString(data) + testcases := []struct { + subject string + userID string + failed bool + }{ + { + subject: "invalid", + userID: "", + failed: true, + }, + { + subject: encodedSub, + userID: userID, + failed: false, + }, + } + for i, tc := range testcases { + des := fmt.Sprintf("index %d", i) + userID, err := ExtractUserID(tc.subject) + assert.Equal(t, tc.userID, userID, des) + assert.Equal(t, tc.failed, err != nil, des) + } +} diff --git a/pkg/token/signer.go b/pkg/token/signer.go new file mode 100644 index 000000000..2928cb7b4 --- /dev/null +++ b/pkg/token/signer.go @@ -0,0 +1,86 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package token + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "fmt" + "io/ioutil" + + jose "gopkg.in/square/go-jose.v2" + "gopkg.in/square/go-jose.v2/jwt" +) + +type Signer interface { + Sign(*IDToken) (string, error) +} + +type signer struct { + sig jose.Signer +} + +func NewSigner(keyPath string) (Signer, error) { + data, err := ioutil.ReadFile(keyPath) + if err != nil { + return nil, err + } + key, err := parseRSAPrivateKey(data) + if err != nil { + return nil, err + } + // TODO: Currently, we are using RSA algorithm to be compatible with istio envoy. + // https://github.com/istio/proxy/tree/master/src/envoy/auth + // But in the future, we should consider to move to HMAC for a better performance. 
+ return NewSignerWithPrivateKey(key) +} + +func NewSignerWithPrivateKey(privateKey *rsa.PrivateKey) (Signer, error) { + signingKey := jose.SigningKey{ + Key: privateKey, + Algorithm: jose.RS256, + } + sig, err := jose.NewSigner(signingKey, &jose.SignerOptions{}) + if err != nil { + return nil, err + } + return &signer{sig: sig}, nil +} + +func (s *signer) Sign(token *IDToken) (string, error) { + return jwt.Signed(s.sig).Claims(token).CompactSerialize() +} + +func parseRSAPrivateKey(data []byte) (*rsa.PrivateKey, error) { + input := data + block, _ := pem.Decode(data) + if block != nil { + input = block.Bytes + } + var parsedKey interface{} + parsedKey, err := x509.ParsePKCS1PrivateKey(input) + if err != nil { + parsedKey, err = x509.ParsePKCS8PrivateKey(input) + if err != nil { + return nil, err + } + } + rsaKey, ok := parsedKey.(*rsa.PrivateKey) + if !ok { + return nil, fmt.Errorf("key is not a valid RSA private key") + } + return rsaKey, nil +} diff --git a/pkg/token/signer_test.go b/pkg/token/signer_test.go new file mode 100644 index 000000000..8b3fb974f --- /dev/null +++ b/pkg/token/signer_test.go @@ -0,0 +1,102 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package token + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewSigner(t *testing.T) { + testcases := []struct { + path string + ok bool + }{ + {"testdata/valid-private.pem", true}, + {"testdata/invalid-private.pem", false}, + } + for i, tc := range testcases { + des := fmt.Sprintf("index: %d", i) + s, err := NewSigner(tc.path) + assert.Equal(t, err == nil, tc.ok, des) + assert.Equal(t, s != nil, tc.ok, des) + } +} + +func TestSign(t *testing.T) { + issuer := "test_issuer" + clientID := "test_client_id" + signer, err := NewSigner("testdata/valid-private.pem") + require.NoError(t, err) + verifier, err := NewVerifier("testdata/valid-public.pem", issuer, clientID) + require.NoError(t, err) + testcases := []struct { + token *IDToken + ok bool + }{ + { + &IDToken{ + Issuer: issuer, + Subject: "subject", + Audience: clientID, + Email: "test@email", + Expiry: time.Now().Add(time.Hour), + }, + true, + }, + { + &IDToken{ + Issuer: issuer, + Subject: "subject", + Audience: clientID, + Expiry: time.Now().Add(time.Hour), + }, + false, + }, + { + &IDToken{ + Issuer: issuer, + Subject: "subject", + Audience: clientID, + Email: "test@email", + Expiry: time.Now().Add(-time.Hour), + }, + false, + }, + } + for i, tc := range testcases { + des := fmt.Sprintf("index: %d", i) + signedToken, err := signer.Sign(tc.token) + require.NoError(t, err, des) + require.True(t, len(signedToken) > 0, des) + parsedToken, err := verifier.Verify(signedToken) + if tc.ok { + require.NoError(t, err, fmt.Sprintf("index: %d, error: %v", i, err)) + require.Equal(t, tc.token.Issuer, parsedToken.Issuer, des) + require.Equal(t, tc.token.Subject, parsedToken.Subject, des) + require.Equal(t, tc.token.Audience, parsedToken.Audience, des) + require.True(t, tc.token.Expiry.Equal(parsedToken.Expiry), des) + require.True(t, tc.token.IssuedAt.Equal(parsedToken.IssuedAt), des) + require.Equal(t, tc.token.Email, 
parsedToken.Email, des) + require.Equal(t, tc.token.AdminRole, parsedToken.AdminRole, des) + } else { + require.Error(t, err, des) + } + } +} diff --git a/pkg/token/testdata/invalid-private.pem b/pkg/token/testdata/invalid-private.pem new file mode 100644 index 000000000..9977a2836 --- /dev/null +++ b/pkg/token/testdata/invalid-private.pem @@ -0,0 +1 @@ +invalid diff --git a/pkg/token/testdata/invalid-public.pem b/pkg/token/testdata/invalid-public.pem new file mode 100644 index 000000000..9977a2836 --- /dev/null +++ b/pkg/token/testdata/invalid-public.pem @@ -0,0 +1 @@ +invalid diff --git a/pkg/token/testdata/valid-private.pem b/pkg/token/testdata/valid-private.pem new file mode 100644 index 000000000..8a0c955a2 --- /dev/null +++ b/pkg/token/testdata/valid-private.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAKCAgEA+tMNIgc1RCQRAcdH8E1ySbkmHdhpJK5Y9tKGrr5jghSnYg1F +xCOUxcBJ/SBRUEnCAL0DCc3Jr1LU1PdL+ad7k43jNYFQu8wdi0Lh4SypiRKco8W3 +UafmXRMW2SaD/74XZKW0KFsr/BQJ3Ounle+ukT5kNXe+T3fodzfzok5ib1XdgMLY +LSVQ4HMvFDIr7xCAth712Hvv6bIJvKifTcb8rHbCaEog3oLRVfuQubUHQHOP4a6T +0kUUWgIBUzDQxqmekrEdLGxlnhS94o259cPE9VMUnnQPbgQOby7fPGSwPJT3BEiH +dZkH8IH+MWXDSRIMAR6Y2LsOjTU5PUaMwrbNp05zKY/rRnZpSc2g/ZviQ4CK4GWW +MRsnzOLnHAykgeWC9JK/5QZwpmPwqrovW+LJUkNG7mCfw6gjkvxBDcCXzOyb7gX/ +2J9zZSKRjYybCrcJkKnVWoGdnAbYy1Nso3DUBHIOSM3yWSVanYZTLSqIAIOMqdVa +jTVzB3nVz5M77Uuz6GhvF1cWH10jkNGvp0EzQnrq/6CrOotvv04GOkuxBQ6kj9ir +5J0rKQIhvRZ299C+/Sli1VPUPpAI8LtSOPFeCC9ryFghEA/KibUAO/DpXdmNWtid +dRuqYCxjdGrCmcMLS8xDe9Aage0DS6I/9g9P4xVHLpWRMgh3PP/D3cIa4a0CAwEA +AQKCAgBto2l/MVVoRrekZl6cvM2zShpOO3WiUg+TPG0g3xoSF0MfShvv43m5gxcU +bVb92G/T8MruMyTw3S0pNjvgkgQjeIcTsUYAAPX9FKw1yFcHErqjn95E9DZvAtQe +8YYLeSUymngIl+m/E2dIST4E/TL6TCJzeLcYd5qG2k9F6CuA7TCYgxMJkOEsa0zt +8AxiQH1ynk/qZCp3pZo5/B8+WTfDj1nlSlnZyacFhaQ6eNYFpz4CcVQuOoHt4iiA +G7t2y1gEWSUecPduRTTHXhomQn/KmJlPtf20olNI6F0uQfkDUj4zghysG8trpvbc +0tWtEBVtO9mmCwYOVXCCx17qEgwMM2eDv0wzDi3vzLAUS0kP49itExFIK+Xxg+UC 
+zo9SFYwGo8f46qvSfpFpD73W5UbPxnz1bOHqV7cZ52pzTx1YjjGXrVRfhzfTR4Ia +frnLemKKsY1nwwmloJxAqUEFj+WACWVSdiZeSAvHK6AA/UHc4utec2v8teDpFek9 +b6SkMYF0Y9bnGgG52C3/+PFGqcAZlbv2q/f5t39V1jLperh/itllrjNSUmTOghwe +wPqRqT3bXRDKbl17f7oFpjIZxgNTNRuoKE5y6FsOCCqGkssvVRYR3y8smkHTy/m2 +z8HUtXyi2/RQ2LdD6IZwNVU7PvxIRthKRXqPTkWc2tTzIPBk5QKCAQEA/e3VPtss +8A9W4qkg4uHbvs9tty7T4nE9SsAtkfm8o2XcINiM5qRk2iVxIfS822jofDghjDZR +QsXWaajVrNY0gy271QkUTzIkVa7SNPzkKs/yFzSeXt7e3u8U1z+yEF/GtjVw731W +7c+eYkt7xM0xcjImIW1YgSS0q9W7sfKNq8hVjcduS09o05VdjRCF94ButKvGDW12 +pLAz91bBVvhGDr/l9ynlouAKm0p/Y6rYPn45umq/hVLjBjyyVxv/uOX+f2s0Z2BX +wWkhdXVEmkTndtKFHVe0ne+rgAUyYROHM8fGhoH+q4mfNmhffRVeS7X3U88cd/2v +gXSOJ5loce5mpwKCAQEA/N68f5qcNPDwGOlAXB9uR/geHmZn49NZpZ+XzyjEe2LF +SOzMUWftDOZr8NV7kXtWhqWaHgbQb8Xm+uD48hZMEpXmxyp3CRUhPjjej96dnT+o +4m3ACXXf8WAZbtGLHBlpnLyB3sDkdIYv27KT2fdg1Kxi2H/L+vOHw18/L8q4TIUw +z4bK/LO+Ek9eCbqPK07e+2nWuwwHO43InEi5nSL2G2Zew/I/+71u/+EbpBytzdzi +nPaSDCq/X5rHD+9pfCAOsCDqjU/+p6xIE0j0oLxpKEzfkXV0xPvp9m2fGM6xsg8R +6qguzm+wu+2ao37t1lF2aUVfrSGnUeS6J2NyH7dTiwKCAQAaLEB1mRu5pqsa87nL +COKgeTrizbXTOj7KDKLlz+rsoJwe2pMzVrB6d6+Ag3xP1TFZVGPI0vRvQHsIKvnT +NcT13Gnm3Ge2fgGWv001XveZnhNi5u6H8srgCZ6JTSxqWaTetQVt+epS2rqFgShB +sbCuB5M8kMu1i/X/XClbfxexJBYFZg/tPUMdw+B/oLteNmOyxZ3FVooLEYpZrRyC +fVajQI5QeoYL0rHlLbmRqVNepZ4Ho+cXs+JoiysKpX0nLrWbpo4hNaH18oMbWzn1 ++LIu4iYMTzTzrndr251EukGOScIhrRZ10dqSPhHRj7lHbnsOrWNOztwfKMdaN4q3 +a44zAoIBABzYe8BCrtcLNRhiY0ke/MGQ+D6Xh/phPbS7A+JJ0EATAFZVTOlsZYo3 +LtEGSVZLL4+kjHQoataVSMgNqdhT2tGz7+OAYJvUoTor2pem2Wv6uxU5V9K8c5Nh +VwVhOfE2pOmQR3BVqLnyJLt3H46ZvZ4FHDF+QpKZ/T64OskE4wk3mF3UDovNJT2J +eDy732EdWipr5Gbp9FlCz0OkuCfIuTJe9yh7Ffltnp/p2fFapCj7rY4iQn7I0ZPi +YBrdQLc8IPDOkHMWUQJnkMFCbUV4uLY0Lry1HWDrK8FEuAVobrTVK0INdWlvLEYD +0MQd0pvEJzHTDBr/38pjwSiFG5uqmgkCggEBAJMNMK9gPxpTlpewL3jpqH0bRtaa +r80WrgJqaMnZjX4OLUkZgsfX5ldUuNKc2R9IQbvL84Z+90UREMgSQUHOjKwSgNHk +cob2v0bAT0t+p7FdFnb2TdIWgSycW/OtLN+b+3sn6Wih9rHUfafg64diTNCgb28a +ukRTCtbdOxkIwm/ad+15FDAeJnrXly0NfIDdo8oIvxOMXvkCwgC2WMY/xlBXm9K7 
+/JOZaXbwkMI4YUrcEBFDvOMIJK+l8Y81gUd8t1XF4xCmI6hTQTI18KNBjDkZO6qL +TGWYWzU4QdF4ARdZ+FClyxO9frtk4VwgodePDlU6y2+WB6vExhol4u3eUKo= +-----END RSA PRIVATE KEY----- diff --git a/pkg/token/testdata/valid-public.pem b/pkg/token/testdata/valid-public.pem new file mode 100644 index 000000000..9eb5a1d26 --- /dev/null +++ b/pkg/token/testdata/valid-public.pem @@ -0,0 +1,14 @@ +-----BEGIN PUBLIC KEY----- +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA+tMNIgc1RCQRAcdH8E1y +SbkmHdhpJK5Y9tKGrr5jghSnYg1FxCOUxcBJ/SBRUEnCAL0DCc3Jr1LU1PdL+ad7 +k43jNYFQu8wdi0Lh4SypiRKco8W3UafmXRMW2SaD/74XZKW0KFsr/BQJ3Ounle+u +kT5kNXe+T3fodzfzok5ib1XdgMLYLSVQ4HMvFDIr7xCAth712Hvv6bIJvKifTcb8 +rHbCaEog3oLRVfuQubUHQHOP4a6T0kUUWgIBUzDQxqmekrEdLGxlnhS94o259cPE +9VMUnnQPbgQOby7fPGSwPJT3BEiHdZkH8IH+MWXDSRIMAR6Y2LsOjTU5PUaMwrbN +p05zKY/rRnZpSc2g/ZviQ4CK4GWWMRsnzOLnHAykgeWC9JK/5QZwpmPwqrovW+LJ +UkNG7mCfw6gjkvxBDcCXzOyb7gX/2J9zZSKRjYybCrcJkKnVWoGdnAbYy1Nso3DU +BHIOSM3yWSVanYZTLSqIAIOMqdVajTVzB3nVz5M77Uuz6GhvF1cWH10jkNGvp0Ez +Qnrq/6CrOotvv04GOkuxBQ6kj9ir5J0rKQIhvRZ299C+/Sli1VPUPpAI8LtSOPFe +CC9ryFghEA/KibUAO/DpXdmNWtiddRuqYCxjdGrCmcMLS8xDe9Aage0DS6I/9g9P +4xVHLpWRMgh3PP/D3cIa4a0CAwEAAQ== +-----END PUBLIC KEY----- diff --git a/pkg/token/verifier.go b/pkg/token/verifier.go new file mode 100644 index 000000000..e43ef3d17 --- /dev/null +++ b/pkg/token/verifier.go @@ -0,0 +1,104 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package token + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/json" + "encoding/pem" + "fmt" + "io/ioutil" + "time" + + jose "gopkg.in/square/go-jose.v2" +) + +type Verifier interface { + Verify(string) (*IDToken, error) +} + +type verifier struct { + issuer string + clientID string + algorithm jose.SignatureAlgorithm + pubKey *rsa.PublicKey +} + +func NewVerifier(keyPath, issuer, clientID string) (Verifier, error) { + data, err := ioutil.ReadFile(keyPath) + if err != nil { + return nil, err + } + key, err := parseRSAPublicKey(data) + if err != nil { + return nil, err + } + return &verifier{ + issuer: issuer, + clientID: clientID, + algorithm: jose.RS256, + pubKey: key, + }, nil +} + +func (v *verifier) Verify(rawIDToken string) (*IDToken, error) { + jws, err := jose.ParseSigned(rawIDToken) + if err != nil { + return nil, fmt.Errorf("malformed jwt: %v", err) + } + payload, err := jws.Verify(v.pubKey) + if err != nil { + return nil, fmt.Errorf("invalid jwt: %v", err) + } + t := &IDToken{} + if err := json.Unmarshal(payload, t); err != nil { + return nil, fmt.Errorf("failed to unmarshal claims: %v", err) + } + if t.Issuer != v.issuer { + return nil, fmt.Errorf("id token issued by a different provider, expected %q got %q", v.issuer, t.Issuer) + } + if t.Audience != v.clientID { + return nil, fmt.Errorf("expected audience %q got %q", v.clientID, t.Audience) + } + if t.Expiry.Before(time.Now()) { + return nil, fmt.Errorf("token is expired (Token Expiry: %v)", t.Expiry) + } + if t.Email == "" { + return nil, fmt.Errorf("email must be not empty") + } + return t, nil +} + +func parseRSAPublicKey(data []byte) (*rsa.PublicKey, error) { + input := data + block, _ := pem.Decode(data) + if block != nil { + input = block.Bytes + } + parsedKey, err := x509.ParsePKIXPublicKey(input) + if err != nil { + cert, err := x509.ParseCertificate(input) + if err != nil { + return nil, err + } + parsedKey = cert.PublicKey + } + pubKey, ok := parsedKey.(*rsa.PublicKey) + if !ok { + 
return nil, fmt.Errorf("key is not a valid RSA public key") + } + return pubKey, nil +} diff --git a/pkg/token/verifier_test.go b/pkg/token/verifier_test.go new file mode 100644 index 000000000..14f678145 --- /dev/null +++ b/pkg/token/verifier_test.go @@ -0,0 +1,105 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package token + +import ( + "encoding/base64" + "fmt" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewVerifier(t *testing.T) { + t.Parallel() + testcases := []struct { + path string + ok bool + }{ + {"testdata/valid-public.pem", true}, + {"testdata/invalid-public.pem", false}, + } + for i, tc := range testcases { + des := fmt.Sprintf("index: %d", i) + s, err := NewVerifier(tc.path, "issuer", "client_id") + assert.Equal(t, err == nil, tc.ok, fmt.Sprintf("index: %d, err: %v", i, err)) + assert.Equal(t, s != nil, tc.ok, des) + } +} + +func TestVerify(t *testing.T) { + t.Parallel() + issuer := "test_issuer" + clientID := "test_client_id" + signer, err := NewSigner("testdata/valid-private.pem") + require.NoError(t, err) + idToken := &IDToken{ + Issuer: issuer, + Subject: "subject", + Audience: clientID, + Email: "test@email", + Expiry: time.Now().Add(time.Hour), + } + testcases := map[string]struct { + rawIDToken string + valid bool + }{ + "err: malformed jwt": { + rawIDToken: "", + valid: false, + }, + "err: invalid jwt": 
{ + rawIDToken: createInvalidRawIDToken(t, signer, idToken), + valid: false, + }, + "success": { + rawIDToken: createValidRawIDToken(t, signer, idToken), + valid: true, + }, + } + verifier, err := NewVerifier("testdata/valid-public.pem", issuer, clientID) + require.NoError(t, err) + for msg, p := range testcases { + t.Run(msg, func(t *testing.T) { + actualToken, err := verifier.Verify(p.rawIDToken) + if p.valid { + assert.NotNil(t, actualToken) + assert.NoError(t, err) + } else { + assert.Nil(t, actualToken) + assert.Error(t, err) + } + }) + } +} + +func createValidRawIDToken(t *testing.T, signer Signer, idToken *IDToken) string { + t.Helper() + rawIDToken, err := signer.Sign(idToken) + require.NoError(t, err) + return rawIDToken +} + +func createInvalidRawIDToken(t *testing.T, signer Signer, idToken *IDToken) string { + t.Helper() + rawIDToken, err := signer.Sign(idToken) + require.NoError(t, err) + parts := strings.Split(rawIDToken, ".") + invalidSignature := base64.RawURLEncoding.EncodeToString([]byte("invalid-signature")) + return fmt.Sprintf("%s.%s.%s", parts[0], parts[1], invalidSignature) +} diff --git a/pkg/trace/BUILD.bazel b/pkg/trace/BUILD.bazel new file mode 100644 index 000000000..cb8b228f4 --- /dev/null +++ b/pkg/trace/BUILD.bazel @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["trace.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/trace", + visibility = ["//visibility:public"], + deps = [ + "@io_opencensus_go//trace:go_default_library", + "@io_opencensus_go_contrib_exporter_stackdriver//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["trace_test.go"], + embed = [":go_default_library"], + deps = [ + "@com_github_stretchr_testify//assert:go_default_library", + "@io_opencensus_go//trace:go_default_library", + ], +) diff --git a/pkg/trace/trace.go b/pkg/trace/trace.go new file 
mode 100644 index 000000000..3e97f65f4 --- /dev/null +++ b/pkg/trace/trace.go @@ -0,0 +1,70 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "contrib.go.opencensus.io/exporter/stackdriver" + "go.opencensus.io/trace" + "go.uber.org/zap" +) + +func NewStackdriverExporter(service, version string, logger *zap.Logger) (*stackdriver.Exporter, error) { + return stackdriver.NewExporter(stackdriver.Options{ + OnError: func(err error) { + logger.Warn("Failed to upload tracing data to Stackdriver", zap.Error(err)) + }, + DefaultTraceAttributes: map[string]interface{}{ + "Service": service, + "Version": version, + }, + }) +} + +type sampler struct { + probability float64 + filteringSamplers map[string]trace.Sampler +} + +type SamplerOption func(*sampler) + +func WithDefaultProbability(p float64) SamplerOption { + return func(s *sampler) { + s.probability = p + } +} + +func WithFilteringSampler(name string, fs trace.Sampler) SamplerOption { + return func(s *sampler) { + s.filteringSamplers[name] = fs + } +} + +func NewSampler(options ...SamplerOption) trace.Sampler { + s := &sampler{ + probability: 0.01, + filteringSamplers: make(map[string]trace.Sampler), + } + for _, opt := range options { + opt(s) + } + return s.sampler +} + +func (s *sampler) sampler(p trace.SamplingParameters) trace.SamplingDecision { + if fs, ok := s.filteringSamplers[p.Name]; ok { + return fs(p) + } + return 
trace.ProbabilitySampler(s.probability)(p) +} diff --git a/pkg/trace/trace_test.go b/pkg/trace/trace_test.go new file mode 100644 index 000000000..7b4d7170a --- /dev/null +++ b/pkg/trace/trace_test.go @@ -0,0 +1,81 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.opencensus.io/trace" +) + +func TestSampler(t *testing.T) { + t.Parallel() + filteringSpanName := "span-name" + testcases := map[string]struct { + sampler trace.Sampler + name string + expected bool + }{ + "false: filteringSpanName NeverSample": { + sampler: NewSampler( + WithDefaultProbability(1.0), + WithFilteringSampler(filteringSpanName, trace.NeverSample()), + ), + name: filteringSpanName, + expected: false, + }, + "false: filteringSpanName Probability=0.0": { + sampler: NewSampler( + WithDefaultProbability(1.0), + WithFilteringSampler(filteringSpanName, trace.ProbabilitySampler(0.0)), + ), + name: filteringSpanName, + expected: false, + }, + "true: filteringSpanName Probability=1.0": { + sampler: NewSampler( + WithDefaultProbability(0.0), + WithFilteringSampler(filteringSpanName, trace.ProbabilitySampler(1.0)), + ), + name: filteringSpanName, + expected: true, + }, + "false: default Probability=0.0": { + sampler: NewSampler( + WithDefaultProbability(0.0), + WithFilteringSampler(filteringSpanName, trace.ProbabilitySampler(1.0)), + ), + name: "default", + expected: 
false, + }, + "true: default Probability=1.0": { + sampler: NewSampler( + WithDefaultProbability(1.0), + WithFilteringSampler(filteringSpanName, trace.ProbabilitySampler(0.0)), + ), + name: "default", + expected: true, + }, + } + for msg, tc := range testcases { + t.Run(msg, func(t *testing.T) { + decision := tc.sampler(trace.SamplingParameters{ + Name: tc.name, + }) + assert.Equal(t, tc.expected, decision.Sample) + }) + } +} diff --git a/pkg/user/api/BUILD.bazel b/pkg/user/api/BUILD.bazel new file mode 100644 index 000000000..0fa2a2f3c --- /dev/null +++ b/pkg/user/api/BUILD.bazel @@ -0,0 +1,50 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "api.go", + "error.go", + "user.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/user/api", + visibility = ["//visibility:public"], + deps = [ + "//pkg/account/client:go_default_library", + "//pkg/locale:go_default_library", + "//pkg/log:go_default_library", + "//pkg/role:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/rpc/status:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//pkg/user/domain:go_default_library", + "//pkg/user/storage/v2:go_default_library", + "//proto/account:go_default_library", + "//proto/event/domain:go_default_library", + "//proto/user:go_default_library", + "@go_googleapis//google/rpc:errdetails_go_proto", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["user_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/account/client/mock:go_default_library", + "//pkg/locale:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//pkg/storage/v2/mysql/mock:go_default_library", + 
"//pkg/token:go_default_library", + "//proto/account:go_default_library", + "//proto/user:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/user/api/api.go b/pkg/user/api/api.go new file mode 100644 index 000000000..af5f844c3 --- /dev/null +++ b/pkg/user/api/api.go @@ -0,0 +1,123 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "context" + + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + accountclient "github.com/bucketeer-io/bucketeer/pkg/account/client" + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/role" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/domain" + proto "github.com/bucketeer-io/bucketeer/proto/user" +) + +type options struct { + logger *zap.Logger +} + +var defaultOptions = options{ + logger: zap.NewNop(), +} + +type Option func(*options) + +func WithLogger(logger *zap.Logger) Option { + return func(opts *options) { + opts.logger = logger + } +} + +type userService struct { + storageClient mysql.Client + accountClient accountclient.Client + opts *options + logger *zap.Logger +} + +func NewUserService( + client mysql.Client, + accountClient accountclient.Client, + opts ...Option, +) rpc.Service { + options := defaultOptions + for _, opt := range opts { + opt(&options) + } + return &userService{ + storageClient: client, + accountClient: accountClient, + opts: &options, + logger: options.logger.Named("api"), + } +} + +func (s *userService) Register(server *grpc.Server) { + proto.RegisterUserServiceServer(server, s) +} + +func (s *userService) checkRole( + ctx context.Context, + requiredRole accountproto.Account_Role, + environmentNamespace string, +) (*eventproto.Editor, error) { + editor, err := role.CheckRole(ctx, requiredRole, func(email string) (*accountproto.GetAccountResponse, error) { + return s.accountClient.GetAccount(ctx, &accountproto.GetAccountRequest{ + Email: email, + EnvironmentNamespace: environmentNamespace, + }) + }) + if err != nil { + switch status.Code(err) { + case codes.Unauthenticated: + 
s.logger.Info( + "Unauthenticated", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, localizedError(statusUnauthenticated, locale.JaJP) + case codes.PermissionDenied: + s.logger.Info( + "Permission denied", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, localizedError(statusPermissionDenied, locale.JaJP) + default: + s.logger.Error( + "Failed to check role", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + } + return editor, nil +} diff --git a/pkg/user/api/error.go b/pkg/user/api/error.go new file mode 100644 index 000000000..114c9697c --- /dev/null +++ b/pkg/user/api/error.go @@ -0,0 +1,106 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc/codes" + gstatus "google.golang.org/grpc/status" + + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/rpc/status" +) + +var ( + statusInternal = gstatus.New(codes.Internal, "user: internal") + statusInvalidCursor = gstatus.New(codes.InvalidArgument, "user: cursor is invalid") + statusInvalidOrderBy = gstatus.New(codes.InvalidArgument, "user: order_by is invalid") + statusMissingUserID = gstatus.New(codes.InvalidArgument, "user: user id must be specified") + statusNotFound = gstatus.New(codes.NotFound, "user: not found") + statusUnauthenticated = gstatus.New(codes.Unauthenticated, "user: unauthenticated") + statusPermissionDenied = gstatus.New(codes.PermissionDenied, "user: permission denied") + + errInternalJaJP = status.MustWithDetails( + statusInternal, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "内部エラーが発生しました", + }, + ) + errInvalidCursorJaJP = status.MustWithDetails( + statusInvalidCursor, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不正なcursorです", + }, + ) + errInvalidOrderByJaJP = status.MustWithDetails( + statusInvalidOrderBy, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "不正なソート順の指定です", + }, + ) + errMissingUserIDJaJP = status.MustWithDetails( + statusMissingUserID, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "user idは必須です", + }, + ) + errNotFoundJaJP = status.MustWithDetails( + statusNotFound, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "データが存在しません", + }, + ) + errUnauthenticatedJaJP = status.MustWithDetails( + statusUnauthenticated, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "認証されていません", + }, + ) + errPermissionDeniedJaJP = status.MustWithDetails( + statusPermissionDenied, + &errdetails.LocalizedMessage{ + Locale: locale.JaJP, + Message: "権限がありません", + }, + ) +) + +func 
localizedError(s *gstatus.Status, loc string) error { + // handle loc if multi-lang is necessary + switch s { + case statusInternal: + return errInternalJaJP + case statusInvalidCursor: + return errInvalidCursorJaJP + case statusInvalidOrderBy: + return errInvalidOrderByJaJP + case statusMissingUserID: + return errMissingUserIDJaJP + case statusNotFound: + return errNotFoundJaJP + case statusUnauthenticated: + return errUnauthenticatedJaJP + case statusPermissionDenied: + return errPermissionDeniedJaJP + default: + return errInternalJaJP + } +} diff --git a/pkg/user/api/user.go b/pkg/user/api/user.go new file mode 100644 index 000000000..f1652c892 --- /dev/null +++ b/pkg/user/api/user.go @@ -0,0 +1,157 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "context" + "strconv" + + "go.uber.org/zap" + + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + "github.com/bucketeer-io/bucketeer/pkg/user/domain" + userstorage "github.com/bucketeer-io/bucketeer/pkg/user/storage/v2" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + userproto "github.com/bucketeer-io/bucketeer/proto/user" +) + +const maxPageSizePerRequest = 50 + +func (s *userService) GetUser(ctx context.Context, req *userproto.GetUserRequest) (*userproto.GetUserResponse, error) { + _, err := s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + if err := s.validateGetUserRequest(req); err != nil { + return nil, err + } + user, err := s.getUser(ctx, req.UserId, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + return &userproto.GetUserResponse{ + User: user.User, + }, nil +} + +func (s *userService) validateGetUserRequest(req *userproto.GetUserRequest) error { + if req.UserId == "" { + return localizedError(statusMissingUserID, locale.JaJP) + } + return nil +} + +func (s *userService) getUser(ctx context.Context, userID, environmentNamespace string) (*domain.User, error) { + userStorage := userstorage.NewUserStorage(s.storageClient) + user, err := userStorage.GetUser(ctx, userID, environmentNamespace) + if err != nil { + if err == userstorage.ErrUserNotFound { + return nil, localizedError(statusNotFound, locale.JaJP) + } + s.logger.Error( + "Failed to get user", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("userId", userID), + zap.String("environmentNamespace", environmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return user, nil +} + +func (s *userService) ListUsers( + ctx context.Context, + req *userproto.ListUsersRequest, +) (*userproto.ListUsersResponse, 
error) { + _, err := s.checkRole(ctx, accountproto.Account_VIEWER, req.EnvironmentNamespace) + if err != nil { + return nil, err + } + whereParts := []mysql.WherePart{ + mysql.NewFilter("environment_namespace", "=", req.EnvironmentNamespace), + } + if req.SearchKeyword != "" { + whereParts = append(whereParts, mysql.NewSearchQuery([]string{"id"}, req.SearchKeyword)) + } + if req.From != 0 { + whereParts = append(whereParts, mysql.NewFilter("last_seen", ">=", req.From)) + } + if req.To != 0 { + whereParts = append(whereParts, mysql.NewFilter("last_seen", "<=", req.To)) + } + orders, err := s.newListOrders(req.OrderBy, req.OrderDirection) + if err != nil { + s.logger.Error( + "Invalid argument", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, err + } + if req.PageSize == 0 { + req.PageSize = maxPageSizePerRequest + } + limit := int(req.PageSize) + if req.Cursor == "" { + req.Cursor = "0" + } + offset, err := strconv.Atoi(req.Cursor) + if err != nil { + return nil, localizedError(statusInvalidCursor, locale.JaJP) + } + storage := userstorage.NewUserStorage(s.storageClient) + users, nextCursor, err := storage.ListUsers(ctx, whereParts, orders, limit, offset) + if err != nil { + s.logger.Error( + "Failed to list users", + log.FieldsFromImcomingContext(ctx).AddFields( + zap.Error(err), + zap.String("environmentNamespace", req.EnvironmentNamespace), + )..., + ) + return nil, localizedError(statusInternal, locale.JaJP) + } + return &userproto.ListUsersResponse{ + Users: users, + Cursor: strconv.Itoa(nextCursor), + }, nil +} + +func (s *userService) newListOrders( + orderBy userproto.ListUsersRequest_OrderBy, + orderDirection userproto.ListUsersRequest_OrderDirection, +) ([]*mysql.Order, error) { + var column string + switch orderBy { + case userproto.ListUsersRequest_DEFAULT, + userproto.ListUsersRequest_LAST_SEEN: + column = "last_seen" + case 
userproto.ListUsersRequest_CREATED_AT: + column = "created_at" + default: + return nil, localizedError(statusInvalidOrderBy, locale.JaJP) + } + direction := mysql.OrderDirectionAsc + if orderDirection == userproto.ListUsersRequest_DESC { + direction = mysql.OrderDirectionDesc + } + return []*mysql.Order{mysql.NewOrder(column, direction)}, nil +} diff --git a/pkg/user/api/user_test.go b/pkg/user/api/user_test.go new file mode 100644 index 000000000..9d6ff683a --- /dev/null +++ b/pkg/user/api/user_test.go @@ -0,0 +1,151 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "context" + "errors" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + + accountclientmock "github.com/bucketeer-io/bucketeer/pkg/account/client/mock" + "github.com/bucketeer-io/bucketeer/pkg/locale" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + mysqlmock "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + "github.com/bucketeer-io/bucketeer/pkg/token" + accountproto "github.com/bucketeer-io/bucketeer/proto/account" + userproto "github.com/bucketeer-io/bucketeer/proto/user" +) + +const userKind = "User" + +func TestValidateGetUserRequest(t *testing.T) { + patterns := []struct { + input *userproto.GetUserRequest + expected error + }{ + { + input: &userproto.GetUserRequest{UserId: "test", EnvironmentNamespace: "ns0"}, + expected: nil, + }, + { + input: &userproto.GetUserRequest{EnvironmentNamespace: "ns0"}, + expected: localizedError(statusMissingUserID, locale.JaJP), + }, + } + s := userService{} + for _, p := range patterns { + err := s.validateGetUserRequest(p.input) + assert.Equal(t, p.expected, err) + } +} + +func TestGetUserRequest(t *testing.T) { + mockController := gomock.NewController(t) + defer mockController.Finish() + patterns := []struct { + desc string + setup func(s *userService) + input *userproto.GetUserRequest + expected *userproto.GetUserResponse + expectedErr error + }{ + { + desc: "user not found", + setup: func(s *userService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows) + s.storageClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + input: &userproto.GetUserRequest{UserId: "user-id-0", EnvironmentNamespace: "ns0"}, + expected: nil, + expectedErr: localizedError(statusNotFound, locale.JaJP), + }, + { + desc: "internal error", + setup: func(s *userService) { + 
row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(errors.New("internal error")) + s.storageClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + input: &userproto.GetUserRequest{UserId: "user-id-1", EnvironmentNamespace: "ns0"}, + expected: nil, + expectedErr: localizedError(statusInternal, locale.JaJP), + }, + { + desc: "success", + setup: func(s *userService) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(nil) + s.storageClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + input: &userproto.GetUserRequest{UserId: "user-id-1", EnvironmentNamespace: "ns0"}, + expected: &userproto.GetUserResponse{ + User: &userproto.User{ + Id: "", + Data: nil, + TaggedData: nil, + LastSeen: 0, + CreatedAt: 0, + }, + }, + expectedErr: nil, + }, + } + for _, p := range patterns { + service := createUserService(mockController) + p.setup(service) + ctx := createContextWithToken(t, accountproto.Account_UNASSIGNED) + actual, err := service.GetUser(ctx, p.input) + assert.Equal(t, p.expected, actual) + assert.Equal(t, p.expectedErr, err) + } +} + +func createUserService(c *gomock.Controller) *userService { + accountClientMock := accountclientmock.NewMockClient(c) + ar := &accountproto.GetAccountResponse{ + Account: &accountproto.Account{ + Email: "email", + Role: accountproto.Account_VIEWER, + }, + } + accountClientMock.EXPECT().GetAccount(gomock.Any(), gomock.Any()).Return(ar, nil).AnyTimes() + return &userService{ + storageClient: mysqlmock.NewMockClient(c), + accountClient: accountClientMock, + logger: zap.NewNop().Named("api"), + } +} + +func createContextWithToken(t *testing.T, role accountproto.Account_Role) context.Context { + t.Helper() + token := &token.IDToken{ + Email: "test@example.com", + AdminRole: role, + } + ctx := context.TODO() + return 
context.WithValue(ctx, rpc.Key, token) +} diff --git a/pkg/user/client/BUILD.bazel b/pkg/user/client/BUILD.bazel new file mode 100644 index 000000000..d870a583d --- /dev/null +++ b/pkg/user/client/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["client.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/user/client", + visibility = ["//visibility:public"], + deps = [ + "//pkg/rpc/client:go_default_library", + "//proto/user:go_default_library", + "@org_golang_google_grpc//:go_default_library", + ], +) diff --git a/pkg/user/client/client.go b/pkg/user/client/client.go new file mode 100644 index 000000000..234a6b134 --- /dev/null +++ b/pkg/user/client/client.go @@ -0,0 +1,50 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package client + +import ( + "google.golang.org/grpc" + + rpcclient "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + proto "github.com/bucketeer-io/bucketeer/proto/user" +) + +type Client interface { + proto.UserServiceClient + Close() +} + +type client struct { + proto.UserServiceClient + address string + connection *grpc.ClientConn +} + +func NewClient(addr, certPath string, opts ...rpcclient.Option) (Client, error) { + conn, err := rpcclient.NewClientConn(addr, certPath, opts...) 
+ if err != nil { + return nil, err + } + return &client{ + UserServiceClient: proto.NewUserServiceClient(conn), + address: addr, + connection: conn, + }, nil +} + +func (c *client) Close() { + c.connection.Close() +} diff --git a/pkg/user/client/mock/BUILD.bazel b/pkg/user/client/mock/BUILD.bazel new file mode 100644 index 000000000..61e2211a1 --- /dev/null +++ b/pkg/user/client/mock/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["client.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/user/client/mock", + visibility = ["//visibility:public"], + deps = [ + "//proto/user:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@org_golang_google_grpc//:go_default_library", + ], +) diff --git a/pkg/user/client/mock/client.go b/pkg/user/client/mock/client.go new file mode 100644 index 000000000..52253ed6f --- /dev/null +++ b/pkg/user/client/mock/client.go @@ -0,0 +1,90 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: client.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + grpc "google.golang.org/grpc" + + user "github.com/bucketeer-io/bucketeer/proto/user" +) + +// MockClient is a mock of Client interface. +type MockClient struct { + ctrl *gomock.Controller + recorder *MockClientMockRecorder +} + +// MockClientMockRecorder is the mock recorder for MockClient. +type MockClientMockRecorder struct { + mock *MockClient +} + +// NewMockClient creates a new mock instance. +func NewMockClient(ctrl *gomock.Controller) *MockClient { + mock := &MockClient{ctrl: ctrl} + mock.recorder = &MockClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockClient) EXPECT() *MockClientMockRecorder { + return m.recorder +} + +// Close mocks base method. 
+func (m *MockClient) Close() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Close") +} + +// Close indicates an expected call of Close. +func (mr *MockClientMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockClient)(nil).Close)) +} + +// GetUser mocks base method. +func (m *MockClient) GetUser(ctx context.Context, in *user.GetUserRequest, opts ...grpc.CallOption) (*user.GetUserResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetUser", varargs...) + ret0, _ := ret[0].(*user.GetUserResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetUser indicates an expected call of GetUser. +func (mr *MockClientMockRecorder) GetUser(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUser", reflect.TypeOf((*MockClient)(nil).GetUser), varargs...) +} + +// ListUsers mocks base method. +func (m *MockClient) ListUsers(ctx context.Context, in *user.ListUsersRequest, opts ...grpc.CallOption) (*user.ListUsersResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListUsers", varargs...) + ret0, _ := ret[0].(*user.ListUsersResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListUsers indicates an expected call of ListUsers. +func (mr *MockClientMockRecorder) ListUsers(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListUsers", reflect.TypeOf((*MockClient)(nil).ListUsers), varargs...) 
+} diff --git a/pkg/user/cmd/persister/BUILD.bazel b/pkg/user/cmd/persister/BUILD.bazel new file mode 100644 index 000000000..4e5089109 --- /dev/null +++ b/pkg/user/cmd/persister/BUILD.bazel @@ -0,0 +1,22 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["persister.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/user/cmd/persister", + visibility = ["//visibility:public"], + deps = [ + "//pkg/cli:go_default_library", + "//pkg/feature/client:go_default_library", + "//pkg/health:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/pubsub:go_default_library", + "//pkg/pubsub/puller:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/rpc/client:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//pkg/user/persister:go_default_library", + "@in_gopkg_alecthomas_kingpin_v2//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/user/cmd/persister/persister.go b/pkg/user/cmd/persister/persister.go new file mode 100644 index 000000000..b1e02dac5 --- /dev/null +++ b/pkg/user/cmd/persister/persister.go @@ -0,0 +1,211 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package persister + +import ( + "context" + "time" + + "go.uber.org/zap" + kingpin "gopkg.in/alecthomas/kingpin.v2" + + "github.com/bucketeer-io/bucketeer/pkg/cli" + featureclient "github.com/bucketeer-io/bucketeer/pkg/feature/client" + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/pubsub" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + pst "github.com/bucketeer-io/bucketeer/pkg/user/persister" +) + +const command = "persister" + +type Persister interface { + Run(context.Context, metrics.Metrics, *zap.Logger) error +} + +type persister struct { + *kingpin.CmdClause + port *int + project *string + mysqlUser *string + mysqlPass *string + mysqlHost *string + mysqlPort *int + mysqlDBName *string + subscription *string + maxMPS *int + numWorkers *int + topic *string + flushSize *int + flushInterval *time.Duration + publishNumGoroutines *int + publishTimeout *time.Duration + featureService *string + certPath *string + keyPath *string + serviceTokenPath *string + pullerNumGoroutines *int + pullerMaxExtension *time.Duration + pullerMaxOutstandingMessages *int + pullerMaxOutstandingBytes *int +} + +func RegisterCommand(r cli.CommandRegistry, p cli.ParentCommand) cli.Command { + cmd := p.Command(command, "Start user persister") + persister := &persister{ + CmdClause: cmd, + port: cmd.Flag("port", "Port to bind to.").Default("9090").Int(), + project: cmd.Flag("project", "Google Cloud project name.").String(), + mysqlUser: cmd.Flag("mysql-user", "MySQL user.").Required().String(), + mysqlPass: cmd.Flag("mysql-pass", "MySQL password.").Required().String(), + mysqlHost: cmd.Flag("mysql-host", "MySQL host.").Required().String(), + mysqlPort: cmd.Flag("mysql-port", "MySQL port.").Required().Int(), + mysqlDBName: 
cmd.Flag("mysql-db-name", "MySQL database name.").Required().String(), + subscription: cmd.Flag("subscription", "Google PubSub subscription name.").Required().String(), + topic: cmd.Flag("topic", "Google PubSub topic name.").Required().String(), + maxMPS: cmd.Flag("max-mps", "Maximum messages should be handled in a second.").Default("1000").Int(), + numWorkers: cmd.Flag("num-workers", "Number of workers.").Default("2").Int(), + flushSize: cmd.Flag("flush-size", "Maximum number of messages in one flush.").Default("100").Int(), + flushInterval: cmd.Flag("flush-interval", "Maximum interval between two flushes.").Default("2s").Duration(), + publishNumGoroutines: cmd.Flag( + "publish-num-goroutines", + "The number of goroutines for publishing.", + ).Default("0").Int(), + publishTimeout: cmd.Flag( + "publish-timeout", + "The maximum time to publish a bundle of messages.", + ).Default("1m").Duration(), + featureService: cmd.Flag("feature-service", "bucketeer-feature-service address.").Default("feature:9090").String(), + certPath: cmd.Flag("cert", "Path to TLS certificate.").Required().String(), + keyPath: cmd.Flag("key", "Path to TLS key.").Required().String(), + serviceTokenPath: cmd.Flag("service-token", "Path to service token.").Required().String(), + pullerNumGoroutines: cmd.Flag( + "puller-num-goroutines", + "Number of goroutines will be spawned to pull messages.", + ).Int(), + pullerMaxExtension: cmd.Flag( + "puller-max-extension", + "Maximum duration in seconds until a message expires.", + ).Default("600s").Duration(), + pullerMaxOutstandingMessages: cmd.Flag( + "puller-max-outstanding-messages", + "Maximum number of unprocessed messages.", + ).Int(), + pullerMaxOutstandingBytes: cmd.Flag("puller-max-outstanding-bytes", "Maximum size of unprocessed messages.").Int(), + } + r.RegisterCommand(persister) + return persister +} + +func (p *persister) Run(ctx context.Context, metrics metrics.Metrics, logger *zap.Logger) error { + registerer := metrics.DefaultRegisterer() 
+ + mysqlClient, err := p.createMySQLClient(ctx, registerer, logger) + if err != nil { + return err + } + defer mysqlClient.Close() + + puller, err := p.createPuller(ctx, logger) + if err != nil { + return err + } + + creds, err := client.NewPerRPCCredentials(*p.serviceTokenPath) + if err != nil { + return err + } + + featureClient, err := featureclient.NewClient(*p.featureService, *p.certPath, + client.WithPerRPCCredentials(creds), + client.WithDialTimeout(30*time.Second), + client.WithBlock(), + client.WithMetrics(registerer), + client.WithLogger(logger), + ) + if err != nil { + return err + } + defer featureClient.Close() + + persister := pst.NewPersister( + mysqlClient, + featureClient, + puller, + pst.WithMaxMPS(*p.maxMPS), + pst.WithNumWorkers(*p.numWorkers), + pst.WithFlushSize(*p.flushSize), + pst.WithFlushInterval(*p.flushInterval), + pst.WithMetrics(registerer), + pst.WithLogger(logger), + ) + defer persister.Stop() + go persister.Run() // nolint:errcheck + + healthChecker := health.NewGrpcChecker( + health.WithTimeout(time.Second), + health.WithCheck("metrics", metrics.Check), + health.WithCheck("persister", persister.Check), + ) + go healthChecker.Run(ctx) + + server := rpc.NewServer(healthChecker, *p.certPath, *p.keyPath, + rpc.WithPort(*p.port), + rpc.WithMetrics(registerer), + rpc.WithLogger(logger), + rpc.WithHandler("/health", healthChecker), + ) + defer server.Stop(10 * time.Second) + go server.Run() + + <-ctx.Done() + return nil +} + +func (p *persister) createMySQLClient( + ctx context.Context, + registerer metrics.Registerer, + logger *zap.Logger, +) (mysql.Client, error) { + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + return mysql.NewClient( + ctx, + *p.mysqlUser, *p.mysqlPass, *p.mysqlHost, + *p.mysqlPort, + *p.mysqlDBName, + mysql.WithLogger(logger), + mysql.WithMetrics(registerer), + ) +} + +func (p *persister) createPuller(ctx context.Context, logger *zap.Logger) (puller.Puller, error) { + ctx, cancel := 
context.WithTimeout(ctx, 5*time.Second) + defer cancel() + client, err := pubsub.NewClient(ctx, *p.project, pubsub.WithLogger(logger)) + if err != nil { + return nil, err + } + return client.CreatePuller(*p.subscription, *p.topic, + pubsub.WithNumGoroutines(*p.pullerNumGoroutines), + pubsub.WithMaxExtension(*p.pullerMaxExtension), + pubsub.WithMaxOutstandingMessages(*p.pullerMaxOutstandingMessages), + pubsub.WithMaxOutstandingBytes(*p.pullerMaxOutstandingBytes), + ) +} diff --git a/pkg/user/cmd/server/BUILD.bazel b/pkg/user/cmd/server/BUILD.bazel new file mode 100644 index 000000000..6d3de3e3f --- /dev/null +++ b/pkg/user/cmd/server/BUILD.bazel @@ -0,0 +1,21 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["server.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/user/cmd/server", + visibility = ["//visibility:public"], + deps = [ + "//pkg/account/client:go_default_library", + "//pkg/cli:go_default_library", + "//pkg/health:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/rpc:go_default_library", + "//pkg/rpc/client:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//pkg/token:go_default_library", + "//pkg/user/api:go_default_library", + "@in_gopkg_alecthomas_kingpin_v2//:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) diff --git a/pkg/user/cmd/server/server.go b/pkg/user/cmd/server/server.go new file mode 100644 index 000000000..ea8ec2209 --- /dev/null +++ b/pkg/user/cmd/server/server.go @@ -0,0 +1,155 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + "context" + "time" + + "go.uber.org/zap" + kingpin "gopkg.in/alecthomas/kingpin.v2" + + accountclient "github.com/bucketeer-io/bucketeer/pkg/account/client" + "github.com/bucketeer-io/bucketeer/pkg/cli" + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/rpc" + "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + "github.com/bucketeer-io/bucketeer/pkg/token" + "github.com/bucketeer-io/bucketeer/pkg/user/api" +) + +const command = "server" + +type server struct { + *kingpin.CmdClause + port *int + project *string + mysqlUser *string + mysqlPass *string + mysqlHost *string + mysqlPort *int + mysqlDBName *string + accountService *string + certPath *string + keyPath *string + serviceTokenPath *string + + oauthKeyPath *string + oauthClientID *string + oauthIssuer *string +} + +func RegisterCommand(r cli.CommandRegistry, p cli.ParentCommand) cli.Command { + cmd := p.Command(command, "Start the server") + server := &server{ + CmdClause: cmd, + port: cmd.Flag("port", "Port to bind to.").Default("9090").Int(), + project: cmd.Flag("project", "Google Cloud project name.").String(), + mysqlUser: cmd.Flag("mysql-user", "MySQL user.").Required().String(), + mysqlPass: cmd.Flag("mysql-pass", "MySQL password.").Required().String(), + mysqlHost: cmd.Flag("mysql-host", "MySQL host.").Required().String(), + mysqlPort: cmd.Flag("mysql-port", "MySQL port.").Required().Int(), + mysqlDBName: 
cmd.Flag("mysql-db-name", "MySQL database name.").Required().String(), + accountService: cmd.Flag( + "account-service", + "bucketeer-account-service address.", + ).Default("account:9090").String(), + certPath: cmd.Flag("cert", "Path to TLS certificate.").Required().String(), + keyPath: cmd.Flag("key", "Path to TLS key.").Required().String(), + serviceTokenPath: cmd.Flag("service-token", "Path to service token.").Required().String(), + oauthKeyPath: cmd.Flag("oauth-key", "Path to public key used to verify oauth token.").Required().String(), + oauthClientID: cmd.Flag("oauth-client-id", "The oauth clientID registered at dex.").Required().String(), + oauthIssuer: cmd.Flag("oauth-issuer", "The url of dex issuer.").Required().String(), + } + r.RegisterCommand(server) + return server +} + +func (s *server) Run(ctx context.Context, metrics metrics.Metrics, logger *zap.Logger) error { + registerer := metrics.DefaultRegisterer() + + db, err := s.createMySQLClient(ctx, registerer, logger) + if err != nil { + return err + } + defer db.Close() + + creds, err := client.NewPerRPCCredentials(*s.serviceTokenPath) + if err != nil { + return err + } + + accountClient, err := accountclient.NewClient(*s.accountService, *s.certPath, + client.WithPerRPCCredentials(creds), + client.WithDialTimeout(30*time.Second), + client.WithBlock(), + client.WithMetrics(registerer), + client.WithLogger(logger), + ) + if err != nil { + return err + } + defer accountClient.Close() + + service := api.NewUserService( + db, + accountClient, + api.WithLogger(logger), + ) + + verifier, err := token.NewVerifier(*s.oauthKeyPath, *s.oauthIssuer, *s.oauthClientID) + if err != nil { + return err + } + + healthChecker := health.NewGrpcChecker( + health.WithTimeout(time.Second), + health.WithCheck("metrics", metrics.Check), + ) + go healthChecker.Run(ctx) + + server := rpc.NewServer(service, *s.certPath, *s.keyPath, + rpc.WithPort(*s.port), + rpc.WithVerifier(verifier), + rpc.WithMetrics(registerer), + 
rpc.WithLogger(logger), + rpc.WithService(healthChecker), + rpc.WithHandler("/health", healthChecker), + ) + defer server.Stop(10 * time.Second) + go server.Run() + + <-ctx.Done() + return nil +} + +func (s *server) createMySQLClient( + ctx context.Context, + registerer metrics.Registerer, + logger *zap.Logger, +) (mysql.Client, error) { + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + return mysql.NewClient( + ctx, + *s.mysqlUser, *s.mysqlPass, *s.mysqlHost, + *s.mysqlPort, + *s.mysqlDBName, + mysql.WithLogger(logger), + mysql.WithMetrics(registerer), + ) +} diff --git a/pkg/user/domain/BUILD.bazel b/pkg/user/domain/BUILD.bazel new file mode 100644 index 000000000..9a980daab --- /dev/null +++ b/pkg/user/domain/BUILD.bazel @@ -0,0 +1,20 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["user.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/user/domain", + visibility = ["//visibility:public"], + deps = ["//proto/user:go_default_library"], +) + +go_test( + name = "go_default_test", + srcs = ["user_test.go"], + embed = [":go_default_library"], + deps = [ + "//proto/user:go_default_library", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + ], +) diff --git a/pkg/user/domain/user.go b/pkg/user/domain/user.go new file mode 100644 index 000000000..92da38bd1 --- /dev/null +++ b/pkg/user/domain/user.go @@ -0,0 +1,66 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package domain + +import ( + "errors" + "strings" + + userproto "github.com/bucketeer-io/bucketeer/proto/user" +) + +var ( + ErrNotSameID = errors.New("user: user id is not same") + ErrNotLater = errors.New("user: update user is not later") +) + +type User struct { + *userproto.User +} + +func (u *User) ID() string { + return u.Id +} + +func (u *User) UpdateMe(newer *User) error { + if u.Id != newer.Id { + return ErrNotSameID + } + if u.LastSeen >= newer.LastSeen { + return ErrNotLater + } + u.LastSeen = newer.LastSeen + for key, newData := range newer.TaggedData { + u.TaggedData[key] = u.trimData(newData) + } + return nil +} + +func (u *User) trimData(data *userproto.User_Data) *userproto.User_Data { + if data == nil { + return nil + } + for key, val := range data.Value { + data.Value[key] = strings.TrimSpace(val) + } + return data +} + +func (u *User) Data(tag string) map[string]string { + if u.TaggedData[tag] == nil { + return nil + } + return u.TaggedData[tag].Value +} diff --git a/pkg/user/domain/user_test.go b/pkg/user/domain/user_test.go new file mode 100644 index 000000000..57c1a4231 --- /dev/null +++ b/pkg/user/domain/user_test.go @@ -0,0 +1,203 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package domain + +import ( + "testing" + + proto "github.com/golang/protobuf/proto" + "github.com/stretchr/testify/assert" + + userproto "github.com/bucketeer-io/bucketeer/proto/user" +) + +func TestUpdateMe(t *testing.T) { + t.Parallel() + patterns := map[string]struct { + origin *User + input *User + expected *User + expectedErr error + }{ + "update without data": { + origin: &User{ + User: &userproto.User{ + Id: "hoge", + LastSeen: 0, + }, + }, + input: &User{ + User: &userproto.User{ + Id: "hoge", + LastSeen: 1, + }, + }, + expected: &User{ + User: &userproto.User{ + Id: "hoge", + Data: map[string]string{}, + LastSeen: 1, + }, + }, + }, + "update overriding data": { + origin: &User{ + User: &userproto.User{ + Id: "id", + TaggedData: map[string]*userproto.User_Data{"tag": {Value: map[string]string{"key-0": "val-0", "key-1": "val-1"}}}, + LastSeen: 0, + }, + }, + input: &User{ + User: &userproto.User{ + Id: "id", + TaggedData: map[string]*userproto.User_Data{"tag": {Value: map[string]string{"key-0": " val-0 ", "key-2": " val-2 "}}}, + LastSeen: 1, + }, + }, + expected: &User{ + User: &userproto.User{ + Id: "id", + TaggedData: map[string]*userproto.User_Data{"tag": {Value: map[string]string{"key-0": "val-0", "key-2": "val-2"}}}, + LastSeen: 1, + }, + }, + }, + "update appending data": { + origin: &User{ + User: &userproto.User{ + Id: "id", + TaggedData: map[string]*userproto.User_Data{ + "tag-0": {Value: map[string]string{"key-0": "val-0", "key-1": "val-1"}}, + "tag-1": {Value: map[string]string{"key-1": "val-1", "key-2": "val-2"}}, + }, + 
LastSeen: 0, + }, + }, + input: &User{ + User: &userproto.User{ + Id: "id", + TaggedData: map[string]*userproto.User_Data{ + "tag-2": {Value: map[string]string{"key-2": " val-2 ", "key-3": " val-3 "}}, + "tag-3": {Value: map[string]string{"key-3": " val-3 ", "key-4": " val-4 "}}, + }, + LastSeen: 1, + }, + }, + expected: &User{ + User: &userproto.User{ + Id: "id", + TaggedData: map[string]*userproto.User_Data{ + "tag-0": {Value: map[string]string{"key-0": "val-0", "key-1": "val-1"}}, + "tag-1": {Value: map[string]string{"key-1": "val-1", "key-2": "val-2"}}, + "tag-2": {Value: map[string]string{"key-2": "val-2", "key-3": "val-3"}}, + "tag-3": {Value: map[string]string{"key-3": "val-3", "key-4": "val-4"}}, + }, + LastSeen: 1, + }, + }, + }, + "err: id not same": { + origin: &User{ + User: &userproto.User{ + Id: "foo", + LastSeen: 0, + }, + }, + input: &User{ + User: &userproto.User{ + Id: "fee", + LastSeen: 1, + }, + }, + expected: &User{ + User: &userproto.User{ + Id: "foo", + LastSeen: 0, + }, + }, + expectedErr: ErrNotSameID, + }, + "err: id not later": { + origin: &User{ + User: &userproto.User{ + Id: "foo", + LastSeen: 1, + }, + }, + input: &User{ + User: &userproto.User{ + Id: "foo", + LastSeen: 0, + }, + }, + expected: &User{ + User: &userproto.User{ + Id: "foo", + LastSeen: 1, + }, + }, + expectedErr: ErrNotLater, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + err := p.origin.UpdateMe(p.input) + assert.True(t, proto.Equal(p.expected, p.origin)) + assert.Equal(t, p.expectedErr, err) + }) + } +} + +func TestData(t *testing.T) { + t.Parallel() + patterns := map[string]struct { + origin *User + input string + expected map[string]string + }{ + "no data": { + origin: &User{ + User: &userproto.User{ + TaggedData: map[string]*userproto.User_Data{ + "t0": {Value: map[string]string{"t0-k0": "t0-v0"}}, + }, + }, + }, + input: "t1", + expected: nil, + }, + "hit": { + origin: &User{ + User: &userproto.User{ + TaggedData: 
map[string]*userproto.User_Data{ + "t0": {Value: map[string]string{"t0-k0": "t0-v0"}}, + "t1": {Value: map[string]string{"t1-k0": "t1-v0"}}, + }, + }, + }, + input: "t1", + expected: map[string]string{ + "t1-k0": "t1-v0", + }, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + actual := p.origin.Data(p.input) + assert.Equal(t, p.expected, actual) + }) + } +} diff --git a/pkg/user/persister/BUILD.bazel b/pkg/user/persister/BUILD.bazel new file mode 100644 index 000000000..a5b9fe30e --- /dev/null +++ b/pkg/user/persister/BUILD.bazel @@ -0,0 +1,48 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "metrics.go", + "persister.go", + ], + importpath = "github.com/bucketeer-io/bucketeer/pkg/user/persister", + visibility = ["//visibility:public"], + deps = [ + "//pkg/errgroup:go_default_library", + "//pkg/feature/client:go_default_library", + "//pkg/health:go_default_library", + "//pkg/metrics:go_default_library", + "//pkg/pubsub/puller:go_default_library", + "//pkg/pubsub/puller/codes:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//pkg/user/domain:go_default_library", + "//pkg/user/storage/v2:go_default_library", + "//pkg/uuid:go_default_library", + "//proto/event/client:go_default_library", + "//proto/event/service:go_default_library", + "//proto/user:go_default_library", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_golang_protobuf//ptypes:go_default_library_gen", + "@com_github_prometheus_client_golang//prometheus:go_default_library", + "@org_uber_go_zap//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["persister_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/feature/client/mock:go_default_library", + "//pkg/log:go_default_library", + "//pkg/storage/v2/mysql:go_default_library", + "//pkg/storage/v2/mysql/mock:go_default_library", + "//pkg/uuid:go_default_library", + 
"//proto/event/service:go_default_library", + "//proto/user:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + ], +) diff --git a/pkg/user/persister/metrics.go b/pkg/user/persister/metrics.go new file mode 100644 index 000000000..a6f80c401 --- /dev/null +++ b/pkg/user/persister/metrics.go @@ -0,0 +1,55 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package persister
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+
+	"github.com/bucketeer-io/bucketeer/pkg/metrics"
+)
+
+// Prometheus collectors for the user persister. They are package-level so
+// the worker loop can increment them directly; registration happens once
+// via registerMetrics when a Registerer is configured.
+var (
+	// receivedCounter counts every message pulled from the subscription,
+	// before any validation or deduplication.
+	receivedCounter = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Namespace: "bucketeer",
+			Subsystem: "user",
+			Name:      "persister_received_total",
+			Help:      "Total number of received messages",
+		})
+
+	// handledCounter counts processed messages, partitioned by outcome
+	// code (e.g. OK, BadMessage, DuplicateID, Repeatable/NonRepeatableError).
+	handledCounter = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Namespace: "bucketeer",
+			Subsystem: "user",
+			Name:      "persister_handled_total",
+			Help:      "Total number of handled messages",
+		}, []string{"code"})
+
+	// cacheCounter counts cache lookups by type/layer/result code.
+	// NOTE(review): the name "api_cache_requests_total" does not follow the
+	// "persister_*" naming of the other metrics in this file — presumably
+	// copied from the api package; confirm the intended metric name.
+	cacheCounter = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Namespace: "bucketeer",
+			Subsystem: "user",
+			Name:      "api_cache_requests_total",
+			Help:      "Total number of cache requests",
+		}, []string{"type", "layer", "code"})
+)
+
+// registerMetrics registers all of the collectors above with r,
+// panicking on duplicate registration (MustRegister semantics).
+func registerMetrics(r metrics.Registerer) {
+	r.MustRegister(
+		receivedCounter,
+		handledCounter,
+		cacheCounter,
+	)
+}
diff --git a/pkg/user/persister/persister.go b/pkg/user/persister/persister.go
new file mode 100644
index 000000000..8ad176faa
--- /dev/null
+++ b/pkg/user/persister/persister.go
@@ -0,0 +1,338 @@
+// Copyright 2022 The Bucketeer Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package persister + +import ( + "context" + "time" + + "github.com/golang/protobuf/proto" // nolint:staticcheck + "github.com/golang/protobuf/ptypes" + "go.uber.org/zap" + + "github.com/bucketeer-io/bucketeer/pkg/errgroup" + featureclient "github.com/bucketeer-io/bucketeer/pkg/feature/client" + "github.com/bucketeer-io/bucketeer/pkg/health" + "github.com/bucketeer-io/bucketeer/pkg/metrics" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller" + "github.com/bucketeer-io/bucketeer/pkg/pubsub/puller/codes" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + userdomain "github.com/bucketeer-io/bucketeer/pkg/user/domain" + ustorage "github.com/bucketeer-io/bucketeer/pkg/user/storage/v2" + "github.com/bucketeer-io/bucketeer/pkg/uuid" + ecproto "github.com/bucketeer-io/bucketeer/proto/event/client" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/service" + userproto "github.com/bucketeer-io/bucketeer/proto/user" +) + +type options struct { + maxMPS int + numWorkers int + flushSize int + flushInterval time.Duration + pubsubTimeout time.Duration + metrics metrics.Registerer + logger *zap.Logger +} + +type Option func(*options) + +var defaultOptions = &options{ + maxMPS: 1000, + numWorkers: 1, + flushSize: 1000000, + flushInterval: time.Second, + pubsubTimeout: 20 * time.Second, + logger: zap.NewNop(), +} + +func WithMaxMPS(mps int) Option { + return func(opts *options) { + opts.maxMPS = mps + } +} + +func WithNumWorkers(n int) Option { + return func(opts *options) { + opts.numWorkers = n + } +} + +func WithFlushSize(s int) Option { + return func(opts *options) { + opts.flushSize = s + } +} + +func WithFlushInterval(i time.Duration) Option { + return func(opts *options) { + opts.flushInterval = i + } +} + +func WithMetrics(r metrics.Registerer) Option { + return func(opts *options) { + opts.metrics = r + } +} + +func WithLogger(logger *zap.Logger) Option { + return func(opts *options) { + opts.logger = logger + } +} + +type Persister interface { + 
Run() error + Stop() + Check(context.Context) health.Status +} + +type persister struct { + mysqlClient mysql.Client + featureClient featureclient.Client + timeNow func() time.Time + newUUID func() (*uuid.UUID, error) + puller puller.RateLimitedPuller + group errgroup.Group + opts *options + logger *zap.Logger + ctx context.Context + cancel func() + doneCh chan struct{} +} + +func NewPersister( + mysqlClient mysql.Client, + featureClient featureclient.Client, + p puller.Puller, + opts ...Option) Persister { + + dopts := defaultOptions + for _, opt := range opts { + opt(dopts) + } + ctx, cancel := context.WithCancel(context.Background()) + if dopts.metrics != nil { + registerMetrics(dopts.metrics) + } + return &persister{ + mysqlClient: mysqlClient, + featureClient: featureClient, + timeNow: time.Now, + newUUID: uuid.NewUUID, + puller: puller.NewRateLimitedPuller(p, dopts.maxMPS), + opts: dopts, + logger: dopts.logger.Named("persister"), + ctx: ctx, + cancel: cancel, + doneCh: make(chan struct{}), + } +} + +func (p *persister) Run() error { + defer close(p.doneCh) + p.group.Go(func() error { + return p.puller.Run(p.ctx) + }) + for i := 0; i < p.opts.numWorkers; i++ { + p.group.Go(p.runWorker) + } + return p.group.Wait() +} + +func (p *persister) Stop() { + p.cancel() + <-p.doneCh +} + +func (p *persister) Check(ctx context.Context) health.Status { + select { + case <-p.ctx.Done(): + p.logger.Error("Unhealthy due to context Done is closed", zap.Error(p.ctx.Err())) + return health.Unhealthy + default: + if p.group.FinishedCount() > 0 { + p.logger.Error("Unhealthy", zap.Int32("FinishedCount", p.group.FinishedCount())) + return health.Unhealthy + } + return health.Healthy + } +} + +func (p *persister) runWorker() error { + chunk := make(map[string]*puller.Message, p.opts.flushSize) + timer := time.NewTimer(p.opts.flushInterval) + defer timer.Stop() + for { + select { + case msg, ok := <-p.puller.MessageCh(): + if !ok { + return nil + } + receivedCounter.Inc() + id := 
msg.Attributes["id"] + if id == "" { + msg.Ack() + handledCounter.WithLabelValues(codes.MissingID.String()).Inc() + continue + } + if pre, ok := chunk[id]; ok { + pre.Ack() + p.logger.Warn("Message with duplicate id", zap.String("id", id)) + handledCounter.WithLabelValues(codes.DuplicateID.String()).Inc() + } + chunk[id] = msg + if len(chunk) >= p.opts.flushSize { + p.handleChunk(chunk) + chunk = make(map[string]*puller.Message, p.opts.flushSize) + timer.Reset(p.opts.flushInterval) + } + case <-timer.C: + if len(chunk) > 0 { + p.handleChunk(chunk) + chunk = make(map[string]*puller.Message, p.opts.flushSize) + } + timer.Reset(p.opts.flushInterval) + case <-p.ctx.Done(): + return nil + } + } +} + +func (p *persister) handleChunk(chunk map[string]*puller.Message) { + for _, msg := range chunk { + event, err := p.unmarshalMessage(msg) + // The message is acked no matter what error is returned, + // because the data will be sent again from the SDK from time to time. + msg.Ack() + if err != nil { + handledCounter.WithLabelValues(codes.BadMessage.String()).Inc() + continue + } + if !p.validateEvent(event) { + handledCounter.WithLabelValues(codes.BadMessage.String()).Inc() + continue + } + ok, repeatable := p.upsert(event) + if !ok { + if repeatable { + handledCounter.WithLabelValues(codes.RepeatableError.String()).Inc() + } else { + handledCounter.WithLabelValues(codes.NonRepeatableError.String()).Inc() + } + continue + } + handledCounter.WithLabelValues(codes.OK.String()).Inc() + } +} + +func (p *persister) validateEvent(event *eventproto.UserEvent) bool { + if event.UserId == "" { + p.logger.Warn("Message contains an empty User Id", zap.Any("event", event)) + return false + } + if event.LastSeen == 0 { + p.logger.Warn("Message's LastSeen is zero", zap.Any("event", event)) + return false + } + return true +} + +func (p *persister) unmarshalMessage(msg *puller.Message) (*eventproto.UserEvent, error) { + event := &ecproto.Event{} + err := proto.Unmarshal(msg.Data, event) + 
if err != nil { + return nil, err + } + var userEvent eventproto.UserEvent + if err := ptypes.UnmarshalAny(event.Event, &userEvent); err != nil { + p.logger.Error("Failed to unmarshal Event -> UserEvent", zap.Error(err), zap.Any("msg", msg)) + return nil, err + } + return &userEvent, err +} + +func (p *persister) upsert(event *eventproto.UserEvent) (ok, repeatable bool) { + exist, err := p.getUser(event.UserId, event.EnvironmentNamespace) + if err != nil && err != ustorage.ErrUserNotFound { + p.logger.Error("Failed to get User", + zap.Error(err), + zap.String("environmentNamespace", event.EnvironmentNamespace), + zap.String("userId", event.UserId), + zap.String("tag", event.Tag), + ) + return false, true + } + updatedUser, err := p.updateUser(exist, event) + if err != nil { + p.logger.Debug("Failed to update user", + zap.Error(err), + zap.String("environmentNamespace", event.EnvironmentNamespace), + zap.String("userId", event.UserId), + zap.String("tag", event.Tag), + ) + return true, false + } + storage := ustorage.NewUserStorage(p.mysqlClient) + if err := storage.UpsertUser(p.ctx, updatedUser, event.EnvironmentNamespace); err != nil { + p.logger.Error("Failed to upsert User into MySQL", + zap.Error(err), + zap.String("environmentNamespace", event.EnvironmentNamespace), + zap.String("userId", event.UserId), + zap.String("tag", event.Tag), + ) + return false, true + } + if exist == nil { + handledCounter.WithLabelValues(codes.NewID.String()).Inc() + } + return true, false +} + +func (p *persister) getUser(userID, environmentNamespace string) (*userproto.User, error) { + storage := ustorage.NewUserStorage(p.mysqlClient) + user, err := storage.GetUser(p.ctx, userID, environmentNamespace) + if err != nil { + return nil, err + } + return user.User, nil +} + +func (p *persister) updateUser( + existUser *userproto.User, + event *eventproto.UserEvent, +) (*userdomain.User, error) { + taggedData := map[string]*userproto.User_Data{event.Tag: {Value: event.Data}} + if 
existUser == nil { + return &userdomain.User{User: &userproto.User{ + Id: event.UserId, + LastSeen: event.LastSeen, + TaggedData: taggedData, + CreatedAt: time.Now().Unix(), + }}, nil + } + newer := &userdomain.User{User: &userproto.User{ + Id: event.UserId, + LastSeen: event.LastSeen, + TaggedData: taggedData, + }} + exist := &userdomain.User{User: existUser} + err := exist.UpdateMe(newer) + if err != nil { + return nil, err + } + return exist, nil +} diff --git a/pkg/user/persister/persister_test.go b/pkg/user/persister/persister_test.go new file mode 100644 index 000000000..15305442c --- /dev/null +++ b/pkg/user/persister/persister_test.go @@ -0,0 +1,266 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package persister + +import ( + "errors" + "reflect" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + featureclientmock "github.com/bucketeer-io/bucketeer/pkg/feature/client/mock" + "github.com/bucketeer-io/bucketeer/pkg/log" + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + mysqlmock "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" + "github.com/bucketeer-io/bucketeer/pkg/uuid" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/service" + userproto "github.com/bucketeer-io/bucketeer/proto/user" +) + +func TestValidateEvent(t *testing.T) { + t.Parallel() + patterns := []struct { + input *eventproto.UserEvent + expected bool + }{ + { + input: &eventproto.UserEvent{ + UserId: "hoge", + LastSeen: 3456789, + }, + expected: true, + }, + { + input: &eventproto.UserEvent{}, + expected: false, + }, + { + input: &eventproto.UserEvent{ + UserId: "", + LastSeen: 3456789, + }, + expected: false, + }, + { + input: &eventproto.UserEvent{ + UserId: "hoge", + LastSeen: 0, + }, + expected: false, + }, + } + logger, _ := log.NewLogger() + pst := persister{logger: logger} + for _, p := range patterns { + actual := pst.validateEvent(p.input) + assert.Equal(t, p.expected, actual) + } +} + +func TestUpsert(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + now := time.Now() + uuid, err := uuid.NewUUID() + require.NoError(t, err) + + patterns := map[string]struct { + setup func(*persister) + input *eventproto.UserEvent + expectedOK bool + expectedRepeatable bool + expected *userproto.User + }{ + "get user error": { + setup: func(p *persister) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(errors.New("internal")) + p.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + }, + input: 
&eventproto.UserEvent{ + EnvironmentNamespace: "ns0", + UserId: "id-1", + LastSeen: 3, + }, + expectedOK: false, + expectedRepeatable: true, + expected: &userproto.User{ + Id: "id-1", + LastSeen: 3, + }, + }, + "upsert error": { + setup: func(p *persister) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows) + p.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + p.mysqlClient.(*mysqlmock.MockClient).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("internal")) + }, + input: &eventproto.UserEvent{ + EnvironmentNamespace: "ns0", + UserId: "id-1", + LastSeen: 3, + }, + expectedOK: false, + expectedRepeatable: true, + expected: &userproto.User{ + Id: "id-1", + LastSeen: 3, + }, + }, + "upsert success": { + setup: func(p *persister) { + row := mysqlmock.NewMockRow(mockController) + row.EXPECT().Scan(gomock.Any()).Return(mysql.ErrNoRows) + p.mysqlClient.(*mysqlmock.MockClient).EXPECT().QueryRowContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(row) + p.mysqlClient.(*mysqlmock.MockClient).EXPECT().ExecContext( + gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, nil) + }, + input: &eventproto.UserEvent{ + EnvironmentNamespace: "ns0", + UserId: "id-1", + LastSeen: 3, + }, + expectedOK: true, + expectedRepeatable: false, + expected: &userproto.User{ + Id: "id-1", + LastSeen: 3, + }, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + pst := newPersisterWithMock(t, mockController, now, uuid) + if p.setup != nil { + p.setup(pst) + } + ok, repeatable := pst.upsert(p.input) + assert.Equal(t, p.expectedOK, ok) + assert.Equal(t, p.expectedRepeatable, repeatable) + }) + } +} + +func TestUpdateUser(t *testing.T) { + t.Parallel() + patterns := map[string]struct { + inputExist *userproto.User + inputEvent *eventproto.UserEvent + expectedUser *userproto.User + }{ + "not 
exist": { + inputExist: nil, + inputEvent: &eventproto.UserEvent{ + UserId: "uid-0", + Tag: "t-0", + Data: map[string]string{"d-0": "v-0"}, + LastSeen: int64(1), + }, + expectedUser: &userproto.User{ + Id: "uid-0", + Data: map[string]string{"d-0": "v-0"}, + TaggedData: map[string]*userproto.User_Data{"t-0": {Value: map[string]string{"d-0": "v-0"}}}, + LastSeen: int64(1), + }, + }, + "exists overriding data": { + inputExist: &userproto.User{ + Id: "uid-0", + TaggedData: map[string]*userproto.User_Data{"t-0": {Value: map[string]string{"d-0": "v-0"}}}, + LastSeen: int64(0), + }, + inputEvent: &eventproto.UserEvent{ + UserId: "uid-0", + Tag: "t-0", + Data: map[string]string{"d-0": "v-0", "d-1": "v-1"}, + LastSeen: int64(1), + }, + expectedUser: &userproto.User{ + Id: "uid-0", + TaggedData: map[string]*userproto.User_Data{"t-0": {Value: map[string]string{"d-0": "v-0", "d-1": "v-1"}}}, + LastSeen: int64(1), + }, + }, + "exists appending data": { + inputExist: &userproto.User{ + Id: "uid-0", + TaggedData: map[string]*userproto.User_Data{"t-0": {Value: map[string]string{"d-0": "v-0"}}}, + LastSeen: int64(0), + }, + inputEvent: &eventproto.UserEvent{ + UserId: "uid-0", + Tag: "t-1", + Data: map[string]string{"d-1": "v-1"}, + LastSeen: int64(1), + }, + expectedUser: &userproto.User{ + Id: "uid-0", + TaggedData: map[string]*userproto.User_Data{ + "t-0": {Value: map[string]string{"d-0": "v-0"}}, + "t-1": {Value: map[string]string{"d-1": "v-1"}}, + }, + LastSeen: int64(1), + }, + }, + } + for msg, p := range patterns { + t.Run(msg, func(t *testing.T) { + pst := &persister{opts: defaultOptions, logger: defaultOptions.logger.Named("persister")} + actualUser, _ := pst.updateUser(p.inputExist, p.inputEvent) + if msg == "not exist" { + assert.Equal(t, actualUser.User.Id, p.expectedUser.Id) + assert.True(t, len(actualUser.User.Data) == 0) + assert.Equal(t, actualUser.User.TaggedData, p.expectedUser.TaggedData) + assert.Equal(t, actualUser.LastSeen, p.expectedUser.LastSeen) + 
assert.True(t, actualUser.CreatedAt > 0) + } else { + assert.True(t, reflect.DeepEqual(actualUser.User, p.expectedUser)) + } + }) + } +} + +func newPersisterWithMock( + t *testing.T, + mockController *gomock.Controller, + now time.Time, + id *uuid.UUID, +) *persister { + logger, err := log.NewLogger() + require.NoError(t, err) + return &persister{ + mysqlClient: mysqlmock.NewMockClient(mockController), + featureClient: featureclientmock.NewMockClient(mockController), + timeNow: func() time.Time { return now }, + newUUID: func() (*uuid.UUID, error) { return id, nil }, + opts: defaultOptions, + logger: logger, + } +} diff --git a/pkg/user/storage/v2/BUILD.bazel b/pkg/user/storage/v2/BUILD.bazel new file mode 100644 index 000000000..77250fbd6 --- /dev/null +++ b/pkg/user/storage/v2/BUILD.bazel @@ -0,0 +1,24 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["user.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/user/storage/v2", + visibility = ["//visibility:public"], + deps = [ + "//pkg/storage/v2/mysql:go_default_library", + "//pkg/user/domain:go_default_library", + "//proto/user:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["user_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/storage/v2/mysql/mock:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + ], +) diff --git a/pkg/user/storage/v2/mock/BUILD.bazel b/pkg/user/storage/v2/mock/BUILD.bazel new file mode 100644 index 000000000..e008e1240 --- /dev/null +++ b/pkg/user/storage/v2/mock/BUILD.bazel @@ -0,0 +1,14 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["user.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/user/storage/v2/mock", + visibility = ["//visibility:public"], + deps = [ + 
"//pkg/storage/v2/mysql:go_default_library", + "//pkg/user/domain:go_default_library", + "//proto/user:go_default_library", + "@com_github_golang_mock//gomock:go_default_library", + ], +) diff --git a/pkg/user/storage/v2/mock/user.go b/pkg/user/storage/v2/mock/user.go new file mode 100644 index 000000000..f9cbe089a --- /dev/null +++ b/pkg/user/storage/v2/mock/user.go @@ -0,0 +1,99 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: user.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + mysql "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + domain "github.com/bucketeer-io/bucketeer/pkg/user/domain" + user "github.com/bucketeer-io/bucketeer/proto/user" +) + +// MockUserStorage is a mock of UserStorage interface. +type MockUserStorage struct { + ctrl *gomock.Controller + recorder *MockUserStorageMockRecorder +} + +// MockUserStorageMockRecorder is the mock recorder for MockUserStorage. +type MockUserStorageMockRecorder struct { + mock *MockUserStorage +} + +// NewMockUserStorage creates a new mock instance. +func NewMockUserStorage(ctrl *gomock.Controller) *MockUserStorage { + mock := &MockUserStorage{ctrl: ctrl} + mock.recorder = &MockUserStorageMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockUserStorage) EXPECT() *MockUserStorageMockRecorder { + return m.recorder +} + +// GetUser mocks base method. +func (m *MockUserStorage) GetUser(ctx context.Context, id, environmentNamespace string) (*domain.User, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUser", ctx, id, environmentNamespace) + ret0, _ := ret[0].(*domain.User) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetUser indicates an expected call of GetUser. 
+func (mr *MockUserStorageMockRecorder) GetUser(ctx, id, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUser", reflect.TypeOf((*MockUserStorage)(nil).GetUser), ctx, id, environmentNamespace) +} + +// GetUsers mocks base method. +func (m *MockUserStorage) GetUsers(ctx context.Context, ids []string, environmentNamespace string) ([]*domain.User, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUsers", ctx, ids, environmentNamespace) + ret0, _ := ret[0].([]*domain.User) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetUsers indicates an expected call of GetUsers. +func (mr *MockUserStorageMockRecorder) GetUsers(ctx, ids, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUsers", reflect.TypeOf((*MockUserStorage)(nil).GetUsers), ctx, ids, environmentNamespace) +} + +// ListUsers mocks base method. +func (m *MockUserStorage) ListUsers(ctx context.Context, whereParts []mysql.WherePart, orders []*mysql.Order, limit, offset int) ([]*user.User, int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListUsers", ctx, whereParts, orders, limit, offset) + ret0, _ := ret[0].([]*user.User) + ret1, _ := ret[1].(int) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// ListUsers indicates an expected call of ListUsers. +func (mr *MockUserStorageMockRecorder) ListUsers(ctx, whereParts, orders, limit, offset interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListUsers", reflect.TypeOf((*MockUserStorage)(nil).ListUsers), ctx, whereParts, orders, limit, offset) +} + +// UpsertUser mocks base method. 
+func (m *MockUserStorage) UpsertUser(ctx context.Context, user *domain.User, environmentNamespace string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpsertUser", ctx, user, environmentNamespace) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpsertUser indicates an expected call of UpsertUser. +func (mr *MockUserStorageMockRecorder) UpsertUser(ctx, user, environmentNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpsertUser", reflect.TypeOf((*MockUserStorage)(nil).UpsertUser), ctx, user, environmentNamespace) +} diff --git a/pkg/user/storage/v2/user.go b/pkg/user/storage/v2/user.go new file mode 100644 index 000000000..8458b26fc --- /dev/null +++ b/pkg/user/storage/v2/user.go @@ -0,0 +1,214 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:generate mockgen -source=$GOFILE -package=mock -destination=./mock/$GOFILE +package v2 + +import ( + "context" + "errors" + "fmt" + + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql" + "github.com/bucketeer-io/bucketeer/pkg/user/domain" + proto "github.com/bucketeer-io/bucketeer/proto/user" +) + +var ( + ErrUserNotFound = errors.New("user: not found") +) + +type UserStorage interface { + GetUser(ctx context.Context, id, environmentNamespace string) (*domain.User, error) + GetUsers(ctx context.Context, ids []string, environmentNamespace string) ([]*domain.User, error) + UpsertUser(ctx context.Context, user *domain.User, environmentNamespace string) error + ListUsers( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, + ) ([]*proto.User, int, error) +} + +type userStorage struct { + qe mysql.QueryExecer +} + +func NewUserStorage(qe mysql.QueryExecer) UserStorage { + return &userStorage{qe: qe} +} + +func (s *userStorage) GetUser(ctx context.Context, id, environmentNamespace string) (*domain.User, error) { + user := &proto.User{} + query := ` + SELECT + id, + tagged_data, + last_seen, + created_at + FROM + user + WHERE + id = ? AND + environment_namespace = ? 
+ ` + err := s.qe.QueryRowContext( + ctx, + query, + id, + environmentNamespace, + ).Scan( + &user.Id, + &mysql.JSONObject{Val: &user.TaggedData}, + &user.LastSeen, + &user.CreatedAt, + ) + if err != nil { + if err == mysql.ErrNoRows { + return nil, ErrUserNotFound + } + return nil, err + } + return &domain.User{User: user}, nil +} + +func (s *userStorage) GetUsers(ctx context.Context, ids []string, environmentNamespace string) ([]*domain.User, error) { + inFilterIDs := make([]interface{}, 0, len(ids)) + for _, id := range ids { + inFilterIDs = append(inFilterIDs, id) + } + whereParts := []mysql.WherePart{ + mysql.NewInFilter("id", inFilterIDs), + mysql.NewFilter("environment_namespace", "=", environmentNamespace), + } + whereSQL, whereArgs := mysql.ConstructWhereSQLString(whereParts) + query := fmt.Sprintf(` + SELECT + id, + tagged_data, + last_seen, + created_at + FROM + user + %s + `, whereSQL, + ) + rows, err := s.qe.QueryContext( + ctx, + query, + whereArgs..., + ) + if err != nil { + return nil, err + } + defer rows.Close() + entries := make([]*proto.User, 0, len(ids)) + for rows.Next() { + user := proto.User{} + err := rows.Scan( + &user.Id, + &mysql.JSONObject{Val: &user.TaggedData}, + &user.LastSeen, + &user.CreatedAt, + ) + if err != nil { + return nil, err + } + entries = append(entries, &user) + } + if rows.Err() != nil { + return nil, err + } + // NOTE: If the performance matters, remove the following loop and return protos. + domainUsers := make([]*domain.User, 0, len(entries)) + for _, e := range entries { + domainUsers = append(domainUsers, &domain.User{User: e}) + } + return domainUsers, nil +} + +func (s *userStorage) UpsertUser(ctx context.Context, user *domain.User, environmentNamespace string) error { + query := ` + INSERT INTO user ( + id, + tagged_data, + last_seen, + created_at, + environment_namespace + ) VALUES ( + ?, ?, ?, ?, ? 
+ ) ON DUPLICATE KEY UPDATE + tagged_data = VALUES(tagged_data), + last_seen = VALUES(last_seen) +` + _, err := s.qe.ExecContext( + ctx, + query, + user.Id, + mysql.JSONObject{Val: user.TaggedData}, + user.LastSeen, + user.CreatedAt, + environmentNamespace, + ) + if err != nil { + return err + } + return nil +} + +func (s *userStorage) ListUsers( + ctx context.Context, + whereParts []mysql.WherePart, + orders []*mysql.Order, + limit, offset int, +) ([]*proto.User, int, error) { + whereSQL, whereArgs := mysql.ConstructWhereSQLString(whereParts) + orderBySQL := mysql.ConstructOrderBySQLString(orders) + limitOffsetSQL := mysql.ConstructLimitOffsetSQLString(limit, offset) + query := fmt.Sprintf(` + SELECT + id, + tagged_data, + last_seen, + created_at + FROM + user + %s %s %s + `, whereSQL, orderBySQL, limitOffsetSQL, + ) + rows, err := s.qe.QueryContext(ctx, query, whereArgs...) + if err != nil { + return nil, 0, err + } + defer rows.Close() + users := make([]*proto.User, 0, limit) + for rows.Next() { + user := proto.User{} + err := rows.Scan( + &user.Id, + &mysql.JSONObject{Val: &user.TaggedData}, + &user.LastSeen, + &user.CreatedAt, + ) + if err != nil { + return nil, 0, err + } + users = append(users, &user) + } + if rows.Err() != nil { + return nil, 0, err + } + nextOffset := offset + len(users) + return users, nextOffset, nil +} diff --git a/pkg/user/storage/v2/user_test.go b/pkg/user/storage/v2/user_test.go new file mode 100644 index 000000000..7e513dd55 --- /dev/null +++ b/pkg/user/storage/v2/user_test.go @@ -0,0 +1,32 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v2 + +import ( + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/storage/v2/mysql/mock" +) + +func TestNewUserStorage(t *testing.T) { + t.Parallel() + mockController := gomock.NewController(t) + defer mockController.Finish() + storage := NewUserStorage(mock.NewMockQueryExecer(mockController)) + assert.IsType(t, &userStorage{}, storage) +} diff --git a/pkg/uuid/BUILD.bazel b/pkg/uuid/BUILD.bazel new file mode 100644 index 000000000..fa11fcfac --- /dev/null +++ b/pkg/uuid/BUILD.bazel @@ -0,0 +1,18 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["uuid.go"], + importpath = "github.com/bucketeer-io/bucketeer/pkg/uuid", + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["uuid_test.go"], + embed = [":go_default_library"], + deps = [ + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + ], +) diff --git a/pkg/uuid/uuid.go b/pkg/uuid/uuid.go new file mode 100644 index 000000000..80e5a3ef4 --- /dev/null +++ b/pkg/uuid/uuid.go @@ -0,0 +1,62 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package uuid + +import ( + "crypto/rand" + "errors" + "fmt" + "regexp" +) + +var ( + ErrIncorrectUUIDFormat = errors.New("uuid: format must be an uuid version 4") + // Version 4 + uuidRegex = regexp.MustCompile( + "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-4[a-fA-F0-9]{3}-[8|9|aA|bB][a-fA-F0-9]{3}-[a-fA-F0-9]{12}$", + ) +) + +// https://tools.ietf.org/html/rfc4122 +type UUID [16]byte + +func NewUUID() (*UUID, error) { + uuid := &UUID{} + if _, err := rand.Read(uuid[:]); err != nil { + return nil, err + } + uuid.setVariant() + uuid.setVersion() + return uuid, nil +} + +func ValidateUUID(id string) error { + if !uuidRegex.MatchString(id) { + return ErrIncorrectUUIDFormat + } + return nil +} + +func (uuid *UUID) String() string { + return fmt.Sprintf("%x-%x-%x-%x-%x", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:]) +} + +func (uuid *UUID) setVariant() { + uuid[8] = (uuid[8] & 0x3f) | 0x80 +} + +func (uuid *UUID) setVersion() { + uuid[6] = (uuid[6] & 0x0f) | 0x40 // version 4 +} diff --git a/pkg/uuid/uuid_test.go b/pkg/uuid/uuid_test.go new file mode 100644 index 000000000..a3580977e --- /dev/null +++ b/pkg/uuid/uuid_test.go @@ -0,0 +1,56 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package uuid + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestValidateUUID(t *testing.T) { + uuid, err := NewUUID() + require.NoError(t, err) + patterns := []*struct { + id string + expected error + }{ + { + id: "0efe416e 2fd2 4996 c5c3 194f05444f1f", + expected: ErrIncorrectUUIDFormat, + }, + { + id: "0efe416e2fd24996b5c3194f05444f1f", + expected: ErrIncorrectUUIDFormat, + }, + { + id: "0efe416e_2fd2_4996_b5c3_194f05444f1f", + expected: ErrIncorrectUUIDFormat, + }, + { + id: "0efe416e-2fd2-4996-b5c3-194f05444f1f", + expected: nil, + }, + { + id: uuid.String(), + expected: nil, + }, + } + for _, p := range patterns { + err := ValidateUUID(p.id) + assert.Equal(t, p.expected, err) + } +} diff --git a/proto/.clang-format b/proto/.clang-format new file mode 100644 index 000000000..f6cb8ad93 --- /dev/null +++ b/proto/.clang-format @@ -0,0 +1 @@ +BasedOnStyle: Google diff --git a/proto/BUILD.bazel b/proto/BUILD.bazel new file mode 100644 index 000000000..e69de29bb diff --git a/proto/Makefile b/proto/Makefile new file mode 100644 index 000000000..ca76a6930 --- /dev/null +++ b/proto/Makefile @@ -0,0 +1,46 @@ +PROTO_FOLDERS := $(filter-out ./external%, $(shell find . -name '*.proto' -print0 | xargs -0 -n1 dirname | sort --unique)) +GIT_TOP_DIR := $(shell realpath ..) 
+PROTOBUF_INCLUDE_DIR := $(GIT_TOP_DIR)/proto/external/protocolbuffers/protobuf/v3.18.1 + +.PHONY: go +go: remove-go + for f in ${PROTO_FOLDERS}; do \ + protoc -I"$(GIT_TOP_DIR)" \ + -I"$(PROTOBUF_INCLUDE_DIR)" \ + -I"${GOPATH}/src/github.com/googleapis/googleapis" \ + --go_out=plugins=grpc:${GOPATH}/src \ + $(GIT_TOP_DIR)/proto/$$f/*.proto; \ + done + +.PHONY: remove-go +remove-go: + find . -name "*.pb.go" -type f -delete + +.PHONY: check +check: fmt-check lock-check + +.PHONY: fmt +fmt: + find . -name "*.proto" | grep -v external | xargs clang-format -i + +.PHONY: fmt-check +fmt-check: + test -z "$$(find . -name "*.proto" | grep -v external | xargs clang-format -i -output-replacements-xml | grep " labels = 1; + google.protobuf.Duration duration = 2; +} + +message GetEvaluationSizeMetricsEvent { + map labels = 1; + int32 size_byte = 2; +} + +message TimeoutErrorCountMetricsEvent { + string tag = 1; +} + +message InternalErrorCountMetricsEvent { + string tag = 1; +} + +message OpsEvent { + int64 timestamp = 1; + string feature_id = 2; + int32 feature_version = 3; + string variation_id = 4; + string goal_id = 5; + string user_id = 6; +} + +message GoalBatchEvent { + string user_id = 1; + repeated UserGoalEventsOverTag user_goal_events_over_tags = 2; +} + +message UserGoalEventsOverTag { + string tag = 1; + repeated UserGoalEvent user_goal_events = 2; +} + +message UserGoalEvent { + int64 timestamp = 1; + string goal_id = 2; + double value = 3; +} \ No newline at end of file diff --git a/proto/event/domain/BUILD.bazel b/proto/event/domain/BUILD.bazel new file mode 100644 index 000000000..dd8bee232 --- /dev/null +++ b/proto/event/domain/BUILD.bazel @@ -0,0 +1,40 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +proto_library( + name = "domain_proto", + srcs = [ + "event.proto", + "localized_message.proto", + ], + visibility = 
["//visibility:public"], + deps = [ + "//proto/account:account_proto", + "//proto/autoops:autoops_proto", + "//proto/feature:feature_proto", + "//proto/notification:notification_proto", + "@com_google_protobuf//:any_proto", + "@com_google_protobuf//:wrappers_proto", + ], +) + +go_proto_library( + name = "domain_go_proto", + importpath = "github.com/bucketeer-io/bucketeer/proto/event/domain", + proto = ":domain_proto", + visibility = ["//visibility:public"], + deps = [ + "//proto/account:go_default_library", + "//proto/autoops:go_default_library", + "//proto/feature:go_default_library", + "//proto/notification:go_default_library", + ], +) + +go_library( + name = "go_default_library", + embed = [":domain_go_proto"], + importpath = "github.com/bucketeer-io/bucketeer/proto/event/domain", + visibility = ["//visibility:public"], +) diff --git a/proto/event/domain/event.proto b/proto/event/domain/event.proto new file mode 100644 index 000000000..fe7dd00ee --- /dev/null +++ b/proto/event/domain/event.proto @@ -0,0 +1,876 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package bucketeer.event.domain; +option go_package = "github.com/bucketeer-io/bucketeer/proto/event/domain"; + +import "google/protobuf/any.proto"; +import "google/protobuf/wrappers.proto"; + +import "proto/feature/clause.proto"; +import "proto/feature/feature.proto"; +import "proto/feature/rule.proto"; +import "proto/feature/variation.proto"; +import "proto/feature/strategy.proto"; +import "proto/feature/segment.proto"; +import "proto/feature/target.proto"; +import "proto/account/account.proto"; +import "proto/account/api_key.proto"; +import "proto/autoops/auto_ops_rule.proto"; +import "proto/autoops/clause.proto"; +import "proto/notification/subscription.proto"; +import "proto/notification/recipient.proto"; +import "proto/feature/prerequisite.proto"; + +message Event { + enum EntityType { + FEATURE = 0; + GOAL = 1; + EXPERIMENT = 2; + ACCOUNT = 3; + APIKEY = 4; + SEGMENT = 5; + ENVIRONMENT = 6; + ADMIN_ACCOUNT = 7; + AUTOOPS_RULE = 8; + PUSH = 9; + SUBSCRIPTION = 10; + ADMIN_SUBSCRIPTION = 11; + PROJECT = 12; + WEBHOOK = 13; + } + enum Type { + UNKNOWN = 0; + FEATURE_CREATED = 1; + FEATURE_RENAMED = 2; + FEATURE_ENABLED = 3; + FEATURE_DISABLED = 4; + FEATURE_DELETED = 5; + FEATURE_EVALUATION_DELAYABLE_SET = 6; + FEATURE_EVALUATION_UNDELAYABLE_SET = 7; + FEATURE_DESCRIPTION_CHANGED = 8; + FEATURE_VARIATION_ADDED = 9; + FEATURE_VARIATION_REMOVED = 10; + FEATURE_OFF_VARIATION_CHANGED = 11; + VARIATION_VALUE_CHANGED = 12; + VARIATION_NAME_CHANGED = 13; + VARIATION_DESCRIPTION_CHANGED = 14; + VARIATION_USER_ADDED = 15; + VARIATION_USER_REMOVED = 16; + FEATURE_RULE_ADDED = 17; + FEATURE_RULE_STRATEGY_CHANGED = 18; + FEATURE_RULE_DELETED = 19; + RULE_CLAUSE_ADDED = 20; + RULE_CLAUSE_DELETED = 21; + RULE_FIXED_STRATEGY_CHANGED = 22; + RULE_ROLLOUT_STRATEGY_CHANGED = 23; + CLAUSE_ATTRIBUTE_CHANGED = 24; + CLAUSE_OPERATOR_CHANGED = 25; + CLAUSE_VALUE_ADDED = 26; + CLAUSE_VALUE_REMOVED = 27; + FEATURE_DEFAULT_STRATEGY_CHANGED = 28; + 
FEATURE_TAG_ADDED = 29; + FEATURE_TAG_REMOVED = 30; + FEATURE_VERSION_INCREMENTED = 31; + FEATURE_ARCHIVED = 32; + FEATURE_CLONED = 33; + FEATURE_UNARCHIVED = 35; + SAMPLING_SEED_RESET = 34; + PREREQUISITE_ADDED = 36; + PREREQUISITE_REMOVED = 37; + PREREQUISITE_VARIATION_CHANGED = 38; + GOAL_CREATED = 100; + GOAL_RENAMED = 101; + GOAL_DESCRIPTION_CHANGED = 102; + GOAL_DELETED = 103; + GOAL_ARCHIVED = 104; + EXPERIMENT_CREATED = 200; + EXPERIMENT_STOPPED = 201; + EXPERIMENT_START_AT_CHANGED = 202; + EXPERIMENT_STOP_AT_CHANGED = 203; + EXPERIMENT_DELETED = 204; + EXPERIMENT_PERIOD_CHANGED = 205; + EXPERIMENT_NAME_CHANGED = 206; + EXPERIMENT_DESCRIPTION_CHANGED = 207; + EXPERIMENT_STARTED = 208; + EXPERIMENT_FINISHED = 209; + EXPERIMENT_ARCHIVED = 210; + ACCOUNT_CREATED = 300; + ACCOUNT_ROLE_CHANGED = 301; + ACCOUNT_ENABLED = 302; + ACCOUNT_DISABLED = 303; + ACCOUNT_DELETED = 304; + APIKEY_CREATED = 400; + APIKEY_NAME_CHANGED = 401; + APIKEY_ENABLED = 402; + APIKEY_DISABLED = 403; + SEGMENT_CREATED = 500; + SEGMENT_DELETED = 501; + SEGMENT_NAME_CHANGED = 502; + SEGMENT_DESCRIPTION_CHANGED = 503; + SEGMENT_RULE_ADDED = 504; + SEGMENT_RULE_DELETED = 505; + SEGMENT_RULE_CLAUSE_ADDED = 506; + SEGMENT_RULE_CLAUSE_DELETED = 507; + SEGMENT_CLAUSE_ATTRIBUTE_CHANGED = 508; + SEGMENT_CLAUSE_OPERATOR_CHANGED = 509; + SEGMENT_CLAUSE_VALUE_ADDED = 510; + SEGMENT_CLAUSE_VALUE_REMOVED = 511; + SEGMENT_USER_ADDED = 512; + SEGMENT_USER_DELETED = 513; + SEGMENT_BULK_UPLOAD_USERS = 514; + SEGMENT_BULK_UPLOAD_USERS_STATUS_CHANGED = 515; + ENVIRONMENT_CREATED = 600; + ENVIRONMENT_RENAMED = 601; + ENVIRONMENT_DESCRIPTION_CHANGED = 602; + ENVIRONMENT_DELETED = 603; + ADMIN_ACCOUNT_CREATED = 700; + ADMIN_ACCOUNT_ENABLED = 702; + ADMIN_ACCOUNT_DISABLED = 703; + AUTOOPS_RULE_CREATED = 800; + AUTOOPS_RULE_DELETED = 801; + AUTOOPS_RULE_OPS_TYPE_CHANGED = 802; + AUTOOPS_RULE_CLAUSE_DELETED = 803; + AUTOOPS_RULE_TRIGGERED_AT_CHANGED = 804; + OPS_EVENT_RATE_CLAUSE_ADDED = 805; + 
OPS_EVENT_RATE_CLAUSE_CHANGED = 806; + DATETIME_CLAUSE_ADDED = 807; + DATETIME_CLAUSE_CHANGED = 808; + PUSH_CREATED = 900; + PUSH_DELETED = 901; + PUSH_TAGS_ADDED = 902; + PUSH_TAGS_DELETED = 903; + PUSH_RENAMED = 904; + SUBSCRIPTION_CREATED = 1000; + SUBSCRIPTION_DELETED = 1001; + SUBSCRIPTION_ENABLED = 1002; + SUBSCRIPTION_DISABLED = 1003; + SUBSCRIPTION_SOURCE_TYPE_ADDED = 1004; + SUBSCRIPTION_SOURCE_TYPE_DELETED = 1005; + SUBSCRIPTION_RENAMED = 1006; + ADMIN_SUBSCRIPTION_CREATED = 1100; + ADMIN_SUBSCRIPTION_DELETED = 1101; + ADMIN_SUBSCRIPTION_ENABLED = 1102; + ADMIN_SUBSCRIPTION_DISABLED = 1103; + ADMIN_SUBSCRIPTION_SOURCE_TYPE_ADDED = 1104; + ADMIN_SUBSCRIPTION_SOURCE_TYPE_DELETED = 1105; + ADMIN_SUBSCRIPTION_RENAMED = 1106; + PROJECT_CREATED = 1200; + PROJECT_DESCRIPTION_CHANGED = 1201; + PROJECT_ENABLED = 1202; + PROJECT_DISABLED = 1203; + PROJECT_TRIAL_CREATED = 1204; + PROJECT_TRIAL_CONVERTED = 1205; + WEBHOOK_CREATED = 1300; + WEBHOOK_DELETED = 1301; + WEBHOOK_NAME_CHANGED = 1302; + WEBHOOK_DESCRIPTION_CHANGED = 1303; + WEBHOOK_CLAUSE_ADDED = 1304; + WEBHOOK_CLAUSE_CHANGED = 1305; + } + string id = 1; + int64 timestamp = 2; + EntityType entity_type = 3; + string entity_id = 4; + Type type = 5; + Editor editor = 6; + google.protobuf.Any data = 7; + string environment_namespace = 8; + bool is_admin_event = 9; // if true, it's stored in AdminDomainEvent table + // and AdminAuditLog table. 
+ Options options = 10; // optional +} + +message Editor { + string email = 1; + bucketeer.account.Account.Role role = 2; + bool is_admin = 3; +} + +message Options { + string comment = 1; + int32 new_version = 2; +} + +message FeatureCreatedEvent { + string id = 1; + string name = 2; + string description = 3; + string user = 4; + repeated bucketeer.feature.Variation variations = 5; + google.protobuf.Int32Value default_on_variation_index = 6; + google.protobuf.Int32Value default_off_variation_index = 7; + bucketeer.feature.Feature.VariationType variation_type = 8; +} + +message FeatureEnabledEvent { + string id = 1; +} + +message FeatureDisabledEvent { + string id = 1; +} + +message FeatureArchivedEvent { + string id = 1; +} + +message FeatureUnarchivedEvent { + string id = 1; +} + +message FeatureDeletedEvent { + string id = 1; +} + +message EvaluationDelayableSetEvent { + string id = 1; +} + +message EvaluationUndelayableSetEvent { + string id = 1; +} + +message FeatureRenamedEvent { + string id = 1; + string name = 2; +} + +message FeatureDescriptionChangedEvent { + string id = 1; + string description = 2; +} + +message FeatureOffVariationChangedEvent { + string id = 1; + string off_variation = 2; +} + +message FeatureVariationAddedEvent { + string id = 1; + bucketeer.feature.Variation variation = 2; +} + +message FeatureVariationRemovedEvent { + string id = 1; + string variation_id = 2; +} + +message VariationValueChangedEvent { + string feature_id = 1; + string id = 2; + string value = 3; +} + +message VariationNameChangedEvent { + string feature_id = 1; + string id = 2; + string name = 3; +} + +message VariationDescriptionChangedEvent { + string feature_id = 1; + string id = 2; + string description = 3; +} + +message VariationUserAddedEvent { + string feature_id = 1; + string id = 2; + string user = 3; +} + +message VariationUserRemovedEvent { + string feature_id = 1; + string id = 2; + string user = 3; +} + +message FeatureRuleAddedEvent { + string id = 1; + 
bucketeer.feature.Rule rule = 2; +} + +message FeatureChangeRuleStrategyEvent { + string feature_id = 1; + string rule_id = 2; + bucketeer.feature.Strategy strategy = 3; +} + +message FeatureRuleDeletedEvent { + string id = 1; + string rule_id = 2; +} + +message FeatureFixedStrategyChangedEvent { + string feature_id = 1; + string rule_id = 2; + bucketeer.feature.FixedStrategy strategy = 3; +} + +message FeatureRolloutStrategyChangedEvent { + string feature_id = 1; + string rule_id = 2; + bucketeer.feature.RolloutStrategy strategy = 3; +} + +message RuleClauseAddedEvent { + string feature_id = 1; + string rule_id = 2; + bucketeer.feature.Clause clause = 3; +} + +message RuleClauseDeletedEvent { + string feature_id = 1; + string rule_id = 2; + string id = 3; +} + +message ClauseAttributeChangedEvent { + string feature_id = 1; + string rule_id = 2; + string id = 3; + string attribute = 4; +} + +message ClauseOperatorChangedEvent { + string feature_id = 1; + string rule_id = 2; + string id = 3; + bucketeer.feature.Clause.Operator operator = 4; +} + +message ClauseValueAddedEvent { + string feature_id = 1; + string rule_id = 2; + string id = 3; + string value = 4; +} + +message ClauseValueRemovedEvent { + string feature_id = 1; + string rule_id = 2; + string id = 3; + string value = 4; +} + +message FeatureDefaultStrategyChangedEvent { + string id = 1; + bucketeer.feature.Strategy strategy = 2; +} + +message FeatureTagAddedEvent { + string id = 1; + string tag = 2; +} + +message FeatureTagRemovedEvent { + string id = 1; + string tag = 2; +} + +message FeatureVersionIncrementedEvent { + string id = 1; + int32 version = 2; +} + +message FeatureClonedEvent { + string id = 1; + string name = 2; + string description = 3; + repeated bucketeer.feature.Variation variations = 4; + repeated bucketeer.feature.Target targets = 5; + repeated bucketeer.feature.Rule rules = 6; + bucketeer.feature.Strategy default_strategy = 7; + string off_variation = 8; + repeated string tags = 9; + 
string maintainer = 10; + bucketeer.feature.Feature.VariationType variation_type = 11; +} + +message FeatureSamplingSeedResetEvent { + string sampling_seed = 1; +} + +message GoalCreatedEvent { + string id = 1; + string name = 2; + string description = 3; + bool deleted = 4; + int64 created_at = 5; + int64 updated_at = 6; +} + +message GoalRenamedEvent { + string id = 1; + string name = 2; +} + +message GoalDescriptionChangedEvent { + string id = 1; + string description = 2; +} + +message GoalArchivedEvent { + string id = 1; +} + +message GoalDeletedEvent { + string id = 1; +} + +message ExperimentCreatedEvent { + string id = 1; + string feature_id = 2; + int32 feature_version = 3; + repeated bucketeer.feature.Variation variations = 4; + string goal_id = 5 [deprecated = true]; + int64 start_at = 6; + int64 stop_at = 7; + bool stopped = 8; + int64 stopped_at = 9; + int64 created_at = 10; + int64 updated_at = 11; + repeated string goal_ids = 12; + string name = 13; + string description = 14; + string base_variation_id = 15; +} + +message ExperimentStoppedEvent { + string id = 1; + int64 stopped_at = 2; +} + +message ExperimentArchivedEvent { + string id = 1; +} + +message ExperimentDeletedEvent { + string id = 1; +} + +message ExperimentStartAtChangedEvent { + string id = 1; + int64 start_at = 2; +} + +message ExperimentStopAtChangedEvent { + string id = 1; + int64 stop_at = 2; +} + +message ExperimentPeriodChangedEvent { + string id = 1; + int64 start_at = 2; + int64 stop_at = 3; +} + +message ExperimentNameChangedEvent { + string id = 1; + string name = 2; +} + +message ExperimentDescriptionChangedEvent { + string id = 1; + string description = 2; +} + +message ExperimentStartedEvent {} + +message ExperimentFinishedEvent {} + +message AccountCreatedEvent { + string id = 1; + string email = 2; + string name = 3; + bucketeer.account.Account.Role role = 4; + bool disabled = 5; + int64 created_at = 6; + int64 updated_at = 7; +} + +message AccountRoleChangedEvent { + 
string id = 1; + bucketeer.account.Account.Role role = 2; +} + +message AccountEnabledEvent { + string id = 1; +} + +message AccountDisabledEvent { + string id = 1; +} + +message AccountDeletedEvent { + string id = 1; +} + +message APIKeyCreatedEvent { + string id = 1; + string name = 2; + bucketeer.account.APIKey.Role role = 3; + bool disabled = 4; + int64 created_at = 5; + int64 updated_at = 6; +} + +message APIKeyNameChangedEvent { + string id = 1; + string name = 2; +} + +message APIKeyEnabledEvent { + string id = 1; +} + +message APIKeyDisabledEvent { + string id = 1; +} + +message SegmentCreatedEvent { + string id = 1; + string name = 2; + string description = 3; +} + +message SegmentDeletedEvent { + string id = 1; +} + +message SegmentNameChangedEvent { + string id = 1; + string name = 2; +} + +message SegmentDescriptionChangedEvent { + string id = 1; + string description = 2; +} + +message SegmentRuleAddedEvent { + string id = 1; + bucketeer.feature.Rule rule = 2; +} + +message SegmentRuleDeletedEvent { + string id = 1; + string rule_id = 2; +} + +message SegmentRuleClauseAddedEvent { + string segment_id = 1; + string rule_id = 2; + bucketeer.feature.Clause clause = 3; +} + +message SegmentRuleClauseDeletedEvent { + string segment_id = 1; + string rule_id = 2; + string clause_id = 3; +} + +message SegmentClauseAttributeChangedEvent { + string segment_id = 1; + string rule_id = 2; + string clause_id = 3; + string attribute = 4; +} + +message SegmentClauseOperatorChangedEvent { + string segment_id = 1; + string rule_id = 2; + string clause_id = 3; + bucketeer.feature.Clause.Operator operator = 4; +} + +message SegmentClauseValueAddedEvent { + string segment_id = 1; + string rule_id = 2; + string clause_id = 3; + string value = 4; +} + +message SegmentClauseValueRemovedEvent { + string segment_id = 1; + string rule_id = 2; + string clause_id = 3; + string value = 4; +} + +message SegmentUserAddedEvent { + string segment_id = 1; + repeated string user_ids = 2; 
+ bucketeer.feature.SegmentUser.State state = 3; +} + +message SegmentUserDeletedEvent { + string segment_id = 1; + repeated string user_ids = 2; + bucketeer.feature.SegmentUser.State state = 3; +} + +message SegmentBulkUploadUsersEvent { + string segment_id = 1; + bucketeer.feature.Segment.Status status = 2; + bucketeer.feature.SegmentUser.State state = 3; +} + +message SegmentBulkUploadUsersStatusChangedEvent { + string segment_id = 1; + bucketeer.feature.Segment.Status status = 2; + bucketeer.feature.SegmentUser.State state = 3; + int64 count = 4; +} + +message EnvironmentCreatedEvent { + string id = 1; + string namespace = 2; + string name = 3; + string description = 4; + bool deleted = 5; + int64 created_at = 6; + int64 updated_at = 7; + string project_id = 8; +} + +message EnvironmentRenamedEvent { + string id = 1; + string name = 2; +} + +message EnvironmentDescriptionChangedEvent { + string id = 1; + string description = 2; +} + +message EnvironmentDeletedEvent { + string id = 1; + string namespace = 2; +} + +message AdminAccountCreatedEvent { + string id = 1; + string email = 2; + string name = 3; + bucketeer.account.Account.Role role = 4; + bool disabled = 5; + int64 created_at = 6; + int64 updated_at = 7; +} + +message AdminAccountEnabledEvent { + string id = 1; +} + +message AdminAccountDisabledEvent { + string id = 1; +} + +message AdminAccountDeletedEvent { + string id = 1; +} + +message AutoOpsRuleCreatedEvent { + string feature_id = 1; + bucketeer.autoops.OpsType ops_type = 2; + repeated bucketeer.autoops.Clause clauses = 3; + int64 triggered_at = 4; + int64 created_at = 5; + int64 updated_at = 6; +} + +message AutoOpsRuleDeletedEvent {} + +message AutoOpsRuleOpsTypeChangedEvent { + bucketeer.autoops.OpsType ops_type = 1; +} + +message AutoOpsRuleTriggeredAtChangedEvent {} + +message OpsEventRateClauseAddedEvent { + string clause_id = 1; + bucketeer.autoops.OpsEventRateClause ops_event_rate_clause = 2; +} + +message OpsEventRateClauseChangedEvent { 
+ string clause_id = 1; + bucketeer.autoops.OpsEventRateClause ops_event_rate_clause = 2; +} + +message AutoOpsRuleClauseDeletedEvent { + string clause_id = 1; +} + +message DatetimeClauseAddedEvent { + string clause_id = 1; + bucketeer.autoops.DatetimeClause datetime_clause = 2; +} + +message DatetimeClauseChangedEvent { + string clause_id = 1; + bucketeer.autoops.DatetimeClause datetime_clause = 2; +} + +message PushCreatedEvent { + string fcm_api_key = 2; + repeated string tags = 3; + string name = 4; +} + +message PushDeletedEvent {} + +message PushTagsAddedEvent { + repeated string tags = 2; +} + +message PushTagsDeletedEvent { + repeated string tags = 2; +} + +message PushRenamedEvent { + string name = 2; +} + +message SubscriptionCreatedEvent { + repeated bucketeer.notification.Subscription.SourceType source_types = 1; + bucketeer.notification.Recipient recipient = 2; + string name = 3; +} + +message SubscriptionDeletedEvent {} + +message SubscriptionEnabledEvent {} + +message SubscriptionDisabledEvent {} + +message SubscriptionSourceTypesAddedEvent { + repeated bucketeer.notification.Subscription.SourceType source_types = 1; +} + +message SubscriptionSourceTypesDeletedEvent { + repeated bucketeer.notification.Subscription.SourceType source_types = 1; +} + +message SubscriptionRenamedEvent { + string name = 1; +} + +message AdminSubscriptionCreatedEvent { + repeated bucketeer.notification.Subscription.SourceType source_types = 1; + bucketeer.notification.Recipient recipient = 2; + string name = 3; +} + +message AdminSubscriptionDeletedEvent {} + +message AdminSubscriptionEnabledEvent {} + +message AdminSubscriptionDisabledEvent {} + +message AdminSubscriptionSourceTypesAddedEvent { + repeated bucketeer.notification.Subscription.SourceType source_types = 1; +} + +message AdminSubscriptionSourceTypesDeletedEvent { + repeated bucketeer.notification.Subscription.SourceType source_types = 1; +} + +message AdminSubscriptionRenamedEvent { + string name = 1; +} + 
+message ProjectCreatedEvent { + string id = 1; + string description = 2; + bool disabled = 3; + bool trial = 4; + string creator_email = 5; + int64 created_at = 6; + int64 updated_at = 7; +} + +message ProjectDescriptionChangedEvent { + string id = 1; + string description = 2; +} + +message ProjectEnabledEvent { + string id = 1; +} + +message ProjectDisabledEvent { + string id = 1; +} + +message ProjectTrialCreatedEvent { + string id = 1; + string description = 2; + bool disabled = 3; + bool trial = 4; + string creator_email = 5; + int64 created_at = 6; + int64 updated_at = 7; +} + +message ProjectTrialConvertedEvent { + string id = 1; +} + +message PrerequisiteAddedEvent { + bucketeer.feature.Prerequisite prerequisite = 1; +} + +message PrerequisiteVariationChangedEvent { + bucketeer.feature.Prerequisite prerequisite = 1; +} + +message PrerequisiteRemovedEvent { + string feature_id = 1; +} + +message WebhookCreatedEvent { + string id = 1; + string name = 2; + string description = 3; + int64 created_at = 4; + int64 updated_at = 5; +} + +message WebhookDeletedEvent { + string id = 1; +} + +message WebhookNameChangedEvent { + string id = 1; + string name = 2; +} + +message WebhookDescriptionChangedEvent { + string id = 1; + string description = 2; +} + +message WebhookClauseAddedEvent { + string clause_id = 1; + bucketeer.autoops.WebhookClause webhook_clause = 2; +} + +message WebhookClauseChangedEvent { + string clause_id = 1; + bucketeer.autoops.WebhookClause webhook_clause = 2; +} diff --git a/proto/event/domain/localized_message.proto b/proto/event/domain/localized_message.proto new file mode 100644 index 000000000..aae773b3f --- /dev/null +++ b/proto/event/domain/localized_message.proto @@ -0,0 +1,26 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package bucketeer.event.domain; +option go_package = "github.com/bucketeer-io/bucketeer/proto/event/domain"; + +message LocalizedMessage { + // The locale used following the specification defined at + // http://www.rfc-editor.org/rfc/bcp/bcp47.txt. + // Examples are: "en-US", "fr-CH", "es-MX" + string locale = 1; + string message = 2; +} diff --git a/proto/event/service/BUILD.bazel b/proto/event/service/BUILD.bazel new file mode 100644 index 000000000..c4726a366 --- /dev/null +++ b/proto/event/service/BUILD.bazel @@ -0,0 +1,39 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +proto_library( + name = "service_proto", + srcs = [ + "feature.proto", + "segment.proto", + "user.proto", + ], + visibility = ["//visibility:public"], + deps = [ + "//proto/event/client:client_proto", + "//proto/event/domain:domain_proto", + "//proto/feature:feature_proto", + "//proto/user:user_proto", + ], +) + +go_proto_library( + name = "service_go_proto", + importpath = "github.com/bucketeer-io/bucketeer/proto/event/service", + proto = ":service_proto", + visibility = ["//visibility:public"], + deps = [ + "//proto/event/client:go_default_library", + "//proto/event/domain:go_default_library", + "//proto/feature:go_default_library", + "//proto/user:go_default_library", + ], +) + +go_library( + name = "go_default_library", + embed = [":service_go_proto"], + importpath = "github.com/bucketeer-io/bucketeer/proto/event/service", 
+ visibility = ["//visibility:public"], +) diff --git a/proto/event/service/feature.proto b/proto/event/service/feature.proto new file mode 100644 index 000000000..f586d5bd0 --- /dev/null +++ b/proto/event/service/feature.proto @@ -0,0 +1,30 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package bucketeer.event.service; +option go_package = "github.com/bucketeer-io/bucketeer/proto/event/service"; + +import "proto/user/user.proto"; + +message EvaluationRequestEvent { + string id = 1; + int64 timestamp = 2; + bucketeer.user.User user = 3; + reserved 4; // repeated bucketeer.feature.Feature features = 4 [deprecated = + // true]; + string environment_namespace = 5; + string tag = 6; +} diff --git a/proto/event/service/segment.proto b/proto/event/service/segment.proto new file mode 100644 index 000000000..75d389a2b --- /dev/null +++ b/proto/event/service/segment.proto @@ -0,0 +1,30 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package bucketeer.event.service; +option go_package = "github.com/bucketeer-io/bucketeer/proto/event/service"; + +import "proto/feature/segment.proto"; +import "proto/event/domain/event.proto"; + +message BulkSegmentUsersReceivedEvent { + string id = 1; + string environment_namespace = 2; + string segment_id = 3; + bytes data = 4; + bucketeer.feature.SegmentUser.State state = 5; + bucketeer.event.domain.Editor editor = 6; +} diff --git a/proto/event/service/user.proto b/proto/event/service/user.proto new file mode 100644 index 000000000..7766c5eea --- /dev/null +++ b/proto/event/service/user.proto @@ -0,0 +1,33 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package bucketeer.event.service; +option go_package = "github.com/bucketeer-io/bucketeer/proto/event/service"; + +import "proto/event/client/event.proto"; + +message UserEvent { + string id = 1; + reserved 2; // bucketeer.user.User user = 2 [deprecated = true]; + reserved 3; // repeated bucketeer.feature.Feature features = 3 [deprecated = + // true]; + string environment_namespace = 4; + string tag = 5; + string user_id = 6; + int64 last_seen = 7; + map data = 8; + bucketeer.event.client.SourceId source_id = 9; +} diff --git a/proto/eventcounter/BUILD.bazel b/proto/eventcounter/BUILD.bazel new file mode 100644 index 000000000..bcf3562cd --- /dev/null +++ b/proto/eventcounter/BUILD.bazel @@ -0,0 +1,49 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") +load("//proto:proto_descriptor.bzl", "proto_descriptor") + +proto_library( + name = "eventcounter_proto", + srcs = [ + "distribution_summary.proto", + "evaluation_count.proto", + "experiment_count.proto", + "experiment_result.proto", + "filter.proto", + "goal_result.proto", + "histogram.proto", + "service.proto", + "table.proto", + "timeseries.proto", + "variation_count.proto", + "variation_result.proto", + ], + visibility = ["//visibility:public"], + deps = ["@com_google_protobuf//:wrappers_proto"], +) + +go_proto_library( + name = "eventcounter_go_proto", + compilers = ["@io_bazel_rules_go//proto:go_grpc"], + importpath = "github.com/bucketeer-io/bucketeer/proto/eventcounter", + proto = ":eventcounter_proto", + visibility = ["//visibility:public"], +) + +go_library( + name = "go_default_library", + embed = [":eventcounter_go_proto"], + importpath = "github.com/bucketeer-io/bucketeer/proto/eventcounter", + visibility = ["//visibility:public"], +) + +proto_descriptor( + name = "proto_descriptor", + srcs = ["service.proto"], + visibility = ["//visibility:public"], + deps = 
[ + ":eventcounter_proto", + "@com_google_protobuf//:wrappers_proto", + ], +) diff --git a/proto/eventcounter/distribution_summary.proto b/proto/eventcounter/distribution_summary.proto new file mode 100644 index 000000000..cff0ee22a --- /dev/null +++ b/proto/eventcounter/distribution_summary.proto @@ -0,0 +1,32 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package bucketeer.eventcounter; +option go_package = "github.com/bucketeer-io/bucketeer/proto/eventcounter"; + +import "proto/eventcounter/histogram.proto"; + +message DistributionSummary { + double mean = 1; + double sd = 2; + double rhat = 3; + Histogram histogram = 4; + double median = 5; + double percentile025 = 6; // Remove "_" because python should generate same + // field name for Datasource. + double percentile975 = 7; // Remove "_" because python should generate same + // field name for Datasource. +} diff --git a/proto/eventcounter/evaluation_count.proto b/proto/eventcounter/evaluation_count.proto new file mode 100644 index 000000000..f6e7422f2 --- /dev/null +++ b/proto/eventcounter/evaluation_count.proto @@ -0,0 +1,29 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package bucketeer.eventcounter; +option go_package = "github.com/bucketeer-io/bucketeer/proto/eventcounter"; + +import "proto/eventcounter/variation_count.proto"; + +message EvaluationCount { + string id = 1; + string feature_id = 2; + int32 feature_version = 3; + repeated VariationCount realtime_counts = 4; + repeated VariationCount batch_counts = 5; + int64 updated_at = 6; +} diff --git a/proto/eventcounter/experiment_count.proto b/proto/eventcounter/experiment_count.proto new file mode 100644 index 000000000..de8ac753a --- /dev/null +++ b/proto/eventcounter/experiment_count.proto @@ -0,0 +1,37 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package bucketeer.eventcounter; +option go_package = "github.com/bucketeer-io/bucketeer/proto/eventcounter"; + +import "proto/eventcounter/variation_count.proto"; + +message ExperimentCount { + string id = 1; + string feature_id = 2; + int32 feature_version = 3; + string goal_id = 4 [deprecated = true]; + repeated VariationCount realtime_counts = 5 [deprecated = true]; + repeated VariationCount batch_counts = 6 [deprecated = true]; + int64 updated_at = 7; + repeated GoalCounts goal_counts = 8; +} + +message GoalCounts { + string goal_id = 1; + repeated VariationCount realtime_counts = 2; + repeated VariationCount batch_counts = 3 [deprecated = true]; +} \ No newline at end of file diff --git a/proto/eventcounter/experiment_result.proto b/proto/eventcounter/experiment_result.proto new file mode 100644 index 000000000..d11e7c0db --- /dev/null +++ b/proto/eventcounter/experiment_result.proto @@ -0,0 +1,27 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package bucketeer.eventcounter; +option go_package = "github.com/bucketeer-io/bucketeer/proto/eventcounter"; + +import "proto/eventcounter/goal_result.proto"; + +message ExperimentResult { + string id = 1; + string experiment_id = 2; + int64 updated_at = 3; + repeated GoalResult goal_results = 4; +} \ No newline at end of file diff --git a/proto/eventcounter/filter.proto b/proto/eventcounter/filter.proto new file mode 100644 index 000000000..f24f86dbb --- /dev/null +++ b/proto/eventcounter/filter.proto @@ -0,0 +1,25 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package bucketeer.eventcounter; +option go_package = "github.com/bucketeer-io/bucketeer/proto/eventcounter"; + +message Filter { + enum Operator { EQUALS = 0; } + string key = 1; + Operator operator = 2; + repeated string values = 3; +} diff --git a/proto/eventcounter/goal_result.proto b/proto/eventcounter/goal_result.proto new file mode 100644 index 000000000..556b40cec --- /dev/null +++ b/proto/eventcounter/goal_result.proto @@ -0,0 +1,25 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package bucketeer.eventcounter; +option go_package = "github.com/bucketeer-io/bucketeer/proto/eventcounter"; + +import "proto/eventcounter/variation_result.proto"; + +message GoalResult { + string goal_id = 1; + repeated VariationResult variation_results = 2; +} \ No newline at end of file diff --git a/proto/eventcounter/histogram.proto b/proto/eventcounter/histogram.proto new file mode 100644 index 000000000..92b0af1f6 --- /dev/null +++ b/proto/eventcounter/histogram.proto @@ -0,0 +1,23 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package bucketeer.eventcounter; +option go_package = "github.com/bucketeer-io/bucketeer/proto/eventcounter"; + +message Histogram { + repeated int64 hist = 1; + repeated double bins = 2; +} \ No newline at end of file diff --git a/proto/eventcounter/service.proto b/proto/eventcounter/service.proto new file mode 100644 index 000000000..b05423353 --- /dev/null +++ b/proto/eventcounter/service.proto @@ -0,0 +1,159 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package bucketeer.eventcounter; +option go_package = "github.com/bucketeer-io/bucketeer/proto/eventcounter"; + +import "google/protobuf/wrappers.proto"; + +import "proto/eventcounter/evaluation_count.proto"; +import "proto/eventcounter/experiment_count.proto"; +import "proto/eventcounter/experiment_result.proto"; +import "proto/eventcounter/filter.proto"; +import "proto/eventcounter/table.proto"; +import "proto/eventcounter/timeseries.proto"; + +message GetEvaluationCountV2Request { + string environment_namespace = 1; + int64 start_at = 2; + int64 end_at = 3; + string feature_id = 4; + int32 feature_version = 5; + repeated string variation_ids = 6; +} + +message GetEvaluationCountV2Response { + EvaluationCount count = 1; +} + +message GetEvaluationTimeseriesCountRequest { + string environment_namespace = 1; + string feature_id = 2; +} + +message GetEvaluationTimeseriesCountResponse { + repeated VariationTimeseries user_counts = 1; + repeated VariationTimeseries event_counts = 2; +} + +message GetExperimentResultRequest { + string environment_namespace = 1; + string experiment_id = 2; +} + +message GetExperimentResultResponse { + ExperimentResult experiment_result = 1; +} + +message ListExperimentResultsRequest { + string feature_id = 1; + google.protobuf.Int32Value feature_version = 2; + string environment_namespace = 3; +} + +message ListExperimentResultsResponse { + map<string, ExperimentResult> results = 1; +} + +message ListUserDataKeysRequest { + string environment_namespace = 1; +} + +message ListUserDataKeysResponse { + repeated string keys = 1; +} + +message ListUserDataValuesRequest { + string environment_namespace = 1; + string key = 2; +} + +message ListUserDataValuesResponse { + repeated string values = 1; +} + +message GetGoalCountRequest { + string environment_namespace = 1; + string feature_id = 2; + int32 feature_version = 3; + string goal_id = 4; + int64 start_at = 5; + int64 end_at = 6; + repeated Filter filters = 7; + repeated string segments = 8; 
+ string reason = 9; +} + +message GetGoalCountResponse { + Row headers = 1; + repeated Row rows = 2; +} + +message GetGoalCountV2Request { + string environment_namespace = 1; + int64 start_at = 2; + int64 end_at = 3; + string goal_id = 4; + string feature_id = 5; + int32 feature_version = 6; + repeated string variation_ids = 7; +} + +message GetGoalCountV2Response { + GoalCounts goal_counts = 1; +} + +message GetUserCountV2Request { + string environment_namespace = 1; + int64 start_at = 2; + int64 end_at = 3; +} + +message GetUserCountV2Response { + int64 event_count = 1; + int64 user_count = 2; +} + +message ListUserMetadataRequest { + string environment_namespace = 1; +} + +message ListUserMetadataResponse { + repeated string data = 1; +} + +service EventCounterService { + rpc GetEvaluationCountV2(GetEvaluationCountV2Request) + returns (GetEvaluationCountV2Response) {} + + rpc GetEvaluationTimeseriesCount(GetEvaluationTimeseriesCountRequest) + returns (GetEvaluationTimeseriesCountResponse) {} + + rpc GetExperimentResult(GetExperimentResultRequest) + returns (GetExperimentResultResponse) {} + + rpc ListExperimentResults(ListExperimentResultsRequest) + returns (ListExperimentResultsResponse) {} + + rpc GetGoalCount(GetGoalCountRequest) returns (GetGoalCountResponse) {} + + rpc GetGoalCountV2(GetGoalCountV2Request) returns (GetGoalCountV2Response) {} + + rpc GetUserCountV2(GetUserCountV2Request) returns (GetUserCountV2Response) {} + + rpc ListUserMetadata(ListUserMetadataRequest) + returns (ListUserMetadataResponse) {} +} \ No newline at end of file diff --git a/proto/eventcounter/table.proto b/proto/eventcounter/table.proto new file mode 100644 index 000000000..aa1bd5b5c --- /dev/null +++ b/proto/eventcounter/table.proto @@ -0,0 +1,32 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package bucketeer.eventcounter; +option go_package = "github.com/bucketeer-io/bucketeer/proto/eventcounter"; + +message Row { + repeated Cell cells = 1; +} + +message Cell { + enum Type { + STRING = 0; + DOUBLE = 2; + } + Type type = 1; + string value = 2; + double valueDouble = 4; +} \ No newline at end of file diff --git a/proto/eventcounter/timeseries.proto b/proto/eventcounter/timeseries.proto new file mode 100644 index 000000000..6d9b4dad6 --- /dev/null +++ b/proto/eventcounter/timeseries.proto @@ -0,0 +1,28 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package bucketeer.eventcounter; +option go_package = "github.com/bucketeer-io/bucketeer/proto/eventcounter"; + +message VariationTimeseries { + string variation_id = 1; + Timeseries timeseries = 2; +} + +message Timeseries { + repeated int64 timestamps = 1; + repeated double values = 2; +} \ No newline at end of file diff --git a/proto/eventcounter/variation_count.proto b/proto/eventcounter/variation_count.proto new file mode 100644 index 000000000..36df8f699 --- /dev/null +++ b/proto/eventcounter/variation_count.proto @@ -0,0 +1,29 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package bucketeer.eventcounter; +option go_package = "github.com/bucketeer-io/bucketeer/proto/eventcounter"; + +message VariationCount { + string variation_id = 1; + int64 user_count = 2; + int64 event_count = 3; + double value_sum = 4; + int64 created_at = 5; + string variation_value = 6; + double value_sum_per_user_mean = 7; + double value_sum_per_user_variance = 8; +} diff --git a/proto/eventcounter/variation_result.proto b/proto/eventcounter/variation_result.proto new file mode 100644 index 000000000..ee9610193 --- /dev/null +++ b/proto/eventcounter/variation_result.proto @@ -0,0 +1,47 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package bucketeer.eventcounter; +option go_package = "github.com/bucketeer-io/bucketeer/proto/eventcounter"; + +import "proto/eventcounter/variation_count.proto"; +import "proto/eventcounter/distribution_summary.proto"; +import "proto/eventcounter/timeseries.proto"; + +message VariationResult { + string variation_id = 1; + VariationCount experiment_count = 2; + VariationCount evaluation_count = 3; + DistributionSummary cvr_prob_best = 4; + DistributionSummary cvr_prob_beat_baseline = 5; + DistributionSummary cvr_prob = 6; + Timeseries evaluation_user_count_timeseries = 7; + Timeseries evaluation_event_count_timeseries = 8; + Timeseries goal_user_count_timeseries = 9; + Timeseries goal_event_count_timeseries = 10; + Timeseries goal_value_sum_timeseries = 11; + Timeseries cvr_median_timeseries = 12; + Timeseries cvr_percentile025_timeseries = 13; + Timeseries cvr_percentile975_timeseries = 14; + Timeseries cvr_timeseries = 15; + Timeseries goal_value_sum_per_user_timeseries = 16; + DistributionSummary goal_value_sum_per_user_prob = 17; + DistributionSummary goal_value_sum_per_user_prob_best = 18; + DistributionSummary goal_value_sum_per_user_prob_beat_baseline = 19; + Timeseries goal_value_sum_per_user_median_timeseries = 20; + Timeseries goal_value_sum_per_user_percentile025_timeseries = 21; + Timeseries goal_value_sum_per_user_percentile975_timeseries = 22; +} \ No newline at end of file diff --git a/proto/experiment/BUILD.bazel b/proto/experiment/BUILD.bazel new file mode 100644 index 000000000..b9fa364ff --- /dev/null +++ 
b/proto/experiment/BUILD.bazel @@ -0,0 +1,46 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") +load("//proto:proto_descriptor.bzl", "proto_descriptor") + +proto_library( + name = "experiment_proto", + srcs = [ + "command.proto", + "experiment.proto", + "goal.proto", + "service.proto", + ], + visibility = ["//visibility:public"], + deps = [ + "//proto/feature:feature_proto", + "@com_google_protobuf//:wrappers_proto", + ], +) + +go_proto_library( + name = "experiment_go_proto", + compilers = ["@io_bazel_rules_go//proto:go_grpc"], + importpath = "github.com/bucketeer-io/bucketeer/proto/experiment", + proto = ":experiment_proto", + visibility = ["//visibility:public"], + deps = ["//proto/feature:go_default_library"], +) + +go_library( + name = "go_default_library", + embed = [":experiment_go_proto"], + importpath = "github.com/bucketeer-io/bucketeer/proto/experiment", + visibility = ["//visibility:public"], +) + +proto_descriptor( + name = "proto_descriptor", + srcs = ["service.proto"], + visibility = ["//visibility:public"], + deps = [ + ":experiment_proto", + "//proto/feature:feature_proto", + "@com_google_protobuf//:wrappers_proto", + ], +) diff --git a/proto/experiment/command.proto b/proto/experiment/command.proto new file mode 100644 index 000000000..85fece7f7 --- /dev/null +++ b/proto/experiment/command.proto @@ -0,0 +1,70 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package bucketeer.experiment; +option go_package = "github.com/bucketeer-io/bucketeer/proto/experiment"; + +message CreateGoalCommand { + string id = 1; + string name = 2; + string description = 3; +} + +message RenameGoalCommand { + string name = 1; +} + +message ChangeDescriptionGoalCommand { + string description = 1; +} + +message ArchiveGoalCommand {} + +message DeleteGoalCommand {} + +message CreateExperimentCommand { + string feature_id = 1; + reserved 2; // string goal_id = 2 [deprecated = true]; + int64 start_at = 3; + int64 stop_at = 4; + repeated string goal_ids = 5; + string name = 6; + string description = 7; + string base_variation_id = 8; +} + +message ChangeExperimentPeriodCommand { + int64 start_at = 1; + int64 stop_at = 2; +} + +message ChangeExperimentNameCommand { + string name = 1; +} + +message ChangeExperimentDescriptionCommand { + string description = 1; +} + +message StopExperimentCommand {} + +message ArchiveExperimentCommand {} + +message DeleteExperimentCommand {} + +message StartExperimentCommand {} + +message FinishExperimentCommand {} \ No newline at end of file diff --git a/proto/experiment/experiment.proto b/proto/experiment/experiment.proto new file mode 100644 index 000000000..60b338917 --- /dev/null +++ b/proto/experiment/experiment.proto @@ -0,0 +1,53 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package bucketeer.experiment; +option go_package = "github.com/bucketeer-io/bucketeer/proto/experiment"; + +import "proto/feature/variation.proto"; + +message Experiment { + enum Status { + WAITING = 0; + RUNNING = 1; + STOPPED = 2; + FORCE_STOPPED = 3; + } + string id = 1; + string goal_id = 2 [deprecated = true]; + string feature_id = 3; + int32 feature_version = 4; + repeated bucketeer.feature.Variation variations = 5; + int64 start_at = 6; + int64 stop_at = 7; + bool stopped = 8 [deprecated = true]; + int64 stopped_at = 9 [jstype = JS_STRING]; + int64 created_at = 10; + int64 updated_at = 11; + bool deleted = 12; + repeated string goal_ids = 13; + string name = 14; + string description = 15; + string base_variation_id = 16; + reserved 17; // bucketeer.feature.Feature current_feature = 17 + Status status = 18; + string maintainer = 19; + bool archived = 20; +} + +message Experiments { + repeated Experiment experiments = 1; +} diff --git a/proto/experiment/goal.proto b/proto/experiment/goal.proto new file mode 100644 index 000000000..c9dab4b80 --- /dev/null +++ b/proto/experiment/goal.proto @@ -0,0 +1,29 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package bucketeer.experiment; +option go_package = "github.com/bucketeer-io/bucketeer/proto/experiment"; + +message Goal { + string id = 1; + string name = 2; + string description = 3; + bool deleted = 4; + int64 created_at = 5; + int64 updated_at = 6; + bool is_in_use_status = 7; // This field is set only when APIs return. + bool archived = 8; +} diff --git a/proto/experiment/service.proto b/proto/experiment/service.proto new file mode 100644 index 000000000..8ec722f0b --- /dev/null +++ b/proto/experiment/service.proto @@ -0,0 +1,223 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package bucketeer.experiment; +option go_package = "github.com/bucketeer-io/bucketeer/proto/experiment"; + +import "google/protobuf/wrappers.proto"; + +import "proto/experiment/command.proto"; +import "proto/experiment/goal.proto"; +import "proto/experiment/experiment.proto"; + +message GetGoalRequest { + string id = 1; + string environment_namespace = 2; +} + +message GetGoalResponse { + Goal goal = 1; +} + +message ListGoalsRequest { + enum OrderBy { + DEFAULT = 0; + NAME = 1; + CREATED_AT = 2; + UPDATED_AT = 3; + } + enum OrderDirection { + ASC = 0; + DESC = 1; + } + int64 page_size = 1; + string cursor = 2; + string environment_namespace = 3; + OrderBy order_by = 4; + OrderDirection order_direction = 5; + string search_keyword = 6; + google.protobuf.BoolValue is_in_use_status = 7; + google.protobuf.BoolValue archived = 8; +} + +message ListGoalsResponse { + repeated Goal goals = 1; + string cursor = 2; + int64 total_count = 3; +} + +message CreateGoalRequest { + CreateGoalCommand command = 1; + string environment_namespace = 2; +} + +message CreateGoalResponse {} + +message ArchiveGoalRequest { + string id = 1; + ArchiveGoalCommand command = 2; + string environment_namespace = 3; +} + +message ArchiveGoalResponse {} + +message DeleteGoalRequest { + string id = 1; + DeleteGoalCommand command = 2; + string environment_namespace = 3; +} + +message DeleteGoalResponse {} + +message UpdateGoalRequest { + string id = 1; + RenameGoalCommand rename_command = 2; + ChangeDescriptionGoalCommand change_description_command = 3; + string environment_namespace = 4; +} + +message UpdateGoalResponse {} + +message GetExperimentRequest { + string id = 1; + string environment_namespace = 2; +} + +message GetExperimentResponse { + Experiment experiment = 1; +} + +message ListExperimentsRequest { + enum OrderBy { + DEFAULT = 0; + NAME = 1; + CREATED_AT = 2; + UPDATED_AT = 3; + } + enum OrderDirection { + ASC = 0; + DESC = 1; + } + string feature_id = 1; + 
google.protobuf.Int32Value feature_version = 2; + int64 from = 3; + int64 to = 4; + int64 page_size = 5; + string cursor = 6; + string environment_namespace = 7; + google.protobuf.Int32Value status = 8; // [deprecated = true]; + string maintainer = 9; + OrderBy order_by = 10; + OrderDirection order_direction = 11; + string search_keyword = 12; + google.protobuf.BoolValue archived = 13; + repeated Experiment.Status statuses = 14; +} + +message ListExperimentsResponse { + repeated Experiment experiments = 1; + string cursor = 2; + int64 total_count = 3; +} + +message CreateExperimentRequest { + CreateExperimentCommand command = 1; + string environment_namespace = 2; +} + +message CreateExperimentResponse { + Experiment experiment = 1; +} + +message UpdateExperimentRequest { + string id = 1; + reserved 2; // ChangeStartAtExperimentCommand change_start_at_command = 2 + // [deprecated = true]; + reserved 3; // ChangeStopAtExperimentCommand change_stop_at_command = 3 + // [deprecated = true]; + string environment_namespace = 4; + ChangeExperimentPeriodCommand change_experiment_period_command = 5; + ChangeExperimentNameCommand change_name_command = 6; + ChangeExperimentDescriptionCommand change_description_command = 7; +} + +message UpdateExperimentResponse {} + +message StartExperimentRequest { + string environment_namespace = 1; + string id = 2; + StartExperimentCommand command = 3; +} + +message StartExperimentResponse {} + +message FinishExperimentRequest { + string environment_namespace = 1; + string id = 2; + FinishExperimentCommand command = 3; +} + +message FinishExperimentResponse {} + +message StopExperimentRequest { + string id = 1; + StopExperimentCommand command = 2; + string environment_namespace = 3; +} + +message StopExperimentResponse {} + +message ArchiveExperimentRequest { + string id = 1; + ArchiveExperimentCommand command = 2; + string environment_namespace = 3; +} + +message ArchiveExperimentResponse {} + +message DeleteExperimentRequest { + string 
id = 1; + DeleteExperimentCommand command = 2; + string environment_namespace = 3; +} + +message DeleteExperimentResponse {} + +service ExperimentService { + rpc GetGoal(GetGoalRequest) returns (GetGoalResponse) {} + rpc ListGoals(ListGoalsRequest) returns (ListGoalsResponse) {} + rpc CreateGoal(CreateGoalRequest) returns (CreateGoalResponse) {} + rpc UpdateGoal(UpdateGoalRequest) returns (UpdateGoalResponse) {} + rpc ArchiveGoal(ArchiveGoalRequest) returns (ArchiveGoalResponse) {} + rpc DeleteGoal(DeleteGoalRequest) returns (DeleteGoalResponse) {} + + rpc GetExperiment(GetExperimentRequest) returns (GetExperimentResponse) {} + rpc ListExperiments(ListExperimentsRequest) + returns (ListExperimentsResponse) {} + rpc CreateExperiment(CreateExperimentRequest) + returns (CreateExperimentResponse) {} + rpc UpdateExperiment(UpdateExperimentRequest) + returns (UpdateExperimentResponse) {} + rpc StartExperiment(StartExperimentRequest) + returns (StartExperimentResponse) {} + rpc FinishExperiment(FinishExperimentRequest) + returns (FinishExperimentResponse) {} + rpc StopExperiment(StopExperimentRequest) returns (StopExperimentResponse) {} + rpc ArchiveExperiment(ArchiveExperimentRequest) + returns (ArchiveExperimentResponse) {} + rpc DeleteExperiment(DeleteExperimentRequest) + returns (DeleteExperimentResponse) {} +} diff --git a/proto/external/googleapis/googleapis/83e756a66b80b072bd234abcfe89edf459090974/google/rpc/BUILD.bazel b/proto/external/googleapis/googleapis/83e756a66b80b072bd234abcfe89edf459090974/google/rpc/BUILD.bazel new file mode 100644 index 000000000..a1e2496f5 --- /dev/null +++ b/proto/external/googleapis/googleapis/83e756a66b80b072bd234abcfe89edf459090974/google/rpc/BUILD.bazel @@ -0,0 +1,31 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +proto_library( + name = "status_proto", + srcs = [ + "code.proto", + "error_details.proto", 
+ "status.proto", + ], + visibility = ["//visibility:public"], + deps = [ + "@com_google_protobuf//:any_proto", + "@com_google_protobuf//:duration_proto", + ], +) + +go_proto_library( + name = "status_go_proto", + importpath = "google.golang.org/genproto/googleapis/rpc/status", + proto = ":status_proto", + visibility = ["//visibility:public"], +) + +go_library( + name = "go_default_library", + embed = [":status_go_proto"], + importpath = "google.golang.org/genproto/googleapis/rpc/status", + visibility = ["//visibility:public"], +) diff --git a/proto/external/googleapis/googleapis/83e756a66b80b072bd234abcfe89edf459090974/google/rpc/code.proto b/proto/external/googleapis/googleapis/83e756a66b80b072bd234abcfe89edf459090974/google/rpc/code.proto new file mode 100644 index 000000000..c8d7e548b --- /dev/null +++ b/proto/external/googleapis/googleapis/83e756a66b80b072bd234abcfe89edf459090974/google/rpc/code.proto @@ -0,0 +1,200 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright 2017 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.rpc; + +option go_package = "google.golang.org/genproto/googleapis/rpc/code;code"; +option java_multiple_files = true; +option java_outer_classname = "CodeProto"; +option java_package = "com.google.rpc"; +option objc_class_prefix = "RPC"; + + +// The canonical error codes for Google APIs. +// +// +// Sometimes multiple error codes may apply. Services should return +// the most specific error code that applies. For example, prefer +// `OUT_OF_RANGE` over `FAILED_PRECONDITION` if both codes apply. +// Similarly prefer `NOT_FOUND` or `ALREADY_EXISTS` over `FAILED_PRECONDITION`. +enum Code { + // Not an error; returned on success + // + // HTTP Mapping: 200 OK + OK = 0; + + // The operation was cancelled, typically by the caller. + // + // HTTP Mapping: 499 Client Closed Request + CANCELLED = 1; + + // Unknown error. For example, this error may be returned when + // a `Status` value received from another address space belongs to + // an error space that is not known in this address space. Also + // errors raised by APIs that do not return enough error information + // may be converted to this error. + // + // HTTP Mapping: 500 Internal Server Error + UNKNOWN = 2; + + // The client specified an invalid argument. Note that this differs + // from `FAILED_PRECONDITION`. `INVALID_ARGUMENT` indicates arguments + // that are problematic regardless of the state of the system + // (e.g., a malformed file name). 
+ // + // HTTP Mapping: 400 Bad Request + INVALID_ARGUMENT = 3; + + // The deadline expired before the operation could complete. For operations + // that change the state of the system, this error may be returned + // even if the operation has completed successfully. For example, a + // successful response from a server could have been delayed long + // enough for the deadline to expire. + // + // HTTP Mapping: 504 Gateway Timeout + DEADLINE_EXCEEDED = 4; + + // Some requested entity (e.g., file or directory) was not found. + // + // Note to server developers: if a request is denied for an entire class + // of users, such as gradual feature rollout or undocumented whitelist, + // `NOT_FOUND` may be used. If a request is denied for some users within + // a class of users, such as user-based access control, `PERMISSION_DENIED` + // must be used. + // + // HTTP Mapping: 404 Not Found + NOT_FOUND = 5; + + // The entity that a client attempted to create (e.g., file or directory) + // already exists. + // + // HTTP Mapping: 409 Conflict + ALREADY_EXISTS = 6; + + // The caller does not have permission to execute the specified + // operation. `PERMISSION_DENIED` must not be used for rejections + // caused by exhausting some resource (use `RESOURCE_EXHAUSTED` + // instead for those errors). `PERMISSION_DENIED` must not be + // used if the caller can not be identified (use `UNAUTHENTICATED` + // instead for those errors). This error code does not imply the + // request is valid or the requested entity exists or satisfies + // other pre-conditions. + // + // HTTP Mapping: 403 Forbidden + PERMISSION_DENIED = 7; + + // The request does not have valid authentication credentials for the + // operation. + // + // HTTP Mapping: 401 Unauthorized + UNAUTHENTICATED = 16; + + // Some resource has been exhausted, perhaps a per-user quota, or + // perhaps the entire file system is out of space. 
+ // + // HTTP Mapping: 429 Too Many Requests + RESOURCE_EXHAUSTED = 8; + + // The operation was rejected because the system is not in a state + // required for the operation's execution. For example, the directory + // to be deleted is non-empty, an rmdir operation is applied to + // a non-directory, etc. + // + // Service implementors can use the following guidelines to decide + // between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`: + // (a) Use `UNAVAILABLE` if the client can retry just the failing call. + // (b) Use `ABORTED` if the client should retry at a higher level + // (e.g., when a client-specified test-and-set fails, indicating the + // client should restart a read-modify-write sequence). + // (c) Use `FAILED_PRECONDITION` if the client should not retry until + // the system state has been explicitly fixed. E.g., if an "rmdir" + // fails because the directory is non-empty, `FAILED_PRECONDITION` + // should be returned since the client should not retry unless + // the files are deleted from the directory. + // + // HTTP Mapping: 400 Bad Request + FAILED_PRECONDITION = 9; + + // The operation was aborted, typically due to a concurrency issue such as + // a sequencer check failure or transaction abort. + // + // See the guidelines above for deciding between `FAILED_PRECONDITION`, + // `ABORTED`, and `UNAVAILABLE`. + // + // HTTP Mapping: 409 Conflict + ABORTED = 10; + + // The operation was attempted past the valid range. E.g., seeking or + // reading past end-of-file. + // + // Unlike `INVALID_ARGUMENT`, this error indicates a problem that may + // be fixed if the system state changes. For example, a 32-bit file + // system will generate `INVALID_ARGUMENT` if asked to read at an + // offset that is not in the range [0,2^32-1], but it will generate + // `OUT_OF_RANGE` if asked to read from an offset past the current + // file size. + // + // There is a fair bit of overlap between `FAILED_PRECONDITION` and + // `OUT_OF_RANGE`. 
We recommend using `OUT_OF_RANGE` (the more specific + // error) when it applies so that callers who are iterating through + // a space can easily look for an `OUT_OF_RANGE` error to detect when + // they are done. + // + // HTTP Mapping: 400 Bad Request + OUT_OF_RANGE = 11; + + // The operation is not implemented or is not supported/enabled in this + // service. + // + // HTTP Mapping: 501 Not Implemented + UNIMPLEMENTED = 12; + + // Internal errors. This means that some invariants expected by the + // underlying system have been broken. This error code is reserved + // for serious errors. + // + // HTTP Mapping: 500 Internal Server Error + INTERNAL = 13; + + // The service is currently unavailable. This is most likely a + // transient condition, which can be corrected by retrying with + // a backoff. + // + // See the guidelines above for deciding between `FAILED_PRECONDITION`, + // `ABORTED`, and `UNAVAILABLE`. + // + // HTTP Mapping: 503 Service Unavailable + UNAVAILABLE = 14; + + // Unrecoverable data loss or corruption. + // + // HTTP Mapping: 500 Internal Server Error + DATA_LOSS = 15; +} diff --git a/proto/external/googleapis/googleapis/83e756a66b80b072bd234abcfe89edf459090974/google/rpc/error_details.proto b/proto/external/googleapis/googleapis/83e756a66b80b072bd234abcfe89edf459090974/google/rpc/error_details.proto new file mode 100644 index 000000000..e1e134e8f --- /dev/null +++ b/proto/external/googleapis/googleapis/83e756a66b80b072bd234abcfe89edf459090974/google/rpc/error_details.proto @@ -0,0 +1,214 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright 2017 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.rpc; + +import "google/protobuf/duration.proto"; + +option go_package = "google.golang.org/genproto/googleapis/rpc/errdetails;errdetails"; +option java_multiple_files = true; +option java_outer_classname = "ErrorDetailsProto"; +option java_package = "com.google.rpc"; +option objc_class_prefix = "RPC"; + + +// Describes when the clients can retry a failed request. Clients could ignore +// the recommendation here or retry when this information is missing from error +// responses. +// +// It's always recommended that clients should use exponential backoff when +// retrying. +// +// Clients should wait until `retry_delay` amount of time has passed since +// receiving the error response before retrying. 
If retrying requests also +// fail, clients should use an exponential backoff scheme to gradually increase +// the delay between retries based on `retry_delay`, until either a maximum +// number of retries have been reached or a maximum retry delay cap has been +// reached. +message RetryInfo { + // Clients should wait at least this long between retrying the same request. + google.protobuf.Duration retry_delay = 1; +} + +// Describes additional debugging info. +message DebugInfo { + // The stack trace entries indicating where the error occurred. + repeated string stack_entries = 1; + + // Additional debugging information provided by the server. + string detail = 2; +} + +// Describes how a quota check failed. +// +// For example if a daily limit was exceeded for the calling project, +// a service could respond with a QuotaFailure detail containing the project +// id and the description of the quota limit that was exceeded. If the +// calling project hasn't enabled the service in the developer console, then +// a service could respond with the project id and set `service_disabled` +// to true. +// +// Also see RetryDetail and Help types for other details about handling a +// quota failure. +message QuotaFailure { + // A message type used to describe a single quota violation. For example, a + // daily quota or a custom quota that was exceeded. + message Violation { + // The subject on which the quota check failed. + // For example, "clientip:<ip address of client>" or "project:<Google developer project id>". + string subject = 1; + + // A description of how the quota check failed. Clients can use this + // description to find more about the quota configuration in the service's + // public documentation, or find the relevant quota limit to adjust through + // developer console. + // + // For example: "Service disabled" or "Daily Limit for read operations + // exceeded". + string description = 2; + } + + // Describes all quota violations.
+ repeated Violation violations = 1; +} + +// Describes what preconditions have failed. +// +// For example, if an RPC failed because it required the Terms of Service to be +// acknowledged, it could list the terms of service violation in the +// PreconditionFailure message. +message PreconditionFailure { + // A message type used to describe a single precondition failure. + message Violation { + // The type of PreconditionFailure. We recommend using a service-specific + // enum type to define the supported precondition violation types. For + // example, "TOS" for "Terms of Service violation". + string type = 1; + + // The subject, relative to the type, that failed. + // For example, "google.com/cloud" relative to the "TOS" type would + // indicate which terms of service is being referenced. + string subject = 2; + + // A description of how the precondition failed. Developers can use this + // description to understand how to fix the failure. + // + // For example: "Terms of service not accepted". + string description = 3; + } + + // Describes all precondition violations. + repeated Violation violations = 1; +} + +// Describes violations in a client request. This error type focuses on the +// syntactic aspects of the request. +message BadRequest { + // A message type used to describe a single bad request field. + message FieldViolation { + // A path leading to a field in the request body. The value will be a + // sequence of dot-separated identifiers that identify a protocol buffer + // field. E.g., "field_violations.field" would identify this field. + string field = 1; + + // A description of why the request element is bad. + string description = 2; + } + + // Describes all violations in a client request. + repeated FieldViolation field_violations = 1; +} + +// Contains metadata about the request that clients can attach when filing a bug +// or providing other forms of feedback. 
+message RequestInfo { + // An opaque string that should only be interpreted by the service generating + // it. For example, it can be used to identify requests in the service's logs. + string request_id = 1; + + // Any data that was used to serve this request. For example, an encrypted + // stack trace that can be sent back to the service provider for debugging. + string serving_data = 2; +} + +// Describes the resource that is being accessed. +message ResourceInfo { + // A name for the type of resource being accessed, e.g. "sql table", + // "cloud storage bucket", "file", "Google calendar"; or the type URL + // of the resource: e.g. "type.googleapis.com/google.pubsub.v1.Topic". + string resource_type = 1; + + // The name of the resource being accessed. For example, a shared calendar + // name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current + // error is [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED]. + string resource_name = 2; + + // The owner of the resource (optional). + // For example, "user:<owner email>" or "project:<Google developer project id>". + string owner = 3; + + // Describes what error is encountered when accessing this resource. + // For example, updating a cloud project may require the `writer` permission + // on the developer console project. + string description = 4; +} + +// Provides links to documentation or for performing an out of band action. +// +// For example, if a quota check failed with an error indicating the calling +// project hasn't enabled the accessed service, this can contain a URL pointing +// directly to the right place in the developer console to flip the bit. +message Help { + // Describes a URL link. + message Link { + // Describes what the link offers. + string description = 1; + + // The URL of the link. + string url = 2; + } + + // URL(s) pointing to additional information on handling the current error.
+ repeated Link links = 1; +} + +// Provides a localized error message that is safe to return to the user +// which can be attached to an RPC error. +message LocalizedMessage { + // The locale used following the specification defined at + // http://www.rfc-editor.org/rfc/bcp/bcp47.txt. + // Examples are: "en-US", "fr-CH", "es-MX" + string locale = 1; + + // The localized error message in the above locale. + string message = 2; +} diff --git a/proto/external/googleapis/googleapis/83e756a66b80b072bd234abcfe89edf459090974/google/rpc/status.proto b/proto/external/googleapis/googleapis/83e756a66b80b072bd234abcfe89edf459090974/google/rpc/status.proto new file mode 100644 index 000000000..10d2f3552 --- /dev/null +++ b/proto/external/googleapis/googleapis/83e756a66b80b072bd234abcfe89edf459090974/google/rpc/status.proto @@ -0,0 +1,106 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright 2017 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.rpc; + +import "google/protobuf/any.proto"; + +option go_package = "google.golang.org/genproto/googleapis/rpc/status;status"; +option java_multiple_files = true; +option java_outer_classname = "StatusProto"; +option java_package = "com.google.rpc"; +option objc_class_prefix = "RPC"; + + +// The `Status` type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. It is used by +// [gRPC](https://github.com/grpc). The error model is designed to be: +// +// - Simple to use and understand for most users +// - Flexible enough to meet unexpected needs +// +// # Overview +// +// The `Status` message contains three pieces of data: error code, error message, +// and error details. The error code should be an enum value of +// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes if needed. The +// error message should be a developer-facing English message that helps +// developers *understand* and *resolve* the error. If a localized user-facing +// error message is needed, put the localized message in the error details or +// localize it in the client. The optional error details may contain arbitrary +// information about the error. There is a predefined set of error detail types +// in the package `google.rpc` that can be used for common error conditions. +// +// # Language mapping +// +// The `Status` message is the logical representation of the error model, but it +// is not necessarily the actual wire format. When the `Status` message is +// exposed in different client libraries and different wire protocols, it can be +// mapped differently. For example, it will likely be mapped to some exceptions +// in Java, but more likely mapped to some error codes in C. 
+// +// # Other uses +// +// The error model and the `Status` message can be used in a variety of +// environments, either with or without APIs, to provide a +// consistent developer experience across different environments. +// +// Example uses of this error model include: +// +// - Partial errors. If a service needs to return partial errors to the client, +// it may embed the `Status` in the normal response to indicate the partial +// errors. +// +// - Workflow errors. A typical workflow has multiple steps. Each step may +// have a `Status` message for error reporting. +// +// - Batch operations. If a client uses batch request and batch response, the +// `Status` message should be used directly inside batch response, one for +// each error sub-response. +// +// - Asynchronous operations. If an API call embeds asynchronous operation +// results in its response, the status of those operations should be +// represented directly using the `Status` message. +// +// - Logging. If some API errors are stored in logs, the message `Status` could +// be used directly after any stripping needed for security/privacy reasons. +message Status { + // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. + int32 code = 1; + + // A developer-facing error message, which should be in English. Any + // user-facing error message should be localized and sent in the + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. + string message = 2; + + // A list of messages that carry the error details. There is a common set of + // message types for APIs to use. 
+ repeated google.protobuf.Any details = 3; +} diff --git a/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/BUILD.bazel b/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/BUILD.bazel new file mode 100644 index 000000000..b606306cb --- /dev/null +++ b/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/BUILD.bazel @@ -0,0 +1,40 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +proto_library( + name = "wrappers_proto", + srcs = [ + "any.proto", + "api.proto", + "descriptor.proto", + "duration.proto", + "empty.proto", + "field_mask.proto", + "source_context.proto", + "struct.proto", + "timestamp.proto", + "type.proto", + "wrappers.proto", + ], + visibility = ["//visibility:public"], + deps = [ + "@com_google_protobuf//:any_proto", + "@com_google_protobuf//:source_context_proto", + "@com_google_protobuf//:type_proto", + ], +) + +go_proto_library( + name = "wrappers_go_proto", + importpath = "github.com/golang/protobuf/ptypes/wrappers", + proto = ":wrappers_proto", + visibility = ["//visibility:public"], +) + +go_library( + name = "go_default_library", + embed = [":wrappers_go_proto"], + importpath = "github.com/golang/protobuf/ptypes/wrappers", + visibility = ["//visibility:public"], +) diff --git a/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/any.proto b/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/any.proto new file mode 100644 index 000000000..983397532 --- /dev/null +++ b/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/any.proto @@ -0,0 +1,172 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "google.golang.org/protobuf/types/known/anypb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "AnyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := anypb.New(foo) +// if err != nil { +// ... +// } +// ... +// foo := &pb.Foo{} +// if err := any.UnmarshalTo(foo); err != nil { +// ... 
+// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": <string>, +// "lastName": <string> +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +message Any { + // A URL/resource name that uniquely identifies the type of the serialized + // protocol buffer message. This string must contain at least + // one "/" character. The last segment of the URL's path must represent + // the fully qualified name of the type (as in + // `path/google.protobuf.Duration`). The name should be in a canonical form + // (e.g., leading "." is not accepted). + // + // In practice, teams usually precompile into the binary all types that they + // expect it to use in the context of Any. However, for URLs which use the + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: + // + // * If no scheme is provided, `https` is assumed.
+ // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with + // type.googleapis.com. + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + string type_url = 1; + + // Must be a valid serialized protocol buffer of the above specified type. + bytes value = 2; +} diff --git a/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/api.proto b/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/api.proto new file mode 100644 index 000000000..577c99122 --- /dev/null +++ b/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/api.proto @@ -0,0 +1,222 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto3"; + +package google.protobuf; + +import "google/protobuf/source_context.proto"; +import "google/protobuf/type.proto"; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "ApiProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option go_package = "google.golang.org/protobuf/types/known/apipb"; + +// Api is a light-weight descriptor for an API Interface. +// +// Interfaces are also described as "protocol buffer services" in some contexts, +// such as by the "service" keyword in a .proto file, but they are different +// from API Services, which represent a concrete implementation of an interface +// as opposed to simply a description of methods and bindings. They are also +// sometimes simply referred to as "APIs" in other contexts, such as the name of +// this message itself. See https://cloud.google.com/apis/design/glossary for +// detailed terminology. +message Api { + // The fully qualified name of this interface, including package name + // followed by the interface's simple name. + string name = 1; + + // The methods of this interface, in unspecified order. + repeated Method methods = 2; + + // Any metadata attached to the interface. + repeated Option options = 3; + + // A version string for this interface. If specified, must have the form + // `major-version.minor-version`, as in `1.10`. If the minor version is + // omitted, it defaults to zero. If the entire version field is empty, the + // major version is derived from the package name, as outlined below. If the + // field is not empty, the version in the package name will be verified to be + // consistent with what is provided here. + // + // The versioning schema uses [semantic + // versioning](http://semver.org) where the major version number + // indicates a breaking change and the minor version an additive, + // non-breaking change. 
Both version numbers are signals to users + // what to expect from different versions, and should be carefully + // chosen based on the product plan. + // + // The major version is also reflected in the package name of the + // interface, which must end in `v<major-version>`, as in + // `google.feature.v1`. For major versions 0 and 1, the suffix can + // be omitted. Zero major versions must only be used for + // experimental, non-GA interfaces. + // + // + string version = 4; + + // Source context for the protocol buffer service represented by this + // message. + SourceContext source_context = 5; + + // Included interfaces. See [Mixin][]. + repeated Mixin mixins = 6; + + // The source syntax of the service. + Syntax syntax = 7; +} + +// Method represents a method of an API interface. +message Method { + // The simple name of this method. + string name = 1; + + // A URL of the input message type. + string request_type_url = 2; + + // If true, the request is streamed. + bool request_streaming = 3; + + // The URL of the output message type. + string response_type_url = 4; + + // If true, the response is streamed. + bool response_streaming = 5; + + // Any metadata attached to the method. + repeated Option options = 6; + + // The source syntax of this method. + Syntax syntax = 7; +} + +// Declares an API Interface to be included in this interface. The including +// interface must redeclare all the methods from the included interface, but +// documentation and options are inherited as follows: +// +// - If after comment and whitespace stripping, the documentation +// string of the redeclared method is empty, it will be inherited +// from the original method. +// +// - Each annotation belonging to the service config (http, +// visibility) which is not set in the redeclared method will be +// inherited. +// +// - If an http annotation is inherited, the path pattern will be +// modified as follows.
Any version prefix will be replaced by the +// version of the including interface plus the [root][] path if +// specified. +// +// Example of a simple mixin: +// +// package google.acl.v1; +// service AccessControl { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v1/{resource=**}:getAcl"; +// } +// } +// +// package google.storage.v2; +// service Storage { +// rpc GetAcl(GetAclRequest) returns (Acl); +// +// // Get a data record. +// rpc GetData(GetDataRequest) returns (Data) { +// option (google.api.http).get = "/v2/{resource=**}"; +// } +// } +// +// Example of a mixin configuration: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// +// The mixin construct implies that all methods in `AccessControl` are +// also declared with same name and request/response types in +// `Storage`. A documentation generator or annotation processor will +// see the effective `Storage.GetAcl` method after inheriting +// documentation and annotations as follows: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/{resource=**}:getAcl"; +// } +// ... +// } +// +// Note how the version in the path pattern changed from `v1` to `v2`. +// +// If the `root` field in the mixin is specified, it should be a +// relative path under which inherited HTTP paths are placed. Example: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// root: acls +// +// This implies the following inherited HTTP annotation: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; +// } +// ... +// } +message Mixin { + // The fully qualified name of the interface which is included. 
+ string name = 1; + + // If non-empty specifies a path under which inherited HTTP paths + // are rooted. + string root = 2; +} diff --git a/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/descriptor.proto b/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/descriptor.proto new file mode 100644 index 000000000..2a0929531 --- /dev/null +++ b/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/descriptor.proto @@ -0,0 +1,925 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// The messages in this file describe the definitions found in .proto files. +// A valid .proto file can be translated directly to a FileDescriptorProto +// without any other information (e.g. without reading its imports). + + +syntax = "proto2"; + +package google.protobuf; + +option go_package = "google.golang.org/protobuf/types/descriptorpb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DescriptorProtos"; +option csharp_namespace = "Google.Protobuf.Reflection"; +option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; + +// descriptor.proto must be optimized for speed because reflection-based +// algorithms don't work during bootstrapping. +option optimize_for = SPEED; + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +message FileDescriptorSet { + repeated FileDescriptorProto file = 1; +} + +// Describes a complete .proto file. 
+message FileDescriptorProto { + optional string name = 1; // file name, relative to root of source tree + optional string package = 2; // e.g. "foo", "foo.bar", etc. + + // Names of files imported by this file. + repeated string dependency = 3; + // Indexes of the public imported files in the dependency list above. + repeated int32 public_dependency = 10; + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + repeated int32 weak_dependency = 11; + + // All top-level definitions in this file. + repeated DescriptorProto message_type = 4; + repeated EnumDescriptorProto enum_type = 5; + repeated ServiceDescriptorProto service = 6; + repeated FieldDescriptorProto extension = 7; + + optional FileOptions options = 8; + + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + optional SourceCodeInfo source_code_info = 9; + + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + optional string syntax = 12; +} + +// Describes a message type. +message DescriptorProto { + optional string name = 1; + + repeated FieldDescriptorProto field = 2; + repeated FieldDescriptorProto extension = 6; + + repeated DescriptorProto nested_type = 3; + repeated EnumDescriptorProto enum_type = 4; + + message ExtensionRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Exclusive. + + optional ExtensionRangeOptions options = 3; + } + repeated ExtensionRange extension_range = 5; + + repeated OneofDescriptorProto oneof_decl = 8; + + optional MessageOptions options = 7; + + // Range of reserved tag numbers. Reserved tag numbers may not be used by + // fields or extension ranges in the same message. Reserved ranges may + // not overlap. 
+ message ReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Exclusive. + } + repeated ReservedRange reserved_range = 9; + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + repeated string reserved_name = 10; +} + +message ExtensionRangeOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +// Describes a field within a message. +message FieldDescriptorProto { + enum Type { + // 0 is reserved for errors. + // Order is weird for historical reasons. + TYPE_DOUBLE = 1; + TYPE_FLOAT = 2; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + TYPE_INT64 = 3; + TYPE_UINT64 = 4; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + TYPE_INT32 = 5; + TYPE_FIXED64 = 6; + TYPE_FIXED32 = 7; + TYPE_BOOL = 8; + TYPE_STRING = 9; + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + TYPE_GROUP = 10; + TYPE_MESSAGE = 11; // Length-delimited aggregate. + + // New in version 2. + TYPE_BYTES = 12; + TYPE_UINT32 = 13; + TYPE_ENUM = 14; + TYPE_SFIXED32 = 15; + TYPE_SFIXED64 = 16; + TYPE_SINT32 = 17; // Uses ZigZag encoding. + TYPE_SINT64 = 18; // Uses ZigZag encoding. + } + + enum Label { + // 0 is reserved for errors + LABEL_OPTIONAL = 1; + LABEL_REQUIRED = 2; + LABEL_REPEATED = 3; + } + + optional string name = 1; + optional int32 number = 3; + optional Label label = 4; + + // If type_name is set, this need not be set. 
If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + optional Type type = 5; + + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + optional string type_name = 6; + + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + optional string extendee = 2; + + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + optional string default_value = 7; + + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + optional int32 oneof_index = 9; + + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + optional string json_name = 10; + + optional FieldOptions options = 8; + + // If true, this is a proto3 "optional". When a proto3 field is optional, it + // tracks presence regardless of field type. + // + // When proto3_optional is true, this field must be belong to a oneof to + // signal to old proto3 clients that presence is tracked for this field. This + // oneof is known as a "synthetic" oneof, and this field must be its sole + // member (each proto3 optional field gets its own synthetic oneof). Synthetic + // oneofs exist in the descriptor only, and do not generate any API. 
Synthetic + // oneofs must be ordered after all "real" oneofs. + // + // For message fields, proto3_optional doesn't create any semantic change, + // since non-repeated message fields always track presence. However it still + // indicates the semantic detail of whether the user wrote "optional" or not. + // This can be useful for round-tripping the .proto file. For consistency we + // give message fields a synthetic oneof also, even though it is not required + // to track presence. This is especially important because the parser can't + // tell if a field is a message or an enum, so it must always create a + // synthetic oneof. + // + // Proto2 optional fields do not set this flag, because they already indicate + // optional with `LABEL_OPTIONAL`. + optional bool proto3_optional = 17; +} + +// Describes a oneof. +message OneofDescriptorProto { + optional string name = 1; + optional OneofOptions options = 2; +} + +// Describes an enum type. +message EnumDescriptorProto { + optional string name = 1; + + repeated EnumValueDescriptorProto value = 2; + + optional EnumOptions options = 3; + + // Range of reserved numeric values. Reserved values may not be used by + // entries in the same enum. Reserved ranges may not overlap. + // + // Note that this is distinct from DescriptorProto.ReservedRange in that it + // is inclusive such that it can appropriately represent the entire int32 + // domain. + message EnumReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Inclusive. + } + + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + repeated EnumReservedRange reserved_range = 4; + + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. + repeated string reserved_name = 5; +} + +// Describes a value within an enum. 
+message EnumValueDescriptorProto { + optional string name = 1; + optional int32 number = 2; + + optional EnumValueOptions options = 3; +} + +// Describes a service. +message ServiceDescriptorProto { + optional string name = 1; + repeated MethodDescriptorProto method = 2; + + optional ServiceOptions options = 3; +} + +// Describes a method of a service. +message MethodDescriptorProto { + optional string name = 1; + + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. + optional string input_type = 2; + optional string output_type = 3; + + optional MethodOptions options = 4; + + // Identifies if client streams multiple client messages + optional bool client_streaming = 5 [default = false]; + // Identifies if server streams multiple server messages + optional bool server_streaming = 6 [default = false]; +} + + +// =================================================================== +// Options + +// Each of the definitions above may have "options" attached. These are +// just annotations which may cause code to be generated slightly differently +// or may contain hints for code that manipulates protocol messages. +// +// Clients may define custom options as extensions of the *Options messages. +// These extensions may not yet be known at parsing time, so the parser cannot +// store the values in them. Instead it stores them in a field in the *Options +// message called uninterpreted_option. This field must have the same name +// across all *Options messages. We then use this field to populate the +// extensions when we build a descriptor, at which point all protos have been +// parsed and so all extensions are known. +// +// Extension numbers for custom options may be chosen as follows: +// * For options which will only be used within a single application or +// organization, or for experimental options, use field numbers 50000 +// through 99999. 
It is up to you to ensure that you do not use the +// same number for multiple options. +// * For options which will be published and used publicly by multiple +// independent entities, e-mail protobuf-global-extension-registry@google.com +// to reserve extension numbers. Simply provide your project name (e.g. +// Objective-C plugin) and your project website (if available) -- there's no +// need to explain how you intend to use them. Usually you only need one +// extension number. You can declare multiple options with only one extension +// number by putting them in a sub-message. See the Custom Options section of +// the docs for examples: +// https://developers.google.com/protocol-buffers/docs/proto#options +// If this turns out to be popular, a web service will be set up +// to automatically assign option numbers. + +message FileOptions { + + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + optional string java_package = 1; + + + // Controls the name of the wrapper Java class generated for the .proto file. + // That class will always contain the .proto file's getDescriptor() method as + // well as any top-level extensions defined in the .proto file. + // If java_multiple_files is disabled, then all the other classes from the + // .proto file will be nested inside the single wrapper outer class. + optional string java_outer_classname = 8; + + // If enabled, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the wrapper class + // named by java_outer_classname. However, the wrapper class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. 
+ optional bool java_multiple_files = 10 [default = false]; + + // This option does nothing. + optional bool java_generate_equals_and_hash = 20 [deprecated=true]; + + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + optional bool java_string_check_utf8 = 27 [default = false]; + + + // Generated classes can be optimized for speed or code size. + enum OptimizeMode { + SPEED = 1; // Generate complete code for parsing, serialization, + // etc. + CODE_SIZE = 2; // Use ReflectionOps to implement these methods. + LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. + } + optional OptimizeMode optimize_for = 9 [default = SPEED]; + + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + optional string go_package = 11; + + + + + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. 
+ optional bool cc_generic_services = 16 [default = false]; + optional bool java_generic_services = 17 [default = false]; + optional bool py_generic_services = 18 [default = false]; + optional bool php_generic_services = 42 [default = false]; + + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + optional bool deprecated = 23 [default = false]; + + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + optional bool cc_enable_arenas = 31 [default = true]; + + + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + optional string objc_class_prefix = 36; + + // Namespace for generated classes; defaults to the package. + optional string csharp_namespace = 37; + + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + optional string swift_prefix = 39; + + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + optional string php_class_prefix = 40; + + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + optional string php_namespace = 41; + + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be + // used for determining the namespace. 
+ optional string php_metadata_namespace = 44; + + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. + optional string ruby_package = 45; + + + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. + // See the documentation for the "Options" section above. + extensions 1000 to max; + + reserved 38; +} + +message MessageOptions { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + optional bool message_set_wire_format = 1 [default = false]; + + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + optional bool no_standard_descriptor_accessor = 2 [default = false]; + + // Is this message deprecated? 
+ // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + optional bool deprecated = 3 [default = false]; + + reserved 4, 5, 6; + + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementations still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + optional bool map_entry = 7; + + reserved 8; // javalite_serializable + reserved 9; // javanano_as_lite + + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message FieldOptions { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + optional CType ctype = 1 [default = STRING]; + enum CType { + // Default mode. 
+ STRING = 0; + + CORD = 1; + + STRING_PIECE = 2; + } + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + optional bool packed = 2; + + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + optional JSType jstype = 6 [default = JS_NORMAL]; + enum JSType { + // Use the default type. + JS_NORMAL = 0; + + // Use JavaScript strings. + JS_STRING = 1; + + // Use JavaScript numbers. + JS_NUMBER = 2; + } + + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. 
However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + optional bool lazy = 5 [default = false]; + + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + optional bool deprecated = 3 [default = false]; + + // For Google-internal migration only. Do not use. + optional bool weak = 10 [default = false]; + + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. 
+ extensions 1000 to max; + + reserved 4; // removed jtype +} + +message OneofOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumOptions { + + // Set this option to true to allow mapping different tag names to the same + // value. + optional bool allow_alias = 2; + + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + optional bool deprecated = 3 [default = false]; + + reserved 5; // javanano_as_lite + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumValueOptions { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + optional bool deprecated = 1 [default = false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message ServiceOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this service deprecated? 
+ // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + optional bool deprecated = 33 [default = false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message MethodOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + optional bool deprecated = 33 [default = false]; + + // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, + // or neither? HTTP based RPC implementation may choose GET verb for safe + // methods, and PUT verb for idempotent methods instead of the default POST. + enum IdempotencyLevel { + IDEMPOTENCY_UNKNOWN = 0; + NO_SIDE_EFFECTS = 1; // implies idempotent + IDEMPOTENT = 2; // idempotent, but may have side effects + } + optional IdempotencyLevel idempotency_level = 34 + [default = IDEMPOTENCY_UNKNOWN]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. 
+// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +message UninterpretedOption { + // The name of the uninterpreted option. Each string represents a segment in + // a dot-separated name. is_extension is true iff a segment represents an + // extension (denoted with parentheses in options specs in .proto files). + // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents + // "foo.(bar.baz).qux". + message NamePart { + required string name_part = 1; + required bool is_extension = 2; + } + repeated NamePart name = 2; + + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. + optional string identifier_value = 3; + optional uint64 positive_int_value = 4; + optional int64 negative_int_value = 5; + optional double double_value = 6; + optional bytes string_value = 7; + optional string aggregate_value = 8; +} + +// =================================================================== +// Optional source code info + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +message SourceCodeInfo { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). 
+ // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendant. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + repeated Location location = 1; + message Location { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. 
For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + repeated int32 path = 1 [packed = true]; + + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. + repeated int32 span = 2 [packed = true]; + + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. 
+ // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + optional string leading_comments = 3; + optional string trailing_comments = 4; + repeated string leading_detached_comments = 6; + } +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +message GeneratedCodeInfo { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + repeated Annotation annotation = 1; + message Annotation { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + repeated int32 path = 1 [packed = true]; + + // Identifies the filesystem path to the original source .proto. + optional string source_file = 2; + + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + optional int32 begin = 3; + + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. 
The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). + optional int32 end = 4; + } +} diff --git a/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/duration.proto b/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/duration.proto new file mode 100644 index 000000000..65d4c48b7 --- /dev/null +++ b/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/duration.proto @@ -0,0 +1,130 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "google.golang.org/protobuf/types/known/durationpb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DurationProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. 
+// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (duration.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +message Duration { + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + int64 seconds = 1; + + // Signed fractions of a second at nanosecond resolution of the span + // of time. 
Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + int32 nanos = 2; +} diff --git a/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/empty.proto b/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/empty.proto new file mode 100644 index 000000000..9081d5193 --- /dev/null +++ b/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/empty.proto @@ -0,0 +1,66 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "google.golang.org/protobuf/types/known/emptypb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "EmptyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. 
+message Empty {} diff --git a/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/field_mask.proto b/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/field_mask.proto new file mode 100644 index 000000000..58854b6d7 --- /dev/null +++ b/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/field_mask.proto @@ -0,0 +1,259 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "FieldMaskProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option go_package = "google.golang.org/protobuf/types/known/fieldmaskpb"; +option cc_enable_arenas = true; + +// `FieldMask` represents a set of symbolic field paths, for example: +// +// paths: "f.a" +// paths: "f.b.d" +// +// Here `f` represents a field in some root message, `a` and `b` +// fields in the message found in `f`, and `d` a field found in the +// message in `f.b`. +// +// Field masks are used to specify a subset of fields that should be +// returned by a get operation or modified by an update operation. +// Field masks also have a custom JSON encoding (see below). +// +// # Field Masks in Projections +// +// When used in the context of a projection, a response message or +// sub-message is filtered by the API to only contain those fields as +// specified in the mask. 
For example, if the mask in the previous +// example is applied to a response message as follows: +// +// f { +// a : 22 +// b { +// d : 1 +// x : 2 +// } +// y : 13 +// } +// z: 8 +// +// The result will not contain specific values for fields x,y and z +// (their value will be set to the default, and omitted in proto text +// output): +// +// +// f { +// a : 22 +// b { +// d : 1 +// } +// } +// +// A repeated field is not allowed except at the last position of a +// paths string. +// +// If a FieldMask object is not present in a get operation, the +// operation applies to all fields (as if a FieldMask of all fields +// had been specified). +// +// Note that a field mask does not necessarily apply to the +// top-level response message. In case of a REST get operation, the +// field mask applies directly to the response, but in case of a REST +// list operation, the mask instead applies to each individual message +// in the returned resource list. In case of a REST custom method, +// other definitions may be used. Where the mask applies will be +// clearly documented together with its declaration in the API. In +// any case, the effect on the returned resource/resources is required +// behavior for APIs. +// +// # Field Masks in Update Operations +// +// A field mask in update operations specifies which fields of the +// targeted resource are going to be updated. The API is required +// to only change the values of the fields as specified in the mask +// and leave the others untouched. If a resource is passed in to +// describe the updated values, the API ignores the values of all +// fields not covered by the mask. +// +// If a repeated field is specified for an update operation, new values will +// be appended to the existing repeated field in the target resource. Note that +// a repeated field is only allowed in the last position of a `paths` string. 
+// +// If a sub-message is specified in the last position of the field mask for an +// update operation, then new value will be merged into the existing sub-message +// in the target resource. +// +// For example, given the target message: +// +// f { +// b { +// d: 1 +// x: 2 +// } +// c: [1] +// } +// +// And an update message: +// +// f { +// b { +// d: 10 +// } +// c: [2] +// } +// +// then if the field mask is: +// +// paths: ["f.b", "f.c"] +// +// then the result will be: +// +// f { +// b { +// d: 10 +// x: 2 +// } +// c: [1, 2] +// } +// +// An implementation may provide options to override this default behavior for +// repeated and message fields. +// +// In order to reset a field's value to the default, the field must +// be in the mask and set to the default value in the provided resource. +// Hence, in order to reset all fields of a resource, provide a default +// instance of the resource and set all fields in the mask, or do +// not provide a mask as described below. +// +// If a field mask is not present on update, the operation applies to +// all fields (as if a field mask of all fields has been specified). +// Note that in the presence of schema evolution, this may mean that +// fields the client does not know and has therefore not filled into +// the request will be reset to their default. If this is unwanted +// behavior, a specific service may require a client to always specify +// a field mask, producing an error if not. +// +// As with get operations, the location of the resource which +// describes the updated values in the request message depends on the +// operation kind. In any case, the effect of the field mask is +// required to be honored by the API. +// +// ## Considerations for HTTP REST +// +// The HTTP kind of an update operation which uses a field mask must +// be set to PATCH instead of PUT in order to satisfy HTTP semantics +// (PUT must only be used for full updates). 
+// +// # JSON Encoding of Field Masks +// +// In JSON, a field mask is encoded as a single string where paths are +// separated by a comma. Fields name in each path are converted +// to/from lower-camel naming conventions. +// +// As an example, consider the following message declarations: +// +// message Profile { +// User user = 1; +// Photo photo = 2; +// } +// message User { +// string display_name = 1; +// string address = 2; +// } +// +// In proto a field mask for `Profile` may look as such: +// +// mask { +// paths: "user.display_name" +// paths: "photo" +// } +// +// In JSON, the same mask is represented as below: +// +// { +// mask: "user.displayName,photo" +// } +// +// # Field Masks and Oneof Fields +// +// Field masks treat fields in oneofs just as regular fields. Consider the +// following message: +// +// message SampleMessage { +// oneof test_oneof { +// string name = 4; +// SubMessage sub_message = 9; +// } +// } +// +// The field mask can be: +// +// mask { +// paths: "name" +// } +// +// Or: +// +// mask { +// paths: "sub_message" +// } +// +// Note that oneof type names ("test_oneof" in this case) cannot be used in +// paths. +// +// ## Field Mask Verification +// +// The implementation of any API method which has a FieldMask type field in the +// request should verify the included field paths, and return an +// `INVALID_ARGUMENT` error if any path is unmappable. +message FieldMask { + // The set of field mask paths. + repeated string paths = 1; +} diff --git a/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/source_context.proto b/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/source_context.proto new file mode 100644 index 000000000..884dab20f --- /dev/null +++ b/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/source_context.proto @@ -0,0 +1,62 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "SourceContextProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option go_package = "google.golang.org/protobuf/types/known/sourcecontextpb"; + +// `SourceContext` represents information about the source of a +// protobuf element, like the file in which it is defined. +message SourceContext { + // The path-qualified name of the .proto file that contained the associated + // protobuf element. For example: `"google/protobuf/source_context.proto"`. + string file_name = 1; +} diff --git a/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/struct.proto b/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/struct.proto new file mode 100644 index 000000000..b7e7bfa25 --- /dev/null +++ b/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/struct.proto @@ -0,0 +1,109 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/protobuf/types/known/structpb";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "StructProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// `Struct` represents a structured data value, consisting of fields
+// which map to dynamically typed values. In some languages, `Struct`
+// might be supported by a native representation. For example, in
+// scripting languages like JS a struct is represented as an
+// object. The details of that representation are described together
+// with the proto support for the language.
+//
+// The JSON representation for `Struct` is JSON object.
+message Struct {
+  // Unordered map of dynamically typed values.
+  map<string, Value> fields = 1;
+}
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of value is expected to set one of that
+// variants, absence of any variant indicates an error.
+//
+// The JSON representation for `Value` is JSON value.
+message Value {
+  // The kind of value.
+  oneof kind {
+    // Represents a null value.
+    NullValue null_value = 1;
+    // Represents a double value.
+    double number_value = 2;
+    // Represents a string value.
+ string string_value = 3; + // Represents a boolean value. + bool bool_value = 4; + // Represents a structured value. + Struct struct_value = 5; + // Represents a repeated `Value`. + ListValue list_value = 6; + } +} + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +enum NullValue { + // Null value. + NULL_VALUE = 0; +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +message ListValue { + // Repeated field of dynamically typed values. + repeated Value values = 1; +} diff --git a/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/timestamp.proto b/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/timestamp.proto new file mode 100644 index 000000000..d088c86b8 --- /dev/null +++ b/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/timestamp.proto @@ -0,0 +1,161 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "google.golang.org/protobuf/types/known/timestamppb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "TimestampProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// A Timestamp represents a point in time independent of any time zone or local +// calendar, encoded as a count of seconds and fractions of seconds at +// nanosecond resolution. The count is relative to an epoch at UTC midnight on +// January 1, 1970, in the proleptic Gregorian calendar which extends the +// Gregorian calendar backwards to year one. +// +// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap +// second table is needed for interpretation, using a [24-hour linear +// smear](https://developers.google.com/time/smear). +// +// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By +// restricting to that range, we ensure that we can convert to and from [RFC +// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. 
+// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from Java `Instant.now()`. +// +// Instant now = Instant.now(); +// +// Timestamp timestamp = +// Timestamp.newBuilder().setSeconds(now.getEpochSecond()) +// .setNanos(now.getNano()).build(); +// +// +// Example 6: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required. A proto3 JSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a proto3 JSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard +// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) +// method. 
In Python, a standard `datetime.datetime` object can be converted +// to this format using +// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with +// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use +// the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D +// ) to obtain a formatter capable of generating timestamps in this format. +// +// +message Timestamp { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + int32 nanos = 2; +} diff --git a/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/type.proto b/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/type.proto new file mode 100644 index 000000000..a309234da --- /dev/null +++ b/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/type.proto @@ -0,0 +1,201 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto3"; + +package google.protobuf; + +import "google/protobuf/any.proto"; +import "google/protobuf/source_context.proto"; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option java_package = "com.google.protobuf"; +option java_outer_classname = "TypeProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option go_package = "google.golang.org/protobuf/types/known/typepb"; + +// A protocol buffer message type. +message Type { + // The fully qualified message name. + string name = 1; + // The list of fields. + repeated Field fields = 2; + // The list of types appearing in `oneof` definitions in this type. + repeated string oneofs = 3; + // The protocol buffer options. + repeated Option options = 4; + // The source context. + SourceContext source_context = 5; + // The source syntax. + Syntax syntax = 6; +} + +// A single field of a message type. +message Field { + // Basic field types. + enum Kind { + // Field type unknown. + TYPE_UNKNOWN = 0; + // Field type double. + TYPE_DOUBLE = 1; + // Field type float. + TYPE_FLOAT = 2; + // Field type int64. + TYPE_INT64 = 3; + // Field type uint64. + TYPE_UINT64 = 4; + // Field type int32. + TYPE_INT32 = 5; + // Field type fixed64. + TYPE_FIXED64 = 6; + // Field type fixed32. + TYPE_FIXED32 = 7; + // Field type bool. + TYPE_BOOL = 8; + // Field type string. + TYPE_STRING = 9; + // Field type group. Proto2 syntax only, and deprecated. + TYPE_GROUP = 10; + // Field type message. + TYPE_MESSAGE = 11; + // Field type bytes. + TYPE_BYTES = 12; + // Field type uint32. + TYPE_UINT32 = 13; + // Field type enum. + TYPE_ENUM = 14; + // Field type sfixed32. + TYPE_SFIXED32 = 15; + // Field type sfixed64. + TYPE_SFIXED64 = 16; + // Field type sint32. + TYPE_SINT32 = 17; + // Field type sint64. + TYPE_SINT64 = 18; + } + + // Whether a field is optional, required, or repeated. + enum Cardinality { + // For fields with unknown cardinality. 
+ CARDINALITY_UNKNOWN = 0; + // For optional fields. + CARDINALITY_OPTIONAL = 1; + // For required fields. Proto2 syntax only. + CARDINALITY_REQUIRED = 2; + // For repeated fields. + CARDINALITY_REPEATED = 3; + } + + // The field type. + Kind kind = 1; + // The field cardinality. + Cardinality cardinality = 2; + // The field number. + int32 number = 3; + // The field name. + string name = 4; + // The field type URL, without the scheme, for message or enumeration + // types. Example: `"type.googleapis.com/google.protobuf.Timestamp"`. + string type_url = 6; + // The index of the field type in `Type.oneofs`, for message or enumeration + // types. The first type has index 1; zero means the type is not in the list. + int32 oneof_index = 7; + // Whether to use alternative packed wire representation. + bool packed = 8; + // The protocol buffer options. + repeated Option options = 9; + // The field JSON name. + string json_name = 10; + // The string value of the default value of this field. Proto2 syntax only. + string default_value = 11; +} + +// Enum type definition. +message Enum { + // Enum type name. + string name = 1; + // Enum value definitions. + repeated EnumValue enumvalue = 2; + // Protocol buffer options. + repeated Option options = 3; + // The source context. + SourceContext source_context = 4; + // The source syntax. + Syntax syntax = 5; +} + +// Enum value definition. +message EnumValue { + // Enum value name. + string name = 1; + // Enum value number. + int32 number = 2; + // Protocol buffer options. + repeated Option options = 3; +} + +// A protocol buffer option, which can be attached to a message, field, +// enumeration, etc. +message Option { + // The option's name. For protobuf built-in options (options defined in + // descriptor.proto), this is the short name. For example, `"map_entry"`. + // For custom options, it should be the fully-qualified name. For example, + // `"google.api.http"`. 
+ string name = 1; + // The option's value packed in an Any message. If the value is a primitive, + // the corresponding wrapper type defined in google/protobuf/wrappers.proto + // should be used. If the value is an enum, it should be stored as an int32 + // value using the google.protobuf.Int32Value type. + Any value = 2; +} + +// The syntax in which a protocol buffer element is defined. +enum Syntax { + // Syntax `proto2`. + SYNTAX_PROTO2 = 0; + // Syntax `proto3`. + SYNTAX_PROTO3 = 1; +} diff --git a/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/wrappers.proto b/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/wrappers.proto new file mode 100644 index 000000000..d8c9373c4 --- /dev/null +++ b/proto/external/protocolbuffers/protobuf/v3.18.1/google/protobuf/wrappers.proto @@ -0,0 +1,137 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Wrappers for primitive (non-message) types. These types are useful +// for embedding primitives in the `google.protobuf.Any` type and for places +// where we need to distinguish between the absence of a primitive +// typed field and its default value. +// +// These wrappers have no meaningful use within repeated fields as they lack +// the ability to detect presence on individual elements. +// These wrappers have no meaningful use within a map or a oneof since +// individual entries of a map or fields of a oneof can already detect presence. 
+ +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "google.golang.org/protobuf/types/known/wrapperspb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "WrappersProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +message DoubleValue { + // The double value. + double value = 1; +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +message FloatValue { + // The float value. + float value = 1; +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +message Int64Value { + // The int64 value. + int64 value = 1; +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +message UInt64Value { + // The uint64 value. + uint64 value = 1; +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +message Int32Value { + // The int32 value. + int32 value = 1; +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +message UInt32Value { + // The uint32 value. + uint32 value = 1; +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +message BoolValue { + // The bool value. + bool value = 1; +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +message StringValue { + // The string value. + string value = 1; +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +message BytesValue { + // The bytes value. 
+ bytes value = 1; +} diff --git a/proto/feature/BUILD.bazel b/proto/feature/BUILD.bazel new file mode 100644 index 000000000..a00c630d4 --- /dev/null +++ b/proto/feature/BUILD.bazel @@ -0,0 +1,57 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") +load("//proto:proto_descriptor.bzl", "proto_descriptor") + +proto_library( + name = "feature_proto", + srcs = [ + "clause.proto", + "command.proto", + "evaluation.proto", + "feature.proto", + "feature_last_used_info.proto", + "prerequisite.proto", + "reason.proto", + "rule.proto", + "segment.proto", + "service.proto", + "strategy.proto", + "target.proto", + "variation.proto", + ], + visibility = ["//visibility:public"], + deps = [ + "//proto/user:user_proto", + "@com_google_protobuf//:any_proto", + "@com_google_protobuf//:wrappers_proto", + ], +) + +go_proto_library( + name = "feature_go_proto", + compilers = ["@io_bazel_rules_go//proto:go_grpc"], + importpath = "github.com/bucketeer-io/bucketeer/proto/feature", + proto = ":feature_proto", + visibility = ["//visibility:public"], + deps = ["//proto/user:go_default_library"], +) + +go_library( + name = "go_default_library", + embed = [":feature_go_proto"], + importpath = "github.com/bucketeer-io/bucketeer/proto/feature", + visibility = ["//visibility:public"], +) + +proto_descriptor( + name = "proto_descriptor", + srcs = ["service.proto"], + visibility = ["//visibility:public"], + deps = [ + ":feature_proto", + "//proto/user:user_proto", + "@com_google_protobuf//:any_proto", + "@com_google_protobuf//:wrappers_proto", + ], +) diff --git a/proto/feature/clause.proto b/proto/feature/clause.proto new file mode 100644 index 000000000..96fd09990 --- /dev/null +++ b/proto/feature/clause.proto @@ -0,0 +1,38 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package bucketeer.feature; +option go_package = "github.com/bucketeer-io/bucketeer/proto/feature"; + +message Clause { + enum Operator { + EQUALS = 0; + IN = 1; + ENDS_WITH = 2; + STARTS_WITH = 3; + SEGMENT = 4; + GREATER = 5; + GREATER_OR_EQUAL = 6; + LESS = 7; + LESS_OR_EQUAL = 8; + BEFORE = 9; + AFTER = 10; + } + string id = 1; + string attribute = 2; + Operator operator = 3; + repeated string values = 4; +} diff --git a/proto/feature/command.proto b/proto/feature/command.proto new file mode 100644 index 000000000..dd2b8a9dd --- /dev/null +++ b/proto/feature/command.proto @@ -0,0 +1,229 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package bucketeer.feature; +option go_package = "github.com/bucketeer-io/bucketeer/proto/feature"; + +import "google/protobuf/any.proto"; +import "google/protobuf/wrappers.proto"; + +import "proto/feature/clause.proto"; +import "proto/feature/feature.proto"; +import "proto/feature/rule.proto"; +import "proto/feature/variation.proto"; +import "proto/feature/strategy.proto"; +import "proto/feature/segment.proto"; +import "proto/feature/prerequisite.proto"; + +message Command { + google.protobuf.Any command = 1; +} + +message CreateFeatureCommand { + string id = 1; + string name = 2; + string description = 3; // This is an optional field + repeated Variation variations = 4; + repeated string tags = 5; + google.protobuf.Int32Value default_on_variation_index = 6; + google.protobuf.Int32Value default_off_variation_index = 7; + Feature.VariationType variation_type = 8; +} + +message ArchiveFeatureCommand {} + +message UnarchiveFeatureCommand {} + +message DeleteFeatureCommand {} + +message RenameFeatureCommand { + string name = 1; +} + +message ChangeDescriptionCommand { + string description = 1; +} + +message ChangeBulkUploadSegmentUsersStatusCommand { + Segment.Status status = 1; + SegmentUser.State state = 2; + int64 count = 3; +} + +message AddTagCommand { + string tag = 1; +} + +message RemoveTagCommand { + string tag = 1; +} + +message EnableFeatureCommand {} + +message DisableFeatureCommand {} + +message AddVariationCommand { + string value = 1; + string name = 2; + string description = 3; // This is an optional field +} + +message RemoveVariationCommand { + string id = 1; +} + +message ChangeVariationValueCommand { + string id = 1; + string value = 2; +} + +message ChangeVariationNameCommand { + string id = 1; + string name = 2; +} + +message ChangeVariationDescriptionCommand { + string id = 1; + string description = 2; +} + +message ChangeOffVariationCommand { + string id = 1; +} + +message AddUserToVariationCommand { + string id = 1; + 
string user = 2; +} + +message RemoveUserFromVariationCommand { + string id = 1; + string user = 2; +} + +message ChangeDefaultStrategyCommand { + Strategy strategy = 1; +} + +message AddRuleCommand { + Rule rule = 1; +} + +message ChangeRuleStrategyCommand { + string id = 1; + string rule_id = 2; + Strategy strategy = 3; +} + +message DeleteRuleCommand { + string id = 1; +} + +message AddClauseCommand { + string rule_id = 1; + Clause clause = 2; +} + +message DeleteClauseCommand { + string id = 1; + string rule_id = 2; +} + +message ChangeClauseAttributeCommand { + string id = 1; + string rule_id = 2; + string attribute = 3; +} + +message ChangeClauseOperatorCommand { + string id = 1; + string rule_id = 2; + Clause.Operator operator = 3; +} + +message AddClauseValueCommand { + string id = 1; + string rule_id = 2; + string value = 3; +} + +message RemoveClauseValueCommand { + string id = 1; + string rule_id = 2; + string value = 3; +} + +message ChangeFixedStrategyCommand { + string id = 1; + string rule_id = 2; + FixedStrategy strategy = 3; +} + +message ChangeRolloutStrategyCommand { + string id = 1; + string rule_id = 2; + RolloutStrategy strategy = 3; +} + +message CreateSegmentCommand { + string name = 1; + string description = 2; +} + +message DeleteSegmentCommand {} + +message ChangeSegmentNameCommand { + string name = 1; +} + +message ChangeSegmentDescriptionCommand { + string description = 1; +} + +message AddSegmentUserCommand { + repeated string user_ids = 1; + bucketeer.feature.SegmentUser.State state = 2; +} + +message DeleteSegmentUserCommand { + repeated string user_ids = 1; + bucketeer.feature.SegmentUser.State state = 2; +} + +message BulkUploadSegmentUsersCommand { + bytes data = 1; // segment user ids separated by comma or new line + SegmentUser.State state = 2; +} + +message IncrementFeatureVersionCommand {} + +message CloneFeatureCommand { + string environment_namespace = 1; +} + +message ResetSamplingSeedCommand {} + +message 
AddPrerequisiteCommand { + Prerequisite prerequisite = 1; +} + +message RemovePrerequisiteCommand { + string feature_id = 1; +} + +message ChangePrerequisiteVariationCommand { + Prerequisite prerequisite = 1; +} diff --git a/proto/feature/evaluation.proto b/proto/feature/evaluation.proto new file mode 100644 index 000000000..16480ef78 --- /dev/null +++ b/proto/feature/evaluation.proto @@ -0,0 +1,43 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package bucketeer.feature; +option go_package = "github.com/bucketeer-io/bucketeer/proto/feature"; + +import "proto/feature/variation.proto"; +import "proto/feature/reason.proto"; + +message Evaluation { + string id = 1; + string feature_id = 2; + int32 feature_version = 3; + string user_id = 4; + string variation_id = 5; + Variation variation = 6 [deprecated = true]; + Reason reason = 7; + string variation_value = 8; +} + +message UserEvaluations { + enum State { + QUEUED = 0; + PARTIAL = 1; + FULL = 2; + } + string id = 1; + repeated Evaluation evaluations = 2; + int64 created_at = 3; +} diff --git a/proto/feature/feature.proto b/proto/feature/feature.proto new file mode 100644 index 000000000..0b3a77838 --- /dev/null +++ b/proto/feature/feature.proto @@ -0,0 +1,66 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package bucketeer.feature; +option go_package = "github.com/bucketeer-io/bucketeer/proto/feature"; + +import "proto/feature/rule.proto"; +import "proto/feature/target.proto"; +import "proto/feature/variation.proto"; +import "proto/feature/strategy.proto"; +import "proto/feature/feature_last_used_info.proto"; +import "proto/feature/prerequisite.proto"; + +message Feature { + enum VariationType { + STRING = 0; + BOOLEAN = 1; + NUMBER = 2; + JSON = 3; + } + string id = 1; + string name = 2; + string description = 3; + bool enabled = 4; + bool deleted = 5; + bool evaluation_undelayable = 6 [deprecated = true]; + int32 ttl = 7; + int32 version = 8; + int64 created_at = 9; + int64 updated_at = 10; + repeated Variation variations = 11; + repeated Target targets = 12; + repeated Rule rules = 13; + Strategy default_strategy = 14; + string off_variation = 15; + repeated string tags = 16; + FeatureLastUsedInfo last_used_info = 17; + string maintainer = 18; + VariationType variation_type = 19; + bool archived = 20; + repeated Prerequisite prerequisites = 21; + string sampling_seed = 22; +} + +message Features { + repeated Feature features = 1; +} + +message Tag { + string id = 1; + int64 created_at = 2; + int64 updated_at = 3; +} diff --git a/proto/feature/feature_last_used_info.proto b/proto/feature/feature_last_used_info.proto new file mode 100644 index 000000000..20dacbefe --- /dev/null +++ 
b/proto/feature/feature_last_used_info.proto @@ -0,0 +1,27 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package bucketeer.feature; +option go_package = "github.com/bucketeer-io/bucketeer/proto/feature"; + +message FeatureLastUsedInfo { + string feature_id = 1; + int32 version = 2; + int64 last_used_at = 3; + int64 created_at = 4; + string client_oldest_version = 5; + string client_latest_version = 6; +} \ No newline at end of file diff --git a/proto/feature/prerequisite.proto b/proto/feature/prerequisite.proto new file mode 100644 index 000000000..bad5839a4 --- /dev/null +++ b/proto/feature/prerequisite.proto @@ -0,0 +1,23 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package bucketeer.feature; +option go_package = "github.com/bucketeer-io/bucketeer/proto/feature"; + +message Prerequisite { + string feature_id = 1; + string variation_id = 2; +} diff --git a/proto/feature/reason.proto b/proto/feature/reason.proto new file mode 100644 index 000000000..28a6bcce2 --- /dev/null +++ b/proto/feature/reason.proto @@ -0,0 +1,31 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package bucketeer.feature; +option go_package = "github.com/bucketeer-io/bucketeer/proto/feature"; + +message Reason { + enum Type { + TARGET = 0; + RULE = 1; + DEFAULT = 3; + CLIENT = 4; + OFF_VARIATION = 5; + PREREQUISITE = 6; + } + Type type = 1; + string rule_id = 2; +} diff --git a/proto/feature/rule.proto b/proto/feature/rule.proto new file mode 100644 index 000000000..d6cb826a5 --- /dev/null +++ b/proto/feature/rule.proto @@ -0,0 +1,27 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package bucketeer.feature; +option go_package = "github.com/bucketeer-io/bucketeer/proto/feature"; + +import "proto/feature/clause.proto"; +import "proto/feature/strategy.proto"; + +message Rule { + string id = 1; + Strategy strategy = 2; + repeated Clause clauses = 3; +} diff --git a/proto/feature/segment.proto b/proto/feature/segment.proto new file mode 100644 index 000000000..72856c263 --- /dev/null +++ b/proto/feature/segment.proto @@ -0,0 +1,58 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package bucketeer.feature; +option go_package = "github.com/bucketeer-io/bucketeer/proto/feature"; + +import "proto/feature/rule.proto"; + +message Segment { + enum Status { + INITIAL = 0; + UPLOADING = 1; + SUCEEDED = 2; + FAILED = 3; + } + string id = 1; + string name = 2; + string description = 3; + repeated Rule rules = 4; + int64 created_at = 5; + int64 updated_at = 6; + int64 version = 7 [deprecated = true]; + bool deleted = 8; + int64 included_user_count = 9; + int64 excluded_user_count = 10 [deprecated = true]; + Status status = 11; + bool is_in_use_status = 12; // This field is set only when APIs return. 
+} + +message SegmentUser { + enum State { + INCLUDED = 0; + EXCLUDED = 1 [deprecated = true]; + } + string id = 1; + string segment_id = 2; + string user_id = 3; + State state = 4; + bool deleted = 5; +} + +message SegmentUsers { + string segment_id = 1; + repeated SegmentUser users = 2; +} \ No newline at end of file diff --git a/proto/feature/service.proto b/proto/feature/service.proto new file mode 100644 index 000000000..de90d76b3 --- /dev/null +++ b/proto/feature/service.proto @@ -0,0 +1,407 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package bucketeer.feature; +option go_package = "github.com/bucketeer-io/bucketeer/proto/feature"; + +import "google/protobuf/wrappers.proto"; + +import "proto/feature/command.proto"; +import "proto/feature/feature.proto"; +import "proto/feature/evaluation.proto"; +import "proto/user/user.proto"; +import "proto/feature/segment.proto"; + +message GetFeatureRequest { + string id = 1; + string environment_namespace = 2; +} + +message GetFeatureResponse { + Feature feature = 1; +} + +message GetFeaturesRequest { + string environment_namespace = 1; + repeated string ids = 2; +} + +message GetFeaturesResponse { + repeated Feature features = 1; +} + +message ListFeaturesRequest { + enum OrderBy { + DEFAULT = 0; + NAME = 1; + CREATED_AT = 2; + UPDATED_AT = 3; + TAGS = 4; + ENABLED = 5; + } + enum OrderDirection { + ASC = 0; + DESC = 1; + } + int64 page_size = 1; + string cursor = 2; + repeated string tags = 3; + OrderBy order_by = 4; + OrderDirection order_direction = 5; + string environment_namespace = 6; + string maintainer = 7; + google.protobuf.BoolValue enabled = 8; + google.protobuf.BoolValue has_experiment = 9; + string search_keyword = 10; + google.protobuf.BoolValue archived = 11; +} + +message ListFeaturesResponse { + repeated Feature features = 1; + string cursor = 2; + int64 total_count = 3; +} + +message ListEnabledFeaturesRequest { + int64 page_size = 1; + string cursor = 2; + repeated string tags = 3; + string environment_namespace = 4; +} + +message ListEnabledFeaturesResponse { + repeated Feature features = 1; + string cursor = 2; +} + +message CreateFeatureRequest { + CreateFeatureCommand command = 1; + string environment_namespace = 2; +} + +message CreateFeatureResponse {} + +message EnableFeatureRequest { + string id = 1; + EnableFeatureCommand command = 2; + string environment_namespace = 3; + string comment = 4; +} + +message EnableFeatureResponse {} + +message DisableFeatureRequest { + string id = 1; + DisableFeatureCommand 
command = 2; + string environment_namespace = 3; + string comment = 4; +} + +message DisableFeatureResponse {} + +message ArchiveFeatureRequest { + string id = 1; + ArchiveFeatureCommand command = 2; + string environment_namespace = 3; + string comment = 4; +} + +message ArchiveFeatureResponse {} + +message UnarchiveFeatureRequest { + string id = 1; + UnarchiveFeatureCommand command = 2; + string environment_namespace = 3; + string comment = 4; +} + +message UnarchiveFeatureResponse {} + +message DeleteFeatureRequest { + string id = 1; + DeleteFeatureCommand command = 2; + string environment_namespace = 3; + string comment = 4; +} + +message DeleteFeatureResponse {} + +message UpdateFeatureDetailsRequest { + string id = 1; + RenameFeatureCommand rename_feature_command = 2; + ChangeDescriptionCommand change_description_command = 3; + repeated AddTagCommand add_tag_commands = 4; + repeated RemoveTagCommand remove_tag_commands = 5; + string environment_namespace = 6; + string comment = 7; +} + +message UpdateFeatureDetailsResponse {} + +message UpdateFeatureVariationsRequest { + string id = 1; + repeated Command commands = 2; + string environment_namespace = 3; + string comment = 4; +} + +message UpdateFeatureVariationsResponse {} + +message UpdateFeatureTargetingRequest { + string id = 1; + repeated Command commands = 2; + string environment_namespace = 3; + string comment = 4; +} + +message UpdateFeatureTargetingResponse {} + +message CloneFeatureRequest { + string id = 1; + CloneFeatureCommand command = 2; + string environment_namespace = 3; +} + +message CloneFeatureResponse {} + +message CreateSegmentRequest { + CreateSegmentCommand command = 1; + string environment_namespace = 2; +} + +message CreateSegmentResponse { + Segment segment = 1; +} + +message GetSegmentRequest { + string id = 1; + string environment_namespace = 2; +} + +message GetSegmentResponse { + Segment segment = 1; +} + +message ListSegmentsRequest { + enum OrderBy { + DEFAULT = 0; + NAME = 1; + 
CREATED_AT = 2; + UPDATED_AT = 3; + } + enum OrderDirection { + ASC = 0; + DESC = 1; + } + int64 page_size = 1; + string cursor = 2; + string environment_namespace = 3; + OrderBy order_by = 4; + OrderDirection order_direction = 5; + string search_keyword = 6; + google.protobuf.Int32Value status = 7; + google.protobuf.BoolValue is_in_use_status = 8; +} + +message ListSegmentsResponse { + repeated Segment segments = 1; + string cursor = 2; + int64 total_count = 3; +} + +message DeleteSegmentRequest { + string id = 1; + DeleteSegmentCommand command = 2; + string environment_namespace = 3; +} + +message DeleteSegmentResponse {} + +message UpdateSegmentRequest { + string id = 1; + repeated Command commands = 2; + string environment_namespace = 3; +} + +message UpdateSegmentResponse {} + +message AddSegmentUserRequest { + string id = 1; + AddSegmentUserCommand command = 2; + string environment_namespace = 3; +} + +message AddSegmentUserResponse {} + +message DeleteSegmentUserRequest { + string id = 1; + DeleteSegmentUserCommand command = 2; + string environment_namespace = 3; +} + +message DeleteSegmentUserResponse {} + +message GetSegmentUserRequest { + string segment_id = 1; + string user_id = 2; + SegmentUser.State state = 3; + string environment_namespace = 4; +} + +message GetSegmentUserResponse { + SegmentUser user = 1; +} + +message ListSegmentUsersRequest { + int64 page_size = 1; + string cursor = 2; + string segment_id = 3; + google.protobuf.Int32Value state = 4; + string user_id = 5; + string environment_namespace = 6; +} + +message ListSegmentUsersResponse { + repeated SegmentUser users = 1; + string cursor = 2; +} + +message BulkUploadSegmentUsersRequest { + string environment_namespace = 1; + string segment_id = 2; + BulkUploadSegmentUsersCommand command = 3; +} + +message BulkUploadSegmentUsersResponse {} + +message BulkDownloadSegmentUsersRequest { + string environment_namespace = 1; + string segment_id = 2; + SegmentUser.State state = 3; +} + +message 
BulkDownloadSegmentUsersResponse { + bytes data = 1; // segment user ids separated by new line +} + +message EvaluateFeaturesRequest { + bucketeer.user.User user = 1; + string environment_namespace = 2; + string tag = 3; +} + +message EvaluateFeaturesResponse { + bucketeer.feature.UserEvaluations user_evaluations = 1; +} + +message GetUserEvaluationsRequest { + string environment_namespace = 1; + string tag = 2; + string user_id = 3; +} + +message GetUserEvaluationsResponse { + repeated bucketeer.feature.Evaluation evaluations = 1; +} + +message UpsertUserEvaluationRequest { + string environment_namespace = 1; + string tag = 2; + bucketeer.feature.Evaluation evaluation = 3; +} + +message ListTagsRequest { + enum OrderBy { + DEFAULT = 0; + ID = 1; + CREATED_AT = 2; + UPDATED_AT = 3; + } + enum OrderDirection { + ASC = 0; + DESC = 1; + } + string environment_namespace = 1; + int64 page_size = 2; + string cursor = 3; + OrderBy order_by = 4; + OrderDirection order_direction = 5; + string search_keyword = 6; +} + +message ListTagsResponse { + repeated Tag tags = 1; + string cursor = 2; + int64 total_count = 3; +} + +message UpsertUserEvaluationResponse {} + +service FeatureService { + rpc GetFeature(GetFeatureRequest) returns (GetFeatureResponse) {} + rpc GetFeatures(GetFeaturesRequest) returns (GetFeaturesResponse) {} + rpc ListFeatures(ListFeaturesRequest) returns (ListFeaturesResponse) {} + rpc ListEnabledFeatures(ListEnabledFeaturesRequest) + returns (ListEnabledFeaturesResponse) {} + rpc CreateFeature(CreateFeatureRequest) returns (CreateFeatureResponse) {} + rpc EnableFeature(EnableFeatureRequest) returns (EnableFeatureResponse) { + option deprecated = true; + } + rpc DisableFeature(DisableFeatureRequest) returns (DisableFeatureResponse) { + option deprecated = true; + } + rpc ArchiveFeature(ArchiveFeatureRequest) returns (ArchiveFeatureResponse) {} + rpc UnarchiveFeature(UnarchiveFeatureRequest) + returns (UnarchiveFeatureResponse) {} + rpc 
DeleteFeature(DeleteFeatureRequest) returns (DeleteFeatureResponse) {} + rpc UpdateFeatureDetails(UpdateFeatureDetailsRequest) + returns (UpdateFeatureDetailsResponse) {} + rpc UpdateFeatureVariations(UpdateFeatureVariationsRequest) + returns (UpdateFeatureVariationsResponse) {} + rpc UpdateFeatureTargeting(UpdateFeatureTargetingRequest) + returns (UpdateFeatureTargetingResponse) {} + rpc CloneFeature(CloneFeatureRequest) returns (CloneFeatureResponse) {} + + rpc CreateSegment(CreateSegmentRequest) returns (CreateSegmentResponse) {} + rpc GetSegment(GetSegmentRequest) returns (GetSegmentResponse) {} + rpc ListSegments(ListSegmentsRequest) returns (ListSegmentsResponse) {} + rpc DeleteSegment(DeleteSegmentRequest) returns (DeleteSegmentResponse) {} + rpc UpdateSegment(UpdateSegmentRequest) returns (UpdateSegmentResponse) {} + + rpc AddSegmentUser(AddSegmentUserRequest) returns (AddSegmentUserResponse) { + option deprecated = true; + } + rpc DeleteSegmentUser(DeleteSegmentUserRequest) + returns (DeleteSegmentUserResponse) { + option deprecated = true; + } + rpc GetSegmentUser(GetSegmentUserRequest) returns (GetSegmentUserResponse) { + option deprecated = true; + } + rpc ListSegmentUsers(ListSegmentUsersRequest) + returns (ListSegmentUsersResponse) {} + rpc BulkUploadSegmentUsers(BulkUploadSegmentUsersRequest) + returns (BulkUploadSegmentUsersResponse) {} + rpc BulkDownloadSegmentUsers(BulkDownloadSegmentUsersRequest) + returns (BulkDownloadSegmentUsersResponse) {} + rpc EvaluateFeatures(EvaluateFeaturesRequest) + returns (EvaluateFeaturesResponse) {} + rpc GetUserEvaluations(GetUserEvaluationsRequest) + returns (GetUserEvaluationsResponse) {} + rpc UpsertUserEvaluation(UpsertUserEvaluationRequest) + returns (UpsertUserEvaluationResponse) {} + rpc ListTags(ListTagsRequest) returns (ListTagsResponse) {} +} diff --git a/proto/feature/strategy.proto b/proto/feature/strategy.proto new file mode 100644 index 000000000..0144fb139 --- /dev/null +++ 
b/proto/feature/strategy.proto @@ -0,0 +1,40 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package bucketeer.feature; +option go_package = "github.com/bucketeer-io/bucketeer/proto/feature"; + +message FixedStrategy { + string variation = 1; +} + +message RolloutStrategy { + message Variation { + string variation = 1; + int32 weight = 2; + } + repeated Variation variations = 1; +} + +message Strategy { + enum Type { + FIXED = 0; + ROLLOUT = 1; + } + Type type = 1; + FixedStrategy fixed_strategy = 2; + RolloutStrategy rollout_strategy = 3; +} diff --git a/proto/feature/target.proto b/proto/feature/target.proto new file mode 100644 index 000000000..0eefc2d4c --- /dev/null +++ b/proto/feature/target.proto @@ -0,0 +1,23 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package bucketeer.feature; +option go_package = "github.com/bucketeer-io/bucketeer/proto/feature"; + +message Target { + string variation = 1; + repeated string users = 2; +} diff --git a/proto/feature/variation.proto b/proto/feature/variation.proto new file mode 100644 index 000000000..3058ceb0f --- /dev/null +++ b/proto/feature/variation.proto @@ -0,0 +1,26 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package bucketeer.feature; +option go_package = "github.com/bucketeer-io/bucketeer/proto/feature"; + +message Variation { + string id = 1; + string value = 2; // this is called value because it could be a string, + // number or even json object + string name = 3; + string description = 4; +} diff --git a/proto/gateway/BUILD.bazel b/proto/gateway/BUILD.bazel new file mode 100644 index 000000000..ab1ea4c48 --- /dev/null +++ b/proto/gateway/BUILD.bazel @@ -0,0 +1,79 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") +load("//proto:proto_descriptor.bzl", "proto_descriptor") + +proto_library( + name = "api_proto", + srcs = ["service.proto"], + visibility = ["//visibility:public"], + deps = [ + "//proto/event/client:client_proto", + "//proto/feature:feature_proto", + "//proto/user:user_proto", + "@com_github_googleapis_googleapis//:api_proto", + 
], +) + +go_proto_library( + name = "api_go_proto", + compilers = ["@io_bazel_rules_go//proto:go_grpc"], + importpath = "github.com/bucketeer-io/bucketeer/proto/api", + proto = ":api_proto", + visibility = ["//visibility:public"], + deps = [ + "//proto/event/client:go_default_library", + "//proto/feature:go_default_library", + "//proto/user:go_default_library", + "@org_golang_google_genproto//googleapis/api/annotations:go_default_library", + ], +) + +go_library( + name = "go_default_library", + embed = [":gateway_go_proto"], + importpath = "github.com/bucketeer-io/bucketeer/proto/gateway", + visibility = ["//visibility:public"], +) + +proto_library( + name = "gateway_proto", + srcs = ["service.proto"], + visibility = ["//visibility:public"], + deps = [ + "//proto/event/client:client_proto", + "//proto/feature:feature_proto", + "//proto/user:user_proto", + "@go_googleapis//google/api:annotations_proto", + ], +) + +go_proto_library( + name = "gateway_go_proto", + compilers = ["@io_bazel_rules_go//proto:go_grpc"], + importpath = "github.com/bucketeer-io/bucketeer/proto/gateway", + proto = ":gateway_proto", + visibility = ["//visibility:public"], + deps = [ + "//proto/event/client:go_default_library", + "//proto/feature:go_default_library", + "//proto/user:go_default_library", + "@go_googleapis//google/api:annotations_go_proto", + ], +) + +proto_descriptor( + name = "proto_descriptor", + srcs = ["service.proto"], + visibility = ["//visibility:public"], + deps = [ + ":gateway_proto", + "//proto/event/client:client_proto", + "//proto/feature:feature_proto", + "//proto/user:user_proto", + "@com_github_googleapis_googleapis//:api_proto", + "@com_google_protobuf//:any_proto", + "@com_google_protobuf//:descriptor_proto", + "@com_google_protobuf//:duration_proto", + ], +) diff --git a/proto/gateway/service.proto b/proto/gateway/service.proto new file mode 100644 index 000000000..3a08ad3ee --- /dev/null +++ b/proto/gateway/service.proto @@ -0,0 +1,94 @@ +// Copyright 2022 The 
Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package bucketeer.gateway; +option go_package = "github.com/bucketeer-io/bucketeer/proto/gateway"; + +import "google/api/annotations.proto"; + +import "proto/user/user.proto"; +import "proto/feature/evaluation.proto"; +import "proto/event/client/event.proto"; + +message PingRequest {} + +message PingResponse { + int64 time = 1; +} + +message GetEvaluationsRequest { + string tag = 1; + user.User user = 2; + string user_evaluations_id = 3; + string feature_id = 4 [deprecated = true]; // instead, use GetEvaluation API + bucketeer.event.client.SourceId source_id = 5; +} + +message GetEvaluationsResponse { + feature.UserEvaluations.State state = 1; + feature.UserEvaluations evaluations = 2; + string user_evaluations_id = 3; +} + +message GetEvaluationRequest { + string tag = 1; + user.User user = 2; + string feature_id = 3; + bucketeer.event.client.SourceId source_id = 4; +} + +message GetEvaluationResponse { + feature.Evaluation evaluation = 1; +} + +message RegisterEventsRequest { + repeated bucketeer.event.client.Event events = 1; +} + +message RegisterEventsResponse { + message Error { + bool retriable = 1; + string message = 2; + } + map<string, Error> errors = 1; +} + +service Gateway { + rpc Ping(PingRequest) returns (PingResponse) { + option (google.api.http) = { + post: "/ping" + body: "*" + }; + } + rpc GetEvaluations(GetEvaluationsRequest) returns (GetEvaluationsResponse) { 
option (google.api.http) = { + post: "/get_evaluations" + body: "*" + }; + } + rpc GetEvaluation(GetEvaluationRequest) returns (GetEvaluationResponse) { + option (google.api.http) = { + post: "/get_evaluation" + body: "*" + }; + } + rpc RegisterEvents(RegisterEventsRequest) returns (RegisterEventsResponse) { + option (google.api.http) = { + post: "/register_events" + body: "*" + }; + } +} diff --git a/proto/migration/BUILD.bazel b/proto/migration/BUILD.bazel new file mode 100644 index 000000000..533e9d693 --- /dev/null +++ b/proto/migration/BUILD.bazel @@ -0,0 +1,32 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") +load("//proto:proto_descriptor.bzl", "proto_descriptor") + +proto_library( + name = "migration_proto", + srcs = ["mysql_service.proto"], + visibility = ["//visibility:public"], +) + +go_proto_library( + name = "migration_go_proto", + compilers = ["@io_bazel_rules_go//proto:go_grpc"], + importpath = "github.com/bucketeer-io/bucketeer/proto/migration", + proto = ":migration_proto", + visibility = ["//visibility:public"], +) + +go_library( + name = "go_default_library", + embed = [":migration_go_proto"], + importpath = "github.com/bucketeer-io/bucketeer/proto/migration", + visibility = ["//visibility:public"], +) + +proto_descriptor( + name = "proto_descriptor", + srcs = ["mysql_service.proto"], + visibility = ["//visibility:public"], + deps = [":migration_proto"], +) diff --git a/proto/migration/mysql_service.proto b/proto/migration/mysql_service.proto new file mode 100644 index 000000000..5200c8e81 --- /dev/null +++ b/proto/migration/mysql_service.proto @@ -0,0 +1,35 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package bucketeer.migration; +option go_package = "github.com/bucketeer-io/bucketeer/proto/migration"; + +message MigrateAllMasterSchemaRequest {} + +message MigrateAllMasterSchemaResponse {} + +message RollbackMasterSchemaRequest { + int64 step = 1; +} + +message RollbackMasterSchemaResponse {} + +service MigrationMySQLService { + rpc MigrateAllMasterSchema(MigrateAllMasterSchemaRequest) + returns (MigrateAllMasterSchemaResponse) {} + rpc RollbackMasterSchema(RollbackMasterSchemaRequest) + returns (RollbackMasterSchemaResponse) {} +} diff --git a/proto/notification/BUILD.bazel b/proto/notification/BUILD.bazel new file mode 100644 index 000000000..4300c85a1 --- /dev/null +++ b/proto/notification/BUILD.bazel @@ -0,0 +1,41 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") +load("@rules_proto//proto:defs.bzl", "proto_library") +load("//proto:proto_descriptor.bzl", "proto_descriptor") + +proto_library( + name = "notification_proto", + srcs = [ + "command.proto", + "recipient.proto", + "service.proto", + "subscription.proto", + ], + visibility = ["//visibility:public"], + deps = ["@com_google_protobuf//:wrappers_proto"], +) + +go_proto_library( + name = "notification_go_proto", + compilers = ["@io_bazel_rules_go//proto:go_grpc"], + importpath = "github.com/bucketeer-io/bucketeer/proto/notification", + proto = ":notification_proto", + visibility = ["//visibility:public"], +) + +go_library( + name = "go_default_library", + embed = [":notification_go_proto"], + 
importpath = "github.com/bucketeer-io/bucketeer/proto/notification", + visibility = ["//visibility:public"], +) + +proto_descriptor( + name = "proto_descriptor", + srcs = ["service.proto"], + visibility = ["//visibility:public"], + deps = [ + ":notification_proto", + "@com_google_protobuf//:wrappers_proto", + ], +) diff --git a/proto/notification/command.proto b/proto/notification/command.proto new file mode 100644 index 000000000..49a61692c --- /dev/null +++ b/proto/notification/command.proto @@ -0,0 +1,69 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package bucketeer.notification; +option go_package = "github.com/bucketeer-io/bucketeer/proto/notification"; + +import "proto/notification/subscription.proto"; +import "proto/notification/recipient.proto"; + +message CreateAdminSubscriptionCommand { + repeated Subscription.SourceType source_types = 1; + Recipient recipient = 2; + string name = 3; +} + +message AddAdminSubscriptionSourceTypesCommand { + repeated Subscription.SourceType source_types = 1; +} + +message DeleteAdminSubscriptionSourceTypesCommand { + repeated Subscription.SourceType source_types = 1; +} + +message EnableAdminSubscriptionCommand {} + +message DisableAdminSubscriptionCommand {} + +message DeleteAdminSubscriptionCommand {} + +message RenameAdminSubscriptionCommand { + string name = 1; +} + +message CreateSubscriptionCommand { + repeated Subscription.SourceType source_types = 1; + Recipient recipient = 2; + string name = 3; +} + +message AddSourceTypesCommand { + repeated Subscription.SourceType source_types = 1; +} + +message DeleteSourceTypesCommand { + repeated Subscription.SourceType source_types = 1; +} + +message EnableSubscriptionCommand {} + +message DisableSubscriptionCommand {} + +message DeleteSubscriptionCommand {} + +message RenameSubscriptionCommand { + string name = 1; +} \ No newline at end of file diff --git a/proto/notification/recipient.proto b/proto/notification/recipient.proto new file mode 100644 index 000000000..c35635449 --- /dev/null +++ b/proto/notification/recipient.proto @@ -0,0 +1,28 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package bucketeer.notification; +option go_package = "github.com/bucketeer-io/bucketeer/proto/notification"; + +message Recipient { + enum Type { SlackChannel = 0; } + Type type = 1; + SlackChannelRecipient slack_channel_recipient = 2; +} + +message SlackChannelRecipient { + string webhook_url = 1; +} \ No newline at end of file diff --git a/proto/notification/sender/BUILD.bazel b/proto/notification/sender/BUILD.bazel new file mode 100644 index 000000000..a9511b3d3 --- /dev/null +++ b/proto/notification/sender/BUILD.bazel @@ -0,0 +1,38 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") +load("@rules_proto//proto:defs.bzl", "proto_library") + +proto_library( + name = "sender_proto", + srcs = [ + "notification.proto", + "notification_event.proto", + ], + visibility = ["//visibility:public"], + deps = [ + "//proto/event/domain:domain_proto", + "//proto/experiment:experiment_proto", + "//proto/feature:feature_proto", + "//proto/notification:notification_proto", + ], +) + +go_proto_library( + name = "sender_go_proto", + importpath = "github.com/bucketeer-io/bucketeer/proto/notification/sender", + proto = ":sender_proto", + visibility = ["//visibility:public"], + deps = [ + "//proto/event/domain:go_default_library", + "//proto/experiment:go_default_library", + "//proto/feature:go_default_library", + "//proto/notification:go_default_library", + ], +) + +go_library( + name = "go_default_library", + embed = [":sender_go_proto"], + importpath = 
"github.com/bucketeer-io/bucketeer/proto/notification/sender", + visibility = ["//visibility:public"], +) diff --git a/proto/notification/sender/notification.proto b/proto/notification/sender/notification.proto new file mode 100644 index 000000000..d7d5419c3 --- /dev/null +++ b/proto/notification/sender/notification.proto @@ -0,0 +1,64 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package bucketeer.notification.sender; +option go_package = "github.com/bucketeer-io/bucketeer/proto/notification/sender"; + +import "proto/event/domain/event.proto"; +import "proto/feature/feature.proto"; +import "proto/experiment/experiment.proto"; + +message Notification { + enum Type { + DomainEvent = 0; + FeatureStale = 1; + ExperimentRunning = 2; + MauCount = 3; + } + Type type = 1; + DomainEventNotification domain_event_notification = 2; + FeatureStaleNotification feature_stale_notification = 3; + ExperimentRunningNotification experiment_running_notification = 4; + MauCountNotification mau_count_notification = 5; +} + +message DomainEventNotification { + reserved 1; // string environment_namespace = 1 + bucketeer.event.domain.Editor editor = 2; + bucketeer.event.domain.Event.EntityType entity_type = 3; + string entity_id = 4; + bucketeer.event.domain.Event.Type type = 5; + string environment_id = 6; +} + +message FeatureStaleNotification { + reserved 1; // string environment_namespace = 1 + repeated 
bucketeer.feature.Feature features = 2; + string environment_id = 3; +} + +message ExperimentRunningNotification { + reserved 1; // string environment_namespace = 1 + string environment_id = 2; + repeated bucketeer.experiment.Experiment experiments = 3; +} + +message MauCountNotification { + string environment_id = 1; + int64 event_count = 2; + int64 user_count = 3; + int32 month = 4; +} diff --git a/proto/notification/sender/notification_event.proto b/proto/notification/sender/notification_event.proto new file mode 100644 index 000000000..2555ca0bf --- /dev/null +++ b/proto/notification/sender/notification_event.proto @@ -0,0 +1,29 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package bucketeer.notification.sender; +option go_package = "github.com/bucketeer-io/bucketeer/proto/notification/sender"; + +import "proto/notification/sender/notification.proto"; +import "proto/notification/subscription.proto"; + +message NotificationEvent { + string id = 1; + string environment_namespace = 2; + bucketeer.notification.Subscription.SourceType source_type = 3; + Notification notification = 4; + bool is_admin_event = 5; +} \ No newline at end of file diff --git a/proto/notification/service.proto b/proto/notification/service.proto new file mode 100644 index 000000000..743c48efc --- /dev/null +++ b/proto/notification/service.proto @@ -0,0 +1,228 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package bucketeer.notification; +option go_package = "github.com/bucketeer-io/bucketeer/proto/notification"; + +import "google/protobuf/wrappers.proto"; + +import "proto/notification/subscription.proto"; +import "proto/notification/command.proto"; + +message GetAdminSubscriptionRequest { + string id = 1; +} + +message GetAdminSubscriptionResponse { + Subscription subscription = 1; +} + +message ListAdminSubscriptionsRequest { + enum OrderBy { + DEFAULT = 0; + NAME = 1; + CREATED_AT = 2; + UPDATED_AT = 3; + } + enum OrderDirection { + ASC = 0; + DESC = 1; + } + int64 page_size = 1; + string cursor = 2; + repeated Subscription.SourceType source_types = 3; + OrderBy order_by = 4; + OrderDirection order_direction = 5; + string search_keyword = 6; + google.protobuf.BoolValue disabled = 7; +} + +message ListAdminSubscriptionsResponse { + repeated Subscription subscriptions = 1; + string cursor = 2; + int64 total_count = 3; +} + +message ListEnabledAdminSubscriptionsRequest { + int64 page_size = 1; + string cursor = 2; + repeated Subscription.SourceType source_types = 3; +} + +message ListEnabledAdminSubscriptionsResponse { + repeated Subscription subscriptions = 1; + string cursor = 2; +} + +message CreateAdminSubscriptionRequest { + CreateAdminSubscriptionCommand command = 1; +} + +message CreateAdminSubscriptionResponse {} + +message DeleteAdminSubscriptionRequest { + string id = 1; + 
DeleteAdminSubscriptionCommand command = 2; +} + +message DeleteAdminSubscriptionResponse {} + +message EnableAdminSubscriptionRequest { + string id = 1; + EnableAdminSubscriptionCommand command = 2; +} + +message EnableAdminSubscriptionResponse {} + +message DisableAdminSubscriptionRequest { + string id = 1; + DisableAdminSubscriptionCommand command = 2; +} + +message DisableAdminSubscriptionResponse {} + +message UpdateAdminSubscriptionRequest { + string id = 1; + AddAdminSubscriptionSourceTypesCommand add_source_types_command = 2; + DeleteAdminSubscriptionSourceTypesCommand delete_source_types_command = 3; + RenameAdminSubscriptionCommand rename_subscription_command = 4; +} + +message UpdateAdminSubscriptionResponse {} + +message GetSubscriptionRequest { + string environment_namespace = 1; + string id = 2; +} + +message GetSubscriptionResponse { + Subscription subscription = 1; +} + +message ListSubscriptionsRequest { + enum OrderBy { + DEFAULT = 0; + NAME = 1; + CREATED_AT = 2; + UPDATED_AT = 3; + } + enum OrderDirection { + ASC = 0; + DESC = 1; + } + string environment_namespace = 1; + int64 page_size = 2; + string cursor = 3; + repeated Subscription.SourceType source_types = 4; + OrderBy order_by = 5; + OrderDirection order_direction = 6; + string search_keyword = 7; + google.protobuf.BoolValue disabled = 8; +} + +message ListSubscriptionsResponse { + repeated Subscription subscriptions = 1; + string cursor = 2; + int64 total_count = 3; +} + +message ListEnabledSubscriptionsRequest { + string environment_namespace = 1; + int64 page_size = 2; + string cursor = 3; + repeated Subscription.SourceType source_types = 4; +} + +message ListEnabledSubscriptionsResponse { + repeated Subscription subscriptions = 1; + string cursor = 2; +} + +message CreateSubscriptionRequest { + string environment_namespace = 1; + CreateSubscriptionCommand command = 2; +} + +message CreateSubscriptionResponse {} + +message DeleteSubscriptionRequest { + string environment_namespace = 1; 
+ string id = 2; + DeleteSubscriptionCommand command = 3; +} + +message DeleteSubscriptionResponse {} + +message EnableSubscriptionRequest { + string environment_namespace = 1; + string id = 2; + EnableSubscriptionCommand command = 3; +} + +message EnableSubscriptionResponse {} + +message DisableSubscriptionRequest { + string environment_namespace = 1; + string id = 2; + DisableSubscriptionCommand command = 3; +} + +message DisableSubscriptionResponse {} + +message UpdateSubscriptionRequest { + string environment_namespace = 1; + string id = 2; + AddSourceTypesCommand add_source_types_command = 3; + DeleteSourceTypesCommand delete_source_types_command = 4; + RenameSubscriptionCommand rename_subscription_command = 5; +} + +message UpdateSubscriptionResponse {} + +service NotificationService { + rpc GetAdminSubscription(GetAdminSubscriptionRequest) + returns (GetAdminSubscriptionResponse) {} + rpc ListAdminSubscriptions(ListAdminSubscriptionsRequest) + returns (ListAdminSubscriptionsResponse) {} + rpc ListEnabledAdminSubscriptions(ListEnabledAdminSubscriptionsRequest) + returns (ListEnabledAdminSubscriptionsResponse) {} + rpc CreateAdminSubscription(CreateAdminSubscriptionRequest) + returns (CreateAdminSubscriptionResponse) {} + rpc DeleteAdminSubscription(DeleteAdminSubscriptionRequest) + returns (DeleteAdminSubscriptionResponse) {} + rpc EnableAdminSubscription(EnableAdminSubscriptionRequest) + returns (EnableAdminSubscriptionResponse) {} + rpc DisableAdminSubscription(DisableAdminSubscriptionRequest) + returns (DisableAdminSubscriptionResponse) {} + rpc UpdateAdminSubscription(UpdateAdminSubscriptionRequest) + returns (UpdateAdminSubscriptionResponse) {} + rpc GetSubscription(GetSubscriptionRequest) + returns (GetSubscriptionResponse) {} + rpc ListSubscriptions(ListSubscriptionsRequest) + returns (ListSubscriptionsResponse) {} + rpc ListEnabledSubscriptions(ListEnabledSubscriptionsRequest) + returns (ListEnabledSubscriptionsResponse) {} + rpc 
CreateSubscription(CreateSubscriptionRequest) + returns (CreateSubscriptionResponse) {} + rpc DeleteSubscription(DeleteSubscriptionRequest) + returns (DeleteSubscriptionResponse) {} + rpc EnableSubscription(EnableSubscriptionRequest) + returns (EnableSubscriptionResponse) {} + rpc DisableSubscription(DisableSubscriptionRequest) + returns (DisableSubscriptionResponse) {} + rpc UpdateSubscription(UpdateSubscriptionRequest) + returns (UpdateSubscriptionResponse) {} +} \ No newline at end of file diff --git a/proto/notification/subscription.proto b/proto/notification/subscription.proto new file mode 100644 index 000000000..961687d56 --- /dev/null +++ b/proto/notification/subscription.proto @@ -0,0 +1,49 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package bucketeer.notification; +option go_package = "github.com/bucketeer-io/bucketeer/proto/notification"; + +import "proto/notification/recipient.proto"; + +message Subscription { + enum SourceType { + DOMAIN_EVENT_FEATURE = 0; + DOMAIN_EVENT_GOAL = 1; + DOMAIN_EVENT_EXPERIMENT = 2; + DOMAIN_EVENT_ACCOUNT = 3; + DOMAIN_EVENT_APIKEY = 4; + DOMAIN_EVENT_SEGMENT = 5; + DOMAIN_EVENT_ENVIRONMENT = 6; + DOMAIN_EVENT_ADMIN_ACCOUNT = 7; + DOMAIN_EVENT_AUTOOPS_RULE = 8; + DOMAIN_EVENT_PUSH = 9; + DOMAIN_EVENT_SUBSCRIPTION = 10; + DOMAIN_EVENT_ADMIN_SUBSCRIPTION = 11; + DOMAIN_EVENT_PROJECT = 12; + DOMAIN_EVENT_WEBHOOK = 13; + FEATURE_STALE = 100; + EXPERIMENT_RUNNING = 200; + MAU_COUNT = 300; + } + string id = 1; + int64 created_at = 2; + int64 updated_at = 3; + bool disabled = 4; + repeated SourceType source_types = 5; + Recipient recipient = 6; + string name = 7; +} diff --git a/proto/proto.lock b/proto/proto.lock new file mode 100644 index 000000000..924c25ad4 --- /dev/null +++ b/proto/proto.lock @@ -0,0 +1,14208 @@ +{ + "definitions": [ + { + "protopath": "account:/:account.proto", + "def": { + "enums": [ + { + "name": "Account.Role", + "enum_fields": [ + { + "name": "VIEWER" + }, + { + "name": "EDITOR", + "integer": 1 + }, + { + "name": "OWNER", + "integer": 2 + }, + { + "name": "UNASSIGNED", + "integer": 99 + } + ] + } + ], + "messages": [ + { + "name": "Account", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "email", + "type": "string" + }, + { + "id": 3, + "name": "name", + "type": "string" + }, + { + "id": 4, + "name": "role", + "type": "Role" + }, + { + "id": 5, + "name": "disabled", + "type": "bool" + }, + { + "id": 6, + "name": "created_at", + "type": "int64" + }, + { + "id": 7, + "name": "updated_at", + "type": "int64" + }, + { + "id": 8, + "name": "deleted", + "type": "bool" + } + ] + }, + { + "name": "EnvironmentRole", + "fields": [ + { + "id": 1, + "name": "environment", + "type": 
"environment.Environment" + }, + { + "id": 2, + "name": "role", + "type": "Account.Role" + }, + { + "id": 3, + "name": "trial_project", + "type": "bool" + }, + { + "id": 4, + "name": "trial_started_at", + "type": "int64" + } + ] + } + ], + "imports": [ + { + "path": "proto/environment/environment.proto" + } + ], + "package": { + "name": "bucketeer.account" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/account" + } + ] + } + }, + { + "protopath": "account:/:api_key.proto", + "def": { + "enums": [ + { + "name": "APIKey.Role", + "enum_fields": [ + { + "name": "SDK" + }, + { + "name": "SERVICE", + "integer": 1 + } + ] + } + ], + "messages": [ + { + "name": "APIKey", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "name", + "type": "string" + }, + { + "id": 3, + "name": "role", + "type": "Role" + }, + { + "id": 4, + "name": "disabled", + "type": "bool" + }, + { + "id": 5, + "name": "created_at", + "type": "int64" + }, + { + "id": 6, + "name": "updated_at", + "type": "int64" + } + ] + }, + { + "name": "EnvironmentAPIKey", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "api_key", + "type": "APIKey" + }, + { + "id": 3, + "name": "environment_disabled", + "type": "bool" + } + ] + } + ], + "package": { + "name": "bucketeer.account" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/account" + } + ] + } + }, + { + "protopath": "account:/:command.proto", + "def": { + "messages": [ + { + "name": "CreateAdminAccountCommand", + "fields": [ + { + "id": 1, + "name": "email", + "type": "string" + } + ] + }, + { + "name": "EnableAdminAccountCommand" + }, + { + "name": "DisableAdminAccountCommand" + }, + { + "name": "ConvertAccountCommand" + }, + { + "name": "DeleteAccountCommand" + }, + { + "name": "CreateAccountCommand", + "fields": [ + { + "id": 1, + "name": "email", + 
"type": "string" + }, + { + "id": 2, + "name": "role", + "type": "account.Account.Role" + } + ] + }, + { + "name": "ChangeAccountRoleCommand", + "fields": [ + { + "id": 1, + "name": "role", + "type": "account.Account.Role" + } + ] + }, + { + "name": "EnableAccountCommand" + }, + { + "name": "DisableAccountCommand" + }, + { + "name": "CreateAPIKeyCommand", + "fields": [ + { + "id": 1, + "name": "name", + "type": "string" + }, + { + "id": 2, + "name": "role", + "type": "account.APIKey.Role" + } + ] + }, + { + "name": "ChangeAPIKeyNameCommand", + "fields": [ + { + "id": 1, + "name": "name", + "type": "string" + } + ] + }, + { + "name": "EnableAPIKeyCommand" + }, + { + "name": "DisableAPIKeyCommand" + } + ], + "imports": [ + { + "path": "proto/account/account.proto" + }, + { + "path": "proto/account/api_key.proto" + } + ], + "package": { + "name": "bucketeer.account" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/account" + } + ] + } + }, + { + "protopath": "account:/:service.proto", + "def": { + "enums": [ + { + "name": "ListAdminAccountsRequest.OrderBy", + "enum_fields": [ + { + "name": "DEFAULT" + }, + { + "name": "EMAIL", + "integer": 1 + }, + { + "name": "CREATED_AT", + "integer": 2 + }, + { + "name": "UPDATED_AT", + "integer": 3 + } + ] + }, + { + "name": "ListAdminAccountsRequest.OrderDirection", + "enum_fields": [ + { + "name": "ASC" + }, + { + "name": "DESC", + "integer": 1 + } + ] + }, + { + "name": "ListAccountsRequest.OrderBy", + "enum_fields": [ + { + "name": "DEFAULT" + }, + { + "name": "EMAIL", + "integer": 1 + }, + { + "name": "CREATED_AT", + "integer": 2 + }, + { + "name": "UPDATED_AT", + "integer": 3 + } + ] + }, + { + "name": "ListAccountsRequest.OrderDirection", + "enum_fields": [ + { + "name": "ASC" + }, + { + "name": "DESC", + "integer": 1 + } + ] + }, + { + "name": "ListAPIKeysRequest.OrderBy", + "enum_fields": [ + { + "name": "DEFAULT" + }, + { + "name": "NAME", + "integer": 1 + }, + { + 
"name": "CREATED_AT", + "integer": 2 + }, + { + "name": "UPDATED_AT", + "integer": 3 + } + ] + }, + { + "name": "ListAPIKeysRequest.OrderDirection", + "enum_fields": [ + { + "name": "ASC" + }, + { + "name": "DESC", + "integer": 1 + } + ] + } + ], + "messages": [ + { + "name": "GetMeRequest" + }, + { + "name": "GetMeByEmailRequest", + "fields": [ + { + "id": 1, + "name": "email", + "type": "string" + } + ] + }, + { + "name": "GetMeResponse", + "fields": [ + { + "id": 1, + "name": "account", + "type": "Account", + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] + }, + { + "id": 2, + "name": "email", + "type": "string" + }, + { + "id": 3, + "name": "is_admin", + "type": "bool" + }, + { + "id": 4, + "name": "admin_role", + "type": "Account.Role", + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] + }, + { + "id": 5, + "name": "disabled", + "type": "bool", + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] + }, + { + "id": 6, + "name": "environment_roles", + "type": "EnvironmentRole", + "is_repeated": true + }, + { + "id": 7, + "name": "deleted", + "type": "bool", + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] + } + ] + }, + { + "name": "CreateAdminAccountRequest", + "fields": [ + { + "id": 1, + "name": "command", + "type": "CreateAdminAccountCommand" + } + ] + }, + { + "name": "CreateAdminAccountResponse" + }, + { + "name": "EnableAdminAccountRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "command", + "type": "EnableAdminAccountCommand" + } + ] + }, + { + "name": "EnableAdminAccountResponse" + }, + { + "name": "DisableAdminAccountRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "command", + "type": "DisableAdminAccountCommand" + } + ] + }, + { + "name": "DisableAdminAccountResponse" + }, + { + "name": "GetAdminAccountRequest", + "fields": [ + { + "id": 1, + "name": "email", + "type": 
"string" + } + ] + }, + { + "name": "GetAdminAccountResponse", + "fields": [ + { + "id": 1, + "name": "account", + "type": "Account" + } + ] + }, + { + "name": "ListAdminAccountsRequest", + "fields": [ + { + "id": 1, + "name": "page_size", + "type": "int64" + }, + { + "id": 2, + "name": "cursor", + "type": "string" + }, + { + "id": 3, + "name": "order_by", + "type": "OrderBy" + }, + { + "id": 4, + "name": "order_direction", + "type": "OrderDirection" + }, + { + "id": 5, + "name": "search_keyword", + "type": "string" + }, + { + "id": 6, + "name": "disabled", + "type": "google.protobuf.BoolValue" + } + ] + }, + { + "name": "ListAdminAccountsResponse", + "fields": [ + { + "id": 1, + "name": "accounts", + "type": "Account", + "is_repeated": true + }, + { + "id": 2, + "name": "cursor", + "type": "string" + }, + { + "id": 3, + "name": "total_count", + "type": "int64" + } + ] + }, + { + "name": "ConvertAccountRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "command", + "type": "ConvertAccountCommand" + } + ] + }, + { + "name": "ConvertAccountResponse" + }, + { + "name": "CreateAccountRequest", + "fields": [ + { + "id": 1, + "name": "command", + "type": "CreateAccountCommand" + }, + { + "id": 2, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "CreateAccountResponse" + }, + { + "name": "EnableAccountRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "command", + "type": "EnableAccountCommand" + }, + { + "id": 3, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "EnableAccountResponse" + }, + { + "name": "DisableAccountRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "command", + "type": "DisableAccountCommand" + }, + { + "id": 3, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "DisableAccountResponse" + }, + { + 
"name": "ChangeAccountRoleRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "command", + "type": "ChangeAccountRoleCommand" + }, + { + "id": 3, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "ChangeAccountRoleResponse" + }, + { + "name": "GetAccountRequest", + "fields": [ + { + "id": 1, + "name": "email", + "type": "string" + }, + { + "id": 2, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "GetAccountResponse", + "fields": [ + { + "id": 1, + "name": "account", + "type": "Account" + } + ] + }, + { + "name": "ListAccountsRequest", + "fields": [ + { + "id": 1, + "name": "page_size", + "type": "int64" + }, + { + "id": 2, + "name": "cursor", + "type": "string" + }, + { + "id": 3, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 4, + "name": "order_by", + "type": "OrderBy" + }, + { + "id": 5, + "name": "order_direction", + "type": "OrderDirection" + }, + { + "id": 6, + "name": "search_keyword", + "type": "string" + }, + { + "id": 7, + "name": "disabled", + "type": "google.protobuf.BoolValue" + }, + { + "id": 8, + "name": "role", + "type": "google.protobuf.Int32Value" + } + ] + }, + { + "name": "ListAccountsResponse", + "fields": [ + { + "id": 1, + "name": "accounts", + "type": "Account", + "is_repeated": true + }, + { + "id": 2, + "name": "cursor", + "type": "string" + }, + { + "id": 3, + "name": "total_count", + "type": "int64" + } + ] + }, + { + "name": "CreateAPIKeyRequest", + "fields": [ + { + "id": 1, + "name": "command", + "type": "CreateAPIKeyCommand" + }, + { + "id": 2, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "CreateAPIKeyResponse", + "fields": [ + { + "id": 1, + "name": "api_key", + "type": "APIKey" + } + ] + }, + { + "name": "ChangeAPIKeyNameRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "command", + "type": 
"ChangeAPIKeyNameCommand" + }, + { + "id": 3, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "ChangeAPIKeyNameResponse" + }, + { + "name": "EnableAPIKeyRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "command", + "type": "EnableAPIKeyCommand" + }, + { + "id": 3, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "EnableAPIKeyResponse" + }, + { + "name": "DisableAPIKeyRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "command", + "type": "DisableAPIKeyCommand" + }, + { + "id": 3, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "DisableAPIKeyResponse" + }, + { + "name": "GetAPIKeyRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "GetAPIKeyResponse", + "fields": [ + { + "id": 1, + "name": "api_key", + "type": "APIKey" + } + ] + }, + { + "name": "ListAPIKeysRequest", + "fields": [ + { + "id": 1, + "name": "page_size", + "type": "int64" + }, + { + "id": 2, + "name": "cursor", + "type": "string" + }, + { + "id": 3, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 4, + "name": "order_by", + "type": "OrderBy" + }, + { + "id": 5, + "name": "order_direction", + "type": "OrderDirection" + }, + { + "id": 6, + "name": "search_keyword", + "type": "string" + }, + { + "id": 7, + "name": "disabled", + "type": "google.protobuf.BoolValue" + } + ] + }, + { + "name": "ListAPIKeysResponse", + "fields": [ + { + "id": 1, + "name": "api_keys", + "type": "APIKey", + "is_repeated": true + }, + { + "id": 2, + "name": "cursor", + "type": "string" + }, + { + "id": 3, + "name": "total_count", + "type": "int64" + } + ] + }, + { + "name": "GetAPIKeyBySearchingAllEnvironmentsRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + } 
+ ] + }, + { + "name": "GetAPIKeyBySearchingAllEnvironmentsResponse", + "fields": [ + { + "id": 1, + "name": "environment_api_key", + "type": "EnvironmentAPIKey" + } + ] + } + ], + "services": [ + { + "name": "AccountService", + "rpcs": [ + { + "name": "GetMe", + "in_type": "GetMeRequest", + "out_type": "GetMeResponse" + }, + { + "name": "GetMeByEmail", + "in_type": "GetMeByEmailRequest", + "out_type": "GetMeResponse" + }, + { + "name": "CreateAdminAccount", + "in_type": "CreateAdminAccountRequest", + "out_type": "CreateAdminAccountResponse" + }, + { + "name": "EnableAdminAccount", + "in_type": "EnableAdminAccountRequest", + "out_type": "EnableAdminAccountResponse" + }, + { + "name": "DisableAdminAccount", + "in_type": "DisableAdminAccountRequest", + "out_type": "DisableAdminAccountResponse" + }, + { + "name": "GetAdminAccount", + "in_type": "GetAdminAccountRequest", + "out_type": "GetAdminAccountResponse" + }, + { + "name": "ListAdminAccounts", + "in_type": "ListAdminAccountsRequest", + "out_type": "ListAdminAccountsResponse" + }, + { + "name": "ConvertAccount", + "in_type": "ConvertAccountRequest", + "out_type": "ConvertAccountResponse" + }, + { + "name": "CreateAccount", + "in_type": "CreateAccountRequest", + "out_type": "CreateAccountResponse" + }, + { + "name": "EnableAccount", + "in_type": "EnableAccountRequest", + "out_type": "EnableAccountResponse" + }, + { + "name": "DisableAccount", + "in_type": "DisableAccountRequest", + "out_type": "DisableAccountResponse" + }, + { + "name": "ChangeAccountRole", + "in_type": "ChangeAccountRoleRequest", + "out_type": "ChangeAccountRoleResponse" + }, + { + "name": "GetAccount", + "in_type": "GetAccountRequest", + "out_type": "GetAccountResponse" + }, + { + "name": "ListAccounts", + "in_type": "ListAccountsRequest", + "out_type": "ListAccountsResponse" + }, + { + "name": "CreateAPIKey", + "in_type": "CreateAPIKeyRequest", + "out_type": "CreateAPIKeyResponse" + }, + { + "name": "ChangeAPIKeyName", + "in_type": 
"ChangeAPIKeyNameRequest", + "out_type": "ChangeAPIKeyNameResponse" + }, + { + "name": "EnableAPIKey", + "in_type": "EnableAPIKeyRequest", + "out_type": "EnableAPIKeyResponse" + }, + { + "name": "DisableAPIKey", + "in_type": "DisableAPIKeyRequest", + "out_type": "DisableAPIKeyResponse" + }, + { + "name": "GetAPIKey", + "in_type": "GetAPIKeyRequest", + "out_type": "GetAPIKeyResponse" + }, + { + "name": "ListAPIKeys", + "in_type": "ListAPIKeysRequest", + "out_type": "ListAPIKeysResponse" + }, + { + "name": "GetAPIKeyBySearchingAllEnvironments", + "in_type": "GetAPIKeyBySearchingAllEnvironmentsRequest", + "out_type": "GetAPIKeyBySearchingAllEnvironmentsResponse" + } + ] + } + ], + "imports": [ + { + "path": "google/protobuf/wrappers.proto" + }, + { + "path": "proto/account/account.proto" + }, + { + "path": "proto/account/api_key.proto" + }, + { + "path": "proto/account/command.proto" + } + ], + "package": { + "name": "bucketeer.account" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/account" + } + ] + } + }, + { + "protopath": "auditlog:/:auditlog.proto", + "def": { + "messages": [ + { + "name": "AuditLog", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "timestamp", + "type": "int64" + }, + { + "id": 3, + "name": "entity_type", + "type": "bucketeer.event.domain.Event.EntityType" + }, + { + "id": 4, + "name": "entity_id", + "type": "string" + }, + { + "id": 5, + "name": "type", + "type": "bucketeer.event.domain.Event.Type" + }, + { + "id": 6, + "name": "event", + "type": "google.protobuf.Any" + }, + { + "id": 7, + "name": "editor", + "type": "bucketeer.event.domain.Editor" + }, + { + "id": 8, + "name": "options", + "type": "bucketeer.event.domain.Options" + }, + { + "id": 9, + "name": "localized_message", + "type": "bucketeer.event.domain.LocalizedMessage" + } + ] + } + ], + "imports": [ + { + "path": "google/protobuf/any.proto" + }, + { + "path": 
"proto/event/domain/event.proto" + }, + { + "path": "proto/event/domain/localized_message.proto" + } + ], + "package": { + "name": "bucketeer.auditlog" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/auditlog" + } + ] + } + }, + { + "protopath": "auditlog:/:service.proto", + "def": { + "enums": [ + { + "name": "ListAuditLogsRequest.OrderBy", + "enum_fields": [ + { + "name": "DEFAULT" + }, + { + "name": "TIMESTAMP", + "integer": 1 + } + ] + }, + { + "name": "ListAuditLogsRequest.OrderDirection", + "enum_fields": [ + { + "name": "DESC" + }, + { + "name": "ASC", + "integer": 1 + } + ] + }, + { + "name": "ListAdminAuditLogsRequest.OrderBy", + "enum_fields": [ + { + "name": "DEFAULT" + }, + { + "name": "TIMESTAMP", + "integer": 1 + } + ] + }, + { + "name": "ListAdminAuditLogsRequest.OrderDirection", + "enum_fields": [ + { + "name": "DESC" + }, + { + "name": "ASC", + "integer": 1 + } + ] + }, + { + "name": "ListFeatureHistoryRequest.OrderBy", + "enum_fields": [ + { + "name": "DEFAULT" + }, + { + "name": "TIMESTAMP", + "integer": 1 + } + ] + }, + { + "name": "ListFeatureHistoryRequest.OrderDirection", + "enum_fields": [ + { + "name": "DESC" + }, + { + "name": "ASC", + "integer": 1 + } + ] + } + ], + "messages": [ + { + "name": "ListAuditLogsRequest", + "fields": [ + { + "id": 1, + "name": "page_size", + "type": "int64" + }, + { + "id": 2, + "name": "cursor", + "type": "string" + }, + { + "id": 3, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 4, + "name": "order_by", + "type": "OrderBy" + }, + { + "id": 5, + "name": "order_direction", + "type": "OrderDirection" + }, + { + "id": 6, + "name": "search_keyword", + "type": "string" + }, + { + "id": 7, + "name": "from", + "type": "int64" + }, + { + "id": 8, + "name": "to", + "type": "int64" + }, + { + "id": 9, + "name": "entity_type", + "type": "google.protobuf.Int32Value" + } + ] + }, + { + "name": "ListAuditLogsResponse", + "fields": [ + { + "id": 1, 
+ "name": "audit_logs", + "type": "AuditLog", + "is_repeated": true + }, + { + "id": 2, + "name": "cursor", + "type": "string" + }, + { + "id": 3, + "name": "total_count", + "type": "int64" + } + ] + }, + { + "name": "ListAdminAuditLogsRequest", + "fields": [ + { + "id": 1, + "name": "page_size", + "type": "int64" + }, + { + "id": 2, + "name": "cursor", + "type": "string" + }, + { + "id": 3, + "name": "order_by", + "type": "OrderBy" + }, + { + "id": 4, + "name": "order_direction", + "type": "OrderDirection" + }, + { + "id": 5, + "name": "search_keyword", + "type": "string" + }, + { + "id": 6, + "name": "from", + "type": "int64" + }, + { + "id": 7, + "name": "to", + "type": "int64" + }, + { + "id": 8, + "name": "entity_type", + "type": "google.protobuf.Int32Value" + } + ] + }, + { + "name": "ListAdminAuditLogsResponse", + "fields": [ + { + "id": 1, + "name": "audit_logs", + "type": "AuditLog", + "is_repeated": true + }, + { + "id": 2, + "name": "cursor", + "type": "string" + }, + { + "id": 3, + "name": "total_count", + "type": "int64" + } + ] + }, + { + "name": "ListFeatureHistoryRequest", + "fields": [ + { + "id": 1, + "name": "feature_id", + "type": "string" + }, + { + "id": 2, + "name": "page_size", + "type": "int64" + }, + { + "id": 3, + "name": "cursor", + "type": "string" + }, + { + "id": 4, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 5, + "name": "order_by", + "type": "OrderBy" + }, + { + "id": 6, + "name": "order_direction", + "type": "OrderDirection" + }, + { + "id": 7, + "name": "search_keyword", + "type": "string" + }, + { + "id": 8, + "name": "from", + "type": "int64" + }, + { + "id": 9, + "name": "to", + "type": "int64" + } + ] + }, + { + "name": "ListFeatureHistoryResponse", + "fields": [ + { + "id": 1, + "name": "audit_logs", + "type": "AuditLog", + "is_repeated": true + }, + { + "id": 2, + "name": "cursor", + "type": "string" + }, + { + "id": 3, + "name": "total_count", + "type": "int64" + } + ] + } + ], + "services": [ + { 
+ "name": "AuditLogService", + "rpcs": [ + { + "name": "ListAuditLogs", + "in_type": "ListAuditLogsRequest", + "out_type": "ListAuditLogsResponse" + }, + { + "name": "ListAdminAuditLogs", + "in_type": "ListAdminAuditLogsRequest", + "out_type": "ListAdminAuditLogsResponse" + }, + { + "name": "ListFeatureHistory", + "in_type": "ListFeatureHistoryRequest", + "out_type": "ListFeatureHistoryResponse" + } + ] + } + ], + "imports": [ + { + "path": "google/protobuf/wrappers.proto" + }, + { + "path": "proto/auditlog/auditlog.proto" + } + ], + "package": { + "name": "bucketeer.auditlog" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/auditlog" + } + ] + } + }, + { + "protopath": "auth:/:service.proto", + "def": { + "messages": [ + { + "name": "GetAuthCodeURLRequest", + "fields": [ + { + "id": 1, + "name": "state", + "type": "string" + }, + { + "id": 2, + "name": "redirect_url", + "type": "string" + } + ] + }, + { + "name": "GetAuthCodeURLResponse", + "fields": [ + { + "id": 1, + "name": "url", + "type": "string" + } + ] + }, + { + "name": "ExchangeTokenRequest", + "fields": [ + { + "id": 1, + "name": "code", + "type": "string" + }, + { + "id": 2, + "name": "redirect_url", + "type": "string" + } + ] + }, + { + "name": "ExchangeTokenResponse", + "fields": [ + { + "id": 1, + "name": "token", + "type": "Token" + } + ] + }, + { + "name": "RefreshTokenRequest", + "fields": [ + { + "id": 1, + "name": "refresh_token", + "type": "string" + }, + { + "id": 2, + "name": "redirect_url", + "type": "string" + } + ] + }, + { + "name": "RefreshTokenResponse", + "fields": [ + { + "id": 1, + "name": "token", + "type": "Token" + } + ] + } + ], + "services": [ + { + "name": "AuthService", + "rpcs": [ + { + "name": "GetAuthCodeURL", + "in_type": "GetAuthCodeURLRequest", + "out_type": "GetAuthCodeURLResponse" + }, + { + "name": "ExchangeToken", + "in_type": "ExchangeTokenRequest", + "out_type": "ExchangeTokenResponse" + }, + { + "name": 
"RefreshToken", + "in_type": "RefreshTokenRequest", + "out_type": "RefreshTokenResponse" + } + ] + } + ], + "imports": [ + { + "path": "proto/auth/token.proto" + } + ], + "package": { + "name": "bucketeer.auth" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/auth" + } + ] + } + }, + { + "protopath": "auth:/:token.proto", + "def": { + "messages": [ + { + "name": "Token", + "fields": [ + { + "id": 1, + "name": "access_token", + "type": "string" + }, + { + "id": 2, + "name": "token_type", + "type": "string" + }, + { + "id": 3, + "name": "refresh_token", + "type": "string" + }, + { + "id": 4, + "name": "expiry", + "type": "int64" + }, + { + "id": 5, + "name": "id_token", + "type": "string" + } + ] + }, + { + "name": "IDTokenSubject", + "fields": [ + { + "id": 1, + "name": "user_id", + "type": "string" + }, + { + "id": 2, + "name": "conn_id", + "type": "string" + } + ] + } + ], + "package": { + "name": "bucketeer.auth" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/auth" + } + ] + } + }, + { + "protopath": "autoops:/:auto_ops_rule.proto", + "def": { + "enums": [ + { + "name": "OpsType", + "enum_fields": [ + { + "name": "ENABLE_FEATURE" + }, + { + "name": "DISABLE_FEATURE", + "integer": 1 + } + ] + } + ], + "messages": [ + { + "name": "AutoOpsRule", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "feature_id", + "type": "string" + }, + { + "id": 3, + "name": "ops_type", + "type": "OpsType" + }, + { + "id": 4, + "name": "clauses", + "type": "Clause", + "is_repeated": true + }, + { + "id": 6, + "name": "triggered_at", + "type": "int64" + }, + { + "id": 7, + "name": "created_at", + "type": "int64" + }, + { + "id": 8, + "name": "updated_at", + "type": "int64" + }, + { + "id": 9, + "name": "deleted", + "type": "bool" + } + ] + } + ], + "imports": [ + { + "path": "proto/autoops/clause.proto" + } + ], + "package": { + "name": 
"bucketeer.autoops" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/autoops" + } + ] + } + }, + { + "protopath": "autoops:/:clause.proto", + "def": { + "enums": [ + { + "name": "OpsEventRateClause.Operator", + "enum_fields": [ + { + "name": "GREATER_OR_EQUAL" + }, + { + "name": "LESS_OR_EQUAL", + "integer": 1 + } + ] + }, + { + "name": "Condition.Operator", + "enum_fields": [ + { + "name": "EQUAL" + }, + { + "name": "NOT_EQUAL", + "integer": 1 + }, + { + "name": "MORE_THAN", + "integer": 2 + }, + { + "name": "MORE_THAN_OR_EQUAL", + "integer": 3 + }, + { + "name": "LESS_THAN", + "integer": 4 + }, + { + "name": "LESS_THAN_OR_EQUAL", + "integer": 5 + } + ] + } + ], + "messages": [ + { + "name": "Clause", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "clause", + "type": "google.protobuf.Any" + } + ] + }, + { + "name": "OpsEventRateClause", + "fields": [ + { + "id": 2, + "name": "variation_id", + "type": "string" + }, + { + "id": 3, + "name": "goal_id", + "type": "string" + }, + { + "id": 4, + "name": "min_count", + "type": "int64" + }, + { + "id": 5, + "name": "threadshold_rate", + "type": "double" + }, + { + "id": 6, + "name": "operator", + "type": "Operator" + } + ], + "reserved_ids": [ + 1 + ] + }, + { + "name": "DatetimeClause", + "fields": [ + { + "id": 1, + "name": "time", + "type": "int64" + } + ] + }, + { + "name": "WebhookClause", + "fields": [ + { + "id": 1, + "name": "webhook_id", + "type": "string" + }, + { + "id": 2, + "name": "conditions", + "type": "Condition", + "is_repeated": true + } + ], + "messages": [ + { + "name": "Condition", + "fields": [ + { + "id": 1, + "name": "filter", + "type": "string" + }, + { + "id": 2, + "name": "value", + "type": "string" + }, + { + "id": 3, + "name": "operator", + "type": "Operator" + } + ] + } + ] + } + ], + "imports": [ + { + "path": "google/protobuf/any.proto" + } + ], + "package": { + "name": "bucketeer.autoops" + 
}, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/autoops" + } + ] + } + }, + { + "protopath": "autoops:/:command.proto", + "def": { + "messages": [ + { + "name": "CreateAutoOpsRuleCommand", + "fields": [ + { + "id": 1, + "name": "feature_id", + "type": "string" + }, + { + "id": 2, + "name": "ops_type", + "type": "OpsType" + }, + { + "id": 3, + "name": "ops_event_rate_clauses", + "type": "OpsEventRateClause", + "is_repeated": true + }, + { + "id": 4, + "name": "datetime_clauses", + "type": "DatetimeClause", + "is_repeated": true + }, + { + "id": 5, + "name": "webhook_clauses", + "type": "WebhookClause", + "is_repeated": true + } + ] + }, + { + "name": "ChangeAutoOpsRuleOpsTypeCommand", + "fields": [ + { + "id": 1, + "name": "ops_type", + "type": "OpsType" + } + ] + }, + { + "name": "DeleteAutoOpsRuleCommand" + }, + { + "name": "ChangeAutoOpsRuleTriggeredAtCommand" + }, + { + "name": "AddOpsEventRateClauseCommand", + "fields": [ + { + "id": 1, + "name": "ops_event_rate_clause", + "type": "OpsEventRateClause" + } + ] + }, + { + "name": "ChangeOpsEventRateClauseCommand", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "ops_event_rate_clause", + "type": "OpsEventRateClause" + } + ] + }, + { + "name": "DeleteClauseCommand", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + } + ] + }, + { + "name": "AddDatetimeClauseCommand", + "fields": [ + { + "id": 1, + "name": "datetime_clause", + "type": "DatetimeClause" + } + ] + }, + { + "name": "ChangeDatetimeClauseCommand", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "datetime_clause", + "type": "DatetimeClause" + } + ] + }, + { + "name": "CreateWebhookCommand", + "fields": [ + { + "id": 1, + "name": "name", + "type": "string" + }, + { + "id": 2, + "name": "description", + "type": "string" + } + ] + }, + { + "name": "ChangeWebhookNameCommand", + "fields": [ + { + "id": 1, 
+ "name": "name", + "type": "string" + } + ] + }, + { + "name": "ChangeWebhookDescriptionCommand", + "fields": [ + { + "id": 1, + "name": "description", + "type": "string" + } + ] + }, + { + "name": "DeleteWebhookCommand" + }, + { + "name": "AddWebhookClauseCommand", + "fields": [ + { + "id": 1, + "name": "webhook_clause", + "type": "WebhookClause" + } + ] + }, + { + "name": "ChangeWebhookClauseCommand", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "webhook_clause", + "type": "WebhookClause" + } + ] + } + ], + "imports": [ + { + "path": "proto/autoops/auto_ops_rule.proto" + }, + { + "path": "proto/autoops/clause.proto" + } + ], + "package": { + "name": "bucketeer.autoops" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/autoops" + } + ] + } + }, + { + "protopath": "autoops:/:ops_count.proto", + "def": { + "messages": [ + { + "name": "OpsCount", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "auto_ops_rule_id", + "type": "string" + }, + { + "id": 3, + "name": "clause_id", + "type": "string" + }, + { + "id": 4, + "name": "updated_at", + "type": "int64" + }, + { + "id": 5, + "name": "ops_event_count", + "type": "int64" + }, + { + "id": 6, + "name": "evaluation_count", + "type": "int64" + }, + { + "id": 7, + "name": "feature_id", + "type": "string" + } + ] + } + ], + "package": { + "name": "bucketeer.autoops" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/autoops" + } + ] + } + }, + { + "protopath": "autoops:/:service.proto", + "def": { + "enums": [ + { + "name": "ListWebhooksRequest.OrderBy", + "enum_fields": [ + { + "name": "DEFAULT" + }, + { + "name": "NAME", + "integer": 1 + }, + { + "name": "CREATED_AT", + "integer": 2 + }, + { + "name": "UPDATED_AT", + "integer": 3 + } + ] + }, + { + "name": "ListWebhooksRequest.OrderDirection", + "enum_fields": [ + { + "name": "ASC" 
+ }, + { + "name": "DESC", + "integer": 1 + } + ] + } + ], + "messages": [ + { + "name": "GetAutoOpsRuleRequest", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "id", + "type": "string" + } + ] + }, + { + "name": "GetAutoOpsRuleResponse", + "fields": [ + { + "id": 1, + "name": "auto_ops_rule", + "type": "AutoOpsRule" + } + ] + }, + { + "name": "CreateAutoOpsRuleRequest", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "command", + "type": "CreateAutoOpsRuleCommand" + } + ] + }, + { + "name": "CreateAutoOpsRuleResponse" + }, + { + "name": "ListAutoOpsRulesRequest", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "page_size", + "type": "int64" + }, + { + "id": 3, + "name": "cursor", + "type": "string" + }, + { + "id": 4, + "name": "feature_ids", + "type": "string", + "is_repeated": true + } + ] + }, + { + "name": "ListAutoOpsRulesResponse", + "fields": [ + { + "id": 1, + "name": "auto_ops_rules", + "type": "AutoOpsRule", + "is_repeated": true + }, + { + "id": 2, + "name": "cursor", + "type": "string" + } + ] + }, + { + "name": "DeleteAutoOpsRuleRequest", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "id", + "type": "string" + }, + { + "id": 3, + "name": "command", + "type": "DeleteAutoOpsRuleCommand" + } + ] + }, + { + "name": "DeleteAutoOpsRuleResponse" + }, + { + "name": "UpdateAutoOpsRuleRequest", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "id", + "type": "string" + }, + { + "id": 3, + "name": "change_auto_ops_rule_ops_type_command", + "type": "ChangeAutoOpsRuleOpsTypeCommand" + }, + { + "id": 4, + "name": "add_ops_event_rate_clause_commands", + "type": "AddOpsEventRateClauseCommand", + "is_repeated": true + }, + { + "id": 5, + "name": 
"change_ops_event_rate_clause_commands", + "type": "ChangeOpsEventRateClauseCommand", + "is_repeated": true + }, + { + "id": 6, + "name": "delete_clause_commands", + "type": "DeleteClauseCommand", + "is_repeated": true + }, + { + "id": 7, + "name": "add_datetime_clause_commands", + "type": "AddDatetimeClauseCommand", + "is_repeated": true + }, + { + "id": 8, + "name": "change_datetime_clause_commands", + "type": "ChangeDatetimeClauseCommand", + "is_repeated": true + }, + { + "id": 9, + "name": "add_webhook_clause_commands", + "type": "AddWebhookClauseCommand", + "is_repeated": true + }, + { + "id": 10, + "name": "change_webhook_clause_commands", + "type": "ChangeWebhookClauseCommand", + "is_repeated": true + } + ] + }, + { + "name": "UpdateAutoOpsRuleResponse" + }, + { + "name": "ExecuteAutoOpsRequest", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "id", + "type": "string" + }, + { + "id": 3, + "name": "change_auto_ops_rule_triggered_at_command", + "type": "ChangeAutoOpsRuleTriggeredAtCommand" + } + ] + }, + { + "name": "ExecuteAutoOpsResponse", + "fields": [ + { + "id": 1, + "name": "already_triggered", + "type": "bool" + } + ] + }, + { + "name": "ListOpsCountsRequest", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "page_size", + "type": "int64" + }, + { + "id": 3, + "name": "cursor", + "type": "string" + }, + { + "id": 4, + "name": "auto_ops_rule_ids", + "type": "string", + "is_repeated": true + }, + { + "id": 5, + "name": "feature_ids", + "type": "string", + "is_repeated": true + } + ] + }, + { + "name": "ListOpsCountsResponse", + "fields": [ + { + "id": 1, + "name": "cursor", + "type": "string" + }, + { + "id": 2, + "name": "ops_counts", + "type": "OpsCount", + "is_repeated": true + } + ] + }, + { + "name": "CreateWebhookRequest", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 
2, + "name": "command", + "type": "CreateWebhookCommand" + } + ] + }, + { + "name": "CreateWebhookResponse", + "fields": [ + { + "id": 1, + "name": "webhook", + "type": "Webhook" + }, + { + "id": 2, + "name": "url", + "type": "string" + } + ] + }, + { + "name": "GetWebhookRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "GetWebhookResponse", + "fields": [ + { + "id": 1, + "name": "webhook", + "type": "Webhook" + }, + { + "id": 2, + "name": "url", + "type": "string" + } + ] + }, + { + "name": "UpdateWebhookRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 3, + "name": "changeWebhookNameCommand", + "type": "ChangeWebhookNameCommand" + }, + { + "id": 4, + "name": "changeWebhookDescriptionCommand", + "type": "ChangeWebhookDescriptionCommand" + } + ] + }, + { + "name": "UpdateWebhookResponse" + }, + { + "name": "DeleteWebhookRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 3, + "name": "command", + "type": "DeleteWebhookCommand" + } + ] + }, + { + "name": "DeleteWebhookResponse" + }, + { + "name": "ListWebhooksRequest", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "page_size", + "type": "int64" + }, + { + "id": 3, + "name": "cursor", + "type": "string" + }, + { + "id": 4, + "name": "order_by", + "type": "OrderBy" + }, + { + "id": 5, + "name": "order_direction", + "type": "OrderDirection" + }, + { + "id": 6, + "name": "search_keyword", + "type": "string" + } + ] + }, + { + "name": "ListWebhooksResponse", + "fields": [ + { + "id": 1, + "name": "webhooks", + "type": "Webhook", + "is_repeated": true + }, + { + "id": 2, + "name": "cursor", + "type": "string" 
+ }, + { + "id": 3, + "name": "total_count", + "type": "int64" + } + ] + } + ], + "services": [ + { + "name": "AutoOpsService", + "rpcs": [ + { + "name": "GetAutoOpsRule", + "in_type": "GetAutoOpsRuleRequest", + "out_type": "GetAutoOpsRuleResponse" + }, + { + "name": "ListAutoOpsRules", + "in_type": "ListAutoOpsRulesRequest", + "out_type": "ListAutoOpsRulesResponse" + }, + { + "name": "CreateAutoOpsRule", + "in_type": "CreateAutoOpsRuleRequest", + "out_type": "CreateAutoOpsRuleResponse" + }, + { + "name": "DeleteAutoOpsRule", + "in_type": "DeleteAutoOpsRuleRequest", + "out_type": "DeleteAutoOpsRuleResponse" + }, + { + "name": "UpdateAutoOpsRule", + "in_type": "UpdateAutoOpsRuleRequest", + "out_type": "UpdateAutoOpsRuleResponse" + }, + { + "name": "ExecuteAutoOps", + "in_type": "ExecuteAutoOpsRequest", + "out_type": "ExecuteAutoOpsResponse" + }, + { + "name": "ListOpsCounts", + "in_type": "ListOpsCountsRequest", + "out_type": "ListOpsCountsResponse" + }, + { + "name": "CreateWebhook", + "in_type": "CreateWebhookRequest", + "out_type": "CreateWebhookResponse" + }, + { + "name": "GetWebhook", + "in_type": "GetWebhookRequest", + "out_type": "GetWebhookResponse" + }, + { + "name": "UpdateWebhook", + "in_type": "UpdateWebhookRequest", + "out_type": "UpdateWebhookResponse" + }, + { + "name": "DeleteWebhook", + "in_type": "DeleteWebhookRequest", + "out_type": "DeleteWebhookResponse" + }, + { + "name": "ListWebhooks", + "in_type": "ListWebhooksRequest", + "out_type": "ListWebhooksResponse" + } + ] + } + ], + "imports": [ + { + "path": "proto/autoops/auto_ops_rule.proto" + }, + { + "path": "proto/autoops/command.proto" + }, + { + "path": "proto/autoops/ops_count.proto" + }, + { + "path": "proto/autoops/webhook.proto" + } + ], + "package": { + "name": "bucketeer.autoops" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/autoops" + } + ] + } + }, + { + "protopath": "autoops:/:webhook.proto", + "def": { + "messages": [ + { + 
"name": "Webhook", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "name", + "type": "string" + }, + { + "id": 3, + "name": "description", + "type": "string" + }, + { + "id": 4, + "name": "created_at", + "type": "int64" + }, + { + "id": 5, + "name": "updated_at", + "type": "int64" + } + ] + } + ], + "package": { + "name": "bucketeer.autoops" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/autoops" + } + ] + } + }, + { + "protopath": "environment:/:command.proto", + "def": { + "messages": [ + { + "name": "CreateEnvironmentCommand", + "fields": [ + { + "id": 1, + "name": "namespace", + "type": "string", + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] + }, + { + "id": 2, + "name": "name", + "type": "string", + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] + }, + { + "id": 3, + "name": "description", + "type": "string" + }, + { + "id": 4, + "name": "id", + "type": "string" + }, + { + "id": 5, + "name": "project_id", + "type": "string" + } + ] + }, + { + "name": "RenameEnvironmentCommand", + "fields": [ + { + "id": 1, + "name": "name", + "type": "string" + } + ] + }, + { + "name": "ChangeDescriptionEnvironmentCommand", + "fields": [ + { + "id": 1, + "name": "description", + "type": "string" + } + ] + }, + { + "name": "DeleteEnvironmentCommand" + }, + { + "name": "CreateProjectCommand", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "description", + "type": "string" + } + ] + }, + { + "name": "CreateTrialProjectCommand", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "email", + "type": "string" + } + ] + }, + { + "name": "ChangeDescriptionProjectCommand", + "fields": [ + { + "id": 1, + "name": "description", + "type": "string" + } + ] + }, + { + "name": "EnableProjectCommand" + }, + { + "name": "DisableProjectCommand" + }, + { + "name": 
"ConvertTrialProjectCommand" + } + ], + "package": { + "name": "bucketeer.environment" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/environment" + } + ] + } + }, + { + "protopath": "environment:/:environment.proto", + "def": { + "messages": [ + { + "name": "Environment", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "namespace", + "type": "string" + }, + { + "id": 3, + "name": "name", + "type": "string", + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] + }, + { + "id": 4, + "name": "description", + "type": "string" + }, + { + "id": 5, + "name": "deleted", + "type": "bool" + }, + { + "id": 6, + "name": "created_at", + "type": "int64" + }, + { + "id": 7, + "name": "updated_at", + "type": "int64" + }, + { + "id": 8, + "name": "project_id", + "type": "string" + } + ] + } + ], + "package": { + "name": "bucketeer.environment" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/environment" + } + ] + } + }, + { + "protopath": "environment:/:project.proto", + "def": { + "messages": [ + { + "name": "Project", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "description", + "type": "string" + }, + { + "id": 3, + "name": "disabled", + "type": "bool" + }, + { + "id": 4, + "name": "trial", + "type": "bool" + }, + { + "id": 5, + "name": "creator_email", + "type": "string" + }, + { + "id": 6, + "name": "created_at", + "type": "int64" + }, + { + "id": 7, + "name": "updated_at", + "type": "int64" + } + ] + } + ], + "package": { + "name": "bucketeer.environment" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/environment" + } + ] + } + }, + { + "protopath": "environment:/:service.proto", + "def": { + "enums": [ + { + "name": "ListEnvironmentsRequest.OrderBy", + "enum_fields": [ + { + "name": "DEFAULT" + }, + { + 
"name": "ID", + "integer": 1 + }, + { + "name": "CREATED_AT", + "integer": 2 + }, + { + "name": "UPDATED_AT", + "integer": 3 + } + ] + }, + { + "name": "ListEnvironmentsRequest.OrderDirection", + "enum_fields": [ + { + "name": "ASC" + }, + { + "name": "DESC", + "integer": 1 + } + ] + }, + { + "name": "ListProjectsRequest.OrderBy", + "enum_fields": [ + { + "name": "DEFAULT" + }, + { + "name": "ID", + "integer": 1 + }, + { + "name": "CREATED_AT", + "integer": 2 + }, + { + "name": "UPDATED_AT", + "integer": 3 + } + ] + }, + { + "name": "ListProjectsRequest.OrderDirection", + "enum_fields": [ + { + "name": "ASC" + }, + { + "name": "DESC", + "integer": 1 + } + ] + } + ], + "messages": [ + { + "name": "GetEnvironmentRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + } + ] + }, + { + "name": "GetEnvironmentResponse", + "fields": [ + { + "id": 1, + "name": "environment", + "type": "Environment" + } + ] + }, + { + "name": "GetEnvironmentByNamespaceRequest", + "fields": [ + { + "id": 1, + "name": "namespace", + "type": "string" + } + ] + }, + { + "name": "GetEnvironmentByNamespaceResponse", + "fields": [ + { + "id": 1, + "name": "environment", + "type": "Environment" + } + ] + }, + { + "name": "ListEnvironmentsRequest", + "fields": [ + { + "id": 1, + "name": "page_size", + "type": "int64" + }, + { + "id": 2, + "name": "cursor", + "type": "string" + }, + { + "id": 3, + "name": "project_id", + "type": "string" + }, + { + "id": 4, + "name": "order_by", + "type": "OrderBy" + }, + { + "id": 5, + "name": "order_direction", + "type": "OrderDirection" + }, + { + "id": 6, + "name": "search_keyword", + "type": "string" + } + ] + }, + { + "name": "ListEnvironmentsResponse", + "fields": [ + { + "id": 1, + "name": "environments", + "type": "Environment", + "is_repeated": true + }, + { + "id": 2, + "name": "cursor", + "type": "string" + }, + { + "id": 3, + "name": "total_count", + "type": "int64" + } + ] + }, + { + "name": "CreateEnvironmentRequest", + "fields": [ 
+ { + "id": 1, + "name": "command", + "type": "CreateEnvironmentCommand" + } + ] + }, + { + "name": "CreateEnvironmentResponse" + }, + { + "name": "UpdateEnvironmentRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "rename_command", + "type": "RenameEnvironmentCommand", + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] + }, + { + "id": 3, + "name": "change_description_command", + "type": "ChangeDescriptionEnvironmentCommand" + } + ] + }, + { + "name": "UpdateEnvironmentResponse" + }, + { + "name": "DeleteEnvironmentRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "command", + "type": "DeleteEnvironmentCommand" + } + ] + }, + { + "name": "DeleteEnvironmentResponse" + }, + { + "name": "GetProjectRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + } + ] + }, + { + "name": "GetProjectResponse", + "fields": [ + { + "id": 1, + "name": "project", + "type": "Project" + } + ] + }, + { + "name": "ListProjectsRequest", + "fields": [ + { + "id": 1, + "name": "page_size", + "type": "int64" + }, + { + "id": 2, + "name": "cursor", + "type": "string" + }, + { + "id": 3, + "name": "order_by", + "type": "OrderBy" + }, + { + "id": 4, + "name": "order_direction", + "type": "OrderDirection" + }, + { + "id": 5, + "name": "search_keyword", + "type": "string" + }, + { + "id": 6, + "name": "disabled", + "type": "google.protobuf.BoolValue" + } + ] + }, + { + "name": "ListProjectsResponse", + "fields": [ + { + "id": 1, + "name": "projects", + "type": "Project", + "is_repeated": true + }, + { + "id": 2, + "name": "cursor", + "type": "string" + }, + { + "id": 3, + "name": "total_count", + "type": "int64" + } + ] + }, + { + "name": "CreateProjectRequest", + "fields": [ + { + "id": 1, + "name": "command", + "type": "CreateProjectCommand" + } + ] + }, + { + "name": "CreateProjectResponse" + }, + { + "name": "CreateTrialProjectRequest", + "fields": 
[ + { + "id": 1, + "name": "command", + "type": "CreateTrialProjectCommand" + } + ] + }, + { + "name": "CreateTrialProjectResponse" + }, + { + "name": "UpdateProjectRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "change_description_command", + "type": "ChangeDescriptionProjectCommand" + } + ] + }, + { + "name": "UpdateProjectResponse" + }, + { + "name": "EnableProjectRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "command", + "type": "EnableProjectCommand" + } + ] + }, + { + "name": "EnableProjectResponse" + }, + { + "name": "DisableProjectRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "command", + "type": "DisableProjectCommand" + } + ] + }, + { + "name": "DisableProjectResponse" + }, + { + "name": "ConvertTrialProjectRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "command", + "type": "ConvertTrialProjectCommand" + } + ] + }, + { + "name": "ConvertTrialProjectResponse" + } + ], + "services": [ + { + "name": "EnvironmentService", + "rpcs": [ + { + "name": "GetEnvironment", + "in_type": "GetEnvironmentRequest", + "out_type": "GetEnvironmentResponse" + }, + { + "name": "GetEnvironmentByNamespace", + "in_type": "GetEnvironmentByNamespaceRequest", + "out_type": "GetEnvironmentByNamespaceResponse" + }, + { + "name": "ListEnvironments", + "in_type": "ListEnvironmentsRequest", + "out_type": "ListEnvironmentsResponse" + }, + { + "name": "CreateEnvironment", + "in_type": "CreateEnvironmentRequest", + "out_type": "CreateEnvironmentResponse" + }, + { + "name": "UpdateEnvironment", + "in_type": "UpdateEnvironmentRequest", + "out_type": "UpdateEnvironmentResponse" + }, + { + "name": "DeleteEnvironment", + "in_type": "DeleteEnvironmentRequest", + "out_type": "DeleteEnvironmentResponse" + }, + { + "name": "GetProject", + "in_type": "GetProjectRequest", + 
"out_type": "GetProjectResponse" + }, + { + "name": "ListProjects", + "in_type": "ListProjectsRequest", + "out_type": "ListProjectsResponse" + }, + { + "name": "CreateProject", + "in_type": "CreateProjectRequest", + "out_type": "CreateProjectResponse" + }, + { + "name": "CreateTrialProject", + "in_type": "CreateTrialProjectRequest", + "out_type": "CreateTrialProjectResponse" + }, + { + "name": "UpdateProject", + "in_type": "UpdateProjectRequest", + "out_type": "UpdateProjectResponse" + }, + { + "name": "EnableProject", + "in_type": "EnableProjectRequest", + "out_type": "EnableProjectResponse" + }, + { + "name": "DisableProject", + "in_type": "DisableProjectRequest", + "out_type": "DisableProjectResponse" + }, + { + "name": "ConvertTrialProject", + "in_type": "ConvertTrialProjectRequest", + "out_type": "ConvertTrialProjectResponse" + } + ] + } + ], + "imports": [ + { + "path": "google/protobuf/wrappers.proto" + }, + { + "path": "proto/environment/environment.proto" + }, + { + "path": "proto/environment/project.proto" + }, + { + "path": "proto/environment/command.proto" + } + ], + "package": { + "name": "bucketeer.environment" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/environment" + } + ] + } + }, + { + "protopath": "event:/:client:/:event.proto", + "def": { + "enums": [ + { + "name": "SourceId", + "enum_fields": [ + { + "name": "UNKNOWN" + }, + { + "name": "ANDROID", + "integer": 1 + }, + { + "name": "IOS", + "integer": 2 + }, + { + "name": "WEB", + "integer": 3 + }, + { + "name": "GOAL_BATCH", + "integer": 4 + }, + { + "name": "GO_SERVER", + "integer": 5 + }, + { + "name": "NODE_SERVER", + "integer": 6 + } + ] + } + ], + "messages": [ + { + "name": "Event", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "event", + "type": "google.protobuf.Any" + }, + { + "id": 3, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "EvaluationEvent", 
+ "fields": [ + { + "id": 1, + "name": "timestamp", + "type": "int64" + }, + { + "id": 2, + "name": "feature_id", + "type": "string" + }, + { + "id": 3, + "name": "feature_version", + "type": "int32" + }, + { + "id": 4, + "name": "user_id", + "type": "string" + }, + { + "id": 5, + "name": "variation_id", + "type": "string" + }, + { + "id": 6, + "name": "user", + "type": "bucketeer.user.User" + }, + { + "id": 7, + "name": "reason", + "type": "bucketeer.feature.Reason" + }, + { + "id": 8, + "name": "tag", + "type": "string" + }, + { + "id": 9, + "name": "source_id", + "type": "SourceId" + } + ] + }, + { + "name": "GoalEvent", + "fields": [ + { + "id": 1, + "name": "timestamp", + "type": "int64" + }, + { + "id": 2, + "name": "goal_id", + "type": "string" + }, + { + "id": 3, + "name": "user_id", + "type": "string" + }, + { + "id": 4, + "name": "value", + "type": "double" + }, + { + "id": 5, + "name": "user", + "type": "bucketeer.user.User" + }, + { + "id": 6, + "name": "evaluations", + "type": "bucketeer.feature.Evaluation", + "is_repeated": true, + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] + }, + { + "id": 7, + "name": "tag", + "type": "string" + }, + { + "id": 8, + "name": "source_id", + "type": "SourceId" + } + ] + }, + { + "name": "MetricsEvent", + "fields": [ + { + "id": 1, + "name": "timestamp", + "type": "int64" + }, + { + "id": 2, + "name": "event", + "type": "google.protobuf.Any" + } + ] + }, + { + "name": "GetEvaluationLatencyMetricsEvent", + "fields": [ + { + "id": 2, + "name": "duration", + "type": "google.protobuf.Duration" + } + ], + "maps": [ + { + "key_type": "string", + "field": { + "id": 1, + "name": "labels", + "type": "string" + } + } + ] + }, + { + "name": "GetEvaluationSizeMetricsEvent", + "fields": [ + { + "id": 2, + "name": "size_byte", + "type": "int32" + } + ], + "maps": [ + { + "key_type": "string", + "field": { + "id": 1, + "name": "labels", + "type": "string" + } + } + ] + }, + { + "name": 
"TimeoutErrorCountMetricsEvent", + "fields": [ + { + "id": 1, + "name": "tag", + "type": "string" + } + ] + }, + { + "name": "InternalErrorCountMetricsEvent", + "fields": [ + { + "id": 1, + "name": "tag", + "type": "string" + } + ] + }, + { + "name": "OpsEvent", + "fields": [ + { + "id": 1, + "name": "timestamp", + "type": "int64" + }, + { + "id": 2, + "name": "feature_id", + "type": "string" + }, + { + "id": 3, + "name": "feature_version", + "type": "int32" + }, + { + "id": 4, + "name": "variation_id", + "type": "string" + }, + { + "id": 5, + "name": "goal_id", + "type": "string" + }, + { + "id": 6, + "name": "user_id", + "type": "string" + } + ] + }, + { + "name": "GoalBatchEvent", + "fields": [ + { + "id": 1, + "name": "user_id", + "type": "string" + }, + { + "id": 2, + "name": "user_goal_events_over_tags", + "type": "UserGoalEventsOverTag", + "is_repeated": true + } + ] + }, + { + "name": "UserGoalEventsOverTag", + "fields": [ + { + "id": 1, + "name": "tag", + "type": "string" + }, + { + "id": 2, + "name": "user_goal_events", + "type": "UserGoalEvent", + "is_repeated": true + } + ] + }, + { + "name": "UserGoalEvent", + "fields": [ + { + "id": 1, + "name": "timestamp", + "type": "int64" + }, + { + "id": 2, + "name": "goal_id", + "type": "string" + }, + { + "id": 3, + "name": "value", + "type": "double" + } + ] + } + ], + "imports": [ + { + "path": "google/protobuf/any.proto" + }, + { + "path": "google/protobuf/duration.proto" + }, + { + "path": "proto/feature/evaluation.proto" + }, + { + "path": "proto/feature/reason.proto" + }, + { + "path": "proto/user/user.proto" + } + ], + "package": { + "name": "bucketeer.event.client" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/event/client" + } + ] + } + }, + { + "protopath": "event:/:domain:/:event.proto", + "def": { + "enums": [ + { + "name": "Event.EntityType", + "enum_fields": [ + { + "name": "FEATURE" + }, + { + "name": "GOAL", + "integer": 1 + }, + { + "name": 
"EXPERIMENT", + "integer": 2 + }, + { + "name": "ACCOUNT", + "integer": 3 + }, + { + "name": "APIKEY", + "integer": 4 + }, + { + "name": "SEGMENT", + "integer": 5 + }, + { + "name": "ENVIRONMENT", + "integer": 6 + }, + { + "name": "ADMIN_ACCOUNT", + "integer": 7 + }, + { + "name": "AUTOOPS_RULE", + "integer": 8 + }, + { + "name": "PUSH", + "integer": 9 + }, + { + "name": "SUBSCRIPTION", + "integer": 10 + }, + { + "name": "ADMIN_SUBSCRIPTION", + "integer": 11 + }, + { + "name": "PROJECT", + "integer": 12 + }, + { + "name": "WEBHOOK", + "integer": 13 + } + ] + }, + { + "name": "Event.Type", + "enum_fields": [ + { + "name": "UNKNOWN" + }, + { + "name": "FEATURE_CREATED", + "integer": 1 + }, + { + "name": "FEATURE_RENAMED", + "integer": 2 + }, + { + "name": "FEATURE_ENABLED", + "integer": 3 + }, + { + "name": "FEATURE_DISABLED", + "integer": 4 + }, + { + "name": "FEATURE_DELETED", + "integer": 5 + }, + { + "name": "FEATURE_EVALUATION_DELAYABLE_SET", + "integer": 6 + }, + { + "name": "FEATURE_EVALUATION_UNDELAYABLE_SET", + "integer": 7 + }, + { + "name": "FEATURE_DESCRIPTION_CHANGED", + "integer": 8 + }, + { + "name": "FEATURE_VARIATION_ADDED", + "integer": 9 + }, + { + "name": "FEATURE_VARIATION_REMOVED", + "integer": 10 + }, + { + "name": "FEATURE_OFF_VARIATION_CHANGED", + "integer": 11 + }, + { + "name": "VARIATION_VALUE_CHANGED", + "integer": 12 + }, + { + "name": "VARIATION_NAME_CHANGED", + "integer": 13 + }, + { + "name": "VARIATION_DESCRIPTION_CHANGED", + "integer": 14 + }, + { + "name": "VARIATION_USER_ADDED", + "integer": 15 + }, + { + "name": "VARIATION_USER_REMOVED", + "integer": 16 + }, + { + "name": "FEATURE_RULE_ADDED", + "integer": 17 + }, + { + "name": "FEATURE_RULE_STRATEGY_CHANGED", + "integer": 18 + }, + { + "name": "FEATURE_RULE_DELETED", + "integer": 19 + }, + { + "name": "RULE_CLAUSE_ADDED", + "integer": 20 + }, + { + "name": "RULE_CLAUSE_DELETED", + "integer": 21 + }, + { + "name": "RULE_FIXED_STRATEGY_CHANGED", + "integer": 22 + }, + { + "name": 
"RULE_ROLLOUT_STRATEGY_CHANGED", + "integer": 23 + }, + { + "name": "CLAUSE_ATTRIBUTE_CHANGED", + "integer": 24 + }, + { + "name": "CLAUSE_OPERATOR_CHANGED", + "integer": 25 + }, + { + "name": "CLAUSE_VALUE_ADDED", + "integer": 26 + }, + { + "name": "CLAUSE_VALUE_REMOVED", + "integer": 27 + }, + { + "name": "FEATURE_DEFAULT_STRATEGY_CHANGED", + "integer": 28 + }, + { + "name": "FEATURE_TAG_ADDED", + "integer": 29 + }, + { + "name": "FEATURE_TAG_REMOVED", + "integer": 30 + }, + { + "name": "FEATURE_VERSION_INCREMENTED", + "integer": 31 + }, + { + "name": "FEATURE_ARCHIVED", + "integer": 32 + }, + { + "name": "FEATURE_CLONED", + "integer": 33 + }, + { + "name": "FEATURE_UNARCHIVED", + "integer": 35 + }, + { + "name": "SAMPLING_SEED_RESET", + "integer": 34 + }, + { + "name": "PREREQUISITE_ADDED", + "integer": 36 + }, + { + "name": "PREREQUISITE_REMOVED", + "integer": 37 + }, + { + "name": "PREREQUISITE_VARIATION_CHANGED", + "integer": 38 + }, + { + "name": "GOAL_CREATED", + "integer": 100 + }, + { + "name": "GOAL_RENAMED", + "integer": 101 + }, + { + "name": "GOAL_DESCRIPTION_CHANGED", + "integer": 102 + }, + { + "name": "GOAL_DELETED", + "integer": 103 + }, + { + "name": "GOAL_ARCHIVED", + "integer": 104 + }, + { + "name": "EXPERIMENT_CREATED", + "integer": 200 + }, + { + "name": "EXPERIMENT_STOPPED", + "integer": 201 + }, + { + "name": "EXPERIMENT_START_AT_CHANGED", + "integer": 202 + }, + { + "name": "EXPERIMENT_STOP_AT_CHANGED", + "integer": 203 + }, + { + "name": "EXPERIMENT_DELETED", + "integer": 204 + }, + { + "name": "EXPERIMENT_PERIOD_CHANGED", + "integer": 205 + }, + { + "name": "EXPERIMENT_NAME_CHANGED", + "integer": 206 + }, + { + "name": "EXPERIMENT_DESCRIPTION_CHANGED", + "integer": 207 + }, + { + "name": "EXPERIMENT_STARTED", + "integer": 208 + }, + { + "name": "EXPERIMENT_FINISHED", + "integer": 209 + }, + { + "name": "EXPERIMENT_ARCHIVED", + "integer": 210 + }, + { + "name": "ACCOUNT_CREATED", + "integer": 300 + }, + { + "name": 
"ACCOUNT_ROLE_CHANGED", + "integer": 301 + }, + { + "name": "ACCOUNT_ENABLED", + "integer": 302 + }, + { + "name": "ACCOUNT_DISABLED", + "integer": 303 + }, + { + "name": "ACCOUNT_DELETED", + "integer": 304 + }, + { + "name": "APIKEY_CREATED", + "integer": 400 + }, + { + "name": "APIKEY_NAME_CHANGED", + "integer": 401 + }, + { + "name": "APIKEY_ENABLED", + "integer": 402 + }, + { + "name": "APIKEY_DISABLED", + "integer": 403 + }, + { + "name": "SEGMENT_CREATED", + "integer": 500 + }, + { + "name": "SEGMENT_DELETED", + "integer": 501 + }, + { + "name": "SEGMENT_NAME_CHANGED", + "integer": 502 + }, + { + "name": "SEGMENT_DESCRIPTION_CHANGED", + "integer": 503 + }, + { + "name": "SEGMENT_RULE_ADDED", + "integer": 504 + }, + { + "name": "SEGMENT_RULE_DELETED", + "integer": 505 + }, + { + "name": "SEGMENT_RULE_CLAUSE_ADDED", + "integer": 506 + }, + { + "name": "SEGMENT_RULE_CLAUSE_DELETED", + "integer": 507 + }, + { + "name": "SEGMENT_CLAUSE_ATTRIBUTE_CHANGED", + "integer": 508 + }, + { + "name": "SEGMENT_CLAUSE_OPERATOR_CHANGED", + "integer": 509 + }, + { + "name": "SEGMENT_CLAUSE_VALUE_ADDED", + "integer": 510 + }, + { + "name": "SEGMENT_CLAUSE_VALUE_REMOVED", + "integer": 511 + }, + { + "name": "SEGMENT_USER_ADDED", + "integer": 512 + }, + { + "name": "SEGMENT_USER_DELETED", + "integer": 513 + }, + { + "name": "SEGMENT_BULK_UPLOAD_USERS", + "integer": 514 + }, + { + "name": "SEGMENT_BULK_UPLOAD_USERS_STATUS_CHANGED", + "integer": 515 + }, + { + "name": "ENVIRONMENT_CREATED", + "integer": 600 + }, + { + "name": "ENVIRONMENT_RENAMED", + "integer": 601 + }, + { + "name": "ENVIRONMENT_DESCRIPTION_CHANGED", + "integer": 602 + }, + { + "name": "ENVIRONMENT_DELETED", + "integer": 603 + }, + { + "name": "ADMIN_ACCOUNT_CREATED", + "integer": 700 + }, + { + "name": "ADMIN_ACCOUNT_ENABLED", + "integer": 702 + }, + { + "name": "ADMIN_ACCOUNT_DISABLED", + "integer": 703 + }, + { + "name": "AUTOOPS_RULE_CREATED", + "integer": 800 + }, + { + "name": "AUTOOPS_RULE_DELETED", + 
"integer": 801 + }, + { + "name": "AUTOOPS_RULE_OPS_TYPE_CHANGED", + "integer": 802 + }, + { + "name": "AUTOOPS_RULE_CLAUSE_DELETED", + "integer": 803 + }, + { + "name": "AUTOOPS_RULE_TRIGGERED_AT_CHANGED", + "integer": 804 + }, + { + "name": "OPS_EVENT_RATE_CLAUSE_ADDED", + "integer": 805 + }, + { + "name": "OPS_EVENT_RATE_CLAUSE_CHANGED", + "integer": 806 + }, + { + "name": "DATETIME_CLAUSE_ADDED", + "integer": 807 + }, + { + "name": "DATETIME_CLAUSE_CHANGED", + "integer": 808 + }, + { + "name": "PUSH_CREATED", + "integer": 900 + }, + { + "name": "PUSH_DELETED", + "integer": 901 + }, + { + "name": "PUSH_TAGS_ADDED", + "integer": 902 + }, + { + "name": "PUSH_TAGS_DELETED", + "integer": 903 + }, + { + "name": "PUSH_RENAMED", + "integer": 904 + }, + { + "name": "SUBSCRIPTION_CREATED", + "integer": 1000 + }, + { + "name": "SUBSCRIPTION_DELETED", + "integer": 1001 + }, + { + "name": "SUBSCRIPTION_ENABLED", + "integer": 1002 + }, + { + "name": "SUBSCRIPTION_DISABLED", + "integer": 1003 + }, + { + "name": "SUBSCRIPTION_SOURCE_TYPE_ADDED", + "integer": 1004 + }, + { + "name": "SUBSCRIPTION_SOURCE_TYPE_DELETED", + "integer": 1005 + }, + { + "name": "SUBSCRIPTION_RENAMED", + "integer": 1006 + }, + { + "name": "ADMIN_SUBSCRIPTION_CREATED", + "integer": 1100 + }, + { + "name": "ADMIN_SUBSCRIPTION_DELETED", + "integer": 1101 + }, + { + "name": "ADMIN_SUBSCRIPTION_ENABLED", + "integer": 1102 + }, + { + "name": "ADMIN_SUBSCRIPTION_DISABLED", + "integer": 1103 + }, + { + "name": "ADMIN_SUBSCRIPTION_SOURCE_TYPE_ADDED", + "integer": 1104 + }, + { + "name": "ADMIN_SUBSCRIPTION_SOURCE_TYPE_DELETED", + "integer": 1105 + }, + { + "name": "ADMIN_SUBSCRIPTION_RENAMED", + "integer": 1106 + }, + { + "name": "PROJECT_CREATED", + "integer": 1200 + }, + { + "name": "PROJECT_DESCRIPTION_CHANGED", + "integer": 1201 + }, + { + "name": "PROJECT_ENABLED", + "integer": 1202 + }, + { + "name": "PROJECT_DISABLED", + "integer": 1203 + }, + { + "name": "PROJECT_TRIAL_CREATED", + "integer": 1204 + }, + 
{ + "name": "PROJECT_TRIAL_CONVERTED", + "integer": 1205 + }, + { + "name": "WEBHOOK_CREATED", + "integer": 1300 + }, + { + "name": "WEBHOOK_DELETED", + "integer": 1301 + }, + { + "name": "WEBHOOK_NAME_CHANGED", + "integer": 1302 + }, + { + "name": "WEBHOOK_DESCRIPTION_CHANGED", + "integer": 1303 + }, + { + "name": "WEBHOOK_CLAUSE_ADDED", + "integer": 1304 + }, + { + "name": "WEBHOOK_CLAUSE_CHANGED", + "integer": 1305 + } + ] + } + ], + "messages": [ + { + "name": "Event", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "timestamp", + "type": "int64" + }, + { + "id": 3, + "name": "entity_type", + "type": "EntityType" + }, + { + "id": 4, + "name": "entity_id", + "type": "string" + }, + { + "id": 5, + "name": "type", + "type": "Type" + }, + { + "id": 6, + "name": "editor", + "type": "Editor" + }, + { + "id": 7, + "name": "data", + "type": "google.protobuf.Any" + }, + { + "id": 8, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 9, + "name": "is_admin_event", + "type": "bool" + }, + { + "id": 10, + "name": "options", + "type": "Options" + } + ] + }, + { + "name": "Editor", + "fields": [ + { + "id": 1, + "name": "email", + "type": "string" + }, + { + "id": 2, + "name": "role", + "type": "bucketeer.account.Account.Role" + }, + { + "id": 3, + "name": "is_admin", + "type": "bool" + } + ] + }, + { + "name": "Options", + "fields": [ + { + "id": 1, + "name": "comment", + "type": "string" + }, + { + "id": 2, + "name": "new_version", + "type": "int32" + } + ] + }, + { + "name": "FeatureCreatedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "name", + "type": "string" + }, + { + "id": 3, + "name": "description", + "type": "string" + }, + { + "id": 4, + "name": "user", + "type": "string" + }, + { + "id": 5, + "name": "variations", + "type": "bucketeer.feature.Variation", + "is_repeated": true + }, + { + "id": 6, + "name": "default_on_variation_index", + 
"type": "google.protobuf.Int32Value" + }, + { + "id": 7, + "name": "default_off_variation_index", + "type": "google.protobuf.Int32Value" + }, + { + "id": 8, + "name": "variation_type", + "type": "bucketeer.feature.Feature.VariationType" + } + ] + }, + { + "name": "FeatureEnabledEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + } + ] + }, + { + "name": "FeatureDisabledEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + } + ] + }, + { + "name": "FeatureArchivedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + } + ] + }, + { + "name": "FeatureUnarchivedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + } + ] + }, + { + "name": "FeatureDeletedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + } + ] + }, + { + "name": "EvaluationDelayableSetEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + } + ] + }, + { + "name": "EvaluationUndelayableSetEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + } + ] + }, + { + "name": "FeatureRenamedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "name", + "type": "string" + } + ] + }, + { + "name": "FeatureDescriptionChangedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "description", + "type": "string" + } + ] + }, + { + "name": "FeatureOffVariationChangedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "off_variation", + "type": "string" + } + ] + }, + { + "name": "FeatureVariationAddedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "variation", + "type": "bucketeer.feature.Variation" + } + ] + }, + { + "name": "FeatureVariationRemovedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "variation_id", + "type": 
"string" + } + ] + }, + { + "name": "VariationValueChangedEvent", + "fields": [ + { + "id": 1, + "name": "feature_id", + "type": "string" + }, + { + "id": 2, + "name": "id", + "type": "string" + }, + { + "id": 3, + "name": "value", + "type": "string" + } + ] + }, + { + "name": "VariationNameChangedEvent", + "fields": [ + { + "id": 1, + "name": "feature_id", + "type": "string" + }, + { + "id": 2, + "name": "id", + "type": "string" + }, + { + "id": 3, + "name": "name", + "type": "string" + } + ] + }, + { + "name": "VariationDescriptionChangedEvent", + "fields": [ + { + "id": 1, + "name": "feature_id", + "type": "string" + }, + { + "id": 2, + "name": "id", + "type": "string" + }, + { + "id": 3, + "name": "description", + "type": "string" + } + ] + }, + { + "name": "VariationUserAddedEvent", + "fields": [ + { + "id": 1, + "name": "feature_id", + "type": "string" + }, + { + "id": 2, + "name": "id", + "type": "string" + }, + { + "id": 3, + "name": "user", + "type": "string" + } + ] + }, + { + "name": "VariationUserRemovedEvent", + "fields": [ + { + "id": 1, + "name": "feature_id", + "type": "string" + }, + { + "id": 2, + "name": "id", + "type": "string" + }, + { + "id": 3, + "name": "user", + "type": "string" + } + ] + }, + { + "name": "FeatureRuleAddedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "rule", + "type": "bucketeer.feature.Rule" + } + ] + }, + { + "name": "FeatureChangeRuleStrategyEvent", + "fields": [ + { + "id": 1, + "name": "feature_id", + "type": "string" + }, + { + "id": 2, + "name": "rule_id", + "type": "string" + }, + { + "id": 3, + "name": "strategy", + "type": "bucketeer.feature.Strategy" + } + ] + }, + { + "name": "FeatureRuleDeletedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "rule_id", + "type": "string" + } + ] + }, + { + "name": "FeatureFixedStrategyChangedEvent", + "fields": [ + { + "id": 1, + "name": "feature_id", + "type": "string" 
+ }, + { + "id": 2, + "name": "rule_id", + "type": "string" + }, + { + "id": 3, + "name": "strategy", + "type": "bucketeer.feature.FixedStrategy" + } + ] + }, + { + "name": "FeatureRolloutStrategyChangedEvent", + "fields": [ + { + "id": 1, + "name": "feature_id", + "type": "string" + }, + { + "id": 2, + "name": "rule_id", + "type": "string" + }, + { + "id": 3, + "name": "strategy", + "type": "bucketeer.feature.RolloutStrategy" + } + ] + }, + { + "name": "RuleClauseAddedEvent", + "fields": [ + { + "id": 1, + "name": "feature_id", + "type": "string" + }, + { + "id": 2, + "name": "rule_id", + "type": "string" + }, + { + "id": 3, + "name": "clause", + "type": "bucketeer.feature.Clause" + } + ] + }, + { + "name": "RuleClauseDeletedEvent", + "fields": [ + { + "id": 1, + "name": "feature_id", + "type": "string" + }, + { + "id": 2, + "name": "rule_id", + "type": "string" + }, + { + "id": 3, + "name": "id", + "type": "string" + } + ] + }, + { + "name": "ClauseAttributeChangedEvent", + "fields": [ + { + "id": 1, + "name": "feature_id", + "type": "string" + }, + { + "id": 2, + "name": "rule_id", + "type": "string" + }, + { + "id": 3, + "name": "id", + "type": "string" + }, + { + "id": 4, + "name": "attribute", + "type": "string" + } + ] + }, + { + "name": "ClauseOperatorChangedEvent", + "fields": [ + { + "id": 1, + "name": "feature_id", + "type": "string" + }, + { + "id": 2, + "name": "rule_id", + "type": "string" + }, + { + "id": 3, + "name": "id", + "type": "string" + }, + { + "id": 4, + "name": "operator", + "type": "bucketeer.feature.Clause.Operator" + } + ] + }, + { + "name": "ClauseValueAddedEvent", + "fields": [ + { + "id": 1, + "name": "feature_id", + "type": "string" + }, + { + "id": 2, + "name": "rule_id", + "type": "string" + }, + { + "id": 3, + "name": "id", + "type": "string" + }, + { + "id": 4, + "name": "value", + "type": "string" + } + ] + }, + { + "name": "ClauseValueRemovedEvent", + "fields": [ + { + "id": 1, + "name": "feature_id", + "type": "string" + }, + 
{ + "id": 2, + "name": "rule_id", + "type": "string" + }, + { + "id": 3, + "name": "id", + "type": "string" + }, + { + "id": 4, + "name": "value", + "type": "string" + } + ] + }, + { + "name": "FeatureDefaultStrategyChangedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "strategy", + "type": "bucketeer.feature.Strategy" + } + ] + }, + { + "name": "FeatureTagAddedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "tag", + "type": "string" + } + ] + }, + { + "name": "FeatureTagRemovedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "tag", + "type": "string" + } + ] + }, + { + "name": "FeatureVersionIncrementedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "version", + "type": "int32" + } + ] + }, + { + "name": "FeatureClonedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "name", + "type": "string" + }, + { + "id": 3, + "name": "description", + "type": "string" + }, + { + "id": 4, + "name": "variations", + "type": "bucketeer.feature.Variation", + "is_repeated": true + }, + { + "id": 5, + "name": "targets", + "type": "bucketeer.feature.Target", + "is_repeated": true + }, + { + "id": 6, + "name": "rules", + "type": "bucketeer.feature.Rule", + "is_repeated": true + }, + { + "id": 7, + "name": "default_strategy", + "type": "bucketeer.feature.Strategy" + }, + { + "id": 8, + "name": "off_variation", + "type": "string" + }, + { + "id": 9, + "name": "tags", + "type": "string", + "is_repeated": true + }, + { + "id": 10, + "name": "maintainer", + "type": "string" + }, + { + "id": 11, + "name": "variation_type", + "type": "bucketeer.feature.Feature.VariationType" + } + ] + }, + { + "name": "FeatureSamplingSeedResetEvent", + "fields": [ + { + "id": 1, + "name": "sampling_seed", + "type": "string" + } + ] + }, + { + 
"name": "GoalCreatedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "name", + "type": "string" + }, + { + "id": 3, + "name": "description", + "type": "string" + }, + { + "id": 4, + "name": "deleted", + "type": "bool" + }, + { + "id": 5, + "name": "created_at", + "type": "int64" + }, + { + "id": 6, + "name": "updated_at", + "type": "int64" + } + ] + }, + { + "name": "GoalRenamedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "name", + "type": "string" + } + ] + }, + { + "name": "GoalDescriptionChangedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "description", + "type": "string" + } + ] + }, + { + "name": "GoalArchivedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + } + ] + }, + { + "name": "GoalDeletedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + } + ] + }, + { + "name": "ExperimentCreatedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "feature_id", + "type": "string" + }, + { + "id": 3, + "name": "feature_version", + "type": "int32" + }, + { + "id": 4, + "name": "variations", + "type": "bucketeer.feature.Variation", + "is_repeated": true + }, + { + "id": 5, + "name": "goal_id", + "type": "string", + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] + }, + { + "id": 6, + "name": "start_at", + "type": "int64" + }, + { + "id": 7, + "name": "stop_at", + "type": "int64" + }, + { + "id": 8, + "name": "stopped", + "type": "bool" + }, + { + "id": 9, + "name": "stopped_at", + "type": "int64" + }, + { + "id": 10, + "name": "created_at", + "type": "int64" + }, + { + "id": 11, + "name": "updated_at", + "type": "int64" + }, + { + "id": 12, + "name": "goal_ids", + "type": "string", + "is_repeated": true + }, + { + "id": 13, + "name": "name", + "type": "string" + }, + { + "id": 14, + "name": 
"description", + "type": "string" + }, + { + "id": 15, + "name": "base_variation_id", + "type": "string" + } + ] + }, + { + "name": "ExperimentStoppedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "stopped_at", + "type": "int64" + } + ] + }, + { + "name": "ExperimentArchivedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + } + ] + }, + { + "name": "ExperimentDeletedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + } + ] + }, + { + "name": "ExperimentStartAtChangedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "start_at", + "type": "int64" + } + ] + }, + { + "name": "ExperimentStopAtChangedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "stop_at", + "type": "int64" + } + ] + }, + { + "name": "ExperimentPeriodChangedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "start_at", + "type": "int64" + }, + { + "id": 3, + "name": "stop_at", + "type": "int64" + } + ] + }, + { + "name": "ExperimentNameChangedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "name", + "type": "string" + } + ] + }, + { + "name": "ExperimentDescriptionChangedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "description", + "type": "string" + } + ] + }, + { + "name": "ExperimentStartedEvent" + }, + { + "name": "ExperimentFinishedEvent" + }, + { + "name": "AccountCreatedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "email", + "type": "string" + }, + { + "id": 3, + "name": "name", + "type": "string" + }, + { + "id": 4, + "name": "role", + "type": "bucketeer.account.Account.Role" + }, + { + "id": 5, + "name": "disabled", + "type": "bool" + }, + { + "id": 6, + "name": "created_at", 
+ "type": "int64" + }, + { + "id": 7, + "name": "updated_at", + "type": "int64" + } + ] + }, + { + "name": "AccountRoleChangedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "role", + "type": "bucketeer.account.Account.Role" + } + ] + }, + { + "name": "AccountEnabledEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + } + ] + }, + { + "name": "AccountDisabledEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + } + ] + }, + { + "name": "AccountDeletedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + } + ] + }, + { + "name": "APIKeyCreatedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "name", + "type": "string" + }, + { + "id": 3, + "name": "role", + "type": "bucketeer.account.APIKey.Role" + }, + { + "id": 4, + "name": "disabled", + "type": "bool" + }, + { + "id": 5, + "name": "created_at", + "type": "int64" + }, + { + "id": 6, + "name": "updated_at", + "type": "int64" + } + ] + }, + { + "name": "APIKeyNameChangedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "name", + "type": "string" + } + ] + }, + { + "name": "APIKeyEnabledEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + } + ] + }, + { + "name": "APIKeyDisabledEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + } + ] + }, + { + "name": "SegmentCreatedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "name", + "type": "string" + }, + { + "id": 3, + "name": "description", + "type": "string" + } + ] + }, + { + "name": "SegmentDeletedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + } + ] + }, + { + "name": "SegmentNameChangedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "name", + "type": "string" + } + ] + }, + { + 
"name": "SegmentDescriptionChangedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "description", + "type": "string" + } + ] + }, + { + "name": "SegmentRuleAddedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "rule", + "type": "bucketeer.feature.Rule" + } + ] + }, + { + "name": "SegmentRuleDeletedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "rule_id", + "type": "string" + } + ] + }, + { + "name": "SegmentRuleClauseAddedEvent", + "fields": [ + { + "id": 1, + "name": "segment_id", + "type": "string" + }, + { + "id": 2, + "name": "rule_id", + "type": "string" + }, + { + "id": 3, + "name": "clause", + "type": "bucketeer.feature.Clause" + } + ] + }, + { + "name": "SegmentRuleClauseDeletedEvent", + "fields": [ + { + "id": 1, + "name": "segment_id", + "type": "string" + }, + { + "id": 2, + "name": "rule_id", + "type": "string" + }, + { + "id": 3, + "name": "clause_id", + "type": "string" + } + ] + }, + { + "name": "SegmentClauseAttributeChangedEvent", + "fields": [ + { + "id": 1, + "name": "segment_id", + "type": "string" + }, + { + "id": 2, + "name": "rule_id", + "type": "string" + }, + { + "id": 3, + "name": "clause_id", + "type": "string" + }, + { + "id": 4, + "name": "attribute", + "type": "string" + } + ] + }, + { + "name": "SegmentClauseOperatorChangedEvent", + "fields": [ + { + "id": 1, + "name": "segment_id", + "type": "string" + }, + { + "id": 2, + "name": "rule_id", + "type": "string" + }, + { + "id": 3, + "name": "clause_id", + "type": "string" + }, + { + "id": 4, + "name": "operator", + "type": "bucketeer.feature.Clause.Operator" + } + ] + }, + { + "name": "SegmentClauseValueAddedEvent", + "fields": [ + { + "id": 1, + "name": "segment_id", + "type": "string" + }, + { + "id": 2, + "name": "rule_id", + "type": "string" + }, + { + "id": 3, + "name": "clause_id", + "type": "string" + }, + { + "id": 4, + 
"name": "value", + "type": "string" + } + ] + }, + { + "name": "SegmentClauseValueRemovedEvent", + "fields": [ + { + "id": 1, + "name": "segment_id", + "type": "string" + }, + { + "id": 2, + "name": "rule_id", + "type": "string" + }, + { + "id": 3, + "name": "clause_id", + "type": "string" + }, + { + "id": 4, + "name": "value", + "type": "string" + } + ] + }, + { + "name": "SegmentUserAddedEvent", + "fields": [ + { + "id": 1, + "name": "segment_id", + "type": "string" + }, + { + "id": 2, + "name": "user_ids", + "type": "string", + "is_repeated": true + }, + { + "id": 3, + "name": "state", + "type": "bucketeer.feature.SegmentUser.State" + } + ] + }, + { + "name": "SegmentUserDeletedEvent", + "fields": [ + { + "id": 1, + "name": "segment_id", + "type": "string" + }, + { + "id": 2, + "name": "user_ids", + "type": "string", + "is_repeated": true + }, + { + "id": 3, + "name": "state", + "type": "bucketeer.feature.SegmentUser.State" + } + ] + }, + { + "name": "SegmentBulkUploadUsersEvent", + "fields": [ + { + "id": 1, + "name": "segment_id", + "type": "string" + }, + { + "id": 2, + "name": "status", + "type": "bucketeer.feature.Segment.Status" + }, + { + "id": 3, + "name": "state", + "type": "bucketeer.feature.SegmentUser.State" + } + ] + }, + { + "name": "SegmentBulkUploadUsersStatusChangedEvent", + "fields": [ + { + "id": 1, + "name": "segment_id", + "type": "string" + }, + { + "id": 2, + "name": "status", + "type": "bucketeer.feature.Segment.Status" + }, + { + "id": 3, + "name": "state", + "type": "bucketeer.feature.SegmentUser.State" + }, + { + "id": 4, + "name": "count", + "type": "int64" + } + ] + }, + { + "name": "EnvironmentCreatedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "namespace", + "type": "string" + }, + { + "id": 3, + "name": "name", + "type": "string" + }, + { + "id": 4, + "name": "description", + "type": "string" + }, + { + "id": 5, + "name": "deleted", + "type": "bool" + }, + { + "id": 6, + 
"name": "created_at", + "type": "int64" + }, + { + "id": 7, + "name": "updated_at", + "type": "int64" + }, + { + "id": 8, + "name": "project_id", + "type": "string" + } + ] + }, + { + "name": "EnvironmentRenamedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "name", + "type": "string" + } + ] + }, + { + "name": "EnvironmentDescriptionChangedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "description", + "type": "string" + } + ] + }, + { + "name": "EnvironmentDeletedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "namespace", + "type": "string" + } + ] + }, + { + "name": "AdminAccountCreatedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "email", + "type": "string" + }, + { + "id": 3, + "name": "name", + "type": "string" + }, + { + "id": 4, + "name": "role", + "type": "bucketeer.account.Account.Role" + }, + { + "id": 5, + "name": "disabled", + "type": "bool" + }, + { + "id": 6, + "name": "created_at", + "type": "int64" + }, + { + "id": 7, + "name": "updated_at", + "type": "int64" + } + ] + }, + { + "name": "AdminAccountEnabledEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + } + ] + }, + { + "name": "AdminAccountDisabledEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + } + ] + }, + { + "name": "AdminAccountDeletedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + } + ] + }, + { + "name": "AutoOpsRuleCreatedEvent", + "fields": [ + { + "id": 1, + "name": "feature_id", + "type": "string" + }, + { + "id": 2, + "name": "ops_type", + "type": "bucketeer.autoops.OpsType" + }, + { + "id": 3, + "name": "clauses", + "type": "bucketeer.autoops.Clause", + "is_repeated": true + }, + { + "id": 4, + "name": "triggered_at", + "type": "int64" + }, + { + "id": 5, + "name": "created_at", + "type": 
"int64" + }, + { + "id": 6, + "name": "updated_at", + "type": "int64" + } + ] + }, + { + "name": "AutoOpsRuleDeletedEvent" + }, + { + "name": "AutoOpsRuleOpsTypeChangedEvent", + "fields": [ + { + "id": 1, + "name": "ops_type", + "type": "bucketeer.autoops.OpsType" + } + ] + }, + { + "name": "AutoOpsRuleTriggeredAtChangedEvent" + }, + { + "name": "OpsEventRateClauseAddedEvent", + "fields": [ + { + "id": 1, + "name": "clause_id", + "type": "string" + }, + { + "id": 2, + "name": "ops_event_rate_clause", + "type": "bucketeer.autoops.OpsEventRateClause" + } + ] + }, + { + "name": "OpsEventRateClauseChangedEvent", + "fields": [ + { + "id": 1, + "name": "clause_id", + "type": "string" + }, + { + "id": 2, + "name": "ops_event_rate_clause", + "type": "bucketeer.autoops.OpsEventRateClause" + } + ] + }, + { + "name": "AutoOpsRuleClauseDeletedEvent", + "fields": [ + { + "id": 1, + "name": "clause_id", + "type": "string" + } + ] + }, + { + "name": "DatetimeClauseAddedEvent", + "fields": [ + { + "id": 1, + "name": "clause_id", + "type": "string" + }, + { + "id": 2, + "name": "datetime_clause", + "type": "bucketeer.autoops.DatetimeClause" + } + ] + }, + { + "name": "DatetimeClauseChangedEvent", + "fields": [ + { + "id": 1, + "name": "clause_id", + "type": "string" + }, + { + "id": 2, + "name": "datetime_clause", + "type": "bucketeer.autoops.DatetimeClause" + } + ] + }, + { + "name": "PushCreatedEvent", + "fields": [ + { + "id": 2, + "name": "fcm_api_key", + "type": "string" + }, + { + "id": 3, + "name": "tags", + "type": "string", + "is_repeated": true + }, + { + "id": 4, + "name": "name", + "type": "string" + } + ] + }, + { + "name": "PushDeletedEvent" + }, + { + "name": "PushTagsAddedEvent", + "fields": [ + { + "id": 2, + "name": "tags", + "type": "string", + "is_repeated": true + } + ] + }, + { + "name": "PushTagsDeletedEvent", + "fields": [ + { + "id": 2, + "name": "tags", + "type": "string", + "is_repeated": true + } + ] + }, + { + "name": "PushRenamedEvent", + "fields": [ + 
{ + "id": 2, + "name": "name", + "type": "string" + } + ] + }, + { + "name": "SubscriptionCreatedEvent", + "fields": [ + { + "id": 1, + "name": "source_types", + "type": "bucketeer.notification.Subscription.SourceType", + "is_repeated": true + }, + { + "id": 2, + "name": "recipient", + "type": "bucketeer.notification.Recipient" + }, + { + "id": 3, + "name": "name", + "type": "string" + } + ] + }, + { + "name": "SubscriptionDeletedEvent" + }, + { + "name": "SubscriptionEnabledEvent" + }, + { + "name": "SubscriptionDisabledEvent" + }, + { + "name": "SubscriptionSourceTypesAddedEvent", + "fields": [ + { + "id": 1, + "name": "source_types", + "type": "bucketeer.notification.Subscription.SourceType", + "is_repeated": true + } + ] + }, + { + "name": "SubscriptionSourceTypesDeletedEvent", + "fields": [ + { + "id": 1, + "name": "source_types", + "type": "bucketeer.notification.Subscription.SourceType", + "is_repeated": true + } + ] + }, + { + "name": "SubscriptionRenamedEvent", + "fields": [ + { + "id": 1, + "name": "name", + "type": "string" + } + ] + }, + { + "name": "AdminSubscriptionCreatedEvent", + "fields": [ + { + "id": 1, + "name": "source_types", + "type": "bucketeer.notification.Subscription.SourceType", + "is_repeated": true + }, + { + "id": 2, + "name": "recipient", + "type": "bucketeer.notification.Recipient" + }, + { + "id": 3, + "name": "name", + "type": "string" + } + ] + }, + { + "name": "AdminSubscriptionDeletedEvent" + }, + { + "name": "AdminSubscriptionEnabledEvent" + }, + { + "name": "AdminSubscriptionDisabledEvent" + }, + { + "name": "AdminSubscriptionSourceTypesAddedEvent", + "fields": [ + { + "id": 1, + "name": "source_types", + "type": "bucketeer.notification.Subscription.SourceType", + "is_repeated": true + } + ] + }, + { + "name": "AdminSubscriptionSourceTypesDeletedEvent", + "fields": [ + { + "id": 1, + "name": "source_types", + "type": "bucketeer.notification.Subscription.SourceType", + "is_repeated": true + } + ] + }, + { + "name": 
"AdminSubscriptionRenamedEvent", + "fields": [ + { + "id": 1, + "name": "name", + "type": "string" + } + ] + }, + { + "name": "ProjectCreatedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "description", + "type": "string" + }, + { + "id": 3, + "name": "disabled", + "type": "bool" + }, + { + "id": 4, + "name": "trial", + "type": "bool" + }, + { + "id": 5, + "name": "creator_email", + "type": "string" + }, + { + "id": 6, + "name": "created_at", + "type": "int64" + }, + { + "id": 7, + "name": "updated_at", + "type": "int64" + } + ] + }, + { + "name": "ProjectDescriptionChangedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "description", + "type": "string" + } + ] + }, + { + "name": "ProjectEnabledEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + } + ] + }, + { + "name": "ProjectDisabledEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + } + ] + }, + { + "name": "ProjectTrialCreatedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "description", + "type": "string" + }, + { + "id": 3, + "name": "disabled", + "type": "bool" + }, + { + "id": 4, + "name": "trial", + "type": "bool" + }, + { + "id": 5, + "name": "creator_email", + "type": "string" + }, + { + "id": 6, + "name": "created_at", + "type": "int64" + }, + { + "id": 7, + "name": "updated_at", + "type": "int64" + } + ] + }, + { + "name": "ProjectTrialConvertedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + } + ] + }, + { + "name": "PrerequisiteAddedEvent", + "fields": [ + { + "id": 1, + "name": "prerequisite", + "type": "bucketeer.feature.Prerequisite" + } + ] + }, + { + "name": "PrerequisiteVariationChangedEvent", + "fields": [ + { + "id": 1, + "name": "prerequisite", + "type": "bucketeer.feature.Prerequisite" + } + ] + }, + { + "name": "PrerequisiteRemovedEvent", + "fields": [ + { + 
"id": 1, + "name": "feature_id", + "type": "string" + } + ] + }, + { + "name": "WebhookCreatedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "name", + "type": "string" + }, + { + "id": 3, + "name": "description", + "type": "string" + }, + { + "id": 4, + "name": "created_at", + "type": "int64" + }, + { + "id": 5, + "name": "updated_at", + "type": "int64" + } + ] + }, + { + "name": "WebhookDeletedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + } + ] + }, + { + "name": "WebhookNameChangedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "name", + "type": "string" + } + ] + }, + { + "name": "WebhookDescriptionChangedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "description", + "type": "string" + } + ] + }, + { + "name": "WebhookClauseAddedEvent", + "fields": [ + { + "id": 1, + "name": "clause_id", + "type": "string" + }, + { + "id": 2, + "name": "webhook_clause", + "type": "bucketeer.autoops.WebhookClause" + } + ] + }, + { + "name": "WebhookClauseChangedEvent", + "fields": [ + { + "id": 1, + "name": "clause_id", + "type": "string" + }, + { + "id": 2, + "name": "webhook_clause", + "type": "bucketeer.autoops.WebhookClause" + } + ] + } + ], + "imports": [ + { + "path": "google/protobuf/any.proto" + }, + { + "path": "google/protobuf/wrappers.proto" + }, + { + "path": "proto/feature/clause.proto" + }, + { + "path": "proto/feature/feature.proto" + }, + { + "path": "proto/feature/rule.proto" + }, + { + "path": "proto/feature/variation.proto" + }, + { + "path": "proto/feature/strategy.proto" + }, + { + "path": "proto/feature/segment.proto" + }, + { + "path": "proto/feature/target.proto" + }, + { + "path": "proto/account/account.proto" + }, + { + "path": "proto/account/api_key.proto" + }, + { + "path": "proto/autoops/auto_ops_rule.proto" + }, + { + "path": "proto/autoops/clause.proto" + }, + 
{ + "path": "proto/notification/subscription.proto" + }, + { + "path": "proto/notification/recipient.proto" + }, + { + "path": "proto/feature/prerequisite.proto" + } + ], + "package": { + "name": "bucketeer.event.domain" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/event/domain" + } + ] + } + }, + { + "protopath": "event:/:domain:/:localized_message.proto", + "def": { + "messages": [ + { + "name": "LocalizedMessage", + "fields": [ + { + "id": 1, + "name": "locale", + "type": "string" + }, + { + "id": 2, + "name": "message", + "type": "string" + } + ] + } + ], + "package": { + "name": "bucketeer.event.domain" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/event/domain" + } + ] + } + }, + { + "protopath": "event:/:service:/:feature.proto", + "def": { + "messages": [ + { + "name": "EvaluationRequestEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "timestamp", + "type": "int64" + }, + { + "id": 3, + "name": "user", + "type": "bucketeer.user.User" + }, + { + "id": 5, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 6, + "name": "tag", + "type": "string" + } + ], + "reserved_ids": [ + 4 + ] + } + ], + "imports": [ + { + "path": "proto/user/user.proto" + } + ], + "package": { + "name": "bucketeer.event.service" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/event/service" + } + ] + } + }, + { + "protopath": "event:/:service:/:segment.proto", + "def": { + "messages": [ + { + "name": "BulkSegmentUsersReceivedEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 3, + "name": "segment_id", + "type": "string" + }, + { + "id": 4, + "name": "data", + "type": "bytes" + }, + { + "id": 5, + "name": "state", + "type": 
"bucketeer.feature.SegmentUser.State" + }, + { + "id": 6, + "name": "editor", + "type": "bucketeer.event.domain.Editor" + } + ] + } + ], + "imports": [ + { + "path": "proto/feature/segment.proto" + }, + { + "path": "proto/event/domain/event.proto" + } + ], + "package": { + "name": "bucketeer.event.service" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/event/service" + } + ] + } + }, + { + "protopath": "event:/:service:/:user.proto", + "def": { + "messages": [ + { + "name": "UserEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 4, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 5, + "name": "tag", + "type": "string" + }, + { + "id": 6, + "name": "user_id", + "type": "string" + }, + { + "id": 7, + "name": "last_seen", + "type": "int64" + }, + { + "id": 9, + "name": "source_id", + "type": "bucketeer.event.client.SourceId" + } + ], + "maps": [ + { + "key_type": "string", + "field": { + "id": 8, + "name": "data", + "type": "string" + } + } + ], + "reserved_ids": [ + 2, + 3 + ] + } + ], + "imports": [ + { + "path": "proto/event/client/event.proto" + } + ], + "package": { + "name": "bucketeer.event.service" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/event/service" + } + ] + } + }, + { + "protopath": "eventcounter:/:distribution_summary.proto", + "def": { + "messages": [ + { + "name": "DistributionSummary", + "fields": [ + { + "id": 1, + "name": "mean", + "type": "double" + }, + { + "id": 2, + "name": "sd", + "type": "double" + }, + { + "id": 3, + "name": "rhat", + "type": "double" + }, + { + "id": 4, + "name": "histogram", + "type": "Histogram" + }, + { + "id": 5, + "name": "median", + "type": "double" + }, + { + "id": 6, + "name": "percentile025", + "type": "double" + }, + { + "id": 7, + "name": "percentile975", + "type": "double" + } + ] + } + ], + "imports": [ + { + "path": 
"proto/eventcounter/histogram.proto" + } + ], + "package": { + "name": "bucketeer.eventcounter" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/eventcounter" + } + ] + } + }, + { + "protopath": "eventcounter:/:evaluation_count.proto", + "def": { + "messages": [ + { + "name": "EvaluationCount", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "feature_id", + "type": "string" + }, + { + "id": 3, + "name": "feature_version", + "type": "int32" + }, + { + "id": 4, + "name": "realtime_counts", + "type": "VariationCount", + "is_repeated": true + }, + { + "id": 5, + "name": "batch_counts", + "type": "VariationCount", + "is_repeated": true + }, + { + "id": 6, + "name": "updated_at", + "type": "int64" + } + ] + } + ], + "imports": [ + { + "path": "proto/eventcounter/variation_count.proto" + } + ], + "package": { + "name": "bucketeer.eventcounter" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/eventcounter" + } + ] + } + }, + { + "protopath": "eventcounter:/:experiment_count.proto", + "def": { + "messages": [ + { + "name": "ExperimentCount", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "feature_id", + "type": "string" + }, + { + "id": 3, + "name": "feature_version", + "type": "int32" + }, + { + "id": 4, + "name": "goal_id", + "type": "string", + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] + }, + { + "id": 5, + "name": "realtime_counts", + "type": "VariationCount", + "is_repeated": true, + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] + }, + { + "id": 6, + "name": "batch_counts", + "type": "VariationCount", + "is_repeated": true, + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] + }, + { + "id": 7, + "name": "updated_at", + "type": "int64" + }, + { + "id": 8, + "name": "goal_counts", + "type": "GoalCounts", + 
"is_repeated": true + } + ] + }, + { + "name": "GoalCounts", + "fields": [ + { + "id": 1, + "name": "goal_id", + "type": "string" + }, + { + "id": 2, + "name": "realtime_counts", + "type": "VariationCount", + "is_repeated": true + }, + { + "id": 3, + "name": "batch_counts", + "type": "VariationCount", + "is_repeated": true, + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] + } + ] + } + ], + "imports": [ + { + "path": "proto/eventcounter/variation_count.proto" + } + ], + "package": { + "name": "bucketeer.eventcounter" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/eventcounter" + } + ] + } + }, + { + "protopath": "eventcounter:/:experiment_result.proto", + "def": { + "messages": [ + { + "name": "ExperimentResult", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "experiment_id", + "type": "string" + }, + { + "id": 3, + "name": "updated_at", + "type": "int64" + }, + { + "id": 4, + "name": "goal_results", + "type": "GoalResult", + "is_repeated": true + } + ] + } + ], + "imports": [ + { + "path": "proto/eventcounter/goal_result.proto" + } + ], + "package": { + "name": "bucketeer.eventcounter" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/eventcounter" + } + ] + } + }, + { + "protopath": "eventcounter:/:filter.proto", + "def": { + "enums": [ + { + "name": "Filter.Operator", + "enum_fields": [ + { + "name": "EQUALS" + } + ] + } + ], + "messages": [ + { + "name": "Filter", + "fields": [ + { + "id": 1, + "name": "key", + "type": "string" + }, + { + "id": 2, + "name": "operator", + "type": "Operator" + }, + { + "id": 3, + "name": "values", + "type": "string", + "is_repeated": true + } + ] + } + ], + "package": { + "name": "bucketeer.eventcounter" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/eventcounter" + } + ] + } + }, + { + "protopath": 
"eventcounter:/:goal_result.proto", + "def": { + "messages": [ + { + "name": "GoalResult", + "fields": [ + { + "id": 1, + "name": "goal_id", + "type": "string" + }, + { + "id": 2, + "name": "variation_results", + "type": "VariationResult", + "is_repeated": true + } + ] + } + ], + "imports": [ + { + "path": "proto/eventcounter/variation_result.proto" + } + ], + "package": { + "name": "bucketeer.eventcounter" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/eventcounter" + } + ] + } + }, + { + "protopath": "eventcounter:/:histogram.proto", + "def": { + "messages": [ + { + "name": "Histogram", + "fields": [ + { + "id": 1, + "name": "hist", + "type": "int64", + "is_repeated": true + }, + { + "id": 2, + "name": "bins", + "type": "double", + "is_repeated": true + } + ] + } + ], + "package": { + "name": "bucketeer.eventcounter" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/eventcounter" + } + ] + } + }, + { + "protopath": "eventcounter:/:service.proto", + "def": { + "messages": [ + { + "name": "GetEvaluationCountV2Request", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "start_at", + "type": "int64" + }, + { + "id": 3, + "name": "end_at", + "type": "int64" + }, + { + "id": 4, + "name": "feature_id", + "type": "string" + }, + { + "id": 5, + "name": "feature_version", + "type": "int32" + }, + { + "id": 6, + "name": "variation_ids", + "type": "string", + "is_repeated": true + } + ] + }, + { + "name": "GetEvaluationCountV2Response", + "fields": [ + { + "id": 1, + "name": "count", + "type": "EvaluationCount" + } + ] + }, + { + "name": "GetEvaluationTimeseriesCountRequest", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "feature_id", + "type": "string" + } + ] + }, + { + "name": "GetEvaluationTimeseriesCountResponse", + "fields": [ + { + "id": 
1, + "name": "user_counts", + "type": "VariationTimeseries", + "is_repeated": true + }, + { + "id": 2, + "name": "event_counts", + "type": "VariationTimeseries", + "is_repeated": true + } + ] + }, + { + "name": "GetExperimentResultRequest", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "experiment_id", + "type": "string" + } + ] + }, + { + "name": "GetExperimentResultResponse", + "fields": [ + { + "id": 1, + "name": "experiment_result", + "type": "ExperimentResult" + } + ] + }, + { + "name": "ListExperimentResultsRequest", + "fields": [ + { + "id": 1, + "name": "feature_id", + "type": "string" + }, + { + "id": 2, + "name": "feature_version", + "type": "google.protobuf.Int32Value" + }, + { + "id": 3, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "ListExperimentResultsResponse", + "maps": [ + { + "key_type": "string", + "field": { + "id": 1, + "name": "results", + "type": "ExperimentResult" + } + } + ] + }, + { + "name": "ListUserDataKeysRequest", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "ListUserDataKeysResponse", + "fields": [ + { + "id": 1, + "name": "keys", + "type": "string", + "is_repeated": true + } + ] + }, + { + "name": "ListUserDataValuesRequest", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "key", + "type": "string" + } + ] + }, + { + "name": "ListUserDataValuesResponse", + "fields": [ + { + "id": 1, + "name": "values", + "type": "string", + "is_repeated": true + } + ] + }, + { + "name": "GetGoalCountRequest", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "feature_id", + "type": "string" + }, + { + "id": 3, + "name": "feature_version", + "type": "int32" + }, + { + "id": 4, + "name": "goal_id", + "type": "string" + }, + { + "id": 5, + "name": "start_at", + 
"type": "int64" + }, + { + "id": 6, + "name": "end_at", + "type": "int64" + }, + { + "id": 7, + "name": "filters", + "type": "Filter", + "is_repeated": true + }, + { + "id": 8, + "name": "segments", + "type": "string", + "is_repeated": true + }, + { + "id": 9, + "name": "reason", + "type": "string" + } + ] + }, + { + "name": "GetGoalCountResponse", + "fields": [ + { + "id": 1, + "name": "headers", + "type": "Row" + }, + { + "id": 2, + "name": "rows", + "type": "Row", + "is_repeated": true + } + ] + }, + { + "name": "GetGoalCountV2Request", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "start_at", + "type": "int64" + }, + { + "id": 3, + "name": "end_at", + "type": "int64" + }, + { + "id": 4, + "name": "goal_id", + "type": "string" + }, + { + "id": 5, + "name": "feature_id", + "type": "string" + }, + { + "id": 6, + "name": "feature_version", + "type": "int32" + }, + { + "id": 7, + "name": "variation_ids", + "type": "string", + "is_repeated": true + } + ] + }, + { + "name": "GetGoalCountV2Response", + "fields": [ + { + "id": 1, + "name": "goal_counts", + "type": "GoalCounts" + } + ] + }, + { + "name": "GetUserCountV2Request", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "start_at", + "type": "int64" + }, + { + "id": 3, + "name": "end_at", + "type": "int64" + } + ] + }, + { + "name": "GetUserCountV2Response", + "fields": [ + { + "id": 1, + "name": "event_count", + "type": "int64" + }, + { + "id": 2, + "name": "user_count", + "type": "int64" + } + ] + }, + { + "name": "ListUserMetadataRequest", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "ListUserMetadataResponse", + "fields": [ + { + "id": 1, + "name": "data", + "type": "string", + "is_repeated": true + } + ] + } + ], + "services": [ + { + "name": "EventCounterService", + "rpcs": [ + { + "name": "GetEvaluationCountV2", + 
"in_type": "GetEvaluationCountV2Request", + "out_type": "GetEvaluationCountV2Response" + }, + { + "name": "GetEvaluationTimeseriesCount", + "in_type": "GetEvaluationTimeseriesCountRequest", + "out_type": "GetEvaluationTimeseriesCountResponse" + }, + { + "name": "GetExperimentResult", + "in_type": "GetExperimentResultRequest", + "out_type": "GetExperimentResultResponse" + }, + { + "name": "ListExperimentResults", + "in_type": "ListExperimentResultsRequest", + "out_type": "ListExperimentResultsResponse" + }, + { + "name": "GetGoalCount", + "in_type": "GetGoalCountRequest", + "out_type": "GetGoalCountResponse" + }, + { + "name": "GetGoalCountV2", + "in_type": "GetGoalCountV2Request", + "out_type": "GetGoalCountV2Response" + }, + { + "name": "GetUserCountV2", + "in_type": "GetUserCountV2Request", + "out_type": "GetUserCountV2Response" + }, + { + "name": "ListUserMetadata", + "in_type": "ListUserMetadataRequest", + "out_type": "ListUserMetadataResponse" + } + ] + } + ], + "imports": [ + { + "path": "google/protobuf/wrappers.proto" + }, + { + "path": "proto/eventcounter/evaluation_count.proto" + }, + { + "path": "proto/eventcounter/experiment_count.proto" + }, + { + "path": "proto/eventcounter/experiment_result.proto" + }, + { + "path": "proto/eventcounter/filter.proto" + }, + { + "path": "proto/eventcounter/table.proto" + }, + { + "path": "proto/eventcounter/timeseries.proto" + } + ], + "package": { + "name": "bucketeer.eventcounter" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/eventcounter" + } + ] + } + }, + { + "protopath": "eventcounter:/:table.proto", + "def": { + "enums": [ + { + "name": "Cell.Type", + "enum_fields": [ + { + "name": "STRING" + }, + { + "name": "DOUBLE", + "integer": 2 + } + ] + } + ], + "messages": [ + { + "name": "Row", + "fields": [ + { + "id": 1, + "name": "cells", + "type": "Cell", + "is_repeated": true + } + ] + }, + { + "name": "Cell", + "fields": [ + { + "id": 1, + "name": "type", + 
"type": "Type" + }, + { + "id": 2, + "name": "value", + "type": "string" + }, + { + "id": 4, + "name": "valueDouble", + "type": "double" + } + ] + } + ], + "package": { + "name": "bucketeer.eventcounter" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/eventcounter" + } + ] + } + }, + { + "protopath": "eventcounter:/:timeseries.proto", + "def": { + "messages": [ + { + "name": "VariationTimeseries", + "fields": [ + { + "id": 1, + "name": "variation_id", + "type": "string" + }, + { + "id": 2, + "name": "timeseries", + "type": "Timeseries" + } + ] + }, + { + "name": "Timeseries", + "fields": [ + { + "id": 1, + "name": "timestamps", + "type": "int64", + "is_repeated": true + }, + { + "id": 2, + "name": "values", + "type": "double", + "is_repeated": true + } + ] + } + ], + "package": { + "name": "bucketeer.eventcounter" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/eventcounter" + } + ] + } + }, + { + "protopath": "eventcounter:/:variation_count.proto", + "def": { + "messages": [ + { + "name": "VariationCount", + "fields": [ + { + "id": 1, + "name": "variation_id", + "type": "string" + }, + { + "id": 2, + "name": "user_count", + "type": "int64" + }, + { + "id": 3, + "name": "event_count", + "type": "int64" + }, + { + "id": 4, + "name": "value_sum", + "type": "double" + }, + { + "id": 5, + "name": "created_at", + "type": "int64" + }, + { + "id": 6, + "name": "variation_value", + "type": "string" + }, + { + "id": 7, + "name": "value_sum_per_user_mean", + "type": "double" + }, + { + "id": 8, + "name": "value_sum_per_user_variance", + "type": "double" + } + ] + } + ], + "package": { + "name": "bucketeer.eventcounter" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/eventcounter" + } + ] + } + }, + { + "protopath": "eventcounter:/:variation_result.proto", + "def": { + "messages": [ + { + "name": "VariationResult", + 
"fields": [ + { + "id": 1, + "name": "variation_id", + "type": "string" + }, + { + "id": 2, + "name": "experiment_count", + "type": "VariationCount" + }, + { + "id": 3, + "name": "evaluation_count", + "type": "VariationCount" + }, + { + "id": 4, + "name": "cvr_prob_best", + "type": "DistributionSummary" + }, + { + "id": 5, + "name": "cvr_prob_beat_baseline", + "type": "DistributionSummary" + }, + { + "id": 6, + "name": "cvr_prob", + "type": "DistributionSummary" + }, + { + "id": 7, + "name": "evaluation_user_count_timeseries", + "type": "Timeseries" + }, + { + "id": 8, + "name": "evaluation_event_count_timeseries", + "type": "Timeseries" + }, + { + "id": 9, + "name": "goal_user_count_timeseries", + "type": "Timeseries" + }, + { + "id": 10, + "name": "goal_event_count_timeseries", + "type": "Timeseries" + }, + { + "id": 11, + "name": "goal_value_sum_timeseries", + "type": "Timeseries" + }, + { + "id": 12, + "name": "cvr_median_timeseries", + "type": "Timeseries" + }, + { + "id": 13, + "name": "cvr_percentile025_timeseries", + "type": "Timeseries" + }, + { + "id": 14, + "name": "cvr_percentile975_timeseries", + "type": "Timeseries" + }, + { + "id": 15, + "name": "cvr_timeseries", + "type": "Timeseries" + }, + { + "id": 16, + "name": "goal_value_sum_per_user_timeseries", + "type": "Timeseries" + }, + { + "id": 17, + "name": "goal_value_sum_per_user_prob", + "type": "DistributionSummary" + }, + { + "id": 18, + "name": "goal_value_sum_per_user_prob_best", + "type": "DistributionSummary" + }, + { + "id": 19, + "name": "goal_value_sum_per_user_prob_beat_baseline", + "type": "DistributionSummary" + }, + { + "id": 20, + "name": "goal_value_sum_per_user_median_timeseries", + "type": "Timeseries" + }, + { + "id": 21, + "name": "goal_value_sum_per_user_percentile025_timeseries", + "type": "Timeseries" + }, + { + "id": 22, + "name": "goal_value_sum_per_user_percentile975_timeseries", + "type": "Timeseries" + } + ] + } + ], + "imports": [ + { + "path": 
"proto/eventcounter/variation_count.proto" + }, + { + "path": "proto/eventcounter/distribution_summary.proto" + }, + { + "path": "proto/eventcounter/timeseries.proto" + } + ], + "package": { + "name": "bucketeer.eventcounter" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/eventcounter" + } + ] + } + }, + { + "protopath": "experiment:/:command.proto", + "def": { + "messages": [ + { + "name": "CreateGoalCommand", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "name", + "type": "string" + }, + { + "id": 3, + "name": "description", + "type": "string" + } + ] + }, + { + "name": "RenameGoalCommand", + "fields": [ + { + "id": 1, + "name": "name", + "type": "string" + } + ] + }, + { + "name": "ChangeDescriptionGoalCommand", + "fields": [ + { + "id": 1, + "name": "description", + "type": "string" + } + ] + }, + { + "name": "ArchiveGoalCommand" + }, + { + "name": "DeleteGoalCommand" + }, + { + "name": "CreateExperimentCommand", + "fields": [ + { + "id": 1, + "name": "feature_id", + "type": "string" + }, + { + "id": 3, + "name": "start_at", + "type": "int64" + }, + { + "id": 4, + "name": "stop_at", + "type": "int64" + }, + { + "id": 5, + "name": "goal_ids", + "type": "string", + "is_repeated": true + }, + { + "id": 6, + "name": "name", + "type": "string" + }, + { + "id": 7, + "name": "description", + "type": "string" + }, + { + "id": 8, + "name": "base_variation_id", + "type": "string" + } + ], + "reserved_ids": [ + 2 + ] + }, + { + "name": "ChangeExperimentPeriodCommand", + "fields": [ + { + "id": 1, + "name": "start_at", + "type": "int64" + }, + { + "id": 2, + "name": "stop_at", + "type": "int64" + } + ] + }, + { + "name": "ChangeExperimentNameCommand", + "fields": [ + { + "id": 1, + "name": "name", + "type": "string" + } + ] + }, + { + "name": "ChangeExperimentDescriptionCommand", + "fields": [ + { + "id": 1, + "name": "description", + "type": "string" + } + ] + }, + { + 
"name": "StopExperimentCommand" + }, + { + "name": "ArchiveExperimentCommand" + }, + { + "name": "DeleteExperimentCommand" + }, + { + "name": "StartExperimentCommand" + }, + { + "name": "FinishExperimentCommand" + } + ], + "package": { + "name": "bucketeer.experiment" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/experiment" + } + ] + } + }, + { + "protopath": "experiment:/:experiment.proto", + "def": { + "enums": [ + { + "name": "Experiment.Status", + "enum_fields": [ + { + "name": "WAITING" + }, + { + "name": "RUNNING", + "integer": 1 + }, + { + "name": "STOPPED", + "integer": 2 + }, + { + "name": "FORCE_STOPPED", + "integer": 3 + } + ] + } + ], + "messages": [ + { + "name": "Experiment", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "goal_id", + "type": "string", + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] + }, + { + "id": 3, + "name": "feature_id", + "type": "string" + }, + { + "id": 4, + "name": "feature_version", + "type": "int32" + }, + { + "id": 5, + "name": "variations", + "type": "bucketeer.feature.Variation", + "is_repeated": true + }, + { + "id": 6, + "name": "start_at", + "type": "int64" + }, + { + "id": 7, + "name": "stop_at", + "type": "int64" + }, + { + "id": 8, + "name": "stopped", + "type": "bool", + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] + }, + { + "id": 9, + "name": "stopped_at", + "type": "int64", + "options": [ + { + "name": "jstype", + "value": "JS_STRING" + } + ] + }, + { + "id": 10, + "name": "created_at", + "type": "int64" + }, + { + "id": 11, + "name": "updated_at", + "type": "int64" + }, + { + "id": 12, + "name": "deleted", + "type": "bool" + }, + { + "id": 13, + "name": "goal_ids", + "type": "string", + "is_repeated": true + }, + { + "id": 14, + "name": "name", + "type": "string" + }, + { + "id": 15, + "name": "description", + "type": "string" + }, + { + "id": 16, + "name": 
"base_variation_id", + "type": "string" + }, + { + "id": 18, + "name": "status", + "type": "Status" + }, + { + "id": 19, + "name": "maintainer", + "type": "string" + }, + { + "id": 20, + "name": "archived", + "type": "bool" + } + ], + "reserved_ids": [ + 17 + ] + }, + { + "name": "Experiments", + "fields": [ + { + "id": 1, + "name": "experiments", + "type": "Experiment", + "is_repeated": true + } + ] + } + ], + "imports": [ + { + "path": "proto/feature/variation.proto" + } + ], + "package": { + "name": "bucketeer.experiment" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/experiment" + } + ] + } + }, + { + "protopath": "experiment:/:goal.proto", + "def": { + "messages": [ + { + "name": "Goal", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "name", + "type": "string" + }, + { + "id": 3, + "name": "description", + "type": "string" + }, + { + "id": 4, + "name": "deleted", + "type": "bool" + }, + { + "id": 5, + "name": "created_at", + "type": "int64" + }, + { + "id": 6, + "name": "updated_at", + "type": "int64" + }, + { + "id": 7, + "name": "is_in_use_status", + "type": "bool" + }, + { + "id": 8, + "name": "archived", + "type": "bool" + } + ] + } + ], + "package": { + "name": "bucketeer.experiment" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/experiment" + } + ] + } + }, + { + "protopath": "experiment:/:service.proto", + "def": { + "enums": [ + { + "name": "ListGoalsRequest.OrderBy", + "enum_fields": [ + { + "name": "DEFAULT" + }, + { + "name": "NAME", + "integer": 1 + }, + { + "name": "CREATED_AT", + "integer": 2 + }, + { + "name": "UPDATED_AT", + "integer": 3 + } + ] + }, + { + "name": "ListGoalsRequest.OrderDirection", + "enum_fields": [ + { + "name": "ASC" + }, + { + "name": "DESC", + "integer": 1 + } + ] + }, + { + "name": "ListExperimentsRequest.OrderBy", + "enum_fields": [ + { + "name": "DEFAULT" + }, + { + 
"name": "NAME", + "integer": 1 + }, + { + "name": "CREATED_AT", + "integer": 2 + }, + { + "name": "UPDATED_AT", + "integer": 3 + } + ] + }, + { + "name": "ListExperimentsRequest.OrderDirection", + "enum_fields": [ + { + "name": "ASC" + }, + { + "name": "DESC", + "integer": 1 + } + ] + } + ], + "messages": [ + { + "name": "GetGoalRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "GetGoalResponse", + "fields": [ + { + "id": 1, + "name": "goal", + "type": "Goal" + } + ] + }, + { + "name": "ListGoalsRequest", + "fields": [ + { + "id": 1, + "name": "page_size", + "type": "int64" + }, + { + "id": 2, + "name": "cursor", + "type": "string" + }, + { + "id": 3, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 4, + "name": "order_by", + "type": "OrderBy" + }, + { + "id": 5, + "name": "order_direction", + "type": "OrderDirection" + }, + { + "id": 6, + "name": "search_keyword", + "type": "string" + }, + { + "id": 7, + "name": "is_in_use_status", + "type": "google.protobuf.BoolValue" + }, + { + "id": 8, + "name": "archived", + "type": "google.protobuf.BoolValue" + } + ] + }, + { + "name": "ListGoalsResponse", + "fields": [ + { + "id": 1, + "name": "goals", + "type": "Goal", + "is_repeated": true + }, + { + "id": 2, + "name": "cursor", + "type": "string" + }, + { + "id": 3, + "name": "total_count", + "type": "int64" + } + ] + }, + { + "name": "CreateGoalRequest", + "fields": [ + { + "id": 1, + "name": "command", + "type": "CreateGoalCommand" + }, + { + "id": 2, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "CreateGoalResponse" + }, + { + "name": "ArchiveGoalRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "command", + "type": "ArchiveGoalCommand" + }, + { + "id": 3, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": 
"ArchiveGoalResponse" + }, + { + "name": "DeleteGoalRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "command", + "type": "DeleteGoalCommand" + }, + { + "id": 3, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "DeleteGoalResponse" + }, + { + "name": "UpdateGoalRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "rename_command", + "type": "RenameGoalCommand" + }, + { + "id": 3, + "name": "change_description_command", + "type": "ChangeDescriptionGoalCommand" + }, + { + "id": 4, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "UpdateGoalResponse" + }, + { + "name": "GetExperimentRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "GetExperimentResponse", + "fields": [ + { + "id": 1, + "name": "experiment", + "type": "Experiment" + } + ] + }, + { + "name": "ListExperimentsRequest", + "fields": [ + { + "id": 1, + "name": "feature_id", + "type": "string" + }, + { + "id": 2, + "name": "feature_version", + "type": "google.protobuf.Int32Value" + }, + { + "id": 3, + "name": "from", + "type": "int64" + }, + { + "id": 4, + "name": "to", + "type": "int64" + }, + { + "id": 5, + "name": "page_size", + "type": "int64" + }, + { + "id": 6, + "name": "cursor", + "type": "string" + }, + { + "id": 7, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 8, + "name": "status", + "type": "google.protobuf.Int32Value" + }, + { + "id": 9, + "name": "maintainer", + "type": "string" + }, + { + "id": 10, + "name": "order_by", + "type": "OrderBy" + }, + { + "id": 11, + "name": "order_direction", + "type": "OrderDirection" + }, + { + "id": 12, + "name": "search_keyword", + "type": "string" + }, + { + "id": 13, + "name": "archived", + "type": "google.protobuf.BoolValue" + }, + { + "id": 
14, + "name": "statuses", + "type": "Experiment.Status", + "is_repeated": true + } + ] + }, + { + "name": "ListExperimentsResponse", + "fields": [ + { + "id": 1, + "name": "experiments", + "type": "Experiment", + "is_repeated": true + }, + { + "id": 2, + "name": "cursor", + "type": "string" + }, + { + "id": 3, + "name": "total_count", + "type": "int64" + } + ] + }, + { + "name": "CreateExperimentRequest", + "fields": [ + { + "id": 1, + "name": "command", + "type": "CreateExperimentCommand" + }, + { + "id": 2, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "CreateExperimentResponse", + "fields": [ + { + "id": 1, + "name": "experiment", + "type": "Experiment" + } + ] + }, + { + "name": "UpdateExperimentRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 4, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 5, + "name": "change_experiment_period_command", + "type": "ChangeExperimentPeriodCommand" + }, + { + "id": 6, + "name": "change_name_command", + "type": "ChangeExperimentNameCommand" + }, + { + "id": 7, + "name": "change_description_command", + "type": "ChangeExperimentDescriptionCommand" + } + ], + "reserved_ids": [ + 2, + 3 + ] + }, + { + "name": "UpdateExperimentResponse" + }, + { + "name": "StartExperimentRequest", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "id", + "type": "string" + }, + { + "id": 3, + "name": "command", + "type": "StartExperimentCommand" + } + ] + }, + { + "name": "StartExperimentResponse" + }, + { + "name": "FinishExperimentRequest", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "id", + "type": "string" + }, + { + "id": 3, + "name": "command", + "type": "FinishExperimentCommand" + } + ] + }, + { + "name": "FinishExperimentResponse" + }, + { + "name": "StopExperimentRequest", + "fields": [ + { + "id": 1, + "name": 
"id", + "type": "string" + }, + { + "id": 2, + "name": "command", + "type": "StopExperimentCommand" + }, + { + "id": 3, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "StopExperimentResponse" + }, + { + "name": "ArchiveExperimentRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "command", + "type": "ArchiveExperimentCommand" + }, + { + "id": 3, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "ArchiveExperimentResponse" + }, + { + "name": "DeleteExperimentRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "command", + "type": "DeleteExperimentCommand" + }, + { + "id": 3, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "DeleteExperimentResponse" + } + ], + "services": [ + { + "name": "ExperimentService", + "rpcs": [ + { + "name": "GetGoal", + "in_type": "GetGoalRequest", + "out_type": "GetGoalResponse" + }, + { + "name": "ListGoals", + "in_type": "ListGoalsRequest", + "out_type": "ListGoalsResponse" + }, + { + "name": "CreateGoal", + "in_type": "CreateGoalRequest", + "out_type": "CreateGoalResponse" + }, + { + "name": "UpdateGoal", + "in_type": "UpdateGoalRequest", + "out_type": "UpdateGoalResponse" + }, + { + "name": "ArchiveGoal", + "in_type": "ArchiveGoalRequest", + "out_type": "ArchiveGoalResponse" + }, + { + "name": "DeleteGoal", + "in_type": "DeleteGoalRequest", + "out_type": "DeleteGoalResponse" + }, + { + "name": "GetExperiment", + "in_type": "GetExperimentRequest", + "out_type": "GetExperimentResponse" + }, + { + "name": "ListExperiments", + "in_type": "ListExperimentsRequest", + "out_type": "ListExperimentsResponse" + }, + { + "name": "CreateExperiment", + "in_type": "CreateExperimentRequest", + "out_type": "CreateExperimentResponse" + }, + { + "name": "UpdateExperiment", + "in_type": "UpdateExperimentRequest", + "out_type": "UpdateExperimentResponse" + 
}, + { + "name": "StartExperiment", + "in_type": "StartExperimentRequest", + "out_type": "StartExperimentResponse" + }, + { + "name": "FinishExperiment", + "in_type": "FinishExperimentRequest", + "out_type": "FinishExperimentResponse" + }, + { + "name": "StopExperiment", + "in_type": "StopExperimentRequest", + "out_type": "StopExperimentResponse" + }, + { + "name": "ArchiveExperiment", + "in_type": "ArchiveExperimentRequest", + "out_type": "ArchiveExperimentResponse" + }, + { + "name": "DeleteExperiment", + "in_type": "DeleteExperimentRequest", + "out_type": "DeleteExperimentResponse" + } + ] + } + ], + "imports": [ + { + "path": "google/protobuf/wrappers.proto" + }, + { + "path": "proto/experiment/command.proto" + }, + { + "path": "proto/experiment/goal.proto" + }, + { + "path": "proto/experiment/experiment.proto" + } + ], + "package": { + "name": "bucketeer.experiment" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/experiment" + } + ] + } + }, + { + "protopath": "feature:/:clause.proto", + "def": { + "enums": [ + { + "name": "Clause.Operator", + "enum_fields": [ + { + "name": "EQUALS" + }, + { + "name": "IN", + "integer": 1 + }, + { + "name": "ENDS_WITH", + "integer": 2 + }, + { + "name": "STARTS_WITH", + "integer": 3 + }, + { + "name": "SEGMENT", + "integer": 4 + }, + { + "name": "GREATER", + "integer": 5 + }, + { + "name": "GREATER_OR_EQUAL", + "integer": 6 + }, + { + "name": "LESS", + "integer": 7 + }, + { + "name": "LESS_OR_EQUAL", + "integer": 8 + }, + { + "name": "BEFORE", + "integer": 9 + }, + { + "name": "AFTER", + "integer": 10 + } + ] + } + ], + "messages": [ + { + "name": "Clause", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "attribute", + "type": "string" + }, + { + "id": 3, + "name": "operator", + "type": "Operator" + }, + { + "id": 4, + "name": "values", + "type": "string", + "is_repeated": true + } + ] + } + ], + "package": { + "name": 
"bucketeer.feature" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/feature" + } + ] + } + }, + { + "protopath": "feature:/:command.proto", + "def": { + "messages": [ + { + "name": "Command", + "fields": [ + { + "id": 1, + "name": "command", + "type": "google.protobuf.Any" + } + ] + }, + { + "name": "CreateFeatureCommand", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "name", + "type": "string" + }, + { + "id": 3, + "name": "description", + "type": "string" + }, + { + "id": 4, + "name": "variations", + "type": "Variation", + "is_repeated": true + }, + { + "id": 5, + "name": "tags", + "type": "string", + "is_repeated": true + }, + { + "id": 6, + "name": "default_on_variation_index", + "type": "google.protobuf.Int32Value" + }, + { + "id": 7, + "name": "default_off_variation_index", + "type": "google.protobuf.Int32Value" + }, + { + "id": 8, + "name": "variation_type", + "type": "Feature.VariationType" + } + ] + }, + { + "name": "ArchiveFeatureCommand" + }, + { + "name": "UnarchiveFeatureCommand" + }, + { + "name": "DeleteFeatureCommand" + }, + { + "name": "RenameFeatureCommand", + "fields": [ + { + "id": 1, + "name": "name", + "type": "string" + } + ] + }, + { + "name": "ChangeDescriptionCommand", + "fields": [ + { + "id": 1, + "name": "description", + "type": "string" + } + ] + }, + { + "name": "ChangeBulkUploadSegmentUsersStatusCommand", + "fields": [ + { + "id": 1, + "name": "status", + "type": "Segment.Status" + }, + { + "id": 2, + "name": "state", + "type": "SegmentUser.State" + }, + { + "id": 3, + "name": "count", + "type": "int64" + } + ] + }, + { + "name": "AddTagCommand", + "fields": [ + { + "id": 1, + "name": "tag", + "type": "string" + } + ] + }, + { + "name": "RemoveTagCommand", + "fields": [ + { + "id": 1, + "name": "tag", + "type": "string" + } + ] + }, + { + "name": "EnableFeatureCommand" + }, + { + "name": "DisableFeatureCommand" + }, + { + "name": 
"AddVariationCommand", + "fields": [ + { + "id": 1, + "name": "value", + "type": "string" + }, + { + "id": 2, + "name": "name", + "type": "string" + }, + { + "id": 3, + "name": "description", + "type": "string" + } + ] + }, + { + "name": "RemoveVariationCommand", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + } + ] + }, + { + "name": "ChangeVariationValueCommand", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "value", + "type": "string" + } + ] + }, + { + "name": "ChangeVariationNameCommand", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "name", + "type": "string" + } + ] + }, + { + "name": "ChangeVariationDescriptionCommand", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "description", + "type": "string" + } + ] + }, + { + "name": "ChangeOffVariationCommand", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + } + ] + }, + { + "name": "AddUserToVariationCommand", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "user", + "type": "string" + } + ] + }, + { + "name": "RemoveUserFromVariationCommand", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "user", + "type": "string" + } + ] + }, + { + "name": "ChangeDefaultStrategyCommand", + "fields": [ + { + "id": 1, + "name": "strategy", + "type": "Strategy" + } + ] + }, + { + "name": "AddRuleCommand", + "fields": [ + { + "id": 1, + "name": "rule", + "type": "Rule" + } + ] + }, + { + "name": "ChangeRuleStrategyCommand", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "rule_id", + "type": "string" + }, + { + "id": 3, + "name": "strategy", + "type": "Strategy" + } + ] + }, + { + "name": "DeleteRuleCommand", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + } + ] + }, + { + "name": "AddClauseCommand", 
+ "fields": [ + { + "id": 1, + "name": "rule_id", + "type": "string" + }, + { + "id": 2, + "name": "clause", + "type": "Clause" + } + ] + }, + { + "name": "DeleteClauseCommand", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "rule_id", + "type": "string" + } + ] + }, + { + "name": "ChangeClauseAttributeCommand", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "rule_id", + "type": "string" + }, + { + "id": 3, + "name": "attribute", + "type": "string" + } + ] + }, + { + "name": "ChangeClauseOperatorCommand", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "rule_id", + "type": "string" + }, + { + "id": 3, + "name": "operator", + "type": "Clause.Operator" + } + ] + }, + { + "name": "AddClauseValueCommand", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "rule_id", + "type": "string" + }, + { + "id": 3, + "name": "value", + "type": "string" + } + ] + }, + { + "name": "RemoveClauseValueCommand", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "rule_id", + "type": "string" + }, + { + "id": 3, + "name": "value", + "type": "string" + } + ] + }, + { + "name": "ChangeFixedStrategyCommand", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "rule_id", + "type": "string" + }, + { + "id": 3, + "name": "strategy", + "type": "FixedStrategy" + } + ] + }, + { + "name": "ChangeRolloutStrategyCommand", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "rule_id", + "type": "string" + }, + { + "id": 3, + "name": "strategy", + "type": "RolloutStrategy" + } + ] + }, + { + "name": "CreateSegmentCommand", + "fields": [ + { + "id": 1, + "name": "name", + "type": "string" + }, + { + "id": 2, + "name": "description", + "type": "string" + } + ] + }, + { + "name": 
"DeleteSegmentCommand" + }, + { + "name": "ChangeSegmentNameCommand", + "fields": [ + { + "id": 1, + "name": "name", + "type": "string" + } + ] + }, + { + "name": "ChangeSegmentDescriptionCommand", + "fields": [ + { + "id": 1, + "name": "description", + "type": "string" + } + ] + }, + { + "name": "AddSegmentUserCommand", + "fields": [ + { + "id": 1, + "name": "user_ids", + "type": "string", + "is_repeated": true + }, + { + "id": 2, + "name": "state", + "type": "bucketeer.feature.SegmentUser.State" + } + ] + }, + { + "name": "DeleteSegmentUserCommand", + "fields": [ + { + "id": 1, + "name": "user_ids", + "type": "string", + "is_repeated": true + }, + { + "id": 2, + "name": "state", + "type": "bucketeer.feature.SegmentUser.State" + } + ] + }, + { + "name": "BulkUploadSegmentUsersCommand", + "fields": [ + { + "id": 1, + "name": "data", + "type": "bytes" + }, + { + "id": 2, + "name": "state", + "type": "SegmentUser.State" + } + ] + }, + { + "name": "IncrementFeatureVersionCommand" + }, + { + "name": "CloneFeatureCommand", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "ResetSamplingSeedCommand" + }, + { + "name": "AddPrerequisiteCommand", + "fields": [ + { + "id": 1, + "name": "prerequisite", + "type": "Prerequisite" + } + ] + }, + { + "name": "RemovePrerequisiteCommand", + "fields": [ + { + "id": 1, + "name": "feature_id", + "type": "string" + } + ] + }, + { + "name": "ChangePrerequisiteVariationCommand", + "fields": [ + { + "id": 1, + "name": "prerequisite", + "type": "Prerequisite" + } + ] + } + ], + "imports": [ + { + "path": "google/protobuf/any.proto" + }, + { + "path": "google/protobuf/wrappers.proto" + }, + { + "path": "proto/feature/clause.proto" + }, + { + "path": "proto/feature/feature.proto" + }, + { + "path": "proto/feature/rule.proto" + }, + { + "path": "proto/feature/variation.proto" + }, + { + "path": "proto/feature/strategy.proto" + }, + { + "path": "proto/feature/segment.proto" + }, + { + 
"path": "proto/feature/prerequisite.proto" + } + ], + "package": { + "name": "bucketeer.feature" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/feature" + } + ] + } + }, + { + "protopath": "feature:/:evaluation.proto", + "def": { + "enums": [ + { + "name": "UserEvaluations.State", + "enum_fields": [ + { + "name": "QUEUED" + }, + { + "name": "PARTIAL", + "integer": 1 + }, + { + "name": "FULL", + "integer": 2 + } + ] + } + ], + "messages": [ + { + "name": "Evaluation", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "feature_id", + "type": "string" + }, + { + "id": 3, + "name": "feature_version", + "type": "int32" + }, + { + "id": 4, + "name": "user_id", + "type": "string" + }, + { + "id": 5, + "name": "variation_id", + "type": "string" + }, + { + "id": 6, + "name": "variation", + "type": "Variation", + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] + }, + { + "id": 7, + "name": "reason", + "type": "Reason" + }, + { + "id": 8, + "name": "variation_value", + "type": "string" + } + ] + }, + { + "name": "UserEvaluations", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "evaluations", + "type": "Evaluation", + "is_repeated": true + }, + { + "id": 3, + "name": "created_at", + "type": "int64" + } + ] + } + ], + "imports": [ + { + "path": "proto/feature/variation.proto" + }, + { + "path": "proto/feature/reason.proto" + } + ], + "package": { + "name": "bucketeer.feature" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/feature" + } + ] + } + }, + { + "protopath": "feature:/:feature.proto", + "def": { + "enums": [ + { + "name": "Feature.VariationType", + "enum_fields": [ + { + "name": "STRING" + }, + { + "name": "BOOLEAN", + "integer": 1 + }, + { + "name": "NUMBER", + "integer": 2 + }, + { + "name": "JSON", + "integer": 3 + } + ] + } + ], + "messages": [ + { + 
"name": "Feature", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "name", + "type": "string" + }, + { + "id": 3, + "name": "description", + "type": "string" + }, + { + "id": 4, + "name": "enabled", + "type": "bool" + }, + { + "id": 5, + "name": "deleted", + "type": "bool" + }, + { + "id": 6, + "name": "evaluation_undelayable", + "type": "bool", + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] + }, + { + "id": 7, + "name": "ttl", + "type": "int32" + }, + { + "id": 8, + "name": "version", + "type": "int32" + }, + { + "id": 9, + "name": "created_at", + "type": "int64" + }, + { + "id": 10, + "name": "updated_at", + "type": "int64" + }, + { + "id": 11, + "name": "variations", + "type": "Variation", + "is_repeated": true + }, + { + "id": 12, + "name": "targets", + "type": "Target", + "is_repeated": true + }, + { + "id": 13, + "name": "rules", + "type": "Rule", + "is_repeated": true + }, + { + "id": 14, + "name": "default_strategy", + "type": "Strategy" + }, + { + "id": 15, + "name": "off_variation", + "type": "string" + }, + { + "id": 16, + "name": "tags", + "type": "string", + "is_repeated": true + }, + { + "id": 17, + "name": "last_used_info", + "type": "FeatureLastUsedInfo" + }, + { + "id": 18, + "name": "maintainer", + "type": "string" + }, + { + "id": 19, + "name": "variation_type", + "type": "VariationType" + }, + { + "id": 20, + "name": "archived", + "type": "bool" + }, + { + "id": 21, + "name": "prerequisites", + "type": "Prerequisite", + "is_repeated": true + }, + { + "id": 22, + "name": "sampling_seed", + "type": "string" + } + ] + }, + { + "name": "Features", + "fields": [ + { + "id": 1, + "name": "features", + "type": "Feature", + "is_repeated": true + } + ] + }, + { + "name": "Tag", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "created_at", + "type": "int64" + }, + { + "id": 3, + "name": "updated_at", + "type": "int64" + } + ] + } + ], + 
"imports": [ + { + "path": "proto/feature/rule.proto" + }, + { + "path": "proto/feature/target.proto" + }, + { + "path": "proto/feature/variation.proto" + }, + { + "path": "proto/feature/strategy.proto" + }, + { + "path": "proto/feature/feature_last_used_info.proto" + }, + { + "path": "proto/feature/prerequisite.proto" + } + ], + "package": { + "name": "bucketeer.feature" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/feature" + } + ] + } + }, + { + "protopath": "feature:/:feature_last_used_info.proto", + "def": { + "messages": [ + { + "name": "FeatureLastUsedInfo", + "fields": [ + { + "id": 1, + "name": "feature_id", + "type": "string" + }, + { + "id": 2, + "name": "version", + "type": "int32" + }, + { + "id": 3, + "name": "last_used_at", + "type": "int64" + }, + { + "id": 4, + "name": "created_at", + "type": "int64" + }, + { + "id": 5, + "name": "client_oldest_version", + "type": "string" + }, + { + "id": 6, + "name": "client_latest_version", + "type": "string" + } + ] + } + ], + "package": { + "name": "bucketeer.feature" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/feature" + } + ] + } + }, + { + "protopath": "feature:/:prerequisite.proto", + "def": { + "messages": [ + { + "name": "Prerequisite", + "fields": [ + { + "id": 1, + "name": "feature_id", + "type": "string" + }, + { + "id": 2, + "name": "variation_id", + "type": "string" + } + ] + } + ], + "package": { + "name": "bucketeer.feature" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/feature" + } + ] + } + }, + { + "protopath": "feature:/:reason.proto", + "def": { + "enums": [ + { + "name": "Reason.Type", + "enum_fields": [ + { + "name": "TARGET" + }, + { + "name": "RULE", + "integer": 1 + }, + { + "name": "DEFAULT", + "integer": 3 + }, + { + "name": "CLIENT", + "integer": 4 + }, + { + "name": "OFF_VARIATION", + "integer": 5 + }, + { + "name": 
"PREREQUISITE", + "integer": 6 + } + ] + } + ], + "messages": [ + { + "name": "Reason", + "fields": [ + { + "id": 1, + "name": "type", + "type": "Type" + }, + { + "id": 2, + "name": "rule_id", + "type": "string" + } + ] + } + ], + "package": { + "name": "bucketeer.feature" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/feature" + } + ] + } + }, + { + "protopath": "feature:/:rule.proto", + "def": { + "messages": [ + { + "name": "Rule", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "strategy", + "type": "Strategy" + }, + { + "id": 3, + "name": "clauses", + "type": "Clause", + "is_repeated": true + } + ] + } + ], + "imports": [ + { + "path": "proto/feature/clause.proto" + }, + { + "path": "proto/feature/strategy.proto" + } + ], + "package": { + "name": "bucketeer.feature" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/feature" + } + ] + } + }, + { + "protopath": "feature:/:segment.proto", + "def": { + "enums": [ + { + "name": "Segment.Status", + "enum_fields": [ + { + "name": "INITIAL" + }, + { + "name": "UPLOADING", + "integer": 1 + }, + { + "name": "SUCEEDED", + "integer": 2 + }, + { + "name": "FAILED", + "integer": 3 + } + ] + }, + { + "name": "SegmentUser.State", + "enum_fields": [ + { + "name": "INCLUDED" + }, + { + "name": "EXCLUDED", + "integer": 1, + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] + } + ] + } + ], + "messages": [ + { + "name": "Segment", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "name", + "type": "string" + }, + { + "id": 3, + "name": "description", + "type": "string" + }, + { + "id": 4, + "name": "rules", + "type": "Rule", + "is_repeated": true + }, + { + "id": 5, + "name": "created_at", + "type": "int64" + }, + { + "id": 6, + "name": "updated_at", + "type": "int64" + }, + { + "id": 7, + "name": "version", + "type": 
"int64", + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] + }, + { + "id": 8, + "name": "deleted", + "type": "bool" + }, + { + "id": 9, + "name": "included_user_count", + "type": "int64" + }, + { + "id": 10, + "name": "excluded_user_count", + "type": "int64", + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] + }, + { + "id": 11, + "name": "status", + "type": "Status" + }, + { + "id": 12, + "name": "is_in_use_status", + "type": "bool" + } + ] + }, + { + "name": "SegmentUser", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "segment_id", + "type": "string" + }, + { + "id": 3, + "name": "user_id", + "type": "string" + }, + { + "id": 4, + "name": "state", + "type": "State" + }, + { + "id": 5, + "name": "deleted", + "type": "bool" + } + ] + }, + { + "name": "SegmentUsers", + "fields": [ + { + "id": 1, + "name": "segment_id", + "type": "string" + }, + { + "id": 2, + "name": "users", + "type": "SegmentUser", + "is_repeated": true + } + ] + } + ], + "imports": [ + { + "path": "proto/feature/rule.proto" + } + ], + "package": { + "name": "bucketeer.feature" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/feature" + } + ] + } + }, + { + "protopath": "feature:/:service.proto", + "def": { + "enums": [ + { + "name": "ListFeaturesRequest.OrderBy", + "enum_fields": [ + { + "name": "DEFAULT" + }, + { + "name": "NAME", + "integer": 1 + }, + { + "name": "CREATED_AT", + "integer": 2 + }, + { + "name": "UPDATED_AT", + "integer": 3 + }, + { + "name": "TAGS", + "integer": 4 + }, + { + "name": "ENABLED", + "integer": 5 + } + ] + }, + { + "name": "ListFeaturesRequest.OrderDirection", + "enum_fields": [ + { + "name": "ASC" + }, + { + "name": "DESC", + "integer": 1 + } + ] + }, + { + "name": "ListSegmentsRequest.OrderBy", + "enum_fields": [ + { + "name": "DEFAULT" + }, + { + "name": "NAME", + "integer": 1 + }, + { + "name": "CREATED_AT", + "integer": 2 + }, 
+ { + "name": "UPDATED_AT", + "integer": 3 + } + ] + }, + { + "name": "ListSegmentsRequest.OrderDirection", + "enum_fields": [ + { + "name": "ASC" + }, + { + "name": "DESC", + "integer": 1 + } + ] + }, + { + "name": "ListTagsRequest.OrderBy", + "enum_fields": [ + { + "name": "DEFAULT" + }, + { + "name": "ID", + "integer": 1 + }, + { + "name": "CREATED_AT", + "integer": 2 + }, + { + "name": "UPDATED_AT", + "integer": 3 + } + ] + }, + { + "name": "ListTagsRequest.OrderDirection", + "enum_fields": [ + { + "name": "ASC" + }, + { + "name": "DESC", + "integer": 1 + } + ] + } + ], + "messages": [ + { + "name": "GetFeatureRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "GetFeatureResponse", + "fields": [ + { + "id": 1, + "name": "feature", + "type": "Feature" + } + ] + }, + { + "name": "GetFeaturesRequest", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "ids", + "type": "string", + "is_repeated": true + } + ] + }, + { + "name": "GetFeaturesResponse", + "fields": [ + { + "id": 1, + "name": "features", + "type": "Feature", + "is_repeated": true + } + ] + }, + { + "name": "ListFeaturesRequest", + "fields": [ + { + "id": 1, + "name": "page_size", + "type": "int64" + }, + { + "id": 2, + "name": "cursor", + "type": "string" + }, + { + "id": 3, + "name": "tags", + "type": "string", + "is_repeated": true + }, + { + "id": 4, + "name": "order_by", + "type": "OrderBy" + }, + { + "id": 5, + "name": "order_direction", + "type": "OrderDirection" + }, + { + "id": 6, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 7, + "name": "maintainer", + "type": "string" + }, + { + "id": 8, + "name": "enabled", + "type": "google.protobuf.BoolValue" + }, + { + "id": 9, + "name": "has_experiment", + "type": "google.protobuf.BoolValue" + }, + { + "id": 10, + "name": "search_keyword", + 
"type": "string" + }, + { + "id": 11, + "name": "archived", + "type": "google.protobuf.BoolValue" + } + ] + }, + { + "name": "ListFeaturesResponse", + "fields": [ + { + "id": 1, + "name": "features", + "type": "Feature", + "is_repeated": true + }, + { + "id": 2, + "name": "cursor", + "type": "string" + }, + { + "id": 3, + "name": "total_count", + "type": "int64" + } + ] + }, + { + "name": "ListEnabledFeaturesRequest", + "fields": [ + { + "id": 1, + "name": "page_size", + "type": "int64" + }, + { + "id": 2, + "name": "cursor", + "type": "string" + }, + { + "id": 3, + "name": "tags", + "type": "string", + "is_repeated": true + }, + { + "id": 4, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "ListEnabledFeaturesResponse", + "fields": [ + { + "id": 1, + "name": "features", + "type": "Feature", + "is_repeated": true + }, + { + "id": 2, + "name": "cursor", + "type": "string" + } + ] + }, + { + "name": "CreateFeatureRequest", + "fields": [ + { + "id": 1, + "name": "command", + "type": "CreateFeatureCommand" + }, + { + "id": 2, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "CreateFeatureResponse" + }, + { + "name": "EnableFeatureRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "command", + "type": "EnableFeatureCommand" + }, + { + "id": 3, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 4, + "name": "comment", + "type": "string" + } + ] + }, + { + "name": "EnableFeatureResponse" + }, + { + "name": "DisableFeatureRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "command", + "type": "DisableFeatureCommand" + }, + { + "id": 3, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 4, + "name": "comment", + "type": "string" + } + ] + }, + { + "name": "DisableFeatureResponse" + }, + { + "name": "ArchiveFeatureRequest", + "fields": [ + { + "id": 1, + "name": "id", 
+ "type": "string" + }, + { + "id": 2, + "name": "command", + "type": "ArchiveFeatureCommand" + }, + { + "id": 3, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 4, + "name": "comment", + "type": "string" + } + ] + }, + { + "name": "ArchiveFeatureResponse" + }, + { + "name": "UnarchiveFeatureRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "command", + "type": "UnarchiveFeatureCommand" + }, + { + "id": 3, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 4, + "name": "comment", + "type": "string" + } + ] + }, + { + "name": "UnarchiveFeatureResponse" + }, + { + "name": "DeleteFeatureRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "command", + "type": "DeleteFeatureCommand" + }, + { + "id": 3, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 4, + "name": "comment", + "type": "string" + } + ] + }, + { + "name": "DeleteFeatureResponse" + }, + { + "name": "UpdateFeatureDetailsRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "rename_feature_command", + "type": "RenameFeatureCommand" + }, + { + "id": 3, + "name": "change_description_command", + "type": "ChangeDescriptionCommand" + }, + { + "id": 4, + "name": "add_tag_commands", + "type": "AddTagCommand", + "is_repeated": true + }, + { + "id": 5, + "name": "remove_tag_commands", + "type": "RemoveTagCommand", + "is_repeated": true + }, + { + "id": 6, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 7, + "name": "comment", + "type": "string" + } + ] + }, + { + "name": "UpdateFeatureDetailsResponse" + }, + { + "name": "UpdateFeatureVariationsRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "commands", + "type": "Command", + "is_repeated": true + }, + { + "id": 3, + "name": "environment_namespace", + "type": "string" + }, + 
{ + "id": 4, + "name": "comment", + "type": "string" + } + ] + }, + { + "name": "UpdateFeatureVariationsResponse" + }, + { + "name": "UpdateFeatureTargetingRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "commands", + "type": "Command", + "is_repeated": true + }, + { + "id": 3, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 4, + "name": "comment", + "type": "string" + } + ] + }, + { + "name": "UpdateFeatureTargetingResponse" + }, + { + "name": "CloneFeatureRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "command", + "type": "CloneFeatureCommand" + }, + { + "id": 3, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "CloneFeatureResponse" + }, + { + "name": "CreateSegmentRequest", + "fields": [ + { + "id": 1, + "name": "command", + "type": "CreateSegmentCommand" + }, + { + "id": 2, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "CreateSegmentResponse", + "fields": [ + { + "id": 1, + "name": "segment", + "type": "Segment" + } + ] + }, + { + "name": "GetSegmentRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "GetSegmentResponse", + "fields": [ + { + "id": 1, + "name": "segment", + "type": "Segment" + } + ] + }, + { + "name": "ListSegmentsRequest", + "fields": [ + { + "id": 1, + "name": "page_size", + "type": "int64" + }, + { + "id": 2, + "name": "cursor", + "type": "string" + }, + { + "id": 3, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 4, + "name": "order_by", + "type": "OrderBy" + }, + { + "id": 5, + "name": "order_direction", + "type": "OrderDirection" + }, + { + "id": 6, + "name": "search_keyword", + "type": "string" + }, + { + "id": 7, + "name": "status", + "type": "google.protobuf.Int32Value" + }, + { + "id": 8, + 
"name": "is_in_use_status", + "type": "google.protobuf.BoolValue" + } + ] + }, + { + "name": "ListSegmentsResponse", + "fields": [ + { + "id": 1, + "name": "segments", + "type": "Segment", + "is_repeated": true + }, + { + "id": 2, + "name": "cursor", + "type": "string" + }, + { + "id": 3, + "name": "total_count", + "type": "int64" + } + ] + }, + { + "name": "DeleteSegmentRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "command", + "type": "DeleteSegmentCommand" + }, + { + "id": 3, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "DeleteSegmentResponse" + }, + { + "name": "UpdateSegmentRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "commands", + "type": "Command", + "is_repeated": true + }, + { + "id": 3, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "UpdateSegmentResponse" + }, + { + "name": "AddSegmentUserRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "command", + "type": "AddSegmentUserCommand" + }, + { + "id": 3, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "AddSegmentUserResponse" + }, + { + "name": "DeleteSegmentUserRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "command", + "type": "DeleteSegmentUserCommand" + }, + { + "id": 3, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "DeleteSegmentUserResponse" + }, + { + "name": "GetSegmentUserRequest", + "fields": [ + { + "id": 1, + "name": "segment_id", + "type": "string" + }, + { + "id": 2, + "name": "user_id", + "type": "string" + }, + { + "id": 3, + "name": "state", + "type": "SegmentUser.State" + }, + { + "id": 4, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "GetSegmentUserResponse", + "fields": [ + { + "id": 1, + 
"name": "user", + "type": "SegmentUser" + } + ] + }, + { + "name": "ListSegmentUsersRequest", + "fields": [ + { + "id": 1, + "name": "page_size", + "type": "int64" + }, + { + "id": 2, + "name": "cursor", + "type": "string" + }, + { + "id": 3, + "name": "segment_id", + "type": "string" + }, + { + "id": 4, + "name": "state", + "type": "google.protobuf.Int32Value" + }, + { + "id": 5, + "name": "user_id", + "type": "string" + }, + { + "id": 6, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "ListSegmentUsersResponse", + "fields": [ + { + "id": 1, + "name": "users", + "type": "SegmentUser", + "is_repeated": true + }, + { + "id": 2, + "name": "cursor", + "type": "string" + } + ] + }, + { + "name": "BulkUploadSegmentUsersRequest", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "segment_id", + "type": "string" + }, + { + "id": 3, + "name": "command", + "type": "BulkUploadSegmentUsersCommand" + } + ] + }, + { + "name": "BulkUploadSegmentUsersResponse" + }, + { + "name": "BulkDownloadSegmentUsersRequest", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "segment_id", + "type": "string" + }, + { + "id": 3, + "name": "state", + "type": "SegmentUser.State" + } + ] + }, + { + "name": "BulkDownloadSegmentUsersResponse", + "fields": [ + { + "id": 1, + "name": "data", + "type": "bytes" + } + ] + }, + { + "name": "EvaluateFeaturesRequest", + "fields": [ + { + "id": 1, + "name": "user", + "type": "bucketeer.user.User" + }, + { + "id": 2, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 3, + "name": "tag", + "type": "string" + } + ] + }, + { + "name": "EvaluateFeaturesResponse", + "fields": [ + { + "id": 1, + "name": "user_evaluations", + "type": "bucketeer.feature.UserEvaluations" + } + ] + }, + { + "name": "GetUserEvaluationsRequest", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + 
"type": "string" + }, + { + "id": 2, + "name": "tag", + "type": "string" + }, + { + "id": 3, + "name": "user_id", + "type": "string" + } + ] + }, + { + "name": "GetUserEvaluationsResponse", + "fields": [ + { + "id": 1, + "name": "evaluations", + "type": "bucketeer.feature.Evaluation", + "is_repeated": true + } + ] + }, + { + "name": "UpsertUserEvaluationRequest", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "tag", + "type": "string" + }, + { + "id": 3, + "name": "evaluation", + "type": "bucketeer.feature.Evaluation" + } + ] + }, + { + "name": "ListTagsRequest", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "page_size", + "type": "int64" + }, + { + "id": 3, + "name": "cursor", + "type": "string" + }, + { + "id": 4, + "name": "order_by", + "type": "OrderBy" + }, + { + "id": 5, + "name": "order_direction", + "type": "OrderDirection" + }, + { + "id": 6, + "name": "search_keyword", + "type": "string" + } + ] + }, + { + "name": "ListTagsResponse", + "fields": [ + { + "id": 1, + "name": "tags", + "type": "Tag", + "is_repeated": true + }, + { + "id": 2, + "name": "cursor", + "type": "string" + }, + { + "id": 3, + "name": "total_count", + "type": "int64" + } + ] + }, + { + "name": "UpsertUserEvaluationResponse" + } + ], + "services": [ + { + "name": "FeatureService", + "rpcs": [ + { + "name": "GetFeature", + "in_type": "GetFeatureRequest", + "out_type": "GetFeatureResponse" + }, + { + "name": "GetFeatures", + "in_type": "GetFeaturesRequest", + "out_type": "GetFeaturesResponse" + }, + { + "name": "ListFeatures", + "in_type": "ListFeaturesRequest", + "out_type": "ListFeaturesResponse" + }, + { + "name": "ListEnabledFeatures", + "in_type": "ListEnabledFeaturesRequest", + "out_type": "ListEnabledFeaturesResponse" + }, + { + "name": "CreateFeature", + "in_type": "CreateFeatureRequest", + "out_type": "CreateFeatureResponse" + }, + { + "name": 
"EnableFeature", + "in_type": "EnableFeatureRequest", + "out_type": "EnableFeatureResponse", + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] + }, + { + "name": "DisableFeature", + "in_type": "DisableFeatureRequest", + "out_type": "DisableFeatureResponse", + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] + }, + { + "name": "ArchiveFeature", + "in_type": "ArchiveFeatureRequest", + "out_type": "ArchiveFeatureResponse" + }, + { + "name": "UnarchiveFeature", + "in_type": "UnarchiveFeatureRequest", + "out_type": "UnarchiveFeatureResponse" + }, + { + "name": "DeleteFeature", + "in_type": "DeleteFeatureRequest", + "out_type": "DeleteFeatureResponse" + }, + { + "name": "UpdateFeatureDetails", + "in_type": "UpdateFeatureDetailsRequest", + "out_type": "UpdateFeatureDetailsResponse" + }, + { + "name": "UpdateFeatureVariations", + "in_type": "UpdateFeatureVariationsRequest", + "out_type": "UpdateFeatureVariationsResponse" + }, + { + "name": "UpdateFeatureTargeting", + "in_type": "UpdateFeatureTargetingRequest", + "out_type": "UpdateFeatureTargetingResponse" + }, + { + "name": "CloneFeature", + "in_type": "CloneFeatureRequest", + "out_type": "CloneFeatureResponse" + }, + { + "name": "CreateSegment", + "in_type": "CreateSegmentRequest", + "out_type": "CreateSegmentResponse" + }, + { + "name": "GetSegment", + "in_type": "GetSegmentRequest", + "out_type": "GetSegmentResponse" + }, + { + "name": "ListSegments", + "in_type": "ListSegmentsRequest", + "out_type": "ListSegmentsResponse" + }, + { + "name": "DeleteSegment", + "in_type": "DeleteSegmentRequest", + "out_type": "DeleteSegmentResponse" + }, + { + "name": "UpdateSegment", + "in_type": "UpdateSegmentRequest", + "out_type": "UpdateSegmentResponse" + }, + { + "name": "AddSegmentUser", + "in_type": "AddSegmentUserRequest", + "out_type": "AddSegmentUserResponse", + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] + }, + { + "name": "DeleteSegmentUser", + "in_type": 
"DeleteSegmentUserRequest", + "out_type": "DeleteSegmentUserResponse", + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] + }, + { + "name": "GetSegmentUser", + "in_type": "GetSegmentUserRequest", + "out_type": "GetSegmentUserResponse", + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] + }, + { + "name": "ListSegmentUsers", + "in_type": "ListSegmentUsersRequest", + "out_type": "ListSegmentUsersResponse" + }, + { + "name": "BulkUploadSegmentUsers", + "in_type": "BulkUploadSegmentUsersRequest", + "out_type": "BulkUploadSegmentUsersResponse" + }, + { + "name": "BulkDownloadSegmentUsers", + "in_type": "BulkDownloadSegmentUsersRequest", + "out_type": "BulkDownloadSegmentUsersResponse" + }, + { + "name": "EvaluateFeatures", + "in_type": "EvaluateFeaturesRequest", + "out_type": "EvaluateFeaturesResponse" + }, + { + "name": "GetUserEvaluations", + "in_type": "GetUserEvaluationsRequest", + "out_type": "GetUserEvaluationsResponse" + }, + { + "name": "UpsertUserEvaluation", + "in_type": "UpsertUserEvaluationRequest", + "out_type": "UpsertUserEvaluationResponse" + }, + { + "name": "ListTags", + "in_type": "ListTagsRequest", + "out_type": "ListTagsResponse" + } + ] + } + ], + "imports": [ + { + "path": "google/protobuf/wrappers.proto" + }, + { + "path": "proto/feature/command.proto" + }, + { + "path": "proto/feature/feature.proto" + }, + { + "path": "proto/feature/evaluation.proto" + }, + { + "path": "proto/user/user.proto" + }, + { + "path": "proto/feature/segment.proto" + } + ], + "package": { + "name": "bucketeer.feature" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/feature" + } + ] + } + }, + { + "protopath": "feature:/:strategy.proto", + "def": { + "enums": [ + { + "name": "Strategy.Type", + "enum_fields": [ + { + "name": "FIXED" + }, + { + "name": "ROLLOUT", + "integer": 1 + } + ] + } + ], + "messages": [ + { + "name": "FixedStrategy", + "fields": [ + { + "id": 1, + "name": 
"variation", + "type": "string" + } + ] + }, + { + "name": "RolloutStrategy", + "fields": [ + { + "id": 1, + "name": "variations", + "type": "Variation", + "is_repeated": true + } + ], + "messages": [ + { + "name": "Variation", + "fields": [ + { + "id": 1, + "name": "variation", + "type": "string" + }, + { + "id": 2, + "name": "weight", + "type": "int32" + } + ] + } + ] + }, + { + "name": "Strategy", + "fields": [ + { + "id": 1, + "name": "type", + "type": "Type" + }, + { + "id": 2, + "name": "fixed_strategy", + "type": "FixedStrategy" + }, + { + "id": 3, + "name": "rollout_strategy", + "type": "RolloutStrategy" + } + ] + } + ], + "package": { + "name": "bucketeer.feature" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/feature" + } + ] + } + }, + { + "protopath": "feature:/:target.proto", + "def": { + "messages": [ + { + "name": "Target", + "fields": [ + { + "id": 1, + "name": "variation", + "type": "string" + }, + { + "id": 2, + "name": "users", + "type": "string", + "is_repeated": true + } + ] + } + ], + "package": { + "name": "bucketeer.feature" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/feature" + } + ] + } + }, + { + "protopath": "feature:/:variation.proto", + "def": { + "messages": [ + { + "name": "Variation", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "value", + "type": "string" + }, + { + "id": 3, + "name": "name", + "type": "string" + }, + { + "id": 4, + "name": "description", + "type": "string" + } + ] + } + ], + "package": { + "name": "bucketeer.feature" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/feature" + } + ] + } + }, + { + "protopath": "gateway:/:service.proto", + "def": { + "messages": [ + { + "name": "PingRequest" + }, + { + "name": "PingResponse", + "fields": [ + { + "id": 1, + "name": "time", + "type": "int64" + } + ] + }, + { + 
"name": "GetEvaluationsRequest", + "fields": [ + { + "id": 1, + "name": "tag", + "type": "string" + }, + { + "id": 2, + "name": "user", + "type": "user.User" + }, + { + "id": 3, + "name": "user_evaluations_id", + "type": "string" + }, + { + "id": 4, + "name": "feature_id", + "type": "string", + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] + }, + { + "id": 5, + "name": "source_id", + "type": "bucketeer.event.client.SourceId" + } + ] + }, + { + "name": "GetEvaluationsResponse", + "fields": [ + { + "id": 1, + "name": "state", + "type": "feature.UserEvaluations.State" + }, + { + "id": 2, + "name": "evaluations", + "type": "feature.UserEvaluations" + }, + { + "id": 3, + "name": "user_evaluations_id", + "type": "string" + } + ] + }, + { + "name": "GetEvaluationRequest", + "fields": [ + { + "id": 1, + "name": "tag", + "type": "string" + }, + { + "id": 2, + "name": "user", + "type": "user.User" + }, + { + "id": 3, + "name": "feature_id", + "type": "string" + }, + { + "id": 4, + "name": "source_id", + "type": "bucketeer.event.client.SourceId" + } + ] + }, + { + "name": "GetEvaluationResponse", + "fields": [ + { + "id": 1, + "name": "evaluation", + "type": "feature.Evaluation" + } + ] + }, + { + "name": "RegisterEventsRequest", + "fields": [ + { + "id": 1, + "name": "events", + "type": "bucketeer.event.client.Event", + "is_repeated": true + } + ] + }, + { + "name": "RegisterEventsResponse", + "maps": [ + { + "key_type": "string", + "field": { + "id": 1, + "name": "errors", + "type": "Error" + } + } + ], + "messages": [ + { + "name": "Error", + "fields": [ + { + "id": 1, + "name": "retriable", + "type": "bool" + }, + { + "id": 2, + "name": "message", + "type": "string" + } + ] + } + ] + } + ], + "services": [ + { + "name": "Gateway", + "rpcs": [ + { + "name": "Ping", + "in_type": "PingRequest", + "out_type": "PingResponse", + "options": [ + { + "name": "(google.api.http)", + "aggregated": [ + { + "name": "post", + "value": "/ping" + }, + { + "name": 
"body", + "value": "*" + } + ] + } + ] + }, + { + "name": "GetEvaluations", + "in_type": "GetEvaluationsRequest", + "out_type": "GetEvaluationsResponse", + "options": [ + { + "name": "(google.api.http)", + "aggregated": [ + { + "name": "post", + "value": "/get_evaluations" + }, + { + "name": "body", + "value": "*" + } + ] + } + ] + }, + { + "name": "GetEvaluation", + "in_type": "GetEvaluationRequest", + "out_type": "GetEvaluationResponse", + "options": [ + { + "name": "(google.api.http)", + "aggregated": [ + { + "name": "post", + "value": "/get_evaluation" + }, + { + "name": "body", + "value": "*" + } + ] + } + ] + }, + { + "name": "RegisterEvents", + "in_type": "RegisterEventsRequest", + "out_type": "RegisterEventsResponse", + "options": [ + { + "name": "(google.api.http)", + "aggregated": [ + { + "name": "post", + "value": "/register_events" + }, + { + "name": "body", + "value": "*" + } + ] + } + ] + } + ] + } + ], + "imports": [ + { + "path": "google/api/annotations.proto" + }, + { + "path": "proto/user/user.proto" + }, + { + "path": "proto/feature/evaluation.proto" + }, + { + "path": "proto/event/client/event.proto" + } + ], + "package": { + "name": "bucketeer.gateway" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/gateway" + } + ] + } + }, + { + "protopath": "migration:/:mysql_service.proto", + "def": { + "messages": [ + { + "name": "MigrateAllMasterSchemaRequest" + }, + { + "name": "MigrateAllMasterSchemaResponse" + }, + { + "name": "RollbackMasterSchemaRequest", + "fields": [ + { + "id": 1, + "name": "step", + "type": "int64" + } + ] + }, + { + "name": "RollbackMasterSchemaResponse" + } + ], + "services": [ + { + "name": "MigrationMySQLService", + "rpcs": [ + { + "name": "MigrateAllMasterSchema", + "in_type": "MigrateAllMasterSchemaRequest", + "out_type": "MigrateAllMasterSchemaResponse" + }, + { + "name": "RollbackMasterSchema", + "in_type": "RollbackMasterSchemaRequest", + "out_type": 
"RollbackMasterSchemaResponse" + } + ] + } + ], + "package": { + "name": "bucketeer.migration" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/migration" + } + ] + } + }, + { + "protopath": "notification:/:command.proto", + "def": { + "messages": [ + { + "name": "CreateAdminSubscriptionCommand", + "fields": [ + { + "id": 1, + "name": "source_types", + "type": "Subscription.SourceType", + "is_repeated": true + }, + { + "id": 2, + "name": "recipient", + "type": "Recipient" + }, + { + "id": 3, + "name": "name", + "type": "string" + } + ] + }, + { + "name": "AddAdminSubscriptionSourceTypesCommand", + "fields": [ + { + "id": 1, + "name": "source_types", + "type": "Subscription.SourceType", + "is_repeated": true + } + ] + }, + { + "name": "DeleteAdminSubscriptionSourceTypesCommand", + "fields": [ + { + "id": 1, + "name": "source_types", + "type": "Subscription.SourceType", + "is_repeated": true + } + ] + }, + { + "name": "EnableAdminSubscriptionCommand" + }, + { + "name": "DisableAdminSubscriptionCommand" + }, + { + "name": "DeleteAdminSubscriptionCommand" + }, + { + "name": "RenameAdminSubscriptionCommand", + "fields": [ + { + "id": 1, + "name": "name", + "type": "string" + } + ] + }, + { + "name": "CreateSubscriptionCommand", + "fields": [ + { + "id": 1, + "name": "source_types", + "type": "Subscription.SourceType", + "is_repeated": true + }, + { + "id": 2, + "name": "recipient", + "type": "Recipient" + }, + { + "id": 3, + "name": "name", + "type": "string" + } + ] + }, + { + "name": "AddSourceTypesCommand", + "fields": [ + { + "id": 1, + "name": "source_types", + "type": "Subscription.SourceType", + "is_repeated": true + } + ] + }, + { + "name": "DeleteSourceTypesCommand", + "fields": [ + { + "id": 1, + "name": "source_types", + "type": "Subscription.SourceType", + "is_repeated": true + } + ] + }, + { + "name": "EnableSubscriptionCommand" + }, + { + "name": "DisableSubscriptionCommand" + }, + { + "name": 
"DeleteSubscriptionCommand" + }, + { + "name": "RenameSubscriptionCommand", + "fields": [ + { + "id": 1, + "name": "name", + "type": "string" + } + ] + } + ], + "imports": [ + { + "path": "proto/notification/subscription.proto" + }, + { + "path": "proto/notification/recipient.proto" + } + ], + "package": { + "name": "bucketeer.notification" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/notification" + } + ] + } + }, + { + "protopath": "notification:/:recipient.proto", + "def": { + "enums": [ + { + "name": "Recipient.Type", + "enum_fields": [ + { + "name": "SlackChannel" + } + ] + } + ], + "messages": [ + { + "name": "Recipient", + "fields": [ + { + "id": 1, + "name": "type", + "type": "Type" + }, + { + "id": 2, + "name": "slack_channel_recipient", + "type": "SlackChannelRecipient" + } + ] + }, + { + "name": "SlackChannelRecipient", + "fields": [ + { + "id": 1, + "name": "webhook_url", + "type": "string" + } + ] + } + ], + "package": { + "name": "bucketeer.notification" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/notification" + } + ] + } + }, + { + "protopath": "notification:/:sender:/:notification.proto", + "def": { + "enums": [ + { + "name": "Notification.Type", + "enum_fields": [ + { + "name": "DomainEvent" + }, + { + "name": "FeatureStale", + "integer": 1 + }, + { + "name": "ExperimentRunning", + "integer": 2 + }, + { + "name": "MauCount", + "integer": 3 + } + ] + } + ], + "messages": [ + { + "name": "Notification", + "fields": [ + { + "id": 1, + "name": "type", + "type": "Type" + }, + { + "id": 2, + "name": "domain_event_notification", + "type": "DomainEventNotification" + }, + { + "id": 3, + "name": "feature_stale_notification", + "type": "FeatureStaleNotification" + }, + { + "id": 4, + "name": "experiment_running_notification", + "type": "ExperimentRunningNotification" + }, + { + "id": 5, + "name": "mau_count_notification", + "type": 
"MauCountNotification" + } + ] + }, + { + "name": "DomainEventNotification", + "fields": [ + { + "id": 2, + "name": "editor", + "type": "bucketeer.event.domain.Editor" + }, + { + "id": 3, + "name": "entity_type", + "type": "bucketeer.event.domain.Event.EntityType" + }, + { + "id": 4, + "name": "entity_id", + "type": "string" + }, + { + "id": 5, + "name": "type", + "type": "bucketeer.event.domain.Event.Type" + }, + { + "id": 6, + "name": "environment_id", + "type": "string" + } + ], + "reserved_ids": [ + 1 + ] + }, + { + "name": "FeatureStaleNotification", + "fields": [ + { + "id": 2, + "name": "features", + "type": "bucketeer.feature.Feature", + "is_repeated": true + }, + { + "id": 3, + "name": "environment_id", + "type": "string" + } + ], + "reserved_ids": [ + 1 + ] + }, + { + "name": "ExperimentRunningNotification", + "fields": [ + { + "id": 2, + "name": "environment_id", + "type": "string" + }, + { + "id": 3, + "name": "experiments", + "type": "bucketeer.experiment.Experiment", + "is_repeated": true + } + ], + "reserved_ids": [ + 1 + ] + }, + { + "name": "MauCountNotification", + "fields": [ + { + "id": 1, + "name": "environment_id", + "type": "string" + }, + { + "id": 2, + "name": "event_count", + "type": "int64" + }, + { + "id": 3, + "name": "user_count", + "type": "int64" + }, + { + "id": 4, + "name": "month", + "type": "int32" + } + ] + } + ], + "imports": [ + { + "path": "proto/event/domain/event.proto" + }, + { + "path": "proto/feature/feature.proto" + }, + { + "path": "proto/experiment/experiment.proto" + } + ], + "package": { + "name": "bucketeer.notification.sender" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/notification/sender" + } + ] + } + }, + { + "protopath": "notification:/:sender:/:notification_event.proto", + "def": { + "messages": [ + { + "name": "NotificationEvent", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "environment_namespace", + 
"type": "string" + }, + { + "id": 3, + "name": "source_type", + "type": "bucketeer.notification.Subscription.SourceType" + }, + { + "id": 4, + "name": "notification", + "type": "Notification" + }, + { + "id": 5, + "name": "is_admin_event", + "type": "bool" + } + ] + } + ], + "imports": [ + { + "path": "proto/notification/sender/notification.proto" + }, + { + "path": "proto/notification/subscription.proto" + } + ], + "package": { + "name": "bucketeer.notification.sender" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/notification/sender" + } + ] + } + }, + { + "protopath": "notification:/:service.proto", + "def": { + "enums": [ + { + "name": "ListAdminSubscriptionsRequest.OrderBy", + "enum_fields": [ + { + "name": "DEFAULT" + }, + { + "name": "NAME", + "integer": 1 + }, + { + "name": "CREATED_AT", + "integer": 2 + }, + { + "name": "UPDATED_AT", + "integer": 3 + } + ] + }, + { + "name": "ListAdminSubscriptionsRequest.OrderDirection", + "enum_fields": [ + { + "name": "ASC" + }, + { + "name": "DESC", + "integer": 1 + } + ] + }, + { + "name": "ListSubscriptionsRequest.OrderBy", + "enum_fields": [ + { + "name": "DEFAULT" + }, + { + "name": "NAME", + "integer": 1 + }, + { + "name": "CREATED_AT", + "integer": 2 + }, + { + "name": "UPDATED_AT", + "integer": 3 + } + ] + }, + { + "name": "ListSubscriptionsRequest.OrderDirection", + "enum_fields": [ + { + "name": "ASC" + }, + { + "name": "DESC", + "integer": 1 + } + ] + } + ], + "messages": [ + { + "name": "GetAdminSubscriptionRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + } + ] + }, + { + "name": "GetAdminSubscriptionResponse", + "fields": [ + { + "id": 1, + "name": "subscription", + "type": "Subscription" + } + ] + }, + { + "name": "ListAdminSubscriptionsRequest", + "fields": [ + { + "id": 1, + "name": "page_size", + "type": "int64" + }, + { + "id": 2, + "name": "cursor", + "type": "string" + }, + { + "id": 3, + "name": "source_types", + 
"type": "Subscription.SourceType", + "is_repeated": true + }, + { + "id": 4, + "name": "order_by", + "type": "OrderBy" + }, + { + "id": 5, + "name": "order_direction", + "type": "OrderDirection" + }, + { + "id": 6, + "name": "search_keyword", + "type": "string" + }, + { + "id": 7, + "name": "disabled", + "type": "google.protobuf.BoolValue" + } + ] + }, + { + "name": "ListAdminSubscriptionsResponse", + "fields": [ + { + "id": 1, + "name": "subscriptions", + "type": "Subscription", + "is_repeated": true + }, + { + "id": 2, + "name": "cursor", + "type": "string" + }, + { + "id": 3, + "name": "total_count", + "type": "int64" + } + ] + }, + { + "name": "ListEnabledAdminSubscriptionsRequest", + "fields": [ + { + "id": 1, + "name": "page_size", + "type": "int64" + }, + { + "id": 2, + "name": "cursor", + "type": "string" + }, + { + "id": 3, + "name": "source_types", + "type": "Subscription.SourceType", + "is_repeated": true + } + ] + }, + { + "name": "ListEnabledAdminSubscriptionsResponse", + "fields": [ + { + "id": 1, + "name": "subscriptions", + "type": "Subscription", + "is_repeated": true + }, + { + "id": 2, + "name": "cursor", + "type": "string" + } + ] + }, + { + "name": "CreateAdminSubscriptionRequest", + "fields": [ + { + "id": 1, + "name": "command", + "type": "CreateAdminSubscriptionCommand" + } + ] + }, + { + "name": "CreateAdminSubscriptionResponse" + }, + { + "name": "DeleteAdminSubscriptionRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "command", + "type": "DeleteAdminSubscriptionCommand" + } + ] + }, + { + "name": "DeleteAdminSubscriptionResponse" + }, + { + "name": "EnableAdminSubscriptionRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "command", + "type": "EnableAdminSubscriptionCommand" + } + ] + }, + { + "name": "EnableAdminSubscriptionResponse" + }, + { + "name": "DisableAdminSubscriptionRequest", + "fields": [ + { + "id": 1, + "name": "id", 
+ "type": "string" + }, + { + "id": 2, + "name": "command", + "type": "DisableAdminSubscriptionCommand" + } + ] + }, + { + "name": "DisableAdminSubscriptionResponse" + }, + { + "name": "UpdateAdminSubscriptionRequest", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "add_source_types_command", + "type": "AddAdminSubscriptionSourceTypesCommand" + }, + { + "id": 3, + "name": "delete_source_types_command", + "type": "DeleteAdminSubscriptionSourceTypesCommand" + }, + { + "id": 4, + "name": "rename_subscription_command", + "type": "RenameAdminSubscriptionCommand" + } + ] + }, + { + "name": "UpdateAdminSubscriptionResponse" + }, + { + "name": "GetSubscriptionRequest", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "id", + "type": "string" + } + ] + }, + { + "name": "GetSubscriptionResponse", + "fields": [ + { + "id": 1, + "name": "subscription", + "type": "Subscription" + } + ] + }, + { + "name": "ListSubscriptionsRequest", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "page_size", + "type": "int64" + }, + { + "id": 3, + "name": "cursor", + "type": "string" + }, + { + "id": 4, + "name": "source_types", + "type": "Subscription.SourceType", + "is_repeated": true + }, + { + "id": 5, + "name": "order_by", + "type": "OrderBy" + }, + { + "id": 6, + "name": "order_direction", + "type": "OrderDirection" + }, + { + "id": 7, + "name": "search_keyword", + "type": "string" + }, + { + "id": 8, + "name": "disabled", + "type": "google.protobuf.BoolValue" + } + ] + }, + { + "name": "ListSubscriptionsResponse", + "fields": [ + { + "id": 1, + "name": "subscriptions", + "type": "Subscription", + "is_repeated": true + }, + { + "id": 2, + "name": "cursor", + "type": "string" + }, + { + "id": 3, + "name": "total_count", + "type": "int64" + } + ] + }, + { + "name": "ListEnabledSubscriptionsRequest", + "fields": [ + { 
+ "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "page_size", + "type": "int64" + }, + { + "id": 3, + "name": "cursor", + "type": "string" + }, + { + "id": 4, + "name": "source_types", + "type": "Subscription.SourceType", + "is_repeated": true + } + ] + }, + { + "name": "ListEnabledSubscriptionsResponse", + "fields": [ + { + "id": 1, + "name": "subscriptions", + "type": "Subscription", + "is_repeated": true + }, + { + "id": 2, + "name": "cursor", + "type": "string" + } + ] + }, + { + "name": "CreateSubscriptionRequest", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "command", + "type": "CreateSubscriptionCommand" + } + ] + }, + { + "name": "CreateSubscriptionResponse" + }, + { + "name": "DeleteSubscriptionRequest", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "id", + "type": "string" + }, + { + "id": 3, + "name": "command", + "type": "DeleteSubscriptionCommand" + } + ] + }, + { + "name": "DeleteSubscriptionResponse" + }, + { + "name": "EnableSubscriptionRequest", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "id", + "type": "string" + }, + { + "id": 3, + "name": "command", + "type": "EnableSubscriptionCommand" + } + ] + }, + { + "name": "EnableSubscriptionResponse" + }, + { + "name": "DisableSubscriptionRequest", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "id", + "type": "string" + }, + { + "id": 3, + "name": "command", + "type": "DisableSubscriptionCommand" + } + ] + }, + { + "name": "DisableSubscriptionResponse" + }, + { + "name": "UpdateSubscriptionRequest", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "id", + "type": "string" + }, + { + "id": 3, + "name": "add_source_types_command", 
+ "type": "AddSourceTypesCommand" + }, + { + "id": 4, + "name": "delete_source_types_command", + "type": "DeleteSourceTypesCommand" + }, + { + "id": 5, + "name": "rename_subscription_command", + "type": "RenameSubscriptionCommand" + } + ] + }, + { + "name": "UpdateSubscriptionResponse" + } + ], + "services": [ + { + "name": "NotificationService", + "rpcs": [ + { + "name": "GetAdminSubscription", + "in_type": "GetAdminSubscriptionRequest", + "out_type": "GetAdminSubscriptionResponse" + }, + { + "name": "ListAdminSubscriptions", + "in_type": "ListAdminSubscriptionsRequest", + "out_type": "ListAdminSubscriptionsResponse" + }, + { + "name": "ListEnabledAdminSubscriptions", + "in_type": "ListEnabledAdminSubscriptionsRequest", + "out_type": "ListEnabledAdminSubscriptionsResponse" + }, + { + "name": "CreateAdminSubscription", + "in_type": "CreateAdminSubscriptionRequest", + "out_type": "CreateAdminSubscriptionResponse" + }, + { + "name": "DeleteAdminSubscription", + "in_type": "DeleteAdminSubscriptionRequest", + "out_type": "DeleteAdminSubscriptionResponse" + }, + { + "name": "EnableAdminSubscription", + "in_type": "EnableAdminSubscriptionRequest", + "out_type": "EnableAdminSubscriptionResponse" + }, + { + "name": "DisableAdminSubscription", + "in_type": "DisableAdminSubscriptionRequest", + "out_type": "DisableAdminSubscriptionResponse" + }, + { + "name": "UpdateAdminSubscription", + "in_type": "UpdateAdminSubscriptionRequest", + "out_type": "UpdateAdminSubscriptionResponse" + }, + { + "name": "GetSubscription", + "in_type": "GetSubscriptionRequest", + "out_type": "GetSubscriptionResponse" + }, + { + "name": "ListSubscriptions", + "in_type": "ListSubscriptionsRequest", + "out_type": "ListSubscriptionsResponse" + }, + { + "name": "ListEnabledSubscriptions", + "in_type": "ListEnabledSubscriptionsRequest", + "out_type": "ListEnabledSubscriptionsResponse" + }, + { + "name": "CreateSubscription", + "in_type": "CreateSubscriptionRequest", + "out_type": 
"CreateSubscriptionResponse" + }, + { + "name": "DeleteSubscription", + "in_type": "DeleteSubscriptionRequest", + "out_type": "DeleteSubscriptionResponse" + }, + { + "name": "EnableSubscription", + "in_type": "EnableSubscriptionRequest", + "out_type": "EnableSubscriptionResponse" + }, + { + "name": "DisableSubscription", + "in_type": "DisableSubscriptionRequest", + "out_type": "DisableSubscriptionResponse" + }, + { + "name": "UpdateSubscription", + "in_type": "UpdateSubscriptionRequest", + "out_type": "UpdateSubscriptionResponse" + } + ] + } + ], + "imports": [ + { + "path": "google/protobuf/wrappers.proto" + }, + { + "path": "proto/notification/subscription.proto" + }, + { + "path": "proto/notification/command.proto" + } + ], + "package": { + "name": "bucketeer.notification" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/notification" + } + ] + } + }, + { + "protopath": "notification:/:subscription.proto", + "def": { + "enums": [ + { + "name": "Subscription.SourceType", + "enum_fields": [ + { + "name": "DOMAIN_EVENT_FEATURE" + }, + { + "name": "DOMAIN_EVENT_GOAL", + "integer": 1 + }, + { + "name": "DOMAIN_EVENT_EXPERIMENT", + "integer": 2 + }, + { + "name": "DOMAIN_EVENT_ACCOUNT", + "integer": 3 + }, + { + "name": "DOMAIN_EVENT_APIKEY", + "integer": 4 + }, + { + "name": "DOMAIN_EVENT_SEGMENT", + "integer": 5 + }, + { + "name": "DOMAIN_EVENT_ENVIRONMENT", + "integer": 6 + }, + { + "name": "DOMAIN_EVENT_ADMIN_ACCOUNT", + "integer": 7 + }, + { + "name": "DOMAIN_EVENT_AUTOOPS_RULE", + "integer": 8 + }, + { + "name": "DOMAIN_EVENT_PUSH", + "integer": 9 + }, + { + "name": "DOMAIN_EVENT_SUBSCRIPTION", + "integer": 10 + }, + { + "name": "DOMAIN_EVENT_ADMIN_SUBSCRIPTION", + "integer": 11 + }, + { + "name": "DOMAIN_EVENT_PROJECT", + "integer": 12 + }, + { + "name": "DOMAIN_EVENT_WEBHOOK", + "integer": 13 + }, + { + "name": "FEATURE_STALE", + "integer": 100 + }, + { + "name": "EXPERIMENT_RUNNING", + "integer": 200 + }, + { 
+ "name": "MAU_COUNT", + "integer": 300 + } + ] + } + ], + "messages": [ + { + "name": "Subscription", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "created_at", + "type": "int64" + }, + { + "id": 3, + "name": "updated_at", + "type": "int64" + }, + { + "id": 4, + "name": "disabled", + "type": "bool" + }, + { + "id": 5, + "name": "source_types", + "type": "SourceType", + "is_repeated": true + }, + { + "id": 6, + "name": "recipient", + "type": "Recipient" + }, + { + "id": 7, + "name": "name", + "type": "string" + } + ] + } + ], + "imports": [ + { + "path": "proto/notification/recipient.proto" + } + ], + "package": { + "name": "bucketeer.notification" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/notification" + } + ] + } + }, + { + "protopath": "push:/:command.proto", + "def": { + "messages": [ + { + "name": "CreatePushCommand", + "fields": [ + { + "id": 1, + "name": "fcm_api_key", + "type": "string" + }, + { + "id": 2, + "name": "tags", + "type": "string", + "is_repeated": true + }, + { + "id": 3, + "name": "name", + "type": "string" + } + ] + }, + { + "name": "AddPushTagsCommand", + "fields": [ + { + "id": 1, + "name": "tags", + "type": "string", + "is_repeated": true + } + ] + }, + { + "name": "DeletePushTagsCommand", + "fields": [ + { + "id": 1, + "name": "tags", + "type": "string", + "is_repeated": true + } + ] + }, + { + "name": "DeletePushCommand" + }, + { + "name": "RenamePushCommand", + "fields": [ + { + "id": 1, + "name": "name", + "type": "string" + } + ] + } + ], + "package": { + "name": "bucketeer.push" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/push" + } + ] + } + }, + { + "protopath": "push:/:push.proto", + "def": { + "messages": [ + { + "name": "Push", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 2, + "name": "fcm_api_key", + "type": "string" + }, + { + "id": 
3, + "name": "tags", + "type": "string", + "is_repeated": true + }, + { + "id": 4, + "name": "deleted", + "type": "bool" + }, + { + "id": 5, + "name": "name", + "type": "string" + }, + { + "id": 6, + "name": "created_at", + "type": "int64" + }, + { + "id": 7, + "name": "updated_at", + "type": "int64" + } + ] + } + ], + "package": { + "name": "bucketeer.push" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/push" + } + ] + } + }, + { + "protopath": "push:/:service.proto", + "def": { + "enums": [ + { + "name": "ListPushesRequest.OrderBy", + "enum_fields": [ + { + "name": "DEFAULT" + }, + { + "name": "NAME", + "integer": 1 + }, + { + "name": "CREATED_AT", + "integer": 2 + }, + { + "name": "UPDATED_AT", + "integer": 3 + } + ] + }, + { + "name": "ListPushesRequest.OrderDirection", + "enum_fields": [ + { + "name": "ASC" + }, + { + "name": "DESC", + "integer": 1 + } + ] + } + ], + "messages": [ + { + "name": "CreatePushRequest", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "command", + "type": "CreatePushCommand" + } + ] + }, + { + "name": "CreatePushResponse" + }, + { + "name": "ListPushesRequest", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "page_size", + "type": "int64" + }, + { + "id": 3, + "name": "cursor", + "type": "string" + }, + { + "id": 4, + "name": "order_by", + "type": "OrderBy" + }, + { + "id": 5, + "name": "order_direction", + "type": "OrderDirection" + }, + { + "id": 6, + "name": "search_keyword", + "type": "string" + } + ] + }, + { + "name": "ListPushesResponse", + "fields": [ + { + "id": 1, + "name": "pushes", + "type": "Push", + "is_repeated": true + }, + { + "id": 2, + "name": "cursor", + "type": "string" + }, + { + "id": 3, + "name": "total_count", + "type": "int64" + } + ] + }, + { + "name": "DeletePushRequest", + "fields": [ + { + "id": 1, + "name": 
"environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "id", + "type": "string" + }, + { + "id": 3, + "name": "command", + "type": "DeletePushCommand" + } + ] + }, + { + "name": "DeletePushResponse" + }, + { + "name": "UpdatePushRequest", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "id", + "type": "string" + }, + { + "id": 3, + "name": "add_push_tags_command", + "type": "AddPushTagsCommand" + }, + { + "id": 4, + "name": "delete_push_tags_command", + "type": "DeletePushTagsCommand" + }, + { + "id": 5, + "name": "rename_push_command", + "type": "RenamePushCommand" + } + ] + }, + { + "name": "UpdatePushResponse" + } + ], + "services": [ + { + "name": "PushService", + "rpcs": [ + { + "name": "ListPushes", + "in_type": "ListPushesRequest", + "out_type": "ListPushesResponse" + }, + { + "name": "CreatePush", + "in_type": "CreatePushRequest", + "out_type": "CreatePushResponse" + }, + { + "name": "DeletePush", + "in_type": "DeletePushRequest", + "out_type": "DeletePushResponse" + }, + { + "name": "UpdatePush", + "in_type": "UpdatePushRequest", + "out_type": "UpdatePushResponse" + } + ] + } + ], + "imports": [ + { + "path": "proto/push/push.proto" + }, + { + "path": "proto/push/command.proto" + } + ], + "package": { + "name": "bucketeer.push" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/push" + } + ] + } + }, + { + "protopath": "test:/:service.proto", + "def": { + "messages": [ + { + "name": "TestRequest", + "fields": [ + { + "id": 1, + "name": "message", + "type": "string" + } + ] + }, + { + "name": "TestResponse", + "fields": [ + { + "id": 1, + "name": "message", + "type": "string" + } + ] + } + ], + "services": [ + { + "name": "TestService", + "rpcs": [ + { + "name": "Test", + "in_type": "TestRequest", + "out_type": "TestResponse" + } + ] + } + ], + "package": { + "name": "bucketeer.test" + }, + "options": [ + { + "name": 
"go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/test" + } + ] + } + }, + { + "protopath": "user:/:service.proto", + "def": { + "enums": [ + { + "name": "ListUsersRequest.OrderBy", + "enum_fields": [ + { + "name": "DEFAULT" + }, + { + "name": "CREATED_AT", + "integer": 1 + }, + { + "name": "LAST_SEEN", + "integer": 2 + } + ] + }, + { + "name": "ListUsersRequest.OrderDirection", + "enum_fields": [ + { + "name": "ASC" + }, + { + "name": "DESC", + "integer": 1 + } + ] + } + ], + "messages": [ + { + "name": "GetUserRequest", + "fields": [ + { + "id": 1, + "name": "user_id", + "type": "string" + }, + { + "id": 2, + "name": "environment_namespace", + "type": "string" + } + ] + }, + { + "name": "GetUserResponse", + "fields": [ + { + "id": 1, + "name": "user", + "type": "User" + } + ] + }, + { + "name": "ListUsersRequest", + "fields": [ + { + "id": 1, + "name": "environment_namespace", + "type": "string" + }, + { + "id": 2, + "name": "page_size", + "type": "int64" + }, + { + "id": 3, + "name": "cursor", + "type": "string" + }, + { + "id": 4, + "name": "order_by", + "type": "OrderBy" + }, + { + "id": 5, + "name": "order_direction", + "type": "OrderDirection" + }, + { + "id": 6, + "name": "search_keyword", + "type": "string" + }, + { + "id": 7, + "name": "from", + "type": "int64" + }, + { + "id": 8, + "name": "to", + "type": "int64" + } + ] + }, + { + "name": "ListUsersResponse", + "fields": [ + { + "id": 1, + "name": "users", + "type": "User", + "is_repeated": true + }, + { + "id": 2, + "name": "cursor", + "type": "string" + } + ] + } + ], + "services": [ + { + "name": "UserService", + "rpcs": [ + { + "name": "GetUser", + "in_type": "GetUserRequest", + "out_type": "GetUserResponse" + }, + { + "name": "ListUsers", + "in_type": "ListUsersRequest", + "out_type": "ListUsersResponse" + } + ] + } + ], + "imports": [ + { + "path": "proto/user/user.proto" + } + ], + "package": { + "name": "bucketeer.user" + }, + "options": [ + { + "name": "go_package", + "value": 
"github.com/bucketeer-io/bucketeer/proto/user" + } + ] + } + }, + { + "protopath": "user:/:user.proto", + "def": { + "messages": [ + { + "name": "User", + "fields": [ + { + "id": 1, + "name": "id", + "type": "string" + }, + { + "id": 4, + "name": "last_seen", + "type": "int64" + }, + { + "id": 5, + "name": "created_at", + "type": "int64" + } + ], + "maps": [ + { + "key_type": "string", + "field": { + "id": 2, + "name": "data", + "type": "string" + } + }, + { + "key_type": "string", + "field": { + "id": 3, + "name": "tagged_data", + "type": "Data" + } + } + ], + "messages": [ + { + "name": "Data", + "maps": [ + { + "key_type": "string", + "field": { + "id": 1, + "name": "value", + "type": "string" + } + } + ] + } + ] + } + ], + "package": { + "name": "bucketeer.user" + }, + "options": [ + { + "name": "go_package", + "value": "github.com/bucketeer-io/bucketeer/proto/user" + } + ] + } + } + ] +} \ No newline at end of file diff --git a/proto/proto_descriptor.bzl b/proto/proto_descriptor.bzl new file mode 100644 index 000000000..b59b59cf9 --- /dev/null +++ b/proto/proto_descriptor.bzl @@ -0,0 +1,30 @@ +def _proto_descriptor_impl(ctx): + inputs = ctx.files.srcs + ctx.files.deps + descriptors = ":".join([f.path for f in ctx.files.deps]) + args = ctx.actions.args() + args.add(descriptors, format = "--descriptor_set_in=%s") + args.add("--include_imports") + args.add("--include_source_info") + args.add(ctx.outputs.out.path, format = "--descriptor_set_out=%s") + args.add_all([f.path for f in ctx.files.srcs]) + ctx.actions.run( + inputs = inputs, + outputs = [ctx.outputs.out], + arguments = [args], + executable = ctx.executable.compiler, + ) + +proto_descriptor = rule( + implementation = _proto_descriptor_impl, + attrs = { + "srcs": attr.label_list(mandatory = True, allow_files = True), + "deps": attr.label_list(allow_files = True), + "compiler": attr.label( + executable = True, + cfg = "host", + allow_files = True, + default = Label("@com_google_protobuf//:protoc"), + ), + 
}, + outputs = {"out": "%{name}.pb"}, +) diff --git a/proto/push/BUILD.bazel b/proto/push/BUILD.bazel new file mode 100644 index 000000000..ce7a01abc --- /dev/null +++ b/proto/push/BUILD.bazel @@ -0,0 +1,40 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") +load("//proto:proto_descriptor.bzl", "proto_descriptor") + +proto_library( + name = "push_proto", + srcs = [ + "command.proto", + "push.proto", + "service.proto", + ], + visibility = ["//visibility:public"], +) + +go_proto_library( + name = "push_go_proto", + compilers = ["@io_bazel_rules_go//proto:go_grpc"], + importpath = "github.com/bucketeer-io/bucketeer/proto/push", + proto = ":push_proto", + visibility = ["//visibility:public"], +) + +go_library( + name = "go_default_library", + embed = [":push_go_proto"], + importpath = "github.com/bucketeer-io/bucketeer/proto/push", + visibility = ["//visibility:public"], +) + +proto_descriptor( + name = "proto_descriptor", + srcs = ["service.proto"], + visibility = ["//visibility:public"], + deps = [ + ":push_proto", + "@com_github_googleapis_googleapis//:api_proto", + "@com_google_protobuf//:descriptor_proto", + ], +) diff --git a/proto/push/command.proto b/proto/push/command.proto new file mode 100644 index 000000000..69488b93f --- /dev/null +++ b/proto/push/command.proto @@ -0,0 +1,38 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package bucketeer.push; +option go_package = "github.com/bucketeer-io/bucketeer/proto/push"; + +message CreatePushCommand { + string fcm_api_key = 1; + repeated string tags = 2; + string name = 3; +} + +message AddPushTagsCommand { + repeated string tags = 1; +} + +message DeletePushTagsCommand { + repeated string tags = 1; +} + +message DeletePushCommand {} + +message RenamePushCommand { + string name = 1; +} \ No newline at end of file diff --git a/proto/push/push.proto b/proto/push/push.proto new file mode 100644 index 000000000..bb238b074 --- /dev/null +++ b/proto/push/push.proto @@ -0,0 +1,28 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package bucketeer.push; +option go_package = "github.com/bucketeer-io/bucketeer/proto/push"; + +message Push { + string id = 1; + string fcm_api_key = 2; + repeated string tags = 3; + bool deleted = 4; + string name = 5; + int64 created_at = 6; + int64 updated_at = 7; +} diff --git a/proto/push/service.proto b/proto/push/service.proto new file mode 100644 index 000000000..ca064269b --- /dev/null +++ b/proto/push/service.proto @@ -0,0 +1,78 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package bucketeer.push; +option go_package = "github.com/bucketeer-io/bucketeer/proto/push"; + +import "proto/push/push.proto"; +import "proto/push/command.proto"; + +message CreatePushRequest { + string environment_namespace = 1; + CreatePushCommand command = 2; +} + +message CreatePushResponse {} + +message ListPushesRequest { + enum OrderBy { + DEFAULT = 0; + NAME = 1; + CREATED_AT = 2; + UPDATED_AT = 3; + } + enum OrderDirection { + ASC = 0; + DESC = 1; + } + string environment_namespace = 1; + int64 page_size = 2; + string cursor = 3; + OrderBy order_by = 4; + OrderDirection order_direction = 5; + string search_keyword = 6; +} + +message ListPushesResponse { + repeated Push pushes = 1; + string cursor = 2; + int64 total_count = 3; +} + +message DeletePushRequest { + string environment_namespace = 1; + string id = 2; + DeletePushCommand command = 3; +} + +message DeletePushResponse {} + +message UpdatePushRequest { + string environment_namespace = 1; + string id = 2; + AddPushTagsCommand add_push_tags_command = 3; + DeletePushTagsCommand delete_push_tags_command = 4; + RenamePushCommand rename_push_command = 5; +} + +message UpdatePushResponse {} + +service PushService { + rpc ListPushes(ListPushesRequest) returns (ListPushesResponse) {} + rpc CreatePush(CreatePushRequest) returns (CreatePushResponse) {} + rpc DeletePush(DeletePushRequest) returns (DeletePushResponse) {} + rpc 
UpdatePush(UpdatePushRequest) returns (UpdatePushResponse) {} +} diff --git a/proto/test/BUILD.bazel b/proto/test/BUILD.bazel new file mode 100644 index 000000000..2958286e2 --- /dev/null +++ b/proto/test/BUILD.bazel @@ -0,0 +1,24 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +proto_library( + name = "test_proto", + srcs = ["service.proto"], + visibility = ["//visibility:public"], +) + +go_proto_library( + name = "test_go_proto", + compilers = ["@io_bazel_rules_go//proto:go_grpc"], + importpath = "github.com/bucketeer-io/bucketeer/proto/test", + proto = ":test_proto", + visibility = ["//visibility:public"], +) + +go_library( + name = "go_default_library", + embed = [":test_go_proto"], + importpath = "github.com/bucketeer-io/bucketeer/proto/test", + visibility = ["//visibility:public"], +) diff --git a/proto/test/service.proto b/proto/test/service.proto new file mode 100644 index 000000000..a33464eff --- /dev/null +++ b/proto/test/service.proto @@ -0,0 +1,30 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package bucketeer.test; +option go_package = "github.com/bucketeer-io/bucketeer/proto/test"; + +message TestRequest { + string message = 1; +} + +message TestResponse { + string message = 1; +} + +service TestService { + rpc Test(TestRequest) returns (TestResponse) {} +} \ No newline at end of file diff --git a/proto/user/BUILD.bazel b/proto/user/BUILD.bazel new file mode 100644 index 000000000..082630a66 --- /dev/null +++ b/proto/user/BUILD.bazel @@ -0,0 +1,35 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") +load("//proto:proto_descriptor.bzl", "proto_descriptor") + +proto_library( + name = "user_proto", + srcs = [ + "service.proto", + "user.proto", + ], + visibility = ["//visibility:public"], +) + +go_proto_library( + name = "user_go_proto", + compilers = ["@io_bazel_rules_go//proto:go_grpc"], + importpath = "github.com/bucketeer-io/bucketeer/proto/user", + proto = ":user_proto", + visibility = ["//visibility:public"], +) + +go_library( + name = "go_default_library", + embed = [":user_go_proto"], + importpath = "github.com/bucketeer-io/bucketeer/proto/user", + visibility = ["//visibility:public"], +) + +proto_descriptor( + name = "proto_descriptor", + srcs = ["service.proto"], + visibility = ["//visibility:public"], + deps = [":user_proto"], +) diff --git a/proto/user/service.proto b/proto/user/service.proto new file mode 100644 index 000000000..0517a0258 --- /dev/null +++ b/proto/user/service.proto @@ -0,0 +1,59 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package bucketeer.user; +option go_package = "github.com/bucketeer-io/bucketeer/proto/user"; + +import "proto/user/user.proto"; + +message GetUserRequest { + string user_id = 1; + string environment_namespace = 2; +} + +message GetUserResponse { + User user = 1; +} + +message ListUsersRequest { + enum OrderBy { + DEFAULT = 0; + CREATED_AT = 1; + LAST_SEEN = 2; + } + enum OrderDirection { + ASC = 0; + DESC = 1; + } + string environment_namespace = 1; + int64 page_size = 2; + string cursor = 3; + OrderBy order_by = 4; + OrderDirection order_direction = 5; + string search_keyword = 6; + int64 from = 7; + int64 to = 8; +} + +message ListUsersResponse { + repeated User users = 1; + string cursor = 2; +} + +service UserService { + rpc GetUser(GetUserRequest) returns (GetUserResponse) {} + rpc ListUsers(ListUsersRequest) returns (ListUsersResponse) {} +} diff --git a/proto/user/user.proto b/proto/user/user.proto new file mode 100644 index 000000000..e9b544c9d --- /dev/null +++ b/proto/user/user.proto @@ -0,0 +1,29 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package bucketeer.user; +option go_package = "github.com/bucketeer-io/bucketeer/proto/user"; + +message User { + message Data { + map value = 1; + } + string id = 1; + map data = 2; // used by the sdk client + map tagged_data = 3; + int64 last_seen = 4; + int64 created_at = 5; +} diff --git a/python/.gitignore b/python/.gitignore new file mode 100644 index 000000000..94d868121 --- /dev/null +++ b/python/.gitignore @@ -0,0 +1,5 @@ + +.DS_Store +.venv +__pycache__ +proto \ No newline at end of file diff --git a/python/Dockerfile b/python/Dockerfile new file mode 100644 index 000000000..d52878186 --- /dev/null +++ b/python/Dockerfile @@ -0,0 +1,25 @@ +FROM index.docker.io/library/python:3.7.6-buster as builder + +RUN wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/v0.3.1/grpc_health_probe-linux-amd64 + +FROM index.docker.io/library/python:3.7.6-slim-buster as runner + +RUN apt update \ + && apt install -y build-essential python3-dev + +COPY --from=builder /bin/grpc_health_probe /bin/grpc_health_probe +RUN chmod +x /bin/grpc_health_probe + +WORKDIR /opt/app + +COPY requirements.txt /opt/app + +RUN pip install --upgrade pip + +RUN pip install -r requirements.txt + +COPY src /opt/app + +ENV PYTHONPATH "${PYTHONPATH}:/opt/app" + +CMD [ "python", "cmd/calculator/main.py"] diff --git a/python/Makefile b/python/Makefile new file mode 100644 index 000000000..721db3514 --- /dev/null +++ b/python/Makefile @@ -0,0 +1,48 @@ +PY_FILES := $$(find . 
-iname '*.py' -not -path "./.venv/*") +GIT_TOP_DIR := $(shell git rev-parse --show-toplevel) +PROTOBUF_INCLUDE_DIR := $(GIT_TOP_DIR)/proto/external/protocolbuffers/protobuf/v3.9.0 +PROTO_FOLDERS := $(filter-out $(GIT_TOP_DIR)/proto/external%, $(shell find $(GIT_TOP_DIR)/proto -name '*.proto' -print0 | xargs -0 -n1 dirname | sort --unique)) + +.PHONY: init +init: + poetry install + +.PHONY: test +test: + PYTHONPATH=./src poetry run pytest tests + +.PHONY: update-deps +update-deps: + poetry lock + poetry export -f requirements.txt --output requirements.txt --without-hashes + poetry export -f requirements.txt --output requirements-dev.txt --without-hashes --dev + +.PHONY: fmt +fmt: + poetry run black ${PY_FILES} + +.PHONY: fmt-check +fmt-check: + poetry run black --check ${PY_FILES} + +.PHONY: gen +gen: + rm -fr ./src/proto + for f in ${PROTO_FOLDERS}; do \ + poetry run python -m grpc_tools.protoc \ + -I"$(GIT_TOP_DIR)" \ + -I"$(PROTOBUF_INCLUDE_DIR)" \ + -I"${GOPATH}/src/github.com/googleapis/googleapis" \ + --python_out=$(GIT_TOP_DIR)/python/src \ + $$f/*.proto; \ + poetry run python -m grpc_tools.protoc \ + -I"$(GIT_TOP_DIR)" \ + -I"$(PROTOBUF_INCLUDE_DIR)" \ + -I"${GOPATH}/src/github.com/googleapis/googleapis" \ + --grpc_python_out=$(GIT_TOP_DIR)/python/src \ + $$f/*service.proto; \ + done \ + +.PHONY: docker-build-dev +docker-build-dev: + docker build -t "bucketeer-python-dev:latest" -f Dockerfile.dev . \ No newline at end of file diff --git a/python/README.md b/python/README.md new file mode 100644 index 000000000..d9130c4a8 --- /dev/null +++ b/python/README.md @@ -0,0 +1,63 @@ +# Bucketer/python + +## Requirements + +``` +brew install poetry +``` + +## Dependencies + +### Calculator + +- pystan: depends on gcc(GNU C Compiler), g++(GNU C++ Compiler) + +If you use Mac and have already installed XCode, you should have them. 
+ +## Development + +To initialize venv; + +```sh +make init-py +``` + +To activate venv; + +```sh +source $(poetry env info --path)/bin/activate +``` + +To deactivate venv; + +```sh +deactivate +``` + +To add dependencies; + +See [poetry documentation](https://python-poetry.org/docs/). + +To export dependencies to requirements.txt; + +```sh +make update-deps +``` + +To test; + +```sh +make test +``` + +To format; + +```sh +make fmt +``` + +To generate codes from proto files; + +```sh +make gen +``` \ No newline at end of file diff --git a/python/poetry.lock b/python/poetry.lock new file mode 100644 index 000000000..840b8c1d0 --- /dev/null +++ b/python/poetry.lock @@ -0,0 +1,1682 @@ +[[package]] +name = "aiohttp" +version = "3.7.4.post0" +description = "Async http client/server framework (asyncio)" +category = "main" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +async-timeout = ">=3.0,<4.0" +attrs = ">=17.3.0" +chardet = ">=2.0,<5.0" +multidict = ">=4.5,<7.0" +typing-extensions = ">=3.6.5" +yarl = ">=1.0,<2.0" + +[package.extras] +speedups = ["aiodns", "brotlipy", "cchardet"] + +[[package]] +name = "apscheduler" +version = "3.7.0" +description = "In-process task scheduler with Cron-like capabilities" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4" + +[package.dependencies] +pytz = "*" +six = ">=1.4.0" +tzlocal = ">=2.0,<3.0" + +[package.extras] +asyncio = ["trollius"] +doc = ["sphinx", "sphinx-rtd-theme"] +gevent = ["gevent"] +mongodb = ["pymongo (>=3.0)"] +redis = ["redis (>=3.0)"] +rethinkdb = ["rethinkdb (>=2.4.0)"] +sqlalchemy = ["sqlalchemy (>=0.8)"] +testing = ["pytest (<6)", "pytest-cov", "pytest-tornado5", "mock", "pytest-asyncio (<0.6)", "pytest-asyncio"] +tornado = ["tornado (>=4.3)"] +twisted = ["twisted"] +zookeeper = ["kazoo"] + +[[package]] +name = "async-timeout" +version = "3.0.1" +description = "Timeout context manager for asyncio programs" +category = "main" 
+optional = false +python-versions = ">=3.5.3" + +[[package]] +name = "atomicwrites" +version = "1.4.0" +description = "Atomic file writes." +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[[package]] +name = "attrs" +version = "21.4.0" +description = "Classes Without Boilerplate" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[package.extras] +dev = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface", "furo", "sphinx", "sphinx-notfound-page", "pre-commit", "cloudpickle"] +docs = ["furo", "sphinx", "zope.interface", "sphinx-notfound-page"] +tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface", "cloudpickle"] +tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "cloudpickle"] + +[[package]] +name = "black" +version = "22.3.0" +description = "The uncompromising code formatter." 
+category = "dev" +optional = false +python-versions = ">=3.6.2" + +[package.dependencies] +click = ">=8.0.0" +mypy-extensions = ">=0.4.3" +pathspec = ">=0.9.0" +platformdirs = ">=2" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typed-ast = {version = ">=1.4.2", markers = "python_version < \"3.8\" and implementation_name == \"cpython\""} +typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""} + +[package.extras] +colorama = ["colorama (>=0.4.3)"] +d = ["aiohttp (>=3.7.4)"] +jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] +uvloop = ["uvloop (>=0.15.2)"] + +[[package]] +name = "cachetools" +version = "4.2.4" +description = "Extensible memoizing collections and decorators" +category = "main" +optional = false +python-versions = "~=3.5" + +[[package]] +name = "certifi" +version = "2021.10.8" +description = "Python package for providing Mozilla's CA Bundle." +category = "main" +optional = false +python-versions = "*" + +[[package]] +name = "chardet" +version = "4.0.0" +description = "Universal encoding detector for Python 2 and 3" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[[package]] +name = "charset-normalizer" +version = "2.0.12" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." +category = "main" +optional = false +python-versions = ">=3.5.0" + +[package.extras] +unicode_backport = ["unicodedata2"] + +[[package]] +name = "click" +version = "8.1.2" +description = "Composable command line interface toolkit" +category = "dev" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} +importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} + +[[package]] +name = "colorama" +version = "0.4.4" +description = "Cross-platform colored terminal text." 
+category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[[package]] +name = "cython" +version = "0.29.28" +description = "The Cython compiler for writing C extensions for the Python language." +category = "main" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" + +[[package]] +name = "environs" +version = "9.5.0" +description = "simplified environment variable parsing" +category = "main" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +marshmallow = ">=3.0.0" +python-dotenv = "*" + +[package.extras] +dev = ["pytest", "dj-database-url", "dj-email-url", "django-cache-url", "flake8 (==4.0.1)", "flake8-bugbear (==21.9.2)", "mypy (==0.910)", "pre-commit (>=2.4,<3.0)", "tox"] +django = ["dj-database-url", "dj-email-url", "django-cache-url"] +lint = ["flake8 (==4.0.1)", "flake8-bugbear (==21.9.2)", "mypy (==0.910)", "pre-commit (>=2.4,<3.0)"] +tests = ["pytest", "dj-database-url", "dj-email-url", "django-cache-url"] + +[[package]] +name = "flake8" +version = "3.9.2" +description = "the modular source code checker: pep8 pyflakes and co" +category = "dev" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" + +[package.dependencies] +importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} +mccabe = ">=0.6.0,<0.7.0" +pycodestyle = ">=2.7.0,<2.8.0" +pyflakes = ">=2.3.0,<2.4.0" + +[[package]] +name = "google-api-core" +version = "1.31.5" +description = "Google API client core library" +category = "main" +optional = false +python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*" + +[package.dependencies] +google-auth = ">=1.25.0,<2.0dev" +googleapis-common-protos = ">=1.6.0,<2.0dev" +grpcio = {version = ">=1.29.0,<2.0dev", optional = true, markers = "extra == \"grpc\""} +packaging = ">=14.3" +protobuf = {version = ">=3.12.0", markers = "python_version > \"3\""} +pytz = "*" +requests = ">=2.18.0,<3.0.0dev" +six = 
">=1.13.0" + +[package.extras] +grpc = ["grpcio (>=1.29.0,<2.0dev)"] +grpcgcp = ["grpcio-gcp (>=0.2.2)"] +grpcio-gcp = ["grpcio-gcp (>=0.2.2)"] + +[[package]] +name = "google-api-python-client" +version = "2.46.0" +description = "Google API Client Library for Python" +category = "main" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +google-api-core = ">=1.31.5,<2.0.0 || >2.3.0,<3.0.0dev" +google-auth = ">=1.16.0,<3.0.0dev" +google-auth-httplib2 = ">=0.1.0" +httplib2 = ">=0.15.0,<1dev" +uritemplate = ">=3.0.1,<5" + +[[package]] +name = "google-auth" +version = "1.35.0" +description = "Google Authentication Library" +category = "main" +optional = false +python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*" + +[package.dependencies] +cachetools = ">=2.0.0,<5.0" +pyasn1-modules = ">=0.2.1" +rsa = {version = ">=3.1.4,<5", markers = "python_version >= \"3.6\""} +six = ">=1.9.0" + +[package.extras] +aiohttp = ["requests (>=2.20.0,<3.0.0dev)", "aiohttp (>=3.6.2,<4.0.0dev)"] +pyopenssl = ["pyopenssl (>=20.0.0)"] +reauth = ["pyu2f (>=0.1.5)"] + +[[package]] +name = "google-auth-httplib2" +version = "0.1.0" +description = "Google Authentication Library: httplib2 transport" +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +google-auth = "*" +httplib2 = ">=0.15.0" +six = "*" + +[[package]] +name = "google-cloud" +version = "0.34.0" +description = "API Client library for Google Cloud" +category = "main" +optional = false +python-versions = "*" + +[[package]] +name = "google-cloud-core" +version = "1.7.2" +description = "Google Cloud API client core library" +category = "main" +optional = false +python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*" + +[package.dependencies] +google-api-core = ">=1.21.0,<2.0.0dev" +google-auth = ">=1.24.0,<2.0dev" +six = ">=1.12.0" + +[package.extras] +grpc = ["grpcio (>=1.8.2,<2.0dev)"] + +[[package]] +name = "google-cloud-logging" +version = 
"1.15.1" +description = "Stackdriver Logging API client library" +category = "main" +optional = false +python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*" + +[package.dependencies] +google-api-core = {version = ">=1.15.0,<2.0.0dev", extras = ["grpc"]} +google-cloud-core = ">=1.1.0,<2.0dev" + +[[package]] +name = "googleapis-common-protos" +version = "1.56.0" +description = "Common protobufs used in Google APIs" +category = "main" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +protobuf = ">=3.12.0" + +[package.extras] +grpc = ["grpcio (>=1.0.0)"] + +[[package]] +name = "grpc-health-checking" +version = "0.0.1" +description = "A proxy to install the official gRPC Python package." +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +grpcio = "*" + +[[package]] +name = "grpcio" +version = "1.45.0" +description = "HTTP/2-based RPC framework" +category = "main" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +six = ">=1.5.2" + +[package.extras] +protobuf = ["grpcio-tools (>=1.45.0)"] + +[[package]] +name = "grpcio-health-checking" +version = "1.27.2" +description = "Standard Health Checking Service for gRPC" +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +grpcio = ">=1.27.2" +protobuf = ">=3.6.0" + +[[package]] +name = "grpcio-tools" +version = "1.45.0" +description = "Protobuf code generator for gRPC" +category = "dev" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +grpcio = ">=1.45.0" +protobuf = ">=3.5.0.post1,<4.0dev" + +[[package]] +name = "httplib2" +version = "0.20.4" +description = "A comprehensive HTTP client library." 
+category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[package.dependencies] +pyparsing = {version = ">=2.4.2,<3.0.0 || >3.0.0,<3.0.1 || >3.0.1,<3.0.2 || >3.0.2,<3.0.3 || >3.0.3,<4", markers = "python_version > \"3.0\""} + +[[package]] +name = "idna" +version = "3.3" +description = "Internationalized Domain Names in Applications (IDNA)" +category = "main" +optional = false +python-versions = ">=3.5" + +[[package]] +name = "importlib-metadata" +version = "4.11.3" +description = "Read metadata from Python packages" +category = "dev" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""} +zipp = ">=0.5" + +[package.extras] +docs = ["sphinx", "jaraco.packaging (>=9)", "rst.linker (>=1.9)"] +perf = ["ipython"] +testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pyfakefs", "flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy (>=0.9.1)", "importlib-resources (>=1.3)"] + +[[package]] +name = "iniconfig" +version = "1.1.1" +description = "iniconfig: brain-dead simple config-ini parsing" +category = "dev" +optional = false +python-versions = "*" + +[[package]] +name = "marshmallow" +version = "3.15.0" +description = "A lightweight library for converting complex datatypes to and from native Python datatypes." 
+category = "main" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +packaging = "*" + +[package.extras] +dev = ["pytest", "pytz", "simplejson", "mypy (==0.940)", "flake8 (==4.0.1)", "flake8-bugbear (==22.1.11)", "pre-commit (>=2.4,<3.0)", "tox"] +docs = ["sphinx (==4.4.0)", "sphinx-issues (==3.0.1)", "alabaster (==0.7.12)", "sphinx-version-warning (==1.1.2)", "autodocsumm (==0.2.7)"] +lint = ["mypy (==0.940)", "flake8 (==4.0.1)", "flake8-bugbear (==22.1.11)", "pre-commit (>=2.4,<3.0)"] +tests = ["pytest", "pytz", "simplejson"] + +[[package]] +name = "mccabe" +version = "0.6.1" +description = "McCabe checker, plugin for flake8" +category = "dev" +optional = false +python-versions = "*" + +[[package]] +name = "multidict" +version = "6.0.2" +description = "multidict implementation" +category = "main" +optional = false +python-versions = ">=3.7" + +[[package]] +name = "mypy-extensions" +version = "0.4.3" +description = "Experimental type system extensions for programs checked with the mypy typechecker." +category = "dev" +optional = false +python-versions = "*" + +[[package]] +name = "numpy" +version = "1.21.6" +description = "NumPy is the fundamental package for array computing with Python." 
+category = "main" +optional = false +python-versions = ">=3.7,<3.11" + +[[package]] +name = "packaging" +version = "21.3" +description = "Core utilities for Python packages" +category = "main" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +pyparsing = ">=2.0.2,<3.0.5 || >3.0.5" + +[[package]] +name = "pandas" +version = "1.3.5" +description = "Powerful data structures for data analysis, time series, and statistics" +category = "main" +optional = false +python-versions = ">=3.7.1" + +[package.dependencies] +numpy = [ + {version = ">=1.17.3", markers = "platform_machine != \"aarch64\" and platform_machine != \"arm64\" and python_version < \"3.10\""}, + {version = ">=1.19.2", markers = "platform_machine == \"aarch64\" and python_version < \"3.10\""}, + {version = ">=1.20.0", markers = "platform_machine == \"arm64\" and python_version < \"3.10\""}, +] +python-dateutil = ">=2.7.3" +pytz = ">=2017.3" + +[package.extras] +test = ["hypothesis (>=3.58)", "pytest (>=6.0)", "pytest-xdist"] + +[[package]] +name = "pathspec" +version = "0.9.0" +description = "Utility library for gitignore style pattern matching of file paths." +category = "dev" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" + +[[package]] +name = "platformdirs" +version = "2.5.2" +description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 
+category = "dev" +optional = false +python-versions = ">=3.7" + +[package.extras] +docs = ["furo (>=2021.7.5b38)", "proselint (>=0.10.2)", "sphinx-autodoc-typehints (>=1.12)", "sphinx (>=4)"] +test = ["appdirs (==1.4.4)", "pytest-cov (>=2.7)", "pytest-mock (>=3.6)", "pytest (>=6)"] + +[[package]] +name = "pluggy" +version = "1.0.0" +description = "plugin and hook calling mechanisms for python" +category = "dev" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "prometheus-async" +version = "19.2.0" +description = "Async helpers for prometheus_client." +category = "main" +optional = false +python-versions = ">=2.7, !=3.0, !=3.1, !=3.2, !=3.3, !=3.4" + +[package.dependencies] +prometheus-client = ">=0.0.18" +wrapt = "*" + +[package.extras] +aiohttp = ["aiohttp (>=3)"] +consul = ["aiohttp (>=3)"] +dev = ["aiohttp (>=3)", "twisted", "aiohttp", "sphinx", "sphinxcontrib-asyncio", "coverage", "pytest (<4.1)", "pytest-twisted", "pre-commit", "pytest-asyncio"] +docs = ["aiohttp", "sphinx", "sphinxcontrib-asyncio", "twisted"] +tests = ["coverage", "pytest (<4.1)", "pytest-asyncio"] +twisted = ["twisted"] + +[[package]] +name = "prometheus-client" +version = "0.7.1" +description = "Python client for the Prometheus monitoring system." 
+category = "main" +optional = false +python-versions = "*" + +[package.extras] +twisted = ["twisted"] + +[[package]] +name = "protobuf" +version = "3.20.1" +description = "Protocol Buffers" +category = "main" +optional = false +python-versions = ">=3.7" + +[[package]] +name = "py" +version = "1.11.0" +description = "library with cross-python path, ini-parsing, io, code, log facilities" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[[package]] +name = "pyasn1" +version = "0.4.8" +description = "ASN.1 types and codecs" +category = "main" +optional = false +python-versions = "*" + +[[package]] +name = "pyasn1-modules" +version = "0.2.8" +description = "A collection of ASN.1-based protocols modules." +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +pyasn1 = ">=0.4.6,<0.5.0" + +[[package]] +name = "pycodestyle" +version = "2.7.0" +description = "Python style guide checker" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[[package]] +name = "pyflakes" +version = "2.3.1" +description = "passive checker of Python programs" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[[package]] +name = "pymysql" +version = "1.0.2" +description = "Pure Python MySQL Driver" +category = "main" +optional = false +python-versions = ">=3.6" + +[package.extras] +ed25519 = ["PyNaCl (>=1.4.0)"] +rsa = ["cryptography"] + +[[package]] +name = "pyparsing" +version = "3.0.8" +description = "pyparsing module - Classes and methods to define and execute parsing grammars" +category = "main" +optional = false +python-versions = ">=3.6.8" + +[package.extras] +diagrams = ["railroad-diagrams", "jinja2"] + +[[package]] +name = "pystan" +version = "2.19.1.1" +description = "Python interface to Stan, a package for Bayesian inference" +category = "main" +optional = false +python-versions = "*" + 
+[package.dependencies] +Cython = ">=0.22,<0.25.1 || >0.25.1" +numpy = ">=1.7" + +[[package]] +name = "pytest" +version = "6.2.5" +description = "pytest: simple powerful testing with Python" +category = "dev" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +atomicwrites = {version = ">=1.0", markers = "sys_platform == \"win32\""} +attrs = ">=19.2.0" +colorama = {version = "*", markers = "sys_platform == \"win32\""} +importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=0.12,<2.0" +py = ">=1.8.2" +toml = "*" + +[package.extras] +testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "requests", "xmlschema"] + +[[package]] +name = "pytest-mock" +version = "3.7.0" +description = "Thin-wrapper around the mock package for easier use with pytest" +category = "dev" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +pytest = ">=5.0" + +[package.extras] +dev = ["pre-commit", "tox", "pytest-asyncio"] + +[[package]] +name = "python-dateutil" +version = "2.8.2" +description = "Extensions to the standard Python datetime module" +category = "main" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "python-dotenv" +version = "0.20.0" +description = "Read key-value pairs from a .env file and set them as environment variables" +category = "main" +optional = false +python-versions = ">=3.5" + +[package.extras] +cli = ["click (>=5.0)"] + +[[package]] +name = "pytz" +version = "2022.1" +description = "World timezone definitions, modern and historical" +category = "main" +optional = false +python-versions = "*" + +[[package]] +name = "requests" +version = "2.27.1" +description = "Python HTTP for Humans." 
+category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = {version = ">=2.0.0,<2.1.0", markers = "python_version >= \"3\""} +idna = {version = ">=2.5,<4", markers = "python_version >= \"3\""} +urllib3 = ">=1.21.1,<1.27" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)", "win-inet-pton"] +use_chardet_on_py3 = ["chardet (>=3.0.2,<5)"] + +[[package]] +name = "rsa" +version = "4.8" +description = "Pure-Python RSA implementation" +category = "main" +optional = false +python-versions = ">=3.6,<4" + +[package.dependencies] +pyasn1 = ">=0.1.3" + +[[package]] +name = "scipy" +version = "1.7.3" +description = "SciPy: Scientific Library for Python" +category = "main" +optional = false +python-versions = ">=3.7,<3.11" + +[package.dependencies] +numpy = ">=1.16.5,<1.23.0" + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" + +[[package]] +name = "toml" +version = "0.10.2" +description = "Python Library for Tom's Obvious, Minimal Language" +category = "dev" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" + +[[package]] +name = "tomli" +version = "2.0.1" +description = "A lil' TOML parser" +category = "dev" +optional = false +python-versions = ">=3.7" + +[[package]] +name = "typed-ast" +version = "1.5.3" +description = "a fork of Python 2 and 3 ast modules with type comment support" +category = "dev" +optional = false +python-versions = ">=3.6" + +[[package]] +name = "typing-extensions" +version = "4.2.0" +description = "Backported and Experimental Type Hints for Python 3.7+" +category = "main" +optional = false +python-versions = ">=3.7" + +[[package]] +name = "tzlocal" +version = "2.1" +description = "tzinfo object for the local timezone" +category = "main" +optional = false 
+python-versions = "*" + +[package.dependencies] +pytz = "*" + +[[package]] +name = "uritemplate" +version = "4.1.1" +description = "Implementation of RFC 6570 URI Templates" +category = "main" +optional = false +python-versions = ">=3.6" + +[[package]] +name = "urllib3" +version = "1.26.9" +description = "HTTP library with thread-safe connection pooling, file post, and more." +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4" + +[package.extras] +brotli = ["brotlicffi (>=0.8.0)", "brotli (>=1.0.9)", "brotlipy (>=0.6.0)"] +secure = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "certifi", "ipaddress"] +socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] + +[[package]] +name = "wrapt" +version = "1.14.0" +description = "Module for decorators, wrappers and monkey patching." +category = "main" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" + +[[package]] +name = "yarl" +version = "1.7.2" +description = "Yet another URL library" +category = "main" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +idna = ">=2.0" +multidict = ">=4.0" +typing-extensions = {version = ">=3.7.4", markers = "python_version < \"3.8\""} + +[[package]] +name = "zipp" +version = "3.8.0" +description = "Backport of pathlib-compatible object wrapper for zip files" +category = "dev" +optional = false +python-versions = ">=3.7" + +[package.extras] +docs = ["sphinx", "jaraco.packaging (>=9)", "rst.linker (>=1.9)"] +testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "jaraco.itertools", "func-timeout", "pytest-black (>=0.3.7)", "pytest-mypy (>=0.9.1)"] + +[metadata] +lock-version = "1.1" +python-versions = "3.7.6" +content-hash = "54b7b9767f0b5839857d2453ed5524a883c6a98e2539a1e73a2570d114ca0158" + +[metadata.files] +aiohttp = [ + {file = "aiohttp-3.7.4.post0-cp36-cp36m-macosx_10_14_x86_64.whl", hash = 
"sha256:3cf75f7cdc2397ed4442594b935a11ed5569961333d49b7539ea741be2cc79d5"}, + {file = "aiohttp-3.7.4.post0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:4b302b45040890cea949ad092479e01ba25911a15e648429c7c5aae9650c67a8"}, + {file = "aiohttp-3.7.4.post0-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:fe60131d21b31fd1a14bd43e6bb88256f69dfc3188b3a89d736d6c71ed43ec95"}, + {file = "aiohttp-3.7.4.post0-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:393f389841e8f2dfc86f774ad22f00923fdee66d238af89b70ea314c4aefd290"}, + {file = "aiohttp-3.7.4.post0-cp36-cp36m-manylinux2014_ppc64le.whl", hash = "sha256:c6e9dcb4cb338d91a73f178d866d051efe7c62a7166653a91e7d9fb18274058f"}, + {file = "aiohttp-3.7.4.post0-cp36-cp36m-manylinux2014_s390x.whl", hash = "sha256:5df68496d19f849921f05f14f31bd6ef53ad4b00245da3195048c69934521809"}, + {file = "aiohttp-3.7.4.post0-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:0563c1b3826945eecd62186f3f5c7d31abb7391fedc893b7e2b26303b5a9f3fe"}, + {file = "aiohttp-3.7.4.post0-cp36-cp36m-win32.whl", hash = "sha256:3d78619672183be860b96ed96f533046ec97ca067fd46ac1f6a09cd9b7484287"}, + {file = "aiohttp-3.7.4.post0-cp36-cp36m-win_amd64.whl", hash = "sha256:f705e12750171c0ab4ef2a3c76b9a4024a62c4103e3a55dd6f99265b9bc6fcfc"}, + {file = "aiohttp-3.7.4.post0-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:230a8f7e24298dea47659251abc0fd8b3c4e38a664c59d4b89cca7f6c09c9e87"}, + {file = "aiohttp-3.7.4.post0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:2e19413bf84934d651344783c9f5e22dee452e251cfd220ebadbed2d9931dbf0"}, + {file = "aiohttp-3.7.4.post0-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:e4b2b334e68b18ac9817d828ba44d8fcb391f6acb398bcc5062b14b2cbeac970"}, + {file = "aiohttp-3.7.4.post0-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:d012ad7911653a906425d8473a1465caa9f8dea7fcf07b6d870397b774ea7c0f"}, + {file = "aiohttp-3.7.4.post0-cp37-cp37m-manylinux2014_ppc64le.whl", hash = 
"sha256:40eced07f07a9e60e825554a31f923e8d3997cfc7fb31dbc1328c70826e04cde"}, + {file = "aiohttp-3.7.4.post0-cp37-cp37m-manylinux2014_s390x.whl", hash = "sha256:209b4a8ee987eccc91e2bd3ac36adee0e53a5970b8ac52c273f7f8fd4872c94c"}, + {file = "aiohttp-3.7.4.post0-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:14762875b22d0055f05d12abc7f7d61d5fd4fe4642ce1a249abdf8c700bf1fd8"}, + {file = "aiohttp-3.7.4.post0-cp37-cp37m-win32.whl", hash = "sha256:7615dab56bb07bff74bc865307aeb89a8bfd9941d2ef9d817b9436da3a0ea54f"}, + {file = "aiohttp-3.7.4.post0-cp37-cp37m-win_amd64.whl", hash = "sha256:d9e13b33afd39ddeb377eff2c1c4f00544e191e1d1dee5b6c51ddee8ea6f0cf5"}, + {file = "aiohttp-3.7.4.post0-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:547da6cacac20666422d4882cfcd51298d45f7ccb60a04ec27424d2f36ba3eaf"}, + {file = "aiohttp-3.7.4.post0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:af9aa9ef5ba1fd5b8c948bb11f44891968ab30356d65fd0cc6707d989cd521df"}, + {file = "aiohttp-3.7.4.post0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:64322071e046020e8797117b3658b9c2f80e3267daec409b350b6a7a05041213"}, + {file = "aiohttp-3.7.4.post0-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:bb437315738aa441251214dad17428cafda9cdc9729499f1d6001748e1d432f4"}, + {file = "aiohttp-3.7.4.post0-cp38-cp38-manylinux2014_ppc64le.whl", hash = "sha256:e54962802d4b8b18b6207d4a927032826af39395a3bd9196a5af43fc4e60b009"}, + {file = "aiohttp-3.7.4.post0-cp38-cp38-manylinux2014_s390x.whl", hash = "sha256:a00bb73540af068ca7390e636c01cbc4f644961896fa9363154ff43fd37af2f5"}, + {file = "aiohttp-3.7.4.post0-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:79ebfc238612123a713a457d92afb4096e2148be17df6c50fb9bf7a81c2f8013"}, + {file = "aiohttp-3.7.4.post0-cp38-cp38-win32.whl", hash = "sha256:515dfef7f869a0feb2afee66b957cc7bbe9ad0cdee45aec7fdc623f4ecd4fb16"}, + {file = "aiohttp-3.7.4.post0-cp38-cp38-win_amd64.whl", hash = "sha256:114b281e4d68302a324dd33abb04778e8557d88947875cbf4e842c2c01a030c5"}, + {file = 
"aiohttp-3.7.4.post0-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:7b18b97cf8ee5452fa5f4e3af95d01d84d86d32c5e2bfa260cf041749d66360b"}, + {file = "aiohttp-3.7.4.post0-cp39-cp39-manylinux1_i686.whl", hash = "sha256:15492a6368d985b76a2a5fdd2166cddfea5d24e69eefed4630cbaae5c81d89bd"}, + {file = "aiohttp-3.7.4.post0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:bdb230b4943891321e06fc7def63c7aace16095be7d9cf3b1e01be2f10fba439"}, + {file = "aiohttp-3.7.4.post0-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:cffe3ab27871bc3ea47df5d8f7013945712c46a3cc5a95b6bee15887f1675c22"}, + {file = "aiohttp-3.7.4.post0-cp39-cp39-manylinux2014_ppc64le.whl", hash = "sha256:f881853d2643a29e643609da57b96d5f9c9b93f62429dcc1cbb413c7d07f0e1a"}, + {file = "aiohttp-3.7.4.post0-cp39-cp39-manylinux2014_s390x.whl", hash = "sha256:a5ca29ee66f8343ed336816c553e82d6cade48a3ad702b9ffa6125d187e2dedb"}, + {file = "aiohttp-3.7.4.post0-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:17c073de315745a1510393a96e680d20af8e67e324f70b42accbd4cb3315c9fb"}, + {file = "aiohttp-3.7.4.post0-cp39-cp39-win32.whl", hash = "sha256:932bb1ea39a54e9ea27fc9232163059a0b8855256f4052e776357ad9add6f1c9"}, + {file = "aiohttp-3.7.4.post0-cp39-cp39-win_amd64.whl", hash = "sha256:02f46fc0e3c5ac58b80d4d56eb0a7c7d97fcef69ace9326289fb9f1955e65cfe"}, + {file = "aiohttp-3.7.4.post0.tar.gz", hash = "sha256:493d3299ebe5f5a7c66b9819eacdcfbbaaf1a8e84911ddffcdc48888497afecf"}, +] +apscheduler = [ + {file = "APScheduler-3.7.0-py2.py3-none-any.whl", hash = "sha256:c06cc796d5bb9eb3c4f77727f6223476eb67749e7eea074d1587550702a7fbe3"}, + {file = "APScheduler-3.7.0.tar.gz", hash = "sha256:1cab7f2521e107d07127b042155b632b7a1cd5e02c34be5a28ff62f77c900c6a"}, +] +async-timeout = [ + {file = "async-timeout-3.0.1.tar.gz", hash = "sha256:0c3c816a028d47f659d6ff5c745cb2acf1f966da1fe5c19c77a70282b25f4c5f"}, + {file = "async_timeout-3.0.1-py3-none-any.whl", hash = "sha256:4291ca197d287d274d0b6cb5d6f8f8f82d434ed288f962539ff18cc9012f9ea3"}, +] 
+atomicwrites = [ + {file = "atomicwrites-1.4.0-py2.py3-none-any.whl", hash = "sha256:6d1784dea7c0c8d4a5172b6c620f40b6e4cbfdf96d783691f2e1302a7b88e197"}, + {file = "atomicwrites-1.4.0.tar.gz", hash = "sha256:ae70396ad1a434f9c7046fd2dd196fc04b12f9e91ffb859164193be8b6168a7a"}, +] +attrs = [ + {file = "attrs-21.4.0-py2.py3-none-any.whl", hash = "sha256:2d27e3784d7a565d36ab851fe94887c5eccd6a463168875832a1be79c82828b4"}, + {file = "attrs-21.4.0.tar.gz", hash = "sha256:626ba8234211db98e869df76230a137c4c40a12d72445c45d5f5b716f076e2fd"}, +] +black = [ + {file = "black-22.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2497f9c2386572e28921fa8bec7be3e51de6801f7459dffd6e62492531c47e09"}, + {file = "black-22.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5795a0375eb87bfe902e80e0c8cfaedf8af4d49694d69161e5bd3206c18618bb"}, + {file = "black-22.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e3556168e2e5c49629f7b0f377070240bd5511e45e25a4497bb0073d9dda776a"}, + {file = "black-22.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67c8301ec94e3bcc8906740fe071391bce40a862b7be0b86fb5382beefecd968"}, + {file = "black-22.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:fd57160949179ec517d32ac2ac898b5f20d68ed1a9c977346efbac9c2f1e779d"}, + {file = "black-22.3.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:cc1e1de68c8e5444e8f94c3670bb48a2beef0e91dddfd4fcc29595ebd90bb9ce"}, + {file = "black-22.3.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d2fc92002d44746d3e7db7cf9313cf4452f43e9ea77a2c939defce3b10b5c82"}, + {file = "black-22.3.0-cp36-cp36m-win_amd64.whl", hash = "sha256:a6342964b43a99dbc72f72812bf88cad8f0217ae9acb47c0d4f141a6416d2d7b"}, + {file = "black-22.3.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:328efc0cc70ccb23429d6be184a15ce613f676bdfc85e5fe8ea2a9354b4e9015"}, + {file = "black-22.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:06f9d8846f2340dfac80ceb20200ea5d1b3f181dd0556b47af4e8e0b24fa0a6b"}, + {file = "black-22.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:ad4efa5fad66b903b4a5f96d91461d90b9507a812b3c5de657d544215bb7877a"}, + {file = "black-22.3.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e8477ec6bbfe0312c128e74644ac8a02ca06bcdb8982d4ee06f209be28cdf163"}, + {file = "black-22.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:637a4014c63fbf42a692d22b55d8ad6968a946b4a6ebc385c5505d9625b6a464"}, + {file = "black-22.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:863714200ada56cbc366dc9ae5291ceb936573155f8bf8e9de92aef51f3ad0f0"}, + {file = "black-22.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10dbe6e6d2988049b4655b2b739f98785a884d4d6b85bc35133a8fb9a2233176"}, + {file = "black-22.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:cee3e11161dde1b2a33a904b850b0899e0424cc331b7295f2a9698e79f9a69a0"}, + {file = "black-22.3.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5891ef8abc06576985de8fa88e95ab70641de6c1fca97e2a15820a9b69e51b20"}, + {file = "black-22.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:30d78ba6bf080eeaf0b7b875d924b15cd46fec5fd044ddfbad38c8ea9171043a"}, + {file = "black-22.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ee8f1f7228cce7dffc2b464f07ce769f478968bfb3dd1254a4c2eeed84928aad"}, + {file = "black-22.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ee227b696ca60dd1c507be80a6bc849a5a6ab57ac7352aad1ffec9e8b805f21"}, + {file = "black-22.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:9b542ced1ec0ceeff5b37d69838106a6348e60db7b8fdd245294dc1d26136265"}, + {file = "black-22.3.0-py3-none-any.whl", hash = "sha256:bc58025940a896d7e5356952228b68f793cf5fcb342be703c3a2669a1488cb72"}, + {file = "black-22.3.0.tar.gz", hash = "sha256:35020b8886c022ced9282b51b5a875b6d1ab0c387b31a065b84db7c33085ca79"}, +] +cachetools = [ + {file = "cachetools-4.2.4-py3-none-any.whl", hash = 
"sha256:92971d3cb7d2a97efff7c7bb1657f21a8f5fb309a37530537c71b1774189f2d1"}, + {file = "cachetools-4.2.4.tar.gz", hash = "sha256:89ea6f1b638d5a73a4f9226be57ac5e4f399d22770b92355f92dcb0f7f001693"}, +] +certifi = [ + {file = "certifi-2021.10.8-py2.py3-none-any.whl", hash = "sha256:d62a0163eb4c2344ac042ab2bdf75399a71a2d8c7d47eac2e2ee91b9d6339569"}, + {file = "certifi-2021.10.8.tar.gz", hash = "sha256:78884e7c1d4b00ce3cea67b44566851c4343c120abd683433ce934a68ea58872"}, +] +chardet = [ + {file = "chardet-4.0.0-py2.py3-none-any.whl", hash = "sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5"}, + {file = "chardet-4.0.0.tar.gz", hash = "sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa"}, +] +charset-normalizer = [ + {file = "charset-normalizer-2.0.12.tar.gz", hash = "sha256:2857e29ff0d34db842cd7ca3230549d1a697f96ee6d3fb071cfa6c7393832597"}, + {file = "charset_normalizer-2.0.12-py3-none-any.whl", hash = "sha256:6881edbebdb17b39b4eaaa821b438bf6eddffb4468cf344f09f89def34a8b1df"}, +] +click = [ + {file = "click-8.1.2-py3-none-any.whl", hash = "sha256:24e1a4a9ec5bf6299411369b208c1df2188d9eb8d916302fe6bf03faed227f1e"}, + {file = "click-8.1.2.tar.gz", hash = "sha256:479707fe14d9ec9a0757618b7a100a0ae4c4e236fac5b7f80ca68028141a1a72"}, +] +colorama = [ + {file = "colorama-0.4.4-py2.py3-none-any.whl", hash = "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2"}, + {file = "colorama-0.4.4.tar.gz", hash = "sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b"}, +] +cython = [ + {file = "Cython-0.29.28-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:75686c586e37b1fed0fe4a2c053474f96fc07da0063bbfc98023454540515d31"}, + {file = "Cython-0.29.28-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:16f2e74fcac223c53e298ecead62c353d3cffa107bea5d8232e4b2ba40781634"}, + {file = "Cython-0.29.28-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:b6c77cc24861a33714e74212abfab4e54bf42e1ad602623f193b8e369389af2f"}, + {file = "Cython-0.29.28-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:59f4e86b415620a097cf0ec602adf5a7ee3cc33e8220567ded96566f753483f8"}, + {file = "Cython-0.29.28-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:31465dce7fd3f058d02afb98b13af962848cc607052388814428dc801cc26f57"}, + {file = "Cython-0.29.28-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:5658fa477e80d96c49d5ff011938dd4b62da9aa428f771b91f1a7c49af45aad8"}, + {file = "Cython-0.29.28-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:33b69ac9bbf2b93d8cae336cfe48889397a857e6ceeb5cef0b2f0b31b6c54f2b"}, + {file = "Cython-0.29.28-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9d39ee7ddef6856413f950b8959e852d83376d9db1c509505e3f4873df32aa70"}, + {file = "Cython-0.29.28-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c9848a423a14e8f51bd4bbf8e2ff37031764ce66bdc7c6bc06c70d4084eb23c7"}, + {file = "Cython-0.29.28-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:09448aadb818387160ca4d1e1b82dbb7001526b6d0bed7529c4e8ac12e3b6f4c"}, + {file = "Cython-0.29.28-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:341917bdb2c95bcf8322aacfe50bbe6b4794880b16fa8b2300330520e123a5e5"}, + {file = "Cython-0.29.28-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:fdcef7abb09fd827691e3abe6fd42c6c34beaccfa0bc2df6074f0a49949df6a8"}, + {file = "Cython-0.29.28-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:43eca77169f855dd04be11921a585c8854a174f30bc925257e92bc7b9197fbd2"}, + {file = "Cython-0.29.28-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:7962a78ceb80cdec21345fb5088e675060fa65982030d446069f2d675d30e3cd"}, + {file = "Cython-0.29.28-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:ed32c206e1d68056a34b21d2ec0cf0f23d338d6531476a68c73e21e20bd7bb63"}, + {file = "Cython-0.29.28-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:a0ed39c63ba52edd03a39ea9d6da6f5326aaee5d333c317feba543270a1b3af5"}, + {file = "Cython-0.29.28-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:ded4fd3da4dee2f4414c35214244e29befa7f6fede3e9be317e765169df2cbc7"}, + {file = "Cython-0.29.28-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:e24bd94946ffa37f30fcb865f2340fb6d429a3c7bf87b47b22f7d22e0e68a15c"}, + {file = "Cython-0.29.28-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:076aa8da83383e2bed0ca5f92c13a7e76e684bc41fe8e438bbed735f5b1c2731"}, + {file = "Cython-0.29.28-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:004387d8b94c64681ee05660d6a234e125396097726cf2f419c0fa2ac38034d6"}, + {file = "Cython-0.29.28-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:d6036f6a5a0c7fb1af88889872268b15bf20dd9cefe33a6602d79ba18b8db20f"}, + {file = "Cython-0.29.28-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:1612d7439590ba3b8de5f907bf0e54bd8e024eafb8c59261531a7988030c182d"}, + {file = "Cython-0.29.28-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:d7d7beb600d5dd551e9322e1393b74286f4a3d4aa387f7bfbaccc1495a98603b"}, + {file = "Cython-0.29.28-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:5e82f6b3dc2133b2e0e2c5c63d352d40a695e40cc7ed99f4cbe83334bcf9ab39"}, + {file = "Cython-0.29.28-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:49076747b731ed78acf203666c3b3c5d664754ea01ca4527f62f6d8675703688"}, + {file = 
"Cython-0.29.28-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9f2b7c86a73db0d8dbbd885fe67f04c7b787df37a3848b9867270d3484101fbd"}, + {file = "Cython-0.29.28-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a3b27812ac9e9737026bfbb1dd47434f3e84013f430bafe1c6cbaf1cd51b5518"}, + {file = "Cython-0.29.28-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0378a14d2580dcea234d7a2dc8d75f60c091105885096e6dd5b032be97542c16"}, + {file = "Cython-0.29.28-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:d7c98727397c2547a56aa0c3c98140f1873c69a0642edc9446c6c870d0d8a5b5"}, + {file = "Cython-0.29.28-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:6626f9691ce2093ccbcc9932f449efe3b6e1c893b556910881d177c61612e8ff"}, + {file = "Cython-0.29.28-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:e9cc6af0c9c477c5e175e807dce439509934efefc24ea2da9fced7fbc8170591"}, + {file = "Cython-0.29.28-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:05edfa51c0ff31a8df3cb291b90ca93ab499686d023b9b81c216cd3509f73def"}, + {file = "Cython-0.29.28-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:4b3089255b6b1cc69e4b854626a41193e6acae5332263d24707976b3cb8ca644"}, + {file = "Cython-0.29.28-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:03b749e4f0bbf631cee472add2806d338a7d496f8383f6fb28cc5fdc34b7fdb8"}, + {file = "Cython-0.29.28-py2.py3-none-any.whl", hash = "sha256:26d8d0ededca42be50e0ac377c08408e18802b1391caa3aea045a72c1bff47ac"}, + {file = "Cython-0.29.28.tar.gz", hash = "sha256:d6fac2342802c30e51426828fe084ff4deb1b3387367cf98976bb2e64b6f8e45"}, +] +environs = [ + {file = "environs-9.5.0-py2.py3-none-any.whl", hash = "sha256:1e549569a3de49c05f856f40bce86979e7d5ffbbc4398e7f338574c220189124"}, + {file = "environs-9.5.0.tar.gz", hash = 
"sha256:a76307b36fbe856bdca7ee9161e6c466fd7fcffc297109a118c59b54e27e30c9"}, +] +flake8 = [ + {file = "flake8-3.9.2-py2.py3-none-any.whl", hash = "sha256:bf8fd333346d844f616e8d47905ef3a3384edae6b4e9beb0c5101e25e3110907"}, + {file = "flake8-3.9.2.tar.gz", hash = "sha256:07528381786f2a6237b061f6e96610a4167b226cb926e2aa2b6b1d78057c576b"}, +] +google-api-core = [ + {file = "google-api-core-1.31.5.tar.gz", hash = "sha256:85d2074f2c8f9c07e614d7f978767d71ceb7d40647814ef4236d3a0ef671ee75"}, + {file = "google_api_core-1.31.5-py2.py3-none-any.whl", hash = "sha256:6815207a8b422e9da42c200681603f304b25f98c98b675a9db9fdc3717e44280"}, +] +google-api-python-client = [ + {file = "google-api-python-client-2.46.0.tar.gz", hash = "sha256:ba41ce1da337d39d76da1c5affea6e0b95bb6da74aa2adfe5a40618dd67a01fe"}, + {file = "google_api_python_client-2.46.0-py2.py3-none-any.whl", hash = "sha256:1a7a954fdb778c77ad2a7b28c8e5f4b8761728aca89f5a017e05abd92814b6e9"}, +] +google-auth = [ + {file = "google-auth-1.35.0.tar.gz", hash = "sha256:b7033be9028c188ee30200b204ea00ed82ea1162e8ac1df4aa6ded19a191d88e"}, + {file = "google_auth-1.35.0-py2.py3-none-any.whl", hash = "sha256:997516b42ecb5b63e8d80f5632c1a61dddf41d2a4c2748057837e06e00014258"}, +] +google-auth-httplib2 = [ + {file = "google-auth-httplib2-0.1.0.tar.gz", hash = "sha256:a07c39fd632becacd3f07718dfd6021bf396978f03ad3ce4321d060015cc30ac"}, + {file = "google_auth_httplib2-0.1.0-py2.py3-none-any.whl", hash = "sha256:31e49c36c6b5643b57e82617cb3e021e3e1d2df9da63af67252c02fa9c1f4a10"}, +] +google-cloud = [ + {file = "google-cloud-0.34.0.tar.gz", hash = "sha256:01430187cf56df10a9ba775dd547393185d4b40741db0ea5889301f8e7a9d5d3"}, + {file = "google_cloud-0.34.0-py2.py3-none-any.whl", hash = "sha256:fb1ab7b0548fe44b3d538041f0a374505b7f990d448a935ea36649c5ccab5acf"}, +] +google-cloud-core = [ + {file = "google-cloud-core-1.7.2.tar.gz", hash = "sha256:b1030aadcbb2aeb4ee51475426351af83c1072456b918fb8fdb80666c4bb63b5"}, + {file = 
"google_cloud_core-1.7.2-py2.py3-none-any.whl", hash = "sha256:5b77935f3d9573e27007749a3b522f08d764c5b5930ff1527b2ab2743e9f0c15"}, +] +google-cloud-logging = [ + {file = "google-cloud-logging-1.15.1.tar.gz", hash = "sha256:cb0d4af9d684eb8a416f14c39d9fa6314be3adf41db2dd8ee8e30db9e8853d90"}, + {file = "google_cloud_logging-1.15.1-py2.py3-none-any.whl", hash = "sha256:20c7557fd170891eab1a5e428338ad646203ddc519bc2fc57fd59bef14cd3602"}, +] +googleapis-common-protos = [ + {file = "googleapis-common-protos-1.56.0.tar.gz", hash = "sha256:4007500795bcfc269d279f0f7d253ae18d6dc1ff5d5a73613ffe452038b1ec5f"}, + {file = "googleapis_common_protos-1.56.0-py2.py3-none-any.whl", hash = "sha256:60220c89b8bd5272159bed4929ecdc1243ae1f73437883a499a44a1cbc084086"}, +] +grpc-health-checking = [ + {file = "grpc-health-checking-0.0.1.tar.gz", hash = "sha256:0b59d2f23ca89d2b2a1419d1f06ca1363c882067c91b6aa4b84c234835d35473"}, +] +grpcio = [ + {file = "grpcio-1.45.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:0d74a159df9401747e57960f0772f4371486e3281919004efa9df8a82985abee"}, + {file = "grpcio-1.45.0-cp310-cp310-macosx_10_10_universal2.whl", hash = "sha256:4e6d15bfdfa28e5f6d524dd3b29c7dc129cfc578505b067aa97574490c5b70fe"}, + {file = "grpcio-1.45.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:44615be86e5540a18f5e4ca5a0f428d4b1efb800d255cfd9f902a11daca8fd74"}, + {file = "grpcio-1.45.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8b452f715e2cae9e75cb309f59a37f82e5b25f51f0bfc3cd1462de86265cef05"}, + {file = "grpcio-1.45.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db1c45daa35c64f17498af1ba6eb1d0a8d88a8a0b6b322f960ab461e7ef0419e"}, + {file = "grpcio-1.45.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:678a673fe811dad3ed5bd2e2352b79851236e4d718aeaeffc10f372a55954d8d"}, + {file = "grpcio-1.45.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a5c8a08aff0af770c977dcede62fbed53ae7b99adbc184d5299d148bb04652f1"}, + {file 
= "grpcio-1.45.0-cp310-cp310-win32.whl", hash = "sha256:1d764c8a190719301ec6f3b6ddeb48a234604e337d0fbb3184a4ddcda2aca9da"}, + {file = "grpcio-1.45.0-cp310-cp310-win_amd64.whl", hash = "sha256:797f5b750be6ff2905b9d0529a00c1f873d8035a5d01a9801910ace5f0d52a18"}, + {file = "grpcio-1.45.0-cp36-cp36m-linux_armv7l.whl", hash = "sha256:b46772b7eb58c6cb0b468b56d59618694d2c2f2cee2e5b4e83ae9729a46b8af0"}, + {file = "grpcio-1.45.0-cp36-cp36m-macosx_10_10_x86_64.whl", hash = "sha256:2f135e5c8e9acd14f3090fd86dccb9d7c26aea7bfbd4528e8a86ff621d39e610"}, + {file = "grpcio-1.45.0-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:16603b9544a4af135ce4d594a7396602fbe62d1ccaa484b05cb1814c17a3e559"}, + {file = "grpcio-1.45.0-cp36-cp36m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ccba925045c00acc9ce2cc645b6fa9d19767dbb16c9c49921013da412b1d3415"}, + {file = "grpcio-1.45.0-cp36-cp36m-manylinux_2_17_aarch64.whl", hash = "sha256:7262b9d96db79e29049c7eb2b75b03f2b9485fd838209b5ff8e3cca73b2a706c"}, + {file = "grpcio-1.45.0-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a1c1098f35c33b985c312cacea39e2aa66f7ac1462579eed1d3aed2e51fff00d"}, + {file = "grpcio-1.45.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b18c86a9cfbedd0c4e083690fecc82027b3f938100ed0af8db77d52a171eb1e"}, + {file = "grpcio-1.45.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:638364d3603df9e4a1dbc2151b5fe1b491ceecda4e1672be86724e1dfa79c44d"}, + {file = "grpcio-1.45.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:8de79eac582431cb6d05ff5652e68089c40aa0e604ec1630fa52ac926bc44f1b"}, + {file = "grpcio-1.45.0-cp36-cp36m-win32.whl", hash = "sha256:6cf5f1827c182ef9b503d7d01e503c1067f4499d45af792d95ccd1d8b0bea30d"}, + {file = "grpcio-1.45.0-cp36-cp36m-win_amd64.whl", hash = "sha256:4f1a22744f93b38d393b7a83cb607029ac5e2de680cab39957ffdd116590a178"}, + {file = "grpcio-1.45.0-cp37-cp37m-linux_armv7l.whl", hash = 
"sha256:321f84dbc788481f7a3cd12636a133ba5f4d17e57f1c906de5a22fd709c971b5"}, + {file = "grpcio-1.45.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:a33ed7d3e52ddc839e2f020592a4371d805c2ae820fb63b12525058e1810fe46"}, + {file = "grpcio-1.45.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f9f28d8c5343602e1510d4839e38568bcd0ca6353bd98ad9941787584a371a1d"}, + {file = "grpcio-1.45.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:3a40dbb8aac60cf6a86583e2ba74fc2c286f1abc7a3404b25dcd12a49b9f7d8b"}, + {file = "grpcio-1.45.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:b00ce58323dde47d2ea240d10ee745471b9966429c97d9e6567c8d56e02b0372"}, + {file = "grpcio-1.45.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bd4944f35f1e5ab54804c3e37d24921ecc01908ef871cdce6bd52995ea4f985c"}, + {file = "grpcio-1.45.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc135b77f384a84bac67a37947886986be136356446338d64160a30c85f20c6d"}, + {file = "grpcio-1.45.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:35ae55460514ed404ceaa95533b9a79989691b562faf012fc8fb143d8fd16e47"}, + {file = "grpcio-1.45.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:779db3d00c8da1d3efa942387cb0fea9ac6d50124d656024f82f9faefdd016e3"}, + {file = "grpcio-1.45.0-cp37-cp37m-win32.whl", hash = "sha256:aea67bd3cbf93db552c725bc0b4db0acdc6a284d036d1cc32d638305e0f01fd9"}, + {file = "grpcio-1.45.0-cp37-cp37m-win_amd64.whl", hash = "sha256:7fe3ac700cc5ecba9dc9072c0e6cfd2f964ea9f273ce1111eaa27d13aa20ec32"}, + {file = "grpcio-1.45.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:259c126821fefcda298c020a0d83c4a4edac3cf10b1af12a62d250f8192ea1d1"}, + {file = "grpcio-1.45.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:5d05cd1b2b0975bb000ba97ca465565158dc211616c9bbbef5d1b77871974687"}, + {file = "grpcio-1.45.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = 
"sha256:6f2e044a715507fd13c70c928cd90daf8d0295c936a81fd9065a24e58ba7cc7d"}, + {file = "grpcio-1.45.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4d37c526b86c46d229f6117df5dca2510de597ab73c5956bc379ca41f8a1db84"}, + {file = "grpcio-1.45.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:6df338b8d2c328ba91a25e28786d10059dea3bc9115fa1ddad30ba5d459e714a"}, + {file = "grpcio-1.45.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:042921a824e90bf2974dbef7d89937096181298294799fb53e5576d9958884c7"}, + {file = "grpcio-1.45.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb23ed6ed84ae312df03e96c7a7cd3aa5f7e3a1ad7066fdb6cd47f1bd334196c"}, + {file = "grpcio-1.45.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:79582ec821ef10162348170a6e912d93ea257c749320a162dfc3a132ef25ac1b"}, + {file = "grpcio-1.45.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d14d372ea5a51d5ab991aa6d499a26e5a1e3b3f3af93f41826ea610f8a276c9e"}, + {file = "grpcio-1.45.0-cp38-cp38-win32.whl", hash = "sha256:b54444cf4212935a7b98cd26a30ad3a036389e4fd2ff3e461b176af876c7e20b"}, + {file = "grpcio-1.45.0-cp38-cp38-win_amd64.whl", hash = "sha256:da395720d6e9599c754f862f3f75bc0e8ff29fa55259e082e442a9cc916ffbc3"}, + {file = "grpcio-1.45.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:add03308fa2d434628aeaa445e0c75cdb9535f39128eb949b1483ae83fafade6"}, + {file = "grpcio-1.45.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:250d8f18332f3dbd4db00efa91d33d336e58362e9c80e6946d45ecf5e82d95ec"}, + {file = "grpcio-1.45.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:dfca4dfd307b449d0a1e92bc7fbb5224ccf16db384aab412ba6766fc56bdffb6"}, + {file = "grpcio-1.45.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b7f2dc8831045eb0c892bb947e1cba2b1ed639e79a54abff7c4ad90bdd329f78"}, + {file = "grpcio-1.45.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = 
"sha256:2355493a9e71f15d9004b2ab87892cb532e9e98db6882fced2912115eb5631af"}, + {file = "grpcio-1.45.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2798e42d62a0296982276d0bab96fc7d6772cd148357154348355304d6216763"}, + {file = "grpcio-1.45.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fe6acb1439127e0bee773f8a9a3ece290cb4cac4fe8d46b10bc8dda250a990c"}, + {file = "grpcio-1.45.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6774272a59b9ee16fb0d4f53e23716953a22bbb3efe12fdf9a4ee3eec2c4f81f"}, + {file = "grpcio-1.45.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:52f61fcb17d92b87ba47d54b3c9deae09d4f0216a3ea277b7df4b6c1794e6556"}, + {file = "grpcio-1.45.0-cp39-cp39-win32.whl", hash = "sha256:3992c690228126e5652c7a1f61863c1ebfd71369cf2adb0fce86fee1d82d2d27"}, + {file = "grpcio-1.45.0-cp39-cp39-win_amd64.whl", hash = "sha256:220867a53e53b2e201e98c55061e3053e31c0ce613625087242be684d3e8612a"}, + {file = "grpcio-1.45.0.tar.gz", hash = "sha256:ff2c8b965b0fc25cf281961aa46619c10900543effe3f806ef818231c40aaff3"}, +] +grpcio-health-checking = [ + {file = "grpcio-health-checking-1.27.2.tar.gz", hash = "sha256:a017dbbc7f3dfbad707182ad41de92831f7ffe46b3e68d6d1b47e345f5e24fb9"}, +] +grpcio-tools = [ + {file = "grpcio-tools-1.45.0.tar.gz", hash = "sha256:a016cfc21e0d91b3b036d3d4f968d1fdea865dfa03524cb1fbeca84719fd45a2"}, + {file = "grpcio_tools-1.45.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:0431410ba4463bdb03051a6279c040a1bae1d1b12d7dd533ecfba2462725cf11"}, + {file = "grpcio_tools-1.45.0-cp310-cp310-macosx_10_10_universal2.whl", hash = "sha256:52a801063894a85f719108b438b8e71f86ca8059c25824944867879a4e8f6d2c"}, + {file = "grpcio_tools-1.45.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:451e54b490c5d0efcb0ad7a8f7e45ec3cf452de67ee017ccb2bd1e5e45571938"}, + {file = "grpcio_tools-1.45.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:047233beb9773f7da454711b3ec029233b494375db03f3fd2e759702b231c09f"}, + {file = "grpcio_tools-1.45.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:210ea758250b5bb4f986713c01dc200f63b122a181a228114906bb99e5822479"}, + {file = "grpcio_tools-1.45.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e021b911fde2f86c3528f67f7937a41ef56195f637a7556409a4e88c81ab8f2d"}, + {file = "grpcio_tools-1.45.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ad8a74b0626b3762eeddfef2a18944f7cb9ddd80db13997fb4587185c821b10e"}, + {file = "grpcio_tools-1.45.0-cp310-cp310-win32.whl", hash = "sha256:baceb0da2ada3ec4959beef208c3685d3274b0ecc59ac531658e0d35d8f67846"}, + {file = "grpcio_tools-1.45.0-cp310-cp310-win_amd64.whl", hash = "sha256:ee423b5ebd1a6a6fa8d2cd4861a5ee758036e4d08f6a9a5eebc4ec2380bd94ef"}, + {file = "grpcio_tools-1.45.0-cp36-cp36m-linux_armv7l.whl", hash = "sha256:e286083bcf32e7bc26b67c8fb8d59a385047cd1213e205a3f0eafee50c171cc4"}, + {file = "grpcio_tools-1.45.0-cp36-cp36m-macosx_10_10_x86_64.whl", hash = "sha256:f535902209809acf5d1de59e287dd43c81e76906d4e2e51f8068a544ecc84301"}, + {file = "grpcio_tools-1.45.0-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:40842f52243c0ada1d6262a625bca31dd217e12ef7e7b3dbccaabe289d8b24e5"}, + {file = "grpcio_tools-1.45.0-cp36-cp36m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:64ec376631be21e39a631b940dd833273fc709a19ad08d993089a7bb2a958dc0"}, + {file = "grpcio_tools-1.45.0-cp36-cp36m-manylinux_2_17_aarch64.whl", hash = "sha256:b7de099ae16938aeb7c9e0f5178b9ad2be500731847e3a168be7dbad25d70dee"}, + {file = "grpcio_tools-1.45.0-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bbee84e4fcaca03fbfb1b1defa7b226999d6fa468c72578ff900e46caf01a55b"}, + {file = "grpcio_tools-1.45.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:71a3beee3526b2aacbf19ad83a7737a1c9e4f8a1fad3768b644f9d8bf45f29df"}, + {file = 
"grpcio_tools-1.45.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:57e79e34ba8ff1d7594dd5556fbeb1ff7bb985a9f34b54da65ea6c617c02969b"}, + {file = "grpcio_tools-1.45.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:d8656dca6e4f729a77a156cea16141dd7206657bf303b67c157530c7e7741216"}, + {file = "grpcio_tools-1.45.0-cp36-cp36m-win32.whl", hash = "sha256:527aa1105cf2ef8e979ccbb08b0e80330cbec736b67da54d966fc9f3860d7145"}, + {file = "grpcio_tools-1.45.0-cp36-cp36m-win_amd64.whl", hash = "sha256:31bd8e8e5b383943e72b5fb8e157ee730aa6b52e8a15bb13035166e1b5b9c897"}, + {file = "grpcio_tools-1.45.0-cp37-cp37m-linux_armv7l.whl", hash = "sha256:8f64dd3098edcdc99a0ee9e680ae674a86f40f65c125a88a11610c28503844ec"}, + {file = "grpcio_tools-1.45.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:e8808de2aac8c7b2938602cd121b37b3c44e1e80cadb4b48dc695f205ff71e2c"}, + {file = "grpcio_tools-1.45.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:ebb17cb82cd921b8950ddc080ba5ed9a3fc06e45942050f6127872bd6fc46325"}, + {file = "grpcio_tools-1.45.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:0c1a1f1a794046823aac6ae207746b503b26db992837e7b06cb4bed2dc8520ae"}, + {file = "grpcio_tools-1.45.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:8d9be3e3bf85820ce43ff00d800b6ad61448ab8c458c12c36696f76b81808aaa"}, + {file = "grpcio_tools-1.45.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:319f4905e398558c9c0d508b685976b2728ff5c6629613debb6c153e49e5ad18"}, + {file = "grpcio_tools-1.45.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7db11a65e07410db1c31cbeb9afe344a6bd88a63dcd819557707ca7318478727"}, + {file = "grpcio_tools-1.45.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:477230d6d5054fc949928cbc1d50db54a5cece5d233b8ef3e0aebbf60939bdd8"}, + {file = "grpcio_tools-1.45.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = 
"sha256:a33edaa319bafd7542105d25abc6d9a8a331d4684fb5886863d1c9a6cd47fa67"}, + {file = "grpcio_tools-1.45.0-cp37-cp37m-win32.whl", hash = "sha256:690b520660008687af9eb956981f6da2134f8ce299434a22314c00e9dac29fcb"}, + {file = "grpcio_tools-1.45.0-cp37-cp37m-win_amd64.whl", hash = "sha256:47c6f5c7d9ed33726ed6ed1630767a8e08e8d10497cba07ba9001b54a599d486"}, + {file = "grpcio_tools-1.45.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:bfea11559bec0935b84174a2bb10ec67ca97367d71d11facbbf9bbf8f5693067"}, + {file = "grpcio_tools-1.45.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:36516f6f9957f6295d81766b1855be858055fc17cebcd4076471d697597cb2c6"}, + {file = "grpcio_tools-1.45.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:1b4b1727cdfda8854089f03df2d2b2b0171243c54048ac1359dd89ab5c211180"}, + {file = "grpcio_tools-1.45.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b2425c9cbc1c400a4fd7adcefde7c524c46e7f42f3f2cb52a15399bc559d4fe0"}, + {file = "grpcio_tools-1.45.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:1b4a5dcd433377cedb18cff3c79050c638ce7d6223de9ad9119157994558e37b"}, + {file = "grpcio_tools-1.45.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ed8306de5f8076d2fa0ba2a787c7fd1aecc4b901b2af1113acec21cea8178caf"}, + {file = "grpcio_tools-1.45.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:19ce25a2d573b2057af7d57aa7c80819db90cb92d20eafc8e8ae1448fe9941b1"}, + {file = "grpcio_tools-1.45.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e3729ecd8bc42e783faba7243d315b1beb44021bc146afec0537791b47980878"}, + {file = "grpcio_tools-1.45.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b7cf16c448e19b10f9fb13327d930925ef053bb21dcd858a20184d2a8f55ce18"}, + {file = "grpcio_tools-1.45.0-cp38-cp38-win32.whl", hash = "sha256:367dfa639abc4538843043ab2dd6bf5b0232ddc519418f1e1287e855e7cf27c4"}, + {file = "grpcio_tools-1.45.0-cp38-cp38-win_amd64.whl", hash = 
"sha256:115ae6a8df239987b662b228771ab9c1d4db3cc4db3da6d184a1b95bc9759d91"}, + {file = "grpcio_tools-1.45.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:6e7b53a8a605f91e0f9584481b74b008f321ea22461f075eeaebaf98b2de372f"}, + {file = "grpcio_tools-1.45.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:ad49ad7f6f5ea305d92ce33f773f7d9a667742010d1ec02a1a0626b735cf57dd"}, + {file = "grpcio_tools-1.45.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:713ffedf8c01dc34c1d69121c0b3421f557674681ffc0e004205d9bb0fc994b9"}, + {file = "grpcio_tools-1.45.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:bf423863263933a98706e8a8b775c81d62e911222dbeb2b62ba60da815394ae2"}, + {file = "grpcio_tools-1.45.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:5cfd3e7c082a3a01f65fdf3a3ab9168dea36102bf7326edfe03a1f592fe244e4"}, + {file = "grpcio_tools-1.45.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6437665cd46e4dd39343c69da4c18f761868c632793ce6c436942d1b33fd930"}, + {file = "grpcio_tools-1.45.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:79d951338e18c90f9e412cbe3e747530c94bd7c71963574e70cf7cdb7c113c01"}, + {file = "grpcio_tools-1.45.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:737d1c0e359b83f45b5d7170ea9bb08e5f90dd36126097b7a8e7ad62ade867be"}, + {file = "grpcio_tools-1.45.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4b6e5c48eb28a215f9d6e76d646406d2505c1a5ae8864273960a73e5c09fe982"}, + {file = "grpcio_tools-1.45.0-cp39-cp39-win32.whl", hash = "sha256:63d66c90f816601385a085726eb678c6c872aed7911df4232d906fc240159c83"}, + {file = "grpcio_tools-1.45.0-cp39-cp39-win_amd64.whl", hash = "sha256:54dfd7d777664973b794b2fe585748f87f1066a4d6bb0af4f918b353b91bd434"}, +] +httplib2 = [ + {file = "httplib2-0.20.4-py3-none-any.whl", hash = "sha256:8b6a905cb1c79eefd03f8669fd993c36dc341f7c558f056cb5a33b5c2f458543"}, + {file = "httplib2-0.20.4.tar.gz", hash = 
"sha256:58a98e45b4b1a48273073f905d2961666ecf0fbac4250ea5b47aef259eb5c585"}, +] +idna = [ + {file = "idna-3.3-py3-none-any.whl", hash = "sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff"}, + {file = "idna-3.3.tar.gz", hash = "sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d"}, +] +importlib-metadata = [ + {file = "importlib_metadata-4.11.3-py3-none-any.whl", hash = "sha256:1208431ca90a8cca1a6b8af391bb53c1a2db74e5d1cef6ddced95d4b2062edc6"}, + {file = "importlib_metadata-4.11.3.tar.gz", hash = "sha256:ea4c597ebf37142f827b8f39299579e31685c31d3a438b59f469406afd0f2539"}, +] +iniconfig = [ + {file = "iniconfig-1.1.1-py2.py3-none-any.whl", hash = "sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3"}, + {file = "iniconfig-1.1.1.tar.gz", hash = "sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32"}, +] +marshmallow = [ + {file = "marshmallow-3.15.0-py3-none-any.whl", hash = "sha256:ff79885ed43b579782f48c251d262e062bce49c65c52412458769a4fb57ac30f"}, + {file = "marshmallow-3.15.0.tar.gz", hash = "sha256:2aaaab4f01ef4f5a011a21319af9fce17ab13bf28a026d1252adab0e035648d5"}, +] +mccabe = [ + {file = "mccabe-0.6.1-py2.py3-none-any.whl", hash = "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42"}, + {file = "mccabe-0.6.1.tar.gz", hash = "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"}, +] +multidict = [ + {file = "multidict-6.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b9e95a740109c6047602f4db4da9949e6c5945cefbad34a1299775ddc9a62e2"}, + {file = "multidict-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ac0e27844758d7177989ce406acc6a83c16ed4524ebc363c1f748cba184d89d3"}, + {file = "multidict-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:041b81a5f6b38244b34dc18c7b6aba91f9cdaf854d9a39e5ff0b58e2b5773b9c"}, + {file = "multidict-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:5fdda29a3c7e76a064f2477c9aab1ba96fd94e02e386f1e665bca1807fc5386f"}, + {file = "multidict-6.0.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3368bf2398b0e0fcbf46d85795adc4c259299fec50c1416d0f77c0a843a3eed9"}, + {file = "multidict-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f4f052ee022928d34fe1f4d2bc743f32609fb79ed9c49a1710a5ad6b2198db20"}, + {file = "multidict-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:225383a6603c086e6cef0f2f05564acb4f4d5f019a4e3e983f572b8530f70c88"}, + {file = "multidict-6.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50bd442726e288e884f7be9071016c15a8742eb689a593a0cac49ea093eef0a7"}, + {file = "multidict-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:47e6a7e923e9cada7c139531feac59448f1f47727a79076c0b1ee80274cd8eee"}, + {file = "multidict-6.0.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:0556a1d4ea2d949efe5fd76a09b4a82e3a4a30700553a6725535098d8d9fb672"}, + {file = "multidict-6.0.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:626fe10ac87851f4cffecee161fc6f8f9853f0f6f1035b59337a51d29ff3b4f9"}, + {file = "multidict-6.0.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:8064b7c6f0af936a741ea1efd18690bacfbae4078c0c385d7c3f611d11f0cf87"}, + {file = "multidict-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2d36e929d7f6a16d4eb11b250719c39560dd70545356365b494249e2186bc389"}, + {file = "multidict-6.0.2-cp310-cp310-win32.whl", hash = "sha256:fcb91630817aa8b9bc4a74023e4198480587269c272c58b3279875ed7235c293"}, + {file = "multidict-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:8cbf0132f3de7cc6c6ce00147cc78e6439ea736cee6bca4f068bcf892b0fd658"}, + {file = "multidict-6.0.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:05f6949d6169878a03e607a21e3b862eaf8e356590e8bdae4227eedadacf6e51"}, + {file = 
"multidict-6.0.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2c2e459f7050aeb7c1b1276763364884595d47000c1cddb51764c0d8976e608"}, + {file = "multidict-6.0.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d0509e469d48940147e1235d994cd849a8f8195e0bca65f8f5439c56e17872a3"}, + {file = "multidict-6.0.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:514fe2b8d750d6cdb4712346a2c5084a80220821a3e91f3f71eec11cf8d28fd4"}, + {file = "multidict-6.0.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:19adcfc2a7197cdc3987044e3f415168fc5dc1f720c932eb1ef4f71a2067e08b"}, + {file = "multidict-6.0.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b9d153e7f1f9ba0b23ad1568b3b9e17301e23b042c23870f9ee0522dc5cc79e8"}, + {file = "multidict-6.0.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:aef9cc3d9c7d63d924adac329c33835e0243b5052a6dfcbf7732a921c6e918ba"}, + {file = "multidict-6.0.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:4571f1beddff25f3e925eea34268422622963cd8dc395bb8778eb28418248e43"}, + {file = "multidict-6.0.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:d48b8ee1d4068561ce8033d2c344cf5232cb29ee1a0206a7b828c79cbc5982b8"}, + {file = "multidict-6.0.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:45183c96ddf61bf96d2684d9fbaf6f3564d86b34cb125761f9a0ef9e36c1d55b"}, + {file = "multidict-6.0.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:75bdf08716edde767b09e76829db8c1e5ca9d8bb0a8d4bd94ae1eafe3dac5e15"}, + {file = "multidict-6.0.2-cp37-cp37m-win32.whl", hash = "sha256:a45e1135cb07086833ce969555df39149680e5471c04dfd6a915abd2fc3f6dbc"}, + {file = "multidict-6.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6f3cdef8a247d1eafa649085812f8a310e728bdf3900ff6c434eafb2d443b23a"}, + {file = "multidict-6.0.2-cp38-cp38-macosx_10_9_universal2.whl", hash = 
"sha256:0327292e745a880459ef71be14e709aaea2f783f3537588fb4ed09b6c01bca60"}, + {file = "multidict-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e875b6086e325bab7e680e4316d667fc0e5e174bb5611eb16b3ea121c8951b86"}, + {file = "multidict-6.0.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:feea820722e69451743a3d56ad74948b68bf456984d63c1a92e8347b7b88452d"}, + {file = "multidict-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cc57c68cb9139c7cd6fc39f211b02198e69fb90ce4bc4a094cf5fe0d20fd8b0"}, + {file = "multidict-6.0.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:497988d6b6ec6ed6f87030ec03280b696ca47dbf0648045e4e1d28b80346560d"}, + {file = "multidict-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:89171b2c769e03a953d5969b2f272efa931426355b6c0cb508022976a17fd376"}, + {file = "multidict-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:684133b1e1fe91eda8fa7447f137c9490a064c6b7f392aa857bba83a28cfb693"}, + {file = "multidict-6.0.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd9fc9c4849a07f3635ccffa895d57abce554b467d611a5009ba4f39b78a8849"}, + {file = "multidict-6.0.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e07c8e79d6e6fd37b42f3250dba122053fddb319e84b55dd3a8d6446e1a7ee49"}, + {file = "multidict-6.0.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4070613ea2227da2bfb2c35a6041e4371b0af6b0be57f424fe2318b42a748516"}, + {file = "multidict-6.0.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:47fbeedbf94bed6547d3aa632075d804867a352d86688c04e606971595460227"}, + {file = "multidict-6.0.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:5774d9218d77befa7b70d836004a768fb9aa4fdb53c97498f4d8d3f67bb9cfa9"}, + {file = "multidict-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2957489cba47c2539a8eb7ab32ff49101439ccf78eab724c828c1a54ff3ff98d"}, + {file = 
"multidict-6.0.2-cp38-cp38-win32.whl", hash = "sha256:e5b20e9599ba74391ca0cfbd7b328fcc20976823ba19bc573983a25b32e92b57"}, + {file = "multidict-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:8004dca28e15b86d1b1372515f32eb6f814bdf6f00952699bdeb541691091f96"}, + {file = "multidict-6.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2e4a0785b84fb59e43c18a015ffc575ba93f7d1dbd272b4cdad9f5134b8a006c"}, + {file = "multidict-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6701bf8a5d03a43375909ac91b6980aea74b0f5402fbe9428fc3f6edf5d9677e"}, + {file = "multidict-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a007b1638e148c3cfb6bf0bdc4f82776cef0ac487191d093cdc316905e504071"}, + {file = "multidict-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:07a017cfa00c9890011628eab2503bee5872f27144936a52eaab449be5eaf032"}, + {file = "multidict-6.0.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c207fff63adcdf5a485969131dc70e4b194327666b7e8a87a97fbc4fd80a53b2"}, + {file = "multidict-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:373ba9d1d061c76462d74e7de1c0c8e267e9791ee8cfefcf6b0b2495762c370c"}, + {file = "multidict-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfba7c6d5d7c9099ba21f84662b037a0ffd4a5e6b26ac07d19e423e6fdf965a9"}, + {file = "multidict-6.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:19d9bad105dfb34eb539c97b132057a4e709919ec4dd883ece5838bcbf262b80"}, + {file = "multidict-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:de989b195c3d636ba000ee4281cd03bb1234635b124bf4cd89eeee9ca8fcb09d"}, + {file = "multidict-6.0.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7c40b7bbece294ae3a87c1bc2abff0ff9beef41d14188cda94ada7bcea99b0fb"}, + {file = "multidict-6.0.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = 
"sha256:d16cce709ebfadc91278a1c005e3c17dd5f71f5098bfae1035149785ea6e9c68"}, + {file = "multidict-6.0.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:a2c34a93e1d2aa35fbf1485e5010337c72c6791407d03aa5f4eed920343dd360"}, + {file = "multidict-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:feba80698173761cddd814fa22e88b0661e98cb810f9f986c54aa34d281e4937"}, + {file = "multidict-6.0.2-cp39-cp39-win32.whl", hash = "sha256:23b616fdc3c74c9fe01d76ce0d1ce872d2d396d8fa8e4899398ad64fb5aa214a"}, + {file = "multidict-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:4bae31803d708f6f15fd98be6a6ac0b6958fcf68fda3c77a048a4f9073704aae"}, + {file = "multidict-6.0.2.tar.gz", hash = "sha256:5ff3bd75f38e4c43f1f470f2df7a4d430b821c4ce22be384e1459cb57d6bb013"}, +] +mypy-extensions = [ + {file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"}, + {file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"}, +] +numpy = [ + {file = "numpy-1.21.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8737609c3bbdd48e380d463134a35ffad3b22dc56295eff6f79fd85bd0eeeb25"}, + {file = "numpy-1.21.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:fdffbfb6832cd0b300995a2b08b8f6fa9f6e856d562800fea9182316d99c4e8e"}, + {file = "numpy-1.21.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3820724272f9913b597ccd13a467cc492a0da6b05df26ea09e78b171a0bb9da6"}, + {file = "numpy-1.21.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f17e562de9edf691a42ddb1eb4a5541c20dd3f9e65b09ded2beb0799c0cf29bb"}, + {file = "numpy-1.21.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f30427731561ce75d7048ac254dbe47a2ba576229250fb60f0fb74db96501a1"}, + {file = "numpy-1.21.6-cp310-cp310-win32.whl", hash = "sha256:d4bf4d43077db55589ffc9009c0ba0a94fa4908b9586d6ccce2e0b164c86303c"}, + {file = 
"numpy-1.21.6-cp310-cp310-win_amd64.whl", hash = "sha256:d136337ae3cc69aa5e447e78d8e1514be8c3ec9b54264e680cf0b4bd9011574f"}, + {file = "numpy-1.21.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:6aaf96c7f8cebc220cdfc03f1d5a31952f027dda050e5a703a0d1c396075e3e7"}, + {file = "numpy-1.21.6-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:67c261d6c0a9981820c3a149d255a76918278a6b03b6a036800359aba1256d46"}, + {file = "numpy-1.21.6-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a6be4cb0ef3b8c9250c19cc122267263093eee7edd4e3fa75395dfda8c17a8e2"}, + {file = "numpy-1.21.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c4068a8c44014b2d55f3c3f574c376b2494ca9cc73d2f1bd692382b6dffe3db"}, + {file = "numpy-1.21.6-cp37-cp37m-win32.whl", hash = "sha256:7c7e5fa88d9ff656e067876e4736379cc962d185d5cd808014a8a928d529ef4e"}, + {file = "numpy-1.21.6-cp37-cp37m-win_amd64.whl", hash = "sha256:bcb238c9c96c00d3085b264e5c1a1207672577b93fa666c3b14a45240b14123a"}, + {file = "numpy-1.21.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:82691fda7c3f77c90e62da69ae60b5ac08e87e775b09813559f8901a88266552"}, + {file = "numpy-1.21.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:643843bcc1c50526b3a71cd2ee561cf0d8773f062c8cbaf9ffac9fdf573f83ab"}, + {file = "numpy-1.21.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:357768c2e4451ac241465157a3e929b265dfac85d9214074985b1786244f2ef3"}, + {file = "numpy-1.21.6-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9f411b2c3f3d76bba0865b35a425157c5dcf54937f82bbeb3d3c180789dd66a6"}, + {file = "numpy-1.21.6-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4aa48afdce4660b0076a00d80afa54e8a97cd49f457d68a4342d188a09451c1a"}, + {file = "numpy-1.21.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d6a96eef20f639e6a97d23e57dd0c1b1069a7b4fd7027482a4c5c451cd7732f4"}, + {file = 
"numpy-1.21.6-cp38-cp38-win32.whl", hash = "sha256:5c3c8def4230e1b959671eb959083661b4a0d2e9af93ee339c7dada6759a9470"}, + {file = "numpy-1.21.6-cp38-cp38-win_amd64.whl", hash = "sha256:bf2ec4b75d0e9356edea834d1de42b31fe11f726a81dfb2c2112bc1eaa508fcf"}, + {file = "numpy-1.21.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:4391bd07606be175aafd267ef9bea87cf1b8210c787666ce82073b05f202add1"}, + {file = "numpy-1.21.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:67f21981ba2f9d7ba9ade60c9e8cbaa8cf8e9ae51673934480e45cf55e953673"}, + {file = "numpy-1.21.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ee5ec40fdd06d62fe5d4084bef4fd50fd4bb6bfd2bf519365f569dc470163ab0"}, + {file = "numpy-1.21.6-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:1dbe1c91269f880e364526649a52eff93ac30035507ae980d2fed33aaee633ac"}, + {file = "numpy-1.21.6-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:d9caa9d5e682102453d96a0ee10c7241b72859b01a941a397fd965f23b3e016b"}, + {file = "numpy-1.21.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58459d3bad03343ac4b1b42ed14d571b8743dc80ccbf27444f266729df1d6f5b"}, + {file = "numpy-1.21.6-cp39-cp39-win32.whl", hash = "sha256:7f5ae4f304257569ef3b948810816bc87c9146e8c446053539947eedeaa32786"}, + {file = "numpy-1.21.6-cp39-cp39-win_amd64.whl", hash = "sha256:e31f0bb5928b793169b87e3d1e070f2342b22d5245c755e2b81caa29756246c3"}, + {file = "numpy-1.21.6-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dd1c8f6bd65d07d3810b90d02eba7997e32abbdf1277a481d698969e921a3be0"}, + {file = "numpy-1.21.6.zip", hash = "sha256:ecb55251139706669fdec2ff073c98ef8e9a84473e51e716211b41aa0f18e656"}, +] +packaging = [ + {file = "packaging-21.3-py3-none-any.whl", hash = "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"}, + {file = "packaging-21.3.tar.gz", hash = "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb"}, +] +pandas 
= [ + {file = "pandas-1.3.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:62d5b5ce965bae78f12c1c0df0d387899dd4211ec0bdc52822373f13a3a022b9"}, + {file = "pandas-1.3.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:adfeb11be2d54f275142c8ba9bf67acee771b7186a5745249c7d5a06c670136b"}, + {file = "pandas-1.3.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:60a8c055d58873ad81cae290d974d13dd479b82cbb975c3e1fa2cf1920715296"}, + {file = "pandas-1.3.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd541ab09e1f80a2a1760032d665f6e032d8e44055d602d65eeea6e6e85498cb"}, + {file = "pandas-1.3.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2651d75b9a167cc8cc572cf787ab512d16e316ae00ba81874b560586fa1325e0"}, + {file = "pandas-1.3.5-cp310-cp310-win_amd64.whl", hash = "sha256:aaf183a615ad790801fa3cf2fa450e5b6d23a54684fe386f7e3208f8b9bfbef6"}, + {file = "pandas-1.3.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:344295811e67f8200de2390093aeb3c8309f5648951b684d8db7eee7d1c81fb7"}, + {file = "pandas-1.3.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:552020bf83b7f9033b57cbae65589c01e7ef1544416122da0c79140c93288f56"}, + {file = "pandas-1.3.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5cce0c6bbeb266b0e39e35176ee615ce3585233092f685b6a82362523e59e5b4"}, + {file = "pandas-1.3.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d28a3c65463fd0d0ba8bbb7696b23073efee0510783340a44b08f5e96ffce0c"}, + {file = "pandas-1.3.5-cp37-cp37m-win32.whl", hash = "sha256:a62949c626dd0ef7de11de34b44c6475db76995c2064e2d99c6498c3dba7fe58"}, + {file = "pandas-1.3.5-cp37-cp37m-win_amd64.whl", hash = "sha256:8025750767e138320b15ca16d70d5cdc1886e8f9cc56652d89735c016cd8aea6"}, + {file = "pandas-1.3.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:fe95bae4e2d579812865db2212bb733144e34d0c6785c0685329e5b60fcb85dd"}, + 
{file = "pandas-1.3.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f261553a1e9c65b7a310302b9dbac31cf0049a51695c14ebe04e4bfd4a96f02"}, + {file = "pandas-1.3.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b6dbec5f3e6d5dc80dcfee250e0a2a652b3f28663492f7dab9a24416a48ac39"}, + {file = "pandas-1.3.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d3bc49af96cd6285030a64779de5b3688633a07eb75c124b0747134a63f4c05f"}, + {file = "pandas-1.3.5-cp38-cp38-win32.whl", hash = "sha256:b6b87b2fb39e6383ca28e2829cddef1d9fc9e27e55ad91ca9c435572cdba51bf"}, + {file = "pandas-1.3.5-cp38-cp38-win_amd64.whl", hash = "sha256:a395692046fd8ce1edb4c6295c35184ae0c2bbe787ecbe384251da609e27edcb"}, + {file = "pandas-1.3.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bd971a3f08b745a75a86c00b97f3007c2ea175951286cdda6abe543e687e5f2f"}, + {file = "pandas-1.3.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37f06b59e5bc05711a518aa10beaec10942188dccb48918bb5ae602ccbc9f1a0"}, + {file = "pandas-1.3.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c21778a688d3712d35710501f8001cdbf96eb70a7c587a3d5613573299fdca6"}, + {file = "pandas-1.3.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3345343206546545bc26a05b4602b6a24385b5ec7c75cb6059599e3d56831da2"}, + {file = "pandas-1.3.5-cp39-cp39-win32.whl", hash = "sha256:c69406a2808ba6cf580c2255bcf260b3f214d2664a3a4197d0e640f573b46fd3"}, + {file = "pandas-1.3.5-cp39-cp39-win_amd64.whl", hash = "sha256:32e1a26d5ade11b547721a72f9bfc4bd113396947606e00d5b4a5b79b3dcb006"}, + {file = "pandas-1.3.5.tar.gz", hash = "sha256:1e4285f5de1012de20ca46b188ccf33521bff61ba5c5ebd78b4fb28e5416a9f1"}, +] +pathspec = [ + {file = "pathspec-0.9.0-py2.py3-none-any.whl", hash = "sha256:7d15c4ddb0b5c802d161efc417ec1a2558ea2653c2e8ad9c19098201dc1c993a"}, + {file 
= "pathspec-0.9.0.tar.gz", hash = "sha256:e564499435a2673d586f6b2130bb5b95f04a3ba06f81b8f895b651a3c76aabb1"}, +] +platformdirs = [ + {file = "platformdirs-2.5.2-py3-none-any.whl", hash = "sha256:027d8e83a2d7de06bbac4e5ef7e023c02b863d7ea5d079477e722bb41ab25788"}, + {file = "platformdirs-2.5.2.tar.gz", hash = "sha256:58c8abb07dcb441e6ee4b11d8df0ac856038f944ab98b7be6b27b2a3c7feef19"}, +] +pluggy = [ + {file = "pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"}, + {file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"}, +] +prometheus-async = [ + {file = "prometheus_async-19.2.0-py2.py3-none-any.whl", hash = "sha256:227f516e5bf98a0dc602348381e182358f8b2ed24a8db05e8e34d9cf027bab83"}, + {file = "prometheus_async-19.2.0.tar.gz", hash = "sha256:3cc68d1f39e9bbf16dbd0b51103d87671b3cbd1d75a72cda472cd9a35cc9d0d2"}, +] +prometheus-client = [ + {file = "prometheus_client-0.7.1.tar.gz", hash = "sha256:71cd24a2b3eb335cb800c7159f423df1bd4dcd5171b234be15e3f31ec9f622da"}, +] +protobuf = [ + {file = "protobuf-3.20.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3cc797c9d15d7689ed507b165cd05913acb992d78b379f6014e013f9ecb20996"}, + {file = "protobuf-3.20.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:ff8d8fa42675249bb456f5db06c00de6c2f4c27a065955917b28c4f15978b9c3"}, + {file = "protobuf-3.20.1-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:cd68be2559e2a3b84f517fb029ee611546f7812b1fdd0aa2ecc9bc6ec0e4fdde"}, + {file = "protobuf-3.20.1-cp310-cp310-win32.whl", hash = "sha256:9016d01c91e8e625141d24ec1b20fed584703e527d28512aa8c8707f105a683c"}, + {file = "protobuf-3.20.1-cp310-cp310-win_amd64.whl", hash = "sha256:32ca378605b41fd180dfe4e14d3226386d8d1b002ab31c969c366549e66a2bb7"}, + {file = "protobuf-3.20.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:9be73ad47579abc26c12024239d3540e6b765182a91dbc88e23658ab71767153"}, 
+ {file = "protobuf-3.20.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:097c5d8a9808302fb0da7e20edf0b8d4703274d140fd25c5edabddcde43e081f"}, + {file = "protobuf-3.20.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e250a42f15bf9d5b09fe1b293bdba2801cd520a9f5ea2d7fb7536d4441811d20"}, + {file = "protobuf-3.20.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:cdee09140e1cd184ba9324ec1df410e7147242b94b5f8b0c64fc89e38a8ba531"}, + {file = "protobuf-3.20.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:af0ebadc74e281a517141daad9d0f2c5d93ab78e9d455113719a45a49da9db4e"}, + {file = "protobuf-3.20.1-cp37-cp37m-win32.whl", hash = "sha256:755f3aee41354ae395e104d62119cb223339a8f3276a0cd009ffabfcdd46bb0c"}, + {file = "protobuf-3.20.1-cp37-cp37m-win_amd64.whl", hash = "sha256:62f1b5c4cd6c5402b4e2d63804ba49a327e0c386c99b1675c8a0fefda23b2067"}, + {file = "protobuf-3.20.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:06059eb6953ff01e56a25cd02cca1a9649a75a7e65397b5b9b4e929ed71d10cf"}, + {file = "protobuf-3.20.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:cb29edb9eab15742d791e1025dd7b6a8f6fcb53802ad2f6e3adcb102051063ab"}, + {file = "protobuf-3.20.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:69ccfdf3657ba59569c64295b7d51325f91af586f8d5793b734260dfe2e94e2c"}, + {file = "protobuf-3.20.1-cp38-cp38-win32.whl", hash = "sha256:dd5789b2948ca702c17027c84c2accb552fc30f4622a98ab5c51fcfe8c50d3e7"}, + {file = "protobuf-3.20.1-cp38-cp38-win_amd64.whl", hash = "sha256:77053d28427a29987ca9caf7b72ccafee011257561259faba8dd308fda9a8739"}, + {file = "protobuf-3.20.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6f50601512a3d23625d8a85b1638d914a0970f17920ff39cec63aaef80a93fb7"}, + {file = "protobuf-3.20.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:284f86a6207c897542d7e956eb243a36bb8f9564c1742b253462386e96c6b78f"}, + {file = "protobuf-3.20.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", 
hash = "sha256:7403941f6d0992d40161aa8bb23e12575637008a5a02283a930addc0508982f9"}, + {file = "protobuf-3.20.1-cp39-cp39-win32.whl", hash = "sha256:db977c4ca738dd9ce508557d4fce0f5aebd105e158c725beec86feb1f6bc20d8"}, + {file = "protobuf-3.20.1-cp39-cp39-win_amd64.whl", hash = "sha256:7e371f10abe57cee5021797126c93479f59fccc9693dafd6bd5633ab67808a91"}, + {file = "protobuf-3.20.1-py2.py3-none-any.whl", hash = "sha256:adfc6cf69c7f8c50fd24c793964eef18f0ac321315439d94945820612849c388"}, + {file = "protobuf-3.20.1.tar.gz", hash = "sha256:adc31566d027f45efe3f44eeb5b1f329da43891634d61c75a5944e9be6dd42c9"}, +] +py = [ + {file = "py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"}, + {file = "py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719"}, +] +pyasn1 = [ + {file = "pyasn1-0.4.8-py2.4.egg", hash = "sha256:fec3e9d8e36808a28efb59b489e4528c10ad0f480e57dcc32b4de5c9d8c9fdf3"}, + {file = "pyasn1-0.4.8-py2.5.egg", hash = "sha256:0458773cfe65b153891ac249bcf1b5f8f320b7c2ce462151f8fa74de8934becf"}, + {file = "pyasn1-0.4.8-py2.6.egg", hash = "sha256:5c9414dcfede6e441f7e8f81b43b34e834731003427e5b09e4e00e3172a10f00"}, + {file = "pyasn1-0.4.8-py2.7.egg", hash = "sha256:6e7545f1a61025a4e58bb336952c5061697da694db1cae97b116e9c46abcf7c8"}, + {file = "pyasn1-0.4.8-py2.py3-none-any.whl", hash = "sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d"}, + {file = "pyasn1-0.4.8-py3.1.egg", hash = "sha256:78fa6da68ed2727915c4767bb386ab32cdba863caa7dbe473eaae45f9959da86"}, + {file = "pyasn1-0.4.8-py3.2.egg", hash = "sha256:08c3c53b75eaa48d71cf8c710312316392ed40899cb34710d092e96745a358b7"}, + {file = "pyasn1-0.4.8-py3.3.egg", hash = "sha256:03840c999ba71680a131cfaee6fab142e1ed9bbd9c693e285cc6aca0d555e576"}, + {file = "pyasn1-0.4.8-py3.4.egg", hash = "sha256:7ab8a544af125fb704feadb008c99a88805126fb525280b2270bb25cc1d78a12"}, + {file = "pyasn1-0.4.8-py3.5.egg", hash = 
"sha256:e89bf84b5437b532b0803ba5c9a5e054d21fec423a89952a74f87fa2c9b7bce2"}, + {file = "pyasn1-0.4.8-py3.6.egg", hash = "sha256:014c0e9976956a08139dc0712ae195324a75e142284d5f87f1a87ee1b068a359"}, + {file = "pyasn1-0.4.8-py3.7.egg", hash = "sha256:99fcc3c8d804d1bc6d9a099921e39d827026409a58f2a720dcdb89374ea0c776"}, + {file = "pyasn1-0.4.8.tar.gz", hash = "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba"}, +] +pyasn1-modules = [ + {file = "pyasn1-modules-0.2.8.tar.gz", hash = "sha256:905f84c712230b2c592c19470d3ca8d552de726050d1d1716282a1f6146be65e"}, + {file = "pyasn1_modules-0.2.8-py2.4.egg", hash = "sha256:0fe1b68d1e486a1ed5473f1302bd991c1611d319bba158e98b106ff86e1d7199"}, + {file = "pyasn1_modules-0.2.8-py2.5.egg", hash = "sha256:fe0644d9ab041506b62782e92b06b8c68cca799e1a9636ec398675459e031405"}, + {file = "pyasn1_modules-0.2.8-py2.6.egg", hash = "sha256:a99324196732f53093a84c4369c996713eb8c89d360a496b599fb1a9c47fc3eb"}, + {file = "pyasn1_modules-0.2.8-py2.7.egg", hash = "sha256:0845a5582f6a02bb3e1bde9ecfc4bfcae6ec3210dd270522fee602365430c3f8"}, + {file = "pyasn1_modules-0.2.8-py2.py3-none-any.whl", hash = "sha256:a50b808ffeb97cb3601dd25981f6b016cbb3d31fbf57a8b8a87428e6158d0c74"}, + {file = "pyasn1_modules-0.2.8-py3.1.egg", hash = "sha256:f39edd8c4ecaa4556e989147ebf219227e2cd2e8a43c7e7fcb1f1c18c5fd6a3d"}, + {file = "pyasn1_modules-0.2.8-py3.2.egg", hash = "sha256:b80486a6c77252ea3a3e9b1e360bc9cf28eaac41263d173c032581ad2f20fe45"}, + {file = "pyasn1_modules-0.2.8-py3.3.egg", hash = "sha256:65cebbaffc913f4fe9e4808735c95ea22d7a7775646ab690518c056784bc21b4"}, + {file = "pyasn1_modules-0.2.8-py3.4.egg", hash = "sha256:15b7c67fabc7fc240d87fb9aabf999cf82311a6d6fb2c70d00d3d0604878c811"}, + {file = "pyasn1_modules-0.2.8-py3.5.egg", hash = "sha256:426edb7a5e8879f1ec54a1864f16b882c2837bfd06eee62f2c982315ee2473ed"}, + {file = "pyasn1_modules-0.2.8-py3.6.egg", hash = "sha256:cbac4bc38d117f2a49aeedec4407d23e8866ea4ac27ff2cf7fb3e5b570df19e0"}, + {file = 
"pyasn1_modules-0.2.8-py3.7.egg", hash = "sha256:c29a5e5cc7a3f05926aff34e097e84f8589cd790ce0ed41b67aed6857b26aafd"}, +] +pycodestyle = [ + {file = "pycodestyle-2.7.0-py2.py3-none-any.whl", hash = "sha256:514f76d918fcc0b55c6680472f0a37970994e07bbb80725808c17089be302068"}, + {file = "pycodestyle-2.7.0.tar.gz", hash = "sha256:c389c1d06bf7904078ca03399a4816f974a1d590090fecea0c63ec26ebaf1cef"}, +] +pyflakes = [ + {file = "pyflakes-2.3.1-py2.py3-none-any.whl", hash = "sha256:7893783d01b8a89811dd72d7dfd4d84ff098e5eed95cfa8905b22bbffe52efc3"}, + {file = "pyflakes-2.3.1.tar.gz", hash = "sha256:f5bc8ecabc05bb9d291eb5203d6810b49040f6ff446a756326104746cc00c1db"}, +] +pymysql = [ + {file = "PyMySQL-1.0.2-py3-none-any.whl", hash = "sha256:41fc3a0c5013d5f039639442321185532e3e2c8924687abe6537de157d403641"}, + {file = "PyMySQL-1.0.2.tar.gz", hash = "sha256:816927a350f38d56072aeca5dfb10221fe1dc653745853d30a216637f5d7ad36"}, +] +pyparsing = [ + {file = "pyparsing-3.0.8-py3-none-any.whl", hash = "sha256:ef7b523f6356f763771559412c0d7134753f037822dad1b16945b7b846f7ad06"}, + {file = "pyparsing-3.0.8.tar.gz", hash = "sha256:7bf433498c016c4314268d95df76c81b842a4cb2b276fa3312cfb1e1d85f6954"}, +] +pystan = [ + {file = "pystan-2.19.1.1-cp27-cp27m-macosx_10_6_intel.whl", hash = "sha256:4a0820df5fcd13c7a4cae75d59809adee72d1135a604dc2b5f068d4ac8ca349e"}, + {file = "pystan-2.19.1.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:2baa4106ddc7fb90712bd0e5ab8693ce130b001c6166839247511326edc6d0ba"}, + {file = "pystan-2.19.1.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:6c4bbbb0a59144135d9821f2b9c308bfdf70aa61befdc7dc435f4c86bfb4457e"}, + {file = "pystan-2.19.1.1-cp35-cp35m-macosx_10_6_intel.whl", hash = "sha256:1127522641533a6ccb7684d4008d06c092cbe6f3ee7d44679a87937ee39093ab"}, + {file = "pystan-2.19.1.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:43fdd98561f0cba0637f1fa343ed7d5adc885d04a655ab6302dbfd08f016105d"}, + {file = "pystan-2.19.1.1-cp35-cp35m-win32.whl", hash = 
"sha256:e6580cec2f5ed1bdb44eab83d54fe87b11e673ed65d6c2064d8d9f76265ce049"}, + {file = "pystan-2.19.1.1-cp35-cp35m-win_amd64.whl", hash = "sha256:c87bd98db2b5c67fa08177de04c98b46d1fcd68ae53dbe55ffc5187868068002"}, + {file = "pystan-2.19.1.1-cp36-cp36m-macosx_10_6_intel.whl", hash = "sha256:e9fbbf10dfc0ef8e7343ee4a3e17fd5c214fb12fc42615673e14908949b410e4"}, + {file = "pystan-2.19.1.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:5020ac3ca3a840f428f090fc5fe75412e2a7948ac7e3de59f4bbfd7a4539c0ef"}, + {file = "pystan-2.19.1.1-cp36-cp36m-win32.whl", hash = "sha256:61340356889547e29e2e6db7ef28f821b91e73fee80a888e81a794a24a249987"}, + {file = "pystan-2.19.1.1-cp36-cp36m-win_amd64.whl", hash = "sha256:bc1193f52bc6c6419dd753bcb0b6958b24fe588dc3da3c7f70bd23dcbda6ec2a"}, + {file = "pystan-2.19.1.1-cp37-cp37m-macosx_10_6_intel.whl", hash = "sha256:5b67008f5780c7cf0f3fbad5bc54bc9919efc9655d63e0314dc013e85c7a0f14"}, + {file = "pystan-2.19.1.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:2b44502aaa8866e0bcc81df1537e7e08b74aaf4cc9d4bf43e7c8b168f3568ca6"}, + {file = "pystan-2.19.1.1-cp37-cp37m-win32.whl", hash = "sha256:b2ef9031dfbd65757828e2441cb9a76c9217fb5bb93817fee2550722e7a785b3"}, + {file = "pystan-2.19.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:3622520b2e55d2ce70a3027d9910b6197a8bc2ef59e01967be9c4e607a48a9c1"}, + {file = "pystan-2.19.1.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:837a62976b32e4fd2bd48fee3b419c651e19747280e440d5934bea3822b22115"}, + {file = "pystan-2.19.1.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:e8e0924c318a0ea67260167a74f040078a4ce0d3fd4a7d566aa76f7752a85fab"}, + {file = "pystan-2.19.1.1-cp38-cp38-win32.whl", hash = "sha256:f16c399da3d9d72e9661b131c23d51a59c789416598885714813fcb552234c83"}, + {file = "pystan-2.19.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:9d8c2ae05d1dca854a55b2ae9276af5866e473fb8264d03d5267abadb3c602da"}, + {file = "pystan-2.19.1.1.tar.gz", hash = 
"sha256:fa8bad8dbc0da22bbe6f36af56c9abbfcf10f92df8ce627d59a36bd8d25eb038"}, +] +pytest = [ + {file = "pytest-6.2.5-py3-none-any.whl", hash = "sha256:7310f8d27bc79ced999e760ca304d69f6ba6c6649c0b60fb0e04a4a77cacc134"}, + {file = "pytest-6.2.5.tar.gz", hash = "sha256:131b36680866a76e6781d13f101efb86cf674ebb9762eb70d3082b6f29889e89"}, +] +pytest-mock = [ + {file = "pytest-mock-3.7.0.tar.gz", hash = "sha256:5112bd92cc9f186ee96e1a92efc84969ea494939c3aead39c50f421c4cc69534"}, + {file = "pytest_mock-3.7.0-py3-none-any.whl", hash = "sha256:6cff27cec936bf81dc5ee87f07132b807bcda51106b5ec4b90a04331cba76231"}, +] +python-dateutil = [ + {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, + {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, +] +python-dotenv = [ + {file = "python-dotenv-0.20.0.tar.gz", hash = "sha256:b7e3b04a59693c42c36f9ab1cc2acc46fa5df8c78e178fc33a8d4cd05c8d498f"}, + {file = "python_dotenv-0.20.0-py3-none-any.whl", hash = "sha256:d92a187be61fe482e4fd675b6d52200e7be63a12b724abbf931a40ce4fa92938"}, +] +pytz = [ + {file = "pytz-2022.1-py2.py3-none-any.whl", hash = "sha256:e68985985296d9a66a881eb3193b0906246245294a881e7c8afe623866ac6a5c"}, + {file = "pytz-2022.1.tar.gz", hash = "sha256:1e760e2fe6a8163bc0b3d9a19c4f84342afa0a2affebfaa84b01b978a02ecaa7"}, +] +requests = [ + {file = "requests-2.27.1-py2.py3-none-any.whl", hash = "sha256:f22fa1e554c9ddfd16e6e41ac79759e17be9e492b3587efa038054674760e72d"}, + {file = "requests-2.27.1.tar.gz", hash = "sha256:68d7c56fd5a8999887728ef304a6d12edc7be74f1cfa47714fc8b414525c9a61"}, +] +rsa = [ + {file = "rsa-4.8-py3-none-any.whl", hash = "sha256:95c5d300c4e879ee69708c428ba566c59478fd653cc3a22243eeb8ed846950bb"}, + {file = "rsa-4.8.tar.gz", hash = "sha256:5c6bd9dc7a543b7fe4304a631f8a8a3b674e2bbfc49c2ae96200cdbe55df6b17"}, +] +scipy = [ + {file = 
"scipy-1.7.3-1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:c9e04d7e9b03a8a6ac2045f7c5ef741be86727d8f49c45db45f244bdd2bcff17"}, + {file = "scipy-1.7.3-1-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:b0e0aeb061a1d7dcd2ed59ea57ee56c9b23dd60100825f98238c06ee5cc4467e"}, + {file = "scipy-1.7.3-1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:b78a35c5c74d336f42f44106174b9851c783184a85a3fe3e68857259b37b9ffb"}, + {file = "scipy-1.7.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:173308efba2270dcd61cd45a30dfded6ec0085b4b6eb33b5eb11ab443005e088"}, + {file = "scipy-1.7.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:21b66200cf44b1c3e86495e3a436fc7a26608f92b8d43d344457c54f1c024cbc"}, + {file = "scipy-1.7.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ceebc3c4f6a109777c0053dfa0282fddb8893eddfb0d598574acfb734a926168"}, + {file = "scipy-1.7.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7eaea089345a35130bc9a39b89ec1ff69c208efa97b3f8b25ea5d4c41d88094"}, + {file = "scipy-1.7.3-cp310-cp310-win_amd64.whl", hash = "sha256:304dfaa7146cffdb75fbf6bb7c190fd7688795389ad060b970269c8576d038e9"}, + {file = "scipy-1.7.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:033ce76ed4e9f62923e1f8124f7e2b0800db533828c853b402c7eec6e9465d80"}, + {file = "scipy-1.7.3-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:4d242d13206ca4302d83d8a6388c9dfce49fc48fdd3c20efad89ba12f785bf9e"}, + {file = "scipy-1.7.3-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8499d9dd1459dc0d0fe68db0832c3d5fc1361ae8e13d05e6849b358dc3f2c279"}, + {file = "scipy-1.7.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca36e7d9430f7481fc7d11e015ae16fbd5575615a8e9060538104778be84addf"}, + {file = "scipy-1.7.3-cp37-cp37m-win32.whl", hash = "sha256:e2c036492e673aad1b7b0d0ccdc0cb30a968353d2c4bf92ac8e73509e1bf212c"}, + {file = "scipy-1.7.3-cp37-cp37m-win_amd64.whl", hash = 
"sha256:866ada14a95b083dd727a845a764cf95dd13ba3dc69a16b99038001b05439709"}, + {file = "scipy-1.7.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:65bd52bf55f9a1071398557394203d881384d27b9c2cad7df9a027170aeaef93"}, + {file = "scipy-1.7.3-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:f99d206db1f1ae735a8192ab93bd6028f3a42f6fa08467d37a14eb96c9dd34a3"}, + {file = "scipy-1.7.3-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5f2cfc359379c56b3a41b17ebd024109b2049f878badc1e454f31418c3a18436"}, + {file = "scipy-1.7.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb7ae2c4dbdb3c9247e07acc532f91077ae6dbc40ad5bd5dca0bb5a176ee9bda"}, + {file = "scipy-1.7.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95c2d250074cfa76715d58830579c64dff7354484b284c2b8b87e5a38321672c"}, + {file = "scipy-1.7.3-cp38-cp38-win32.whl", hash = "sha256:87069cf875f0262a6e3187ab0f419f5b4280d3dcf4811ef9613c605f6e4dca95"}, + {file = "scipy-1.7.3-cp38-cp38-win_amd64.whl", hash = "sha256:7edd9a311299a61e9919ea4192dd477395b50c014cdc1a1ac572d7c27e2207fa"}, + {file = "scipy-1.7.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eef93a446114ac0193a7b714ce67659db80caf940f3232bad63f4c7a81bc18df"}, + {file = "scipy-1.7.3-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:eb326658f9b73c07081300daba90a8746543b5ea177184daed26528273157294"}, + {file = "scipy-1.7.3-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:93378f3d14fff07572392ce6a6a2ceb3a1f237733bd6dcb9eb6a2b29b0d19085"}, + {file = "scipy-1.7.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edad1cf5b2ce1912c4d8ddad20e11d333165552aba262c882e28c78bbc09dbf6"}, + {file = "scipy-1.7.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d1cc2c19afe3b5a546ede7e6a44ce1ff52e443d12b231823268019f608b9b12"}, + {file = "scipy-1.7.3-cp39-cp39-win32.whl", hash = 
"sha256:2c56b820d304dffcadbbb6cbfbc2e2c79ee46ea291db17e288e73cd3c64fefa9"}, + {file = "scipy-1.7.3-cp39-cp39-win_amd64.whl", hash = "sha256:3f78181a153fa21c018d346f595edd648344751d7f03ab94b398be2ad083ed3e"}, + {file = "scipy-1.7.3.tar.gz", hash = "sha256:ab5875facfdef77e0a47d5fd39ea178b58e60e454a4c85aa1e52fcb80db7babf"}, +] +six = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] +toml = [ + {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, + {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, +] +tomli = [ + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, +] +typed-ast = [ + {file = "typed_ast-1.5.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ad3b48cf2b487be140072fb86feff36801487d4abb7382bb1929aaac80638ea"}, + {file = "typed_ast-1.5.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:542cd732351ba8235f20faa0fc7398946fe1a57f2cdb289e5497e1e7f48cfedb"}, + {file = "typed_ast-1.5.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dc2c11ae59003d4a26dda637222d9ae924387f96acae9492df663843aefad55"}, + {file = "typed_ast-1.5.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:fd5df1313915dbd70eaaa88c19030b441742e8b05e6103c631c83b75e0435ccc"}, + {file = "typed_ast-1.5.3-cp310-cp310-win_amd64.whl", hash = "sha256:e34f9b9e61333ecb0f7d79c21c28aa5cd63bec15cb7e1310d7d3da6ce886bc9b"}, + {file = "typed_ast-1.5.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = 
"sha256:f818c5b81966d4728fec14caa338e30a70dfc3da577984d38f97816c4b3071ec"}, + {file = "typed_ast-1.5.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3042bfc9ca118712c9809201f55355479cfcdc17449f9f8db5e744e9625c6805"}, + {file = "typed_ast-1.5.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4fff9fdcce59dc61ec1b317bdb319f8f4e6b69ebbe61193ae0a60c5f9333dc49"}, + {file = "typed_ast-1.5.3-cp36-cp36m-win_amd64.whl", hash = "sha256:8e0b8528838ffd426fea8d18bde4c73bcb4167218998cc8b9ee0a0f2bfe678a6"}, + {file = "typed_ast-1.5.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8ef1d96ad05a291f5c36895d86d1375c0ee70595b90f6bb5f5fdbee749b146db"}, + {file = "typed_ast-1.5.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed44e81517364cb5ba367e4f68fca01fba42a7a4690d40c07886586ac267d9b9"}, + {file = "typed_ast-1.5.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f60d9de0d087454c91b3999a296d0c4558c1666771e3460621875021bf899af9"}, + {file = "typed_ast-1.5.3-cp37-cp37m-win_amd64.whl", hash = "sha256:9e237e74fd321a55c90eee9bc5d44be976979ad38a29bbd734148295c1ce7617"}, + {file = "typed_ast-1.5.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ee852185964744987609b40aee1d2eb81502ae63ee8eef614558f96a56c1902d"}, + {file = "typed_ast-1.5.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:27e46cdd01d6c3a0dd8f728b6a938a6751f7bd324817501c15fb056307f918c6"}, + {file = "typed_ast-1.5.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d64dabc6336ddc10373922a146fa2256043b3b43e61f28961caec2a5207c56d5"}, + {file = "typed_ast-1.5.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8cdf91b0c466a6c43f36c1964772918a2c04cfa83df8001ff32a89e357f8eb06"}, + {file = "typed_ast-1.5.3-cp38-cp38-win_amd64.whl", hash = 
"sha256:9cc9e1457e1feb06b075c8ef8aeb046a28ec351b1958b42c7c31c989c841403a"}, + {file = "typed_ast-1.5.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e20d196815eeffb3d76b75223e8ffed124e65ee62097e4e73afb5fec6b993e7a"}, + {file = "typed_ast-1.5.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:37e5349d1d5de2f4763d534ccb26809d1c24b180a477659a12c4bde9dd677d74"}, + {file = "typed_ast-1.5.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9f1a27592fac87daa4e3f16538713d705599b0a27dfe25518b80b6b017f0a6d"}, + {file = "typed_ast-1.5.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8831479695eadc8b5ffed06fdfb3e424adc37962a75925668deeb503f446c0a3"}, + {file = "typed_ast-1.5.3-cp39-cp39-win_amd64.whl", hash = "sha256:20d5118e494478ef2d3a2702d964dae830aedd7b4d3b626d003eea526be18718"}, + {file = "typed_ast-1.5.3.tar.gz", hash = "sha256:27f25232e2dd0edfe1f019d6bfaaf11e86e657d9bdb7b0956db95f560cceb2b3"}, +] +typing-extensions = [ + {file = "typing_extensions-4.2.0-py3-none-any.whl", hash = "sha256:6657594ee297170d19f67d55c05852a874e7eb634f4f753dbd667855e07c1708"}, + {file = "typing_extensions-4.2.0.tar.gz", hash = "sha256:f1c24655a0da0d1b67f07e17a5e6b2a105894e6824b92096378bb3668ef02376"}, +] +tzlocal = [ + {file = "tzlocal-2.1-py2.py3-none-any.whl", hash = "sha256:e2cb6c6b5b604af38597403e9852872d7f534962ae2954c7f35efcb1ccacf4a4"}, + {file = "tzlocal-2.1.tar.gz", hash = "sha256:643c97c5294aedc737780a49d9df30889321cbe1204eac2c2ec6134035a92e44"}, +] +uritemplate = [ + {file = "uritemplate-4.1.1-py2.py3-none-any.whl", hash = "sha256:830c08b8d99bdd312ea4ead05994a38e8936266f84b9a7878232db50b044e02e"}, + {file = "uritemplate-4.1.1.tar.gz", hash = "sha256:4346edfc5c3b79f694bccd6d6099a322bbeb628dbf2cd86eea55a456ce5124f0"}, +] +urllib3 = [ + {file = "urllib3-1.26.9-py2.py3-none-any.whl", hash = "sha256:44ece4d53fb1706f667c9bd1c648f5469a2ec925fcf3a776667042d645472c14"}, + {file = 
"urllib3-1.26.9.tar.gz", hash = "sha256:aabaf16477806a5e1dd19aa41f8c2b7950dd3c746362d7e3223dbe6de6ac448e"}, +] +wrapt = [ + {file = "wrapt-1.14.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:5a9a1889cc01ed2ed5f34574c90745fab1dd06ec2eee663e8ebeefe363e8efd7"}, + {file = "wrapt-1.14.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:9a3ff5fb015f6feb78340143584d9f8a0b91b6293d6b5cf4295b3e95d179b88c"}, + {file = "wrapt-1.14.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:4b847029e2d5e11fd536c9ac3136ddc3f54bc9488a75ef7d040a3900406a91eb"}, + {file = "wrapt-1.14.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:9a5a544861b21e0e7575b6023adebe7a8c6321127bb1d238eb40d99803a0e8bd"}, + {file = "wrapt-1.14.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:88236b90dda77f0394f878324cfbae05ae6fde8a84d548cfe73a75278d760291"}, + {file = "wrapt-1.14.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:f0408e2dbad9e82b4c960274214af533f856a199c9274bd4aff55d4634dedc33"}, + {file = "wrapt-1.14.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:9d8c68c4145041b4eeae96239802cfdfd9ef927754a5be3f50505f09f309d8c6"}, + {file = "wrapt-1.14.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:22626dca56fd7f55a0733e604f1027277eb0f4f3d95ff28f15d27ac25a45f71b"}, + {file = "wrapt-1.14.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:65bf3eb34721bf18b5a021a1ad7aa05947a1767d1aa272b725728014475ea7d5"}, + {file = "wrapt-1.14.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:09d16ae7a13cff43660155383a2372b4aa09109c7127aa3f24c3cf99b891c330"}, + {file = "wrapt-1.14.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:debaf04f813ada978d7d16c7dfa16f3c9c2ec9adf4656efdc4defdf841fc2f0c"}, + {file = "wrapt-1.14.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:748df39ed634851350efa87690c2237a678ed794fe9ede3f0d79f071ee042561"}, + {file = "wrapt-1.14.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:1807054aa7b61ad8d8103b3b30c9764de2e9d0c0978e9d3fc337e4e74bf25faa"}, + {file = "wrapt-1.14.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:763a73ab377390e2af26042f685a26787c402390f682443727b847e9496e4a2a"}, + {file = "wrapt-1.14.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:8529b07b49b2d89d6917cfa157d3ea1dfb4d319d51e23030664a827fe5fd2131"}, + {file = "wrapt-1.14.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:68aeefac31c1f73949662ba8affaf9950b9938b712fb9d428fa2a07e40ee57f8"}, + {file = "wrapt-1.14.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59d7d92cee84a547d91267f0fea381c363121d70fe90b12cd88241bd9b0e1763"}, + {file = "wrapt-1.14.0-cp310-cp310-win32.whl", hash = "sha256:3a88254881e8a8c4784ecc9cb2249ff757fd94b911d5df9a5984961b96113fff"}, + {file = "wrapt-1.14.0-cp310-cp310-win_amd64.whl", hash = "sha256:9a242871b3d8eecc56d350e5e03ea1854de47b17f040446da0e47dc3e0b9ad4d"}, + {file = "wrapt-1.14.0-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:a65bffd24409454b889af33b6c49d0d9bcd1a219b972fba975ac935f17bdf627"}, + {file = "wrapt-1.14.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9d9fcd06c952efa4b6b95f3d788a819b7f33d11bea377be6b8980c95e7d10775"}, + {file = "wrapt-1.14.0-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:db6a0ddc1282ceb9032e41853e659c9b638789be38e5b8ad7498caac00231c23"}, + {file = "wrapt-1.14.0-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:14e7e2c5f5fca67e9a6d5f753d21f138398cad2b1159913ec9e9a67745f09ba3"}, + {file = "wrapt-1.14.0-cp35-cp35m-win32.whl", hash = "sha256:6d9810d4f697d58fd66039ab959e6d37e63ab377008ef1d63904df25956c7db0"}, + {file = "wrapt-1.14.0-cp35-cp35m-win_amd64.whl", hash = "sha256:d808a5a5411982a09fef6b49aac62986274ab050e9d3e9817ad65b2791ed1425"}, + {file = "wrapt-1.14.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b77159d9862374da213f741af0c361720200ab7ad21b9f12556e0eb95912cd48"}, + {file = 
"wrapt-1.14.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:36a76a7527df8583112b24adc01748cd51a2d14e905b337a6fefa8b96fc708fb"}, + {file = "wrapt-1.14.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a0057b5435a65b933cbf5d859cd4956624df37b8bf0917c71756e4b3d9958b9e"}, + {file = "wrapt-1.14.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a0a4ca02752ced5f37498827e49c414d694ad7cf451ee850e3ff160f2bee9d3"}, + {file = "wrapt-1.14.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:8c6be72eac3c14baa473620e04f74186c5d8f45d80f8f2b4eda6e1d18af808e8"}, + {file = "wrapt-1.14.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:21b1106bff6ece8cb203ef45b4f5778d7226c941c83aaaa1e1f0f4f32cc148cd"}, + {file = "wrapt-1.14.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:493da1f8b1bb8a623c16552fb4a1e164c0200447eb83d3f68b44315ead3f9036"}, + {file = "wrapt-1.14.0-cp36-cp36m-win32.whl", hash = "sha256:89ba3d548ee1e6291a20f3c7380c92f71e358ce8b9e48161401e087e0bc740f8"}, + {file = "wrapt-1.14.0-cp36-cp36m-win_amd64.whl", hash = "sha256:729d5e96566f44fccac6c4447ec2332636b4fe273f03da128fff8d5559782b06"}, + {file = "wrapt-1.14.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:891c353e95bb11abb548ca95c8b98050f3620a7378332eb90d6acdef35b401d4"}, + {file = "wrapt-1.14.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23f96134a3aa24cc50614920cc087e22f87439053d886e474638c68c8d15dc80"}, + {file = "wrapt-1.14.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6807bcee549a8cb2f38f73f469703a1d8d5d990815c3004f21ddb68a567385ce"}, + {file = "wrapt-1.14.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6915682f9a9bc4cf2908e83caf5895a685da1fbd20b6d485dafb8e218a338279"}, + {file = 
"wrapt-1.14.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:f2f3bc7cd9c9fcd39143f11342eb5963317bd54ecc98e3650ca22704b69d9653"}, + {file = "wrapt-1.14.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:3a71dbd792cc7a3d772ef8cd08d3048593f13d6f40a11f3427c000cf0a5b36a0"}, + {file = "wrapt-1.14.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:5a0898a640559dec00f3614ffb11d97a2666ee9a2a6bad1259c9facd01a1d4d9"}, + {file = "wrapt-1.14.0-cp37-cp37m-win32.whl", hash = "sha256:167e4793dc987f77fd476862d32fa404d42b71f6a85d3b38cbce711dba5e6b68"}, + {file = "wrapt-1.14.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d066ffc5ed0be00cd0352c95800a519cf9e4b5dd34a028d301bdc7177c72daf3"}, + {file = "wrapt-1.14.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d9bdfa74d369256e4218000a629978590fd7cb6cf6893251dad13d051090436d"}, + {file = "wrapt-1.14.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2498762814dd7dd2a1d0248eda2afbc3dd9c11537bc8200a4b21789b6df6cd38"}, + {file = "wrapt-1.14.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f24ca7953f2643d59a9c87d6e272d8adddd4a53bb62b9208f36db408d7aafc7"}, + {file = "wrapt-1.14.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b835b86bd5a1bdbe257d610eecab07bf685b1af2a7563093e0e69180c1d4af1"}, + {file = "wrapt-1.14.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b21650fa6907e523869e0396c5bd591cc326e5c1dd594dcdccac089561cacfb8"}, + {file = "wrapt-1.14.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:354d9fc6b1e44750e2a67b4b108841f5f5ea08853453ecbf44c81fdc2e0d50bd"}, + {file = "wrapt-1.14.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1f83e9c21cd5275991076b2ba1cd35418af3504667affb4745b48937e214bafe"}, + {file = "wrapt-1.14.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:61e1a064906ccba038aa3c4a5a82f6199749efbbb3cef0804ae5c37f550eded0"}, + {file = 
"wrapt-1.14.0-cp38-cp38-win32.whl", hash = "sha256:28c659878f684365d53cf59dc9a1929ea2eecd7ac65da762be8b1ba193f7e84f"}, + {file = "wrapt-1.14.0-cp38-cp38-win_amd64.whl", hash = "sha256:b0ed6ad6c9640671689c2dbe6244680fe8b897c08fd1fab2228429b66c518e5e"}, + {file = "wrapt-1.14.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b3f7e671fb19734c872566e57ce7fc235fa953d7c181bb4ef138e17d607dc8a1"}, + {file = "wrapt-1.14.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:87fa943e8bbe40c8c1ba4086971a6fefbf75e9991217c55ed1bcb2f1985bd3d4"}, + {file = "wrapt-1.14.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4775a574e9d84e0212f5b18886cace049a42e13e12009bb0491562a48bb2b758"}, + {file = "wrapt-1.14.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9d57677238a0c5411c76097b8b93bdebb02eb845814c90f0b01727527a179e4d"}, + {file = "wrapt-1.14.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00108411e0f34c52ce16f81f1d308a571df7784932cc7491d1e94be2ee93374b"}, + {file = "wrapt-1.14.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d332eecf307fca852d02b63f35a7872de32d5ba8b4ec32da82f45df986b39ff6"}, + {file = "wrapt-1.14.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:01f799def9b96a8ec1ef6b9c1bbaf2bbc859b87545efbecc4a78faea13d0e3a0"}, + {file = "wrapt-1.14.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:47045ed35481e857918ae78b54891fac0c1d197f22c95778e66302668309336c"}, + {file = "wrapt-1.14.0-cp39-cp39-win32.whl", hash = "sha256:2eca15d6b947cfff51ed76b2d60fd172c6ecd418ddab1c5126032d27f74bc350"}, + {file = "wrapt-1.14.0-cp39-cp39-win_amd64.whl", hash = "sha256:bb36fbb48b22985d13a6b496ea5fb9bb2a076fea943831643836c9f6febbcfdc"}, + {file = "wrapt-1.14.0.tar.gz", hash = "sha256:8323a43bd9c91f62bb7d4be74cc9ff10090e7ef820e27bfe8815c57e68261311"}, +] +yarl = [ + {file = "yarl-1.7.2-cp310-cp310-macosx_10_9_universal2.whl", hash = 
"sha256:f2a8508f7350512434e41065684076f640ecce176d262a7d54f0da41d99c5a95"}, + {file = "yarl-1.7.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:da6df107b9ccfe52d3a48165e48d72db0eca3e3029b5b8cb4fe6ee3cb870ba8b"}, + {file = "yarl-1.7.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a1d0894f238763717bdcfea74558c94e3bc34aeacd3351d769460c1a586a8b05"}, + {file = "yarl-1.7.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfe4b95b7e00c6635a72e2d00b478e8a28bfb122dc76349a06e20792eb53a523"}, + {file = "yarl-1.7.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c145ab54702334c42237a6c6c4cc08703b6aa9b94e2f227ceb3d477d20c36c63"}, + {file = "yarl-1.7.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1ca56f002eaf7998b5fcf73b2421790da9d2586331805f38acd9997743114e98"}, + {file = "yarl-1.7.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:1d3d5ad8ea96bd6d643d80c7b8d5977b4e2fb1bab6c9da7322616fd26203d125"}, + {file = "yarl-1.7.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:167ab7f64e409e9bdd99333fe8c67b5574a1f0495dcfd905bc7454e766729b9e"}, + {file = "yarl-1.7.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:95a1873b6c0dd1c437fb3bb4a4aaa699a48c218ac7ca1e74b0bee0ab16c7d60d"}, + {file = "yarl-1.7.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6152224d0a1eb254f97df3997d79dadd8bb2c1a02ef283dbb34b97d4f8492d23"}, + {file = "yarl-1.7.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:5bb7d54b8f61ba6eee541fba4b83d22b8a046b4ef4d8eb7f15a7e35db2e1e245"}, + {file = "yarl-1.7.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:9c1f083e7e71b2dd01f7cd7434a5f88c15213194df38bc29b388ccdf1492b739"}, + {file = "yarl-1.7.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f44477ae29025d8ea87ec308539f95963ffdc31a82f42ca9deecf2d505242e72"}, + {file = 
"yarl-1.7.2-cp310-cp310-win32.whl", hash = "sha256:cff3ba513db55cc6a35076f32c4cdc27032bd075c9faef31fec749e64b45d26c"}, + {file = "yarl-1.7.2-cp310-cp310-win_amd64.whl", hash = "sha256:c9c6d927e098c2d360695f2e9d38870b2e92e0919be07dbe339aefa32a090265"}, + {file = "yarl-1.7.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:9b4c77d92d56a4c5027572752aa35082e40c561eec776048330d2907aead891d"}, + {file = "yarl-1.7.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c01a89a44bb672c38f42b49cdb0ad667b116d731b3f4c896f72302ff77d71656"}, + {file = "yarl-1.7.2-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c19324a1c5399b602f3b6e7db9478e5b1adf5cf58901996fc973fe4fccd73eed"}, + {file = "yarl-1.7.2-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3abddf0b8e41445426d29f955b24aeecc83fa1072be1be4e0d194134a7d9baee"}, + {file = "yarl-1.7.2-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6a1a9fe17621af43e9b9fcea8bd088ba682c8192d744b386ee3c47b56eaabb2c"}, + {file = "yarl-1.7.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8b0915ee85150963a9504c10de4e4729ae700af11df0dc5550e6587ed7891e92"}, + {file = "yarl-1.7.2-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:29e0656d5497733dcddc21797da5a2ab990c0cb9719f1f969e58a4abac66234d"}, + {file = "yarl-1.7.2-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:bf19725fec28452474d9887a128e98dd67eee7b7d52e932e6949c532d820dc3b"}, + {file = "yarl-1.7.2-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:d6f3d62e16c10e88d2168ba2d065aa374e3c538998ed04996cd373ff2036d64c"}, + {file = "yarl-1.7.2-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:ac10bbac36cd89eac19f4e51c032ba6b412b3892b685076f4acd2de18ca990aa"}, + {file = "yarl-1.7.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = 
"sha256:aa32aaa97d8b2ed4e54dc65d241a0da1c627454950f7d7b1f95b13985afd6c5d"}, + {file = "yarl-1.7.2-cp36-cp36m-win32.whl", hash = "sha256:87f6e082bce21464857ba58b569370e7b547d239ca22248be68ea5d6b51464a1"}, + {file = "yarl-1.7.2-cp36-cp36m-win_amd64.whl", hash = "sha256:ac35ccde589ab6a1870a484ed136d49a26bcd06b6a1c6397b1967ca13ceb3913"}, + {file = "yarl-1.7.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a467a431a0817a292121c13cbe637348b546e6ef47ca14a790aa2fa8cc93df63"}, + {file = "yarl-1.7.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ab0c3274d0a846840bf6c27d2c60ba771a12e4d7586bf550eefc2df0b56b3b4"}, + {file = "yarl-1.7.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d260d4dc495c05d6600264a197d9d6f7fc9347f21d2594926202fd08cf89a8ba"}, + {file = "yarl-1.7.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fc4dd8b01a8112809e6b636b00f487846956402834a7fd59d46d4f4267181c41"}, + {file = "yarl-1.7.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:c1164a2eac148d85bbdd23e07dfcc930f2e633220f3eb3c3e2a25f6148c2819e"}, + {file = "yarl-1.7.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:67e94028817defe5e705079b10a8438b8cb56e7115fa01640e9c0bb3edf67332"}, + {file = "yarl-1.7.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:89ccbf58e6a0ab89d487c92a490cb5660d06c3a47ca08872859672f9c511fc52"}, + {file = "yarl-1.7.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:8cce6f9fa3df25f55521fbb5c7e4a736683148bcc0c75b21863789e5185f9185"}, + {file = "yarl-1.7.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:211fcd65c58bf250fb994b53bc45a442ddc9f441f6fec53e65de8cba48ded986"}, + {file = "yarl-1.7.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c10ea1e80a697cf7d80d1ed414b5cb8f1eec07d618f54637067ae3c0334133c4"}, + {file = 
"yarl-1.7.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:52690eb521d690ab041c3919666bea13ab9fbff80d615ec16fa81a297131276b"}, + {file = "yarl-1.7.2-cp37-cp37m-win32.whl", hash = "sha256:695ba021a9e04418507fa930d5f0704edbce47076bdcfeeaba1c83683e5649d1"}, + {file = "yarl-1.7.2-cp37-cp37m-win_amd64.whl", hash = "sha256:c17965ff3706beedafd458c452bf15bac693ecd146a60a06a214614dc097a271"}, + {file = "yarl-1.7.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:fce78593346c014d0d986b7ebc80d782b7f5e19843ca798ed62f8e3ba8728576"}, + {file = "yarl-1.7.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c2a1ac41a6aa980db03d098a5531f13985edcb451bcd9d00670b03129922cd0d"}, + {file = "yarl-1.7.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:39d5493c5ecd75c8093fa7700a2fb5c94fe28c839c8e40144b7ab7ccba6938c8"}, + {file = "yarl-1.7.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1eb6480ef366d75b54c68164094a6a560c247370a68c02dddb11f20c4c6d3c9d"}, + {file = "yarl-1.7.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ba63585a89c9885f18331a55d25fe81dc2d82b71311ff8bd378fc8004202ff6"}, + {file = "yarl-1.7.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e39378894ee6ae9f555ae2de332d513a5763276a9265f8e7cbaeb1b1ee74623a"}, + {file = "yarl-1.7.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:c0910c6b6c31359d2f6184828888c983d54d09d581a4a23547a35f1d0b9484b1"}, + {file = "yarl-1.7.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6feca8b6bfb9eef6ee057628e71e1734caf520a907b6ec0d62839e8293e945c0"}, + {file = "yarl-1.7.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8300401dc88cad23f5b4e4c1226f44a5aa696436a4026e456fe0e5d2f7f486e6"}, + {file = "yarl-1.7.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:788713c2896f426a4e166b11f4ec538b5736294ebf7d5f654ae445fd44270832"}, + {file 
= "yarl-1.7.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:fd547ec596d90c8676e369dd8a581a21227fe9b4ad37d0dc7feb4ccf544c2d59"}, + {file = "yarl-1.7.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:737e401cd0c493f7e3dd4db72aca11cfe069531c9761b8ea474926936b3c57c8"}, + {file = "yarl-1.7.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:baf81561f2972fb895e7844882898bda1eef4b07b5b385bcd308d2098f1a767b"}, + {file = "yarl-1.7.2-cp38-cp38-win32.whl", hash = "sha256:ede3b46cdb719c794427dcce9d8beb4abe8b9aa1e97526cc20de9bd6583ad1ef"}, + {file = "yarl-1.7.2-cp38-cp38-win_amd64.whl", hash = "sha256:cc8b7a7254c0fc3187d43d6cb54b5032d2365efd1df0cd1749c0c4df5f0ad45f"}, + {file = "yarl-1.7.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:580c1f15500e137a8c37053e4cbf6058944d4c114701fa59944607505c2fe3a0"}, + {file = "yarl-1.7.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3ec1d9a0d7780416e657f1e405ba35ec1ba453a4f1511eb8b9fbab81cb8b3ce1"}, + {file = "yarl-1.7.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3bf8cfe8856708ede6a73907bf0501f2dc4e104085e070a41f5d88e7faf237f3"}, + {file = "yarl-1.7.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1be4bbb3d27a4e9aa5f3df2ab61e3701ce8fcbd3e9846dbce7c033a7e8136746"}, + {file = "yarl-1.7.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:534b047277a9a19d858cde163aba93f3e1677d5acd92f7d10ace419d478540de"}, + {file = "yarl-1.7.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6ddcd80d79c96eb19c354d9dca95291589c5954099836b7c8d29278a7ec0bda"}, + {file = "yarl-1.7.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9bfcd43c65fbb339dc7086b5315750efa42a34eefad0256ba114cd8ad3896f4b"}, + {file = "yarl-1.7.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f64394bd7ceef1237cc604b5a89bf748c95982a84bcd3c4bbeb40f685c810794"}, + {file 
= "yarl-1.7.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:044daf3012e43d4b3538562da94a88fb12a6490652dbc29fb19adfa02cf72eac"}, + {file = "yarl-1.7.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:368bcf400247318382cc150aaa632582d0780b28ee6053cd80268c7e72796dec"}, + {file = "yarl-1.7.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:bab827163113177aee910adb1f48ff7af31ee0289f434f7e22d10baf624a6dfe"}, + {file = "yarl-1.7.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0cba38120db72123db7c58322fa69e3c0efa933040ffb586c3a87c063ec7cae8"}, + {file = "yarl-1.7.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:59218fef177296451b23214c91ea3aba7858b4ae3306dde120224cfe0f7a6ee8"}, + {file = "yarl-1.7.2-cp39-cp39-win32.whl", hash = "sha256:1edc172dcca3f11b38a9d5c7505c83c1913c0addc99cd28e993efeaafdfaa18d"}, + {file = "yarl-1.7.2-cp39-cp39-win_amd64.whl", hash = "sha256:797c2c412b04403d2da075fb93c123df35239cd7b4cc4e0cd9e5839b73f52c58"}, + {file = "yarl-1.7.2.tar.gz", hash = "sha256:45399b46d60c253327a460e99856752009fcee5f5d3c80b2f7c0cae1c38d56dd"}, +] +zipp = [ + {file = "zipp-3.8.0-py3-none-any.whl", hash = "sha256:c4f6e5bbf48e74f7a38e7cc5b0480ff42b0ae5178957d564d18932525d5cf099"}, + {file = "zipp-3.8.0.tar.gz", hash = "sha256:56bf8aadb83c24db6c4b577e13de374ccfb67da2078beba1d037c17980bf43ad"}, +] diff --git a/python/poetry.toml b/python/poetry.toml new file mode 100644 index 000000000..4f3a13d88 --- /dev/null +++ b/python/poetry.toml @@ -0,0 +1,3 @@ +[virtualenvs] +path = ".venv" +in-project = true diff --git a/python/pyproject.toml b/python/pyproject.toml new file mode 100644 index 000000000..2b07165b3 --- /dev/null +++ b/python/pyproject.toml @@ -0,0 +1,38 @@ +[tool.poetry] +authors = ["Bucketeer team "] +description = "" +name = "python" +version = "0.1.0" + +[tool.poetry.dependencies] +APScheduler = "3.7.0" +PyMySQL = "1.0.2" +pandas = "^1.2.5" +prometheus-client = "0.7.1" +pystan = "2.19.1.1" +python = "3.7.6" +scipy = "^1.5.3" +environs = "^9.3.4" 
+grpc-health-checking = "0.0.1" +grpcio-health-checking = "1.27.2" +google-cloud-logging = "1.15.1" +prometheus-async = "19.2.0" +aiohttp = "3.7.4.post0" +google-cloud = "^0.34.0" +protobuf = "^3.20.1" +google-api-python-client = "^2.45.0" + +[tool.poetry.dev-dependencies] +flake8 = "^3.9.2" +pycodestyle = "^2.7.0" +pytest = "^6.2.5" +pytest-mock = "^3.7.0" +grpcio-tools = "^1.45.0" +black = "^22.3.0" + +[build-system] +build-backend = "poetry.core.masonry.api" +requires = ["poetry-core>=1.0.0"] + +[tool.black] +target_version = ['py37'] \ No newline at end of file diff --git a/python/requirements-dev.txt b/python/requirements-dev.txt new file mode 100644 index 000000000..78d0c59fb --- /dev/null +++ b/python/requirements-dev.txt @@ -0,0 +1,71 @@ +aiohttp==3.7.4.post0; python_version >= "3.6" +apscheduler==3.7.0; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.5.0" and python_version < "4") +async-timeout==3.0.1; python_full_version >= "3.5.3" and python_version >= "3.6" +atomicwrites==1.4.0; python_version >= "3.7" and python_full_version < "3.0.0" and sys_platform == "win32" or sys_platform == "win32" and python_version >= "3.7" and python_full_version >= "3.4.0" +attrs==21.4.0; python_version >= "3.7" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.7" +black==21.12b0; python_full_version >= "3.6.2" +cachetools==4.2.4; python_version >= "3.5" and python_version < "4.0" and (python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6") +certifi==2021.10.8; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6" +chardet==4.0.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6" +charset-normalizer==2.0.12; python_full_version >= "3.6.0" and python_version >= "3.6" +click==8.1.2; 
python_version >= "3.7" and python_full_version >= "3.6.2" +colorama==0.4.4; sys_platform == "win32" and python_version >= "3.7" and python_full_version >= "3.6.2" and platform_system == "Windows" and (python_version >= "3.7" and python_full_version < "3.0.0" and sys_platform == "win32" or sys_platform == "win32" and python_version >= "3.7" and python_full_version >= "3.5.0") +cython==0.29.28; python_version >= "2.6" and python_full_version < "3.0.0" or python_full_version >= "3.3.0" +environs==9.5.0; python_version >= "3.6" +flake8==3.9.2; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.5.0") +google-api-core==1.31.5; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6" +google-api-python-client==2.46.0; python_version >= "3.6" +google-auth-httplib2==0.1.0; python_version >= "3.6" +google-auth==1.35.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6" +google-cloud-core==1.7.2; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" +google-cloud-logging==1.15.1; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.4.0") +google-cloud==0.34.0 +googleapis-common-protos==1.56.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6" +grpc-health-checking==0.0.1 +grpcio-health-checking==1.27.2 +grpcio-tools==1.45.0; python_version >= "3.6" +grpcio==1.45.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6" +httplib2==0.20.4; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.4.0" and python_version >= "3.6" +idna==3.3; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6" 
+importlib-metadata==4.11.3; python_full_version >= "3.6.2" and python_version < "3.8" and python_version >= "3.7" +iniconfig==1.1.1; python_version >= "3.7" +marshmallow==3.15.0; python_version >= "3.7" +mccabe==0.6.1; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" +multidict==6.0.2; python_version >= "3.7" +mypy-extensions==0.4.3; python_full_version >= "3.6.2" +numpy==1.21.6 +packaging==21.3; python_version >= "3.7" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.7" +pandas==1.3.5; python_full_version >= "3.7.1" +pathspec==0.9.0; python_full_version >= "3.6.2" +platformdirs==2.5.2; python_version >= "3.7" and python_full_version >= "3.6.2" +pluggy==1.0.0; python_version >= "3.7" +prometheus-async==19.2.0; (python_version >= "2.7" and python_version < "3.0") or (python_version > "3.0" and python_version < "3.1") or (python_version > "3.1" and python_version < "3.2") or (python_version > "3.2" and python_version < "3.3") or (python_version > "3.3" and python_version < "3.4") or (python_version > "3.4") +prometheus-client==0.7.1 +protobuf==3.20.1; python_version >= "3.7" +py==1.11.0; python_version >= "3.7" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.7" +pyasn1-modules==0.2.8; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6" +pyasn1==0.4.8; python_version >= "3.6" and python_full_version < "3.0.0" and python_version < "4" and (python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6") or python_full_version >= "3.6.0" and python_version >= "3.6" and python_version < "4" and (python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6") +pycodestyle==2.7.0; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version 
>= "3.4.0") +pyflakes==2.3.1; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" +pymysql==1.0.2; python_version >= "3.6" +pyparsing==3.0.8; python_full_version >= "3.6.8" and python_version >= "3.7" +pystan==2.19.1.1 +pytest-mock==3.7.0; python_version >= "3.7" +pytest==6.2.5; python_version >= "3.6" +python-dateutil==2.8.2; python_full_version >= "3.7.1" +python-dotenv==0.20.0; python_version >= "3.6" +pytz==2022.1; python_full_version >= "3.7.1" and python_version < "4" and (python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version < "4") and (python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6") +requests==2.27.1; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6" +rsa==4.8; python_version >= "3.6" and python_version < "4" and (python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6") +scipy==1.7.3; python_version >= "3.7" and python_version < "3.11" +six==1.16.0; python_full_version >= "3.7.1" and python_version < "4" and (python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.3.0" and python_version >= "3.6") and (python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6") and (python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.6.0") +toml==0.10.2; python_version >= "3.7" and python_full_version < "3.0.0" or python_full_version >= "3.3.0" and python_version >= "3.7" +tomli==1.2.3; python_version >= "3.6" and python_full_version >= "3.6.2" +typed-ast==1.5.3; python_version < "3.8" and implementation_name == "cpython" and python_full_version >= "3.6.2" and python_version >= "3.6" +typing-extensions==4.2.0; python_version >= "3.7" and 
python_full_version >= "3.6.2" and python_version < "3.8" +tzlocal==2.1; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version < "4" +uritemplate==4.1.1; python_version >= "3.6" +urllib3==1.26.9; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version < "4" and python_version >= "3.6" +wrapt==1.14.0; python_version >= "2.7" and python_full_version < "3.0.0" and python_version < "3.0" or python_version > "3.0" and python_full_version < "3.0.0" and python_version < "3.1" or python_version > "3.1" and python_full_version < "3.0.0" and python_version < "3.2" or python_version > "3.2" and python_full_version < "3.0.0" and python_version < "3.3" or python_version > "3.3" and python_full_version < "3.0.0" and python_version < "3.4" or python_version > "3.4" and python_full_version < "3.0.0" or python_version >= "2.7" and python_version < "3.0" and python_full_version >= "3.5.0" or python_version > "3.0" and python_version < "3.1" and python_full_version >= "3.5.0" or python_version > "3.1" and python_version < "3.2" and python_full_version >= "3.5.0" or python_version > "3.2" and python_version < "3.3" and python_full_version >= "3.5.0" or python_version > "3.3" and python_version < "3.4" and python_full_version >= "3.5.0" or python_full_version >= "3.5.0" and python_version > "3.4" +yarl==1.7.2; python_version >= "3.6" +zipp==3.8.0; python_full_version >= "3.6.2" and python_version < "3.8" and python_version >= "3.7" diff --git a/python/requirements.txt b/python/requirements.txt new file mode 100644 index 000000000..56ab26905 --- /dev/null +++ b/python/requirements.txt @@ -0,0 +1,49 @@ +aiohttp==3.7.4.post0; python_version >= "3.6" +apscheduler==3.7.0; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.5.0" and python_version < "4") +async-timeout==3.0.1; python_full_version >= "3.5.3" and python_version >= "3.6" 
+attrs==21.4.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6" +cachetools==4.2.4; python_version >= "3.5" and python_version < "4.0" and (python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6") +certifi==2021.10.8; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6" +chardet==4.0.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6" +charset-normalizer==2.0.12; python_full_version >= "3.6.0" and python_version >= "3.6" +cython==0.29.28; python_version >= "2.6" and python_full_version < "3.0.0" or python_full_version >= "3.3.0" +environs==9.5.0; python_version >= "3.6" +google-api-core==1.31.5; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6" +google-api-python-client==2.46.0; python_version >= "3.6" +google-auth-httplib2==0.1.0; python_version >= "3.6" +google-auth==1.35.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6" +google-cloud-core==1.7.2; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" +google-cloud-logging==1.15.1; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.4.0") +google-cloud==0.34.0 +googleapis-common-protos==1.56.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6" +grpc-health-checking==0.0.1 +grpcio-health-checking==1.27.2 +grpcio==1.45.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6" +httplib2==0.20.4; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.4.0" and 
python_version >= "3.6" +idna==3.3; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6" +marshmallow==3.15.0; python_version >= "3.7" +multidict==6.0.2; python_version >= "3.7" +numpy==1.21.6 +packaging==21.3; python_version >= "3.7" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.7" +pandas==1.3.5; python_full_version >= "3.7.1" +prometheus-async==19.2.0; (python_version >= "2.7" and python_version < "3.0") or (python_version > "3.0" and python_version < "3.1") or (python_version > "3.1" and python_version < "3.2") or (python_version > "3.2" and python_version < "3.3") or (python_version > "3.3" and python_version < "3.4") or (python_version > "3.4") +prometheus-client==0.7.1 +protobuf==3.20.1; python_version >= "3.7" +pyasn1-modules==0.2.8; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6" +pyasn1==0.4.8; python_version >= "3.6" and python_full_version < "3.0.0" and python_version < "4" and (python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6") or python_full_version >= "3.6.0" and python_version >= "3.6" and python_version < "4" and (python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6") +pymysql==1.0.2; python_version >= "3.6" +pyparsing==3.0.8; python_full_version >= "3.6.8" and python_version >= "3.7" +pystan==2.19.1.1 +python-dateutil==2.8.2; python_full_version >= "3.7.1" +python-dotenv==0.20.0; python_version >= "3.6" +pytz==2022.1; python_full_version >= "3.7.1" and python_version < "4" and (python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version < "4") and (python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6") 
+requests==2.27.1; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6" +rsa==4.8; python_version >= "3.6" and python_version < "4" and (python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6") +scipy==1.7.3; python_version >= "3.7" and python_version < "3.11" +six==1.16.0; python_full_version >= "3.7.1" and python_version < "4" and (python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.3.0" and python_version >= "3.6") and (python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6") and (python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.6.0") +typing-extensions==4.2.0; python_version < "3.8" and python_version >= "3.7" +tzlocal==2.1; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version < "4" +uritemplate==4.1.1; python_version >= "3.6" +urllib3==1.26.9; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version < "4" and python_version >= "3.6" +wrapt==1.14.0; python_version >= "2.7" and python_full_version < "3.0.0" and python_version < "3.0" or python_version > "3.0" and python_full_version < "3.0.0" and python_version < "3.1" or python_version > "3.1" and python_full_version < "3.0.0" and python_version < "3.2" or python_version > "3.2" and python_full_version < "3.0.0" and python_version < "3.3" or python_version > "3.3" and python_full_version < "3.0.0" and python_version < "3.4" or python_version > "3.4" and python_full_version < "3.0.0" or python_version >= "2.7" and python_version < "3.0" and python_full_version >= "3.5.0" or python_version > "3.0" and python_version < "3.1" and python_full_version >= "3.5.0" or python_version > "3.1" and python_version < "3.2" and python_full_version >= 
"3.5.0" or python_version > "3.2" and python_version < "3.3" and python_full_version >= "3.5.0" or python_version > "3.3" and python_version < "3.4" and python_full_version >= "3.5.0" or python_full_version >= "3.5.0" and python_version > "3.4" +yarl==1.7.2; python_version >= "3.6" diff --git a/python/src/cmd/calculator/main.py b/python/src/cmd/calculator/main.py new file mode 100644 index 000000000..3c5e571b0 --- /dev/null +++ b/python/src/cmd/calculator/main.py @@ -0,0 +1,122 @@ +# Copyright 2022 The Bucketeer Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import asyncio
import logging
import platform

from environs import Env
from lib.calculator.job import calculate_experiments
from lib.calculator.stats import binomial, normal_inverse_gamma
from lib.environment.stub import stub as environment_stub
from lib.eventcounter.stub import stub as event_counter_stub
from lib.experiment.stub import stub as experiment_stub
from lib.health import health
from lib.log.logger import Logger
from lib.metrics import server as metrics_server
from lib.rpc import rpc
from lib.schedule import job, scheduler
from lib.signal import signal_handler as sh
from lib.storage.mysql import client as mysql_client


async def main():
    """Entry point of the experiment-calculator service.

    Reads configuration from environment variables, wires up the gRPC
    stubs, MySQL client, scheduler, health checker and metrics server,
    then runs all of them concurrently until the first task finishes
    (e.g. the signal handler observed SIGTERM).
    """
    env = Env()
    env.read_env()
    mysql_user = env.str("BUCKETEER_CALCULATOR_MYSQL_USER")
    mysql_pass = env.str("BUCKETEER_CALCULATOR_MYSQL_PASS")
    mysql_host = env.str("BUCKETEER_CALCULATOR_MYSQL_HOST")
    mysql_port = env.int("BUCKETEER_CALCULATOR_MYSQL_PORT")
    mysql_db_name = env.str("BUCKETEER_CALCULATOR_MYSQL_DB_NAME")
    environment_service = env.str(
        "BUCKETEER_CALCULATOR_ENVIRONMENT_SERVICE", "localhost:9000"
    )
    experiment_service = env.str(
        "BUCKETEER_CALCULATOR_EXPERIMENT_SERVICE", "localhost:9000"
    )
    event_counter_service = env.str(
        "BUCKETEER_CALCULATOR_EVENT_COUNTER_SERVICE", "localhost:9000"
    )
    port = env.int("BUCKETEER_CALCULATOR_PORT", 9090)
    metrics_port = env.int("BUCKETEER_CALCULATOR_METRICS_PORT", 9002)
    log_level = env.log_level("BUCKETEER_CALCULATOR_LOG_LEVEL", logging.INFO)
    service_token_path = env.str("BUCKETEER_CALCULATOR_SERVICE_TOKEN")
    cert_path = env.str("BUCKETEER_CALCULATOR_CERT")
    key_path = env.str("BUCKETEER_CALCULATOR_KEY")

    # When running under Telepresence, secrets are mounted under its root.
    telepresence_root = env.str("TELEPRESENCE_ROOT", "")
    if telepresence_root:
        service_token_path = telepresence_root + service_token_path
        cert_path = telepresence_root + cert_path
        key_path = telepresence_root + key_path

    logger = Logger("calculator", log_level)
    _logger = logger.logger

    envrStub = environment_stub.create_stub(
        environment_service, cert_path, service_token_path
    )
    exprStub = experiment_stub.create_stub(
        experiment_service, cert_path, service_token_path
    )
    ecStub = event_counter_stub.create_stub(
        event_counter_service, cert_path, service_token_path
    )

    mc = mysql_client.Client(
        mysql_user, mysql_pass, mysql_host, mysql_port, mysql_db_name
    )

    calculator = calculate_experiments.ExperimentCalculator(
        envrStub,
        exprStub,
        ecStub,
        mc,
        binomial.Binomial(_logger),
        normal_inverse_gamma.NormalInverseGamma(_logger),
        _logger,
    )

    # Recalculate experiment results at second 0 of every minute.
    jobs = [
        job.Job(
            "calculate_experiments",
            calculator.run,
            hour="*",
            minute="*",
            second="0",
        ),
    ]
    sch = scheduler.Scheduler(jobs, _logger)

    server = rpc.Server(port, cert_path, key_path, _logger)
    signal_handler = sh.SignalHandler(_logger)
    metrics = metrics_server.Server(metrics_port, _logger)

    checks = [sch.check]
    checker = health.Checker(checks, server, _logger)

    # asyncio.wait() no longer accepts bare coroutines (deprecated in
    # Python 3.8, removed in 3.11) -- wrap each one in a Task explicitly.
    tasks = [
        asyncio.create_task(coro)
        for coro in (
            server.run(),
            checker.run(),
            signal_handler.run(),
            sch.run(),
            logger.run(),
            metrics.run(),
        )
    ]
    _logger.info("app starts running", {"pythonVersion": platform.python_version()})
    # Stop as soon as any task completes; the remaining tasks are torn
    # down with the event loop on process exit.
    await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)


if __name__ == "__main__":
    asyncio.run(main())
from datetime import datetime, timedelta

from proto.experiment import experiment_pb2


class Experiment:
    """Domain wrapper around the Experiment protobuf with status helpers."""

    def __init__(self, pb: experiment_pb2.Experiment):
        self.pb = pb

    def is_updated_to_running(self, now: datetime) -> bool:
        """True when a WAITING experiment has reached its start time."""
        return (
            self.pb.status == experiment_pb2.Experiment.WAITING
            and self.pb.start_at <= int(now.timestamp())
        )

    def is_updated_to_finish(self, now: datetime) -> bool:
        """True when a WAITING/RUNNING experiment stopped more than two days ago."""
        threshold = int((now - timedelta(days=2)).timestamp())
        active_statuses = (
            experiment_pb2.Experiment.WAITING,
            experiment_pb2.Experiment.RUNNING,
        )
        return self.pb.status in active_statuses and self.pb.stop_at < threshold

    def is_calculated(self, now: datetime) -> bool:
        """True once the experiment's start time has passed."""
        return self.pb.start_at <= int(now.timestamp())
from proto.eventcounter import experiment_result_pb2 as proto


class ExperimentResult:
    """Domain wrapper for the ExperimentResult protobuf message."""

    def __init__(self, pb: proto.ExperimentResult):
        # Keep a reference to the raw protobuf; storage layers read it directly.
        self.pb = pb
from datetime import datetime, timedelta
from typing import Dict, List

import grpc
from lib.calculator.domain import experiment as experiment_domain
from lib.calculator.domain import experiment_result as experiment_result_domain
from lib.calculator.job import metrics
from lib.calculator.storage import \
    mysql_experiment_result as mysql_experiment_result_storage
from proto.environment import service_pb2 as environment_service_pb2
from proto.eventcounter import experiment_result_pb2, goal_result_pb2
from proto.eventcounter import service_pb2 as ec_service_pb2
from proto.eventcounter import (timeseries_pb2, variation_count_pb2,
                                variation_result_pb2)
from proto.experiment import experiment_pb2
from proto.experiment import service_pb2 as experiment_service_pb2
from proto.experiment.command_pb2 import (FinishExperimentCommand,
                                          StartExperimentCommand)

# Page size used when paginating through the environment/experiment services.
_PAGE_SIZE = 500


class ExperimentCalculator:
    """Scheduled job that computes statistical results for experiments.

    For every environment it lists the active experiments, fetches
    evaluation/goal counts from the event-counter service, runs the
    Bayesian models (binomial CVR, normal-inverse-gamma value-per-user),
    persists the results to MySQL, and advances experiment status
    (start/finish) where needed.
    """

    def __init__(
        self,
        environment_stub,
        experiment_stub,
        event_counter_stub,
        mysql_client,
        binomial,
        normal_inverse_gamma,
        logger,
        grpc_timeout=180,
    ):
        self._grpc_timeout = grpc_timeout
        self._environment_stub = environment_stub
        self._experiment_stub = experiment_stub
        self._event_counter_stub = event_counter_stub
        self._mysql_experiment_result_storage = (
            mysql_experiment_result_storage.MySQLExperimentResultStorage(mysql_client)
        )
        self._logger = logger
        self._binomial_model = binomial
        self._normal_inverse_gamma = normal_inverse_gamma
        # Injected clock hook; tests may replace it with a fixed datetime.
        self._now = datetime.now

    def run(self):
        """Run one full calculation pass over all environments."""
        environments = self._list_environments()
        metrics.target_items_gauge.labels(metrics.typeEnvironment, "").set(
            len(environments)
        )
        now = self._now()
        for env in environments or []:
            self._logger.info(
                "experiment calculator: start calculator over environment",
                {"environmentNamespace": env.namespace},
            )
            experiments = self._list_experiments(env.namespace)
            metrics.target_items_gauge.labels(
                metrics.typeExperiment, env.namespace
            ).set(len(experiments))
            for e in experiments:
                de = experiment_domain.Experiment(e)
                if not de.is_calculated(now):
                    continue
                experiment_result = self._create_experiment_result(env.namespace, de.pb)
                self._mysql_experiment_result_storage.upsert_multi(
                    env.namespace,
                    [experiment_result_domain.ExperimentResult(experiment_result)],
                )
                self._update_experiment_status(env.namespace, de, now)

    def _create_experiment_result(
        self,
        environment_namespace: str,
        experiment: experiment_pb2.Experiment,
    ) -> experiment_result_pb2.ExperimentResult:
        """Build the full ExperimentResult for one experiment.

        For every goal and for every day boundary between start and
        "now" (capped at stop_at), fetch counts and fold the per-day
        goal results into cumulative per-variation timeseries.
        """
        variation_ids = []
        for v in experiment.variations:
            variation_ids.append(v.id)
        end_ats = self._list_end_at(
            experiment.start_at, experiment.stop_at, int(self._now().timestamp())
        )
        expr_result = self._experiment_result(experiment.id)
        for goal_id in experiment.goal_ids:
            goal_result = expr_result.goal_results.add()
            goal_result.goal_id = goal_id
            variation_results = []
            for v in experiment.variations:
                variation_results.append(self._variation_result(v.id))
            for timestamp in end_ats:
                eval_vc = self._get_evaluation_count(
                    environment_namespace,
                    experiment.start_at,
                    timestamp,
                    experiment.feature_id,
                    experiment.feature_version,
                    variation_ids,
                )
                goal_vc = self._get_goal_count(
                    environment_namespace,
                    experiment.start_at,
                    timestamp,
                    goal_id,
                    experiment.feature_id,
                    experiment.feature_version,
                    variation_ids,
                )
                gr = self._create_goal_result(experiment, eval_vc, goal_vc)
                self._append_variation_results(
                    timestamp, variation_results, gr.variation_results
                )
            goal_result.variation_results.extend(variation_results)
        return expr_result

    def _append_variation_results(
        self,
        timestamp: int,
        dst_vrs: List[variation_result_pb2.VariationResult],
        src_vrs: List[variation_result_pb2.VariationResult],
    ):
        """Fold one day's results (src) into the cumulative results (dst).

        Scalar summaries (counts, probability distributions) are
        overwritten each call, so after the loop they hold the values of
        the latest timestamp; the *_timeseries fields accumulate one
        point per call via MergeFrom. Note: sorted() returns new lists
        but the elements are the same protobuf messages, so mutating
        dst_vr entries is visible to the caller.
        """
        dst_vrs = sorted(dst_vrs, key=lambda vr: vr.variation_id)
        src_vrs = sorted(src_vrs, key=lambda vr: vr.variation_id)
        for dst_vr, src_vr in zip(dst_vrs, src_vrs):
            dst_vr.experiment_count.CopyFrom(src_vr.experiment_count)
            dst_vr.evaluation_count.CopyFrom(src_vr.evaluation_count)
            dst_vr.cvr_prob.CopyFrom(src_vr.cvr_prob)
            dst_vr.cvr_prob_best.CopyFrom(src_vr.cvr_prob_best)
            dst_vr.cvr_prob_beat_baseline.CopyFrom(src_vr.cvr_prob_beat_baseline)
            dst_vr.evaluation_user_count_timeseries.MergeFrom(
                timeseries_pb2.Timeseries(
                    timestamps=[timestamp],
                    values=[float(src_vr.evaluation_count.user_count)],
                )
            )
            dst_vr.evaluation_event_count_timeseries.MergeFrom(
                timeseries_pb2.Timeseries(
                    timestamps=[timestamp],
                    values=[float(src_vr.evaluation_count.event_count)],
                )
            )
            dst_vr.goal_user_count_timeseries.MergeFrom(
                timeseries_pb2.Timeseries(
                    timestamps=[timestamp],
                    values=[float(src_vr.experiment_count.user_count)],
                )
            )
            dst_vr.goal_event_count_timeseries.MergeFrom(
                timeseries_pb2.Timeseries(
                    timestamps=[timestamp],
                    values=[float(src_vr.experiment_count.event_count)],
                )
            )
            dst_vr.goal_value_sum_timeseries.MergeFrom(
                timeseries_pb2.Timeseries(
                    timestamps=[timestamp], values=[src_vr.experiment_count.value_sum]
                )
            )
            dst_vr.cvr_median_timeseries.MergeFrom(
                timeseries_pb2.Timeseries(
                    timestamps=[timestamp], values=[src_vr.cvr_prob.median]
                )
            )
            dst_vr.cvr_percentile025_timeseries.MergeFrom(
                timeseries_pb2.Timeseries(
                    timestamps=[timestamp], values=[src_vr.cvr_prob.percentile025]
                )
            )
            dst_vr.cvr_percentile975_timeseries.MergeFrom(
                timeseries_pb2.Timeseries(
                    timestamps=[timestamp], values=[src_vr.cvr_prob.percentile975]
                )
            )
            # Point estimate of CVR; 0.0 when there were no evaluations.
            cvr = 0.0
            if src_vr.evaluation_count.user_count != 0:
                cvr = float(
                    src_vr.experiment_count.user_count
                    / src_vr.evaluation_count.user_count
                )
            dst_vr.cvr_timeseries.MergeFrom(
                timeseries_pb2.Timeseries(timestamps=[timestamp], values=[cvr])
            )
            # Average goal value per converting user; 0.0 when no conversions.
            value_per_user = 0.0
            if src_vr.experiment_count.user_count != 0:
                value_per_user = float(
                    src_vr.experiment_count.value_sum
                    / src_vr.experiment_count.user_count
                )
            dst_vr.goal_value_sum_per_user_timeseries.MergeFrom(
                timeseries_pb2.Timeseries(
                    timestamps=[timestamp], values=[value_per_user]
                )
            )

            dst_vr.goal_value_sum_per_user_prob.CopyFrom(
                src_vr.goal_value_sum_per_user_prob
            )
            dst_vr.goal_value_sum_per_user_prob_best.CopyFrom(
                src_vr.goal_value_sum_per_user_prob_best
            )
            dst_vr.goal_value_sum_per_user_prob_beat_baseline.CopyFrom(
                src_vr.goal_value_sum_per_user_prob_beat_baseline
            )
            dst_vr.goal_value_sum_per_user_median_timeseries.MergeFrom(
                timeseries_pb2.Timeseries(
                    timestamps=[timestamp],
                    values=[src_vr.goal_value_sum_per_user_prob.median],
                )
            )
            dst_vr.goal_value_sum_per_user_percentile025_timeseries.MergeFrom(
                timeseries_pb2.Timeseries(
                    timestamps=[timestamp],
                    values=[src_vr.goal_value_sum_per_user_prob.percentile025],
                )
            )
            dst_vr.goal_value_sum_per_user_percentile975_timeseries.MergeFrom(
                timeseries_pb2.Timeseries(
                    timestamps=[timestamp],
                    values=[src_vr.goal_value_sum_per_user_prob.percentile975],
                )
            )

    def _list_environments(self):
        """Fetch all environments, paginating until a short page is returned.

        Raises the underlying grpc.RpcError after logging it.
        """
        try:
            environments = []
            cursor = ""
            while True:
                resp = self._environment_stub.ListEnvironments(
                    environment_service_pb2.ListEnvironmentsRequest(
                        page_size=_PAGE_SIZE, cursor=cursor
                    ),
                    self._grpc_timeout,
                )
                environments.extend(resp.environments)
                environment_size = len(resp.environments)
                if environment_size < _PAGE_SIZE:
                    return environments
                cursor = resp.cursor
                # FIXME: Here it will be changed soon, maybe.
                # https://github.com/grpc/grpc/issues/9270#issuecomment-398796613
        except grpc.RpcError as rpc_error_call:
            self._logger.error(
                "experiment calculator: list environments failed",
                {"code": rpc_error_call.code(), "details": rpc_error_call.details()},
            )
            raise

    def _list_experiments(
        self,
        environment_namespace,
    ) -> List[experiment_pb2.Experiment]:
        """List WAITING/RUNNING experiments stopped within the last two days.

        Raises the underlying grpc.RpcError after logging it.
        """
        experiments = []
        cursor = ""
        stopped_at = self._now() - timedelta(days=2)
        try:
            while True:
                req = experiment_service_pb2.ListExperimentsRequest(
                    environment_namespace=environment_namespace,
                    statuses=[
                        experiment_pb2.Experiment.WAITING,
                        experiment_pb2.Experiment.RUNNING,
                    ],
                    page_size=_PAGE_SIZE,
                    cursor=cursor,
                )
                # The proto field is named "from", which is a reserved
                # word in Python, so it must be set via setattr.
                setattr(req, "from", int(stopped_at.timestamp()))
                resp = self._experiment_stub.ListExperiments(req, self._grpc_timeout)

                experiments.extend(resp.experiments)
                if len(resp.experiments) < _PAGE_SIZE:
                    return experiments
                cursor = resp.cursor
        except grpc.RpcError as rpc_error_call:
            self._logger.error(
                "experiment calculator: list experiments failed",
                {"code": rpc_error_call.code(), "details": rpc_error_call.details()},
            )
            raise

    def _list_end_at(self, start_at: int, end_at: int, now: int) -> List[int]:
        """Return the daily boundaries between start_at and min(end_at, now).

        The final boundary (the capped end) is always included, so the
        last timeseries point reflects the most recent data.
        """
        end_at = end_at if end_at < now else now
        timestamps = []
        day = 24 * 60 * 60
        for ts in range(start_at + day, end_at, day):
            timestamps.append(ts)
        timestamps.append(end_at)
        return timestamps

    def _experiment_result(self, experiment_id: str):
        """Create an empty ExperimentResult shell keyed by the experiment id."""
        return experiment_result_pb2.ExperimentResult(
            id=experiment_id,
            experiment_id=experiment_id,
            updated_at=int(self._now().timestamp()),
        )

    def _variation_count(self, variation_id: str):
        """Create a VariationCount with empty timeseries fields initialized."""
        vc = variation_count_pb2.VariationCount()
        vc.variation_id = variation_id
        user_ts = timeseries_pb2.Timeseries()
        vc.user_timeseries_count.CopyFrom(user_ts)
        event_ts = timeseries_pb2.Timeseries()
        vc.event_timeseries_count.CopyFrom(event_ts)
        value_sum_ts = timeseries_pb2.Timeseries()
        vc.value_sum_timeseries_count.CopyFrom(value_sum_ts)
        return vc

    def _variation_result(self, variation_id: str):
        """Create a VariationResult with empty timeseries fields initialized."""
        vr = variation_result_pb2.VariationResult()
        vr.variation_id = variation_id

        eval_user_ts = timeseries_pb2.Timeseries()
        vr.evaluation_user_count_timeseries.CopyFrom(eval_user_ts)
        eval_event_ts = timeseries_pb2.Timeseries()
        vr.evaluation_event_count_timeseries.CopyFrom(eval_event_ts)

        goal_user_ts = timeseries_pb2.Timeseries()
        vr.goal_user_count_timeseries.CopyFrom(goal_user_ts)
        goal_event_ts = timeseries_pb2.Timeseries()
        vr.goal_event_count_timeseries.CopyFrom(goal_event_ts)
        goal_value_sum_ts = timeseries_pb2.Timeseries()
        vr.goal_value_sum_timeseries.CopyFrom(goal_value_sum_ts)
        return vr

    def _get_evaluation_count(
        self,
        environment_namespace: str,
        start_at: int,
        end_at: int,
        feature_id: str,
        feature_version: int,
        variation_ids: List[str],
    ) -> Dict[str, variation_count_pb2.VariationCount]:
        """Fetch evaluation counts keyed by variation id.

        Raises the underlying grpc.RpcError after logging it.
        """
        try:
            resp = self._event_counter_stub.GetEvaluationCountV2(
                ec_service_pb2.GetEvaluationCountV2Request(
                    environment_namespace=environment_namespace,
                    start_at=start_at,
                    end_at=end_at,
                    feature_id=feature_id,
                    feature_version=feature_version,
                    variation_ids=variation_ids,
                ),
                self._grpc_timeout,
            )
            variation_counts = {}
            for vc in resp.count.realtime_counts:
                variation_counts[vc.variation_id] = vc
            return variation_counts
        except grpc.RpcError as rpc_error_call:
            self._logger.error(
                "experiment calculator: get evaluation count failed",
                {"code": rpc_error_call.code(), "details": rpc_error_call.details()},
            )
            raise

    def _get_goal_count(
        self,
        environment_namespace: str,
        start_at: int,
        end_at: int,
        goal_id: str,
        feature_id: str,
        feature_version: int,
        variation_ids: List[str],
    ) -> Dict[str, variation_count_pb2.VariationCount]:
        """Fetch goal counts keyed by variation id.

        Raises the underlying grpc.RpcError after logging it.
        """
        try:
            resp = self._event_counter_stub.GetGoalCountV2(
                ec_service_pb2.GetGoalCountV2Request(
                    environment_namespace=environment_namespace,
                    start_at=start_at,
                    end_at=end_at,
                    goal_id=goal_id,
                    feature_id=feature_id,
                    feature_version=feature_version,
                    variation_ids=variation_ids,
                ),
                self._grpc_timeout,
            )
            variation_counts = {}
            for vc in resp.goal_counts.realtime_counts:
                variation_counts[vc.variation_id] = vc
            return variation_counts
        except grpc.RpcError as rpc_error_call:
            self._logger.error(
                "experiment calculator: get goal count failed",
                {"code": rpc_error_call.code(), "details": rpc_error_call.details()},
            )
            raise

    def _create_goal_result(
        self,
        experiment: experiment_pb2.Experiment,
        evaluation_counts: Dict[str, variation_count_pb2.VariationCount],
        goal_counts: Dict[str, variation_count_pb2.VariationCount],
    ) -> goal_result_pb2.GoalResult:
        # NOTE: the return annotation used to claim List[GoalResult],
        # but a single GoalResult is returned.
        goal_result = self._calc_goal_result(
            evaluation_counts, goal_counts, experiment.base_variation_id
        )
        # NOTE(review): uses the singular experiment.goal_id field even
        # though iteration elsewhere uses goal_ids -- confirm against the
        # experiment proto which field is authoritative here.
        goal_result.goal_id = experiment.goal_id
        return goal_result

    def _calc_goal_result(
        self,
        evaluation_variation_counts: Dict[str, variation_count_pb2.VariationCount],
        goal_variation_counts: Dict[str, variation_count_pb2.VariationCount],
        base_vid: str,
    ) -> goal_result_pb2.GoalResult:
        """Run the Bayesian models over one goal's per-variation counts.

        Returns a GoalResult that may be only partially filled: the
        statistical fields are skipped when the data is inconsistent
        (missing variation, eval < goal counts, or zero values).
        """
        vids = []
        goal_uc = []
        eval_uc = []
        vrs = {}
        value_means = []
        value_vars = []
        # If the baseline variation id is not found, index 0 is used.
        baseline_idx = 0
        gr = goal_result_pb2.GoalResult()
        for i, vid in enumerate(goal_variation_counts.keys()):
            goal_variation_count = goal_variation_counts[vid]
            vr = gr.variation_results.add()
            vr.variation_id = goal_variation_count.variation_id
            vr.experiment_count.CopyFrom(goal_variation_count)
            vid = goal_variation_count.variation_id
            if vid not in evaluation_variation_counts.keys():
                self._logger.error(
                    "experiment calculator: vid not found", {"variationId": vid}
                )
                # Return the partially-built result instead of None:
                # callers dereference the return value immediately, so
                # None would raise AttributeError and abort the whole run.
                return gr

            evaluation_variation_count = evaluation_variation_counts[vid]
            vids.append(vid)
            goal_uc.append(goal_variation_count.user_count)
            eval_uc.append(evaluation_variation_count.user_count)
            vr.evaluation_count.CopyFrom(evaluation_variation_count)
            value_means.append(goal_variation_count.value_sum_per_user_mean)
            value_vars.append(goal_variation_count.value_sum_per_user_variance)

            vrs[vid] = vr
            if base_vid == vid:
                baseline_idx = i

        # Skip the calculation if evaluation count is less than goal count.
        for i in range(len(eval_uc)):
            if eval_uc[i] < goal_uc[i]:
                return gr

        cvr_result = self._binomial_model.run(vids, goal_uc, eval_uc, baseline_idx)
        for vid, variation_result in cvr_result.items():
            vrs[vid].cvr_prob.CopyFrom(variation_result.cvr_prob)
            vrs[vid].cvr_prob_best.CopyFrom(variation_result.cvr_prob_best)
            vrs[vid].cvr_prob_beat_baseline.CopyFrom(
                variation_result.cvr_prob_beat_baseline
            )

        # Skip the value-per-user calculation if any values are zero.
        for i in range(len(vids)):
            if goal_uc[i] == 0 or value_means[i] == 0.0 or value_vars[i] == 0.0:
                return gr
        value_result = self._normal_inverse_gamma.run(
            vids, value_means, value_vars, goal_uc, baseline_idx
        )
        for vid, variation_result in value_result.items():
            vrs[vid].goal_value_sum_per_user_prob.CopyFrom(
                variation_result.goal_value_sum_per_user_prob
            )
            vrs[vid].goal_value_sum_per_user_prob_best.CopyFrom(
                variation_result.goal_value_sum_per_user_prob_best
            )
            vrs[vid].goal_value_sum_per_user_prob_beat_baseline.CopyFrom(
                variation_result.goal_value_sum_per_user_prob_beat_baseline
            )
        return gr

    def _start_experiment(
        self,
        environment_namespace: str,
        id: str,
    ) -> None:
        """Transition an experiment to RUNNING via the experiment service."""
        try:
            self._experiment_stub.StartExperiment(
                experiment_service_pb2.StartExperimentRequest(
                    environment_namespace=environment_namespace,
                    id=id,
                    command=StartExperimentCommand(),
                ),
                self._grpc_timeout,
            )
            return
        except grpc.RpcError as rpc_error_call:
            self._logger.error(
                "experiment calculator: start experiment failed",
                {"code": rpc_error_call.code(), "details": rpc_error_call.details()},
            )
            raise

    def _finish_experiment(
        self,
        environment_namespace: str,
        id: str,
    ) -> None:
        """Transition an experiment to FINISHED via the experiment service."""
        try:
            self._experiment_stub.FinishExperiment(
                experiment_service_pb2.FinishExperimentRequest(
                    environment_namespace=environment_namespace,
                    id=id,
                    command=FinishExperimentCommand(),
                ),
                self._grpc_timeout,
            )
            return
        except grpc.RpcError as rpc_error_call:
            self._logger.error(
                "experiment calculator: finish experiment failed",
                {"code": rpc_error_call.code(), "details": rpc_error_call.details()},
            )
            raise

    def _update_experiment_status(
        self,
        environment_namespace: str,
        de: experiment_domain.Experiment,
        now: datetime,
    ) -> None:
        """Finish or start the experiment if its time window says so."""
        if de.is_updated_to_finish(now):
            self._finish_experiment(environment_namespace, de.pb.id)
            return
        if de.is_updated_to_running(now):
            self._start_experiment(environment_namespace, de.pb.id)
            return
from prometheus_client import Gauge

# Values for the "type" label of target_items_gauge below.
typeEnvironment = "Environment"
typeExperiment = "Experiment"
typeGoal = "Goal"

# Number of items processed per calculator run, labelled by item type
# and environment namespace (empty string for environment-level counts).
target_items_gauge = Gauge(
    "experiment_calculator_target_items",
    "Total number of experiments.",
    labelnames=["type", "environment"],
    namespace="bucketeer",
    subsystem="calculator",
)
import math
from datetime import datetime

import numpy as np
import pandas as pd
import pystan
from lib.calculator.stats import metrics
from proto.eventcounter import (distribution_summary_pb2, histogram_pb2,
                                variation_result_pb2)


class Binomial:
    """Bayesian binomial model for conversion rates, backed by PyStan.

    The Stan model is compiled once at construction time (slow) and then
    reused for every sampling run.
    """

    def __init__(self, logger):
        self._logger = logger
        self._model = self._compile_model()

    def _compile_model(self):
        """Compile and return the Stan model.

        Compilation invokes the C++ toolchain and can take minutes
        depending on machine resources; the duration is recorded in a
        Prometheus histogram.
        """
        model_code = """
            data {
                int g;
                int x[g];
                int n[g];
            }

            parameters {
                real p[g];
            }

            model {
                for(i in 1:g){
                    x[i] ~ binomial(n[i], p[i]);
                }
            }

            generated quantities {
                matrix[g, g] prob_upper;
                real prob_best[g];

                for(i in 1:g){
                    real others[g-1];
                    others = append_array(p[:i-1], p[i+1:]);
                    prob_best[i] = p[i] > max(others) ? 1 : 0;
                    for(j in 1:g){
                        prob_upper[i, j] = p[i] > p[j] ? 1 : 0;
                    }
                }
            }
        """
        start = datetime.now()
        model = pystan.StanModel(model_code=model_code)
        end = datetime.now()
        metrics.binomial_compile_duration_histogram.observe(
            (end - start).total_seconds()
        )
        return model

    def run(
        self,
        vids,
        x,
        n,
        baseline_idx,
    ):
        """Sample the posterior CVR for each variation.

        Args:
            vids: variation ids, parallel to x and n.
            x: conversion counts per variation.
            n: trial (evaluation) counts per variation.
            baseline_idx: 0-based index of the baseline variation.
        Returns:
            dict mapping variation id to a VariationResult with
            cvr_prob / cvr_prob_best / cvr_prob_beat_baseline filled in.
        """
        # Stan (and the summary row names) index from 1, not 0.
        baseline_idx += 1
        start = datetime.now()
        num_variation = len(n)
        stan_data = {
            "g": num_variation,
            "x": np.array(x),
            "n": np.array(n),
        }
        par = [
            "p",
            "prob_upper",
            "prob_best",
        ]
        fit = self._model.sampling(
            data=stan_data,
            iter=21000,
            chains=5,
            warmup=1000,
            seed=1234,
            algorithm="NUTS",
        )
        metrics.binomial_sampling_duration_histogram.observe(
            (datetime.now() - start).total_seconds()
        )
        result = self._conv_fit(fit, par, vids, baseline_idx)
        metrics.binomial_run_duration_histogram.observe(
            (datetime.now() - start).total_seconds()
        )
        return result

    def _conv_fit(self, fit, par, vids, baseline_idx):
        """Convert a Stan fit into per-variation VariationResult messages."""
        variation_results = {}
        p_posterior_dist = np.array(fit.extract(permuted=True)["p"])
        summary = self._get_summary_df(fit, par)
        for i in range(1, len(vids) + 1):
            vr = variation_result_pb2.VariationResult()
            vr.cvr_prob.CopyFrom(self._create_cvr_prob(summary, p_posterior_dist, i))
            vr.cvr_prob_best.CopyFrom(self._create_cvr_prob_best(summary, i))
            vr.cvr_prob_beat_baseline.CopyFrom(
                self._create_cvr_prob_beat_baseline(summary, baseline_idx, i)
            )
            variation_results[vids[i - 1]] = vr
        return variation_results

    def _get_summary_df(self, fit, par):
        """Return the Stan fit summary for the given parameters as a DataFrame."""
        summary = fit.summary(pars=par)
        return pd.DataFrame(
            summary["summary"],
            index=summary["summary_rownames"],
            columns=summary["summary_colnames"],
        )

    def _create_cvr_prob(self, summary, p_posterior_dist, idx):
        """Summarize the posterior CVR distribution for variation idx (1-based)."""
        prob = distribution_summary_pb2.DistributionSummary()
        prob.mean = summary.loc["p[{}]".format(idx), "mean"]
        prob.sd = summary.loc["p[{}]".format(idx), "sd"]
        prob.rhat = summary.loc["p[{}]".format(idx), "Rhat"]
        samples = p_posterior_dist.T[idx - 1]
        prob.median = np.median(samples)
        prob.percentile025 = np.percentile(samples, 2.5)
        prob.percentile975 = np.percentile(samples, 97.5)
        hist, bins = np.histogram(samples, bins=100)
        prob_histogram = histogram_pb2.Histogram()
        prob_histogram.hist[:] = hist
        prob_histogram.bins[:] = bins
        prob.histogram.CopyFrom(prob_histogram)
        return prob

    def _create_cvr_prob_best(self, summary, idx):
        """Summarize P(variation idx has the best CVR)."""
        prob_best = distribution_summary_pb2.DistributionSummary()
        prob_best.mean = summary.loc["prob_best[{}]".format(idx), "mean"]
        prob_best.sd = summary.loc["prob_best[{}]".format(idx), "sd"]
        prob_best.rhat = summary.loc["prob_best[{}]".format(idx), "Rhat"]
        return prob_best

    def _create_cvr_prob_beat_baseline(self, summary, baseline_idx, idx):
        """Summarize P(variation idx beats the baseline); zeros for the baseline itself."""
        prob_beat_baseline = distribution_summary_pb2.DistributionSummary()
        # Use ==, not "is": identity comparison on ints only works by
        # accident for CPython's small-int cache.
        if idx == baseline_idx:
            prob_beat_baseline.mean = 0.0
            prob_beat_baseline.sd = 0.0
            prob_beat_baseline.rhat = 0.0
        else:
            prob_beat_baseline.mean = summary.loc[
                "prob_upper[{},{}]".format(idx, baseline_idx), "mean"
            ]
            prob_beat_baseline.sd = summary.loc[
                "prob_upper[{},{}]".format(idx, baseline_idx), "sd"
            ]
            prob_beat_baseline.rhat = summary.loc[
                "prob_upper[{},{}]".format(idx, baseline_idx), "Rhat"
            ]
        return prob_beat_baseline
from prometheus_client import Histogram

# Bucket boundaries (seconds) shared by the model-run histograms.
_RUN_BUCKETS = (0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1.0, 2.0, 4.0, 10.0, float("inf"))
# Compilation is orders of magnitude slower, so it gets coarser buckets.
_COMPILE_BUCKETS = (30.0, 60.0, 90.0, 120.0, 180.0, 300.0, float("inf"))


def _duration_histogram(name, documentation, buckets):
    """Create a duration Histogram under the bucketeer/calculator namespace."""
    return Histogram(
        name,
        documentation,
        namespace="bucketeer",
        subsystem="calculator",
        buckets=buckets,
    )


binomial_compile_duration_histogram = _duration_histogram(
    "binomial_compile_duration_seconds",
    "Duration of binomial model compilation in seconds.",
    _COMPILE_BUCKETS,
)

binomial_sampling_duration_histogram = _duration_histogram(
    "binomial_sampling_duration_seconds",
    "Duration of binomial model sampling in seconds.",
    _RUN_BUCKETS,
)

binomial_run_duration_histogram = _duration_histogram(
    "binomial_run_duration_seconds",
    "Duration of binomial model run in seconds.",
    _RUN_BUCKETS,
)

normal_inverse_gamma_run_duration_histogram = _duration_histogram(
    "normal_inverse_gamma_run_duration_seconds",
    "Duration of normal inverse gamma model run in seconds.",
    _RUN_BUCKETS,
)
+# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from collections import namedtuple +from datetime import datetime +from typing import Dict, List + +import numpy as np +from lib.calculator.stats import metrics +from proto.eventcounter import distribution_summary_pb2, variation_result_pb2 + +_PRIOR_MEAN = 30 +_PRIOR_VAR = 2 +_PRIOR_SIZE = 20 +_PRIOR_ALPHA = 10 +_PRIOR_BETA = 1000 + +_Distr = namedtuple("Distr", ("mu", "nu", "alpha", "beta", "n")) + + +class NormalInverseGamma: + def __init__(self, logger): + self._logger = logger + + def run( + self, + vids: List[str], + means: List[float], + vars: List[float], + sizes: List[int], + baseline_idx: int, + post_gen_num: int = 25000, + ) -> Dict[str, variation_result_pb2.VariationResult]: + start = datetime.now() + variation_num = len(means) + posteriors = [] + samples = np.zeros((variation_num, post_gen_num)) + for i in range(variation_num): + post = self._calc_posterior( + sizes[i], + means[i], + vars[i], + _PRIOR_SIZE, + _PRIOR_MEAN, + _PRIOR_VAR, + _PRIOR_ALPHA, + _PRIOR_BETA, + ) + posteriors.append(post) + samples[i, :] = self._gen_rnormgamma(post_gen_num, post) + best = np.zeros((variation_num, post_gen_num)) + beat_baseline = np.zeros((variation_num, post_gen_num)) + for i in range(post_gen_num): + best[:, i] = self._calc_best(samples[:, i]) + beat_baseline[:, i] = self._calc_beat_baseline(samples[:, i], baseline_idx) + prob_best = np.sum(best, axis=1) / post_gen_num + prob_beat_baseline = np.sum(beat_baseline, axis=1) / post_gen_num + + variation_results = {} + for i in range(variation_num): + vr = variation_result_pb2.VariationResult() + vr.goal_value_sum_per_user_prob.CopyFrom( + self._create_value_sum_prob(samples[i]) + ) + vr.goal_value_sum_per_user_prob_best.CopyFrom( + self._create_value_sum_prob_best(prob_best[i]) + ) + vr.goal_value_sum_per_user_prob_beat_baseline.CopyFrom( + 
self._create_value_sum_prob_beat_baseline(prob_beat_baseline[i]) + ) + variation_results[vids[i]] = vr + + metrics.normal_inverse_gamma_run_duration_histogram.observe( + (datetime.now() - start).total_seconds() + ) + return variation_results + + def _create_value_sum_prob(self, samples: List[float]): + distr = distribution_summary_pb2.DistributionSummary() + distr.median = np.median(samples) + distr.percentile025 = np.percentile(samples, 2.5) + distr.percentile975 = np.percentile(samples, 97.5) + return distr + + def _create_value_sum_prob_best(self, prob_best: float): + distr = distribution_summary_pb2.DistributionSummary() + distr.mean = prob_best + return distr + + def _create_value_sum_prob_beat_baseline(self, prob_beat_baseline: float): + distr = distribution_summary_pb2.DistributionSummary() + distr.mean = prob_beat_baseline + return distr + + def _calc_posterior( + self, + this_n: int, + this_mu: float, + this_sigma: float, + prior_n: int, + prior_mu: float, + prior_nu: float, + prior_alpha: float, + prior_beta: float, + ): + ret_n = this_n + prior_n + # Take the logarithm to avoid from resulting big difference. 
+ n2 = math.log(this_n, 1.1) + post_mu = (prior_nu * prior_mu + n2 * this_mu) / (prior_nu + n2) + post_nu = prior_nu + n2 + post_alpha = prior_alpha + (n2 / 2) + post_beta = ( + prior_beta + + (1 / 2) * (this_sigma**2) * n2 + + (n2 * prior_nu / (prior_nu * n2)) * ((this_mu - prior_mu) ** 2) / 2 + ) + post = _Distr(post_mu, post_nu, post_alpha, post_beta, ret_n) + return post + + def _gen_rnormgamma(self, n, posterior): + return self._gen(n, posterior.mu, posterior.nu, posterior.alpha, posterior.beta) + + def _gen(self, n, mu, lmbd, alpha, beta): + tau = 1 / np.random.gamma(alpha, scale=1 / beta, size=n) + x = np.random.normal(loc=mu, scale=np.sqrt(tau / lmbd), size=n) + return x + + def _calc_best(self, samples: List[float]): + max = np.array([samples == samples.max()]) + return max.astype(int) + + def _calc_beat_baseline(self, samples: List[float], baseline_idx: int): + baseline = samples[baseline_idx] + beat_baseline = np.array([samples > baseline]) + return beat_baseline.astype(int) diff --git a/python/src/lib/calculator/storage/mysql_experiment_result.py b/python/src/lib/calculator/storage/mysql_experiment_result.py new file mode 100644 index 000000000..e82142d5f --- /dev/null +++ b/python/src/lib/calculator/storage/mysql_experiment_result.py @@ -0,0 +1,57 @@ +# Copyright 2022 The Bucketeer Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import json +from typing import List + +from google.protobuf import json_format +from lib.calculator.domain import experiment_result as domain +from lib.storage.mysql import client as mysql_client + + +class MySQLExperimentResultStorage: + def __init__(self, client: mysql_client.Client): + self._client = client + + def upsert_multi( + self, environment_namespace: str, domains: List[domain.ExperimentResult] + ): + conn = self._client.get_conn() + with conn: + with conn.cursor() as cursor: + for d in domains: + sql = ( + "INSERT INTO experiment_result " + "(id, experiment_id, updated_at, data, environment_namespace) " + "VALUES (%s, %s, %s, %s, %s) " + "ON DUPLICATE KEY UPDATE " + "experiment_id = VALUES(experiment_id), " + "updated_at = VALUES(updated_at), " + "data = VALUES(data)" + ) + dic = json_format.MessageToDict( + message=d.pb, preserving_proto_field_name=True + ) + data = json.dumps(dic, ensure_ascii=False) + cursor.execute( + sql, + ( + d.pb.id, + d.pb.experiment_id, + d.pb.updated_at, + data, + environment_namespace, + ), + ) + conn.commit() diff --git a/python/src/lib/environment/stub/stub.py b/python/src/lib/environment/stub/stub.py new file mode 100644 index 000000000..2ae8c83ab --- /dev/null +++ b/python/src/lib/environment/stub/stub.py @@ -0,0 +1,15 @@ +import grpc +from proto.environment import service_pb2_grpc + + +def create_stub(addr, cert_path, service_token_path): + with open(cert_path, "rb") as f: + credentials = grpc.ssl_channel_credentials(f.read()) + with open(service_token_path, "rb") as f: + access_token = f.read().decode("utf-8") + call_credentials = grpc.access_token_call_credentials(access_token) + composite_credentials = grpc.composite_channel_credentials( + credentials, call_credentials + ) + channel = grpc.secure_channel(addr, composite_credentials) + return service_pb2_grpc.EnvironmentServiceStub(channel) diff --git a/python/src/lib/eventcounter/stub/stub.py b/python/src/lib/eventcounter/stub/stub.py new file mode 100644 index 
000000000..b074f4438 --- /dev/null +++ b/python/src/lib/eventcounter/stub/stub.py @@ -0,0 +1,15 @@ +import grpc +from proto.eventcounter import service_pb2_grpc + + +def create_stub(addr, cert_path, service_token_path): + with open(cert_path, "rb") as f: + credentials = grpc.ssl_channel_credentials(f.read()) + with open(service_token_path, "rb") as f: + access_token = f.read().decode("utf-8") + call_credentials = grpc.access_token_call_credentials(access_token) + composite_credentials = grpc.composite_channel_credentials( + credentials, call_credentials + ) + channel = grpc.secure_channel(addr, composite_credentials) + return service_pb2_grpc.EventCounterServiceStub(channel) diff --git a/python/src/lib/experiment/stub/stub.py b/python/src/lib/experiment/stub/stub.py new file mode 100644 index 000000000..5816a6096 --- /dev/null +++ b/python/src/lib/experiment/stub/stub.py @@ -0,0 +1,15 @@ +import grpc +from proto.experiment import service_pb2_grpc + + +def create_stub(addr, cert_path, service_token_path): + with open(cert_path, "rb") as f: + credentials = grpc.ssl_channel_credentials(f.read()) + with open(service_token_path, "rb") as f: + access_token = f.read().decode("utf-8") + call_credentials = grpc.access_token_call_credentials(access_token) + composite_credentials = grpc.composite_channel_credentials( + credentials, call_credentials + ) + channel = grpc.secure_channel(addr, composite_credentials) + return service_pb2_grpc.ExperimentServiceStub(channel) diff --git a/python/src/lib/health/health.py b/python/src/lib/health/health.py new file mode 100644 index 000000000..6d664a205 --- /dev/null +++ b/python/src/lib/health/health.py @@ -0,0 +1,46 @@ +from typing import Callable +import asyncio +from typing import List +from grpc_health.v1 import health +from grpc_health.v1 import health_pb2 +from grpc_health.v1 import health_pb2_grpc + +Checks = List[Callable[[], bool]] + + +class Checker: + def __init__(self, checks: Checks, server, logger): + self._interval = 1 + 
self._logger = logger + self._checks = checks + self._register_server(server) + + def _register_server(self, server): + self._health_service = health.HealthServicer() + health_pb2_grpc.add_HealthServicer_to_server( + self._health_service, server.server + ) + + def _setServing(self): + self._health_service.set( + service="", status=health_pb2.HealthCheckResponse.SERVING + ) + + def _setNotServing(self): + self._health_service.set( + service="", status=health_pb2.HealthCheckResponse.NOT_SERVING + ) + + async def run(self): + try: + while True: + for check in self._checks: + feedback = check() + if feedback is not True: + self._setNotServing() + break + self._setServing() + await asyncio.sleep(self._interval) + except asyncio.CancelledError: + self._logger.info("checker: CancelledError") + self._health_service.enter_graceful_shutdown() diff --git a/python/src/lib/log/formatter.py b/python/src/lib/log/formatter.py new file mode 100644 index 000000000..e1ee2448f --- /dev/null +++ b/python/src/lib/log/formatter.py @@ -0,0 +1,7 @@ +import logging + + +class Formatter(logging.Formatter): + def format(self, record): + logmsg = super(Formatter, self).format(record) + return {"msg": logmsg, "args": record.args} diff --git a/python/src/lib/log/logger.py b/python/src/lib/log/logger.py new file mode 100644 index 000000000..1c0604613 --- /dev/null +++ b/python/src/lib/log/logger.py @@ -0,0 +1,31 @@ +import asyncio +import logging +import sys + +from google.cloud.logging.handlers import ContainerEngineHandler +from lib.log.formatter import Formatter + + +class Logger: + def __init__(self, log_name: str, log_level: str): + self._interval = 1 + self.logger = self._setup_logging(log_name, log_level) + + def _setup_logging(self, log_name: str, log_level: str): + logger = logging.getLogger(log_name) + handler = ContainerEngineHandler(log_name, stream=sys.stdout) + handler.setFormatter(Formatter()) + logger.addHandler(handler) + logger.setLevel(log_level) + logger.propagate = False + 
return logger + + async def run(self): + try: + while True: + await asyncio.sleep(self._interval) + except asyncio.CancelledError: + self.logger.info("logger: CancelledError") + logging.shutdown() + # stackdriver loging waits up 5 seconds to flush logs + await asyncio.sleep(5) diff --git a/python/src/lib/metrics/server.py b/python/src/lib/metrics/server.py new file mode 100644 index 000000000..c8cda6b96 --- /dev/null +++ b/python/src/lib/metrics/server.py @@ -0,0 +1,18 @@ +import asyncio + +from prometheus_async import aio + + +class Server: + def __init__(self, port, logger): + self._port = port + self._logger = logger + + async def run(self): + self._logger.info("metrics: starting server on port", {"port": self._port}) + await aio.web.start_http_server(port=self._port) + try: + while True: + await asyncio.sleep(1) + except asyncio.CancelledError: + self._logger.info("metrics: CancelledError") diff --git a/python/src/lib/rpc/rpc.py b/python/src/lib/rpc/rpc.py new file mode 100644 index 000000000..35a9eae77 --- /dev/null +++ b/python/src/lib/rpc/rpc.py @@ -0,0 +1,36 @@ +from concurrent import futures +import os +import grpc +import asyncio + + +class Server: + def __init__(self, port, cert_path, key_path, logger): + self._interval = 1 + self._port = port + self._logger = logger + self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) + self._credentials = grpc.ssl_server_credentials( + ( + ( + self._load_credential_from_file(key_path), + self._load_credential_from_file(cert_path), + ), + ) + ) + + @staticmethod + def _load_credential_from_file(filepath): + real_path = os.path.join(os.path.dirname(__file__), filepath) + with open(real_path, "rb") as f: + return f.read() + + async def run(self): + self.server.add_secure_port("[::]:%d" % self._port, self._credentials) + self.server.start() + try: + while True: + await asyncio.sleep(self._interval) + except asyncio.CancelledError: + self._logger.info("server: CancelledError") + self.server.stop(5) diff 
--git a/python/src/lib/schedule/job.py b/python/src/lib/schedule/job.py new file mode 100644 index 000000000..9c6a5183d --- /dev/null +++ b/python/src/lib/schedule/job.py @@ -0,0 +1,29 @@ +from enum import Enum + + +class Status(Enum): + SUCCESS = 1 + FAIL = 2 + + +class Job: + def __init__( + self, + name, + func, + month="*", + day="*", + day_of_week="*", + hour="*", + minute="*", + second="*", + ): + + self.name = name + self.func = func + self.month = month + self.day = day + self.day_of_week = day_of_week + self.hour = hour + self.minute = minute + self.second = second diff --git a/python/src/lib/schedule/metrics.py b/python/src/lib/schedule/metrics.py new file mode 100644 index 000000000..a20d0b8be --- /dev/null +++ b/python/src/lib/schedule/metrics.py @@ -0,0 +1,31 @@ +from prometheus_client import Histogram +from prometheus_client import Counter + + +CODE_SUCCESS = "Success" +CODE_FAIL = "Fail" + +job_duration_histogram = Histogram( + "scheduler_job_duration_seconds", + "Job Duration.", + labelnames=["name"], + namespace="bucketeer", + subsystem="calculator", + buckets=(0.1, 1.0, 5.0, 10.0, 20.0, 40.0, 60.0, 120.0, float("inf")), +) + +job_started_counter = Counter( + "scheduler_started_jobs_total", + "Total number of started jobs.", + labelnames=["name"], + namespace="bucketeer", + subsystem="calculator", +) + +job_finished_counter = Counter( + "scheduler_finished_jobs_total", + "Total number of finished jobs.", + labelnames=["name", "code"], + namespace="bucketeer", + subsystem="calculator", +) diff --git a/python/src/lib/schedule/scheduler.py b/python/src/lib/schedule/scheduler.py new file mode 100644 index 000000000..fc66cc030 --- /dev/null +++ b/python/src/lib/schedule/scheduler.py @@ -0,0 +1,72 @@ +import asyncio +from datetime import datetime + +from apscheduler.schedulers.asyncio import AsyncIOScheduler +from lib.schedule import metrics +from lib.schedule.job import Job, Status + + +class Scheduler: + def __init__(self, jobs: Job, logger): + 
self._INTERVAL = 1 + self._logger = logger + self.scheduler = AsyncIOScheduler() + self._job_statuses = {} + for j in jobs: + self.scheduler.add_job( + self._wrapf(j), + "cron", + year="*", + week="*", + month=j.month, + day=j.day, + day_of_week=j.day_of_week, + hour=j.hour, + minute=j.minute, + second=j.second, + max_instances=1, + ) + self._job_statuses[j.name] = Status.SUCCESS + + def _wrapf(self, job: Job): + def f(): + try: + metrics.job_started_counter.labels(job.name).inc() + start = datetime.now() + job.func() + end = datetime.now() + self._job_statuses[job.name] = Status.SUCCESS + self._logger.info( + "scheduler: job succeeded, jobName", {"jobName": job.name} + ) + metrics.job_finished_counter.labels( + job.name, metrics.CODE_SUCCESS + ).inc() + metrics.job_duration_histogram.labels(job.name).observe( + (end - start).total_seconds() + ) + except Exception as e: + metrics.job_finished_counter.labels(job.name, metrics.CODE_FAIL).inc() + self._logger.error( + "scheduler: job failed, jobName", + {"jobName": job.name, "error": str(e)}, + ) + self._job_statuses[job.name] = Status.FAIL + return e + + return f + + def check(self) -> bool: + for status in self._job_statuses.values(): + if status is Status.FAIL: + return False + return True + + async def run(self): + self.scheduler.start() + try: + while True: + await asyncio.sleep(self._INTERVAL) + except asyncio.CancelledError: + self._logger.info("scheduler: CancelledError") + self.scheduler.shutdown(wait=True) diff --git a/python/src/lib/signal/signal_handler.py b/python/src/lib/signal/signal_handler.py new file mode 100644 index 000000000..13db2fe84 --- /dev/null +++ b/python/src/lib/signal/signal_handler.py @@ -0,0 +1,34 @@ +import signal +import asyncio + + +class SignalHandler: + def __init__(self, logger): + self._INTERVAL = 1 + self._logger = logger + self._kill_now = False + self._set_handler(self._exit_gracefully) + + def _set_handler(self, handler): + signal.signal(signal.SIGINT, handler) + 
signal.signal(signal.SIGTERM, handler) + + def _exit_gracefully(self, signum, frame): + self._logger.info("signal handler: Signal reveived") + self._kill_now = True + # No longer accept any signals + # "*_" means that handler should accept 2 parameters. + self._set_handler(lambda *_: None) + + async def check(self, interval: int): + try: + while True: + if self._kill_now: + self._logger.info("signal handler: stop") + return + await asyncio.sleep(interval) + except asyncio.CancelledError: + self._logger.debug("signal handler: CancelledError") + + async def run(self): + await self.check(self._INTERVAL) diff --git a/python/src/lib/storage/mysql/client.py b/python/src/lib/storage/mysql/client.py new file mode 100644 index 000000000..e24763bb4 --- /dev/null +++ b/python/src/lib/storage/mysql/client.py @@ -0,0 +1,26 @@ +import pymysql.cursors + + +class Client: + def __init__( + self, + mysql_user: str, + mysql_pass: str, + mysql_host: str, + mysql_port: int, + mysql_db_name: str, + ): + self.mysql_user = mysql_user + self.mysql_pass = mysql_pass + self.mysql_host = mysql_host + self.mysql_port = mysql_port + self.mysql_db_name = mysql_db_name + + def get_conn(self): + return pymysql.connect( + host=self.mysql_host, + user=self.mysql_user, + password=self.mysql_pass, + database=self.mysql_db_name, + cursorclass=pymysql.cursors.DictCursor, + ) diff --git a/python/tests/lib/calculator/domain/experiment_result_test.py b/python/tests/lib/calculator/domain/experiment_result_test.py new file mode 100644 index 000000000..b56729ea6 --- /dev/null +++ b/python/tests/lib/calculator/domain/experiment_result_test.py @@ -0,0 +1,26 @@ +# Copyright 2022 The Bucketeer Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest + +from lib.calculator.domain.experiment_result import ExperimentResult + + +def test_experiment_result(): + actual = ExperimentResult(None) + assert isinstance(actual, ExperimentResult) + + +if __name__ == "__main__": + raise SystemExit(pytest.main([__file__])) diff --git a/python/tests/lib/calculator/domain/experiment_test.py b/python/tests/lib/calculator/domain/experiment_test.py new file mode 100644 index 000000000..a32f1e291 --- /dev/null +++ b/python/tests/lib/calculator/domain/experiment_test.py @@ -0,0 +1,162 @@ +# Copyright 2022 The Bucketeer Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from collections import namedtuple +from datetime import datetime + +import pytest +from proto.experiment import experiment_pb2 +from lib.calculator.domain.experiment import Experiment + + +def test_experiment(): + actual = Experiment(experiment_pb2.Experiment()) + assert isinstance(actual, Experiment) + assert isinstance(actual.pb, experiment_pb2.Experiment) + + +def test_is_calculated(): + t1 = _get_datetime("2018-06-24 08:15:27.243860") + t2 = _get_datetime("2018-06-25 08:15:27.243860") + + p = namedtuple("p", "msg start_at now expected") + patterns = [ + p( + msg="true: start_at == now", + start_at=t1, + now=t1, + expected=True, + ), + p( + msg="true: start_at < now", + start_at=t1, + now=t2, + expected=True, + ), + p( + msg="true: start_at > now", + start_at=t2, + now=t1, + expected=False, + ), + ] + for ptn in patterns: + e = experiment_pb2.Experiment() + e.start_at = int(ptn.start_at.timestamp()) + de = Experiment(e) + assert ptn.expected == de.is_calculated(ptn.now) + + +def test_is_updated_to_running(): + t1 = _get_datetime("2018-06-24 08:15:27.243860") + t2 = _get_datetime("2018-06-25 08:15:27.243860") + + p = namedtuple("p", "msg start_at status now expected") + patterns = [ + p( + msg="true: start_at == now", + start_at=t1, + status=experiment_pb2.Experiment.WAITING, + now=t1, + expected=True, + ), + p( + msg="true: start_at < now", + start_at=t1, + status=experiment_pb2.Experiment.WAITING, + now=t2, + expected=True, + ), + p( + msg="true: start_at > now", + start_at=t2, + status=experiment_pb2.Experiment.WAITING, + now=t1, + expected=False, + ), + p( + msg="true: RUNNING", + start_at=t1, + status=experiment_pb2.Experiment.RUNNING, + now=t2, + expected=False, + ), + ] + for ptn in patterns: + e = experiment_pb2.Experiment() + e.start_at = int(ptn.start_at.timestamp()) + e.status = ptn.status + de = Experiment(e) + assert ptn.expected == de.is_updated_to_running(ptn.now) + + +def test_is_updated_to_stop(): + t1 = _get_datetime("2018-06-24 
08:15:27.243860") + t2 = _get_datetime("2018-06-25 08:15:27.243860") + t3 = _get_datetime("2018-06-26 08:15:27.243860") + t4 = _get_datetime("2018-06-27 08:15:27.243860") + + p = namedtuple("p", "msg stop_at status now expected") + patterns = [ + p( + msg="true: stop_at + 2 days < now", + stop_at=t1, + status=experiment_pb2.Experiment.RUNNING, + now=t4, + expected=True, + ), + p( + msg="true: stop_at + 2 days == now", + stop_at=t1, + status=experiment_pb2.Experiment.RUNNING, + now=t3, + expected=False, + ), + p( + msg="true: stop_at < now < stop_at + 2 days", + stop_at=t1, + status=experiment_pb2.Experiment.RUNNING, + now=t3, + expected=False, + ), + p( + msg="true: stop_at > now", + stop_at=t2, + status=experiment_pb2.Experiment.RUNNING, + now=t1, + expected=False, + ), + p( + msg="true: STOPPED", + stop_at=t2, + status=experiment_pb2.Experiment.STOPPED, + now=t1, + expected=False, + ), + ] + for ptn in patterns: + e = experiment_pb2.Experiment() + e.stop_at = int(ptn.stop_at.timestamp()) + e.status = ptn.status + de = Experiment(e) + assert ptn.expected == de.is_updated_to_finish(ptn.now) + + +def _get_datetime(d: str) -> datetime: + format = "%Y-%m-%d %H:%M:%S.%f" + return datetime.strptime(d, format) + + +if __name__ == "__main__": + raise SystemExit(pytest.main([__file__])) diff --git a/python/tests/lib/calculator/job/calculate_experiments_test.py b/python/tests/lib/calculator/job/calculate_experiments_test.py new file mode 100644 index 000000000..e70c7f998 --- /dev/null +++ b/python/tests/lib/calculator/job/calculate_experiments_test.py @@ -0,0 +1,958 @@ +# Copyright 2022 The Bucketeer Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from collections import namedtuple +from datetime import datetime, timedelta +from logging import getLogger +from typing import List + +import pytest +from proto.eventcounter import ( + distribution_summary_pb2, + evaluation_count_pb2, + experiment_count_pb2, + histogram_pb2, +) +from proto.eventcounter import service_pb2 as eventcounter_service_pb2 +from proto.eventcounter import timeseries_pb2, variation_count_pb2, variation_result_pb2 +from proto.experiment import experiment_pb2 +from proto.experiment import service_pb2 as experiment_service_pb2 +from proto.feature import variation_pb2 +from pytest_mock import MockerFixture +from lib.calculator.domain import experiment as experiment_domain +from lib.calculator.job.calculate_experiments import ExperimentCalculator + + +def test_get_evaluation_count(mocker): + ec = _experimentCalculator() + insmock = mocker.Mock() + vr0 = variation_count_pb2.VariationCount(variation_id="vid0", user_count=0) + vr1 = variation_count_pb2.VariationCount(variation_id="vid1", user_count=1) + evalCount = evaluation_count_pb2.EvaluationCount( + realtime_counts=[vr0, vr1], + ) + resp = eventcounter_service_pb2.GetEvaluationCountV2Response(count=evalCount) + insmock.GetEvaluationCountV2.return_value = resp + mocker.patch.object(ec, "_event_counter_stub", insmock) + actual = ec._get_evaluation_count("", 0, 0, "", 0, []) + assert actual["vid0"].user_count == 0 + assert actual["vid1"].user_count == 1 + + +def test_get_goal_count(mocker): + ec = _experimentCalculator() + insmock = mocker.Mock() + vr0 = 
variation_count_pb2.VariationCount(variation_id="vid0", user_count=0) + vr1 = variation_count_pb2.VariationCount(variation_id="vid1", user_count=1) + gc0 = experiment_count_pb2.GoalCounts(goal_id="gid0", realtime_counts=[vr0, vr1]) + resp = eventcounter_service_pb2.GetGoalCountV2Response(goal_counts=gc0) + insmock.GetGoalCountV2.return_value = resp + + mocker.patch.object(ec, "_event_counter_stub", insmock) + actual = ec._get_goal_count("", 0, 0, "gid0", "", 0, []) + + assert actual["vid0"].user_count == 0 + assert actual["vid1"].user_count == 1 + + +def test_create_experiment_result(mocker): + ec = _experimentCalculator() + + ec = _experimentCalculator() + ec_insmock = mocker.Mock() + eval_vr0 = variation_count_pb2.VariationCount( + variation_id="vid0", + user_count=5, + event_count=10, + value_sum=4, + ) + eval_vr1 = variation_count_pb2.VariationCount( + variation_id="vid1", + user_count=4, + event_count=12, + value_sum=7, + ) + evalCount = evaluation_count_pb2.EvaluationCount( + realtime_counts=[eval_vr0, eval_vr1], + ) + resp = eventcounter_service_pb2.GetEvaluationCountV2Response(count=evalCount) + ec_insmock.GetEvaluationCountV2.return_value = resp + + goal_vr0 = variation_count_pb2.VariationCount( + variation_id="vid0", + user_count=2, + event_count=4, + value_sum=1.2, + value_sum_per_user_mean=1.2, + value_sum_per_user_variance=0.5, + ) + goal_vr1 = variation_count_pb2.VariationCount( + variation_id="vid1", + user_count=1, + event_count=2, + value_sum=3.4, + value_sum_per_user_mean=2.3, + value_sum_per_user_variance=0.6, + ) + gc0 = experiment_count_pb2.GoalCounts( + goal_id="gid", realtime_counts=[goal_vr0, goal_vr1] + ) + resp = eventcounter_service_pb2.GetGoalCountV2Response(goal_counts=gc0) + ec_insmock.GetGoalCountV2.return_value = resp + + mocker.patch.object(ec, "_event_counter_stub", ec_insmock) + + # CVR. 
+ cvr_vr = variation_result_pb2.VariationResult() + cvr_prob = distribution_summary_pb2.DistributionSummary() + cvr_prob.mean = 0.456 + cvr_prob.sd = 4.56 + cvr_prob.rhat = 45.6 + cvr_prob.median = 456.7 + cvr_prob.percentile025 = 4567.8 + cvr_prob.percentile975 = 45678.9 + cvr_prob_histogram = histogram_pb2.Histogram() + cvr_prob_histogram.hist[:] = [1, 2] + cvr_prob_histogram.bins[:] = [1, 2, 3] + cvr_prob.histogram.CopyFrom(cvr_prob_histogram) + cvr_vr.cvr_prob.CopyFrom(cvr_prob) + + cvr_prob_best = distribution_summary_pb2.DistributionSummary() + cvr_prob_best.mean = 1.1 + cvr_prob_best.sd = 2.2 + cvr_prob_best.rhat = 3.3 + cvr_vr.cvr_prob_best.CopyFrom(cvr_prob_best) + + cvr_prob_beat_baseline = distribution_summary_pb2.DistributionSummary() + cvr_prob_beat_baseline.mean = 1.11 + cvr_prob_beat_baseline.sd = 2.22 + cvr_prob_beat_baseline.rhat = 3.33 + cvr_vr.cvr_prob_beat_baseline.CopyFrom(cvr_prob_beat_baseline) + + binomial_insmock = mocker.Mock() + binomial_insmock.run.return_value = {"vid0": cvr_vr, "vid1": cvr_vr} + mocker.patch.object(ec, "_binomial_model", binomial_insmock) + + # Value sum per user. 
+ value_vr = variation_result_pb2.VariationResult() + value_prob = distribution_summary_pb2.DistributionSummary() + value_prob.median = 456.78 + value_prob.percentile025 = 4567.89 + value_prob.percentile975 = 45678.99 + value_vr.goal_value_sum_per_user_prob.CopyFrom(value_prob) + + value_prob_best = distribution_summary_pb2.DistributionSummary() + value_prob_best.mean = 1.11 + value_vr.goal_value_sum_per_user_prob_best.CopyFrom(value_prob_best) + + value_prob_beat_baseline = distribution_summary_pb2.DistributionSummary() + value_prob_beat_baseline.mean = 1.111 + value_vr.goal_value_sum_per_user_prob_beat_baseline.CopyFrom( + value_prob_beat_baseline + ) + + nig_insmock = mocker.Mock() + nig_insmock.run.return_value = {"vid0": value_vr, "vid1": value_vr} + mocker.patch.object(ec, "_normal_inverse_gamma", nig_insmock) + + now = datetime.now() + ec._now = lambda: now + experiment = _create_experiment() + experiment_result = ec._create_experiment_result("ns", experiment) + goal_result = experiment_result.goal_results[0] + assert goal_result.goal_id == "gid" + for vr in goal_result.variation_results: + if vr.variation_id == "vid0": + vr0 = vr + continue + if vr.variation_id == "vid1": + vr1 = vr + continue + pytest.fail("unknown variation id: {}".format(vr.variation_id)) + + assert vr0.variation_id == "vid0" + assert vr1.variation_id == "vid1" + + # vr0 + assert vr0.evaluation_count.variation_id == "vid0" + assert vr0.evaluation_count.user_count == 5 + assert vr0.evaluation_count.event_count == 10 + assert vr0.experiment_count.variation_id == "vid0" + assert vr0.experiment_count.user_count == 2 + assert vr0.experiment_count.event_count == 4 + assert vr0.experiment_count.value_sum == 1.2 + assert vr0.cvr_prob.mean == 0.456 + assert vr0.cvr_prob.sd == 4.56 + assert vr0.cvr_prob.rhat == 45.6 + assert vr0.cvr_prob.median == 456.7 + assert vr0.cvr_prob.percentile025 == 4567.8 + assert vr0.cvr_prob.percentile975 == 45678.9 + assert vr0.cvr_prob.histogram.hist == [1, 2] + 
assert vr0.cvr_prob.histogram.bins == [1, 2, 3] + assert vr0.cvr_prob_best.mean == 1.1 + assert vr0.cvr_prob_best.sd == 2.2 + assert vr0.cvr_prob_best.rhat == 3.3 + assert vr0.cvr_prob_beat_baseline.mean == 1.11 + assert vr0.cvr_prob_beat_baseline.sd == 2.22 + assert vr0.cvr_prob_beat_baseline.rhat == 3.33 + assert vr0.evaluation_user_count_timeseries.timestamps == [ + 1 * 24 * 60 * 60, + 2 * 24 * 60 * 60, + ] + assert vr0.evaluation_user_count_timeseries.values == [5.0, 5.0] + assert vr0.evaluation_event_count_timeseries.timestamps == [ + 1 * 24 * 60 * 60, + 2 * 24 * 60 * 60, + ] + assert vr0.evaluation_event_count_timeseries.values == [10.0, 10.0] + assert vr0.goal_user_count_timeseries.timestamps == [ + 1 * 24 * 60 * 60, + 2 * 24 * 60 * 60, + ] + assert vr0.goal_user_count_timeseries.values == [2.0, 2.0] + assert vr0.goal_event_count_timeseries.timestamps == [ + 1 * 24 * 60 * 60, + 2 * 24 * 60 * 60, + ] + assert vr0.goal_event_count_timeseries.values == [4.0, 4.0] + assert vr0.goal_value_sum_timeseries.timestamps == [ + 1 * 24 * 60 * 60, + 2 * 24 * 60 * 60, + ] + assert vr0.goal_value_sum_timeseries.values == [1.2, 1.2] + assert vr0.cvr_median_timeseries.timestamps == [1 * 24 * 60 * 60, 2 * 24 * 60 * 60] + assert vr0.cvr_median_timeseries.values == [456.7, 456.7] + assert vr0.cvr_percentile025_timeseries.timestamps == [ + 1 * 24 * 60 * 60, + 2 * 24 * 60 * 60, + ] + assert vr0.cvr_percentile025_timeseries.values == [4567.8, 4567.8] + assert vr0.cvr_percentile975_timeseries.timestamps == [ + 1 * 24 * 60 * 60, + 2 * 24 * 60 * 60, + ] + assert vr0.cvr_percentile975_timeseries.values == [45678.9, 45678.9] + assert vr0.cvr_timeseries.timestamps == [1 * 24 * 60 * 60, 2 * 24 * 60 * 60] + assert vr0.cvr_timeseries.values == [0.4, 0.4] + assert vr0.goal_value_sum_per_user_timeseries.timestamps == [ + 1 * 24 * 60 * 60, + 2 * 24 * 60 * 60, + ] + assert vr0.goal_value_sum_per_user_timeseries.values == [0.6, 0.6] + assert vr0.goal_value_sum_per_user_prob.median == 456.78 + 
assert vr0.goal_value_sum_per_user_prob.percentile025 == 4567.89 + assert vr0.goal_value_sum_per_user_prob.percentile975 == 45678.99 + assert vr0.goal_value_sum_per_user_prob_best.mean == 1.11 + assert vr0.goal_value_sum_per_user_prob_beat_baseline.mean == 1.111 + assert vr0.goal_value_sum_per_user_median_timeseries.timestamps == [ + 1 * 24 * 60 * 60, + 2 * 24 * 60 * 60, + ] + assert vr0.goal_value_sum_per_user_median_timeseries.values == [456.78, 456.78] + assert vr0.goal_value_sum_per_user_percentile025_timeseries.timestamps == [ + 1 * 24 * 60 * 60, + 2 * 24 * 60 * 60, + ] + assert vr0.goal_value_sum_per_user_percentile025_timeseries.values == [ + 4567.89, + 4567.89, + ] + assert vr0.goal_value_sum_per_user_percentile975_timeseries.timestamps == [ + 1 * 24 * 60 * 60, + 2 * 24 * 60 * 60, + ] + assert vr0.goal_value_sum_per_user_percentile975_timeseries.values == [ + 45678.99, + 45678.99, + ] + + # vr1 + assert vr1.evaluation_count.variation_id == "vid1" + assert vr1.evaluation_count.user_count == 4 + assert vr1.evaluation_count.event_count == 12 + assert vr1.experiment_count.variation_id == "vid1" + assert vr1.experiment_count.user_count == 1 + assert vr1.experiment_count.event_count == 2 + assert vr1.experiment_count.value_sum == 3.4 + assert vr1.cvr_prob.mean == 0.456 + assert vr1.cvr_prob.sd == 4.56 + assert vr1.cvr_prob.rhat == 45.6 + assert vr1.cvr_prob.median == 456.7 + assert vr1.cvr_prob.percentile025 == 4567.8 + assert vr1.cvr_prob.percentile975 == 45678.9 + assert vr1.cvr_prob.histogram.hist == [1, 2] + assert vr1.cvr_prob.histogram.bins == [1, 2, 3] + assert vr1.cvr_prob_best.mean == 1.1 + assert vr1.cvr_prob_best.sd == 2.2 + assert vr1.cvr_prob_best.rhat == 3.3 + assert vr1.cvr_prob_beat_baseline.mean == 1.11 + assert vr1.cvr_prob_beat_baseline.sd == 2.22 + assert vr1.cvr_prob_beat_baseline.rhat == 3.33 + assert vr1.evaluation_user_count_timeseries.timestamps == [ + 1 * 24 * 60 * 60, + 2 * 24 * 60 * 60, + ] + assert 
vr1.evaluation_user_count_timeseries.values == [4.0, 4.0] + assert vr1.evaluation_event_count_timeseries.timestamps == [ + 1 * 24 * 60 * 60, + 2 * 24 * 60 * 60, + ] + assert vr1.evaluation_event_count_timeseries.values == [12.0, 12.0] + assert vr1.goal_user_count_timeseries.timestamps == [ + 1 * 24 * 60 * 60, + 2 * 24 * 60 * 60, + ] + assert vr1.goal_user_count_timeseries.values == [1.0, 1.0] + assert vr1.goal_event_count_timeseries.timestamps == [ + 1 * 24 * 60 * 60, + 2 * 24 * 60 * 60, + ] + assert vr1.goal_event_count_timeseries.values == [2.0, 2.0] + assert vr1.goal_value_sum_timeseries.timestamps == [ + 1 * 24 * 60 * 60, + 2 * 24 * 60 * 60, + ] + assert vr1.goal_value_sum_timeseries.values == [3.4, 3.4] + assert vr1.cvr_median_timeseries.timestamps == [1 * 24 * 60 * 60, 2 * 24 * 60 * 60] + assert vr1.cvr_median_timeseries.values == [456.7, 456.7] + assert vr1.cvr_percentile025_timeseries.timestamps == [ + 1 * 24 * 60 * 60, + 2 * 24 * 60 * 60, + ] + assert vr1.cvr_percentile025_timeseries.values == [4567.8, 4567.8] + assert vr1.cvr_percentile975_timeseries.timestamps == [ + 1 * 24 * 60 * 60, + 2 * 24 * 60 * 60, + ] + assert vr1.cvr_percentile975_timeseries.values == [45678.9, 45678.9] + assert vr1.cvr_timeseries.timestamps == [1 * 24 * 60 * 60, 2 * 24 * 60 * 60] + assert vr1.cvr_timeseries.values == [0.25, 0.25] + assert vr1.goal_value_sum_per_user_timeseries.timestamps == [ + 1 * 24 * 60 * 60, + 2 * 24 * 60 * 60, + ] + assert vr1.goal_value_sum_per_user_timeseries.values == [3.4, 3.4] + assert vr1.goal_value_sum_per_user_prob.median == 456.78 + assert vr1.goal_value_sum_per_user_prob.percentile025 == 4567.89 + assert vr1.goal_value_sum_per_user_prob.percentile975 == 45678.99 + assert vr1.goal_value_sum_per_user_prob_best.mean == 1.11 + assert vr1.goal_value_sum_per_user_prob_beat_baseline.mean == 1.111 + assert vr1.goal_value_sum_per_user_median_timeseries.timestamps == [ + 1 * 24 * 60 * 60, + 2 * 24 * 60 * 60, + ] + assert 
vr1.goal_value_sum_per_user_median_timeseries.values == [456.78, 456.78] + assert vr1.goal_value_sum_per_user_percentile025_timeseries.timestamps == [ + 1 * 24 * 60 * 60, + 2 * 24 * 60 * 60, + ] + assert vr1.goal_value_sum_per_user_percentile025_timeseries.values == [ + 4567.89, + 4567.89, + ] + assert vr1.goal_value_sum_per_user_percentile975_timeseries.timestamps == [ + 1 * 24 * 60 * 60, + 2 * 24 * 60 * 60, + ] + assert vr1.goal_value_sum_per_user_percentile975_timeseries.values == [ + 45678.99, + 45678.99, + ] + + +def test_update_experiment_status(mocker: MockerFixture): + ec = _experimentCalculator() + m = mocker.Mock() + resp = experiment_service_pb2.StartExperimentResponse() + m.StartExperiment.return_value = resp + resp = experiment_service_pb2.FinishExperimentResponse() + m.FinishExperiment.return_value = resp + mocker.patch.object(ec, "_experiment_stub", m) + + now = datetime.now() + now_unix = int(now.timestamp()) + stop_at_unix = int((now - timedelta(days=3)).timestamp()) + e = experiment_pb2.Experiment( + status=experiment_pb2.Experiment.WAITING, + stop_at=stop_at_unix, + ) + de = experiment_domain.Experiment(e) + ec._update_experiment_status("en", de, now) + m.FinishExperiment.assert_called_once() + + now_unix = int(now.timestamp()) + stop_at_unix = int((now + timedelta(days=1)).timestamp()) + e = experiment_pb2.Experiment( + status=experiment_pb2.Experiment.WAITING, + start_at=now_unix, + stop_at=stop_at_unix, + ) + de = experiment_domain.Experiment(e) + ec._update_experiment_status("en", de, now) + m.StartExperiment.assert_called_once() + + +def test_append_variation_results(mocker): + p = namedtuple("p", "msg timestamp dst_vrs src_vrs expected") + patterns = [ + p( + msg="true: running", + timestamp=2, + dst_vrs=_create_variation_results( + variation_ids=["vid0", "vid1"], + timestamps=[1], + eval_user_counts=[1, 10], + eval_event_counts=[2, 20], + goal_user_counts=[3, 30], + goal_event_counts=[4, 40], + goal_value_sums=[5.5, 50.5], + 
cvr_prob_median=[6.6, 60.6], + cvr_prob_percentile025=[7.7, 70.7], + cvr_prob_percentile975=[8.8, 80.8], + eval_user_tss=[[1], [10]], + eval_event_tss=[[2], [20]], + goal_user_tss=[[3], [30]], + goal_event_tss=[[4], [40]], + goal_value_sum_tss=[[5.5], [50.5]], + cvr_medians_tss=[[6.6], [60.6]], + cvr_percentile025_tss=[[7.7], [70.7]], + cvr_percentile975_tss=[[8.8], [80.8]], + goal_value_sum_per_user_prob_median=[2.3, 4.3], + goal_value_sum_per_user_prob_percentile025=[0.22, 0.33], + goal_value_sum_per_user_prob_percentile975=[0.44, 0.55], + goal_value_sum_per_user_medians_tss=[[1.2], [2.3]], + goal_value_sum_per_user_percentile025_tss=[[0.11], [0.45]], + goal_value_sum_per_user_percentile975_tss=[[0.12], [0.56]], + ), + src_vrs=_create_variation_results( + variation_ids=["vid0", "vid1"], + timestamps=[2], + eval_user_counts=[2, 20], + eval_event_counts=[3, 30], + goal_user_counts=[4, 40], + goal_event_counts=[5, 50], + goal_value_sums=[6.6, 60.6], + cvr_prob_median=[7.7, 70.7], + cvr_prob_percentile025=[8.8, 80.8], + cvr_prob_percentile975=[9.9, 90.9], + eval_user_tss=[[2], [20]], + eval_event_tss=[[3], [30]], + goal_user_tss=[[4], [40]], + goal_event_tss=[[5], [50]], + goal_value_sum_tss=[[6.6], [60.6]], + cvr_medians_tss=[[7.7], [70.7]], + cvr_percentile025_tss=[[8.8], [80.8]], + cvr_percentile975_tss=[[9.9], [90.9]], + goal_value_sum_per_user_prob_median=[4.3, 5.3], + goal_value_sum_per_user_prob_percentile025=[0.22, 0.33], + goal_value_sum_per_user_prob_percentile975=[0.44, 0.55], + goal_value_sum_per_user_medians_tss=[[1.2], [2.3]], + goal_value_sum_per_user_percentile025_tss=[[0.11], [0.45]], + goal_value_sum_per_user_percentile975_tss=[[0.12], [0.56]], + ), + expected=_create_variation_results( + variation_ids=["vid0", "vid1"], + timestamps=[1, 2], + eval_user_counts=[2, 20], + eval_event_counts=[3, 30], + goal_user_counts=[4, 40], + goal_event_counts=[5, 50], + goal_value_sums=[6.6, 60.6], + cvr_prob_median=[7.7, 70.7], + cvr_prob_percentile025=[8.8, 
80.8], + cvr_prob_percentile975=[9.9, 90.9], + eval_user_tss=[[1, 2], [10, 20]], + eval_event_tss=[[2, 3], [20, 30]], + goal_user_tss=[[3, 4], [30, 40]], + goal_event_tss=[[4, 5], [40, 50]], + goal_value_sum_tss=[[5.5, 6.6], [50.5, 60.6]], + cvr_medians_tss=[[6.6, 7.7], [60.6, 70.7]], + cvr_percentile025_tss=[[7.7, 8.8], [70.7, 80.8]], + cvr_percentile975_tss=[[8.8, 9.9], [80.8, 90.9]], + goal_value_sum_per_user_prob_median=[4.3, 5.3], + goal_value_sum_per_user_prob_percentile025=[0.22, 0.33], + goal_value_sum_per_user_prob_percentile975=[0.44, 0.55], + goal_value_sum_per_user_medians_tss=[[1.2, 4.3], [2.3, 5.3]], + goal_value_sum_per_user_percentile025_tss=[[0.11, 0.22], [0.45, 0.33]], + goal_value_sum_per_user_percentile975_tss=[[0.12, 0.44], [0.56, 0.55]], + ), + ), + ] + for ptn in patterns: + ec = _experimentCalculator() + ec._append_variation_results(ptn.timestamp, ptn.dst_vrs, ptn.src_vrs) + + actual = sorted(ptn.dst_vrs, key=lambda vr: vr.variation_id) + expected = sorted(ptn.expected, key=lambda vr: vr.variation_id) + + assert ( + expected[0].evaluation_count.user_count + == actual[0].evaluation_count.user_count + ) + assert ( + expected[0].evaluation_count.event_count + == actual[0].evaluation_count.event_count + ) + assert ( + expected[0].evaluation_count.value_sum + == actual[0].evaluation_count.value_sum + ) + + assert ( + expected[0].experiment_count.user_count + == actual[0].experiment_count.user_count + ) + assert ( + expected[0].experiment_count.event_count + == actual[0].experiment_count.event_count + ) + assert ( + expected[0].experiment_count.value_sum + == actual[0].experiment_count.value_sum + ) + + assert expected[0].cvr_prob.median == actual[0].cvr_prob.median + assert expected[0].cvr_prob.percentile025 == actual[0].cvr_prob.percentile025 + assert expected[0].cvr_prob.percentile975 == actual[0].cvr_prob.percentile975 + + assert ( + expected[0].evaluation_user_count_timeseries.timestamps + == 
actual[0].evaluation_user_count_timeseries.timestamps + ) + assert ( + expected[0].evaluation_user_count_timeseries.values + == actual[0].evaluation_user_count_timeseries.values + ) + + assert ( + expected[0].evaluation_event_count_timeseries.timestamps + == actual[0].evaluation_event_count_timeseries.timestamps + ) + assert ( + expected[0].evaluation_event_count_timeseries.values + == actual[0].evaluation_event_count_timeseries.values + ) + + assert ( + expected[0].goal_user_count_timeseries.timestamps + == actual[0].goal_user_count_timeseries.timestamps + ) + assert ( + expected[0].goal_user_count_timeseries.values + == actual[0].goal_user_count_timeseries.values + ) + + assert ( + expected[0].goal_event_count_timeseries.timestamps + == actual[0].goal_event_count_timeseries.timestamps + ) + assert ( + expected[0].goal_event_count_timeseries.values + == actual[0].goal_event_count_timeseries.values + ) + + assert ( + expected[0].goal_value_sum_timeseries.timestamps + == actual[0].goal_value_sum_timeseries.timestamps + ) + assert ( + expected[0].goal_value_sum_timeseries.values + == actual[0].goal_value_sum_timeseries.values + ) + + assert ( + expected[0].cvr_median_timeseries.timestamps + == actual[0].cvr_median_timeseries.timestamps + ) + assert ( + expected[0].cvr_median_timeseries.values + == actual[0].cvr_median_timeseries.values + ) + + assert ( + expected[0].cvr_percentile025_timeseries.timestamps + == actual[0].cvr_percentile025_timeseries.timestamps + ) + assert ( + expected[0].cvr_percentile025_timeseries.values + == actual[0].cvr_percentile025_timeseries.values + ) + + assert ( + expected[0].cvr_percentile975_timeseries.timestamps + == actual[0].cvr_percentile975_timeseries.timestamps + ) + assert ( + expected[0].cvr_percentile975_timeseries.values + == actual[0].cvr_percentile975_timeseries.values + ) + + assert ( + expected[0].cvr_timeseries.timestamps == actual[0].cvr_timeseries.timestamps + ) + assert expected[0].cvr_timeseries.values == 
actual[0].cvr_timeseries.values + + assert ( + expected[0].goal_value_sum_per_user_timeseries.timestamps + == actual[0].goal_value_sum_per_user_timeseries.timestamps + ) + assert ( + expected[0].goal_value_sum_per_user_timeseries.values + == actual[0].goal_value_sum_per_user_timeseries.values + ) + + assert ( + expected[0].goal_value_sum_per_user_prob.median + == actual[0].goal_value_sum_per_user_prob.median + ) + assert ( + expected[0].goal_value_sum_per_user_prob.percentile025 + == actual[0].goal_value_sum_per_user_prob.percentile025 + ) + assert ( + expected[0].goal_value_sum_per_user_prob.percentile975 + == actual[0].goal_value_sum_per_user_prob.percentile975 + ) + + assert ( + expected[0].goal_value_sum_per_user_median_timeseries.timestamps + == actual[0].goal_value_sum_per_user_median_timeseries.timestamps + ) + assert ( + expected[0].goal_value_sum_per_user_median_timeseries.values + == actual[0].goal_value_sum_per_user_median_timeseries.values + ) + + assert ( + expected[0].goal_value_sum_per_user_percentile025_timeseries.timestamps + == actual[0].goal_value_sum_per_user_percentile025_timeseries.timestamps + ) + assert ( + expected[0].goal_value_sum_per_user_percentile025_timeseries.values + == actual[0].goal_value_sum_per_user_percentile025_timeseries.values + ) + + assert ( + expected[0].goal_value_sum_per_user_percentile975_timeseries.timestamps + == actual[0].goal_value_sum_per_user_percentile975_timeseries.timestamps + ) + assert ( + expected[0].goal_value_sum_per_user_percentile975_timeseries.values + == actual[0].goal_value_sum_per_user_percentile975_timeseries.values + ) + + +def test_list_end_at(mocker): + day = 24 * 60 * 60 + + p = namedtuple("p", "msg start_at stop_at now expected") + patterns = [ + p( + msg="1 hour", + start_at=0, + stop_at=1 * 60 * 60, + now=32508810000, + expected=[3600], + ), + p( + msg="23 hours", + start_at=0, + stop_at=23 * 60 * 60, + now=32508810000, + expected=[82800], + ), + p( + msg="1 day", + start_at=0, + stop_at=24 
* 60 * 60, + now=32508810000, + expected=[86400], + ), + p( + msg="3 days", + start_at=0, + stop_at=300000, + now=32508810000, + expected=[day, 2 * day, 3 * day, 300000], + ), + p( + msg="3 days 18 hours", + start_at=1614848400, # 2021-03-04 09:00:00Z + stop_at=1615086000, # 2021-03-07 03:00:00Z + now=32508810000, + expected=[1614934800, 1615021200, 1615086000], + ), + p( + msg="now is earlier than end_at", + start_at=1614848400, # 2021-03-04 09:00:00Z + stop_at=1615086000, # 2021-03-07 03:00:00Z + now=1614967200, # 2021-03-06 03:00:00Z + expected=[1614934800, 1614967200], + ), + ] + for ptn in patterns: + ec = _experimentCalculator() + assert ptn.expected == ec._list_end_at(ptn.start_at, ptn.stop_at, ptn.now) + + +def _experimentCalculator(): + logger = getLogger(__name__) + return ExperimentCalculator(None, None, None, None, None, None, logger) + + +def _create_experiment(): + expr = experiment_pb2.Experiment() + expr.id = "eid" + expr.start_at = 0 + expr.stop_at = 2 * 24 * 60 * 60 + expr.base_variation_id = "vid1" + expr.goal_ids.extend(["gid"]) + expr.variations.extend( + [ + variation_pb2.Variation(id="vid0"), + variation_pb2.Variation(id="vid1"), + ] + ) + return expr + + +def _create_variation_count( + variation_id: str, + user_count: int = 0, + event_count: int = 0, + value_sum: int = 0, +) -> variation_count_pb2.VariationCount: + vc = variation_count_pb2.VariationCount() + vc.variation_id = variation_id + vc.user_count = user_count + vc.event_count = event_count + vc.value_sum = value_sum + return vc + + +def _create_variation_result( + variation_id: str, + timestamps: List[int], + eval_user_count: int, + eval_event_count: int, + goal_user_count: int, + goal_event_count: int, + goal_value_sum: int, + cvr_prob_median: float, + cvr_prob_percentile025: float, + cvr_prob_percentile975: float, + eval_user_ts: List[int], + eval_event_ts: List[int], + goal_user_ts: List[int], + goal_event_ts: List[int], + goal_value_sum_ts: List[float], + cvr_median_ts: 
List[float], + cvr_percentile025_ts: List[float], + cvr_percentile975_ts: List[float], + goal_value_sum_per_user_prob_median: float, + goal_value_sum_per_user_prob_percentile025: float, + goal_value_sum_per_user_prob_percentile975: float, + goal_value_sum_per_user_medians_ts: List[float], + goal_value_sum_per_user_percentile025_ts: List[float], + goal_value_sum_per_user_percentile975_ts: List[float], +) -> variation_count_pb2.VariationCount: + vr = variation_result_pb2.VariationResult() + vr.variation_id = variation_id + + eval_vc = _create_variation_count(variation_id, eval_user_count, eval_event_count) + vr.evaluation_count.CopyFrom(eval_vc) + goal_vc = _create_variation_count( + variation_id, goal_user_count, goal_event_count, goal_value_sum + ) + vr.experiment_count.CopyFrom(goal_vc) + + cvr_prob_ds = _create_distribution_summary( + cvr_prob_median, cvr_prob_percentile025, cvr_prob_percentile975 + ) + vr.cvr_prob.CopyFrom(cvr_prob_ds) + + eval_u_ts = timeseries_pb2.Timeseries() + eval_u_ts.timestamps.extend(timestamps) + eval_u_ts.values.extend([float(i) for i in eval_user_ts]) + vr.evaluation_user_count_timeseries.CopyFrom(eval_u_ts) + + eval_e_ts = timeseries_pb2.Timeseries() + eval_e_ts.timestamps.extend(timestamps) + eval_e_ts.values.extend([float(i) for i in eval_event_ts]) + vr.evaluation_event_count_timeseries.CopyFrom(eval_e_ts) + + goal_u_ts = timeseries_pb2.Timeseries() + goal_u_ts.timestamps.extend(timestamps) + goal_u_ts.values.extend([float(i) for i in goal_user_ts]) + vr.goal_user_count_timeseries.CopyFrom(goal_u_ts) + + goal_e_ts = timeseries_pb2.Timeseries() + goal_e_ts.timestamps.extend(timestamps) + goal_e_ts.values.extend([float(i) for i in goal_event_ts]) + vr.goal_event_count_timeseries.CopyFrom(goal_e_ts) + + goal_v_ts = timeseries_pb2.Timeseries() + goal_v_ts.timestamps.extend(timestamps) + goal_v_ts.values.extend(goal_value_sum_ts) + vr.goal_value_sum_timeseries.CopyFrom(goal_v_ts) + + cvr_m_ts = timeseries_pb2.Timeseries() + 
cvr_m_ts.timestamps.extend(timestamps) + cvr_m_ts.values.extend(cvr_median_ts) + vr.cvr_median_timeseries.CopyFrom(cvr_m_ts) + + cvr_02_ts = timeseries_pb2.Timeseries() + cvr_02_ts.timestamps.extend(timestamps) + cvr_02_ts.values.extend(cvr_percentile025_ts) + vr.cvr_percentile025_timeseries.CopyFrom(cvr_02_ts) + + cvr_97_ts = timeseries_pb2.Timeseries() + cvr_97_ts.timestamps.extend(timestamps) + cvr_97_ts.values.extend(cvr_percentile975_ts) + vr.cvr_percentile975_timeseries.CopyFrom(cvr_97_ts) + + cvr_ts = timeseries_pb2.Timeseries() + cvr_ts.timestamps.extend(timestamps) + cvr_ts.values.extend( + [ + goal_count / eval_count + for goal_count, eval_count in zip(goal_user_ts, eval_user_ts) + ] + ) + vr.cvr_timeseries.CopyFrom(cvr_ts) + + value_sum_per_user_ts = timeseries_pb2.Timeseries() + value_sum_per_user_ts.timestamps.extend(timestamps) + value_sum_per_user_ts.values.extend( + [ + value_sum / goal_count + for value_sum, goal_count in zip(goal_value_sum_ts, goal_user_ts) + ] + ) + vr.goal_value_sum_per_user_timeseries.CopyFrom(value_sum_per_user_ts) + + value_sum_per_user_prob_ds = _create_distribution_summary( + goal_value_sum_per_user_prob_median, + goal_value_sum_per_user_prob_percentile025, + goal_value_sum_per_user_prob_percentile975, + ) + vr.goal_value_sum_per_user_prob.CopyFrom(value_sum_per_user_prob_ds) + + value_sum_per_user_median_ts = timeseries_pb2.Timeseries() + value_sum_per_user_median_ts.timestamps.extend(timestamps) + value_sum_per_user_median_ts.values.extend(goal_value_sum_per_user_medians_ts) + vr.goal_value_sum_per_user_median_timeseries.CopyFrom(value_sum_per_user_median_ts) + + value_sum_per_user_02_ts = timeseries_pb2.Timeseries() + value_sum_per_user_02_ts.timestamps.extend(timestamps) + value_sum_per_user_02_ts.values.extend(goal_value_sum_per_user_percentile025_ts) + vr.goal_value_sum_per_user_percentile025_timeseries.CopyFrom( + value_sum_per_user_02_ts + ) + + value_sum_per_user_97_ts = timeseries_pb2.Timeseries() + 
value_sum_per_user_97_ts.timestamps.extend(timestamps) + value_sum_per_user_97_ts.values.extend(goal_value_sum_per_user_percentile975_ts) + vr.goal_value_sum_per_user_percentile975_timeseries.CopyFrom( + value_sum_per_user_97_ts + ) + + return vr + + +def _create_distribution_summary( + median: float, + percentile025: float, + percentile975: float, +) -> distribution_summary_pb2.DistributionSummary: + return distribution_summary_pb2.DistributionSummary( + median=median, + percentile025=percentile025, + percentile975=percentile975, + ) + + +def _create_variation_results( + variation_ids: List[str], + timestamps: List[int], + eval_user_counts: List[int], + eval_event_counts: List[int], + goal_user_counts: List[int], + goal_event_counts: List[int], + goal_value_sums: List[int], + cvr_prob_median: List[float], + cvr_prob_percentile025: List[float], + cvr_prob_percentile975: List[float], + eval_user_tss: List[List[int]], + eval_event_tss: List[List[int]], + goal_user_tss: List[List[int]], + goal_event_tss: List[List[int]], + goal_value_sum_tss: List[List[float]], + cvr_medians_tss: List[List[float]], + cvr_percentile025_tss: List[List[float]], + cvr_percentile975_tss: List[List[float]], + goal_value_sum_per_user_prob_median: List[float], + goal_value_sum_per_user_prob_percentile025: List[float], + goal_value_sum_per_user_prob_percentile975: List[float], + goal_value_sum_per_user_medians_tss: List[List[float]], + goal_value_sum_per_user_percentile025_tss: List[List[float]], + goal_value_sum_per_user_percentile975_tss: List[List[float]], +) -> List[variation_result_pb2.VariationResult]: + vrs = [] + for i in range(len(variation_ids)): + vrs.append( + _create_variation_result( + variation_id=variation_ids[i], + timestamps=timestamps, + eval_user_count=eval_user_counts[i], + eval_event_count=eval_event_counts[i], + goal_user_count=goal_user_counts[i], + goal_event_count=goal_event_counts[i], + goal_value_sum=goal_value_sums[i], + cvr_prob_median=cvr_prob_median[i], + 
cvr_prob_percentile025=cvr_prob_percentile025[i], + cvr_prob_percentile975=cvr_prob_percentile975[i], + eval_user_ts=eval_user_tss[i], + eval_event_ts=eval_event_tss[i], + goal_user_ts=goal_user_tss[i], + goal_event_ts=goal_event_tss[i], + goal_value_sum_ts=goal_value_sum_tss[i], + cvr_median_ts=cvr_medians_tss[i], + cvr_percentile025_ts=cvr_percentile025_tss[i], + cvr_percentile975_ts=cvr_percentile975_tss[i], + goal_value_sum_per_user_prob_median=goal_value_sum_per_user_prob_median[ + i + ], + goal_value_sum_per_user_prob_percentile025=goal_value_sum_per_user_prob_percentile025[ + i + ], + goal_value_sum_per_user_prob_percentile975=goal_value_sum_per_user_prob_percentile975[ + i + ], + goal_value_sum_per_user_medians_ts=goal_value_sum_per_user_medians_tss[ + i + ], + goal_value_sum_per_user_percentile025_ts=goal_value_sum_per_user_percentile025_tss[ + i + ], + goal_value_sum_per_user_percentile975_ts=goal_value_sum_per_user_percentile975_tss[ + i + ], + ) + ) + return vrs + + +if __name__ == "__main__": + raise SystemExit(pytest.main([__file__])) diff --git a/python/tests/lib/calculator/stats/binomial_test.py b/python/tests/lib/calculator/stats/binomial_test.py new file mode 100644 index 000000000..11e656abf --- /dev/null +++ b/python/tests/lib/calculator/stats/binomial_test.py @@ -0,0 +1,71 @@ +# Copyright 2022 The Bucketeer Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from logging import getLogger + +import pytest +from lib.calculator.stats.binomial import Binomial + + +def test_calc_result(mocker): + logger = getLogger(__name__) + binomial = Binomial(logger) + vrs = binomial.run(["vid1", "vid2"], [38, 51], [101, 99], 0) + if len(vrs) != 2: + pytest.fail("incorrect variation result length: {}".format(len(vrs))) + + vr1 = vrs["vid1"] + assert vr1.cvr_prob.mean >= 0.37 + assert vr1.cvr_prob.mean <= 0.38 + assert vr1.cvr_prob.sd >= 0.045 + assert vr1.cvr_prob.sd <= 0.05 + assert vr1.cvr_prob.rhat >= 0.9 + assert vr1.cvr_prob.rhat <= 1.1 + assert len(vr1.cvr_prob.histogram.hist) == 100 + assert len(vr1.cvr_prob.histogram.bins) == 101 + assert vr1.cvr_prob_best.mean >= 0.024 + assert vr1.cvr_prob_best.mean <= 0.026 + assert vr1.cvr_prob_best.sd >= 0.15 + assert vr1.cvr_prob_best.sd <= 0.16 + assert vr1.cvr_prob_best.rhat >= 0.9 + assert vr1.cvr_prob_best.rhat <= 1.1 + assert vr1.cvr_prob_beat_baseline.mean == 0.0 + assert vr1.cvr_prob_beat_baseline.sd == 0.0 + assert vr1.cvr_prob_beat_baseline.rhat == 0.0 + + vr2 = vrs["vid2"] + assert vr2.cvr_prob.mean >= 0.49 + assert vr2.cvr_prob.mean <= 0.52 + assert vr2.cvr_prob.sd >= 0.045 + assert vr2.cvr_prob.sd <= 0.05 + assert vr2.cvr_prob.rhat >= 0.9 + assert vr2.cvr_prob.rhat <= 1.1 + assert len(vr2.cvr_prob.histogram.hist) == 100 + assert len(vr2.cvr_prob.histogram.bins) == 101 + assert vr2.cvr_prob_best.mean >= 0.97 + assert vr2.cvr_prob_best.mean <= 0.98 + assert vr2.cvr_prob_best.sd >= 0.15 + assert vr2.cvr_prob_best.sd <= 0.16 + assert vr2.cvr_prob_best.rhat >= 0.9 + assert vr2.cvr_prob_best.rhat <= 1.1 + assert vr2.cvr_prob_beat_baseline.mean >= 0.97 + assert vr2.cvr_prob_beat_baseline.mean <= 0.98 + assert vr2.cvr_prob_beat_baseline.sd >= 0.15 + assert vr2.cvr_prob_beat_baseline.sd <= 0.16 + assert vr2.cvr_prob_beat_baseline.rhat >= 0.9 + assert vr2.cvr_prob_beat_baseline.rhat <= 1.1 + + +if __name__ == "__main__": + raise SystemExit(pytest.main([__file__])) diff --git 
a/python/tests/lib/calculator/stats/normal_inverse_gamma_test.py b/python/tests/lib/calculator/stats/normal_inverse_gamma_test.py new file mode 100644 index 000000000..5a730f453 --- /dev/null +++ b/python/tests/lib/calculator/stats/normal_inverse_gamma_test.py @@ -0,0 +1,105 @@ +# Copyright 2022 The Bucketeer Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from collections import namedtuple +from logging import getLogger + +import numpy as np +import pytest +from lib.calculator.stats.normal_inverse_gamma import NormalInverseGamma + + +def test_run(mocker): + logger = getLogger(__name__) + nig = NormalInverseGamma(logger) + # Pseudo sample statistic amount. + v1_mean, v1_sd = 12, 10 + v2_mean, v2_sd = 15, 12 + sample_num = 20000 + # Generate peudo samples + v1 = np.random.normal(loc=v1_mean, scale=v1_sd, size=sample_num) + v2 = np.random.normal(loc=v2_mean, scale=v2_sd, size=sample_num) + # Round up to zero. 
+ v1 = np.where(v1 < 0, 0, v1) + v2 = np.where(v2 < 0, 0, v2) + + vids = ["vid1", "vid2"] + means = [v1.mean(), v2.mean()] + vars = [v1.var(), v2.var()] + sizes = [v1.size, v2.size] + baseline_idx = 0 + vrs = nig.run(vids, means, vars, sizes, baseline_idx) + if len(vrs) != 2: + pytest.fail("incorrect variation result length: {}".format(len(vrs))) + + vr1 = vrs["vid1"] + assert vr1.goal_value_sum_per_user_prob.median > 12 + assert vr1.goal_value_sum_per_user_prob.median < 13.5 + assert vr1.goal_value_sum_per_user_prob.percentile025 > -2.0 + assert vr1.goal_value_sum_per_user_prob.percentile025 < -1.0 + assert vr1.goal_value_sum_per_user_prob.percentile975 > 26.5 + assert vr1.goal_value_sum_per_user_prob.percentile975 < 28.0 + assert vr1.goal_value_sum_per_user_prob_best.mean > 0.4 + assert vr1.goal_value_sum_per_user_prob_best.mean < 0.5 + assert vr1.goal_value_sum_per_user_prob_beat_baseline.mean == 0.0 + + vr2 = vrs["vid2"] + assert vr2.goal_value_sum_per_user_prob.median > 15 + assert vr2.goal_value_sum_per_user_prob.median < 17 + assert vr2.goal_value_sum_per_user_prob.percentile025 > -6.0 + assert vr2.goal_value_sum_per_user_prob.percentile025 < -4.0 + assert vr2.goal_value_sum_per_user_prob.percentile975 > 36.0 + assert vr2.goal_value_sum_per_user_prob.percentile975 < 37.5 + assert vr2.goal_value_sum_per_user_prob_best.mean > 0.4 + assert vr2.goal_value_sum_per_user_prob_best.mean < 0.6 + assert vr2.goal_value_sum_per_user_prob_beat_baseline.mean > 0.4 + assert vr2.goal_value_sum_per_user_prob_beat_baseline.mean < 0.6 + + +def test_calc_beat_baseline(mocker): + p = namedtuple("p", "input expected") + patterns = [ + p( + input=np.array([1, 2]), + expected=np.array([[0, 1]]), + ), + ] + for ptn in patterns: + nig = NormalInverseGamma(getLogger(__name__)) + actual = nig._calc_beat_baseline(ptn.input, 0) + assert ptn.expected[0, 0] == actual[0, 0] + assert ptn.expected[0, 1] == actual[0, 1] + + +def test_calc_best(mocker): + p = namedtuple("p", "input expected") + 
patterns = [ + p( + input=np.array([1, 2]), + expected=np.array([[0, 1]]), + ), + p( + input=np.array([1, 1]), + expected=np.array([[1, 1]]), + ), + ] + for ptn in patterns: + nig = NormalInverseGamma(getLogger(__name__)) + actual = nig._calc_best(ptn.input) + assert ptn.expected[0, 0] == actual[0, 0] + assert ptn.expected[0, 1] == actual[0, 1] + + +if __name__ == "__main__": + raise SystemExit(pytest.main([__file__])) diff --git a/remove.sh b/remove.sh new file mode 100755 index 000000000..160947790 --- /dev/null +++ b/remove.sh @@ -0,0 +1,7 @@ +#!/bin/bash +# find . -path ‘./pkg*mock*.go’ -exec rm -rf {} \; +find . -path './proto*.pb.go' -exec rm -rf {} \; +find . -path './sdk/android/bucketeer/src/main/proto/proto/*BUILD.bazel' -exec rm -rf {} \; +rm -fr sdk/android/bucketeer/build/ +rm -fr sdk/android/bucketeer/src/main/proto/proto/ +rm -fr vendor \ No newline at end of file diff --git a/renovate.json b/renovate.json new file mode 100644 index 000000000..5053e5f3e --- /dev/null +++ b/renovate.json @@ -0,0 +1,3 @@ +{ + "extends": ["config:base", "schedule:weekly"] +} diff --git a/repositories.bzl b/repositories.bzl new file mode 100644 index 000000000..a6a48a2ba --- /dev/null +++ b/repositories.bzl @@ -0,0 +1,1655 @@ +load("@bazel_gazelle//:deps.bzl", "go_repository") + +def go_repositories(): + go_repository( + name = "co_honnef_go_tools", + importpath = "honnef.co/go/tools", + sum = "h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8=", + version = "v0.0.1-2020.1.4", + ) + go_repository( + name = "com_github_alecthomas_template", + importpath = "github.com/alecthomas/template", + sum = "h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=", + version = "v0.0.0-20190718012654-fb15b899a751", + ) + go_repository( + name = "com_github_alecthomas_units", + importpath = "github.com/alecthomas/units", + sum = "h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E=", + version = "v0.0.0-20190717042225-c3de453c63f4", + ) + go_repository( + name = "com_github_antihax_optional", + 
importpath = "github.com/antihax/optional", + sum = "h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg=", + version = "v1.0.0", + ) + + go_repository( + name = "com_github_apache_thrift", + importpath = "github.com/apache/thrift", + sum = "h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs=", + version = "v0.12.0", + ) + + go_repository( + name = "com_github_aws_aws_sdk_go", + importpath = "github.com/aws/aws-sdk-go", + sum = "h1:/4+rDPe0W95KBmNGYCG+NUvdL8ssPYBMxL+aSCg6nIA=", + version = "v1.17.7", + ) + + go_repository( + name = "com_github_azure_go_ansiterm", + importpath = "github.com/Azure/go-ansiterm", + sum = "h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=", + version = "v0.0.0-20170929234023-d6e3b3328b78", + ) + + go_repository( + name = "com_github_beorn7_perks", + importpath = "github.com/beorn7/perks", + sum = "h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=", + version = "v1.0.1", + ) + + go_repository( + name = "com_github_bitly_go_hostpool", + importpath = "github.com/bitly/go-hostpool", + sum = "h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY=", + version = "v0.0.0-20171023180738-a3a6125de932", + ) + go_repository( + name = "com_github_bkaradzic_go_lz4", + importpath = "github.com/bkaradzic/go-lz4", + sum = "h1:RXc4wYsyz985CkXXeX04y4VnZFGG8Rd43pRaHsOXAKk=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_blang_semver", + importpath = "github.com/blang/semver", + sum = "h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=", + version = "v3.5.1+incompatible", + ) + + go_repository( + name = "com_github_bmizerany_assert", + importpath = "github.com/bmizerany/assert", + sum = "h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=", + version = "v0.0.0-20160611221934-b7ed37b82869", + ) + go_repository( + name = "com_github_burntsushi_toml", + importpath = "github.com/BurntSushi/toml", + sum = "h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_burntsushi_xgb", + importpath = 
"github.com/BurntSushi/xgb", + sum = "h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc=", + version = "v0.0.0-20160522181843-27f122750802", + ) + + go_repository( + name = "com_github_ca_dp_godruid", + importpath = "github.com/ca-dp/godruid", + sum = "h1:aWiNqopVwXEC1+HzANRIMYWXVsiU5LDAVDL6XMpkJJ0=", + version = "v0.0.0-20210401093507-918893fdd0d7", + ) + go_repository( + name = "com_github_census_instrumentation_opencensus_proto", + importpath = "github.com/census-instrumentation/opencensus-proto", + sum = "h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=", + version = "v0.2.1", + ) + go_repository( + name = "com_github_cespare_xxhash", + importpath = "github.com/cespare/xxhash", + sum = "h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=", + version = "v1.1.0", + ) + + go_repository( + name = "com_github_cespare_xxhash_v2", + importpath = "github.com/cespare/xxhash/v2", + sum = "h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=", + version = "v2.1.1", + ) + + go_repository( + name = "com_github_chzyer_logex", + importpath = "github.com/chzyer/logex", + sum = "h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=", + version = "v1.1.10", + ) + go_repository( + name = "com_github_chzyer_readline", + importpath = "github.com/chzyer/readline", + sum = "h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8=", + version = "v0.0.0-20180603132655-2972be24d48e", + ) + go_repository( + name = "com_github_chzyer_test", + importpath = "github.com/chzyer/test", + sum = "h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=", + version = "v0.0.0-20180213035817-a1ea475d72b1", + ) + go_repository( + name = "com_github_clickhouse_clickhouse_go", + importpath = "github.com/ClickHouse/clickhouse-go", + sum = "h1:HvD2NhKPLSeO3Ots6YV0ePgs4l3wO0bLqa9Uk1yeMOs=", + version = "v1.3.12", + ) + go_repository( + name = "com_github_client9_misspell", + importpath = "github.com/client9/misspell", + sum = "h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=", + version = "v0.3.4", + ) + + go_repository( + name = 
"com_github_cloudflare_golz4", + importpath = "github.com/cloudflare/golz4", + sum = "h1:F1EaeKL/ta07PY/k9Os/UFtwERei2/XzGemhpGnBKNg=", + version = "v0.0.0-20150217214814-ef862a3cdc58", + ) + go_repository( + name = "com_github_cncf_udpa_go", + importpath = "github.com/cncf/udpa/go", + sum = "h1:hzAQntlaYRkVSFEfj9OTWlVV1H155FMD8BTKktLv0QI=", + version = "v0.0.0-20210930031921-04548b0d99d4", + ) + go_repository( + name = "com_github_cncf_xds_go", + importpath = "github.com/cncf/xds/go", + sum = "h1:zH8ljVhhq7yC0MIeUL/IviMtY8hx2mK8cN9wEYb8ggw=", + version = "v0.0.0-20211011173535-cb28da3451f1", + ) + + go_repository( + name = "com_github_cockroachdb_apd", + importpath = "github.com/cockroachdb/apd", + sum = "h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=", + version = "v1.1.0", + ) + go_repository( + name = "com_github_cockroachdb_cockroach_go", + importpath = "github.com/cockroachdb/cockroach-go", + sum = "h1:eApuUG8W2EtBVwxqLlY2wgoqDYOg3WvIHGvW4fUbbow=", + version = "v0.0.0-20190925194419-606b3d062051", + ) + go_repository( + name = "com_github_containerd_containerd", + importpath = "github.com/containerd/containerd", + sum = "h1:LoIzb5y9x5l8VKAlyrbusNPXqBY0+kviRloxFUMFwKc=", + version = "v1.3.3", + ) + + go_repository( + name = "com_github_coreos_go_oidc", + importpath = "github.com/coreos/go-oidc", + sum = "h1:sdJrfw8akMnCuUlaZU3tE/uYXFgfqom8DBE9so9EBsM=", + version = "v2.1.0+incompatible", + ) + + go_repository( + name = "com_github_coreos_go_systemd", + importpath = "github.com/coreos/go-systemd", + sum = "h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c=", + version = "v0.0.0-20190719114852-fd7a80b32e1f", + ) + + go_repository( + name = "com_github_creack_pty", + importpath = "github.com/creack/pty", + sum = "h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w=", + version = "v1.1.9", + ) + + go_repository( + name = "com_github_cznic_mathutil", + importpath = "github.com/cznic/mathutil", + sum = "h1:XNT/Zf5l++1Pyg08/HV04ppB0gKxAqtZQBRYiYrUuYk=", + version = 
"v0.0.0-20180504122225-ca4c9f2c1369", + ) + go_repository( + name = "com_github_davecgh_go_spew", + importpath = "github.com/davecgh/go-spew", + sum = "h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=", + version = "v1.1.1", + ) + + go_repository( + name = "com_github_denisenkom_go_mssqldb", + importpath = "github.com/denisenkom/go-mssqldb", + sum = "h1:tkum0XDgfR0jcVVXuTsYv/erY2NnEDqwRojbxR1rBYA=", + version = "v0.0.0-20190515213511-eb9f6a1743f3", + ) + + go_repository( + name = "com_github_dhui_dktest", + importpath = "github.com/dhui/dktest", + sum = "h1:nZSDcnkpbotzT/nEHNsO+JCKY8i1Qoki1AYOpeLRb6M=", + version = "v0.3.2", + ) + go_repository( + name = "com_github_docker_distribution", + importpath = "github.com/docker/distribution", + sum = "h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=", + version = "v2.7.1+incompatible", + ) + go_repository( + name = "com_github_docker_docker", + importpath = "github.com/docker/docker", + sum = "h1:tmV+YbYOUAYDmAiamzhRKqQXaAUyUY2xVt27Rv7rCzA=", + version = "v1.4.2-0.20200213202729-31a86c4ab209", + ) + go_repository( + name = "com_github_docker_go_connections", + importpath = "github.com/docker/go-connections", + sum = "h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=", + version = "v0.4.0", + ) + go_repository( + name = "com_github_docker_go_units", + importpath = "github.com/docker/go-units", + sum = "h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=", + version = "v0.4.0", + ) + + go_repository( + name = "com_github_eapache_go_resiliency", + importpath = "github.com/eapache/go-resiliency", + sum = "h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q=", + version = "v1.2.0", + ) + go_repository( + name = "com_github_eapache_go_xerial_snappy", + importpath = "github.com/eapache/go-xerial-snappy", + sum = "h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=", + version = "v0.0.0-20180814174437-776d5712da21", + ) + go_repository( + name = "com_github_eapache_queue", + importpath = "github.com/eapache/queue", + sum = 
"h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=", + version = "v1.1.0", + ) + + go_repository( + name = "com_github_edsrzf_mmap_go", + importpath = "github.com/edsrzf/mmap-go", + sum = "h1:aaQcKT9WumO6JEJcRyTqFVq4XUZiUcKR2/GI31TOcz8=", + version = "v0.0.0-20170320065105-0bce6a688712", + ) + + go_repository( + name = "com_github_envoyproxy_go_control_plane", + importpath = "github.com/envoyproxy/go-control-plane", + sum = "h1:xvqufLtNVwAhN8NMyWklVgxnWohi+wtMGQMhtxexlm0=", + version = "v0.10.2-0.20220325020618-49ff273808a1", + ) + go_repository( + name = "com_github_envoyproxy_protoc_gen_validate", + importpath = "github.com/envoyproxy/protoc-gen-validate", + sum = "h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=", + version = "v0.1.0", + ) + + go_repository( + name = "com_github_fortytw2_leaktest", + importpath = "github.com/fortytw2/leaktest", + sum = "h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=", + version = "v1.3.0", + ) + go_repository( + name = "com_github_frankban_quicktest", + importpath = "github.com/frankban/quicktest", + sum = "h1:Gfh+GAJZOAoKZsIZeZbdn2JF10kN1XHNvjsvQK8gVkE=", + version = "v1.10.0", + ) + go_repository( + name = "com_github_fsnotify_fsnotify", + importpath = "github.com/fsnotify/fsnotify", + sum = "h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=", + version = "v1.4.7", + ) + + go_repository( + name = "com_github_fsouza_fake_gcs_server", + importpath = "github.com/fsouza/fake-gcs-server", + sum = "h1:OeH75kBZcZa3ZE+zz/mFdJ2btt9FgqfjI7gIh9+5fvk=", + version = "v1.17.0", + ) + go_repository( + name = "com_github_ghodss_yaml", + importpath = "github.com/ghodss/yaml", + sum = "h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_go_gl_glfw", + importpath = "github.com/go-gl/glfw", + sum = "h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0=", + version = "v0.0.0-20190409004039-e6da0acd62b1", + ) + + go_repository( + name = "com_github_go_gl_glfw_v3_3_glfw", + importpath = 
"github.com/go-gl/glfw/v3.3/glfw", + sum = "h1:WtGNWLvXpe6ZudgnXrq0barxBImvnnJoMEhXAzcbM0I=", + version = "v0.0.0-20200222043503-6f7a984d4dc4", + ) + go_repository( + name = "com_github_go_ini_ini", + importpath = "github.com/go-ini/ini", + sum = "h1:Mujh4R/dH6YL8bxuISne3xX2+qcQ9p0IxKAP6ExWoUo=", + version = "v1.25.4", + ) + go_repository( + name = "com_github_go_kit_kit", + importpath = "github.com/go-kit/kit", + sum = "h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=", + version = "v0.9.0", + ) + go_repository( + name = "com_github_go_kit_log", + importpath = "github.com/go-kit/log", + sum = "h1:DGJh0Sm43HbOeYDNnVZFl8BvcYVvjD5bqYJvp0REbwQ=", + version = "v0.1.0", + ) + + go_repository( + name = "com_github_go_logfmt_logfmt", + importpath = "github.com/go-logfmt/logfmt", + sum = "h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=", + version = "v0.5.0", + ) + + go_repository( + name = "com_github_go_redis_redis", + importpath = "github.com/go-redis/redis", + sum = "h1:9SpNVG76gr6InJGxoZ6IuuxaCOQwDAhzyXg+Bs+0Sb4=", + version = "v6.15.2+incompatible", + ) + + go_repository( + name = "com_github_go_sql_driver_mysql", + importpath = "github.com/go-sql-driver/mysql", + sum = "h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=", + version = "v1.6.0", + ) + go_repository( + name = "com_github_go_stack_stack", + importpath = "github.com/go-stack/stack", + sum = "h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=", + version = "v1.8.0", + ) + go_repository( + name = "com_github_go_test_deep", + importpath = "github.com/go-test/deep", + sum = "h1:u2CU3YKy9I2pmu9pX0eq50wCgjfGIt539SqR7FbHiho=", + version = "v1.0.4", + ) + + go_repository( + name = "com_github_gobuffalo_here", + importpath = "github.com/gobuffalo/here", + sum = "h1:hYrd0a6gDmWxBM4TnrGw8mQg24iSVoIkHEk7FodQcBI=", + version = "v0.6.0", + ) + go_repository( + name = "com_github_gocql_gocql", + importpath = "github.com/gocql/gocql", + sum = "h1:vF83LI8tAakwEwvWZtrIEx7pOySacl2TOxx6eXk4ePo=", + version = 
"v0.0.0-20190301043612-f6df8288f9b4", + ) + go_repository( + name = "com_github_gofrs_uuid", + importpath = "github.com/gofrs/uuid", + sum = "h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=", + version = "v4.0.0+incompatible", + ) + + go_repository( + name = "com_github_gogo_protobuf", + importpath = "github.com/gogo/protobuf", + sum = "h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=", + version = "v1.3.1", + ) + go_repository( + name = "com_github_golang_glog", + importpath = "github.com/golang/glog", + sum = "h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=", + version = "v0.0.0-20160126235308-23def4e6c14b", + ) + go_repository( + name = "com_github_golang_groupcache", + importpath = "github.com/golang/groupcache", + sum = "h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=", + version = "v0.0.0-20210331224755-41bb18bfe9da", + ) + + go_repository( + name = "com_github_golang_migrate_migrate_v4", + importpath = "github.com/golang-migrate/migrate/v4", + sum = "h1:uqtd0ysK5WyBQ/T1K2uDIooJV0o2Obt6uPwP062DupQ=", + version = "v4.11.0", + ) + go_repository( + name = "com_github_golang_mock", + importpath = "github.com/golang/mock", + sum = "h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=", + version = "v1.6.0", + ) + go_repository( + name = "com_github_golang_protobuf", + importpath = "github.com/golang/protobuf", + sum = "h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=", + version = "v1.5.2", + ) + go_repository( + name = "com_github_golang_snappy", + importpath = "github.com/golang/snappy", + sum = "h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=", + version = "v0.0.3", + ) + go_repository( + name = "com_github_gomodule_redigo", + importpath = "github.com/gomodule/redigo", + sum = "h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0=", + version = "v2.0.0+incompatible", + ) + go_repository( + name = "com_github_google_btree", + importpath = "github.com/google/btree", + sum = "h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=", + version = "v1.0.0", + ) + go_repository( + name = 
"com_github_google_go_cmp", + importpath = "github.com/google/go-cmp", + sum = "h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=", + version = "v0.5.8", + ) + + go_repository( + name = "com_github_google_go_github", + importpath = "github.com/google/go-github", + sum = "h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY=", + version = "v17.0.0+incompatible", + ) + go_repository( + name = "com_github_google_go_querystring", + importpath = "github.com/google/go-querystring", + sum = "h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_google_gofuzz", + importpath = "github.com/google/gofuzz", + sum = "h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_google_martian", + importpath = "github.com/google/martian", + sum = "h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=", + version = "v2.1.0+incompatible", + ) + go_repository( + name = "com_github_google_martian_v3", + importpath = "github.com/google/martian/v3", + sum = "h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ=", + version = "v3.2.1", + ) + + go_repository( + name = "com_github_google_pprof", + importpath = "github.com/google/pprof", + sum = "h1:VrKTY4lquiy1oJzVZgXrauku9Jx9P+POv/gTLakG4Wk=", + version = "v0.0.0-20220412212628-83db2b799d1f", + ) + go_repository( + name = "com_github_google_renameio", + importpath = "github.com/google/renameio", + sum = "h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA=", + version = "v0.1.0", + ) + go_repository( + name = "com_github_google_uuid", + importpath = "github.com/google/uuid", + sum = "h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=", + version = "v1.3.0", + ) + go_repository( + name = "com_github_googleapis_enterprise_certificate_proxy", + importpath = "github.com/googleapis/enterprise-certificate-proxy", + sum = "h1:zO8WHNx/MYiAKJ3d5spxZXZE6KHmIQGQcAzwUzV7qQw=", + version = "v0.1.0", + ) + + go_repository( + name = "com_github_googleapis_gax_go", + 
importpath = "github.com/googleapis/gax-go", + sum = "h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU=", + version = "v2.0.0+incompatible", + ) + go_repository( + name = "com_github_googleapis_gax_go_v2", + importpath = "github.com/googleapis/gax-go/v2", + sum = "h1:kBRZU0PSuI7PspsSb/ChWoVResUcwNVIdpB049pKTiw=", + version = "v2.5.1", + ) + + go_repository( + name = "com_github_googleapis_go_type_adapters", + importpath = "github.com/googleapis/go-type-adapters", + sum = "h1:9XdMn+d/G57qq1s8dNc5IesGCXHf6V2HZ2JwRxfA2tA=", + version = "v1.0.0", + ) + + go_repository( + name = "com_github_gorilla_context", + importpath = "github.com/gorilla/context", + sum = "h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8=", + version = "v1.1.1", + ) + go_repository( + name = "com_github_gorilla_handlers", + importpath = "github.com/gorilla/handlers", + sum = "h1:0QniY0USkHQ1RGCLfKxeNHK9bkDHGRYGNDFBCS+YARg=", + version = "v1.4.2", + ) + go_repository( + name = "com_github_gorilla_mux", + importpath = "github.com/gorilla/mux", + sum = "h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=", + version = "v1.7.4", + ) + go_repository( + name = "com_github_gorilla_websocket", + importpath = "github.com/gorilla/websocket", + sum = "h1:VJtLvh6VQym50czpZzx07z/kw9EgAxI3x1ZB8taTMQQ=", + version = "v1.2.0", + ) + + go_repository( + name = "com_github_grpc_ecosystem_grpc_gateway", + importpath = "github.com/grpc-ecosystem/grpc-gateway", + sum = "h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=", + version = "v1.16.0", + ) + + go_repository( + name = "com_github_hailocab_go_hostpool", + importpath = "github.com/hailocab/go-hostpool", + sum = "h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=", + version = "v0.0.0-20160125115350-e80d13ce29ed", + ) + go_repository( + name = "com_github_hashicorp_errwrap", + importpath = "github.com/hashicorp/errwrap", + sum = "h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_hashicorp_go_multierror", + 
importpath = "github.com/hashicorp/go-multierror", + sum = "h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=", + version = "v1.1.0", + ) + go_repository( + name = "com_github_hashicorp_go_uuid", + importpath = "github.com/hashicorp/go-uuid", + sum = "h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=", + version = "v1.0.2", + ) + go_repository( + name = "com_github_hashicorp_golang_lru", + importpath = "github.com/hashicorp/golang-lru", + sum = "h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=", + version = "v0.5.1", + ) + + go_repository( + name = "com_github_hpcloud_tail", + importpath = "github.com/hpcloud/tail", + sum = "h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=", + version = "v1.0.0", + ) + + go_repository( + name = "com_github_ianlancetaylor_demangle", + importpath = "github.com/ianlancetaylor/demangle", + sum = "h1:uGg2frlt3IcT7kbV6LEp5ONv4vmoO2FW4qSO+my/aoM=", + version = "v0.0.0-20210905161508-09a460cdf81d", + ) + + go_repository( + name = "com_github_itchyny_go_flags", + importpath = "github.com/itchyny/go-flags", + sum = "h1:Z5q2ist2sfDjDlExVPBrMqlsEDxDR2h4zuOElB0OEYI=", + version = "v1.5.0", + ) + go_repository( + name = "com_github_itchyny_gojq", + importpath = "github.com/itchyny/gojq", + sum = "h1:6SJ1BQ1VAwJAlIvLSIZmqHP/RUEq3qfVWvsRxrqhsD0=", + version = "v0.12.5", + ) + go_repository( + name = "com_github_itchyny_timefmt_go", + importpath = "github.com/itchyny/timefmt-go", + sum = "h1:7M3LGVDsqcd0VZH2U+x393obrzZisp7C0uEe921iRkU=", + version = "v0.1.3", + ) + go_repository( + name = "com_github_jackc_chunkreader", + importpath = "github.com/jackc/chunkreader", + sum = "h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_jackc_chunkreader_v2", + importpath = "github.com/jackc/chunkreader/v2", + sum = "h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=", + version = "v2.0.1", + ) + go_repository( + name = "com_github_jackc_pgconn", + importpath = "github.com/jackc/pgconn", + sum = 
"h1:rsDFzIpRk7xT4B8FufgpCCeyjdNpKyghZeSefViE5W8=", + version = "v1.12.1", + ) + go_repository( + name = "com_github_jackc_pgio", + importpath = "github.com/jackc/pgio", + sum = "h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_jackc_pgmock", + importpath = "github.com/jackc/pgmock", + sum = "h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc=", + version = "v0.0.0-20210724152146-4ad1a8207f65", + ) + go_repository( + name = "com_github_jackc_pgpassfile", + importpath = "github.com/jackc/pgpassfile", + sum = "h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_jackc_pgproto3", + importpath = "github.com/jackc/pgproto3", + sum = "h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A=", + version = "v1.1.0", + ) + go_repository( + name = "com_github_jackc_pgproto3_v2", + importpath = "github.com/jackc/pgproto3/v2", + sum = "h1:brH0pCGBDkBW07HWlN/oSBXrmo3WB0UvZd1pIuDcL8Y=", + version = "v2.3.0", + ) + go_repository( + name = "com_github_jackc_pgservicefile", + importpath = "github.com/jackc/pgservicefile", + sum = "h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg=", + version = "v0.0.0-20200714003250-2b9c44734f2b", + ) + + go_repository( + name = "com_github_jackc_pgtype", + importpath = "github.com/jackc/pgtype", + sum = "h1:u4uiGPz/1hryuXzyaBhSk6dnIyyG2683olG2OV+UUgs=", + version = "v1.11.0", + ) + go_repository( + name = "com_github_jackc_pgx_v4", + importpath = "github.com/jackc/pgx/v4", + sum = "h1:JzTglcal01DrghUqt+PmzWsZx/Yh7SC/CTQmSBMTd0Y=", + version = "v4.16.1", + ) + go_repository( + name = "com_github_jackc_puddle", + importpath = "github.com/jackc/puddle", + sum = "h1:gI8os0wpRXFd4FiAY2dWiqRK037tjj3t7rKFeO4X5iw=", + version = "v1.2.1", + ) + go_repository( + name = "com_github_jcmturner_gofork", + importpath = "github.com/jcmturner/gofork", + sum = "h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8=", + version = "v1.0.0", + ) + go_repository( + 
name = "com_github_jmespath_go_jmespath", + importpath = "github.com/jmespath/go-jmespath", + sum = "h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=", + version = "v0.0.0-20180206201540-c2b33e8439af", + ) + + go_repository( + name = "com_github_jmoiron_sqlx", + importpath = "github.com/jmoiron/sqlx", + sum = "h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA=", + version = "v1.2.0", + ) + go_repository( + name = "com_github_json_iterator_go", + importpath = "github.com/json-iterator/go", + sum = "h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo=", + version = "v1.1.7", + ) + go_repository( + name = "com_github_jstemmer_go_junit_report", + importpath = "github.com/jstemmer/go-junit-report", + sum = "h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=", + version = "v0.9.1", + ) + go_repository( + name = "com_github_julienschmidt_httprouter", + importpath = "github.com/julienschmidt/httprouter", + sum = "h1:TDTW5Yz1mjftljbcKqRcrYhd4XeOoI98t+9HbQbYf7g=", + version = "v1.2.0", + ) + + go_repository( + name = "com_github_kardianos_osext", + importpath = "github.com/kardianos/osext", + sum = "h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=", + version = "v0.0.0-20190222173326-2bc1f35cddc0", + ) + go_repository( + name = "com_github_kisielk_errcheck", + importpath = "github.com/kisielk/errcheck", + sum = "h1:reN85Pxc5larApoH1keMBiu2GWtPqXQ1nc9gx+jOU+E=", + version = "v1.2.0", + ) + go_repository( + name = "com_github_kisielk_gotool", + importpath = "github.com/kisielk/gotool", + sum = "h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_klauspost_compress", + importpath = "github.com/klauspost/compress", + sum = "h1:a/y8CglcM7gLGYmlbP/stPE5sR3hbhFRUjCBfd/0B3I=", + version = "v1.10.10", + ) + go_repository( + name = "com_github_konsorten_go_windows_terminal_sequences", + importpath = "github.com/konsorten/go-windows-terminal-sequences", + sum = "h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=", + version = "v1.0.2", + ) 
+ go_repository( + name = "com_github_kr_logfmt", + importpath = "github.com/kr/logfmt", + sum = "h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=", + version = "v0.0.0-20140226030751-b84e30acd515", + ) + go_repository( + name = "com_github_kr_pretty", + importpath = "github.com/kr/pretty", + sum = "h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=", + version = "v0.2.0", + ) + go_repository( + name = "com_github_kr_pty", + importpath = "github.com/kr/pty", + sum = "h1:AkaSdXYQOWeaO3neb8EM634ahkXXe3jYbVh/F9lq+GI=", + version = "v1.1.8", + ) + go_repository( + name = "com_github_kr_text", + importpath = "github.com/kr/text", + sum = "h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=", + version = "v0.2.0", + ) + + go_repository( + name = "com_github_lib_pq", + importpath = "github.com/lib/pq", + sum = "h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8=", + version = "v1.10.2", + ) + + go_repository( + name = "com_github_markbates_pkger", + importpath = "github.com/markbates/pkger", + sum = "h1:3MPelV53RnGSW07izx5xGxl4e/sdRD6zqseIk0rMASY=", + version = "v0.15.1", + ) + go_repository( + name = "com_github_masterminds_semver_v3", + importpath = "github.com/Masterminds/semver/v3", + sum = "h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=", + version = "v3.1.1", + ) + + go_repository( + name = "com_github_mattn_go_colorable", + importpath = "github.com/mattn/go-colorable", + sum = "h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE=", + version = "v0.1.6", + ) + go_repository( + name = "com_github_mattn_go_isatty", + importpath = "github.com/mattn/go-isatty", + sum = "h1:qdl+GuBjcsKKDco5BsxPJlId98mSWNKqYA+Co0SC1yA=", + version = "v0.0.13", + ) + + go_repository( + name = "com_github_mattn_go_runewidth", + importpath = "github.com/mattn/go-runewidth", + sum = "h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=", + version = "v0.0.9", + ) + go_repository( + name = "com_github_mattn_go_sqlite3", + importpath = "github.com/mattn/go-sqlite3", + sum = 
"h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o=", + version = "v1.10.0", + ) + go_repository( + name = "com_github_matttproud_golang_protobuf_extensions", + importpath = "github.com/matttproud/golang_protobuf_extensions", + sum = "h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=", + version = "v1.0.1", + ) + + go_repository( + name = "com_github_microsoft_go_winio", + importpath = "github.com/Microsoft/go-winio", + sum = "h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU=", + version = "v0.4.14", + ) + + go_repository( + name = "com_github_mna_redisc", + importpath = "github.com/mna/redisc", + sum = "h1:cup2P6113vVbzNCuYOQtIctHFBGlvGvJi8+GGFPYobU=", + version = "v1.1.2", + ) + go_repository( + name = "com_github_modern_go_concurrent", + importpath = "github.com/modern-go/concurrent", + sum = "h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=", + version = "v0.0.0-20180306012644-bacd9c7ef1dd", + ) + go_repository( + name = "com_github_modern_go_reflect2", + importpath = "github.com/modern-go/reflect2", + sum = "h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=", + version = "v1.0.1", + ) + + go_repository( + name = "com_github_morikuni_aec", + importpath = "github.com/morikuni/aec", + sum = "h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=", + version = "v1.0.0", + ) + + go_repository( + name = "com_github_mwitkow_go_conntrack", + importpath = "github.com/mwitkow/go-conntrack", + sum = "h1:F9x/1yl3T2AeKLr2AMdilSD8+f9bvMnNN8VS5iDtovc=", + version = "v0.0.0-20161129095857-cc309e4a2223", + ) + + go_repository( + name = "com_github_nakagami_firebirdsql", + importpath = "github.com/nakagami/firebirdsql", + sum = "h1:P48LjvUQpTReR3TQRbxSeSBsMXzfK0uol7eRcr7VBYQ=", + version = "v0.0.0-20190310045651-3c02a58cfed8", + ) + go_repository( + name = "com_github_neo4j_drivers_gobolt", + importpath = "github.com/neo4j-drivers/gobolt", + sum = "h1:80c7W+vtw39ES9Q85q9GZh4tJo+1MpQGpFTuo28CP+Y=", + version = "v1.7.4", + ) + go_repository( + name = "com_github_neo4j_neo4j_go_driver", + 
importpath = "github.com/neo4j/neo4j-go-driver", + sum = "h1:BgVVwYkG3DWcZGiOPUOkwkd54sSg+UHDaLYz3aiNCek=", + version = "v1.7.4", + ) + + go_repository( + name = "com_github_nicksnyder_go_i18n_v2", + importpath = "github.com/nicksnyder/go-i18n/v2", + sum = "h1:MNXbyPvd141JJqlU6gJKrczThxJy+kdCNivxZpBQFkw=", + version = "v2.2.0", + ) + go_repository( + name = "com_github_niemeyer_pretty", + importpath = "github.com/niemeyer/pretty", + sum = "h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=", + version = "v0.0.0-20200227124842-a10e7caefd8e", + ) + + go_repository( + name = "com_github_oneofone_xxhash", + importpath = "github.com/OneOfOne/xxhash", + sum = "h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=", + version = "v1.2.2", + ) + + go_repository( + name = "com_github_onsi_ginkgo", + importpath = "github.com/onsi/ginkgo", + sum = "h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=", + version = "v1.10.1", + ) + go_repository( + name = "com_github_onsi_gomega", + importpath = "github.com/onsi/gomega", + sum = "h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=", + version = "v1.7.0", + ) + + go_repository( + name = "com_github_opencontainers_go_digest", + importpath = "github.com/opencontainers/go-digest", + sum = "h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=", + version = "v1.0.0-rc1", + ) + go_repository( + name = "com_github_opencontainers_image_spec", + importpath = "github.com/opencontainers/image-spec", + sum = "h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=", + version = "v1.0.1", + ) + go_repository( + name = "com_github_openzipkin_zipkin_go", + importpath = "github.com/openzipkin/zipkin-go", + sum = "h1:yXiysv1CSK7Q5yjGy1710zZGnsbMUIjluWBxtLXHPBo=", + version = "v0.1.6", + ) + + go_repository( + name = "com_github_pierrec_lz4", + importpath = "github.com/pierrec/lz4", + sum = "h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI=", + version = "v2.5.2+incompatible", + ) + go_repository( + name = "com_github_pkg_errors", + importpath = "github.com/pkg/errors", + sum = 
"h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=", + version = "v0.9.1", + ) + go_repository( + name = "com_github_pmezard_go_difflib", + importpath = "github.com/pmezard/go-difflib", + sum = "h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_pquerna_cachecontrol", + importpath = "github.com/pquerna/cachecontrol", + sum = "h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc=", + version = "v0.1.0", + ) + go_repository( + name = "com_github_prometheus_client_golang", + importpath = "github.com/prometheus/client_golang", + sum = "h1:JnMpQc6ppsNgw9QPAGF6Dod479itz7lvlsMzzNayLOI=", + version = "v1.2.1", + ) + go_repository( + name = "com_github_prometheus_client_model", + importpath = "github.com/prometheus/client_model", + sum = "h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=", + version = "v0.0.0-20190812154241-14fe0d1b01d4", + ) + go_repository( + name = "com_github_prometheus_common", + importpath = "github.com/prometheus/common", + sum = "h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY=", + version = "v0.7.0", + ) + go_repository( + name = "com_github_prometheus_procfs", + importpath = "github.com/prometheus/procfs", + sum = "h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQlL8=", + version = "v0.0.5", + ) + + go_repository( + name = "com_github_rcrowley_go_metrics", + importpath = "github.com/rcrowley/go-metrics", + sum = "h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ=", + version = "v0.0.0-20200313005456-10cdbea86bc0", + ) + go_repository( + name = "com_github_remyoudompheng_bigfft", + importpath = "github.com/remyoudompheng/bigfft", + sum = "h1:HQagqIiBmr8YXawX/le3+O26N+vPPC1PtjaF3mwnook=", + version = "v0.0.0-20190728182440-6a916e37a237", + ) + go_repository( + name = "com_github_robfig_cron", + importpath = "github.com/robfig/cron", + sum = "h1:7ELV9kd3xWoNnXRvGWOMrPiBz/6W47lSwikPlnvMTV8=", + version = "v0.0.0-20171101201047-2315d5715e36", + ) + go_repository( + name = "com_github_rogpeppe_fastuuid", + 
importpath = "github.com/rogpeppe/fastuuid", + sum = "h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s=", + version = "v1.2.0", + ) + + go_repository( + name = "com_github_rogpeppe_go_internal", + importpath = "github.com/rogpeppe/go-internal", + sum = "h1:RR9dF3JtopPvtkroDZuVD7qquD0bnHlKSqaQhgwt8yk=", + version = "v1.3.0", + ) + + go_repository( + name = "com_github_rs_xid", + importpath = "github.com/rs/xid", + sum = "h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc=", + version = "v1.2.1", + ) + go_repository( + name = "com_github_rs_zerolog", + importpath = "github.com/rs/zerolog", + sum = "h1:uPRuwkWF4J6fGsJ2R0Gn2jB1EQiav9k3S6CSdygQJXY=", + version = "v1.15.0", + ) + + go_repository( + name = "com_github_satori_go_uuid", + importpath = "github.com/satori/go.uuid", + sum = "h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=", + version = "v1.2.0", + ) + go_repository( + name = "com_github_shopify_sarama", + importpath = "github.com/Shopify/sarama", + sum = "h1:tqo2zmyzPf1+gwTTwhI6W+EXDw4PVSczynpHKFtVAmo=", + version = "v1.27.0", + ) + go_repository( + name = "com_github_shopify_toxiproxy", + importpath = "github.com/Shopify/toxiproxy", + sum = "h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=", + version = "v2.1.4+incompatible", + ) + + go_repository( + name = "com_github_shopspring_decimal", + importpath = "github.com/shopspring/decimal", + sum = "h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=", + version = "v1.2.0", + ) + go_repository( + name = "com_github_sirupsen_logrus", + importpath = "github.com/sirupsen/logrus", + sum = "h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=", + version = "v1.4.2", + ) + go_repository( + name = "com_github_slack_go_slack", + importpath = "github.com/slack-go/slack", + sum = "h1:cxOqFgM5RW6mdEyDqAJutFk3qiORK9oHRKi5bPqkY9o=", + version = "v0.6.4", + ) + go_repository( + name = "com_github_spaolacci_murmur3", + importpath = "github.com/spaolacci/murmur3", + sum = "h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=", + version = 
"v0.0.0-20180118202830-f09979ecbc72", + ) + + go_repository( + name = "com_github_spf13_pflag", + importpath = "github.com/spf13/pflag", + sum = "h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=", + version = "v1.0.3", + ) + + go_repository( + name = "com_github_stretchr_objx", + importpath = "github.com/stretchr/objx", + sum = "h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=", + version = "v0.2.0", + ) + go_repository( + name = "com_github_stretchr_testify", + importpath = "github.com/stretchr/testify", + sum = "h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=", + version = "v1.7.0", + ) + + go_repository( + name = "com_github_tidwall_pretty", + importpath = "github.com/tidwall/pretty", + sum = "h1:BP2bjP495BBPaBcS5rmqviTfrOkN5rO5ceKAMRZCRFc=", + version = "v0.0.0-20180105212114-65a9db5fad51", + ) + + go_repository( + name = "com_github_vividcortex_mysqlerr", + importpath = "github.com/VividCortex/mysqlerr", + sum = "h1:5pZ2TZA+YnzPgzBfiUWGqWmKDVNBdrkf9g+DNe1Tiq8=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_xanzy_go_gitlab", + importpath = "github.com/xanzy/go-gitlab", + sum = "h1:rWtwKTgEnXyNUGrOArN7yyc3THRkpYcKXIXia9abywQ=", + version = "v0.15.0", + ) + go_repository( + name = "com_github_xdg_scram", + importpath = "github.com/xdg/scram", + sum = "h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk=", + version = "v0.0.0-20180814205039-7eeb5667e42c", + ) + go_repository( + name = "com_github_xdg_stringprep", + importpath = "github.com/xdg/stringprep", + sum = "h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=", + version = "v1.0.0", + ) + + go_repository( + name = "com_github_yuin_goldmark", + importpath = "github.com/yuin/goldmark", + sum = "h1:dPmz1Snjq0kmkz159iL7S6WzdahUTHnHB5M56WFVifs=", + version = "v1.3.5", + ) + go_repository( + name = "com_github_zenazn_goji", + importpath = "github.com/zenazn/goji", + sum = "h1:RSQQAbXGArQ0dIDEq+PI6WqN6if+5KHu6x2Cx/GXLTQ=", + version = "v0.9.0", + ) + go_repository( + name = "com_gitlab_nyarla_go_crypt", + 
importpath = "gitlab.com/nyarla/go-crypt", + sum = "h1:7gd+rd8P3bqcn/96gOZa3F5dpJr/vEiDQYlNb/y2uNs=", + version = "v0.0.0-20160106005555-d9a5dc2b789b", + ) + go_repository( + name = "com_google_cloud_go", + importpath = "cloud.google.com/go", + sum = "h1:YXtxp9ymmZjlGzxV7VrYQ8aaQuAgcqxSy6YhDX4I458=", + version = "v0.103.0", + ) + go_repository( + name = "com_google_cloud_go_alloydbconn", + importpath = "cloud.google.com/go/alloydbconn", + sum = "h1:p46IfIof8huGKXzQXKqI/11M36EAbrt7DjxMh3yNNvE=", + version = "v0.2.1", + ) + + go_repository( + name = "com_google_cloud_go_bigquery", + importpath = "cloud.google.com/go/bigquery", + sum = "h1:PQcPefKFdaIzjQFbiyOgAqyx8q5djaE7x9Sqe712DPA=", + version = "v1.8.0", + ) + go_repository( + name = "com_google_cloud_go_bigtable", + importpath = "cloud.google.com/go/bigtable", + sum = "h1:2DCxzxiuoWubL6J0yl4rUtFtIJAX566mcefQS3xy6us=", + version = "v1.0.0", + ) + go_repository( + name = "com_google_cloud_go_compute", + importpath = "cloud.google.com/go/compute", + sum = "h1:NLtR56/eKx9K1s2Tw/4hec2vsU1S3WeKRMj8HXbBo6E=", + version = "v1.8.0", + ) + + go_repository( + name = "com_google_cloud_go_datastore", + importpath = "cloud.google.com/go/datastore", + sum = "h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ=", + version = "v1.1.0", + ) + go_repository( + name = "com_google_cloud_go_iam", + importpath = "cloud.google.com/go/iam", + sum = "h1:exkAomrVUuzx9kWFI1wm3KI0uoDeUFPB4kKGzx6x+Gc=", + version = "v0.3.0", + ) + go_repository( + name = "com_google_cloud_go_kms", + importpath = "cloud.google.com/go/kms", + sum = "h1:iElbfoE61VeLhnZcGOltqL8HIly8Nhbe5t6JlH9GXjo=", + version = "v1.4.0", + ) + go_repository( + name = "com_google_cloud_go_monitoring", + importpath = "cloud.google.com/go/monitoring", + sum = "h1:+x5AA2mFkiHK/ySN6NWKbeKBV+Z/DN+h51kBzcW08zU=", + version = "v1.6.0", + ) + go_repository( + name = "com_google_cloud_go_profiler", + importpath = "cloud.google.com/go/profiler", + sum = 
"h1:R6y/xAeifaUXxd2x6w+jIwKxoKl8Cv5HJvcvASTPWJo=", + version = "v0.3.0", + ) + + go_repository( + name = "com_google_cloud_go_pubsub", + importpath = "cloud.google.com/go/pubsub", + sum = "h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU=", + version = "v1.3.1", + ) + + go_repository( + name = "com_google_cloud_go_spanner", + importpath = "cloud.google.com/go/spanner", + sum = "h1:84YpqQe7n7CG/VEq2OzkdXF2Mq1y+UJj62+wBSKX/mU=", + version = "v1.2.0", + ) + go_repository( + name = "com_google_cloud_go_storage", + importpath = "cloud.google.com/go/storage", + sum = "h1:D2Dn0PslpK7Z3B2AvuUHyIC762bDbGJdlmQlCBR71os=", + version = "v1.25.0", + ) + go_repository( + name = "com_google_cloud_go_trace", + importpath = "cloud.google.com/go/trace", + sum = "h1:oIaB4KahkIUOpLSAAjEJ8y2desbjY/x/RfP4O3KAtTI=", + version = "v1.2.0", + ) + + go_repository( + name = "com_shuralyov_dmitri_gpu_mtl", + importpath = "dmitri.shuralyov.com/gpu/mtl", + sum = "h1:VpgP7xuJadIUuKccphEpTJnWhS2jkQyMt6Y7pJCD7fY=", + version = "v0.0.0-20190408044501-666a987793e9", + ) + go_repository( + name = "in_gopkg_alecthomas_kingpin_v2", + importpath = "gopkg.in/alecthomas/kingpin.v2", + sum = "h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=", + version = "v2.2.6", + ) + go_repository( + name = "in_gopkg_check_v1", + importpath = "gopkg.in/check.v1", + sum = "h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=", + version = "v1.0.0-20200227125254-8fa46927fb4f", + ) + go_repository( + name = "in_gopkg_errgo_v2", + importpath = "gopkg.in/errgo.v2", + sum = "h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8=", + version = "v2.1.0", + ) + go_repository( + name = "in_gopkg_fsnotify_v1", + importpath = "gopkg.in/fsnotify.v1", + sum = "h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=", + version = "v1.4.7", + ) + + go_repository( + name = "in_gopkg_inconshreveable_log15_v2", + importpath = "gopkg.in/inconshreveable/log15.v2", + sum = "h1:RlWgLqCMMIYYEVcAR5MDsuHlVkaIPDAF+5Dehzg8L5A=", + version = 
"v2.0.0-20180818164646-67afb5ed74ec", + ) + go_repository( + name = "in_gopkg_inf_v0", + importpath = "gopkg.in/inf.v0", + sum = "h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=", + version = "v0.9.1", + ) + go_repository( + name = "in_gopkg_jcmturner_aescts_v1", + importpath = "gopkg.in/jcmturner/aescts.v1", + sum = "h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw=", + version = "v1.0.1", + ) + go_repository( + name = "in_gopkg_jcmturner_dnsutils_v1", + importpath = "gopkg.in/jcmturner/dnsutils.v1", + sum = "h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM=", + version = "v1.0.1", + ) + go_repository( + name = "in_gopkg_jcmturner_goidentity_v3", + importpath = "gopkg.in/jcmturner/goidentity.v3", + sum = "h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI=", + version = "v3.0.0", + ) + go_repository( + name = "in_gopkg_jcmturner_gokrb5_v7", + importpath = "gopkg.in/jcmturner/gokrb5.v7", + sum = "h1:a9tsXlIDD9SKxotJMK3niV7rPZAJeX2aD/0yg3qlIrg=", + version = "v7.5.0", + ) + go_repository( + name = "in_gopkg_jcmturner_rpc_v1", + importpath = "gopkg.in/jcmturner/rpc.v1", + sum = "h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU=", + version = "v1.1.0", + ) + go_repository( + name = "in_gopkg_square_go_jose_v2", + importpath = "gopkg.in/square/go-jose.v2", + sum = "h1:0kXPskUMGAXXWJlP05ktEMOV0vmzFQUWw6d+aZJQU8A=", + version = "v2.4.0", + ) + go_repository( + name = "in_gopkg_tomb_v1", + importpath = "gopkg.in/tomb.v1", + sum = "h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=", + version = "v1.0.0-20141024135613-dd632973f1e7", + ) + go_repository( + name = "in_gopkg_yaml_v2", + importpath = "gopkg.in/yaml.v2", + sum = "h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=", + version = "v2.3.0", + ) + go_repository( + name = "in_gopkg_yaml_v3", + importpath = "gopkg.in/yaml.v3", + sum = "h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=", + version = "v3.0.0-20210107192922-496545a6307b", + ) + + go_repository( + name = "io_opencensus_go", + importpath = "go.opencensus.io", + sum = 
"h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=", + version = "v0.23.0", + ) + + go_repository( + name = "io_opencensus_go_contrib_exporter_stackdriver", + importpath = "contrib.go.opencensus.io/exporter/stackdriver", + sum = "h1:HBofEuVSbRgTTAQmE8y9skbElwGgBs1ecH7fxJE7Nrg=", + version = "v0.8.0", + ) + go_repository( + name = "io_opentelemetry_go_proto_otlp", + importpath = "go.opentelemetry.io/proto/otlp", + sum = "h1:rwOQPCuKAKmwGKq2aVNnYIibI6wnV7EvzgfTCzcdGg8=", + version = "v0.7.0", + ) + + go_repository( + name = "io_rsc_binaryregexp", + importpath = "rsc.io/binaryregexp", + sum = "h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE=", + version = "v0.2.0", + ) + + go_repository( + name = "io_rsc_quote_v3", + importpath = "rsc.io/quote/v3", + sum = "h1:9JKUTTIUgS6kzR9mK1YuGKv6Nl+DijDNIc0ghT58FaY=", + version = "v3.1.0", + ) + go_repository( + name = "io_rsc_sampler", + importpath = "rsc.io/sampler", + sum = "h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4=", + version = "v1.3.0", + ) + go_repository( + name = "org_golang_google_api", + importpath = "google.golang.org/api", + sum = "h1:8JHk7q/+rJla+iRsWj9FQ9/wjv2M1SKtpKSdmLhxPT0=", + version = "v0.92.0", + ) + go_repository( + name = "org_golang_google_appengine", + importpath = "google.golang.org/appengine", + sum = "h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=", + version = "v1.6.7", + ) + go_repository( + name = "org_golang_google_genproto", + importpath = "google.golang.org/genproto", + sum = "h1:zZnTt15U44/Txe/9cN/tVbteBkPMiyXK48hPsKRmqj4=", + version = "v0.0.0-20220812140447-cec7f5303424", + ) + go_repository( + name = "org_golang_google_grpc", + build_file_proto_mode = "disable", + importpath = "google.golang.org/grpc", + sum = "h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w=", + version = "v1.48.0", + ) + go_repository( + name = "org_golang_google_grpc_cmd_protoc_gen_go_grpc", + importpath = "google.golang.org/grpc/cmd/protoc-gen-go-grpc", + sum = "h1:M1YKkFIboKNieVO5DLUEVzQfGwJD30Nv2jfUgzb5UcE=", + 
version = "v1.1.0", + ) + + go_repository( + name = "org_golang_google_protobuf", + importpath = "google.golang.org/protobuf", + sum = "h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=", + version = "v1.28.1", + ) + go_repository( + name = "org_golang_x_crypto", + importpath = "golang.org/x/crypto", + sum = "h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI=", + version = "v0.0.0-20210711020723-a769d52b0f97", + ) + go_repository( + name = "org_golang_x_exp", + importpath = "golang.org/x/exp", + sum = "h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y=", + version = "v0.0.0-20200224162631-6cc2880d07d6", + ) + go_repository( + name = "org_golang_x_image", + importpath = "golang.org/x/image", + sum = "h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4=", + version = "v0.0.0-20190802002840-cff245a6509b", + ) + go_repository( + name = "org_golang_x_lint", + importpath = "golang.org/x/lint", + sum = "h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=", + version = "v0.0.0-20210508222113-6edffad5e616", + ) + go_repository( + name = "org_golang_x_mobile", + importpath = "golang.org/x/mobile", + sum = "h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs=", + version = "v0.0.0-20190719004257-d2bd2a29d028", + ) + go_repository( + name = "org_golang_x_mod", + importpath = "golang.org/x/mod", + sum = "h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=", + version = "v0.4.2", + ) + go_repository( + name = "org_golang_x_net", + importpath = "golang.org/x/net", + sum = "h1:RDqmgfe7SvlMWoqC3xwQ2blLO3fcWcxMa3eBLRdRW7E=", + version = "v0.0.0-20220812174116-3211cb980234", + ) + go_repository( + name = "org_golang_x_oauth2", + importpath = "golang.org/x/oauth2", + sum = "h1:dtndE8FcEta75/4kHF3AbpuWzV6f1LjnLrM4pe2SZrw=", + version = "v0.0.0-20220808172628-8227340efae7", + ) + go_repository( + name = "org_golang_x_sync", + importpath = "golang.org/x/sync", + sum = "h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8=", + version = "v0.0.0-20220601150217-0de741cfad7f", + ) + go_repository( + name = 
"org_golang_x_sys", + importpath = "golang.org/x/sys", + sum = "h1:2QkjZIsXupsJbJIdSjjUOgWK3aEtzyuh2mPt3l/CkeU=", + version = "v0.0.0-20220811171246-fbc7d0a398ab", + ) + + go_repository( + name = "org_golang_x_term", + importpath = "golang.org/x/term", + sum = "h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=", + version = "v0.0.0-20210927222741-03fcf44c2211", + ) + go_repository( + name = "org_golang_x_text", + importpath = "golang.org/x/text", + sum = "h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=", + version = "v0.3.7", + ) + go_repository( + name = "org_golang_x_time", + importpath = "golang.org/x/time", + sum = "h1:ftMN5LMiBFjbzleLqtoBZk7KdJwhuybIU+FckUHgoyQ=", + version = "v0.0.0-20220722155302-e5dcc9cfc0b9", + ) + go_repository( + name = "org_golang_x_tools", + importpath = "golang.org/x/tools", + sum = "h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA=", + version = "v0.1.5", + ) + go_repository( + name = "org_golang_x_xerrors", + importpath = "golang.org/x/xerrors", + sum = "h1:uF6paiQQebLeSXkrTqHqz0MXhXXS1KgF41eUdBNvxK0=", + version = "v0.0.0-20220609144429-65e65417b02f", + ) + + go_repository( + name = "org_modernc_b", + importpath = "modernc.org/b", + sum = "h1:vpvqeyp17ddcQWF29Czawql4lDdABCDRbXRAS4+aF2o=", + version = "v1.0.0", + ) + + go_repository( + name = "org_modernc_db", + importpath = "modernc.org/db", + sum = "h1:2c6NdCfaLnshSvY7OU09cyAY0gYXUZj4lmg5ItHyucg=", + version = "v1.0.0", + ) + go_repository( + name = "org_modernc_file", + importpath = "modernc.org/file", + sum = "h1:9/PdvjVxd5+LcWUQIfapAWRGOkDLK90rloa8s/au06A=", + version = "v1.0.0", + ) + go_repository( + name = "org_modernc_fileutil", + importpath = "modernc.org/fileutil", + sum = "h1:Z1AFLZwl6BO8A5NldQg/xTSjGLetp+1Ubvl4alfGx8w=", + version = "v1.0.0", + ) + go_repository( + name = "org_modernc_golex", + importpath = "modernc.org/golex", + sum = "h1:wWpDlbK8ejRfSyi0frMyhilD3JBvtcx2AdGDnU+JtsE=", + version = "v1.0.0", + ) + + go_repository( + name = "org_modernc_internal", + 
importpath = "modernc.org/internal", + sum = "h1:XMDsFDcBDsibbBnHB2xzljZ+B1yrOVLEFkKL2u15Glw=", + version = "v1.0.0", + ) + go_repository( + name = "org_modernc_lldb", + importpath = "modernc.org/lldb", + sum = "h1:6vjDJxQEfhlOLwl4bhpwIz00uyFK4EmSYcbwqwbynsc=", + version = "v1.0.0", + ) + go_repository( + name = "org_modernc_mathutil", + importpath = "modernc.org/mathutil", + sum = "h1:93vKjrJopTPrtTNpZ8XIovER7iCIH1QU7wNbOQXC60I=", + version = "v1.0.0", + ) + + go_repository( + name = "org_modernc_ql", + importpath = "modernc.org/ql", + sum = "h1:bIQ/trWNVjQPlinI6jdOQsi195SIturGo3mp5hsDqVU=", + version = "v1.0.0", + ) + go_repository( + name = "org_modernc_sortutil", + importpath = "modernc.org/sortutil", + sum = "h1:oP3U4uM+NT/qBQcbg/K2iqAX0Nx7B1b6YZtq3Gk/PjM=", + version = "v1.1.0", + ) + go_repository( + name = "org_modernc_strutil", + importpath = "modernc.org/strutil", + sum = "h1:+1/yCzZxY2pZwwrsbH+4T7BQMoLQ9QiBshRC9eicYsc=", + version = "v1.1.0", + ) + + go_repository( + name = "org_modernc_zappy", + importpath = "modernc.org/zappy", + sum = "h1:dPVaP+3ueIUv4guk8PuZ2wiUGcJ1WUVvIheeSSTD0yk=", + version = "v1.0.0", + ) + go_repository( + name = "org_mongodb_go_mongo_driver", + importpath = "go.mongodb.org/mongo-driver", + sum = "h1:aeOqSrhl9eDRAap/3T5pCfMBEBxZ0vuXBP+RMtp2KX8=", + version = "v1.1.0", + ) + go_repository( + name = "org_uber_go_atomic", + importpath = "go.uber.org/atomic", + sum = "h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=", + version = "v1.6.0", + ) + go_repository( + name = "org_uber_go_multierr", + importpath = "go.uber.org/multierr", + sum = "h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=", + version = "v1.5.0", + ) + go_repository( + name = "org_uber_go_tools", + importpath = "go.uber.org/tools", + sum = "h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=", + version = "v0.0.0-20190618225709-2cfd321de3ee", + ) + + go_repository( + name = "org_uber_go_zap", + importpath = "go.uber.org/zap", + sum = 
"h1:nR6NoDBgAf67s68NhaXbsojM+2gxp3S1hWkHDl27pVU=", + version = "v1.13.0", + ) + + go_repository( + name = "tools_gotest_v3", + importpath = "gotest.tools/v3", + sum = "h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E=", + version = "v3.0.2", + ) diff --git a/static-files/img/bucketeer-dashboard.png b/static-files/img/bucketeer-dashboard.png new file mode 100644 index 0000000000000000000000000000000000000000..ccac0f287788f0614e36f446106e4bab2c30ffbf GIT binary patch literal 223095 zcmX`SbzIZ$_dZT{cZ}FZgGjR>5@VE%t^tB{3BmvcRFH-dW0Z7vN`pw}2$fVq5yn&$ zQBW)t{5^TSKi}UUJa{|?kKOn4zR$VNb*^(gu@+{A%nSkyWMpK_#zsghGBTQAGBU~v zI+}}5;$Z{8FZy63hfp#yM$U^LaqgT)oD#fmR@Gp0nv*JG<)_N6IhsD;MPJSS#wo~0LwVKJBvSLIfwszR zk=@@y_)AY_8AQ5D6}k#0u9B_D$`&{|nR@q09hnA57!4I!g;$6Mmt^d=$4n@svuRot zc>O~1^4yd>JrK2q%p})(B<=hN0&U#6_Sk>0~~!^qQe?( z;>l)OkX#u@(yWRSJ(^%S4@pDd*x4nwgV9}(c=;O(et|^VyR)VEp*MsfJT!QpD-Cvh z#C$2zsugle8St9e|kOFT%j8_w-tyCzZ zQS9h&h7J}}ViXEAUw#06x{cNpPtW%AU3gckNWf@6twugk-Jf%DAtDk<;3;CzN* zEaUu!qQXwE>g9_R&4~p_?CZ>YI5&ym{dQU7-poJU5L9ns#j%|6X2C>0OE<>XT(;7h zdSY?HGF{86Rrr?R#r|ULcNCa7Su;7_v7=-6erIwjaJ$K?39ZC$jc|YMrkG(ogMM~Z zZNaMR(vci$)?h`b29MD1F^z)_J(r+`#scC?1S+;;W&aJTJZ|`)fFUzfHe~R4M6N76 zZ|Q@Hfjvpg?c~mEzWufLcQIS^%4x6?d!krOP_XetGbV&(1}!^Ck?h8o* zuDb|TPbpU?zD?F`zelJTbKvmm>A5@V`6Nv`{<}nwSY5V3V|bI8IU>1r`orZZ`qNFd zNQaCZSZiUe@ZHnT4VIFk6Qy;Y0pz9*lm6 z8(y=!27@Piv~at`PAH&MG!g6IcXzdnRV^M%KGl5nLz%qCKlS^}gP0_WlPSRo*5Op> zelmABcJOA*wKd=H$@MGp#BXBk)-D;!xFxOk+!g2Io$Uh<$w+U@wC0n&jB|;hQ(5W= zRUE`+=TWCbcXcO3l4Rk6h0?!~%So``Bk|!L@t{ZD_#^~`!K+Kb;|xtSTC4&|ThVpk z$7ux(*7PNklG{u8ZhR)@tB37#>~k|U+~xcRB#c#n!X zlK$Y-&$EQJt%4%-kiV;l^-#CCY`mW7Bze1m?(OdF=hl%UA=e-BhSb^HDa=D~ z0{6Ae8C)_h9@gr=hgHy2N*|{%!|e3l;0vRJ*-aIf4VlrR1Af_Ud)B@#;tE+#C1-?7 zjCs*TuuHLPuu8w7lo1tDGjzxd)jGZ8mDFdO);K-KvSoQtZ!Q(V_ojwHzzBH;c)b(7IKW*-> zW^J6UB*j249bV`-iBW7!7)@OhSleZ+U5k$O!qB`GTdJ3XiP}6%h)%b`5S4sW2U`jW zKj8iD#UBlqiOO7l1u3k%*qnxYI`_~O@ichW-R zjp7Ge>OS(W;QO2tvcY^w{Pp|Q6nVv$r{(p7exe`=pAS{~q#MJxEpELWu-mhcfxdN4 
z$i{c9!D4?HQH7+)Jt9={%)3=Rw2DApf4<`nx_TyE%k4oQ?-rmcf}MUjag+-3;*jci}R|s5DHx!rao<^rt&T*XsYWZ*)+u+ypaRlaJz_i?-ED`!#O8S zP_o_i)YZ@YUf18eIdjvCJ}C4ZAZDnm%w$=drEKMBnc5_P{JlQpBnPf}mh1$sRVltp zOB}UM^fR~Q$dPX{_vQNmEgsalQH`Q-ie7G-1EQoziXEf^6 z^p*|R%61Wi%GZ_*sCa_;Dk2WcEFr$%pQglkTp#5Bfh7CaZ_qO|PY1V|UzTh}m@(u$ zlq1!6s{BOYcu%0tOd45)?iM_B&D$z_Emqaj}o+#fEdWLq$D46oVP zSk&9Mr+846$r=gqU_$6-)Odi^lK91gpom56XxeWr@#(T1pHHLYJ{)W>uP411A|{Ej z(-2K-vPfGusZw9supQ|?x88lYxKq4F&~GW}ns9IPEMetDL-CzS z$}X3kW32)Dt+hi2B~nB8Hnqzt1wH19H1fu{x=V(KTH3G4-YY5FwOukVD1vOo+x|p| z~h^dNCrqJDu0DKBbsLK-Y{mPGn{9@<=X9u zom}#_%#YNTa|J-Nn}4MlLy_UHu$z8XR5041L-d@M#mJ4yDqD&7tijTw|NpI!BE>2~ zLb=Ev4I7iQz@RD5T33iHrgGbp-gsB~H06^ksy6PZ4$Y_&EmHBd^JRy+>z7Ge*jo=Y zISs16Lawb^rE=MmY6em*UczbyQ=zpkz|vESbQD`n8?X^{2Rxvx&$f${EI$6XQj|!x%Pdd1 z)#TR+8#NAIQXc9|-B6VyqT|{) z^cNIlMECNMePD0tt~SDzT@tu)CoXCrAt`3K$QWTv6BZ)mbSy6hoU2@@AIAeRr=vf( zlszx4s{R55y?2=D;C4t5#qkfCe`y*zgw!ZB zfj>DBbVg%J57(P6p*xTx){u=o&KR74n3?SqbOMi+n7m1+ps8?=yxidqR7z8B$CHmm z4Qk|RZR{|}e+TSKNH-NLLo8%f=w|KLa9C|@vn06lw@Wxw|OvDagx=o9#=`%XHKOfP$qs?MirQ>J*8e}Dl)B1 z*^R88J`<$7zM^p+Kdn^}JH|lAL~g5M^ISCY;iRDG_q+V_Wsdg7AJZ0(A2JwAL1eTsoNsAsfAHtFF6!dG^o>4Z{^Lgfb|78-k*7pchhJuORUf%= z&%l1@^Z1oNeG9}`o_9ZcZ@-&M*@=~;kn_0u>iWpVwj4&?3Lw@;9J=hEv*){)XG zcA4$fq5XVK6QYR0Y(TSw`@=aFcRAc8{B808oRr2c-j@@;bEC@!QN9yLADX zKH0+hn|7B}=$BdzY88P0=qzjrYJTw#HS^SeayB5T&)P&p1` zd+X|m_UP%i9@r6$*W0Q4K^(&OFO83RyA$FgIFXB2N7j+q-n8-4(U*yTxI|TTMUt!V z)lYn1AoMHDtb!ypzH#Owp8Jc3?=r z2uu9E9zx5r4josz28i|5XZw#DN9NIk$qvHU;LEU-&2RZS=@_JE>kR zOh>CR#ttWxG2ZR9?YhoK|0X?f-l#&D7Wq5-N3XvQ(!*L6 zUWL!HEN3PJ8Q7878fM6o?DwRKXIK0|+Qvb5DW|)D&OoM)pE-CjV?q|AWTs2}?~2$3 zhqT^?Z&3yEBE7D^*L?{A1xO^RObI^SkOHe^)hx}6TKmz0V^;g-IrLv#jz{3uEZ$lY zjJr^|5_8f%oT|Z{`096vti+O*Op$0bVaZ3K5l<=hg#5=wk%I$07seB%EU*(Jn4DOW zZiR<^+`cji6*3`rQ4vwTSwg(mG2lm(1ZR_OCU4bLHGtufN|7 zIg8ESIai*!Ey@p@oF)p`ycABpH;_Kt0})Yg{WNVX{-~90t4TjFr(T-z_oP`w{K|H^ zN9L3~; zjVmGW>!G=VPVqv`XaYS8Idxci=8;?~PFBh32<^d58pMZ1|Q3GktxY z#T9ZQB*B`c2n%s9viid`r&Nir4*a`V=%PM%n?K0Kfq+$N<7P(S{z2J8t5WCj_b?FL 
z8x;>eU@Pd!St;riC;-{kVqvBy4flh{DdefFVnP{&VCyKRx?&@%WAB3Qch`wug~~zr z6Dt)7D%x*Ze6?5Mbx+v4@E-Di#LjD8xBaF2?J?_@7?7?dE$qF{|JNKersRl)W%cqk zCR+NJFK=i-N8UOTEZa&`Fp6KuRQ_TnNeC{~>68&lMaoR^B~mH(E0syl0qzKEKVN=e@CU<-F*Q3~g&n-x?V{3(sPUawQ3ciA8^qd^rqtoTrY49yaB4u6o!jKxY;Eq z#i}`3+Sq|`5QV4x7YV%!Q;~q`Z|JBKN>q_vRLEs+V zUn{4vSFI?_m7L2+fAYd^#S7@idr(x@UBNSPA`e}`hUL2Ph-W9M4BkMMC^t5c1fij5 zR@s-;8t_!`Mch$^T`Kh^H)H_T8jJOYqy!2x97mf~xGM2ueOd^;wo#4;=kn87I-W zSfGewIBrwMzwNLq?Nv&pYYF+0yE)u``J>rAHs41!L`WRvXjY0zT;-Y+a&uQSz48Ww z)6&~)7=0AuiB<9e)klm@?qz|$HZ1nO|AXT(U!7&CpNU5?|MiZ6vH}8%vp=5=?8XR; z`J*4Rtj!uTMN+d&kc5Ps>K$U1&*ID?VtS}(i?E}sFZ>Y1jOo8tDy1p=$+MRVbHlZTmWX?g9WyA=8@+Rcs?_eig70gv7fRUVH<=z2tR_dj{-;a==nQ}t z#%-)|AF7{3mWmiLvozd^5I379L@-@2he7D_#3dF_ei3EPvH0eTK&Pvj6f}*j_jR0$ zb$t-wq$-2JspUlatw;yyO(JlWa@pQFcR&t=rS|~pg0}5UZ}J>t(_~3ZNKi~7=wq-> z55NPa3Bmt;yXM}P*Jgz9B-#$9%&coiJO1=6Dx7hRtt%Sx?W;$tKUwXMz~Wt-N zC77APn@s4g;E0G}W@NOsSqKknx8lh2pJt#8UXU=8%Lo@UOO0JiQqF;C2GdODMQwGV z|F|LT9AQ1M-AB#Z@^D8-8C)$1O(3H zJ7#sQ*WLDGEOh(BGz;Qy70d0FQ$s!m{MVzD8Y=hiu}$*<9kGD*WR&Qyq-HjMf16z zmw>GrzDOT*DNu}>LUJ9!z$HhT%af>yOxK{Jo?yI~vN( z5Pa0_s+xY?=c}jao&Pk#)CWx8D6~8t?vAZ;fP5uluBWL_@O_o+aG1w9M9b@1%q@wN zew8~e>LTs|@=U;H?gMJJkg&9gy_XebRt9pUUn_oBP-Q?4LIJ%`%Uh422qErwRx(Z? zaHqeDAAk+7O_HMtm1MN${5%*XisSWTfNKF_=$(o%WddF|yh~Jen@pwG)~D< z*ZBtn$en>c+v?se<`02usNUO8x!v3kt~%s~e>3WCErK3C=?Gj~{^TkHh_NiK2F6#S zuhO_Noms7?%I8lS?-yqQ>2TJL%)Oio@G-%csTbXney=U(w%OCny2Hwdd}(S}@Ekhu z!Sx$o*)oMLdQ$}fQLPuss9V^>d-PW%0Zle@3=W2b`cRZ~nvJOYGPc0%7W}b@-wd1Lm%m10B(%Y} zIuxi$cd6J8(
7LclHP>c;1=Kieyb|&=FMQGG8)0#5M5fx;PF3u)_I6?zt(D-ib zDh0?>f~?f3{`-{Rlyl@Kk5I;@-0%bpg|ouoPu8(kPXLDF>sFX(dhWR2bGl@#SN*ci zpSift_FMhRodN-14-{@H10(eij$!@{$Bc_^_P+)R6x1v+RJy59Bq1JgOOJx3Na+QF za9f;qss92dP*TX5OCd+17ofbS_a51DV=zl4M?MMkeLC7WKUh750q&Q|PM^WnNR&@1 zRs69{E98kJ7(4azd;cx6HO2b;4=ujd2#pA5$%Z9Hp(+)C4G2nNSH;Qp{tF9puwhyX zWjj8H2c+PIo&>Xu{kQT0AJvS^GO#i^i_+f2!w9IGyfX?eAS8T%`~%r(yyk``bfhtZ zlWk(}S#V3>+;(3VbBwO8YRcHAFytWtG9I zunvSYP1KEP^UR0df;n~d3hZLO1y)xnrAlCMQC2`GC1>FZ5j*>Tpe!~!iPzKT2uT=R zIPH;Pgr71o0CKE8NN4`+#`Z06#`u_3oIZ%ih7|-tJx3os3MOYQY^7Q}S@hz24HLzTFKFtgRMQ{Rq@+7}Bbtc5$gNqkE{+qMUWLwBJj&;ARhuNz zIADh{5RIsr`)FA0;W+n!>Rcvl@RH`QX6IH*L21M$^E2`tBP!%kaNyK8#JTvC#UK>Q zcjDz5*Da{WH6LZz(1R4Js1`Qmd^b{;lhDaO^KPoJYAl8{@%b`PG2CuAUCIAuwT zG)D|1<=2vkJCN$SP$_2_&KN=evQG+xQNMXa=Z{8b723DiIkYzfbli_6bhQfkd70W= zDtO{|t-W3B_G)-Mnwl|W@MMk}u>H1Hk&yKR0nN(RT#ETGGwie4Q){r^`SNN1k_t+t zK>?qA!%w?~;&TreWJ5waTZ=mKU{MkJxT`P=tD|e6@fwiwg^Vpz6|aY6eW$nq(hOE& zin|iW`So#ZOSaG=chE=X-M%WL_xXj#V|aW{-dDi~+PHUNp2Q+aup}#lrD6(hy$7)c zCj+7S^bL?|cleV5St9%GuwKu|&8hzglFto-qWlE_t;fZSW#lbl&cr0A3NA3fhMk`; z;=8HLQN^r+;(D~qtKJAGKHPg6OVK)goKwpU7xOS)8>woWCST?hp<=QqU0;(X@r7z1 z8UQUoWS8y@p#4sw4OO22gu(J96@Ad?RP1I~_Dh&9jZkNn> z48o#&F@Z67`&wP|T~RVE3Xr=;;^EZ~7{WE*V1T(8oi^3THQrL3SIgfqt*HoqT~WfT z`$9T;grz);FcPE_RO%})1zaS+Gz)@OYua%EQny=F5=tsAD~G$IKVK0$ebNxMQS5(F zarw$j!7~FFi08qc?U(6? 
zuhCkwc!|+04O88}srw0X+}+9GM-P`!n#ba+Rd@vC`PP0s17f&4Xdh`3LJeVrXN$3g zEML#yG{`OvaLK5C?z;&@oXXgEs4{zSaE&W_vMA5q6hOKx^mc*Aeo>T2YeIgSp&*6E zFy~S(FtKlfs5WO^-!dnv={rvvMOGysn21sB6$S)PU2ufuIu|Cz^p|Cmz~vKGe#&>E z$6?WQiJNw&FCKnagF#1XPy!IMMRI^oBt=gO-{RWPw0UFe1OppB_b-u4g$ZQ87i2MW z2hcvHHsT_fbJ;%qV)^NK5KN%9ghyiVg~BrG-^aJ=Hf{xR=`-PUJO7%>#zD*_l53zD ze&A$_D-U4{eAx2+myv@Mg`6Q`QQ#Hcuq)$RO@-?72wb$yx0Hc@Kq{5%Ue>>W_!%xx z!6blgY>db50K_lrRE4r;2WOfljwio9p>I4Q_fEuFj|I5IqLlhbi!_ z7XygRC>(-`YTulTW~9layx#HsYbk04Iw1s0o)QOVNK4C(J3Flo4Zz0y*K;}bBeik5 z+N+5m8C?bshq2iyK(nj*a#|xw^4vdzHKtvJxWvw=7?5A?7xM)E>3>>mOP7|%hj+G% z7#_1+N#qP^qg)Wv{V_eXNAbOGEPR_Ob64Y=k;!Dm>iY&pIM{3-P~!AWBCDw^3cu8$ zdvk#{UGmWi!6wvBj(q@vv3$DY`H<@ELIWVb!CdXuN(v!`E+8>%s4z+~Ivj zOC?dEx%uP1;3+typEhA=2?idg>zOl5FMU#1ykJc-0370I}%k?;Vyv3rwGT& zs&)Zdx-VS@#Ozm6*o%7Nq#6~h!{MfR{kq7k0^8*a(}kMR5irzJPvoH<ZKKO8Ey9Bx!w-q^O*Ex=mQ0!a(KI<*dutKmf%wv7 zSHFM{DL+?n-w*m?GY1;;%4Zs;~3|Hh@4;2*2j;Yjv;WuVb8d0lz#70|EGcm|_Cl&X1Q!jlN%h&{;qkCp*eGhntzne>(^oBKl22pb(^1HcV( z0d73TM>xR;N=)K!4_Ed*R7exdqCO^O38~|U@IK$xO|XZhY>-XRs`4e`eEE)wNLzUoJ)Hc9UY~7Fcj?_y3Sjt|orLE+@d8xJ zrnA=q0$=Uabd3mrSoJStJhvPUmhB(S6@tkiDNG0Q7)@l0RGYYy=J-oWd z5>n4C3?a)k%edQKAZxXo(KY%B|2CwHz~-g+H#s$Z+&WX7j{qbQL_Q)D7SjV4Dty;+ zSNvwPWr#dWe^9GdY*>XY4j#u0{6iId{+Ee7a$WdeA{WG1Rs21#Ig(BejosztYhn==R)*ciHs5mn8#YSGp(%ui)HE z$yZleJ>*C>U&7t@FKez4wff$V=Xg$`IF1dbhi&d!?-L4s`E$uOOzMPgyj_T7f8eL{ zqGI+=mfb6C&+W@6nKf9|->$`jt$!&J=4h7+VJ)GyBX9rb4*)#hQlWIM0Cl z#4y`wrK2_K=Eg z)TCRemVA@$zq7I5BLftJe&>moEMjrVCwB7hd{zIKr3gy`c|s6R-YfG+RWphIL|)*D zvyNmy5V6vG-T;;+8F|#2vb~aR&5}KTvQG6+uCTcMcp9mUqsFiXAyaw(oK;_T6p|XDMHR0EpE>R0;C^Z0I z&c8Qs-VpSpG#Mzt2q1fiIM@Z*n`n=*Mt>FC!G~sUh3;00ZvW~;{YqSX`L11I!^|yY zORDL%iMuM!0BqP78tIggv#%3hx6HG-PyjCfjkfb^MSmLum~KzwEt|Kotd}Cus#W0U zz`Vx5Zbw}FsRdyA$$f2ZOqfl-;Mh=;T}n3QG#J%|l|#&UIdUOV_s?DRWU8dCQRK5@33z9)T;n*AJu1_mOK~+o&Q^(Eu9@` z?pKoEL|LN&;r2*OZ&6&a|0-`;1F&?=-FfB52gY(T<{Q9WkFl;aT&-wZYdhD!Oe3o^ z)g7?OwVCswS3XY;%9-$(I|P`uXOO^*fJEdweWFj3r={=pRr~*8Gon_0LTf$)pQFo8 
zThVnB^-$_QS(O`mG8u&;P(G!F&-1v?pX+5u43A`tU0s%e?tZpciREMh+mm*CuSYZh z+M=f!K~at9~|?yqpNvz|k>~W-$eP!0mIfel(X0&g|BEWHmJQ zPv>0?Vv_lGKX3)@5<5S(BlYwL`6X7x{UNyXx1f&>mQCgv!=7%cW-sf!oBbFRq`MK)pg`%4yb)4lQX2$L6H0al7>H0LRcNwao;T zB}oaFRW12GDJ)14o{kZbBl&8nG2a{LTI;ZiL6ldjdFw>b5Y>cnXposa;mHmAwOFCH!1?GwXF6|nCo%8iPZGh5QFA4<4(#0*1{}-7o6!FM^Y|nr8 zara9Wjjcux`ft1!yfvR_U^Ix;C{U>UEFooBM+J(#Ka29@dp7gV6Y2o2ziISIIR|K^ zS!J4O1Aqg#(6*)c0GsNP7FgLY47K|$nIG)K_xq2sJA$S506`DfKF->;06Xkc&yD(a z>P+BHxad>NXNP&2awzC&X(b~$k~nZ*>fg37zK(7%LnzuAiEWb=9>`UGmdVwFITS_7 z`d@ZgI;lG-{J0Layu-O0a`&H~WJZ6z$}Kt{+5O_ZfAK_g!&tpd>O0-xz)imAIee?G zVVWR+>Qx7`50D~X8^9Fk!(jToaSA1{XNup8O~g<{ucW>IBl!#n+a}r-jM>2}@b!E> zDk>*nujftD@@dH$==GO&>YjqLp`vRhetTTKJJJ)FObJmfz$HGbt)#x(yX(@BUofY_ z{u%%B6TT!6xRK=XvEyKU%6hUNXFXJ3FnnVl{2lHhp|w zdu6&)1lyT&;TPvLv2eWVIuEN|g9&6b0fvZ-v#Lx)>ooa(OK^23V4Yio>889MiPXIy z>I@$SPAc!|JQC<{+rG~U;@Z;%E{{({Ca1k70hG7KTvws-vI(+onT8Qb zMX>A!rZ%L5qmg~KCLUG;7QkJeBkz9I76R9spG0kU#z9GfjPUATam}9wz+lOh|0GSR zM7c1{!92%QfEp1}^Kw1f;2xjfhA-?6!-2dvXSo zPtuctPxw@1Tq`F1+-nf*FkQc5OTn7{Ym-AbE(r__IDLm9!@nq+NYOSAY!ym2!igu9 z)z4Sz@QJt?Uo`C9(g_jpJQw`UN(5T{iK`YYvkbZP>V>@s{#!GN;#PylA`0jA*9|7I zVXA)c!7P#PrQ^|Re_iPohW6YJU`X9*zE+2=dVtLlOfj^y+oLP2w28jfjNuT-(6D)X z25Xv4FCeYKsDN^ie*}mTueDU}9vip^K%Mi+*<7-ehpN&GZTVfWltx!|Y+wuy`w_rI z`x*?uVTa^7>)_7dFKII)wnf0e8{s)&S~p~oxP__wU_Wv|{=Kf>F=yKPu$TU}{X6=8 zxxA}Dq50@0@imN8falCJ{-~QaW`_!V_Q5LSaU<5J=`FPqlq5E50vuZBa05`8uX!); z$B(lzFLH&Djq+OsC)`(dj!~hk(Ir^kxlxT*?rMtr!0n~_U^%JJCapf0nkQbrBn+Sp zvugDATTBOJVmsTsnbQn8_ji)9(4fQhJCuarze!WY2J?bLmpSBkr?6J9eK|wITD?2T zso5nfnXQg%#7HT865NAqAqo;+i1kJI2UN#>Hw%ssWcg{PZW`XBfyMZ{HBZPwHYJ$$ zXyDA4ldU1>sCk|Hj&wYmD^^h;h@_fw>eL2yw(+u3ROqSq)b?%o?I8re(o ztLKSYD2jj{k2a7h(^dL|$k%}@3GXg53_>KAZGo}zi*#-&I=F=7XVaV?_ZxZJ1tjII zB}t(&Bv;Vf5PpyP{F(>?f-@se2S{^=UuCU70n!CAvl!;*!khxB`^#PoEQuAFOelHW z!;N|d1oJ%x`<)2%XotCUAT3{A`{I)(IS??Wk9p2%6?{cr&gILS^*KS~np2Ur{Cl)) zAN-d?JR4qF4m0rT^$b#cm}T+u+qStEb?^1R5IrMHj=tD)(E(N^l0%)uztN4Y8@YYJOFg`a>$tPQrIlQpLv2)~DZ?Pdt9>-Sseh4y8Rpg1Rip 
zmT&WqFUHgWF<)-#8W9~N_;f(_fSOsX&4=KkUR)JS4aB|?Z2Q93EOC$IMa}Ya&`Zw7 z&Z2*|FH6p~N+@({U4NJUp8?}O!Nq{IBOSr*E)23&Gf)F{esAm20h+?Fj>iI$on;*8 zTH)(ihXIIX%W=lDcs2n$0jo=nQb12%;FMAAT3}C-+LZK^$iVUE`Xm9b_`qNB+1TwR zMDZl$Xd1VRd{vh)qel-_CWWZds&@LKx zG_(&w41WdZSOD<9$}7~2iP=9F@K_IxrF>F+q%_P%t8?n~)2iMgkvra=xE>EQjVH5v z)?}{^48S+sP@=%MPC6BA*=;YScEly`>+qYQZt@Sax=6xznQ~egh0LW zl-x$>DUgrBZaPsLwgjl1;*`v;7zktzM)m9l@WK2!}SbG-{UJ?fTsBM z?HcSOKv~$ru2w)F3;4JXByb-z$-q|-#LCKXi-z&kF@Kw)_Pd{<&++4nu(`!vfGTfD={S;Gy|#P00(=#^ocVWz+XKPYyTU)L z4zeP=?!-#jxLGH8Fvn1XB&uPR?qP0ybWNhwkAAF6(+9N~Cu>D1c|wbaAw&yT}KnGS3LCOA{Gcb)xk$Brz1n zr(^>4$B;7GpWQp}j3%Xcczy_n^YJ!$j!@!1z}X54iq1jK^q! z*Csl#2e<9tSUefZiw-G3C{jJAb#%^9FaV2H!Elw2yDL~bIk>brAh>7ruiB&qwwjEw z>I0%Lk|^{oIBPn23l?V@IRtnh&pry%b_X^;^a0oVHe;5H{$_rUPb@*!aEpR1<*CZ? z&@b@?_!V@R<&FIY5|Wb#6DN(*o!SX5hy^Y=ba9}(UOT{xKAQ2%_DxfPcswP*DvWNu z$*dLx3KU1K4|@WL@pDa_g>zB`pl?^cx&2__%gyQ)iQwuQ!T(!dfd@x1QwW@e8>!;9 z{=7mucvF670IaSd+`6^3FeqZUJ8s?F9M;s9hP;lly|E7j7g&t6o{%1~lKdhV{u$Tw zz(#F|Rm#wDVlWJr(w#Fl!9JY;;!j?UqF0j5BLLkvM+Sn08Ojk!V|q?2%y)dd2?hn2 z=+kCARBkTO6>uRvrvxs@t92zwzr>ZndNEhpw*+v@@i~z&c=`dC%J4j3TViFj-S>YA zjj3yNYiI!$7L^jl?tyDwRt}2t@mPbWzU3q{&b;Jp)l`^T0w{#|RN=oMdQM6Hww?WM zpaxInohs{vw?Fq%)^j5T#QpxOOqEK%MNU_fIL~i4ooI60Zb?Dl=yR|5JQF24(w zRgKXvQ2_Q6@hb{;7sDbH%!m$1Vv6%IcH~86lUnQ9n{(a50h4o*ahkWF@64;vi1>dQ z_Yp~$e@IUL@tD9?323^PyYyiFo0{j{qsd6ilQqFV`a;iFL`IQ3(yJoWB1skk&#-x3 zIx#l}oK;=IARTLPmh?B*j09@KTuieqPS9%@rO#!i)_=PTjH^p&-|~l!OD+o~8$}#@ z43PQJ5X$=+F6*5kH==HoU&@P7EtW_>=xoex4~S%$ia~H$F;!PeS(E^NN4+wN?=t~9 zpV-CRKP#Ya1rJ34ZwB2}%C~!z>sYu8xP!(QnOlEw!)HgT^!`=aIGOvd_3pW z$?78B-)^Xavlt|lxak!kON^9P<#|$2&wlcUN}40gMh$^sBV9!(lTnmY4;^MgQS44S z9!aP)&_Q*lz;Y8xMkyOvLZ1;b*Y&>1kmz1|ktOxoI%%xx*6SY<^_1m^>DDQT)GTZ% zDCMFz5TEctkTwai`1!oRFh{_#!tm2XC9J}yl&>I>pzv9$&!MUe4)9cG{fUJFV=a4C5) zaCiU!{6`E%xjeupBZ_P@5~6xS{P#q3uNMwVpe>f>GX3|$j|1_{SHLl2PWKuDH3Nsks4v#G=7Nbv3Bt2{cEM&|@IQNRsJp6My z>Fi*`EZ{XpD(2>vTz10vwf;oO*Ypfll7}Hmv8(jwz#YY}I*YaUR{92_Tw}MvsTMv 
zjI}R)nK5SI1{+2!zpfM~CH%T1q+HtE>PcVp9SGp`T<#^ilG~-R|Kn8^T}+En++YnxR-k@my+HCMVJwh)lxuD zP8}B3h}mD;d1nTVt|V;FiE>RHoZ)m&Bh^jiS(oG5AXb6_N@!`ZDZ z+&@Y)gT%s{+ux5+ps197>?4Q%NW7E&QIn3^x8M%3d9SUsnA~k}#WO@gMI>XbmCJtu zV)iC%XAozmw76~aQS)mQA10DP*udw6U8ip%_0VvmluJg(=#e|vQ1HxlaFzoci}&Cg zgE`WxGQw5pI72QJ0S()dERm{YL1-&5+?e}Kht7b0JYTMiqlf9Zvr97O_g*6n#~dHY zlz3hY&-4)nLThL53JRE7y9S>_)^&qhaZ?r6S8Jw>bvJl`^N z=km`?t{@dr!T`v+QWRz~kk9G|@*0P7&Qtynw=71! z%5k9Wy^se^7yl@D{+Qj3abvj4+c~)#qgaF)`aGh;k?wWv|KsW`yrTS~y-&l?!_Xlx zbchVyG1LGaUk{&$ngJXQlVY_))Tvl(acA`dfB0#XG%+8sbuFsy*GO84~rbU}r4p zr{Q_2$`+^cFahHxDY3kIdoD<>a!mb(4J#&H zb@9xu3>YFo9@9-+4jMc*$4M zoi0BoMl#hjr*S|8@4a2O?m{+b;LF#~bft~qx$I+!PLlzJia%}5-e(c~wG?_rh7<=A)m`zJj!>O|Nj zg@vTiR921V!!Zf-G2y0*F^`&n|-zHO8UJ|1H+G!l1Cfa^?YK6LI}C z?YUT7l60YAscsO;cd9`a2^;E_GU?1+I4cLT&HW6w$!L-tnbHw%2=dM~3{(UttZ9i=~^B{7R17wijE&|V& z5S=Fq5Tgv_ON<)iN<%bW-_$^UY~)4p`BYE-;0splGkHJ^Cnz{?vFitWo$E5asx#=6o7OmxQMjt@OG2x7hg|Cv ze~NqNd9e$Y)AI%A??;Tc9tnTH8)a#IqAro$lkj&YpSt&X`rF^+H%xqVN{Z|2&Qf{G zRS!#qr+u zONDrEwGmQ${5^$QW8jZ1b=*7fhsHE%z^^;kZU)7WOAn(DFvZ_M^O{uua|M63<*?0D zgsy$u!SN8L4X2~*6Y@um27h9t0~~Z#_69s|O(ULg=9b^{$#AVV4u$Q5!YRE|3;6DD3smH!`C$N^o_dU|)`VuL*UTQ|DkCkIo2-K&rgp$>--GRa|*b`ddB zf~Y=-UlgSkDQYwJS#TSz4O39gotnE`>k+dTQTw@~T!|yMf2w($!411?d7W;}eIXtm?%p_0AW5lMPuIa4_h<-lUCunl&*SCrZaau6}*7s8(MbH7N zn2SFeLn;lNq`W!~^4}kLcxSB{s^;j)XGYY-#Hor&kgW>&s>4j(N_OXf7*p8p8wsn^ z3S=4yXj9amq;Sp<-#ozcy%T{r&aK6Wn)T>!WHD8expdfMT~V>f%sSF-fW>=lCxLkXK0sRMUeaKqx|m~pFOgA z?pq7ALayAy7HI-zXndo~xzFRcD*!BdE}a0db43NeD=f=$#klgC`_5I>xU1M&q3>6(TQkR+W8)mT8>>P~E!4_k zK&s9J^kG)Qnfa?aHMnts4#6HPH{n`698Zl+^+Hy%!*?F2hXWr;d7G0DbN_1|ej5`C zB-`3)h1FKK#-#!8gneIce0Xq)mIQQ_z}EAJoctv*@T+}w;?xl9ISw)3=(iE?!r!-u zwxok5eucl!?ve!p{oRU+bwtj%OSjpL0s{t`@WL?#(xK;V{l*G1dR=`7Rrkzy(4md6r9Hs({8aATVYW$O=B$9nyAv2 zF`WGQ8ID+EOplMiKl(yg^r^ipn$U575$RSX>3I%4xs;fdB2SdJ%G>ySuXUEI*fwy}{UF>o;^D zH+u{$IFnl~a=rdMI7eT)_}%e;?n}IhVlP=nG2LmI7L#Ve`J5weu6h|)&X;N@DU~l> zEQ$EjBDV~BltiQ1gx)XT0UD-}DUamm)xQ#6tJ2cM7Y&c#FpaH?`C@x0UPxVfz$*6v 
za!A&8sL^&PuzEk0TmN4D`uppquX9~hyd^>3t?Z(05)9g0=K|6dT`-s2H9kblB{O$5 zM|E0SPc^ZUJCRlV+b%zgSxvB}$)CL;+!zWbKAIX%=~izn=j!g#Ln^YI8? z;6MWR_ysN-6Xp6#4Ul{f)#&$!4Zv&$E@|Dk`7^!^?ymYfj2aC70apC}DYVL5u$|j; z`RXp^db{|PL18WmKPb7AsDt_oPh=3;ny+u~ehEXI$G(AGNRY(q^rX^?sA}Io zEgiL!a(Y_Oh>4IlBZ_qjoxs*WW>MAw7x7M-+S>9^h=4xF$ z|L%GZs!T@Z?>(v6=b!|L3<;PGqx*<&6^_1A->Uvz70B8{+9HLfk@3%v_ht0rGImpo z7QgcAt|lRMbFNNKbla2(GT*|fcw)nzi0xbDqN!CGt0XcPsm*jVyv>_b}qyALp?yE=R#MO(dYR2Y~6OJ(b(s-A! zchkD78WvAxRKnXPEcRHR!@2s`^GwXCcvB4uQj)Dd#_c~m;F9e%L3|uOwin7F&CO4CkjSS#Zh%LD-yT)ud)h@Rm303`0O;2(%Cc zWaEd)^<-L?zCaK1%3!)RWAY-iWJm>^ z8}CP(_x1-t*OF-X5O4Hv^>aVr=z9P)Vxw-&L{~W9N}qqzhlV%^*;!8lNRV#o*8T|S z$$jbmiGI!(Bi+|#pD7E={ZarAN!1R~t_puFp$Ah3ho*Bx&cgL;JR# zL}~zUP_b=}A#DH@AW~Ra{NUN6-YWfj6H*2Kb=a|oUm;X`?F#5;xDRQ!Vkq!i5(f9; z{J1_n1PDrMr4x$ZJ#Z>_scHOta=~=n!!tXg2|5}9D2kM8&V0=m%9|fOkS`MtY^kR zWBm`|M?}@WSw*t?NxyLg4Y8bQXgAdkTR~tBBKcJ#nJiJsG;qFrf6?C|b^8BqVNYWW z(~Gy)ob^4F4%<#}#J{?quIfGpB!lxjkc^JNIct1sr_IobQ9y1w1gJo3Xl~+_jw;19 zjF8hy#fT(-QvW?_33pi_Ws|p!oxfkDov8_S?JHYj`J;1VEgZ$;e%%t8?i2b{paM_z zd(rPBv1v~|jP_hT?LOk+FV+MZKn41JW}KHzx9M#L^e)MPKlk2j!jQyOD~FBrV>IMj?>%~${7T5Cgp~g$)ObP-%i2pz7c`9V|ZFq54A6ub55>G|HC2 z3K`$b4eB`=vu#l#d2u@6XuwQs>smsxZ?R6OBkWng>49DbfdxIx4>g~N3V_WgwHwwk ziXG^dEoqK0KNYviizoP|_AAtWma*@+Q;)RyZkPzStn$I8{-GIClOM^)U({$O?IX?= z?ZXOUTUVt|IZD*k&BXp>W>6RU&aU1&XPJ*})*F^GN^;HomS_tD)bE^$3hN8zamCwt z{BGCOqyH}Q{1+wSoFnZCT$}L(O<2-mrL7opE|UJAp}_%Gbsj?W5P%wlg8(sMgNI)13s@dK_OWbfu;es-N9r*cZT?NPSdyj;-Hzb zX@2MI_enoCvF#bzz=Y~^E8gOVAl*CSA6NK-6k6pV(&z%G9%oMN(9TKA&Z^ueD$BDp{n8@h>;2~RV-V`7x1uzufkFO+*T|2Y7*JlhN6 zF0=mh>r@2f)bBV>rMaJFi4UAHS_=C6=c-?Ic+tWOu9dZ?@qfUW{70Q*2kEd+mJB6z zPq-c#AP*nEa7MI`XR2+@V3u-6CH%b{s}4rbJy*9P zv7^_oYb|glg;?1B5YwMHc%J52zm2E0#hNAU01IsjCLcE4l?`tHXz4|Q_tDZj3tC{< zy+{7~!Hc8nL~=EVM>zt)X%VFthLZRl9*lRQ$H$7|zwL-`5N-~uFuUAJ3 zx$`>SkMsFHYnS+$Uc!qf*;JV-cXdkWZwlL(r_@737l za&W7&8V`wXXB-7KNI%Hn;ndmyoEV8g7X%XUH+3aOx?=wh!!KUoEaVuBpF4nS{kGpO zcE?#yb953kQu}q!pNwQU$n*Xo$lag`baHg1p|P5jE4w_Bq+NbvOVD93zdDJcGHBN@ 
zuM>~f#G@qp8qts}MYEit&7F^$iR&=+x%Q+O*1_u210siY$0@@B668qWM9*({iUb)1 zHgjJ@7BxnfI6+?p)t=?oWu;Nd6hd*v2bG%C0~4lx`E>S|H!`EwqV5lWl8xuFk`<|ONLNXXO&Ih#QB%U5^q>`_=L zrpLyq_HUVHUk}@D}nvhnhV9A<_l$m>Fm30`K`nTt^_3#~xi}MiN)ah#hii3_A z&?gd|nHGK|kt=CyPtLhg&CR6tN6^JoW^6f!_smxUAm0Rk1E7SFXcuG*4*1VNG>RdNb zJ17lf{0Kt2ALPXr@J`Bn7Pb4-A%)qSz#1>lK({Ro4qiNE`c$F5`>V5D6vhk;7S?## z@sFrR@W}mE1`u;Yo3mr^d&cA~u?E{oj##Gp02DVVeE|8>U7DeWJKG|1$sF65aMl04 zlRZ{n6`tK*A_Q%@23a}`50+RYc(RPkTB$NLL(^b7dOgCBhgbZ}SrL%dNf5l+r zM+^a*i(g%?evZVfx_y19b9qHd_iT#F8$@*bc&U?7Fh<9@dzSh>3dWN#eLw9=ellN! z;af+#=QXD{!oXd#aex{4*C^ADP%D@=r|8G8A1?Dcr90TYTN9?j7 zE4?47KJG73H9EKj5NAI;Y+@1}bE1GC#$psYh`Dfs{Y|W~Oyj1Sib+z>#E4{YShF`% zm0zS(&|1u=08!iw{rxL`I<6Y3p_=((_p#yO+g_pur_AcZ^=MKkK)6sF-~*hI>H3VD z8V%jmjSP?pV?^ZCt~-@5`D4KJt|E}YQEJfnlOjshfp1|%aT4ZaS7m;cOw|lVE{gq( z)G14hUQ+xn3Q%2IB|vekgO)5Nrt>wl)DL9KO}xMXrkQ@xNg?7XfGWV*#ELBk>HT4` zuTuYvyIEI^cN=p{<#yj+2Sdce$;S-_Rz?)!#ZV%=J@xe%NM>+;H67iaLyZnhKBY+F z-`s8x9f~0=j#m4sUWgmg=gi0}M9tdFziL3WH>7Q)n_(8E`Fl!S7ivat|7hC3S#XZv z%j*0H&6faVMa~eh9Xt((AU*?JH3-fWwE;B8!;PLa0RgHyP|CksgJn8{r#aHtRx$#D zBCaPMtt@AR6-#@c{wu4f1)GQ|n&{p@(-~irS^y_a^R77n%+BUxU004$W6~hldM^KF zMYYb*+D?0lFFT);R)p6z%U0UZQ+n>GHa_#vIYvS!FdQ5uh(R+TJJv-er51PvehwC=#LC(2UO*!YxcrdaE}2IS!p4G$MrnhBbQ?V8#eL z<=8Yma2sKCeRZ-|IOjOf%7|T=vqGx+q(x{tHH>=P+b?p2?yVa<^1Q1uOv0dT_w~Z= z{OVf{lqqX`pg z$?kV_Uv*4+sn!hxe7I8}flaGq8n-Ink32u64^RKDL;ouVH$u$hG^AA7 zVCXS}Sdh5t;a~6Z=tX}wDvK1|ho4h~NHpWp(GbfqT7sr++&pox#@5bDMTye(niBG8 zpm{abXoi1(|1=xRQA>umWvT$V0f5aj_x#ZwMD8>*CkebAPZQ!}$}9#>>Ah z#0p#C_hmy3j)fWGjC{VUy@iiBp{Df@sQwO^CJ->U4gKTNsnLdwC!gVtphwBlj>|ll z8!Xp9K|i?|8($E4x&Y+0plD43N|s;N1Io8G#46Fn4|3UetwLH*XqpErAV(pbEK zU-_Gdr=KUuUp;1YxgpNo13GJ1ls4@%@23fft{YzC=rjAGbF%|q#kBZsAv02^ir2$r~1_=68iU&#+Eiw)Zuka2U_fP>;Z6Y2$(PWo*TiK*K_S8 zboP>lJHpTS%P_B;orh{}Z@IL~ca36#Xa4GGkkQxZnG{uA3)d7wOpZ*7TrM*~bKfYw zLO}k=DLv?Q!wj0#nL8~>GtlBCvc?booA7a&RGRbUC&UoUZn#6$$2D3Zi#H0lV36jj 
zcE*jS)+DKjlU&j4`9atKCTeI3tc-`BWDJASXf7c;PGeF>m2B^LDCt2^qkm`b{jJK0T=DF_|oSI@H1{r=8QOq4J%uXnO^lmv6?+=TCzDFjzAtG4Bo|(;c0W1%NV0qQ?0NxB<=JDWED$!XY->yKGvCW;v3^k*=M5t~FmfmcG1^2w43RQzwQPO}2K?e(4 zNeWW65e2%KqBw090#@?)un(iw*_6P=+QH7WU!iuu5Iedbte*teKfoop`oySx9GYm)fXRx^fOJ2%jvn=hvHWrYNTN;# zi&p^z7>^DfWyWe?Gi4I*=5`H-fRiW+VD5tY4_Wp~=JG|wl$E?Z{HmPF4e7ma(&c3! zv3qZ1$lFWW-zm{T~9eleOb;^s^A03pnqzBpqdbyp(f5AM)u^aVP>2TD}YNsJ?12nA*#9U=coduBcKtGXWvY>qx zcc%ItcD~a>g!?aWX%OFsw!dw=Y5_3c)X^!!`8%?|fqDPNmJZ$9yPi)SAiub1^)LdA zR`t?AX@3ej?3~G;mk4VizEl z4WrEJi*zFP6#~aZD^`h5!wN&qAY4MxfgUK-& zgvG+DA*}9MnWGI7ey-eNHJ<5o-#1Jy(nl4TUhSe^)$G)0Kw~c{R2aC)%1eN z6_`|m_Fy{|=>t+OD<+Tf4~s#vIAdnk)r?$+-k5g&CBfnqTJ;oR+C0MIMrs&pR^AAv zD$`Sz;>siyK1~`iLCmgnRG~S?aXLL&~eA!MhNWt@JU`!f=nnR0L|+$GJ}0jG;}vv z6M1Wo@{Sd!7Vhdv;F3s7NrzVcjfBN=MOldm#ELZ(GhI%ZnKo6j$fSdy611?yh6>I8VSNUsj4fLh$SJ z=$&^SA)2EaoS5mmwy8va=Xg=H`q*P6iB(tMi-bRIb`~~mA9+)eIkKyCGHEOU_qCC8 zprnVDuP)czIg8(wRS<%SX+DzMnixmXgp?oDS<$@V6N>btf6^B8^q%%Y`)*`(U(GDV zmQ)Axg2#j*j>-Mnt`><0S1;5pI|X~H40B~z3y2~7oQx(VH;&g`UAqKP`n9gz$&f&G z-wXHn`r-{Bb-L@zD^A}2c13%SLiHxc=V^@L>)d{(408Q%Bf)xJWc$W^F~LYuy?@L_ z6sOwJJJ9)udkkrf7Evc=GBH%txA-en(ZRY6{JBmX*^?ZpxsmEDhIi>a>)CkT33}oU z$vj%yQ_l9VdA!B--H|hxc*3#LV{A*k&WtdnWCJf^QbeHRyK3j!l(MfT=u^4j@yPU$ zM>bdpj-#Cx1#@Q7ds4Xi>t11ZOrHr3I! 
zW0sMkSoeVmh^l1H^;M8%|Li4|Jb_fny{n0oe4$8zhuCk6atslv#{M~((W8nTr~SOH z@?d~k<#cp5bA*PFybjZUf$knPT1`P zeWM&J?&OtUd`dsc=}h~7Y2<+bb^gOkqc{6=>TbW%-H_(@M>83Z1_V(EdGu@IiRcXH zx5yaG(_Fm6`jwCi_7C4tfiN|wbas-=W4xKzck?7>Gkba3K&84n(GR-wS}vHFTRttq zQbLld+6IFwq3@GsovJkPfD7Ocu|_3E9JX><3d}v|(Ls{J_1A|_Lp)9h*kdwJ-rf03 zLMDr0W}xF7aY03O0`oUg1ZI4i(ulF`a9P<}4KL|}L1!n*WSQ5oPDWOJ6$pe-#fOl9 zcodF%SDLVVGtZk?%@lgrY)%;^Yuh>h_xm!z3f!K~L?#)TEUg3&U1wk|qTaWI1M97u zwavHp@?~mYF>CS0{I)eS#zoA!q^+l9%vV{Fv6fw_C%of(e%pn?eJ)7hZL7U0$(PZ3 z>7-j<-t&DO3b0wod9L_0DMVtGhBxZ$b1Z9ej1_sM%*9e`w5q*`R3kk4_NA{w^&OKtv zOlGNjP#wyRA9v(}KMNWEl+sA&h!QC8v$9OhmSAyR2)pQ9`^Ij>H0t*Gk)_24WOU=e zp^!_laDDTM1~+BVR^sDD=_AItuB5iiE6DQ(O8%+^UiGwd#TVq^ANCb;Q;qWJzKU}* z;c(+CF9ZhT{+QHj;*)drmEMjn{u1)8eOEs{Bo zMCMzU7C(s$&Tgwi*-Q5zM7G`#P5zf^Ujz28k4y#M1E%ag-$Klw}{ z>W6EY=0z=8sw#42>J2VBVBp&c&4WgRt60a7oajVvW&;lU5^PEi+}xLe8}xTy_yEcv zyHn0)0s)2_Nh`x`hyd%<%R}mqLh}TbCIcK(A|XOX6^^G6WJ(11wCn zqx^m2hDF{6DGtcsAMq#JyKDE+fG)O_Y~0WrU~XC7@eURX`JP&aoGh{lkn(&9f>;c% z`9i`?|0H~Cew6$*;LfJ#2Y^^dil>+_|B6?FKx{TRv-(9`RoPNY~p%(j}w?2k)sSm-FUC=a^Q%vtXOP+*AIwMRwjB<2fJNv-oOsq-v?VnIi=`L$A zP>Z9p$RMs}X(2=vdxvku!z1j^F6?Irj5y_S(dmkz;K#3Hrj>eplZDv1RUDwWC3<&G zCjq7m3iBY=+jV??!X-JVq~1Jf7^q8OI*0?7x`>Jf9JFM1d2RSCR)Tb8!4$SS=TrK{7gLImBpuZn-rPJ zTk0(#khJQsgZG{UaU3}<>aK!Z!-mh6tD>`sfRjZkTqBBp?B}K$TyOOXjvkG~=WiZn za>@1+)Apn@*hng-25a$V!o=F2n^;f1Z!ubRmrdpUcFC$lM&MJ|DNq4H2d248NDgXjGHJmAk$?a&@Zc((= zPJO5Y)Z=(8s)7|W9~t9)tGk(Njr|P;`QE^8PpyQCssBnfFgxL6`7LKXkw_hYnjub< zd0jVXOAF=UcKS;NCRf{&8zq@@3*aB%D+e1hc&RUY>+erKxDcioNd+p~W_ipbq6Mda z18EwU_A2q0%p(BHeo~(wH=CxyT9@X8S5PmZq>oNg&6TQ+hbGIYUyqAaDm9%)ifC>s zY|cq@tsJ*qp+esd`{l15mf@K}wT*6Rv|}j$WQVTf$3N2t;aj-XmNolis+`2-oZEX? 
zQNg#aoV9XBSq+~jCQEAf4qhG6R4kG|W@{JY3)e`4{B|$ZD2`UCXaY!{I!hE>dHh6@ z(!uc89GK%>CSFPM@1T({&j*+~*~LGWCQan(8@rHX#$b$LK3=O5)bOsjx5pRuzXF#}FU#Pjv zGt-KWJ;ALlV;?v}`Yy+QDtjy;1AZMC&0b~+<=ISetzj<`N+FV#;7WWvJ#BiQ(jhoG z`-!>pW0zR!Ps0(|#g(=N`Oeh18?wtCgU(MMydQ3^$^WSAnNqKZ{(##AhN%5v|A|=2 zU945L$Lc^g#lPL{yYIwAgwa-L?W<~5`9foE>|sTyu2T`9dC5gz`z=hDM(F)n%SC4B z^*i~}=#kQ1@EMt2#R?NZs2jd@2uv_3^c|u%9Lw;nn8!f$#9b2v~8KNq=%P(9ymim$4L-cpLaM0P=yZxoRl~ zqnbEgXzI+Y^Z{Rohge^PJ=FCdb&rhFu;CTI84faNqf=+EUB_?vZG3$=@X7?6Z0j?B zRPX51gzTSq2~XeFfymn;or|I+yo|XmmFm5s$*ue>XS8l>u#_f_!y6^0%O2`iV+oT2 z8%P?NL6xR>0}nizTXl`Funf;eURuKqfJqqTqu$1N4{j`CwP^0U*ih?q7k4%H2-#gko;b{i4%#)1J%`v9`%0ahjpWLjCx>S;$>)FilD13ZKT zK#x%ZW_IWX5xM6ya;ak7zaIw}go3P7A%dGE`h6}>yadk(%c+1Ni@^-7uXAOP(?|5w zOV%Z`&8AkA-53#W>3op`$`j?UfK0N~u??&)*@I8m^{gK#(TAbTf- z!MCH(8rDY^mfzle^zxqb=m4hZ6PJ(~Rc}ui%kSN?=05z3gORZglRDG!Z{4tYAuR$1 zWH=q76fb+HZShob90o`)f#(bFpr488xKbU4y?(@512nb zO-0?!dF?MrM#=XLHXf$S45Yp0;;wCpvu`7XLnDPweh|q#OtY9bDwX{aPB%LS*HJ+xfjxS=83!@ z1n1>LJ#LW8gu06xws>&bV_C%a(mt*AZZ{rAfD>rLAo9~~nP1WCZ zr^N>F@LsHjdA?x13#=N-@iYqGw04Q#Y_;Noic;~nc=!!_XO``^Lf*4HrGh(=K$Ifj zkYTZrujv4FxcoA$p!c9U3CI}XT|P2EBaNYp zdGckYL&*I6l!(XRre4SX25S8NU^V+&+|sX(PFZ()nT!Tl?x`N7|MeCqM}%R)USBX< z2wUx=WLSF&Zx|2d{p#U50JedI?+7eb8pWyzDxKV?8$F!K_e;Ty_FAxgf3P!h9Jn}^ zFPu9G&1r)>-A#o&@jI>!A}4Pi&iv>r302pXo8#GTsoGyluLXvo3g3eb-*gJdMwlRFcL`Q8*EHe$!OApTWO97P!| zVf2FARR_md3Jb)w(|YKvO{m%PSC?fGS4_8F|KX@{=R21luSy&9t5QIRRzy_c<1+j` z04PVKQ9}s7#T(u4*~v0nv225RrYY?7ek2kmRo23+@}{!5(TalsV^c65|x9sZnBTS z@t(`mg6@#(rDPi31o8-jsFda!ZPIz~LgRY*6LJ*;=M-v+GxN9F8m#4f+l-S)6S*>w zYsnpsPm5ZQb95b39^$+$nFRF+oFz+t!&&g%TrT^~(*SWr>oE0h*T^}jszAj)%YQ1z zWUV~>T1|1_{`Zg$ihkKdE1(!Lv$Qvbe!TrE?lL;Tbs&mIF> z*SQ^qg@j0`QP})juDHH^d`)pugI%{uC9y4!?q^FDdE1dwAj2Ks2-?}NP$xQ~t(ZPD z>rp1_IPsLDt}`;(O3&ygd?;jH<3oyp$1q#0cEY>c{Er{tQ`}j9`>El0ElQ`OQQn=G zia+g)Kp*noLc~>@o@|s~h!nS6EMn|h3fA7{;T1e6r5E0)!kK=Z<&>4v&#jg^T=*=O z?^bqs2grDdIn`o8Ju9xF);Ot74idcNEtL3@mTPU;#Wve+D4Tm4!GaWeg7-6xLAPgM 
z$5`Xo{qQ?&<)>za_c6N<{1)`6oShn=IYQYH=cUALe&+vkz^JO*q_2*O`tSHZ^YQWd zKE8+<{@b-Tj{H>%O@ioQcCUsjjv65f2KU=bOwzRpep`A9k>=#PG{scM*sn8Ri+5P7 z;EjM5b$qvR@&nuXwz=C3I6XMAC>?&gKam76_kc8U-(*Q033orB!i~o#WzK9Mq=g2|BB@*@YlKXnW|x1F}20Yr#LITX(11>g32Joxh>(^#A>;40MB z$_B@YOD}7n<6x$S7SiLyFnDk|KE-zdUuaEv{{t?XxNx4Mvc7$m^@>{xA(Sm=@9g+< zUMM0O9&(zINy+V!9GUH0=c%Q?gh5*6oowG()o)UOBqRS2R*y#%b?bF^_|o8Al`oK<8twwmJ9 zM}nB}I$Q5rdoaI4j|wL_E8085{_V_^J;CNde`gnWJu1*$9x zTD<(1iM{`|-kOT-b2CNY)_l_RKBV!8V2d$OQS2uvZr8#+FC{Ar=qv_p5+<3=&3hJD z%*d%V_3rY__*h!+la2VXRdMvlixic-T$9K2yDiYN;&3v8WpDPdif`5GnwS= z32*GF_`wtIZ_~BPhN5{*OY}DKa$g%Y_*1LP5_R$x#rwJ|d@exssoPp+fBho@Aha@! z@Tq4U+Nmu|^B+_IDEurE-hw$xDBpx6-#3c_VH7CrBK6oJgblBx5SFAv@zs}&^h1#B zz+}lkBfx`%32dj{f5Td!vr#BGn`c+>K#3;|=8UUF47;;3xy+p%?4$g%h=`U&hgr!e zB?gBnD0Jn^1YKlnzuyafc@>Qx4XPcFhZI;jeM=p7bNmI2n9FK7>F|I-__6g@>Ti!n zsFLkdpN8$9H20h+y#MoVCdFYlP4Jp%LFdOQ|4*&AenyKC?!c%Iup~%cAZYU+?tACC z^|A1Mv{9e>jVbw}Fwvm&=as+-M=!is;}US`wtskUi0GckCz(r7pd>^C8=L*0m5xSd zFikke_#%3a!YY_rTY0K2Dna|0Ki~YvBu-l1N(t5pBN0@8$%CxoZcqIDalvbNQ~F$^ z3`hRS8FaZe4xqDr$il9Wm1pX>42GBpiR(jClv!w;CTxmul! zGt0%`WSg>Yz^lM)51?|ci$6SXdKFpHS710|(m^$pCAZhx@|6Eu2inYMe$bajWa4!@ z)Lkx1p|BtZ;O#Ot>XKLdas_qt4f@94lfg0HVjJ|aAQ*W2%N0iT`j=nqzFPIB(~Q?g z^V^fC9dFSk;LT_LvVavD-d5FD8M8clZaVEUj?sFhR|-$TLv*Suzy#i38`#^xh3%(i z5O!P_!29shRO<}*KNlb|rIpzy&7`7vcl>8c-_s5EN#{FSRqum;vdM^%AX%vkk4Nb>K&(Rna#6j^<6OEf!-o?x%=(l`87buU?Py)Q z3;CwS6H3jOZ@ypKr#;S(?D2oyJanKopTGeN#dgsYh;XY6-b@ew@~7G1q)A8!2n9SE zi|*Ao+s>B;vxkr1kLJ{Bx7nu$PJTuG_ciU{U-wu9o+gxVf3g^(#Rt@H+i8nY$;TR8 zjwifoLJ9#b+&sh@2|VxB^sDFWQn_>v@L&zVyRed##8wuj1423chann1bi3q#U{~WL z<6S8)!`0PC-nFOUzi`gY_m#&z7yfL<>arEZi`>N-g}uwf@L2X#I}M zj|To1X~|EYB?xa|4I_z$8KVbW%yH7xK6IFJvO(I?;|WNYePiYZRh9QOYUQidED>VyQ64jY9Ds+CGIo&9A_*9^kk&1(%73m8yw-WS6%? 
zmx2)T5&w2OAG6nOu#wD3)E!qSlV!jt4$2@Yp7|lt$+5=Ke&>XI0t3Ir%yG!;xm!8f z@^uN692ap-qnJjTBQ~{PXJ4E4)>}Qkm6~hpO{HhBIa`76CiLYC-7H>h6rK$Rw5-1w zinjt{3X9gkp-Zp^cCHYh?gZ7Z^VsZ`|74q> zd#PCAfREM z9VS@9+W>)NxxRG2;$kEkqljH*_%9@Se4YKBaEgzS40sa=*}Cz@{_tBOmYNCVli3NM zO{!^4n0XLH8R<2ea)&baWf)qT<0kktoZl1ycUjF9Y4R#IB~YCHv2m%K>gVoZ{}t}@ zd>Bzt?Ybvg`64kO(Eabvsfi5vZLeT(>ZsYTju-FxqfPklPFkx2?d2N&#bVQmZvg2c zVI=ig9%#_NOUNJDNx`!Ws6l#QMkIDE4J5KPV&n>#0h~Do$`RlZ#TQBOBbpV`b0p!a z1Ux_xAVSL0<1VZ8&unN*`qgX&e!<9j>}fKsMh^RI>Jm5CYXP27EadW8=Sk*>6$O89 z#-M`2`LtGhL{WHzr9H+om|S9ajoxzXH(57(Jf_o2K#asF`evdL80-sPp=~TX`sI(h z&>ruLyq(B!Ma&kvUBS4%ldLJh4-UcYEqFH?p7!w>F>fzghsRXVOonpm-plDPK{F2$ zfz9NM-c>z*R2!@lFHSyFNR{!)4G&pDZOsnGl;BQIXqA+}3vG)QJ9q%^+1 zUchur-$#}ehmE0b;j5n=L)wReTfC(2&kUcN!(@#Pf3x}f{JEa>Xa_#+O_?kub1Kn~ z!)4<4XQGOVsfmf8HJxnWTbZ=-3b{BoUCeh!8@HV5XwwNu@J0>hO-s#DeBs{DOMO?Bi&ni|Ee6QUuR-AHR3pt+JsCycS!Pr-0RHO~xX zQFI}@Vm&ouS?^z(4A5a+Zr!u{_|#TpE9%#=*4t?n0k^iAt*-pS!h!wyXGVfK2)*BV zIy~rcSAfQLC?&8SWGIjai`b)n;IkfM{F?QV-nI;-dGp=W(b3UTgmmK9rZ<%A*BifD z+uLI;daop3U2eKpeDqlo!~^VGEr+^a^H&3F4K<*CtWsMe782YE)O@lhA@Gfp0@0;8 zBhYpa=7=%NN6FI93;QI?Jg%*(nbjkc9YhVNvks9XM?!pTB>N9X2&m}vS#5ej%zX|b z#&9fCDt_~@qj2A(kT}vnZ)1@CH`VoZl8~sRBwZ+cwf7IsJEdLL?wiBozNHTDmUh#Z zyB_4##-MpG!f%nNua&d@nYAbxNy#Xo=5CsZp9~RcUkaGekB^sJ1*XMSSIJ|{pSMhySwZ{11N z$xXe*@WsVnU=r1Ll5k3JKpZTtzFHF3O8uU!lL>|?V5}FaHPY#YXAj5=aM?IQ=#m_m zKa#tuvlfs;%>7q3{7AAzyA5!8y>tTGUMN6P?)FlHX?^+ zY=>qnhemMTxZ-6<)?wk*8;K~@=twfOte=DOgSD5C938?(KMtaHyQ0)ErHYSWijr}f z&!Pen_16XS#xHm7-0!Ql|HJIn0aCaKeLq-VTbdO1HnY>)+Eaq%)$#9Nf6X$6n99N5 zALf_xwS6hSuwa-gT|o>wBa;@RQyr~SJ+MjfcYBkHy*cBe0V%{N7fRFP)+G-otqi=>JF3mB&Nbes7W_AqmOW@}jIs7+I#06e4BIz7%C&!dS;HVZ6N(vL)HGWgEu6 zO(k2hj=>m=eGFlkG4s3U`}=&Re=yB6&vW1BT<5y3bM9*YBVkh!<)q2flypngh#xCb z5)x(U=@p-i{pa2bzwhIk%C0EJ*IWM)uzdN)()^<*Zise^OT@gBX3a4{`F{D<2A2yj z-yaF-;<2Ayjck;2t6#U?u^+p5Shs@EHpZUukuD+P(V5brcl)lHY8Cu$8J($p#T!Pi ztJE&Hh!Uz|eHL1R=+|-U{j+fHLVvxx3X8We8v;LvP}^*e)BHq$C<_-`BT9n*SEaqvIuW-k6ptjlEVM*KT~$ajhup28`{0fbqD!HsnXmtrWty 
zI~6dQxo)1b!@><$cD%NJeO73F7COjz`@8lH#J@f<4>LdYUfizWzvCOB7F}>@p2@L> z`aC-8OXRWPM!O9y^sf-B}7v&Z&6`&E(N z{l|@y_bH#gff;Z4CvP>m)dcrHX%^;Lo_3_iyvfr6O#9RI9BoZ5lJ&DqLIpP-muIw< zKWGp-vk}en&)KrOBNli4NB{Bu*e_xFtGt~^9dNy_rpOnR#b)PSjc}^bcCn>`nCCW) z8Nnw5|DU+*O4sf`cfm5lTw5vdL+EMUbQxE?9{QK#DT`lvzS6k?CQ4Xm?I+^5sN-2ld}Wdf8kSn_-y;3XqI5RZ z-cX1$aZ!{HUUq|*`MH#b%w6)Zo1dO}=8yOrRW>N7n%rr$xL?P3-E}X0%U4!Z<+E|q z^lMIMX_u4Nbcv8xr(hhuTEM+WyfWyf=yWx=Ev?ekd#!Fi-}r7`gIS7X^WvA8HbHS= z^qr!Sjt&;K>#C~prqZza*_|eg;0eE6pT@)|B`CiD7e&~v0bPZlPtaqXg! zmBsO+3VP#OQD}$N9ypd?re3r@vfUB# z2NRQoT2=ID7E%T;gf--*U@VFDv3eRpQ0RTf?2`owM z0pGdziICn7HZ7r2BZSLY)1ITy$rbjJz$`pvrc*O>w`_9d)HM--?0OFF+eP?eISnBP zYY^+f`SkTos$DkDDSJGNQIwmAY&1z>3Xh56L}t&8vUF*F&>eXE z^+Vf^I(Jfm@+t=>`0MM6D(pq??J&2r-s^E?>6S>&!&dfGTt%Yt+Sk`_3Nwvc2e4Ee z^e7=Io2aU_cUW_jzbs-V=%#_j?oBHgE+)O$#Or04_q}eCIycDSHHuFmUN4!Uhd6HU zHTQ=3927B@jayO}ehdMn9nT7$VdPG7CjlDQQy^~jdRQB>KkaK&sj^idAIgIvHnMSR zl9{AZrPN{MfrhpiHDWU_$IcF{N7^=~KU2kiyuzA%INpX}B>XJj!Jd?yzt7+IE%F#S z1qGc=vpIZZ<=bxpS+ClKErNG|DEOz;WSZW7MDA5-jR4DHu2a(H&5k@o%=TbAjf5qJ zvF!$l`1<<7XdK|@gHU+Y#}eE6Pzv|FPMR$O4Ax`*`pC!nFU5I2p( zt1_dRou4xvIyn^@`Gl`k(qA-aHeh3PL<5g!Dtq>l(7E9{&_e~t;`^stM?^TcnYEv| zf#!h`Y^+AE1FXWSjlE9>&V`cIuKPArG8)<`(lCoa5nF{uUHBw(V3!S`_|sXTd#bSI zyf|=c!k7zu1P$SbBi>W5itJyNgmV&&Vo{`re%=VFPU@cRKnIf`l0-^-*~o{ng}vS<&2q%+eG+y^HKl_)kdM) zI|ZHE;n2IfYx*;_?*#+DBGibTZPXnoo-#|9TirdOKB$et0`yiq>H_;Yk`wE!v353O zuO_!+6N~bmXzJP`?+j?yS@cLk57+_po<;KjlUS0FN~fQ72#LeCeWoSYC^NlXJWX9l zN3@!qV~2yhEzlO8OZ_WJjv8;#eBAEMWaYbSLY`xrNp?(D<7y)b4fcRlBb9n~k2sq= z2;YQz6S4yw{GCn&xT83TK?K`88{1=ZpGIA9-6;m$tO8a{gF7c)pccm)aJKYa+pQL2 zh|mtlPRmADxNZSRdx;$K%_rgHUT7yPAvZ3jr)lWads=CZ?&O^pF4RzHd3{1qOAaja zJBZIQG1?)xo!l<4o_VH&i7hRuIvX}w!ot44Oq*%Dp541DR&n&htvGK|O-@Xa`eKQB zwFB$ahO%zK4W~j$uaQcR-e5XO13CXT7Ze`5rhav}Ky0e2eAo{?ud!#&IJ_AByfC`M z8!eikI0(^r*T#scCvYZ^#yD2tJIHq!9m`*ju*d@J)euZ!30H5#s}Z>?FR~aXDw()t zT4|hqC_Fo8AL8D+z*Lv>1|Nujc!>WzG_PfeT{5`eM?(@7UI{Fb?$!$vO3dm?FC_9%*K#R15Z5 z%KW2Xmdxr6<1v_;9OqDh&_s7()HNhtPY!GPmrRs@>0PZkSg6m_c 
zRPP@(xlxV?4Sqd6J@dLCrSMI%qLM~ZW236Reqw@Xs7u`iC&JQoMMcGL+U6z3oqzw{ zG%dG?39g@w%02p9WUinRN6;W1FzVULFh5=c=+S9_3I444{Tk56?O}-Bt~N(>FEqH! zk-|qv^0TzLZeGKC0InlM)VASsF#|&xn94|6h3LHGnYt z`qsd<^5VQkQ_bc_au3gWv@G)LKYEmBQU3V}ZbcNJ>r>`DhTBr}&ws!2zL$V@{;R3# zc_z{6WIbQ`d{VdqrU848A61-JjXmQQdHPDMt&E^WJnvrkS0x@cydZnB29Lo7>m)7f z?iKobs~0aq*9{#f|Gk{B^GJW-&z2h8|I&b{g|}mh2KPWd_O(%Y62}9?Na(jvTKELN zrj96zA5P9msP-z3&#at^e=-@xB6+#lX0GxUq_$1-jMs-H^sLf6^D?9*MpT?z$&9u> z{R%bl`makf4J+X2^^l9hVBn&+Yt#_NwB?FZrar&0Ojxu8WD3`8!L}` zt+v_1P2bHG>ve`w1Vh#x#d<(7_LcypH@>V>OHVJxwRc33+u1b3Q4Q{0rK4q}UkUQP z(l#V1H*6ChSXzuFK5Awi&mA&%ggn~AC@3l>>ArZD?$&@IXVdpFOyQoP6g5V~u@7=N z(RT3G4CH;yyfw-bEmw`eiUjGfJw<&3{rPe@DGg-goFotvMfPVB_@qrg4ZpPW@i`W} zzkvqPz7k2;M0rnpqw)Qop&U7WB=Pjk^7IvCJGVUQG<|EI*t-!hjo5D{h;4b&_lW5^ z8S#8JPEJk?G{bC5nKBFFJ|mOnWpB^y8MuboY9?riF-2=<0hCIF6H(;tw6G_;$CdM7BTR4&2VR-Y}pq5Xf(y^=cLXjtpBi!|FM{JDZ*22#~ZHDj1RBui*#33 zwuo(*vH?WP=UV9XJ72}Ve%~P5ryyF-#!q6idF4`VraRviEzO{;8V7{4X_6tn?z3`eZ>AnzHk3PQ=`UO8i4A?jPg}BwXZhL0d zE)tgr0@syPRbk$tbP@Sn>H^$xzrq?uWfhns(s3H;*%^0hMpx?J>9SfPZXNTa=0kh; zL@=Zc&rn{*6Y`?53fOH-DP2tywt^pVl2^^zB7gjNs7|BqkoFqrf*>4ljk!_*r)A}twnC# zOlD!@{zobW4*|7oF11^-DtO<6!2oy$^;vq66m!`>p~Ni#CIK5QF`#v)5*OIc*nGeG z`;#WFrRgD8S6#QFh@srlX62o{Ia~LdcirKUN@ZrB@&1khb3oJ3|S?z#gi+{kN*v=dIsGBF=uF8@^r3!j?I$6YCJLxU${p49Z&qWD#4sISc z0e|g*GvA~$|Km4o#GXkk!ZT&p$C=hdJrxZ&9=307*c*Gr^nZql&87qU<|X-@tOxC{ z8g7v%8#dc3E1Cu5>0M|?Y;Pm^p4K|1F8tt#^S#u(Bx<8!qf?Sbk_3FyXJw;1C~U_E z-Z9+%%&xJUs=Wx)T(qJJ`1$>^VT_Nq6HtNcnIFcz0S)U{Y?8Ah>M7B% zE8Lx`x^o=~*_hrvy(Iefz6wUr&AW&g8{ zErT}R+D@ZOfQ78FEt-BmpCXq5fEMQ16}vO74EBC$~FGQ37Jk zVdJ9`>`*Z$-V~#qKg`5bYf*kD18XzM51RGn<0bLc$r>E}PAqI(1NpN!0wKbp{H^bR zhJcWYduMjQbc7la$s!(s&KKb4-^y$|IF+@FMEf;77IGnjZ&j4-f9qB@DeV0Hf+rnU zxS^n2_wU+K&+;8;M3OTG@3jnr+FSJ5u!@>uOj`5b7vvv?VTo~~K`*xg8?7SO0+Eyo z)R0R9z!*{V9ZR}^t7)Zh$%s>p^lc3qmBR}cBQWo3dsOQGE{P!*rT?{KmK31eP|(&Q zob8vH_TiRr`|Wd4oFu{F_w?jzgE?U6FO2@~?_$c1y#K&BMuANz7|J#3&p($|?mqfn zZvXh%;GY>Mo93A|J|rAEa1b<(AkCY^Tm)c0TmOoU&+!ru1Py|I-M{(4B0UQ+d#kAW 
zPpA7h^vz)CmhQBFR}^HkmeWh^N0z7Z6nSEncAnd9R)Fl0etjB|;M9`j)S||--XvN< z9@~JdeJ*EVE>a#2r6-NJ{*%DbpR;xBk3?U3y4AqoHPx3gbwlQ>Q_`wjg#?!?ykD~G z6MA|iB#KJYGcs>CKDmq9aKPObDR*LfT2p6^>+%+UZ6T*VsFlAsPmbaf9`mVcTeWwu zi5?;7A%jw*dcLs*;P^a4 zj*S5dsQa<7Ej5^c6O>nD*(gldKUxW3&`AnPPqmBi_N2!wbw5r4cjx_b(^xh@1BHvQV39WbM6F7gd>O&r{(KD3HAgycZc4$9*r;{^#NiuKqzs3Sz^&#)fP_s8Z2b9f>vL__QVJ8mTe&7EwT+Hhbf+=qo?Yz zEu4CK3F(>HJ?xTMLw!ZOND+K2r;yh#1+)Viu@N<>$5j2%4O*nSdvTU)!jib zbk;fiq-~(@OSp(($Ur=y8^ zCdeWy;(DL(F1iRxkI7J4i0`y(r{r#laF1HBJFC;K{;J4|Bm}~q6T;N19O-Z5bwt|u zg_12e+wbJhbc(cvFqJz6B-ERZXro3u@55TBK6G7WSzrGgRhrt1hLcZv6c+i4e z`w*ufa%mo{E7O`JgR-bu@&?)=tTkRNgk~!IOd3C|_d?yu*L(6K7ng}f?f#heL@Xa< zhuv~(d8c9ijpO(yQ1!LXXio=@Xdw^3p}a7=+)PY>*{ms(AQjgj&iDBq@km#CYhlftDzp$aaASC%n$&)D2lu5r zgWxIhb#*Fm+(US$m=iXgl8o-XVuqU%^Xwu(U$f3Jr+y|vXRNZBT$+~I6F_k~EdDf^sBz}jZIRtmmubr_Xp^-pwyj98D3_?cC0^=Q zff1BOZ|xuD<_O4zfqTfgHXe6~fx{m+xQQ}x8ZL)x}SlYXbaszlIrv9G^v9V`)AlbH!grtFlq^ z#|+zgdLmCi|4(ql(}~|{8RxQq3R(P-hkQNsM#Fq;II=hT-=6l^YkbdiKAnDOHg8@w zbLNus@)cZdd@I+%lLv{Pt}aBo8M1wEyjI4(Vv*!1U7N!uJdKK09E6^NZv_}?72$DO za1sl3;WtJ><0*?U8{59z{19z!U!8#@GQ+~;TX!>rNC|zLz4@XI>~w;=Vf%<}&@I!7 z4BZ92nweh8%%L@vyw>0_+O*Ow9p!9hFhB8B`BUrAUBAzXK9e0Q1ka9x!CT&5zZPv# zta>|~wI4;m^Nlxa!(SHmUbkRs--p5#HEG^^nUdqA>*OEc)1Tg7ue>@Z?nxtKz#LL4&TY~KDM`E(~hRo(BvQE@T82!_}jUb{ajp| z*4-R7p~^N#&W3jytvYkXzT%;6b)529IbFNbw~+RA9L|O!mWGhiZIm>_o)Utvk^}w$ zQ&CRnIk~`5$E^(bkzniLJKNW+h4CnMrbm64*kib{g4C89Tih0O~z zy3HBR(zt}f$R~ih<3QG*50on`DpF8ThzE$s&cWx{ZZEks=VI*ILqCphre#UxmdOm# zmr=8!h>b!_L4J*Hm>qR6sy{4DR1I9=sEhl78k4l?FNd9_D|3OO{_7JFPGvDwJ&`HA zChHTeEF1o8yd7GCK-7JD9L8s`2{6rWOUC)_$1jD>g*UiDLq4}CR;A-7{% z6U0Jy$IWZ<> z!oIY1Lb%#YO0`&C)^JF4y5acXh6i)K=lHRl9dNhGW~kAR;ii?D9c#4Bm5HPFJJY*m zamUPfWrbbl;|DbbE9ofb&%b$}&Fpaxc0xy6qxO$N)RumV>&pgRtxWG;n+a7gP_BKh$Nig7LM45_ z4n%x41r4=`P!vBCx=WBC8lytr@jZcNYQM5CcDul$_v0@15VAc{M^4#?a>Ebb8sZ~F zC`NK>0JX$7v=SQ`G8xzJez9WwT&uR*t)Tw78Z+83Z}nPU6p|{f!m_M+1h*qzHr+GT z;RI)YURN-RAY5|`e)!5tH^U#51h`@c4}UHWj-3f_BA0NDpET3!S>J`=^tF1xkm2IQ 
z5WEN|zuv9{AfuO@imG0X6jrpDa@bN<%^2Ks&-D5^u6}tg({EqXGZ{;k)FoyUdq4l( z%T(Q51Ge;>>>dC<7-Sw0cfg+d^u+(fJbL}tanX=vp6Q^zN(DvU)*D02k2AAQ^TB`| z+PDDNxc=hz(i5$rYJl1`fY4w}ef{5okctqoFz=4qhlVps3MQ|up-#Kh|6~t$C*}8d z$88dXJevfav%;hpmxIXsxO!?CO2xgHsCO4CEK0-wS%IY&n|d!ST^Vw_S>-t^8bZL# zcDmY9D1cgjsf=<2+AIJ~z1En`Uc<0$N0?2mc`r~&&xIdL5P2qE`V*9$>8JBT*9(;M zRPIR;GQMQ!-cnV6^_?(s9&&s8svBO1^&0>_iYe9)&nnXHhlyOi*t4YPu7rs2{nz~x z+Vm=Bg9{*N7><||v#ay9eO%LDr1hZ0)}g2LnsI5t=9d?Z(FIzZGW{xE%eO_K2UG6k z-MW2TZ0>ry*1kwP>)??r>pWm0`Nniuz8zz ztN8-DH{XQ#$;xPLG$}|(2w@&UXgTzb8Wf!gWXh%QOOw8bvQ@uYb7>~m5gf2M-{Mgz z*bFf%rY)>LO<}__2Hzw)(DYI|hxB{oetkx!rAEx81)rZm-9ioc z`8BaNerW68@s~1wRV(SEIZ zQ5yH$H~HDbE-u@M9~n}n<(WMR@crfpSPdmFik!P|8)0jU;st5nHz!CvY==_-pd(O- ztEwM~JI^Z;Dc1l@S3fF9zQE%tchp@67(*3D@79P%I+eYitvGf_9%0|Jb@SLXtcPnF z~kS$2_7K*I(&|Gh7wef_yM)8{62YfFpbm#+p(NU z{EwwpO;l>pBbU57-ip|#;e)R$C`636(YQ(L1a%b3#1h@}EICv-=diX27YC(YY9%Fk z+KsL%!LVyKagCFUnv^*fZd^7oNYVpI3;N?ycRlU$s|HK+MjuvX&&^h6XG}e6tFAJX zuWcVi7h7^dIsVGi;(!r@XQM1VkCdF837`AKNMRY9d#^+XZmr2=(W52j-sT>irvIIR zcdxGt4}@6-ELs-{7cce3Fz?g7|%mq#IL`V3nEMo4KGG{uloc+|s-W>Kl^u6qnUhKK`L{H|NES!YQH`?a@18x( zuR4oPczhLm=&JM=iSW=1%hX?V1W=NgfT2WAL=2!o@+t{ogzY1xuE^<3x zjdk(g!UfpGO1A~GxzwGqkQErIQBq%G*WpGq9)5EtZ=vM0$|mpBnfCU#O~ivj6&gkq zPfss(7#Vi;nrgIUR5Wi>U5gYNTwE4P4l?+7&1=H=rN<2ddW!p{x6(P11w-hKaEhwt z?jW@H?dS;5dZvT0q2?%i{=qPlfV-4+NwGLRXrSyWFJIQt0{+BG1$9P*yk$#gV)K`G zaA=BL3!BDHt%!&PF_+ezQWkDJrIQOg?|@)DL zdkpcsQ=sL18@Wf`Yw`uO9Sz;%nGU14Bj+MHs$TS&s*~qfkhG>XuftM*(3}u9}x6!q$G97Mr4F(B z!=2w-U{}(Dy;p?}5xg3w(A z#$Tbv;TYgo02qeSF38^^eO_AD5d{(cbWplV-zCBcLs) zQw!7t!*<#6={V2O)f2DamPyLW-0s|a?TAn$iGZng>N*XailC^4(xzmQ_+B=niuXev ze!xoN1q#DYheXt=iMPv+#8%w!+ch(tM(FaMDb7AiBN5-9LCpk?%I@yx z0h!gCYQef!_q!1;<%vm_rg@{wOgss^>fBe;MBFr;`oB95h8$$TI>jXeX4mk{fC?C^ zupNNoKa3WSNdflB($ES@4q5vVhr{L`JTi8D0jf@?nxr2kQo34}Xpunbt@l4)kS%u& zWZp%G8yrp#Vs#ot>R< z!Y8apa$5CpC$AimiB6zXZK>mM8XIGC$%n2a01feO{&0Jq78|KhrHwUsf4!$j^MD-~ zS>Bl=D*&yh{cCWLyPhTC?W30A&Zj_GY(5f!$I{6|ZGdV*dF^8Z@MX3~rvNbpjLLDQ zWifTGC{sYG5hfFhH8J4@8F7}Wy(>BF 
z@0In5Y8K$X;a*x=8hWYB0AYbUJg_6NR-U$ITu{h7FFon0f$EDhk_{tw@`pPaj)k2# zE*wKdoer1{1+E&op1*(32^U3!0HCk0KUX_q*rjb4bVHLgE2aZf+duDHNx2Lf_WR>@ z)uVZK2v5KgYZ=A(l8~zuLN&;Wz4lH)qSlwC|FlW5f}f%JKdZlJ5B+9)s%QR>(+`u` z_>sY}1hORuv$SsX&SM_qyEo3-u)p8acRra{A^s+H7@Xt0s>WQvAS+jwUt6iuGSGOH zFp9!N7)I`W*$e$aV+N)Y4w%Bhe(@rD?KATmu062_BxG7E%?iS`bpS`5%0|TfSkWa~ zE9c+qu?MInm^dBPqm|Tw^7KH_!ra>21!ndl2y<^{P!0pZOC)lyWTjZchS+iS%hDtrMc5#FMsYfYrH_o8EY}-F zonq@_)TrM_A>iM;SL5#KLfx;@TaslyDH{Q=N#xUGv4)~faXg=+j89gq;l5u01Bb^z}&qv%yrODMN&m?sfz!v$Kmv>}^F zN03y8R$x&93$+*#m<{h>)56&-uTFOIUuVr2A$Z^we*Pt3;;#xjI-$M_EKZhfyKr2f zXzNk7r(;(vCxa%P&I<#`#k9gI2CN`Hz}%*1W(pm&W||2B!!GwMIS#V@{R*jZ@JJq5 zTH*{)Nh_1jfS*howv#y2#0cLz z^}#L$PzT4WJ&B?>FK_~b zqT`wZdT!oXlOxS)TZTF^qXGMKt!QpJKUwsRH3I)>)Fb~%$y|Yau&RN^#?Ak^fi@bj zu?Gi%U_%2tL4@KU>}C!yAK{lWJ@11^HRZQf<&V9E`^Ip6s>Yb%79zp+2t~eF&4Y*j zB>mw?&iOlMqT)hk>4?Zn##7jrbD??=Ou1I~inHsv*>$)B^QDV&QqqCnD|@To7a3iW zyqIJF3|mMzMKfeu3?3^$r%jUa@K)IR`N7?-xfV+G#!{U3ANDn1C!YLWYV5Q1O(lzs ziTpbb9^qf6_zjzWa?yt!ELd1Y+|={&!Kr(K0W*KEZ=!;Ux7$_S@Y8c2U&442pGwI> zIp!eCwb*|ZI_?zJG~X>&Hj2P_$JP6+hRwS8+mbhSO+NuUGwhF}isP<5j(8B*BQ%Hx z0~LBwXWMvvr6rkPYiHs39s(gGjwP5Mce>myf?)rudXz6qc&^nw_`0g>Uh7-`;K_Q7 zxA$N;TFhrOl((GxmFX(JsRUAKwfv2-_ulwzPy4M$$nEIUpKUZ z1nESrVdGAF=au(A3)%+@P8Qd={kVyfkr^d@XrQsv77S+r4LR(l%v-(*P)4z78ADi2 zoDDhXt8yI|RVNQcrRU^yfEt;BNr3sUVCJU~JR8JO;8Sr^J$y-Bv~A@D)e1#ppNY%s z&xd>?xzr&SKkc8uqF2Mz ziI4Dx@TH98-XiD@v_%~gu_BaoLMru?4Y^YQzbGFp=uezUpoqbkGLs?4%1kTn0lW14 z6Ol^zk=T4PZTS(fBLH?SDpf0o;MedqGov$qP@RP#;sGXLvoTmmLPFB}kCEjZbMXg} z5K@BeZIIg5f1Mtygq~)e`d?@H^IYM<%oBX&$p20HA+I>!t zg}VRZEUyUU<)Yyq+}q0rANz+_U9!S!7h;6mJsS7Z2o)4YY5dcDd z7x7RtY^x`;Dsb&WJM${XBY>c~8ou5y8sRHg;sydouj+c}bI}@&yp^9sYy>=XO$AWl z+V0=p8lbfuOJY*joUQ|8n(faf+GP2I)m;qTjc2O1jO-Z}2)n?^UE}KuDQUR%17Gs|T*wqb+s+v?S_xo%|_3=xl7l{4F8ySU@<08lmzEw^<2 z^utzf^c;p>j<-8&uFM8mEzWOp@F+_8p2O1arv`xjTMcFP7PYfo*JIawgZ%j=b3<+b zlk9^j(=t9JrzKE||2*ep_=bR)+q}+&59|bl>Lv`tDhBc}Fqm)e(6;g$O=Vbv*4Jgm z#Og(pVlC931L)zyRN47M9h|0iaPLK@wEIo+or0_OrwwzS?J7MY6+SrEw8eTcZ_c5c 
z^^vqUQxr$ys!}Yc{ zL<~z`N>)*J=y5sZ!aeGCky`lKt+#2}$!P0ZKbhPgD;GAAq&qUQwbzxN0)Sov8zyEa zB-5~qFmfwPdQLe`yKVoq`bH1-hUl%w=rsDyh1v`5>tSB@_u3neV%fH4piSq1yyg8v zT9lJ%!fQddd1){wvMqSStcm7)U!SkP}-5_Y$a}Zf0K{*WJ1$3YNbl zez6iN_H;xEq&xc2+#spO6C1lS8DC9u!}jt)kO7SDF5VHoY52YkunI*tnlNVTFj!YK zwm~zJt$`Tx)CXL6m-Ou4dsgIiojj~}HGKb==GLAk{vGgR7fJ`r72{E18oSy>xFy%k zp8Ym2UVhMMq@o7f<}m2#vxPG|nSrYI8vY7AKroD!jX66mo1fcyeR!qYy-wiKkeY}7 z#T`*WarNLABN4zTqb72bb*<)qs={0!uvuvyWfvgS zw{#L-`m$j=P*MM?HilVf{^_tJnNn(=0FIB92b1OA>{IiuDh$d>zzXss+qhwMW zvF&WC1#7t}oBUx*jZQ1ERwHB`3k5EFm{Orr8OVo*fw*f|&@uxmBu`JeXc!Hv2D$`{ z=b3lEpxt^cXX(Pg2Ms#~?~i>3d@T176q*73C))(^_yO zG8rYW1L$_{R|Jv@|RQ7Ld;0!%j*dXk$6R>uiwuH&<}XuvK6d zuNepu)?GYXKU*U*OYJQnPmQ-m2DKn|0Ee`d}-$+7ddkaKdp=1{+CSWAOUAREyX!vNdLI&%e-Q@e0bxZdB_ED z6#dyr1m=Xn?}`yh?w`^tZ@~#$f%JEm1l%T7B1SyVbir;nu77$Wi_@bYaDdwbDx&F4 zGtsShIGLTX#2j|*YZzX#LxS`iGt3+G8r52dP_fXC08QU3%5F9h1wH(j`UzrY6 zr&d;i1|A030nYHv;0ObV?^C-epMpPqHOJB3qzrh=F&#H+jftFc`Qx0vR1h+#q!OuG`e_y8TGA+$2i?%{cP8dbg;SryLcex0T5HF)g;WBABYzn8Eb@chBHI@pZl(iko* zTtgUWS8(x(G@E4<5eJg!(h;v=&^A5NMlCxRTCJxavsByK|L(foKlruNM|zXV_GZS@ z-cg23f4V2aZBFlByKw(SALzmp73PyPRYWj5&vOxH{7b&bWCz9MQ6f~eg{MVP`rb0 zBcMSv0$TV94|GqqipCpTt1uPDT)Uh&F8e{P9SFWT|8d0!ps!W!F6<&1;tGz!HJ4)x zO$2R^48Ym>=QZD=ZV8GLxa&fz<6Y%2vB}hpMewKljzRUP)$iWIwU#1oZH@v6%~u*A zJ_1A3mb#sNYXlLFlaLX110x|jlc2L)Opw)yIxpRU+jg&%rpzIK+S8yuoSFxQ(9Vt) zz4VV2c~(H#SpHzU-W{a|4v=#kxA!TX;zbIB=LI}j(_TIPR7x(KO~tyA1r&bZGrwZ1 zjuC7_F1w^)k<@t|jMsjp*!>^-VZtJ5syO9^a(UUCq;yG-9zlYKcFl`GSNYiCd%$x6;+VF$kW6zSip0so@x|0*{Oe3QF z^8+*%{)*N6o*>>lO$lB`4^g;v(60Gi4dENVvNdiHsvt+7RpYpjff=TWpdG~TT;y}5 z*3aOv7N|wEm45u~==J6oJA}VG^In{@-FF~LOv-ojUNg^i0lq8jp+=k)4nfwMhy-rY=Db0+*OQ;+IXML^E0d_-k{!yJK0>PS+>BZPejrn{6KsREWb+8!#_0R` zNpVDtaDNob-+TZ1mobVGp@y%$)kPldzt)d0agTa~@mBbb;Qf58OTID6;vjKQEF zuq(c7ux6AW^?-tw8oi+o&ev3fY-HtEF=ne(#Fb25*gPE70-3bSnz?`Xx`tWT2VU;3 zcj$7ha?{?3mITIcU<(mZFO{c?$8;Tezm_`*jrgVjOwd(*uxt`eup?FkPKWHa6w51v zkp-YM2Ht|Ns%|IH*+Gyoumb@h0pNl6xGc9lJ%<(6g52*~huj8+SU+q1K?A$B__gX4 
zFlF;&`TS6;`1C;;8wqgM%eA+zngQ9PDf0Jp={5CLZyg7p(o?W@#ukk8-?R<8+G#ec zP=4b_?%mmFk(mv&C?E!s&FGUUq*4`4_8kr%`BLZeQpROUuWz}pPdD-c#YBnGN}^y? zX1`XAw(B#}Ve}_}+W&spXyjTG=>%iy-zy7FHa#}ItEphn>m3Qe7P4k7*ed?V{7rPc zn=Q-4s)sBGE{`tw+MQ)~ULLn&MsGZ#^744`b1PLjb;;OJR1AAE1l&60;q@Z)zdEzM zc}IHY)j_3dd9$P{uTKcxf$2AoEKTvTxgO}wJf{GmYSEn+b8p1$b}oBSY7=7E*?4no z?}(x4k5C(2$1AGxbqgx4FPz4%A8sHk*cwJ62>xw;x?H*io2SkV?#}IYzojX=^sS)E zLYrqw#Rk_}p~c2NCk_x2TTPKVl|1i%4tzIt*a8bp@i#a`un1*hOI z_7e|zRg=(jwcK@jZ6P<=3qLZZF4$Dk?-ivz@!DBU-0lU~y`Hh*`>{t>99` z&y}~D1wQ5;xEEsY6)u}%A6l`6?bv)L^{p^T4#~@3kHJHVPnR!%7JX~r<3(Xlv|Ap# z!Dx;%dIQ@lI}iCjM!lHABAj@$S^cd_#v(tU#+e^3=vT?E%9anMDpki~sNob6fI9%wlaiKZx5I zazX-fqXcGbJKZpM(C^IU^RCXE*Lk&@^Xu(7Rm@?^Dd8_x7xi{GY#082HfdL)1OE4* za)z(DFgK{qwWB7wuk)xlT!L~*uzz`kiC^2wLztt9H?MONa0-U%x3(xFV(@c06ijl@>TV5eQOq{nPl#~86%K@_ z`A9-`^ppIkBB)g-X{Wv8)R1@heRhavWfYpo%CDVP{Zi-ti)+2h?v?K)dPWNRb-0Z_q_%S&hCA}0g&Z!j-5x--gqs?S$S|{JA(H*WYv(KKfCfy zJtj5T%P>z^vWVs^hdEtv$m>7zL<>{GoOc)T-klDO3bii8EI5FP zEw%6B1TT1A<#q)6w-s&F7Vk);DO^Y>e@C?F=c3`|<+=ue)2A7cFh~c|{i;r-P)M8C zrwHz&18%QA`ru43`iJ-X1A6Z2^+11RqKo*3lw_aoho~$Tu8DA#^yfE(&m*pgOz&@= zwz@bivUSB7vdP5j))~>koE7EPcjwtR9~<|fr5}n|>o%xUKrJez$vMcW$vLONc6)Qc z7`D2yy1IGz_i5e1Kf?dLL1R>D>N-*F%x7~PNskp zeoE|nm*(3i+nkS{g;-0xx0evyd;fb#IM9#p*;~igOR_I@c$ClS4oL7mTS+&(^!BtN zmBe@bMDvDqe0ELk~H%Fv2>L|aWzdBw;+q#;!bc~+zIaP?gS^e%i}*w)_)B0@kE zFlhwc1)cf3_}_DZIdBnB39#;+MNq%Q+*o6#w0WI7?1NgI+UEuh8f8F8QZJuj7w@SI zy+GC_t~Oo+s@B)rphL}9yGevxfuQQt3?Ees-Sl)b8Noh3Wh*N>Ok!PpY(6yAWm_xm zA{#=FutddR-l%-fc*w|V_EMo%bUxc3jJ7TvT-1jvb*czEdQPvX%#lt}B+W3i?X<0* z;eGQ-?IHiH{@n(00>7V27mpxH*=)n*bD*2--7g6ijn?=60mGqGzCp?J{x}S;h)n6Y z=hyu{l2@_@|FR;}+DR#No3!u$hmNEfJBR0lq-lBT5z^(fSmvkgXje}s2O?sy7Ha)X zoKs}j^E%JYoFQA6*BZN1TDb}S_S;`Ofl_8_Jf-blKc(RM=k-x=ljy0(dEK6K`l*!= zWEI2cUjcmR!dLs05G$6N4$ERvTook)HGdt7@lW_YN&Fd1+!Uw+%rG7 z0xPyWo|ci=P^rgu)|uC8hyIY$5tb(&fD5l0%+v1U72yfxazr}!fF0VJ#g-4wKElIS92D;YPD6$`c)*N!e zlb+(AuX+XTbc!@%6QH8;#ATJW&&TsEh6vwr2XbCe({81Fdu!Bn#if 
zCP^UIK-PEaFjq{k%dD6LYu2h7>2Y^TT*q^_b5m0xNTC?XTeEJR{u{o;G{^pWQ>X8E z!a@rXqc=xg-w|$v{grOr#&KKozhgX)uw6hcv_{=_{Iv_g>s{(VGl4(;tx@^i=Gmp4 zHsj9aLxn0eet(%bM^0ZIPTvQutqb{0zTt$0hF+r6j`)OmV zoZu1t(2oisvDVNbEYUz}^g~F5N65pNuFQK~+UcGoqfJPkp%^vD4WZx(5?5CmgGU!a zvJ`|IxkKn7Hs~o&9>hQY+}dH@s!1wVX%M!qAHGMPfoJ48-&#+w{~3yWWHOi39(v`; zX0#S=K(9!v#;rk+TweVeK314i>O&;MEQ2zcE$>)t7SqPJ&b0_hMV;?()__`Vyq-mq z)v`)&&SXtav~5ciLX3{=7`IUTn1Z-67!fX=etV#+T}GR)`$tB$B#4ABX|MA6hJf_#}ch(A(?Y+%J>rBq}IWx)IJ0ZmGm|4TmheK4ipHx7kTG`(@J6zZnp zYH&>jEm49sEm(>#OVY|49MVTjneEI&7ONH%(eSF2zJDk6vcKL8hV%Txl75#J@Jed! zNHC|sag)y;%!-7@MWTuG10eC+_jz+Y9|4JQVA4hAe5cJ3x`iq>>+xeK%qfENM3IO&8C!y0Jw{=E0tpmuD&|1 z89PR|5z8sYY_u&AmU=!l4yJ*SVifq2=P%K@U~|iWZr+x$YcrY^O2im76wk&mV(UVV z3p_IS3k?Q~*oN;v@-^HIqD7OYqRf8PWiFol{8&F)La*Z&lpU#rmvwQCH9W1QB4riaE4-{8WKDlh3oV$e=)-LNb1DhyG|Mm z_G2pW;Jqv;ow@(OTY90zDNi17F7&Zb2exFUi;c(u?LCJ)a=4ZAcbwKJ>GW1d*Q1SR zfACk`4``X=)=k8dxfGI^)mq!{eXzg;rD#sa4>s>P@~6vf-+7VJxz+9Jf6rX75ICdK zyq*%@w-q`Upo*=O1y`&H8G;?h>KCrE_xudBUd_(KPl^)|8cU(a7+N|Uh_>Ryt{g?x z&%eiWlME!+$^JG|pEqQs65EW0iJ4nLOA;@+z~|l4n(8OBijlGrJ#(wa1=D&o&>isJ zBEUt`WVbPt-iqVy{x^(^WF+eIdK0etc+kdVz$^5-n{-PDywmhS>vADkNgEQlv#*M5 z|3M|b;B^x#VUERX8Kc*K`Iruf+@N2p!}G|#Uh;6W?ZS~_^PSHI;Co!@I8>ia{buu( zP$XgP+QQN#<983PRz{*ORG=lmE^8dX=}Q?V9}EF@4xgX^HC`MoSxQEj3`jr{Nd`h9 zvvsMd;2F%>G>jBuBRz6bRt9qT$NJj4%gEnJ8IU6>8_N-$#7ZFI8uP}lS*(ax{kfd$ zr#J%_LH{D z9sjfCGD6BI5!b*e7p&M34EU=i`jYV5ZW@=w*|YE=8ag6%z=C)lXoDyYWU=WwNfLdF z)L==T5}nFjqlk5DAz(b?UXl*r*J0l&Y@!WE@it3moH_~Db<+5r}1s z_EmdL&^Gqm$28KFoH(&gXMsfTp<(NXa!8d}*5DHYwkDa}X3ARkR-O{O6O>S> zmr-O=Z?qW6+WxM+ZV*e;?9>m=ra7ekhC@Fciui#J;G4I`BgFUc&eFZ@)l-SGXMgv^ zs`Z^_IRr6?#Y%U?c=Gzmus5C+?v2tg&1Fm#G7_v`2(^9Zg8`;^Jvs1kY5EQfAxhj~ zxDI^NGeoFK?ymC=uYCC7)BOj1-#it=$d+!Rbw&bD=YE}RCfZ2zah;(-%fVW&Jn?bh!iQhC}! 
zU@C5B-IRj4c?fn!ilENwSdTbvl$6nE{o5a-+x^s^zwGkJTB9?^OIxu?fkrf)y?9%f zDamCJuq{Jt_&=yD8&CLU@td>bB+IYsO>NBSRhlUOScAdKifJ?|K4VzrLJ>a}SbrhO z0Mxx{)s`O;<0+z9`IvV}s)dP#t}{}#R|tYvr;j<84fc|&s1@Y~!iSjLJD)LW=SgH~ zS|w4Az-B4r*ImL54m;hykC3E3#E$L_&Xcm_Z$rCAy8>P4J)Xx^1%G)e?SwS)gZ^|?nw`L) zg`wV^x#F$i7Y}6B1;?)FJP2afs?aOdg@%i5?%%gqgJ&MrYo%|I$bu~3Ylrnh&?g(6W&5k}l-VupjZ@+eMxjdbzcLe9cl>LULNr3DN0TG`~ zoYcxXEv$j#mcad3mbn0rDdVg_3xVpC;}nH?Bi*sh-8fBtX2vd|Z5dIZfW8K$`EIfr zd1V?p%+kS?sfWhPL^cXVvzqW&q}x5_SFlf$%cz`d$!Yzll@<1Mn7yT0evU44{HEyL z>vx#vel4gm@^cbgQIh?!y2`|~bKB~n#Ax^P+jo{Ln~{)!ZsH_(38OsnT6;FB<1qN` zL>i(LI!bj(O=Wab0VAPE`gr#)usa0of7gU7hl`3sIlL9Ry9e<-XYPGyt+yUeX8=f+ zE?(F&LW2H^Y>l+8n4!)H@gdGXM>7sQ{6B)6`8q(55wG&4|Z^$NNE34;?rq%Yf4p$c*daCB~gEUIUVAv|FWQMRz30MG(#m z{q@B(XZO`gu_J5eq4!R5dk;nXH(zqAMhKtk(5aW+&b9eHLhsf4dP!Yfytinyui{Hf z>>Xdv^m(FFb5+F~4M>W_9QsOdbUU$7{@vq{9YPOiEYV7(*7Z5}M}_c1_K!suE1^wV z(x|nQy^fw`>=!b;SS>-;XP5yoyzfu`?yt_bRwO{egru?S^VRLy=8L|xPQnlY+ zCRAxwA%f)k!*y&bQ~xH}OM$ekF;(gDiDI~8K9*9GCFyQt>VU#fWXm*x#J`joxKTWq zMxs||5O)!Bf$H9wuR(wuBDnrz4HBj|F~#(=KOgrmh341SQlC_G`TT;^*o;?@HStoR`nTYnd>LJSAki^s zsX^>&%MW8V^85O6Mi$4Ie?<>g(NGB_AWQvrOz)PscR33j*+bD3x@pj4RJPQouC%oc zG1=sY=ih%Oh|L5gY1f6=9PHUI3dHEdSOvjF`rr;@R$VVrs8uE^&?m=PpmErHE%Oa% zkfb?)DSmtc$-Uwvlv(7D=ug#2S760Jdga2wfhI{?ExE*}Pzv!%LxQC$&~Lj}AmyIy zVq0Tg8eldJ!RzYszG6fp=M^S^)xI^3M4sGOD2isCh09c=s1kr zj#}!SkJ=e!^wj0^+XQJU&JyTnKbhpPOuL$*mlICaG@qxF>y+%7Mr}JGlUN@+IFE$O zjJogYNRW~LbBFAcIu52WCxb7dXV2u{1ST*r!q91nP4?#aup%s?Sr}20oWo+qQ}fxZ zeG97u;{E8H(n_AF&7KAG8eClUmoGSb-wJW`zWtLpYqqwTu=TXP))Iz@1)_)N$VQxZ z=ysaS=7x8NFu03+gI~-!wyZr|pD+-?0M1NU2FvDgE}1k}70Z@DUFX22Gtb_<+134( z15W(;WOK9OPrrSGyH75(Gdcy5m&n=7`GX-oiTzukFzWcZuk!+! z*p-NMGBUsXdPpbhaia=OP8|o|iT|a}HJUY3myaF%a^+L&aTdrqd=c_nw6HT2C)a2u zM8cCX@iVeLoor5jF})5_q$VWaGvLoa{2-hvvNV2UO$xtH+_Y?pQE3Gd_^ju~f!pCC zE1h{6Rp@%DOWWCYtp(5W2!W*^Iv-8h)-GeOD^Koj*h~o@z@PCwn7ExB`jY*a>6CAhk|f*&Dp25V>5N z4Ivt8oM1)^{qUY|sGh^ydv8AZ?d5G^sxBxD(>NituY@CtsMFz! 
zbh)X9J{uSnueJ9Or~LC^5*8s}*b*_p3xXHq<7Zezqm_XWjGmMY8T7v8uu7YF1&Qb{ zb|MQ%!QMC&ijc(_#B@5uP6a9>nb?0k@#J@t-u`Y&7|v^6l%t!(>6mBCB+0Sv7(FuF zF}6(_&M~WepQZa+1L`;!i(XKyBf_?U6L+d*6cs3x1OB3LL-^wF_xIap z&!qJ2$-&sD*i<5M+QEo$VF$_q)QbGqXzGh-`Cia;G+>4{Jcg~z;F($ZEg*tHOVM2Q zqQWZRDN|5O>32l7o`}f)g>>Hqha!fLuQnaX1w9l!q3_}EIXJ!j+zFBc*_Bq4iHb9c z_BIE8qu*6}aI4>nJG7%BGwvb%Qvn=UFCFmyw%=L)93;9&cuT*Bq0{0E z*k9~Doh@oe7@;z;B%b4n-Jp>}E=v0(I^eL<&~*3-I{>UXAq;(G8Pm3NpN z4hpk6J~0ft2);nQ-)E8t=LBdQ(U5W*yw`Yw?uV$PAz;|^zgcyUC*&hKvLwylSW;8c zzEF~5ilGIZ?uaCPHhJwl9LjdyRvSxFO+u`B4RB>j!GN+lm6YgbNrLF6Wk=RlK1Wk! z*GAl1kb9MNCd!|Bzp;1GG&?X7{w>Mts+QU-@xmO}so%j8n50^J#~)7GWeB3!W%H|f zEE*p@e#b+QU*#$aB}Y;`(3Vi0op)P>Q`*AgpW?!RXr6ZI-+wXG@jBb$XjA4`Obl&C zNRPuZczcv=&+(fxFX4mtz^IVor7*S)u`^?Ue5_ef8d zR1(OsLqI(Hb=`FV^Ct(6&Pf@5KzdtKT2ANw%?wqi+?^g!LsH9*%7~5O;vpFvB>VxS zxzd>ihBJ57)OoThBikqFV2RXDY$YC3#7~%xqKDRiL=&|WJXsIgDI#(;Iq`w>@wp*z zJ!Z-LO`iQHGdrQCI7o&1ecqCw(i&B)>LE98fLwjql(z%At-&1u|3b~Vtzq+b;l|cW zp$3&aRvWZ-jf}~}t)r23uPlqv!br&XP?X#X%kjZKzXHB32_#Y5_48vj(GRi&vUaaA zsdJWOsdwklQl^ykd?MWXcPMz-d)GsZihVw+9vUDxv(bRPgC@5Lx{;D;Mh!FTW7ST z0^!}ZALU3~3Zxf{g13?J8gFzFJ(O1toY_#9D@8M^!(AJO!+IHHf8ziyyi{IdwsyT^MbQ`+&Cw|rv5}JkCG1+j`SVbUSPwYHn7(>x@S62UwX_QN**iXZT zegv+;LQ2(u#8*_v2R)K+4X++EFXwlW?I?hAE9RdBWliVi_Fbq-w=Q6;m#8Q20 zstJ))8pf4_f9qzq@37YR^h>&5pcCra5F5rDxEx8%issWHI(OxW{1U;mQa2{I2IRykcH7z(`KW!W423Q4YBO_mT*RhY~rwGS20ds4->HQFYD7D7k6-n9l|94yrw-R zBA>F(L9xaxarcI8y`1H-Pfx5Y;iH_$H9$adOe!-+tJ7G%*XTz({iY39U={799V1_& zeq0^J0&LY!#S%01>b%u)Vu!nHCqtglnp~!4TAmkZ5Vfgh_z?4FJB?uLu%F5nGneNa zc2=o@%_2X-MB(!nHYTGU=lQTyK>f!N3Lqp)aUbt4{Wq%YB5f)CG^eRNKZ#sU*;&6| zoR$)O(|P5}l~=ux8DuTx#4gFgc0(!1vH-i^LK{~|K2xZ zt0;cHhr~QYxDe|}Y=xsi+gogoZaeOfr=?169#BN3 z1H3(B+?V)ep^&(ss zifQRWD+^S9jA4;9FX3(hUK3Q}WGL{94sDRf9cV)IA9#d0!!c$M9VSO`B?TZD{*|vYAvj0 zCd%qWe@(xuGHEsS)03jl@PLqES0y;LT=cu`aTUa#upC23SCk1UTuN65Y1GQ^Ua-q! 
zxBsP+n>1}^5;BQl;(w-;SQuwv#fKT&{AD9Z%EBTdjA9l%x?_q+^7GZe0gD+ZHz{2M z*TL8NM+N0?N=m8cI1jW#2buTf!yy*2CGY2Oj=*ZMn0;HZ7F>yzv58p~lC9IUXflcF zNZbOrQ9x~V@*qq;P#8DrK(iwz#tw(5&SiAI2hqKAy34+qJQoF$&l8Zi~mIH9Rb zBS7>vp|#a8K{%<3_TUu%VD-KuYmVGs+xf?g6b#SO=j%_F(_;<%upaC$Y9gP$f*Ik` z!{g3M&p%6A-}t80Ibz<{laBiw^7p(l-p;p3q=66(xrd@zN8+9bR}*g(i6`HFM&WA@ zETQLfpDA$!o9u1p&#uD4oWZ#Pw_d7yb2{wrW6EXoOM0oX&yQtjDxYE(?5p$W~Cnp(bx zql0PDCYPS@!dQLt)R1pSbJC_TQT6^u)*B0NJgflCQN=JAKbyG(YQpSE zzgMVOSYrtn(EX{XIyPEN^vPxmtDi?#-CONBAh>lE!TRh0JdE~parH<-l0Liin|E+d zS~3@DxaUDXAt-@N?lZ{O9^*-q14uCmpN-=`N2{*>n}bJ__W8_H4i@*q2}Uo^v4xtV z5GAZvJCf0BN?J!!mF?k^P!@#Mc<`YFr7}2+`!4(wEvaIKF%6PX6ovI8A(4?$$K&a| zNAitmKvdF-oTSoYtbUx#)e=JGhntic3MP3g|pJUxrcQ?)8Zvs-7A zQ@L`y@A-7i6m6F|;hz}a|A^ziphqu_AP|Agt=eFU{C$1aU0OhHlWO$Vso>i}?(8`i zAGyd^3}J$y7BLTRGV8G$JN~9`$u<2=PAKW5ya;y0MkIjAMdYCAVYh$a>qPrr4a_sc zV#|QMyEp8W@{0H1-aBeseO#;&?Xyy^WWYwl9eIb=zZQ{TYDr~4olbDGkutQy9yR%# z6zM3eeIOe7_4=mcSNVf zalBRG{{9hjs!M-G5J?0%c2(?Jr~GS1iA;M1{3LlwJ!%NYbxI=Bj9@B5$oiavLgYu% znMmH=f~6fp38Nuh#4?pY5M!ufjXei(bAv}RLf26>3HnL6LKS)ST=Nsk0%y19w|+{C zX02x&!rDI#$*pjV^pAU}V7?jP7r(}GfdO6cEQ=sWd|>Z28*K^w7ue;g$g@7)Sd-CQ{FoM>kPToOQ_gcHqU_ z%Xj8zp=GZwtF%o2vPBir^k~tlv#aIk;ccKpyc^K9xw~YRzCvwE{jA?*+ko}D1esCS z4k?{ms-xCcZT(v@tQWCHWOr2#WJ=fwg&6~Cfzp@(O{K=pLNBlBSapB#xDYBniL7E2 za{@>%W?@wR@}D`*?)z+YnM^Htw1}`@vfz4`i^w4sX`ts?hH=RL79O=>Zw=qV0w`&b zh~dLN6RYi<8tV;ExmyM?r)$*lFl=33Z8OC&{*lq8}l#FwlT2hsLV<1}V*k|QmR@cs)6!%yO< z)U)vnM$ST)vH;1!&x*Rk#lFMp|KUSTsV)svHXqd#n&23!r9g0UOCT)RK*bNv47@Lp z%qRpd6l`owf?`Jgs+!yz$0RT8HJUK5{+;yso{z`~))ojd^aP_n*_m()6(rMTA*MW5 z^YnP!gwonh_~$H_6?St=hMr6{jgADgIJvL^I0%K>mwdI=2U^8Nk|57#6s%nNzu!T! 
z?EMB#>atRJ4VO&}tA1{jGD3b9CU1tzpj!1)YZA;_N=C82nxAcXKxG8m zwK+10u%g05OFKzOIY}U)N}##S(?zTAyIg+tKW2_vAMRRB|ErC0H{iJ)DB^#LCfO_H za!=e0J!jHXsw^@-;WNbSJOSXS{&X*q%X1l2#;xu{gNAOx+zDKC2ClyoT>LbtPzGFZ z+^)LBcMMIU5@xdXu7B=d=o1Uk!^3wL({S0uD=B_llW}pAg)cCu79wJ%yL@uU(@Jh- zd5p6EC%p*LuB1hUqfoBI1x_Eb;|+^iT_x@<>sdOKxvR_~3vvZ@R>{cGDMsN)`9>vC z_`^g;dO~)(Q*XbpgR2pZgxL`T63PThPMU;iSEg+VRhUUAcyH{}0(Yl4c$}9p*edM; z2xfj`a^;R2MC%cIk|?uFF)Hou&z_e;skxvTyNO)>G#NIymDpH&zb`fGx>`_s!|>@M zExkFNXGh!+Usb_qF# z^A!u_=~2fz0w<~hSgA1d{$AHg8YsfD`Eq1o`6sAc28k$tc<@F*Ni~_tThdDGl3D<}IeuIe^zyZaV620{fN9@0$r^VD#3|+V2ED|+PZfVTq z23|gr6v6O>TQzx^h4+k(u+D~7fx(mp5qPD}zcPD7k?1OqpL?5n_q&vAa{95QF7(f_Z^ud1UG+QS&sP5#>>N9K`=@+1L))nE zHxsd9Lgjh?#0sMRn4->jls~I78om{D?coyqZ!EFLeky_+MarTYtFpPx^=e45QC8bO z1_!8P&OpR?zEP6yXdnO;Jp^kF{$xM;a3E=b$*ph>P=)-XeMn9jILHD5RR>NoQl+sp zZ3!)OB5M^lG#K9TZaqhRAaU4-&ykc9opZEtB>bf+fYZ{1wW8=KU(YObmpu0M3KKX z6%Tr<{4vyQ@q{wqCX2Ql`AO@#-wOHpKKd3ytxeh`3_BW6PToM_ zT#NbvcGR&>FL1)F7=+Ga+RgMSd#r-x~O!`uT6d z)%O(j?}AI`?4ihw(Lfccf}7urklSzZH*h6lkB!JuK}GA;b+et!Qv=D;&yT`qpDS|g z*eV6OM27-rbtLIo+Ty$asI+`%l};|~v99Pl$i5DQ*?pYjOZLh3-uqXZO+B{@qw8CX z-ZmiMNZ~Del4c}O8X~Ffq^gw?YwI9lu6LK|`5)#|$5G4mHW>N`Ws1nth4-BUjf^YJ zw#tPP(IvU%8>M4W!{%jUdp17JNHYI_b&Y!yc8~OElUg%L_A0|ujh}PVAiSrXXLkHV zxMEj7Vy#yMY|Hr)NJt{exX(i_2~gA%cUF6)Af`>d?T2+onE3Nu4*K(QS&fNSS=n?I z%qi~LLiWbaDU>f`siJ@s{nL-`oFt#OiYU&Ixi5z1?>5E&?qLRUv(4MQF{TQpcN%{V zSTAkv@qPEoRQ8mhmf~Hmi=w5a?yAdz_j^HC3QJzOeD8V86ERZ^Mvf*kF~bB7vy@V; z7;)RBw?=iUDsb56=cAw5nc(5x7GWyl4OC7e7+3eixHP8kWL1#7(bY(;(h;lBx9L5g zpLxN|$_AGnQTBKLVZR3M^ZVA3pu0*>x1Da%!`EqAh)Iv3r?4}z5a`3bQ6t>A9brg zy6YbF+UUGtPJr@GL-VDR(I=C0dQ6iz$gc@q6m8?G6sw91W1P?h%a@gm?^j+bgOi%VoJDXS80EFD|N%SvGe7$xGR9pH|m-);3qDkaLyid0+yfT}G zG=Ei#pSDXd{qoCfku)*ae$pgD=|L|KAT7!(5qyW?<_0v7iE7|~@a7*4q`Que206$f#%wl2!BOcM4YJSRGCr50aQD$^JO` z5r4Bu=WcGb!!+zC|It=to|ED)4)J~N8H794nMKhZl_}aGT9!k&lxWs=vXE=;3YRM8P!ifJAWc@W(eSzVhK_p*-GWN+7zIdOq&waA}qmF&G{4&$z zSE?)GLjG3qSFLq@hZD0%DD_*&?fNxi_c~;RL~k)4B4x>5=2NA9^&OsISt}7;I6yYH 
zn^xJVYn%f~$KQv?SP~cVU!YqgBFgiwha7-ByjiL3*lX?5g$%EOx*>tKAt_q&$lU7b z$RB;qhz0&ffnFY%?I*eCSmEAJQNm6(6C&DNENg7M$l$UiLKO{{Gf%2^7Y0Tv4BtL) z1esv`UxJ-j`kI&{N#W7FIkex7O+7!!G8ve&0X1uCw7>mA@x2nGR*wtgM=2)rPp0N~vMiOs6dwdg(^qjh_YPuNYxvU4>lpcEhNaS?RXC?4N#*ZGxfz;ZVXkmrClkqxhNcw)@D+x2@5#UW z;Q(Egqz2ywgrNKDdf3s$Kdiw$ZpgQ$@NWm=f0XdulyKkv#t^80@_SD+0po%0<|IyW zFiK~gj9Y0`kNLW#z8h~oIk_SAyX{qziAwakaHdU=^`3G?|Y`sxJi6}wWSNUy%P-`yG^{m zjib(tV95=!rT-M(=bCWUY4L;~eRN zQgHyU<>pBmQrjav56SmZXfoJTLMz2kz()VZ;~aX>UPiyecs4q+qF?1gXE6HwvXU*S zAX+OLW>fSE;lyjd$8kbvlLP z@t9p@2-4W9O?a))&mpF1pUqJ&E6{deiNRhoS#s7WFY#*?tE^Vop3_E;#c&b3$0`|U z%gv=b89MD-@bzROPQJiX_#IjrBm_ia1bA1n<_Kc2#}wP>>Z`bKY_s4Q>F`jw-h^>! z$j^MVJ5S-An+tz9W{RY?a&s5E zmg%YK;ae=~Q|yc~8=m-DX#zDfO}be;H$%Dn2;^ML%$bk=LDjYQhH>$@9{ryv4CFvJ zing<3p7Db6bnj*kc@GiEhW^`DnC_WBX!Ghx#^iD?$gHMe zpAa45TMSLMyz?q?O5KB7UBqhRTdzp1N2gVdk=H5_(`;I^l%4zVz+4M0tCi$UoNz7_ z0=D|sf-DeG}ai`A+OW&SrslH%T9ml)j9qOjd(wQVLx3-{Z%>_kzYZ=1Fnpx9N zAWuHjrn$6+qyODp%vWcFx(2%#522T0tnL?97DEwr*vRZzJ6?RxIyRmsTB&?>q2CZm zBt1HCnPn^;O^>q~=f5OEX7%=Jx`NV!M3&`dA!#vKiFO7z#htz~PDrvMRr`0kQ%25p z0;evTMc6pE$9@gho+2k_srAx+<64+uRf@ThssJGeR}lHWF0Q0n-lsnXB@ zJ$tl0`v$X1`4sm(Pho0Bf8y{L{{gsr=0tck;>d-OVtNQIO|oo$C7q|Y+#bm{VkW@{ zMo2ADwD!)Ql~cA7dHksdmO=gfO<~*hiph@R@rqNcF0Tb&6nr^t&PbV@4aaDpnyr}# z<$aHq<3Aal2A~|ir;YRUF|X15hl?*hoF@|Hd)6 zu7C~nc4GW8>A;J{tioj;m{7<~3zAXwoU=q!&v@z6GM&qn`2hMI85v3C`qbAXCg}O% z^{V6|aXEIZHm%wKPki-9q@gCef@Om;xyn(bjL#I_=(K2EmO&?cVk7(bZZ?Kdvq!X- zF~Fp$(TsPzdHLF)?wvE@H&eal6JLTSJY?QKeK=B|X#SUlBXj2_5RC+`wJyeAx1!vr zN()$b(*r<_-3TAJEDV>B*(Upb6AKK;S5!0<3?Nxz@xW?W^Occnr@VT&=yyq6jfH}^ z9b?m!sq-FV(SJ0Ih>=Kv{JC2$lHf04C#l=loz!T2O8@1JmJ&#~4h}@yw^{i+G#LN9iUD#2@jAqmmzS?`#gN)B=_fA<5Eo?L^b=J>ynU%uAxRnFn-_cI zj?Rhc-)dnUa?%mjE#Pr~yLl*f7Vzl^6`Oo`4FJ%knea4gbxuANo>XQQn6Z6rWtut1 zlr$tW)a*O?z6xU`-v;oQFU+STBqWk-M2K+rLE~Q$j#b~9( zqoejDBbpbN@7FBW&9QU^paA{fj*I4`mekfA)=nWJ{kwX+hW}=EQ;>JEdXn^l)IhqQ z##8c9Tack^{AUQqqP};_>`#sc*0x`@?1}UfF<6Wx&JjEqV7ZbZ${_%YR}p^!5#M`r 
zV(`P@y(AaOaCYx+hA3$ofc_F8P8NYly>R&X>Ee0!g+KOf-=pus>2ChsSQ?#!g9CF% z9cSpDNe~h|)!)@o>iAOhW!$!p(nl7{tt-j|Zb+~fVCPHZXS_s~1@2tc~2^hEF)$(%8J{!;c9S+z#9LQ7q>6 zzn!g!#7!7CRPh}rr#0V*MY($E%1rouAnSy%m)ibtago!Plq}+|yEllp8?7leeDjQ> zQ&)ChKd68RY4&t#e|2QHlNAuhG(jP@!@t@6Jt;Wz5{q&x1}^S~_AdL6u1mVQ$X86f zu+qorBL=A90Nn7f6}LmK0G&A>S;6HR!Hzr0-rgQ`bdPlF(otRQ(hrP}kIyTPs5SX1 zDJ6vtjBUS47AL!X{Rj8}{PplYr%Sz#w#mR(WJ!lEUOYL&!^5(j!3&xM46b*X!SSX0 zYzYl`hg;KRd5{lHw0(Yb0Bi@_==jBnQA5{9cC6gLtR|9|M9`|SYn8}guNs=#yZ?jNph zL+tI}B1C?EdEqQpkGXI1InxW;Mx^R@1(;R=jYM>S24CO)qd%aId#V2XTip-P@49SE z3C09uE+`5^9vtz-)C{ob@(zfMgg>Re#G=UqE*HB0_GQ%fU-!sp2)`2`hP_koB(fOn zDw#!`;RE=gz%JNq7>J^i`k?ff+*p$=UbsDv@CcFjCvw@A2W`@Nm`41)s1m&;YPt*!pa^no~i8tT*zjUT{p&Ls#|B8}Q7;dDBFD(XUEhZ)gOuxAq zzsvW>Ailcjf0EgIm%wH3u_jD`AC9ryD27=Jw6iZtzyQDTHZXgxpWWALaQ&zf_fqaN zJ2wHiDWf~re}L;QQZEyWTsa#7WJB;<+S(8l&?O}v=2PZ2H2&AVP=NhOtsr@OwC*Pp*2Y(4&bx8Q#?Ta@x5(cb>e z^Odl$@UMc4AXMND0;0fy3yvHe`*=b7bW{T`YjLSof9Sx|a;rD^%k8@JWi)3{Nl-Eq zThiy_Jkh_KHu!ENi0lROFX2Gebmupk!tml^k0v&5to3q!4)*(Uz32~CB_b-XwUj9q zPhH1JH5D?B0auuD^J7@x8qi2H`|L1*dy?c@ca zCEaOoM5%*QE8?<3R}JkZh#xU2AV~bX()%;z)~n{!F{cTU*l= z2L2R+1AHa2oU9?R;<43;ZsGlX6peP2>^fN=>8@B&3D7~ewVlp%4G=z!V4Sz}T?9=~a{8BfnYP)bK?-$;s!Lr+XhwD9&O0K&7Zu`#*$ zpK9LxVOav-RH_w3jP4D*5x>F(G%aWJI58IHIUS%H3U;I82A<=$Ua$~?zrwITI)YvRp zLIc8P_DDaRV1G}85cT=l$}$luiHgkYz!SaQTjX)je$f|=lclAg*}Hpr)n@(&gmvb9 zlyJY!n`ePLz22LESOW-H@0=lzXXbmK+=5?+Ws)GkHL)SWz33MD{xAYKi2~ z$pDGDxw)r+y2%|N>^O0_*bMBnf~t9*x2sbGilzm&L#`==8x=2bKA3tGDK^(7<_L}Lg2|^8Epq{j7^Rg3Sg0gobBi5=LzKXeoaq@&pWqM!&L&qx+gDP zle;xim$e@BC!u>_!W7P^q{v7B7YN*%)_~=5mr(mZ)xXQ_v?^&_y+F1(I5m~T_s}&r zWd1;&kwD7CgqDJW0=TUEVLly-C+nqElgA5_@r8vaL*DF$vr$o(?QD-V;6cMlB-iQe ze?|%OSn_H(9b}&nRG|MvUXV1j+2ylzM)PEJe%;)DD{AO>F|~7a)ZYrct36uQsT;1> zV&LJ9Jo`nDaUYLy!D^dix-tC$IL}XF*1r)nUS!DfkcAeora&dlcw^<4j|~~RVuhn8 zX$$nGdO82Apy7P|!ru?I>wGTe7-fFk?#Q#?I8aE6W54{I zU1gZ7I&)}2-oB#*qn{b-(g_`TD5U7yE1yU-iW<35z-#RN4fTM5`pn2Z5z*RJTNXhq&E-(esap1IoJv}lI?ZZuZ 
zBA;aBo(kDiZWA0hu+XPhZHbOUEfRV#Utb7PhO=ikC69>3kPY`#W{w%!v(*l^s1c>>al5cLTk1QJQeqPB9g|2?-# z0at^X=C+#)CL)HF#&Y%Bw+IiLrE(QGQuvG`FeYXcWPfsDH<;TU!f9H>K|@&d{jaIf z2$1czIf@z`HOXn^&V110cU?-b@=ZTM>#Y`_*=*k~i{8=f6VZnnvOf~3#iRT>IN-2X zYJgW3;!zpnkn70!7jyrsmG)JKc;u(edB2{sCGI3;!ST( z9NjSG-0u7H*Lm9bciQCE_bG6U3d^HNdmb6htQ^p`667oLufxX35Z}u#Z@d+8RPI=X z%cN)p7yKy@YIxXEK(~G*s>Y0SuWjH%^>pA_3X@~Sh)ZHWyn)rntycE7Bzq-4Uz$X$ z{jC>M-<`On|C=?K4ohQoN4Vx|##=Q`o5rx`KPDilL(>!uyc6tY$@EKPr7VBf#X>1=iosu18E}B=tX~Mz17*dQG=^= zR;c{C`0LpkctWybu@LjpaUm}W`u}d)7s}$C`)UkMv0HKCu}{o4!YHKz;}M_%f8BFx z_d&q<-E@?$1xuwy`I1SQlgSh|C`4%n$h-%INwil)(sg^r#|3NB)cKT^4omjYq$ZJT z3jfyw(e0&_H^q^MCBHEiU@iQlO$goj=T1_$6GhB8{~%%7%A3oGuBb3;YjV z9b~3ge{OAifk7Glm&izq{(W>QJiZlVu0$fg|A0XxjdW>`S8fVLv}YbypCfcEcs_fV z!?%BmxzHGAvKtJ1=IRJqnv?rIXG;b_w6H+1Emk1hoJE;qr44*vm%ljM?#!9mA!bMkT>?d$=Jr!TeFS z+a_b;vu48<*{(DRv;~Q6VME`^^jbkQK(fN z2t5A}A)<>s{p3n6K+pcB7Mm{C?B9S`4VI;Tz_$kSsjkDz?Sj377Y%>-Baj`KqYF~w z4ch96J`~D$DAa|V%!xQ?(#MynULN@#483qt&nB$hNRn1e4D{pxt1b6b1)Y!zpEC+i zN*GwYr;Da1h+XeAaqe~wIOa%1C+7O@dl_1B9+hqJnB}l+<10HZ^uHp>?zgKg3^7*q zK0SqXKBBQbH@n4ndsh*tWiTx3YYvhNTu^B_ke^e@ZAh!<7Cgv+gY#3*zC5{VzIlVuOR(%o``N!NP7ET-8_wI*{}qj1dNbpf=`S1SYyZ zxKh>On8g`NwPX3WSn02`om*7|2X?#O7s+IdPmTT!-~CE&@O+Wk`d=#i zlQK_Y+?4(^#jpPjhk|(&@e9H@d&pt{Mkd=y`0A?BGPNt+qLfIJRf|l#Qae={chWtk z_D5DLl2me-vPotc{o0qJG(;_~=#oT<%s9FikRH4GV-jMho1A5`YR5QreNyXP83v?D z#^5*lbFLM#7vnTSGv1qRf1F^^!_USe{)@ThbNo7OR+Fcj?BAv{!;oO!3D4%yd&5 z+sFS^sFAnXJ!Q;RE9B8UJEi&}lp_Wy7lK1Nx#b^5w^BR8w*|;R# z&5)v|4Dm$yoCESxObHgZ0EdKo%e4Xz-Xc(7+ zdhKgx9UwRKA43mG>Q%_t^`|zaL7xEauU8t(^)cWl=DkeDHlsn7qb8&>5$Yc{$F3dz zXHE`wDT*ieuvgra$r-S?js|3)>w8MSS(D{ENvH(SGC6$pEB&i?W*Gs$7o4pI*mlTbN0nIE`SGT@;48E! z-KI4$xtJoQe+d-0o9;ep=jAAyr$+K=D++{B(W9n#&Q&iH8vUndVo-&_KJy~y8`tdz zhX1}7wA+tzVfpjl7t+~RwB9G+HCt5b7BAZMDpJ<4=Q_L|csHXvz=mAt`o4qZaq7;? 
zqUzErT}BtH04Go7L)>e4fdYbihWW}Y?H=CJ)*>|FEoR2Q=!>kANFull;4}U2W2B#p zCmpihBG~@ZoSvtqkXxAzqZ$_!kgCh*aWFRxLNSnP$x!Y533DKijDF)B0=DHsjd)ah z20kTlHOiv6X};VEy=?#A1^J1+NS_g-<%k%GP+8utljEC`#(+@r=7aXf-Qu%9wC@ds z;&>4E;K17>2O?Z$)^E%@VoRbpXo)$3Y2@v*5)-(`{-K(=oH|uzonZbK%aJtjM&bL4 z{WnzJ*vnNe^khace+tyZKBsB18;aB}$4u4xWTlo;ZXfmGXFAV|Uo3yohOUbeQ&N*Z zztHQ$D=;3IM&^XL5lkpzqRSIU#v;5@eJi5>dTIau6D{BcclR%omKT3KMnu> zl)1O}jT{T;!!Je*<;r}IPp4%+z4%`=z3ip>)ZeD{s-x6`meOYWf!pdLyR(Z^@X<+4 zO4~OtE1!=yaI>cOkmb|j0XOOfVL>8M)m7%fRJ1Wk37Zdf1_wznxk9 zO;X(dsxMY?LZ-=bX`0pI4W_ff#^hu{{QaN>&i-i5LOydrXR5rNk>}IjP8MAsR^6LA$<4H0=03bL?N05T^#pW{f9C@Kw zZR>6H-$&jEFYbz!L5L@2)tyugLZf7VgWepC17)kpOL9H@S%x}fKf8-4v zB-z0sY=Z-H5_Tq6e$&g)z8v*UojRN`s^M6B2zqkCPA95NC<&zA25v7+=lK-y;+U&E ztypxvMU{YJTuSnRYpd$#T$E2Dl#@jG49i4Tlg1V|dxgKU$N^Er?U7vh@d4t=Ux(2l@JPDooY({`S!5GQAEA%1}3M~+!%j9_x!nLnD*bzRNrQ1WV}$a+bnvt6+LlJ%O(DA|TP26lKgY=Ojj4F^WqRdgPJY zyYm{Yfc2%OW@|VviiRavQQ3BYCCo2*;ExIiNa)39d_)bX#a_NoLtTXq;32JSmc-{( z^@h}va`X3&dTL3DCBCGuNFEu1Ax3$Y(Nqk)PPEUkr!N}-#YEiWk!C<>cgEgVZgGFI zhd!+zDMsU3*j2U!hAJaEgVkse?6+D=xrhGbk~eoI6We=rB?$$MBQdUAI~A2v;OtDK zYZ+k0aMkAhPXBBUVgu)JeA9r2-oOW<=N+N)djGU0@9VrWjDtqgBg*=9zp3^S+{dN% zGN2V0ChPZ95eS1@$^DL*V!IrEz>4MB~~ z4s(ZtdG?J1y4X>!+%q250VEW4S~*q-NfKO%+ShNRczk;6&~^ikANRjX3a zvARB|Goo#_3X_Bmc*~;bY&Avh@w{h_cC)+DWJZ~fiSMXd1jPvtk$4bvPmmLAMSt(biE4hX~PA~U;2 zW!lN>wuxPayXd9k%NWOhO~w~(8;AB43!)_&X6UbHwmIn+x=dF}qlRORk zhrDC_J3NeC6hDCxE-3#)Jnf>2!B_Hn zPnT1vr1|kM?fC1FSO{RnCo<+ePC{s|dv#iFAC0m#wo<+uV3(-$wK=x_g7n|0kec=0 zc%?XyNbRf@yh~fvG-)RHF^0fvPgPn^NS7~a-Mk6V4qJ6d&Sc$)qw$&A9}AmjU+)-| zfR0wJR`=?8AHpywR6t#jjOdC)l}XGwoa|{4{=GWctP9hnH_b*Mns@0K1=1M9Ly*$_ zekz;YIp{mav?EF{$A~8S-(PkIrVLVK9mVX=XP2Usdf zp^dF*KYlQfV9)3@z-f3uNAaUx8#oK7` z>#ZYS8B#ZsPW*2MpF%KXb99sazb%93{XOJK@oAW>x$U1ZtoI^J|1EKp{o4;M3k zW7VX`$+|adeEzkP;Z0V&3gZ%9E&Mi~9DXc%-QNivL4Bn!XsSJQkyxv*skV(9=wwH0jXfq1n@+r&6l6CqQ7*--lElo24PpoeNz2>%$#4u$a1-eTb4t z3u6YI@~jx*{q3&R-;zwcle9gTBML63$9VMlkOmaR>Oi1wRAuTVs#Bi(wV5MAr3>ge 
zp?c)>jr)Z_T)h$CA{b+AvnB#7a;#wz3ZU6Z1cq^E0c1MgdCkR*&dvi3@P>yIRsp?s?pMhvA*!6G;CPeD}uN{(YVF@ z@$#!!JIH`Y3goC-POs5Fr(!!6NDsM-`4D}2jZna7df3H0Y%m$U$eq)3a*qgF1}RMe z&pR~^eQ{J$lX^psW)DYUpg-fo>*DGD7x`rrWEQi~ zKlH=9P6%*}Nzd!V{g4n_*MXR-?5oZu6%y0c=y=!#^{fj;#MPxt77=+o=hqT8@@Pffs| z2(ZKp#tg2cwWjODF*cE3xF!72S`{LAB`BmgQbQSwkj8+_&U%oElN?y)ZT$*QYFJB@ z$Z)aPV@qU^D-j$ujwbu**{{D(WYM6B6wyHqZGo2a>u2XQl$}6$1SrnH@pnWIFxL_1|aZcZnV$1pp8g_c<+l`O|$JQehTlw0<8 z;pNggKozM!xt>uPXlr!9C5v$@BB%+ofu_YsJ}AMBg_X%!V>g!R8(E{n@B3lv*Tbdf z1VU4cCC5PH>WP)~T%BZ`O{S+bltYNDW$5&;?$cA833jJ;@ zKs;I~;Q9?xCLeO9$_I{Geot>;6a6=2w)vjX5C=6iM--KPx?Sjd;8_pt)rz{L558zU zRrj?bqj=D$u!2}-PxQqZbo^_o*~&(G#RsXw+dna$f3KP={m#ad=>*t#Y@bJz+3Hmm zP^Yz86EEH6tNfZxG>XiWHhp>Y$;|gG_oyFKJ}bk!Ehy`c;I!Ma%zT&)=g&)8BuJ>ERUg{L2EdbAeMm*A!1Kl|U)1>Vx zn-2*F&9v*ucS&zPq^jeI z%W*%%>qiMMTCh#jvFmQyc`Eos3;i}mB?kdawAC%vUDKa@rn=dkU3!f>tpl+Zi}l0ue=X zCF36(_C0K`ka>tO)>>)XP#%xt3~J;HsBIu7ACFrs>=)~*=@_?xJ~+st6H9B=2gN0Ne{|3GN*k*ZXYA8hawVV|Y_`0oL3 z`4momCq>}LFpcxp2k?kRqKGY zyH>~tOEilU9;X&BhG~;(yx?^QtvT*C@G@`K1%K2d{rbHMLKIs*^h`tSp?{GKQ?7D1 zdQ;DJ9O0Vz>>`({yZ+D{?$+Z9Xc<+#?=~!-gK(Dwo`Lo4DM~^td6{Fgntz) zyW%!vn@btUx}{@8bGNDUbhs64>BFamv2ATn>8N5EXjG*W<@jrFpj+n<|BG)W;|r@1 zbd|9E&4@mW>t3OpF7f4gEbVcy=_|tzOONeXg*I7TQy7x%<#up`^E`SUzuKMKj<5Qy zZ2{XHiv#tP$JpI=u9xN70^IdFR%7;6j)2`sC-4yXoy#;dM`%NyL!TAeWn? 
z&lYRj{Vr-{fC4vIDBjVZ4p(Fa;^Oc;;UF@CpzVLvy^UvkX|ASDn5=YScKNx94#Rf$ z*kWmc1V{(~&S_4Ivk*v@jz-vMae!-va0gHNR+Pd`@}sEH3z2OhX*@ER6M23VnX&gN z8n|Scd!c1tpkCDA;Q|w1pR9bZGXrahWA|eQc(wyXY?qGvy&jL7&m5M0iNG5Fu*pI+ zn1I_t;NhN{hTx9lW?J!t@IcD!Mb%$g*qMDc9FI(*vJ2PK*q-N)?(_u8dAwWQAa<@E z=lFAo3u%hYwYQQYmh_GPSq!y&B{fzp*s^uNCx;AV0hfq7TD&;HyL20V_e!B+c&ms7 zcbz@=8Zn+$ETm2tJAGmz6#Q&OAU7-%+WaRds*2VBUtGzG^LgGoDLw)hWKL@TpYg*l zlF*5-e6PswHdGRxk!$`EEP)Rq!R^WitH>B7W9ao`LE_xmJ@MP|1x1DMiz@RglI!v+ z5t0^f-nc75rNg_~RB84m8yF)IfMC<;&kYX;ZyJ2+GXykL3uqJLj}ORn8xY-GZ&U7v zcJ({_kk)l@mR`;TGt0`L_Y^1riG!Chq`@7P+hK%k%ehqjJ`X2*v98+5NIypGI$9bz z!G<^p>z)svfVPMgQl@yMLej@-Xv5U}812y;6hUwBgEqd_f(_ZmWlv;G6QD|OTvoSfQZu57;Gs-V_Sk?H?UFq{C zQEf}L5RcGho z{m!0UaIs6v@%1yY$Di4KZ1jSL-5? z92(7;K-H1ahW^9t8a_4p@2{AGFSfRqrXZ=u=(w%4)N}W}4#10WzW{1I*8YhJKX&KLvg$KD+fBX=zrX50ReS(X5a z1@@^_v0F8x4`=qnKqg6#u~t@JEHji2P4E3ERr%s%$ci2oXj>=7E#zlwH<8*Na08_I zVK1+K3nKN?qoHcUYFs2zVWOyn3>pqzZjxR3jJ|PBjD#5eSB2XvC!!~*xF(r3hNHrk z`hnal(w6k`DU@4gtAjXC71t7}ddKF6ybo%K;WskZiwSirkACYqMjDnN%wOQht)#~( z4Hb{%%y4U&fZB;9l|{b_yLzIw;bmy#v(|h)7xSCptZZK)?9>O7oFkS2Jl9wW!u|6k zRU;+5t=D0V#nI}41_2V^3X(T)5K+foNGs6D3{po~kkf*e@|DF>fsXIVVg2gicgd*! 
z$4bm`MFQ+Nr^xC^Adyoz?_G8F&jMVI^rT^yFSzcV$(0t% zeCgUu%V5)Gn@idu`uOl1=3^!_w}k@a4qTwRw1@vjCuW;#h2AlC69GZEwTIkxtb}Fs z2m=&I|I^<#eB8R&73Y^;=M_8q#eRLnErFprv2rPV43dEPICx=n}hUo&hDje zB~S}SUW?L}2>)7ag-h&?`4wsU3bTsx{ECozz(Dnj%radvi;6m^x^%HAR0Z8 z*m#z93AW_9nylF#&fB@ACm@#kpyZ(K``~OCu~k>2p#|EIr{f9=|CN_{y%AYUb@^W| zSqPs$9Q7OU{1OfCBjE;b5wiH=@b2%$tlQ%IVT0envW!un`hYI0oAdyIMFi|X)=hU$ zpxpoTn&<{W8WfU8mC>uKz;u4RP649^DiD3iK>q=x`dr35ayxe|3ZV@}EkZ!q=9B$$ z=c3>2?Qgcw5SO1g;VJvuYvPZdsv3@{fr^lLAoZ#ri!Jv{q(5Obhz`7?;|g`7IB?GM z{fyzpp$HLIun3>F8uDfn2#$Vgo}ChKVOD`WmNDtQP#K2B7rzJSs)zrcvZ3D)#g9?D zj@j2}0j(aWhYx+4pi01#4JV8kH9bCufNV{&NCMU`2`Sa1Qn3l3<>AIBeU__z&>yc; z#y&^4n86f&u_+I))|^(0dF@+%9z3=nzZ@MC*8Omd($Kz zP^o6UC>otIbA3D|L5nMOy8zdxK;Lj7PP%pw5U?4`4^)PPTI|FVd6AcI&JmA=8hIy$ zf=F{Aq3p)Z&)+2vPEy=#n=~Q?T=i1o+e$RQ(z~h_1hNw$-At?fCca2fq0u1Gs^5EY zJNYd$0|OQuI5Ki9^D~*-6}JIJW@4bUyzmedbJB-vl*px$7(@)d9UCu7S*4Dos{>x5 zAi@~tU$uGU9{yY2;M}L!gRZ1ayOCuKv-3$O#_6%6x8ZbaYIn>hwSILZz=!S9dB1lS zO4f$L65skFu#K=#uc3g!N}VyNEN_tJjioZ+!whOc_se#tV z-L4I{w8oDXl|xR0`LMGJy-)y9NXsNqHs^>2EgHnHpU%LDSByP3;F#sm5T0I zCyeHP2u`i!s0S@4z)})4%Zfe34dOU0yWMGPaURAnwe?HkD&IvzUt1UUwQ)vc-?i6j z_!w~Dcf2P}FoQH&F!90E`LPZvQFQj1TqF8&5OyotE(}33UmK!*MC=Ri5>6A!8g&@| zK6y_YeMv~gXGFRdpJ4pyq2U{Q;QYfD{a^1@q)SIh9gz;b!5Ug(W%q`mU+^d4o0hgyqm2hl_3YnV;?ErizOl8+SI+)O_qi9Eu+Q zlA=7f6lxXl$GXYecm{-P?`@sTqN&mBFF(^oV!X)@8hf+;PQqgLk0R_nHaEW;zO0L$ zJyod`jEj8Rcrrv(;=S<0Nml*pvGc>RmF8`(0cYl^ z?3slkmBeF>U3-8rF{VzPg_;N7FNbzmZg&`U9-aik{*iz zgd8yy{^X85%;u`o7O$o=qF~oN8+}Ty0M#uQyWIJwkrjRd)3Zn{pjkp}#W!#wBZC$% z561(fiQa;4eZ4Sk1biBR1S~y7u~#~%k{0B98fUfmlXA70F(p zD;cFP`V(KG!$F-CSfw>N2`-FK+lOu};4SubG%I1I^%kM0;N~Vc5vKngAD^dNC&l7O zg(aW0wEiu9ClXh?%jVNxRXvbc@a&gw5PJbZT&rTlIXoP4d?ziFHxRS$WRi7~J;7wQ zn>jj}=#2mb+~^2~mxaI_CDv?JbcjH;5RIyaRl8+8N!NK&6@FY^T_nU>FO9=og(Bxd zcYV`3YIwlFIA{H*O@Qrc6$>|$TOv$DSnfu;p3VKDR-4DiH18NYxKhl$`ITxE#5+xs zz1VNFN#S|#SZq;2LZSblo5U5@e?S;$7ZAy=Oo>YoB06h?D4py*uJlvKfE?Jl_2 zP-C}yTv*hd02I+&mNsKFv-%(XS^OW@w}U>Tq|FE2&wHQzCX8?alR>Ps?~K|&3`oiy 
zu+oXagthXr0NLxzp}`j)riLEHLHGyA0)Ki<^(3o_Rn5ARw(N^><-1M(Wp8*rE4q3k zvY!ACDRs_6ZZon!Ire>!*3na910i_CE^7m?%G`*Q=HERI82$nOuDktEr?SS$d3eZPl6aX^N<;|Y( zvTRpX20y9^L6!(9>93I|2n*rGx%-gBk>dL_~D<=379sP;#830zI zLW8H(By}8rI`0>KHx9}TI6wz#Zg8b&S)i(E3m=XzFQq`pekfn&4a$Efg~=Z$V_AB} zpSz46{Drr;%Ot7*jQ_F9=$5pU8-S=SkyH@joaLDZ7xNvQALG-pI4;gpr%P z^sch7C{(v_S{0)AkuUN*hS?HhWmF4rK4CVqq3iSqO2)YOb)NT9sbq(1N%vGyj8~#) zuiEY=ubf8Wu^c_rpbN*sXJP;cHhC27o-T?Of3x>zTXz>cC|@w}s5EPeF9gw`5dF^g zu^nFa-~6O3wf$&f!vuN?GO!7m^755Ji$O<=9}E}D@*^2{=U**-`t+g$KOS_nvsQ~` zT~RJ`SVEFfU)Dz#&bj4`;GCMacPrvjDiOJ79l%h343Y!+=0SMkYG`bN*+`O&jyX|` z>De!*J|tQE(#r9AfCi!m{{8sIPFP(|W`mpzaHUKcLs-!R!+&5;Xb^K6)zk4o7T+&b zzp>hpJRx*JJ|n3$eOOzd*Q|oVm6cg>EHj}YFh-=i!lsMoVka$IhAfP;w$J(3*Y!;$r5Ek0Th1Tm*RD+CIOM0#~Bp{0w)tiCDe`W zn%dZz-gU1^47gZdrLzqZ$CTNk{qX$t`_D%q5DbD;1r_j+K-4*w75l1)2xrQcYw0^o zW{IjQ1<-Cr&CAicFveLwFY#WBV&ct@q~7@rw8Sx@pET1@RK+GE6PPUqFVUA zB#ZvtIW(80dTC?uZ37Rd$ASucHTzr_rVnpW#U$|8HpH~+&_#ma%P<(tDMrtq&$coJ zTihmo%noGSBsEE9jF{mzUl%ZL5XVXk2~r~tKXxXNF1-xlT zq5#*h5o7l%0o^OYW@dSQ6)`24YqigEp837TaV>KF;Z#@m9jVY+=Qdhyk&)#f!O*-b=9F zX7`kzvzF~9T!pl2eno^mXZXwG-}yTbg}_M3ODJ0s;2-vHkWV9DfeNF`k{otM9^9|R zh(bt4pn+`T?aDDD7-uOx?xzl1nYy2)*c3>PJ=^k<{Z@Keg5-l8A~FYyWHj)Ge zy)ZotCJALOIKIxFJzG<1x$b#JwJ?RMJr1yB;PF}Uv-&Ozf3%LFHdfbGELEtr_;j+` zq;Zfi)GY&8ngOw=72AgwGV8oD!8vxXMAez}pKLck4^(Bpeh1SB5Ve$wz3E?v9(@B{ zn(>mK4|1Rn+~}i%XR&b4{ApU}j1ejHCzO;yLr6Z9($7vsqwI82pBQWSx|+TRCMK;W zFOrQATA{RGj?Ny!_USFk-fE2`-Jm*&iEj6OxE!^&Jdb>O+SIm-`qF76(s7U-Cih7>$h-2bJozpB`rYl~( zq)h0ZzF1imR|ud+=2v{P_sx@Oz~#o4AcQ3eFWSdWBt$RWVpSN&&@CHR{ZTk_Nk$1G zp3?f_WZ-3>)jmwGaXij5mH~f)IW5eLOf01*HKO~MR$i5ytV#(IGfBKPedHT5ax}9u zlS_bvbnayEv;h9g`*mf=`<8zaQ2Q5U$VmlX;#H?=0DS@=p4fmRlhz*uoX+Y1Cf02r z*s(ikd_{P5AjOe6Eb?h_VNU^6^K0WscX3=`&T%~Fk&^)A?1PH!`TEw_K3Vue5TuA! zoZU(oT&g?CcxFZ853&+g*Y<0U?#F%H$mVm!QNBwOTLFKwGZKN4_M;3#e@FW_w;iJs zJCeewt}>TK2Jse^9#l9H4^kgpjn8vrH}ib-DH-))E$;8BM4>1V?pGEAdw(hCX#a=? 
z0$q7Gxdu2eGIND45yzZ#EOO^Jb|>i8Wp}kM&&RHI(pg5J1+FJj>g1J9>AF$MYINKc zeS1wzTrDHbO4gl|rcRHqyYlndmZR4>sK5)z@l`ZI)=HmPjMU=~3bz&lx59JvL=FOI5h|2d4y17vA}02ez{#57*MG?o}da?}!A4(LR`91Uyppz0puz=@?R%Jx&_S z?)=^+5p3e`h;-%uj)(<^nNC$c+xD$JgO%zCr}e8M4Q+|WYiM#*`NF|X|VDZCjhU9h+Swnq<=rK_&pe2pPXt&oCHWNC4Oj+ds67! z@bh6VbcWMU1)a8u+ajj!E7fDyYec6f@bTfOJy(Gv@uy<*{hvb56;gvw!}$4~NTv_J zr_b3Kq7feM&eEQE8^NqEVV4QK;ou0aop~(9JdM--8@O2VB=dCz@uD!50zFiJ95AJ~ z@|ENz0E$T5AwPp4zocn)aUpjQ;74vT!SBx`@T;~Nz9 z^F@+;oJ%5u2 z+RpX76Vi4;1D0Uozi$^C7y4}TZCxC^_|RMjq(d7gpd&;fpq((`(Z!?BD=m`U^ZZKm zB+aL8m!xjkddll;1-BuujN&fN30SwnJ>1o#RmbD|7>fi+KHf3)6e@drHMnx`l?DS= z(=0q@tLtmm*{j3+xk_@$;gSB+hYZ(XlLRRa=S;DT7{~9UYeO2ZB-%&FG^=rVI6Q)P z+kTyG9cpR#&W#xsV2_>n;S5#l>FQVuS=C{u*Y%Qht*5zzDlxV~YLc*Z2DT z+8X$Q&|nkabvD^xUPHhA%=0DgkdnURn(q=%<1E13uzw{7h6O{@!<j_;t4 z+{V%>5wruLKfR2xXHsI6!=U7eB_%tZRe8-53@W&BTFy5;d$Anr}>1_U#}CS zw<46scipIPC@q`aY5A`H3;-v}KVhwNmIw#cvQlh9C@?}5b2}3BU28Xv5nAwaak3!q zUR}Ju9_)V%%vyGR29&ph8Dq!}ao3(&5rD8GI2U3Xt^*O!lVV)s7Yt3s`QvfA?H6Kq z=j;jHyNS=DDQF(oxni-yT=s9d0ALQsuigDg7v&`8d&wP);6-{-fqg990KJp3ainO_i?v9fM#$nq<@>=t-a~y?w?+>9emFH zki-vn!Oteb)BM=%Gh_V#=T>%7FwjMG@;vhf_iqH~J;CGL{oCedfRJ=L9?c_}gkYZN z5un!P_Hnmg@T2(jKWqNnG6HxRy|bQE1P*?CXm7Xq>F`z7B64rpL9^er`CT8mekKS= zH$DyN1O8p;e4+S0N?;F{Ie+gtMLE4DBKWQ|MULi~8U*$<}mJ=*m!%z7bWse0#m zwuSo(7G5#DP>!fb*+e~eCsHoIi@~y4RXkb8j>a&PpLIM%n2?JdH_B+C#;`1^jPtsx zo^_LNhkUo#_vf5$#=I;;X`0T5Bn%rw{I6)Ij`N+cD`8{INzyZu9nKTmbni55@2c1Y zn6`HCcS{OujKbn;^EVkjSXTc=@*nB6xnsr7MjhNa`7u|;tqsfvW|&8Va;$JXp4Ga&wn>N$x5g(dv^RsdAz-F!-K}CQajHvniO(U%F?^py z%b87Jj*op?FL(0W0>6iz&4rZ)c+YTKOkhh4n{db^lL<=Vg|hN;=&kl3dL%ILm#y8s zYng>MqQXHh9IP}XkX9-V&BfOBtXzGM zYv=@z*osjnN!@od)H+)w9BeeEKo^Y_q=TQfw4pHe0+m0`VqX&)_BR&e zkKNVRx(pk9p;lSZRm=u(jE$m=#%L9mqcr&Iic>{zHtTac5!+|OFTJoqj}8-gfu%&U z3RR8_BQ%X&?GqjC29`E*GY5{)wM$&%*n=@@x#cPietsW1G%Vcv8!HF@V`4zD>FFqa zB1x{CionA~7f20Ot=_hTjd)H(aP4L_I-lJfnx9`N!OhSG4ab`FR{1-5`N0-h47zW~%L z<_pJL7koX;%seeyj@xGYnh{li*aC`eFarV%2D&;if4DQ!dU{~I_}KHLs}B&*0ZV&Y 
zQc>4?%9+(o0Ke{{8V3z=$|TBxi5{d*(u2sAlpmPZozqYU4XpUbfSz4r%N%F7l0sDSws4OA_NG@4DE|iBe;j% zExq5KV66sjwEE>w(Rl1U>$4ISrRrs~E&98wAjSLs|Eug~M) z?dj(F(1e>mYxhzij(cu08W=Un?Ndb!Jzx7MZRzwQW%MTYFH;hqV@mwI8-58HA6fYo zebD&5X`Ljc)ba^q=Db`i7VGS#8zbimoT6Makv#)yG7?WfDWYzB8@!qjA}Z8Y0jnf| zxeA(sSnr>QZegRU_PK%Y1N;;)5$z%bv_Y?v zxq9cGZ%3^>@4R5HU|1B|XR&M3VyM9=t}+ju@mLD{?thv9sdmPT=ddJ z0YI;$*EOyYOQX(!MlR@#$6s4FrY$xQj329B4+Ka=cP_ao;@}%1=6D?w|0n4GOjHgq zPdbpo0lI;4bi1va-{`mX2AbIihQ0vf2F@oCaU8I_;K`5v=b4!;9ojc%ghUQ$Jo0dq z;E%HKq^7xh2Lm$2?HKeW>&KveIDehWP*u- zDi*-&9qX?25E8i;`7gBvz`!sm&6SQswfGM9kxoh4JD}jrugMxmk0!>R!U34wdKIJe zda2b4aDSgGSKUUI>QHF&BxKa@at-i8w&*r%Sin)Nj$A5afCDTDE1+x$z&C)XfwutO zC7@0xcOg#q&V27MIa-Vdti+cxg_LDwW&m+@Xz_KCdS>fx5*J=`HI?d4`>-tJ3;?1V z)RxmzB_53@zpRYcPxLXxM7RMqdfFf4Nc5ai%ewzgB zY``m*4Dd#PlcnL%k~8T3z$meRwPW>THP<{)6#0}yfP~i!7DqIccUV)1oQFxA01pMI zXLg>D0faIZC!80w6xWJKo#SNkW1&DbK1ryDg1!?%Bqt}9dAn6$2w4OiCt#leXyzdM ztU#5}_?y8W09`^#GmA9toAEUQ7vL5hqri4mG2^0y`@&$Q3nW?FY#7~yp~UHoHpE{F znZJzNQWjbsa82#W$QQmVi+$#oI;v9eCK(!(&9$Z{P~C?HqofoN*%}0inOR=jmg1L$ z^VZy*w=ZvP?(l*fj@xij2NFsJJXn2%Hjt0D&&8aWAxmDT@sUw$twMPXy<%u}TiZaR zQ3#?7QCeGH!ppCbw*eyI)LEKIkukcWCz&1s(uvH=(Q6&--2HSSWFS76?@!=U&C$rn z)WAuZ{?f8soUp#Jft92}e6}9&)M%7Db_rH4G5$FklJt)2u0fk@+IYXc;&Q3GBv4m| znXv`!uEgt2N7YycTc{<(#_4*wmvN`oUa5qM~Yvv4XD$0ij`_rB78-PiZQ#<}+>pat2-L?r5guuzea2<>pPaYXnD z0tDmBm1;`ewZWNhqxz|IXzBG|kXNX1(qaKyEK*p|4fW0K@!h=oQmo|tUn_@2U!-n7 zDcdrXG|}24#k@Z;fR*LaDWKwFt4oV>bO2h?9xz_`Se@*%c(DKU-{anyv&a8E=s$jm zr&bSZJMw6>8eW9ZQgRuD&b0V3_+nSYzOdAn9s4cldG7>)(F4#Ikmwr$D}-T3TqIPH zr&g)C*se5gDXng`>s{*Vr6_m!P?FF0#JUPNiTnHclUrKJ1b~~yc4}GxZompu8BmpTV6b7;A~e%50F$dsMi9wa~Nzm}k9RT#y;=850Z;a)poG`_uzgL+{o zq+$dIf;R{7Do{$<+Ffn_{kj-qaFRj$IezidCc(^&C(=?nY=Top^9d1fbF%Hhc7B_% ze0Qu5lMqa7az}faH;`RBJmh2;1~}Mv%K^@v`Qm<{k|;vz9vI!jZFqvn^!%iRo|u|| z@k~$U&;vt`6Q192Q_*XvMpUH+B#6jj9p}*>;PIE%Nd6-xfR>0blj8CO== zC;LHO2mrvD3(XkEJ3CLZi{7PHen4x0VJE79jaj{ozc@-Lqo%ed36wSk`4B*7T^-&< zKhGuW2cz&pn=Ue19N+>b10jo;${>iVc9c%X1`Wu|7)sD(WQzKNBJXKFb?F7~HjLY` 
zyc2A3%G~$yW1&pXfd1!fo7g;8^ZHjc7JLi3n0(Oxqn1$Vc1a_@J)IeRz<_`%LH-A{K_b(g7<*))d)EzN2P+<%;{+pe152J8%UF?@j;(N7mPq%|hr!8LNVmmCc-#jTAd#(wD^u6LxaC#p)Hu zH^~M~)FRovk*buy8DI(@*}H9re!a3ZO}^V5%azq|aF#_3K3y9s_%Vemf`Ly6f>DtM zO#W7~eA-ybK2H#l;X=B0p@!t!vo}YEi?dt%@dP?CO4*<@${;zl5rqe{6Rw~ItcXUu~lZqepuiSy;4@GJA%x5 z9RD@X8-AWx>?+^j8?yRvh6Mun#TNsyy|)?`xA(S534KiT9Y<)uGm8H;_1{#d0#;9i zK1LJ)RiHm){dbu&>BNxvLpLK}xQ46Q50&~8vaq7kn3bH3SeN&k_T$YGyPYDUz@uGF zXt5<2q&6cSAgOG+t}s4-u58YSUA5$TeQg{GP)O|08e3SuEOu#qT-ir80guezuhc`uwe@`nX+3Ce0^HU=kq426 zC)uPJqeZDr2?OH4{5&YeHdhVHC+^AVnKHYls>QAkGVS{e6`;jn|6v4@PpaM@1O^%0 z7fY&T?mi6ywR8+aXV%f`Aq*g0(vJJ5*t!0LM`4Hn#WH_ijLq~<$o@{n`F<(LR7hs8 zE`xiNmb%UaR{NL*^1iO%-0jaJ&Em3$87YJimRjCeJMa62a}OpJQ3(NfP9W>%vX%T{ zHS@&~cnu-QEBnHwN)|NI@~6>x>dq*N&>Hs_AJS&`#!J>ji^H3B@z^6EmWjs8V53AiMaD!QF4 zs;yxH#XpGPsGtD%lt>b=odZ-e(RF!J>mhX#egF7JAgHGvLCH`+KL_+3!5&c=(`!e4 zYvL2?wUx-Wo41_)^|Duck%Q@!UyQQ}%UQNbYkqzY9a%$qm`@W#7ML9op*-i~xlV8GBNInrIiGg!PxMWTB#_=08j;PW4+K=R4O+a5KZzD;NbaC=Euo`Tzmos_0h_tvS5Y=$TEo~(0FnacDqu3_x%21XyKCF zF!Ho;u|-gXRAday6Rc>mQsN(Aw7k%?^BjtXs53Su@nR^po!oq(cX2{i<@Vxu;*0g?wHbSb(MmC?Ptp}s+ zs%|$nn7mjQfW@x)LXSQPtn_u~?S+#bxn%4WD_}SplP}=^qQbPWfIDSEVV%s!WWXz9HM*LZh7?JU-`k&WD&5 zz^rWe8nDS2%qxPE4I?&A%>H_d`Jb{zXDYWleO4HN!EQ7p!4$BN$stLjk%DWz#zt-r z0d=w_@0)sO9^yaex{iT~S&o}Aq8K(Kj~a_nx#z{@p%#gdu#Ra-jjlgaxA7xw*p4(S zgY4H&RSIfkxh2Dt;U?a;#XrBBC6JpG$dYBwf10|LBK+N~Stb_05YQJ zVpqWahq4<=|1vr7DT?UtX@Y~h8ev^{sK+-#_1c*T^}@>y_zal-K70rU^bAe$;6QxO z6y85eDJ!dHJR?wH<@!n#5M!jn{MQ z9eT(G5Tgd7xXl&t@xHY$D z{9{^T0^9U@Z|Yv9+2`pxl9M~%_MdjR`cPi~@L(4ht$+f24{b#Of5=b&2FiN2`60o; zfFZCzEE{#7ZSQ}wPtTMoPf@C|M3?Wm@;DCugmKyX>W;&~NlkNilujI0&1|ME+_~}T zhc_KwS;=JQ>Ly#{)5SaSelZ1FBG>yurE{mCY~;S{3cFba4DljH7Q9@o81OwT@A?aLD!c9rwiyd9$c!wpMo2ht5T z_m&MRYE&VaWB1g#0c2r6%H;P1hKSZsNM6I%9c)2cUpDNkH?qZk!{(c1d7lEaA>VuIHUD_-Hz=^Q=}{z!|gBB5DaA!@aREf zboz@8iq1?_@@|ncsqsesIS7Q7nNnOp^aQ}S2?Gl`?srt2DTUx z5KL;E1?5O(&>!6uvQb`}E*wtVqfhZWgYo9@3y>QYYRXo98;1gCd>C)i4w4260*-5jh`=wDPw7l{rs*&m=fE|UT9cHWwYQHl3XzT=_!Nib!V5P>-5R-~c 
zK5SU`Vo6zPr^KGN2k2lk&!jCv`!;9#Y$`CBM59(cVRP9Knu*)cS%$6t0E+ipL8I0! z{Lc&8r^2i~4%T{vO#JvDhR;-B0MG+S1mLYbn?50`@^s_YzNIr$!1ne2U@S#|#ZW?H ztFtffbdMIrOu%-#@5B`SDc+y&l%$1Y2eq?FhyT{GkZ0g` zKh{R}Bq6k>you8*LP@}2l_GRE&DC3EK~4$aO7IQV9eZ;^RFR47ej)zQ!6m4^C1cI& zJws#Ws07B-@cQ;SU;YTF!5nsfiwm2cp*1|h$4P`o$-CVCx66adAbJ}ZAO_@|H9sHz z!to&!XYug`f4s*@Ufx1_P0yT5a$)vey_o^zGD?_MC=Eh*D2Dsn%T!-S$a4xC`M+Rq z(C@iI-=w3zEVL{FR9eGttU8mov%bbp8S8-Iot%xXeN5l3TQ5*$g5X7HFS#vaXPJR!IEQ}dMYcvzSrhOfsL$g| z4GFUpYrK>_T-sWnBRRI5s{_Stpv2f9a_#UR!5fKmyj z1fIQ+?uml>(tmMEJ7n>@aUBBkhTY$8IzcTydosbj(?`GELx`3_Fsb>E+5}n=9%Ep7cqhyquI+jtn0}h7I~>9aonJ z2VhDpYoGfS^icDzxBx*fpl8SVI`Ww?Y9}DqZU`9T8<2dz@f_FqV@%_R=bV7IpAj2T z<4IuSMQbyP1+9RE#NN-km@7kI$c|8uLwmQRzE%RA$(jC>kgUpNvWtxQ6a}W1FWniI zCFLYv6}(t-XHO0)?T^3>5e1yo*Pr9ep|`{=zb4yh6-A^APtohci38!_3}w^}kTeD& z$|&au4NZN%yUM~_u?1u4E`j(tKqc|+y&m`M(M3?Sm(mWFtT9~NoTU`pS+?Mu9Sr{O zyVBNvRFPt`eQn?P0ZwT*{RNP)TG+qPKmog`dTCCWG{XM@yRSP0^4m|6C_-}O_8x>s z(gKs*Ko)Y55Q5|Fg}_*_EVYv$*urI^6w2^C6D=cybD z=sdC~W);2hsP!8bI8QDNQybU_#DO;oy)I5W_Xr3Q1@GN9*O-7#RilX-1d%j_Q zf4L^~p$v$szB8N{AO_}nX#f{Bfk+#9i7k6ptKW*|;bj=}m6JL~)W8}*lM+BVEO4J& zBMfw?z{qxZPba*Xr~`luZp`}+96IMa>5^yg()0p*TLVjpDYlFYq^_tg+@40qsW$$W zM+wg=-)7^E+2wTCnyc_G=&l8wn=k-kn9#xHm-@ZZxnO@htMB^ghaYAa9gw@F)%>p$ z-7jZv~f2_}sV_Q&8Jmdtq9yI@QDp}v;KZNuDExl(!eo3=+1GU_8h8yNOU+XbX z+FV6TjTuekS8^;xo+re0;9DgJa`r2ia@XbT!5YcoOD~UA35K7om z*Sb7cp?+Nyjn3zHmV#*}c_JbrhNb#(Bm%Y3T8E-wQC7!U(G7jnB>Rx^f`3;Mk25$m zOaxysMuvJE<*8UhCmIhGb2{?v=R}-EpA{3%3Dap6>QY-}swfW2jD)$-H!cy77bi1z zW}KkrtN;y>#1cq=&#<8kYFjf}98_w>XLA z&wNW*;ehBoo+qk~Iqj^)`tK?`7bTD^Y1ln4E5_IV+?TM0qhARe;nI=W2Z~@IY6O~b z9iqB*oBMy>tPDj^_?!nTa8pD?*^J_DX=}t)YzbdmiA z`=*SM=oXWY5e5QjB7{lF{NZzEI|FM>+&N({qL^-$<;&WTP%}q~;H$y-@{_;<+acfP z0(@fe$RHeD{ftbtJ$!)5a0)vi!w~A|@BhDUZJiOb7D~~K8Vl{7Bjron$-z9t?^`F< zI`82gbl=FR$MME*pG4D{y-^KIhzq-~+5x_}>NE6veU6nEtX+ zO3$>+CsL_b6`U71^N7tgVg2|DMj#;QKTW~n?u^z)jrpC2CiPwPnuZ`FjDHpViCMN5 z1AR1Qe9y}aYmoN4N|PMh*ZN8Ym%@ZmQRL){s-hhQ$aYQ@M`J&t$WV>Uj{N5HStZ(? 
z{;bb(S7Du~6V?@9@lyf^vuEXuwVhMrXJ?O0^5y7xkmd_9iW90kV6AAVDRmNP4P!wP zGus>+kmn~5XioQgXzlW9O>0hi&N(P zXq(oghmh(bBhJd`N#2|H7OY^|S+^n_3a58Yg7(M(<= zTiQijEy{#n2uUhK3`U~(R3QE^-8)^RG%c z_Chbxi)7uU69g0Un(T`fiZdqr8oUHvN}XTNCK7IwlBK4*#Jyjx;w{47!w0gj0uipY|HkRv?Yp! z@G_*d%$ZaMBA-Sr=v75rCtWEl?`F>toygGR?4T)J z=V9-8mn(xwf?tqNkIVp%H}|t>GL;LGB0eMDzz`>lV2|Yba5C11sQFv*|;DK@Mys)8|e4R3t?y)!g7^fWY~ z90jvNOBJoz&1l~eVR12;68nl476!t|-kw&SNqPbjS;$-c@JLHSww<^&(Y{7eT9V@@ z%WSN)ZHyr4>4E&1N*a1EC*HgHbCew$uh&l0x3E1HukG+jL{NoZ&C>iW$tTMZtmNQt zz&cxsbvHcAx4FY`H^qML?XuRYdv-L~8R{LlKp`&7{zF&8`jv#f5e?{*zkq9dkN@ht z5c8HKYX75(lWhA)QptnAnjP45O%L5t|S=1P%v{)iVbYk$d zdAB*UJ_kOU*xRnMB$1C+x3pNt^f6Brn+k)B2g^Vvu5EMuP1PROGPmjmZb|-len|w= zh+#AS@MILj7M!zTND@?qz)If?XLC`}sX814?gWes%mK5lCti>Qwqx%|P@P%&Dpla4 zNbJvbnQuShF#u!d@wF0!ADm6jaM0U7$LahuLMnPnzMS}PKOI96V!5=$_d2Ck8%*!& z&Z7T4G=63@-Eb#bOsZLCq_7E%I6JQ?>rh2!tY{dwKo1sE@{5T|1dnS^Acu8jbzWnq zKzf#R&>X33cQ$RJ`YM$lu-?wOYh601&s^NX`ldAfpGz7qKnS%)&|eU>|1%tFN*`Qc zxW7M%KMfqo-C#>QBE%(&0@w;iCv+b6U{)+;K_ zbdjMLmd144OdcG-K@}S^R{L8Vy8SbLEFqnT_!Wcw4CxLWoVvBeJ56k0zS!C_(TYl6 zp3YaHeh7<&Bl0xAX)YiG*~||=ih#^`wpV5z1?j%qBZ%T21ARQx(seYbae1^>Gdzo|%_($eX@eHnrJ_xzC7ACjLm5R&&7 zB$!AFJmydmN!+C0Ok-5sz#~*PhM2aIL{`%Qu2{c&YWt0stI-aTge1HT`oGT%sCf4i z%$57=qqpyC9!Vgo1F~uS^Xr_WHkp&A$e5q7m?9>h?q}k-PYomC^M-@A4f^prUM$l` z?YHX{vL|A3GV@e9CLlA636V-Mhvu&rqaa~I!tC3G@A?D!r>_!OmElf&lMYr_Ol-wj z1eSc>kG&ECo*ryb)dB%gUA&NrmUlhGa%C1KJ;lBH)L&>_j*{Ui?0IGxRvIPe^kL}3 zMPyriX))Nn)=9!e-H0nf+c2h=+J>Wab4$J((fvRO1*4(GbYzw8KSIbS%At`Exc~C$ zVt*;&t^XqY@2ZijRv6JSPwTAa4rW2AG-d-UgFIYO-_#`e^#Ev^1v4R~^LPpSD>tv) zg0vS;O|c3Bcsy^Ji0H^!5*@@(8k%~fz`#;@<^LY=#_W;#Th3ng9vx7Dq+*J5A)yp# zP~c@2iFBw)+rVe|1j%KMeahkEt5!np^SxWT-$iq^@(YKnl3EEi_Yj6N)hk#s@|fA% zHKYT6{%kf7TkVB8$pbCYcHy{=L883VRXS`&sd+lx z7P?s&b#2o7;%hjzaDeLxaOr$w#?9Y2>ODaOi)36Y3>bz`2lpcpV#e(KjFgaRB00y9 z?4mjS0F@=yPk^=XmFV6I%f;|DT*d6<3=5WE>DRV-7J^G#W?&-yyhW4dHeA(v3$=r} z8*GeBNtQ~cO|!F=PVBBV zgelyfwE;f`=N5Jt5Vqr^=WX+y6+r07(E)?Lppr?d!nST%(v2l^CJ5)XWP6qJu_MMF 
z%7Hs7^U_2Z)=S~J9O6Iye;ZjpCcu=NC_rQrlIZ)FY$cpKvjlc$|Gn4skfD=;>3!R} zwj*vkDE=foOFmuWfyX*Ct{K>Qh%)$D@J!?%DbGu%=I(Ts;*@&WOixR zr*;WG!h`Ec>4>_tg#;>v72i%I%^p(~c%6&mAq9J!ooi(*=o!qdi=gNvkkk=YKAHo;QY5dr{kJfdVkk;r4X_OQcRfQe#-+lPWY*GGG(%a$ zPApBY(^nm^hTQsE-P@Bb+;82$mLYS-XP$K%N{g=PXA?_qCSJGAxM)&K=ST6>jeF$V zc^v`j=$Ey&NrJrDGt9dw5RD$a6`>r)0sq$a<52tNZtBM`2+3GViO5?-?y9Kp(0nSz z$iFGy7lOny-EZN9sgZS-(O+c1a(ThTpfV;eJs{u$|BR{_;K-_(X4t3L&bS;=tlCYl zU{#FM^h!5s+0U&yHpa+sTC(8Nf%JI2Ub^KOMs8Ity?7#Kl;-1eSe@GnvNeBi-=TR2 zkofxb%SF-C>D*K~`3?DOxL zE~uaE{|ZZW+J!8k#!_#kgwLj)mHBieV?$xDV1Z^khn=2?i$6s7ZFF4|2JIsYF3m>5 zd*f$${W!MK>YAc%`K-Y7?2kZDXwIy2!N{3LWi-oqQ(bTtiVIa<@LyG~?1cA6P8|>o zN14^%NXUz}$+LzM7ST)hH?DPJk!|M4{6-zxs)B;qLsYQf;nd*SvjKDF_}y9~4g%hh zvf0clcONbtV&h*<0pg07<5U?7&u{lVCC@REBG2QQ-Hn{j-w&4 zbiKzu?O6iH5QJ@-UWc}9JOvz5=0}9G2i`f;)@z{1>BcCnl}`#Q;w|J@Ef6El1w5S08a*Qqj=;d&?Nu zqtI`o$wjE93_KPM(^9S}$PFWa^#Hq8O1Ko4wW>8XqHb=kL`#?gaQpeB2oOE`@h~M( zfgOYtKh95`J#zyRQ~I|&ez+2N3gJ$16@G5+;R0HqdRMV*7)~4kehdM(g+pI%IO3lYNWKc zYw+lV=hMvb>ICnEb50;W)DyWzB0AP(Oi07~wRRFNaFz(l3(qjlM~hdwS*p zGts)iG=N0(rF47yZtlZSW$eN}BrHQKm6R9K*w0U?WDV6)EBl?Rr#q~6Al;XvLZL#T zwG2hkh}gQSGCj*oM@SZo#V`R{OO|EKLkaYe5m)_|!iBzQiio{KQzLs1`!@cBM=si` zyhCz0Ai3fICDFtRGO@37TlNOi7KMRNL0gAAHV6KU8}kMFPj5Sh4|j9;mlm}Fn}z%e z`JZjQ1IKX>{cz=GARJvB%|@;(47bqH;w^{F!le76boaB>C3& z7&;V(p1#pC`?x;-jH^QrxN%c4zO!?*#| zHx$7%2wXY>^b#1VR+4ISJinuaNWxc7Bo&iOvA@>m>nDp{J2up&&2SRe6LC-|7IsdT zbu~8uI~9QbV)m@Znw3mi$&FLGBx-u|Ub=RDO&U0949An-vz#W3eP2s% zTJJt43u}L1#bCQu3gXY+DH@9_%x*f24ja3D&yY&eDh<|gyaP*BzUt33JwDeE-o+sB z>c0{zf^CQU*3|x$L3b|ktX%APwBOI+6V2dxa6}UKeqS@J4lL2pf&858hluLV`&=?N zWmrZeiFkQ^xm5BkxXbj&)Gf;bB0P*JkSqB^LtJlhZQ8>I>pwK@hW4*ekwOXt z*f7O~(V-n1X6ZKAjaoE@U6*Eb<>kdJ9g7aGp4-}KdDew4TDnQ^HgD_1@JTHL)pw%dY;1)hV<6=ZIWu`TH&WRu==43Sn$C&z&NhU zA13K}ueFi4lpdDP0bRGc^Bp{Wt1+R{Y}jBo+SBdlj3LoYk8FLCwFw5|RROGL?Q-jg&wjlkSoB!!Y)<7D{G({v_!nDwd{eAdMBJljvzhh6{_9c# zyT6tdwmvWfAFaYo?z6J%@@gcwV)eFZKdaG>2w5#OElBX$vFZI$t*T3+7}FVXTv*Td 
zDTBhvQd+EM^0WG-R(+kuE~=GH&`T?{#0!u}p6jShsFIW?(m||M=jY@gy|*%Y4QY=E z{_I-%-3M5g69NF`ECjj-p4n$XiN!+dqmE0cJ>Ej!? zF*ge$b5M`}glV5O3weZ%sA{mt)|Ka_C*N>A;rv-kkN(e6ny%RgL^N|}Py6<2GlFk|Rr%af~@w{U#z?8rc%7w*0IWLSFy0n*D~)Y@{+ypwy0TrG^*_#z(A zC3wj~C$NUqf{a*`Hmo9;Hk-HhiVhn{^K+R05QD1a&)`w2oc=ZzM%_|0(fubPt^TDW z6YJL>DqT73E9JaD38 zG=+sDAfx8P7l{;>x6b>FIqdJhQrBKaMgVB0!|e0#9!5(QOYVo2vMj&###?#VM*Q}p za=LWM-qR5Bukc)viTPX0KXu0=uKrj(p&sv;8K(B1ON(Mj4~vJ z<+b8_1LLf@=t#k6T2)!-;sO4G!4<-9hz9$4FvP!a_@PLfsdgS<(!DltL7MDeoV2GX zZ+2G54|xpx0!Q_BigF_4eRvTGcMiN%Ht*s>qy`rY+HD@J;fD)pKsl~ zz{5b`xlFR*i7Uoaz=x4X=`#aXjth+t0_$gyvI1d1t}*V#prIH`Z%$F976-xyB0zuq z%8nzGIg^L6@%)RmDH|wAOv;3iN?dVxWMsSh{jKBbxx%~imCf~RO&IWp>=6P}#ALyj z!^Fg9wta;<$YM?X#?&5Id1{>%GZaD=)FUJ+$}iadP=XsH(d(Ed6zyfLqr)%gcS$M{ zOBg~SUHN#8Nc_=94izK}+|ssvYu~``%`*)>{oa({2X$ZQr$zd$a3u z3&QUQ*%Av!)|EI=#W4UQ7{!DEr6uvdN`k&O$L1hqGixM`_RXkdgw<$@!>rNYh?OAC zZ(Dt`)f<8m(uqf|>tNRBbWLFzn&BJ6p^wuS96vi#H%>^YcO@wfzvbGF)})>@MPq1< zps}V_3ar*V46EQAi3}7<$^j}1Z<4emb|Mbv4_GZ$C~wjQ#?{=!`i}E|Y``CjHt@b1 zatM$YtN5Sk2+)g{W_}=ucR;M+E9iB{*?C4pW-x7L`%|`Tk$f&0w1#3l>mI+VBBnJ5 zuVfryUi~#qfka%7HHW0?o#_jBE<|MI1sX2IL?kbsaqE`U^DbTW?NKh|E+*1?T6JFSv&;4Hnca7fQc_cm zAW1C8T~RZI70t_Vr2rZP%w(QYffW~~N=ueB3eazR`Vu{k0N)@dUb)W*qu#eOrn=sa zCUK7Nu<4W=6yL%OQo8T-n{k=7q92ttmGCBD>aVL-=#HZ)2C|67r`WfoTOC)SMay26 zza&G22%a24e*aTiUGj^7LRyG5Ji}0qGHTvdDOCZ-*)S-zB-RqG z#3w^QkgDw$q^J6am6dHMaaOshiTNr<9u83?$PhP%s&^ap<9TS)E=XekO8f<%|h~p(r2e#&-`?QMhSEjv{0p1^r zh%8$3=?vH@rgcFD%xg_D8ZYjSBaNA*Etj`(c5 zffN&v(_j4a`>`f%(m6F{h2dF!@1tj+%*Pu~0GKbcy}Jsq}rj|Lk_R@@ii9pFZ@jK_x zX4j8DTP#tnjd(LYyP(o@oY;ciaAmq-iQBGIK`np~OK|1#M zoNKo}-U7MB*O_WEIH}3;nVH`Luc|R5SaEAL&!8ZLqghnVCCPcI!+Lu~kV5-QjIB8c z#Kpzc=~ek*Vr4MJBhh;S(Y9fti<13F4_HI{Uuxw-k)k>5LH?$j9n!$)9#zF}Yw zSUusOZ3{U<0;i2M>un%c98bCo4JH*~ig5J%xfur@n$hWr425F1HS5j;>`vQ+DrQK@ z$I~ZQ+s)YZ?QUb%tKS5+ragFwM~hv8c?xg^)8X`E<9hb^A3pCIx?cMxP{Lbp4;u!( zh0@yY)09$5s~5NW%u=?#0u3ed1N$fD3_!-!jf2gdT6Fwu>&qB}$CmQz=3wWcWr7;u 
zDG?Z3q7<0Z;_q@v5TJq#j5Q=i!EoYvj!N7RxHJcSsWG-J%5?oh@jrF_xL+{+`;Oz{wu0X+&6T+F`8Ie_qXL5UU9mn|6-&*< zS$$rX4ht`rTyKwj37-r#6q)fUPnXdiB7xHxm%m7`k2;yzyQ3|QsXSvQB*-Qfrhj#k;vEaJHSnm9{*!EMlGiTKQZC5}x)b*W9~R{s=MXZ1R_rrC;wfbb>3Q`#qkPab-8q7sGW5 z3dj0Dpv!Wbz>!lWo7*_blzp)?K2h8EZZc+){IpQf9NNS^j(O|E?CB_2JoR}nt*lO` zsJWzQN<}WMf;uC<;jY3FC<=zEk+J4f+w6H)pDnQ|(JI!Zd=r9Z88f?S52GVCFLga` zCbh`|W1>mIjSr0zl6JcdBcKG3LquA21dc6_FP|V8L_jr;i zHlvyw=eH6JptvfKjN6XrdPT075A8AUKfG~WeoQh+HyZkK_2eDgH)KThOPf(5nsB7X z@5KuVhCD=EJOf&eh^Q%eU&=d0y+Gi*wo^oY__BHTA!7fW-MA@Rg=)3_d^r}Jh}dh# z(MyZt`4bLMmwcU{7us8uz!ZPJyO_#$dxMKxogk>`3Z1vy5BcC5YCM|7<8fBcwEI_^ zKnFdVl-AUIU$+HHhW7Qj`LF#=v9h`I@rWfGhsIpvi zDU4pjRGCqWYxS^8^mxF9Ph=8ZN7Q^9!?I7}44Z~pq7lQ4#iWgU26ky^CT3L4kT9WD zy<4N0HdaT9vBC@&*D?PxZGA#3MNm}RKe)`%T<~uDCNCC5vH` z;jtI{u+(wPoT)}+`lnRHl!W21*8pfsoj`m+I1QSDVeEwvV=GUWsm!g?n8oRof++h0zE-QDHZ|LIo=QCB!@MJF? zb(8E4lb;#v=ieO>ki6x)9KtALtpsntt5U03u+u*YgpL;GS@#pOt~sG@#RdsaY4B%H z(b6fln4%bF(J+FLI4L0l2100QS-II}XP+`r1t}&1d8A=nrox#};$d?t3NlePhZGDX zjI?E)I}kSvzE42MV;tE!4G#q|T)*b1$DA(9z?j#&9X{|op9VN2fTuM;$RF^|o;oZj zEe$6X4O7rllkdKeL=^{qrCNbiF&{Z#*kjm)juZmdjDXDm2LV%n7DN$1@^hGV+dfT! 
z1`|NqF?+1*NDvb2f`C|L#+6+X5yu=zc~`_N5?znXS5Q*w{n-=bz`@}P=gbgE1{0KL zqSBI2^T-RY&LE90QBX&j15!_chYoV65(6p^>xC}Ad62~hhm=6SIdbgwTPmQB~oGx8SJ=D=CV@>0m1)p=Q&^5ViavFM~@!{FiJ zq3tlgxZL4^(e8GZUtganN5G+=%JFkw;Kwu^gn@>{Le=dvQo56PaayCsoS0KCHl1iP zl#7Zjzz%9T3}oJPa>oE9x~LX;<}g#eTMi9!GSaxg2DDLHgSqmU<@My!vkG^V-qLtR z(aX$Q+pXh!3%a1*?`$BPQo0(xyLcuocPD>`8Cu)a9fp4v?RajP4-P!mZZtX6(y+gB zFQkwQ5cG3{CFtTR<#~I=gH$5hs(iHPy!bGK1bR_QorO*tmD5$#fgK!9hd~nzE+AVm zXDG{JyUp@#eE-B~0Kog2z8_DWakXaY>F$QBk6&XeB*FW!99F_Pf37(1?W#3jE_5v_ zsTEC%h#?b~VpFwT(!iV$!f8m*p}KuS{sYmA=Vnw5oX)MR%%kz>y!W%4r!9-v~2{D4wOy z42Wg)|6T?}6A2QA^)2imh^i*1>|%iz2#eHe+8Zzb6|4;|kSt%Scc5-+LY7Kgve;S@ z5N0Cy%)?{v&3aNXU-4-8WJb8u6}R0d$>O%bS3Hp>%tcsSevJF^GUW#s5~zK5XGOzs z7B+P#nn;o3bFfJ$;2e&~*pZe{AOVS>ni-=)r*?TL$Uj+>s?$@HsuxSB>VEel)&1d& z^VfbeFD|bwp>9p8ApaGv7N;GA{+U|Eub0WybQU8j934kmPzgrEvaF~m#& ztCCYc9{|;zN#9BnTz+?ao+$UlYZMuTNAe92am%#^L^WN{2rm!k1KY!fYr*jJJTHVW zNcdtjk@h5XtYayR+Y^hr1#QI$VSPPo9wm;0>1GBE%PZk56NcSDOZ{}4Bu~R67~HqL zH3wb7W_rq%<8`o!M97ym#?*x#G#ySObGqJ;six-s}Bemv`!6ZgZ~P3?*`!@f8+LAdwjd%JhD zHSJ`mL}X-Cy3;jnGBWP4U#?$l5-6un97k_30P-a^ZnmN(uTifH`vj|Dc^vV>Cx>X& z&3=Nj+T`!$wz9JFWy6c_Xc~*rulIJ&#(suKIAPNihL_||n_j6aEBKC1H)#S!*x!pm z`$ahKV_T%6zy2nU#qZnCayQgk%u?z4Jc-8=(+*kJ+4mNX(DOX-ZTOrpWKBwRUQO5; zxN)gB-+mWx|Imq5QAI}y^Vl+bVDs%2W9++;Q1l330TYStu!s81gGXn}9~hSbR|EmK#hIrd8ewSeBbsV~|gS4hO^Seo68! 
zDKg&o{tm=J`%&%!7L7UN5~7p1>x~-%^>$}JPl840EVr|{v>iE1?`1b~X@Q#T zJAs_Xk+2Na!YMu<_uG*ZS9V11K$7?$R*L+2)s}|Z1HbCI8=-HKkVfRZ=heIb-<18z zcd^W`N%;7dd>yZ;DwR6Txptduxd1T6OLctU)>7j4f|vXIcR}C%kl!`9o1iZ-IaRYu z_I|4;W;n$B=Gk8m!$)evNT-X^E5a&~ol8iP--oA$c2tRS?PC$3yS*k^+;sa=K>hH| zN@sll_G5k6@AC^;6;sB)3>fygbI2k!`tX=nUiG3~Lo`#s%ZqnP@S0n|?m2Lscv)>I z>4DJ{$h(h&I!-KF7=9+RiEaR+1Q2JVdgofQK5{$4PQ3G?`7pmD5?gQ(=#HrBXE?*> zEf0cejc_>(Q6tGX<=6+c90-=FrO8A?e<&(v4V5!-+Fy1jr2N=gmuBrFF$#?*elN26 z^CE$f>wC@e;eBieoQRH2P98u)TBD%5 z`iW>~I?Qiu@a_bc+mgL045ile?h8dwzed!gMP$j(TDJ@0PW5-Z5=O47fn!om&eWn@ z#_cOX0hxj6f+cH?XbEz!hf~w_l_vG^l<+2r*(ft7k{6u!7s8MCA;HV1x8#(R9mjUy zO(XuxZ`2QBR4<}=2k>v<19M>)T~j@fF9E~B`HLqeZ*5JLTJ52kcE4g~eBUlTe=`i- zzdB7_E}YctKD;nzP7K$NPZ9wLg+7Uw>zS#N5&8BvrlB?CAN$T6^*dusr8O0YK@Xez zmwA0lXR5KupQmrFQK169>Q(Jr=7}bM7U<7eZ$}kR7z~Bsu+^5garw%*?nqakPl5nJ z+>$x;ehK$YP$w{-X7BK@!ERF*2>t$_J)-LE2Xv#Oqo`0cjEusH#_#v3)alFyWaJVt z*68@i%BrfD=c{%>V6msxKz*be7ZxY*>hCZ3hYSioMnWWy#Lx3oQP=NG1yj7ow#DhP z8@$**UzW=&mBmwL|Ec}FHj zNO2?kot=MaE2}Dt(U)U}01Y2dlt$8z0oePiz-Kx&Z)pby793qi+I$*tG{gcMEsmR| z&!6`%xBJqfCC0H4VNp)a?nZxDuhyrZbh(jhXlSU@Q#ISIj+*5%3ZsN@YkDO6Plc_X znVuX>5c0Uk=dL*ksp=lFl+J z%J=KS!VeJXl5V8C8zcqk29fUW?vie#LqO^7?v(EClJ2he=Ks2uesIAsGt6_IbMJHQ zeJG_PLS`~N9bp1U`XY(syuBe*RQl%|4c>dVXQivE+AJ(ASgzKDUXF3U=k|Q!U);B6 z@wk!n-1Fd+msg+PT|jd~y1QG_dFh#Yefg5+T~<=CzN=}og*WFNV4%YO;i}ILLrmOf zf1meC0D@BG+(wa0Z{bMbIFzWJ$YtlL)@97-C5T{)*_4EL?Z=?LJV7{#0>v=yzqHnD@`yAL3iVqNTl5`i~3`qmEV|KbYDF z=b4kL$=Knp41U`3%5!6BA_)yuW_3&wTwNRrKX{yh-<#%)W$=z*2v%~Y{C*`xENY? 
z*PwC7$WsLl8tV6zgdz}r6KBF+3X+{x#0b(QmQYR1P?F8)y!c}=-+s$vTut5hK2mWxfNTkO^RXT>Pmx0jzs$Bgv-%(UtVul&YVx@+gs6< zF1Oa60y;xI?SKyJ$4oJ-Q3q{Z(&Z%ltHEVoSTKQXYrq*09`Urm_(F> z6!vJTIaq8jeaJ}7GxF#NA2d6Am4bZ3;fzm7*&@N`kv3YvhD^RW752SwtDeW~K+|-2 z-Oi7|Zy_w+-Wq)+Wj|S4|c!f&8dK{Rvg`8&a?LjU2c0d>lw0`U4H=`RunQE z?-TXGqj>dFz={czwCV zKW)3@h^Lg>_;I=h&cGrC#ibjioZ4Vcv1V!yL5Ttv#LQXR1QLgn?oH2whXu@#G!_=H zKZTAZGW@Bk@-J@lMg+Bno_2Ss`NPQQ=q@O8+xL5!M*qgMm)-l3aX=HlAs-?q@DO5> z^};FC-oBGIX`+sXT@BFDyVDB{h6fN|mQJ|i@iR4}%*FfzbCQs5Dy~p|< zIAUyYn#s?1T`zcf)&3%6CrQ2bGJ5+>e-fO>qwwkXE@;d3!B3oT_Q3LazKpkAZ3+FT zsp5gR1C5oL`}F4=W@Y4=Z86PXSi zqm_}%!+KD|>`se|i>>Ue)`nxdgY6pAv40Pv4mi5Qw90ktn=5y`M;g?nYt zZ7uyT`C8h7oAAFGowDp-(p;amE@29}pzv}kn=%~w+sNfs=$LQz^yIp& zHd3Z1wrWP7KQ66yPKj;$kX0V$e#HdXA&kKQ6sX8DgswM=M+;V7 zGvBa~dYWvBzkU{HGV>8yY-&mmK6Oo!|8=OA_k5`)1r)Y)Z;d_76nk|Nd>0k3gL5v; zdtNoY-kZHgLH*v~=BdRz)1&V`32Vzw6oM~P_mUbFY?nvfL-%((Bh;oT97rccyI6Cr ze_MUpW0t#|t-mUHvO3Iq@XGy;%d{^afl{o66;&Xn8?nUw5}G2bDDz4H~)M z@%ig^ZiPify@u8{enmac3%HGuS=Jw|4&-YRx8xU}7TiYXsS^q$YZB&ZN{cjWo5)lW zLNaIZ%fD(K@+cRw_*AJm9>~O8c}Q=vt>OJZAm}Or_h`2%aq$S;5dcrQ-XTXrjE|24 zQbljKWC2_v8AeiqJ6++7dc(<;bXrXSpKv&V#;WpMP@FBh4|5phkA8lM~`E%g6}m-|Ez_tngY=6?j_bXK1+7w{CuxrNe~J zmJiV*6SsXH&!#_jPyOOIc4%}r5WDO-WOa3@^Mh_Ez4jkIM~UW8QWj%=Q&UqfT-Vk) zTWO;8aq}m|s9v%H5J^v86Wz{~CkV{Gr(yL5QKF|qI3MhA63 z1Vv1i_ZXq=$&))=&@)TqJu)crj>}$AbgT|Hp`b|8(-)6?iN+bSs(+0sGU#`GeRfIl zSu{f+Vvdt%8hC%&9zM1Q4)Q04i&r@G{+;0R^vfAoevBs~p(U_U&7oSqc5m1$D#l^b z)g1~f%%K|CV7iv2%$6@W=2pdkQs}X+C(4pnA%&cC+tV31TA&qC)5t}wI+f?N%Um2wBeek4e{q&l;Cmg5b%56ZX`c`zz95VZzVpo5F(>f$m*Z8 zD7l@kxHN26(W=`nnz6A>U+!RKkx8OC$YzF5n3|JO)b=>Aq!pHwY)_GAp&$h}?!0$l zdYy)*FPjrm2klC z?Cj7zI({&r;tfKipm;LRUG=jGrbHGZ`!%yyo~>BU!e??}Y{li{gtzRVBEr2mk?ER}p6eo}r* zhRW52m5lPIeP?|>dAcC#H=XVj*B5h|bUyd~W-W?Uwp^++fa&jTcqBg2P_(wX$*X)7b&i%~s5jlt~dVt$3lc5ID7dXYX?_Oy5JnpuBZS3D9A z%ooKI(S5o;`1{Ts_8Zr;IQ4NQdOv)G@IeU>5^JeZ1%xpW8vr%P#_r?(iFDmmDBL$! 
zu}mV!Hy6$cGU^vB0RESD0M~E8g2S${wEyq(Hcytr#dnSpAP$zd_t7%8_Y7-PH-Nqc z2ORQXB)j%5nLY1N^2U+}4WeH%$%2s$su#H$d<0odMs`cY_RjVxv(9D?RED|lF%lW> z0~HL!Sr316yYii{85n6;r`(y>O~)AF1WbIMsWiYZE?Eym&l@kV3CaD~&#w`ZGcS~m z6x?lSV#4Y=AivC@7T&;WzfJXUeFzW{I8Omais}?j9iK=w9q-W3#N#ZegmA&AVt%yc zO{o@b=Q8QCl-*wM4dG;rkSN!%jB$jNU)q*k%tZUzJf(d8KneA(xtgnj8hekt@%Z3)}(>$Yup)+=$UA|NV<4<9nIUwOp)#D86ox zy8X=bSx*mDuyId7JPLQ^lj49n9p-Mj=U1r}poK@^vZtn|;vV1~p!(&0iIP{aOE8-< zk^lZ(m>9*>fPmd#S6V>hY2&;J5+Y10EZJ)kUmN5u-kV>4mDO>DIh-vs+KVDccYYB5 zDkYUh;m<439|1!Kj$vGmXT%2mQAQoU1Ee9g@ppj0s|I~_YyFzALIS3XpNI3Q@jy(r zbUNFItg$uH**i~xJh?1M1qJ48)#n8|*V>agS7x@oZ(&=0u6fiU&*7h!0 z9RD#<$Ypj_$tB5aXbj#llW9I||K_HQ&&Y@z+XHeJlkqS%B!pmXEqh2@-Cm>YsW1wZ zs89rsKN%e@4yzu1;?LwP3t%qL>;3{}_+J{R>xO?^AL8)2Qu(9s&PJFr5yQbP)mi>& zYr~8m6mIu6!jA0Yu+g`!4<_mU0pk0;B~?_c(pXXr)@!0zVPX##^(*aPps^~?N~#x+ zv0`?ZuAQ7h9edB4AQVuVrn$89Jq8mFng1A0-tkvdr<(Y*)9t>nwsV9!lxL()@?3@y z9IYv2IC96!;tv1~jb&%M6MGtp(? zEmiQ*4LE&$hzL2XvlbrVCtFY|+-dtg02jUd!Z7Ntvllw$3)-q{6Qaw*SFP+pF|8uk z$9<=kmua=si?{g!x}esuR&;O5Z#BsdqRm*|RVfQk^jbc*Q+@B(1C?!zo+umRF_z2L zEXI(=*E)yHKW*PKc)K_KxR>w7_fwZ}!jX06r~g<_>A!*V+V?|69M+7rXZOv#Zm0KU z4r!R-Zw=lu*-+!nN#n{2#&r*C_x>OS56=9FJXOSsp3!zP33en*OXhYKi9hlB_1!-m z)nQ+VKSwJL1-*#UCT!)ygnT6t+1)U@^_tB)DU;kZa>i+fshJ13ljfh_iA71r5c1nc zWGvlo+IgTlCE@en%3}l#D&?KsWxf1uK5gkW$)YznOr&47zeIdmzahG~wrH1>abmZQ z&cb0eVKF^JdDDT9m+HWU%5AjD2ZM|=+#?x3-)05QkJ`NeGh(*5p##U0ganT~<5&s| zI1=HsnHiYeSow<5;S(zxgEGrC;*V6)+2wmeQcgyqoStdakmYzXBly6d zTgO;X*@M?=Qt+BlYH^}~f{H4kgM$k@bLXdjgh3|gODLS$Uu!R!v4aO^IB<7Vt%k0j zTF_TOM+*J`EfTmx;rFYsOP(uY(DV~F0+9VRd;XIQ>sbfW_z;CnnSm-{Jq6S_LsN7$ zQql$-Y6hC@-@h@6j~kljz_Hw0NN+04xpBu7gh5ZzNVr~J=G5+lPGk7=A3N7WLD;PL zzzmLWoE%s>GK2fGY>Hsk?V3k0ZJRSYsB$@Zd8tZR=|5IB{xMA~rtb1|SNTtvikjmF z_!F~1lc+65a0_8DV5g&Nz!xRRbH6>)IdTckzh*1AU0LMTi z-Yhx-jQYid{_ben7a<|N=>`*js<`<0Axa{jE>Qo$sTq7{pBy&m+QmY_dzNU1Dmyc2 zSy;%PMS}sYfOc)sh&(|Hjjz%Z?i2q#*6Qx?nW$G<(CUS$tYPOjMjf%Rcc?pU$_+-6 z5$omEM!M@REKC%Yd{CV&D=DRbWG?3j{cZt9sb6K9&*SnSx8ynBT|8r7^`)PX& 
zD@GIB!=6V8MgJ)SN&9}u{O>J!Jt>(+6k);MwNJ?eVG@<(>cj0f?U&1f|&ZH*rO)yFJDvMa>j<&V*2uvmn=lY`MnEiE8w}r%ur0_EU=b>MNhRI%`fy5cB5i=T<K!?uW25-WGH$)o3_#MH8j3E@BH+9E~3?Fwqu?1 zJho5tZVC)%Xv6?aJrMpLB5=}~+~Bh4wZZ^lzhP_J^&!4;;V9j?Pnhue`PB7h--iU2 z$?8Gc-rl~Th)0}M&4vjj=XXs)6~uoU7lU8Y8%s=V*^LNGgn?B3yt^evoz~&0MUG+-i24DEA^S|?ZNLbhsjp|f_rTThUO#Aq2`SzD* zGF8ecDsz_V)JRLJLLrt!TPm->D)__-y8!CUo?d|P_@)tV7B$G;F zFq!$wh{I_ODb_0nC4$;%ah1&MS#u`>re+Xo-p7aY1TdczNU;$r+-GQtq6oSz*<83N zprOMTCdDQGBZX*pux~i*7c{e%n?2A{M2YEUVMs!c?w|VP&z6;cHydsAANW;L8y%<4 zSgus1OpS4&SNHn+eirLh5YTREka2qlCYY#XKbUP^AiW=pgYQm#GL4>7*`+gCg8W2a z>S4tuU}65lNJ>!T_!Z%@<{plSH_PfiEb&PMB8*N+M`w)JrZsepC{&4TNDn}^-Qp4J zK$uy-+{t1#m^}*VmEi(Y-1QxxxeUaTA>bzmS{&rN52t4GC^VuwusP$otaK zs+Wp`Clod&sM*(Gw}r!IYP)p(d*xa&C;nU_2W$uo6xk@aW%gkb-DC|f6iGCC>XiR3 zg${PR;HnRBji;ZY-D3)a?i_Kz=i#+?(rYMzRg8jLWPT- zW^c)l05y}roGxXZ97@(d>Gnf8?(^Ao+4es_cfiMJDH>r%kcwABfWVVmij&2YW0P6C z12F^MzGVq?U~6R1JI_l|Ap#~ZX4_}4e2N60!vp)aCtWpRH}ZgikWli7%*m{ymaSss z=T+jzIs$_wwJ)}m>u5GxSnn6k?jdB8bDy^uG2+-|lEwYu@+oBRtJ@#xHd@4>kQU;x3C?`fkC51r|;?cn{81kbkP_VAiW&bObgW@s`z{enQKGT#?hyIT-n zs`Jx9*F=A|>ecNE-urru_FGe)$R*3bm)|rKdF52R1QIJkoXz+&G=IA-IX0d184^)N zDd57xE-QE~@1CzS@{pnYrBnG*&(XyJM)-mO|CPrIG~F-HfSi4V63~9)7RYMv?lG<( zrz}6r6fs2mwwr|f0tqOuF`_5(w}!lvZj0=wYF!0KJ<$}he|OfsasIsB_!+V?#r7VxOq@q zF2)kl@Ld*kyMqcJGXa7#Qo+DWAJC9xTtCz^mjo(!d z4(A~rHlAh@Ib6@o0Vj1qY3at8k9)dSv-3pXC7Z&w0V7USNj&pm4# z6|?>}6*y~1C3y}HF}8j77V;jR!n9JXcEjh+IOpdw)gUK?2@99v+FQQVXf=lhQ#wXF zo(u$RCY0#@k^XQ&!}HBsYN~$?RQ-qNtR6R^=}bm&uO2UM?@pc<#sY(SqU_WVf?m1kh`~19*XRFzdfaqRi^B zsTM{D~hS*=*;6=U5S?^=>nOoD^h&&JXtL9Y0orlb38Md_WFP*D`uLT#nLl|@tq z$gD{HivZo_`E-#OxFTEFsf&W@t3q_T@rP0^@At4FR6Z8rtD*xTAdSqxFbWKL_BpvY zjd@I3qm68=w_0Dpa?E@y`|F?WWwH_-`wVR$Mj-=_hHVdzzmdqq+X^jS7B`SN^-z{v z|3ihrvBIjGGXa1IXO&+@ufHrbJuq#VfYe9tp@VKf0De{?cqbx`WxqQ|!ECk4G(7vS z{972L2ZtIU01ofJ8k95uCRV-Cm9rX4oC0c5unhg?DuL#qqEM7nUx!AU9$Z97!I9}L zmcrT7aw}9aCq5VO_65aO5osJKuyH!cLuau8ARa*emrFaPF=SN<3M>>gJ-zW&vR=ve zO2TITL??$FGFhJwCy~HQ;L_$oCii?j=K0!p%Hn#4vTV(H4}G8R( 
z92_hztDY~H%hg8<&1vznsR7-wVsB{^oY5&@lmu>H3LLt`Ta-~->cz?IXyfzqz-mP1 z=i5FKsMK+qzxhuAH}$|z;wWmlg|DpfCe3x(`i~Bc}Y#jV0;rw zNfLqvAV0F5o$1->@o&uupCHQTr0zGmxzd)y3;(R{8*ONqR6Fb;yS}oJPdNpusq0cXEiK-@LxQ zVav?avSY{DZd1}>zDAEs$74(X0v_&7oCpdEdiqCsYd_JOq|1c;Z5dW3jXB_S#TNbb zVNYqJ@r7bKkmU1_8SD9fxFkfQm zK@~%d?w0{?qjfzVsx41Oy2Gm5^tAH%^xDhc5l-(U|0hU*^f598p?2hfwjq#jIf0PQTq`p+9v6|^x>jp&& zAtONQxih_P%);Qpxjg@&i1qe99r*==elx|DPrC$@4DtF}nUELEJuUZ%0F?|AfKSbs z-`1v`b$7fKlH)r;wQ(a{6N>^D`~fOJeHd5l0t4L+89RzsJ1t8AjMG1WpLlT*&Irsn zHm&zCcpR3xo4+Ly@Y!H>tG9<+rK5pm}_}ce>3?o&9(m^sEoh)!mQ9>b?8UIGuy9s*0OXK{UN~ z=;m^;Sd(22>Q;I5zAs^GQ-)$PbUd*z;k0<`#X{S-&#b%QA1^BvLBiCEOqg7i*Ye^4 zB@+8b235v|;>Nov#*7ZJwTx9ElxiMnP{eS!3lrWj|41GDZJl<6Z5JO@8h7vXdjG3xyjO$AVQF%x{|-Xrz|+A1 z1oI%#sdv_YxCcBHxv5qbe{pzArIj9=!Z}|WRUIFa-}e*!X>w-Nl9X^p!3ZR0gP zq-=0i+DqEOSTyLAaJZ^`{kq`?7%PDh#;o9cTz&6PUf1Bnf7g~e9->aee=?;PBYvRK zDXSXN8KZNgedG=*Ls^RdX+*hu>!7q0kPXJIZrfPfXw$uE44{22QHAgAg~pZYH(2m~pKjybC$awY${X=k95#`=8!~fLMGNQHauk|nBLlnS8%aGyMFX6x8pQ0If$6GvCo@Grjr zuETVPg*qqr$D5Pvni@7V9TNPM0YQg}g|`D3@~T%b*_XeSy!a6WyovF9qqzkYur~4J zk;JglDLf!cQpdEV-e$sNe zZ^Igg2Z4t%8va)aq=Q8NoG#2*kgf3b@p`MtMn|sQ`DN%;Xg9*2z}sL;KIYBrlvN!s4+tzWJ`W|{+rBV zg)6Xj9;5*71E3#i+|%*xHP^dHDBL#OTfZihdn=+NBRe-lt0Uuj-L^br1{46wjbb8C`@&a^RDZqf$7Zen9Sl2U~ zUty1OUDAQ$RB~S*Oo^&R_?dB~dc#v#F<@TjEu%1phymlH>ddj$aKi%*ZJ6PIdXMAj zhCuJ<`{;8{?65at(jRrq#=#b&Bi}KYKWsD`N{I>wbMDU1wE}~g?hVuEc8`IDRUhI4 zUQkXBskbedqXRT?^^?Bx{uPpR9CKb!WRCC9*0CzYm)MWr2^n)z2Vvjr^bT7xYyJ9l zXMKm|ia=&2SgNZsn(E8j3-_Ne>@sB{rDgf=J@Rf*xlT8Ek~Wg>+Nl*;4F?C?x+C|w z>vH9q&k`yEjG3h+aE$lWK1jhpGlh_f7rMyoANG8)JXme)BsTTyEl(Z}$6uWuQ-GTBwNXqHf|9CkKfPTH>8W36&JeD&^!c?`I= z@{*W6g(a_sD~m8mUtW{itK&ZsootSutm3Ru;DQnrG-RDQuNw-;Gz=d7Gj?T?1jLq-Lh?h#`)S#Fma<2l<=ccR0gJnHAelOp3$ts@>c`I@@Fq6;bTZ_-r9)j@)S?L6Is^3=O(I`r$8b` zJqyNW{quT0VioU5m3!^;({x|?d(YQkL49)5dW}$R4Tm(%#WH+BMKK8Aoi_VgDyrX1 zB`U!75jxYy@qlYrp!->?*y0|W%|P+MG4dERY>vu83R z(MIPSfT@i#LKqHlG)YWlKOptkOIb$GcTC<15TmWTs-k( 
zdZa8Ej)di~UF#rm6vr+)_hX`}mTvDDZCwV*zYFj6t)7Tn0EU&gfX#ZFDFKEegO6d`Y<7hh>DEh|OZX{oPaD9~Jse}$zI zjjF1u28OB>PA?__?C?nv#Atky1XT}QRs!Y7X)FF~L;!J#&D!eN|M2z(7`Fs<>j6!0 zz)AfaYCExV((_~~3Mc6A->)EbBGCv`IG_fC;`T00GG}$CccVi8-OF81Jm2eZd*h1- zr|pRwR!zw)7L;E}37w@gJYhX@{9r^U$X;FgEGvtY1FNFKP7fLNNfHASlNeTBOKXWV zVX!Jpt4_q?XG8CdhhWq(h6*tA@^KD*uD*ZGcU;+ZWSvb-FtNI7r5i?mwdBV{t z|&~te*S)(h0jllrakUe~LJr8bq9Mw#YFlK1ELzB(q zWHfK7GlU&taSjjiZ#jx^J@p-M<#s2wPY1zqR>qHww|U3HluNnC;WtVlP3qr59%x0H zipXe|{tP+UNn9;^-*xpBED>;HmNgm3E=7i_%l8aSwJgM1Z6r8|$Lk#EF1BPNs8+JH z@K-_N2?}-`kb7u653XvTQk3$8lIujX`WyF;7WB8k^vo6{X8c6Ajq{J7>J8m`xBar9 z9s9+F8dg%_yx=Y4Hu|*qNoEt#cgA}2CK$_Zjh(R&Q`XtKM4o=#l-y`@WCy)rcQk4f zv4D3(WhXL~4xU_cJMmRBImY!r-8$U6RYuIS_3QFN=f)bh9X-f46bdP0!KT_ilvJ<2 zkicF%UoeyFN}LH^XlnZIlnMNMTP4L5PRkPq-%V?O;0YRh?%G5Y7X7t!*DIvz)$+xu z?mCM|o_zgNH<0-|0SJ}H^IT!qYi229JheiIg(0?o#t{3!!-Mrh6vDrgsZ+N>q{1TO ziWH6%z%{=FV;YGHd9?f!W1#Joe4Q0AP@(*O{MfR_UXnuy&5>|YQJH=Q$EEtEmAEo? zm;g}`FaD7s)o-V_kI(pKsl(B#I!xHx>PCRA2Im9o`r;9mjz4= z$QVqvUA3$^!|6Uc*MpcpdPp=6|8_wE7zm zV)WIblN^VJmNyZdyS%?&?vmRQlO0?g-$s27x9DxqY8In8CF5)xztb~JeIt^(T$|{V z2B5Ky_X~IQ^9Cw~Y-$f8=UUf#mEy7=Q zGBQD$SaJkK%Zu)NPIG@y)ixcK60(+dtbk7yCP^yZ^*)O-NhyM^<8+)fG(>22F_1UwEg+ZV8C^lspsHhG~?cAAkTI$i;IR{E#U5szp zex)@xcI9Fh?lsR12!h1d(+fQ%9(}-yh%AKZ) zKUt&y{9B9+hb2$K>A@5sOTKHmQADF!S?ehtJU5SeLIWW9W>B6`bdjVu;RZ?sNNygw zjPCR4QGT~wVUvLz-Bk(Bf#)inzxv}YB!>cB*7p26kEp2HX?-%yxXO7!Kv>Ahz;39h z`+I61wu;)SJDQ0$PJ5Ok8hLHb4`OlEWf(}3VPmClbha4KK%wh-ppfMW5Xvd5%j|Q< z;s{o8$(xa#rj(yT5k@~4xVPckjicr!j(%hZwPyHw#Sb~f>4ft!V zH4I)v50o)>8ioizqI;|?zqCsxoT>mud1)eHR~^!{4aIy@CCEhs649TbJ)a=A5C^xrbQ5kPH=uti*3H zjRa~1@H%3%1Bxn_1Z^rjgbE)423U&&g%Rm1+rEJ=0VbelJi8k;P{M81xs1p_L0vtt za!{Wd1y;1gMv$}3QzJZJlPPLLoheC-Ov}UaDtW_`fDRviVl7@@VUb(s zd-@hR6PIvD?|I^8pq$TNp``gexy59ZKjk1g=(QL5@+AZ6kjJf7@?*Y2j=X zhNSq1CLVy36_%D}x3wYFH|K_r-+tvdXhlj!JF zU@Ns*R?>oh-_a@VA9l5Rq&mUkvhckZiT?{q=jV%JUxB+d@xC#-tw8R8c!pxiN|Wk` zL4A+9l=xBH6lJfs@eNN|a(A=EW?C;T%2hC6Ua@5kJU_X7RN6z_s9VeY$SdSvz|%a8 
zF<&b58IQR#p(^{J7wpkV)Be%<302YU_-H9_)k};;SlZ#mczU()#%7M;Axh&_UJu>V18G=X!()wd?_~TZ)TAL1FE28q|D+`PIh}hVW&Q(hm)hvI zxq~s|dWJO4lb($VW>bmR#GhS~?XL%ppHajR;{5u=DGz6@vk%m*46EpKCA^;SGUg@x zLwYTHwyuF>z{7KZ3Kjd~!5#2|Ah~GH;ihpDQB)f>0(KMdNxPcq6OMnM{y=4cuiLuJmtC+Oa~o?5@C39B!dil7@xmJvVV zkaFP^%f}msPUmubh{Yinl8MkgFvw3b%TlmO5|AAEVJl(0mGpk0ZWq=f=_L0cg!L;; zhVh$x6sNFRM3kP8Xd?y^1JoK7(;P;r%G$Kjq{vKKv9f3-yh?+~9uK%cVg-BF$y5zm zJ39!ol|VDON1mRm~oRmPJf^p zq4Tyww@5r8TZbo~W(aAS$CLSy3{R!6ptQlmuwG+un)p@5h#olW!975VM?1aOc>2?% z&?j%DA|S9uY`$nv`&>Cu$;m+D@TjGkji8C4W^sO2QK9$C`&?7#2n+()1zg{PhW?y1 zS~E$wwt539mjDql@`{$WQ=fz6tG=+l8jIX7X+=;xvg>#fyQg+8%R_McA5|; zBYLqRd#ASItav2)p{1KTgF7Z#Lj?hqGKiBdK*-#HKJUk}Ft0EPNVyrB&%47q#pf6&=>4hsYQt0*`nx+#8BNVqy@{yf5HC^wgB5;TK? z_z}st3!2(Lbia&yvteY;NokPTEAHe)_C8E~20=OHw7(X4)4sEV|tE2GOH z@VfufR?>J2c&PqboT99x6Yq{?9+G-u?rWf60SAx(4^-z{p6SE(UMb)7PIGib}J0E-v&t z@EM7i^*yr1ij6h-)C!^<3TLh@%0$INi60SgOx$v(v;fOhh0kG*hZ}n($wWXf5-38_ zmED7deT`W~fFu(>;^@x6t$_sDiSqKqPnnlsL*KK58bOB!EH)zr4p*3FBm zDkq#%K^_A-cqDA3NOSE<1?3lLodVucwf5xzFb8guxqgyC&7!3=(+4&uy{nVtHYGI0RQD|yOpTY`yN7O z$Lnb_QGje6BvPKcO+P9UGSLY}mJV9l!yi8#Wntyl45x;jRGLc$g4n$=JZ19r8u-IF zXN;hlUIWSZ*D96tAKWMH8t{}jCD){_^*G-P&Pl=seA=+PpKpAACnk1}jewRdZ(k65A{n@2D$KJ3g=O%VjT)TjEDbb>AhFyJ$_Izf<3CehH8Wn(c z7c5D6hsv6N9(IF;E@9L(BgQ@$x0FOrF8>+i*E;1UhD?YC^T9P}dEZ#^_kr}0`Rc~F zm9&~h?}`2NXzj%asxVX+bIG`p1*fD2MXB+e{QH4#CbjC3PwOrwcCVb;yXy?s)?HCY zLOlc8@+?H%yE6jVPsaa%xx-mtI?sODn&drDp@i9{i4&rYnZ9oLB-?*N?7m3}c ze7?u{C;dLB8L7mMvyO{QuC$cv0e(HILVP<$` zgP<+Tvr3yoW~AdXy|G<`BXt+3dRD{IsY+=#$l7b`*d79GN1`1u&F6 zPFwRFEgYnvb4r~5@@PYMkc1(GJGVT@*2C z3Nqt(hycS7b#D$In?X1dZfPovJ|b8&Sf~&6VGg;`HUGfJxsy0H-bhY!nIt1KfMH!! 
zWJuJtxkhOhap(R^68f3TRZ`PI?#hw6M<8^P>{}ue0C~iIb3#nIN#%L`-hmX4&3i15=3h1@ZTfFa*!(j|9xyusQ?zQ6A?6u3 zXY28kvOo2??Gj&HFEXR=cHVSX&g(|EKhoDCAonGw?@lA-Yu=5W90;(=Pv+%aTXrMx zIubxZhX(Hn5iB5CjQu-N0)i9t*lwo#xCP=$fe-$7mC zCB1&b$@~wGJPXcv{*aN@$o90^~fC*U8d-+7GcSp^t0?8QMR3 z^}F<$QDJ|8Ff1q$5)6&m{^lrIBC&U(l3)e_`b8$^PR?}MF_IxY5ezY0Eo=X5l$^in z=CNvht9PsO>{MS3JL?t5dW9CwdWE!K`*A^vN{#GPUs^h>+0p9>g%V}-$3G^}{2`Pag_Vi}7zx(q z7HKbs`{y>Pr-l4`HN{=JC_wdYv;qbs6(;>{;;&NS6-%Yb!xRzXAbAdGd;&V}%KN`~ zX@OZB{?5ORuf&_;rC*$-qAhEpWamK04-<~Z9Gmako*LPM$|X8%M6_#JtQXLkPw;>I zjjo{zwxXsj2wT^+8rl7fLb~FB$PYJ5p|#;OD|^Q&;0hDHe2xU*4`;V#QHAxMYH>xC z)q>_TTB`P?hy(P9kw@qNxmun@$022gd?-7cdmHa{k%Q*w%M)Gtvu z<-Fr7G74hjM9=Q%Vi4h_AQSC##FoSl1_!CcqC3tMH%UV%|99f?sO^j$i!++cY0}$R zyXym0UzT%1QILYp8lHjfFe!fuq9>(yhZ|txxN9Ym;mLPFw5q6}*#GY;q3$ zpW5l$xkiNxLKVwS`ex#ST$SSs5|VH#HR%l(YDd!cuMk1wf>}vXsv>KgRx0lOv>o)c zhdiWlagd0?Sw{BX7Yrs_LkJ~V!mf4ZAixq=U*z&P`NkUycm@PmvYN$R}z2?TCBfh?+W;)J1Wk5sjWn4T%W!W zRoN7~W4RS>4tfY~ax!X5I%l$HKe_cf%K%pmU$U55mTxgh?ba{W)S$G<&PJ_o<=+s* zW78q=RKBDaU3wl;I#5#$cAXx6Q_Hqj&ok7O9e&nmTE0h4V~nOMrH z3y3eL-jYz6io;AmvkIB@jmGifcw3zU{;-jTL2@r34P&AcMiXVN$q{3OwTQs!i+jKh zDuW>|nl%VL)Mv)4Tq?mvd|+#pb#&hu80VQwkHD*SRps)3r?%$JGPQ@im0>bhX(i+K z^?Qao0mTK}F#?{>@IUHj945Lf;K<_nJ!O-l)eQoa{2ms$qVx7gv$XHg!E-y&-vcB=xc;|okJ-oyvVwb9-BNrek@mgIjP0fd znbP79!~f;2)#dROvS4a6#{wSfPupTCbyoAln1PE=2QZZ~WxHRp!*LlG`!Op$QYx$2 z)*KMqxo_4p9q1MUNGPRF2*7?m#i$T}npoC8!b+=rG z@%{{%oV_^_pdS3ydOS#IMdMyf1 zMG|w(iwDe9k9*Hpy+Qr9nnJc^;;uE^m#u2&}UL;R`8z z0_a@lpL>}9j9s*PH6xy*mhot zV`eNNA8ZAlOa%pui*N!EmT8HkG_-9VoRh2ZT(hXfLi|2JffPf!)yFdx?V|%sH8DMIpeF9)+K6*|S5l z<$qHEIz3`F>_|$Rk2WS~>)MugrQ_Aa$O@f4K~VZ_tI~P@C9tRa_u#lq&hM4{LWx{z zSvj}|$%z%=Yy3Dqg$K9U|KBDzv5zl_28!%wLu+EX;8>%SiV{(}o9iSRD8H?fFflNv zaD*q%2>j;-DeE6U#9Uoi)3r~$$@a{dYJ>opOb-KA$8ndI8V253g)Am;k;=;YMW*B< z?uFmFh|{W2A_jI75GmTNIFeBYOZngCt#(J#i$Y=G4F|Y_=>1%=FcXYYleCd z#yM7VFT!3AO}vhLK1jPCYR2v<9C}a_1_|ixsMBR1^RDZkr^QO-tM9gxxb)%72K&~q zdFmS8T%Lb5~I!BNYrIC{EknTolkWji2P&%Z$OF(kyh7pkN9OAq4f9rj3xkN#b;dk#n 
zXYX_F*}H!i!}0H{*5Vo<23(cn1uC@U#Y^ISMm;?QjPi_>G4}fNS$?`eE@G%Mq1RQU zp*qb7YiRm{=yH^vZIt|6LJZtF&}r1nIDY|Cqp~*|p72vhPo8c4DZb(J@z@L21GS#o zY?0E{Qx(bKB|of8G`Y0C@yPryi2N5wMrDamr7JHLd3OJqVcbmpG}aUuf4X-yEs7y^ zYJi$R^hL<9d7rU3hOg?QsSGlQauk@TUH`+zLLWxF>~BH!QDSMv+znw+a6O~=T9ruR z>aZ`E`4ge}^0g^Lyw!F_^jpRbb+j57*JZ$;5$yT?)xv?h^#f9w&l$*eoqbxI%fqNM zicqmm+@n!Lj-RUL8RTVADcXTk_FcO;g>Tz$GpT@(93vRMc~jWNR8dTtixwSjuR#}y zymB+3C&mc@R*CT{qQPV&-Su||?Ec;xU(#S}pk*4+0{5f#K6r_OT^S>*yxlK4TB@Gu zKbmA(dT-`{e*7H&-f;$A+?z+2hk4!0o6M|l$&S!87Hk}T7Yv*jUiTAQM+W^T8!!jq zQYvhl-kZ}Kc)a4T=ys#)U;TBvKo$}}Qt}Oi_)4JjQal7-p=meU8F-x=D~f$Nln39v z-kvddpLw4qs=RqN#4NouL$&^9!tKIFra+5EZ%{tJUTpy?pO91>dX|s_)!*NHe)oUY ztMN#!({l#&hsQjpJs)i6mnU-XXo^p36=!usa}Q$2lS=h4JtVww@|0MkHEiCddUR^H zU7n(10evn24;F6%ES(1%^O}Dg0C(Z5<;9w(64#9ZCq`GJfQGM_ z`{16!-zkk5Gk5y;+P|RRS>~IYhM76?!PW?6)BEG0pJ^vine*9!YIf!42<01Kb32t# zAfDaY`Z|6?IaQX_ysDJ8q$c!{G&J6(Sm2wd!NEC;taQ|7M(d&vi-{Fu$2WBURd{MN zr;=H`n?D4<4gRO7zOveb%Mm2Xg!8uKVLvi0_hBNVHU4Fsc-H$1xlu~e|4HJAw*zr% zq&DLQCCyBH@tT6_nXQ?O? 
zDBBi-d3%I#oU``Y>7^P?tmRpP|Ki~EWI)!1;zI6!>7d~3~!MQJV3{VE5}HPX+Om$ON7UCZ}R?WqM_5T&ybZ%{0d0o zySZP!z}cmLllwC=@B>EN$6YN)$l*Or%zgq=F;jbwq}|VVZdqj|LycR-q>X#|Itqr| zSYP;W%>00|vGU4nA29VPkZ*ZTk=DODe#}9PRCQ9ics?t<=yU$=xARVUC@*JrM|rfI zPNR17m%4yE@8`gQy2PE=V@C$&d20Akm9M6~asw(SKr(?EFoS4?OR|4r7`gWC zIx|UogN~ax5)j)?ah;%5^kNra0NDpjEof}%oF7w1L6Q;9K}3n>^-<5WS%7^?$J8nV z+vr<+V^=5e=iu#^zRIf*#kBdG5#HU|+BH^EKMz^m-Da|a?nCTK&|EgSg<{ymTD#PG zfKdatv_7l26pwrD<=Yu@Me~4@9Zh%p!qo{1i71a1!$vy80uUxq8u4_w15d%9JWeJ$%Qaig5W`QbPkk>gViKmr zz28DW?IJ@7#=m&6zq;yxRXK^=V{b8`2W8;Ad=VlYM5SjlFIj-s zZdnD~5;Qw1M2_10%^*PbL|yk8J}Rzlg~^g7oX~l|b*;Xj*XmVCd!F9Sg>Cg_6Yzs) z$!@QYU$3$S;F}0_FKDh%PE}r z_O1vkD5}imkX%;9L;7c)f+X~YFQq19SI@>4NuMpSh$H7*KQv!0^3OpoYFuWpVS0%0 zU$8adgPI{-N(f{MeyH`RrU^Vzy?F7~nOZn5bGeN?eoWrG^ApnVD(ufFcaT`^s<9ME0=BWCCMWkV73*;!jk zX*hIA&mGDhW$}7Z{$Lt*1N4}f*;iekQu&hiTHIfoUW2J)0s=OKT=z-Z&8$GC@u+~y zV56x>o%qODnt%f<=o|Cv67&GKqNAX&(RFZ@zptS4CD!+gl=SwyTS73zlv+Gtc$T>h zxIy}j&3=;BhzM79n}5qgI_%P_@?S*nthITD>vZ8rziPtWUDzgQYv%;-h-)rC*`TVY z5NSNzB$;pgceCAcw$-zkYiH>5qxa5a=u{a`Z6aA~g-5P7C!AyGPhx$>Z%DwowUpaF z=#C#KCUUf^3p1BD?h+{^X;wH{d!rSRIK(cMsEJ z>4(?@1f4u=fR-q%ynj??l5ACs_+TCTQ=Go})dGmu-X zqSJVN0n@$Ofwt}~BxN+rzpO|Ie41YkP9s2sBFeLo02UZe_4~}WyyU?z9+xlnd*@XeF)LK`RfYT_h(+syUZfk*WemNjtWI=S2A5t1h#&jjBe|VN zgJvx-v>}o~QSD&8xOpB|^BpfDcIW)s+O>&?oaw~H#c|edz8zbnbN5_*VJy5K)(Of8ty`gaYEQr>>i|62pjUQT+bmBt)8MtXMnKe zGGjUqbjj==?&zuSj7{iAz=Lr*<$^UTe$4*@F@}W;iAUt#>lAY^eJ~w*>2mC0ag+`5 z{}!t0CJx*u96Q~hp0MnB6NevhJnA>R4}q0wr_UqfJh3vAsuZ&!2`=TSU81AqP`$Hv z%H_ba@s7+*%DTW>S;z@*E5PN2!@W=0ruRgrUAy>~+^W6c(`?5HT6O```X?Fnw>Jt0 z&6`Y%W;B{%B*o=H&z zS4)5Hxs7Ds6C%H-|J$em{)_K5XV#CexYHHP=Ge{V%2E{a6O>B}0v)x%bi%|xV8G^G zR9f7-aiOM{5XOV52_{xJlaq_*L9*zy;eq|qt(^Nq#%OwWXqTx3R%e476p#ysf`HHS zx;FnD4tfY`BN^5Jn7z#%`{$~zzJP>0TWV27>29m39$RhnG|U{dr-*(26vcs~ za$J|MLjH(8WEbHIe#YTl-%?R?9lPn9xfz|TWDQ>_gacC)p=Y^0a|O9sWDrG*$AAS1 zFfhAI!{2S?&MxM6E z%-b715%KZ{5^;3xJ)Iyr2Mv_oZ|GW|A4ajU@c8=fa9}s3ZNeJ=HS?zUZT_~mey#ez 
zZtwl?%)Y3Zm$TiA1ZH7X{oQJ_lE&{-^>WFJeI~&_Q4Q?77WOwA&lE(gRnjZV&$D~& z*K6J}yT0Dd`99eRlr({T(@xS&da95Q?aG52%eqg7PvMmDdW_s?U(!$UER`3}_#0^; zo}S~!*h}Vqc^7xZEZQso2;Ed8mWRb}Wf-Cvp}b2=CEF#CL?*W9T$L58xG1a@LZL(Z zbfQC&I@b6ysEyj}n^8m#NR0A5HvH;pDRK`J^q^&*VJ|56qWCFVZNH1H^6R`WgT)_I zzH>v8t0w53tEkAZn>^#OK>Lu=2&HX z{X#m!9WYnrpzu6zQG5?c4~P?IkSuYZ(qtr9wv8r6z3ZASkTEy@x=cn0fq~eqgZ|N8 zTg7$MvLY_Vpx;!uTqY+2JJK1?Fadw!8Vzr%Fug3kZP_!kSBS`F^i~x=8**O_M?Ubs z6}!P6QEb4H{FENx$E?#hJ;~3G))?Z>7*w}VFh6iHy?FEmn&Q?M{$rse~nM z4z(^AHOTe!&X9wV#Srf0KJDXm+fP%h^Z;aWjDSH0|4cjvnDa*r0V5Lj+dn;-kRkal5y6zY# zET=eMq6ppAH1A8(JQ@~SL|33#6=mHde_c8VvTrd?e-91xZQ-gom4-SB33tBx59@f0 z!%-<~OQd0c_xS=Mr898SVZ+%y_#Z_|^6vcO2Rf)?+37{O?*GpJfrb^l2Zv4c(gU#F ztch5fhwT=n$Cmvv+gGRP?T30jsclBu(k=3LQeB1VqTyaVbssrI)l&o3g#7Q%5UpwX zrc&_ufrhvJSp&!O^%e)EjWn99Me{pfSWrRrzuoxeALWvH7>$QCq`XF+{>hUwQJSBB zCz)@e<7Q{7=!;I%Y_!Mv6*ZMREc#(+ExW5F16eJ^>49G|DP)Q=nwwi}>0IAeOg024 zP?f=*em9sLJZ$xX+R}vi`8%|4CH{^CD|b3Oi_r*K^Po|K;<-Rx9nSj$tOL(0+kHZZ z?f065tm+Zhug%-2FMOf$QD#amZ$rZ+R=sg`VdnjcB!{nvMO-C65NJ2m}AUs+%L={Bxkb8WPCZ6tib zLWZR}GZxMmK}5Q__RDQF_q!{7IDScP%OX2433tO*@=glset})C z8A)5+O17@mR=%N3J@@BupOVA7+Zv^}iW#6J`NCx+=AhgPkks3w*e%A(ar}bUh^>o= zN|S!jP26aT(|j1lU)C3@d@{^~%={IW#anCM9nH&ygl`*nD_yQPW4B}j`TKOcDn5T^ z!1P?58zSSa4cH-H=mD;zt8o+-^v|TbjDp$MGG&|yK$8Fl3SEvX>ZISa zCV3?+!eThq5!WDD58)4U9ZjPbB}xI)Gt9>w)yX_g7ahSn>hCnmL)h$vzh&QV1pXay z!{WDDJbP(>ztQzWPlfrlq%{fTdI%NaR-sr0cn>+Oc;Hcct;X5?sMmgy0Df7Z)S z9)UdrV?nzdTpw+=Wx^(OkI(~wpEtskoBj8GC1Cq;jH+Nw=WO%q%D)+@G`mUp1!2aH z%Gui|aW1>kfu{L-pjedbV}7^jr#eZK{6bGrq`*EfJq!U~o6D-A{WWaK{%2kJqjp1& zpZN&7Nhk@&dJ88#%R1sLM3Nc`AQ)LW810$p{rE0QAyTSjYQ>ICHF$Qzm6g)~miWf# zZ*`kM#N|VCW%q%@?U5N4xxTFR21sn5STPX9GszCjQyd4aaJ{v!;tDmIh+@T9ylAdQ z^aOwTUw=;82c?1yc)JAeu{WVZ){dA{zf0F+3BT~%j)cb7DSmxVEZD7_u@ggM?*U^2 zP0MTFtM+K$qjtr~_l%^TzCFPNoL3iTzfszdfhiaN1}(H6{O1~xiVYj~ALiY6dQR?Aa%>Fp_uLd4?yd+q8g%2_a#}wP!6Qiz; z;MdMlhmYeJo!8Exso=#+c)4L_w>J~kL$e)4=ihU60C-lty6zN}nWUY;$id#^{gPLi zVnmM1!2+nk7L~jm=6{OsVO^jvX3+XmnPs#w0nY(Hayn8M(jy|jlGul*zb3-LuOZDP 
zqd$iMt|zcMmk7xrBjzo>J>-{%yolAzzSWX{=@7Lo2Z>G%%4G#p-(ZYgbSQ@LB8ER* z77Xav|I9Cc1U5*_c#cQ~`;e!v1fwb#1GWd=G$z#_tX!LpZf=G4c2+#V;Fdu)ZjdB> z32ug{Jkdbaf>U#=XNv?7xf;wGMo^K~I0@%3dtT$qaX$(oh3x7}NS)U46;~Vo_UZUu zui1lI5J!5LGOO{)^Vjc_)3Ffb-n{!n7--H-d=>_0YTXlQDw1E!KiU-CF*^}@=QdW3gb#_souHYj7Q}qZP@*5XV7ztddiEg@}swk zcHp?~Xi)x-cmVs*@?sxwf2y)mY_w^g69`MKgxNt|V)^zL~BeqB@8PznqWl5p|jvH@V}c$7zGlKo3bZ9?oMq&I-J^-^eed zsS-I(!QU5n2r>(L1uTi5&eiXef!lvJgIlL4UaL)7h!VX`l}MW3<{OJ_;W&ioT(6Jl zTzy%jcsg6HKsvD81oIBz)~8`Foi4urHSw`F10Gffo_}qD+56V%r90?epn?(>Un2fg z=Fd=y58L1M%5OR;N8p=$n*KHEV90!~wWw;iS`HiZVb);p$UrKoR-#b?pQm^o|MNTF z7q+$ldVtT#K}RbY3izlny-wv->wMPZ zM*+=Kl~2P3dCDA`ypsfb&x^(~CZWB4_*lVUP#4UGgC01VJWwv|s#0Jj>)JgB0|v73 zJSNX5Dk`l%`i#auN?;AJ9$gUcDTfd{6c*(+`xI^ACz|Ofr` zCD7*SU^!HW!(%eRzRuos6Ff@U5?Lfr#KJdMOi$p=C3Qs z1~%u8ZNQe5Q^(p-uRY{_sg`YC%U-+K06+$*X%AqzF zhKFNgswHG02gjkP(TXSQaVG1PC-3u~Uf)%EBrhxoREvUNaQ~AA$9mDz)X^Bxk+Eey1!iv1cIRIix297ysgT z;X{&B??RS|jy~2_K`MuHW?Bdd?p&so>SQH*Y(qY{%Hfe??9BARVkXz=EmQt5@0gJ7 zSJtNuXQpi9GaI9`AC)7DQ}|k$3-)=?7Trf>JP!^XB)Nse-WVp+JQn|48XBEmTZmsx z!jFt3@&P{PXZFOd+1v3)8O_?)zT~-yM$N_+I9j2GHQtUgWe7#TwCykYt=(nZ1AS zqpGRrf4_^c3B zTk%C&y_Zq6BPJ&=R#n#uiHv;Nu*CU9;njN$yd(6n717}3tVgbHmvm%UoiHcTIrVl| zTH!_v%8c_CC?EFPuR!N)(yI4*^>F)oEHHtG!#2i;BWc;}&RF^?qWH*r$(p8(e)_mU z9}nfxnhHu9+P{4=k_hRXVoHP9p?R$N@OMe8aZFc_>8#SwRFJ~(q~01h`Q;5U>W`8J zSu0%eUpxQ))oL3klvi!AMK9p##?%BatO07!rBVn7QA?p*(!0*+-8xIb0S1J{~A> z?2<&N@EPTp>uR6lCp>{4DT{N-{D1X6B_l)W`(7BCL#Q?NzA#Nz{29g@A{1_JX?|i^ za>Dl-kGFo-AEHQp$}Kj{o(cv}Ro5ET5y=wODUM@AeKahunN*22poxdNO=4@JbHXJy4+n$pW_1qoP8;daF<6%jnHt&-O%#MljCow8E0l`RJgBS zK3ej?YyNejT?2EFgsm$I&uxVp$pvglsIu-0-owDTZ1iB8Mk->^bWbuV-C%X z(@Hpr-%$U((}eg4t0ELHT9$6;B})1q%O9*5e2PIs=D)PCg4Pb1^WgIb;!^yI07!gg z9Qbk9(;=+C^Mo$AXBI~B>hs?-W2+xdL6+J|{m{}P!>s|skp$T|nNV~o&nr1DCjHIN z4i!`1l97j)J>$*OQm3D_ryW$JEp}YQu@9-Wn|tc|b_K}K!A1G#dOELyX`l&RZeLDK zAp^^@p@$55{mz6l^0)jOFTFOt8aVd-SgwFQS0(1Ps9l}pO)^z`$IzAYW`1>e77X3I zqD9e3`em{~pW(Z6?AWXyE=?4Fl1&G@)@|%K`F43b6&rtvB5W~JuJ3s=z%stFlI@)8 
zC=?~Q;!7BCf0}(f(|%9v&~l6x8pb;cCwYQ$1OzQ$u>rRX_vaM>#t-**o91G%Ko!cs z_gJP>znLV`WZcauW5&P-`_OCD=cpfYKOTpKwngdCdQOt%dm^t7$45G7fAH4}xH|_< zNMYO8Jg)-+AX#3U&ur&wo)#Bd7#LHwuf*1VoLLVcHRu={`**( z?6+{8wh7uZ%gy~u1!WWSKZeCqZ*yvsh%gzH<1F5=vbYCy@o?z6;{VpVGxrwh@l<<$ zKUurn!=90`&g=5k0ZG3$xz;Ye-mtee?1zoe%byY?(O4)A0gdA1`Gov&CQw0TQ5H7 z`Co(iB!g-TZ2z-7nw;utmpzg7(`(0Z8Pc`t?7Pxq14`7hD_AVd5(0ll|LuwYNqqJy zc)#A&=kD%qw!w)(v+RTUz-9H?n(^)J76c&`9;55fO01>trqSmH|0=;r3+6X*1Q5eBQ#)kfzRVArv?iSs0|? zNj>2y8Hc-YGDxdAg1g z#&8A0HS9}D#-$|-H@QENZ`d2tVvIW>cKwPjE!hwNBp`)Z>g zxyxrDswU!xqNx1iE~_bRZ_#vmygXisc)w)2^p!kITeLx@2ON7?U62V*c`8Zx^REiZ z|0oj6J3II6=7Ri&5`L74-NCn$W}v6CTDc)*l_sJnd8()z8|Fg(*T@OUa6BBjDy4TC z8o^TJK-V_rep#}m)=rQNO3Q6Zbww;g^|6L{Lg3X7mW_foHPZJ-Y2P-~TjOPrwq^w# zmr5S3ikzgsH05D;$JQVsj^g=UmC*KCaN(7m+U55aYY|Kc| z>Cq0?7rQ;`wz9YHvo6RCH6Kbs=t#^~KK?je+IiV_Dc*8Et$THn+>RwLGTrmbe2Bys zMzQirP7ZxU^iV9rYcti#!J!`(3yFc;uHgb3g3j6*0}4zW(T*m!-LdIuwB(@>mTX^w zFQi;`e1~z2IV~-VRkX=7yS+sPSr{4`l~&{Q`nwHF_6doJbMDz18XCqslSNm<4*o&= z3-v50j?4aVJH+8W^qy%3G7vPE%YD%lKqoAw^kVm)6W5TlJt{588*!{lNH|9Jc<5t- z^-4{s6Eg13+ad85>EFHv()i!Y@6AqrqgOx9@-d@8`hl}fdtNtjj2m(sq?^^TTpuG| zS><*ox#xY#EfFx<%oE_X{$pAg)!}_i`NwRT)ynD0WS|-=IWr`ua#`Y{a`CybXr}g5 z->m#|#xZ8>+bXqVf!26mC-1L8|FkkB$+zBofY0&$J$d>tQ*BSu+@=kV^W&{g#f93x z9GS_)3JM=TUUpXsA0~4t-I|}&M)rh znx9q=Q`#ypnn%J3%C{z?KFOaZ8;?7E+_WPr=# zI6xj3O>oJTaB#>ihTIrfY_7egxxteF-(=pxKU0sV&+%f;(zG)WRU1T)5y-=B18jHY z;a(6C(1EAf`oudfDxcPp63pG{vTmRr6}2&xOiI5C*7VKWpHv->{$Q?0y?dpM$FlKH zoluh+4t-@L>p=NNoo~@)A%6Bcv~6{`aEp2l%`xU<_`%L4pyu}Ox8V;C6d${H#zyP<82jciijJ5w0v+MzqXrm!p;nVB;FVl8dj$3 zZZb3ChXA6iQ}eWqVE7zNg8(}^Q*jA|<(|)BD=ABsCw&{pm!^GD&zyF_;0r1y$!dA? 
z0b=Do$x(Zu6l8h7h!H&yLhAVZ7#-;3M=-rh>ezRG9FF>BMC??-ch8gtB*_51SSnGl zyj#C4N2wQmPlsol+}NlCZk~5H&R4 zTtLy$(EbO^?CkEwU$jI>(*qwQ@It{@{J-7Z*Ng;`KyMIy_IlF(;h`Ki870L*Igt9K zYX!Z(H;l;Hqse1FjCfC?hEZYe&8k2PW+6)u#-Uq>HI&TN55&61r=~)-p#|VYQ?7Wz zuwC8UjQ8hjogQU~`&<}-zcm~*?Q(nA-*#twdxH`b6lA(3*JPlY=O*;w62z3!(m(i; zZ>65lU2H@-ywgk{3$pk-vj-jI4>z?{mwy$<1%H5$UD(yDCgx;I(0wkp`RcLdv?Z|-|Now5_NP3S+|uVv7OI7v`Tf@YQl2p zybcWe5^wN-%bz6O@29FTGr&3GqjV}w!aZ$h=o)iLb~DI|+kEAq*OkK`5?nPg$%+Fb zFw=!>({lDd^jP^a*gGxt$ESwK^P-T&ia&>VsWI5tuz{In#YKBttGZO)0;9{3yCYom z34v9fK0FK*)fMe8E+hIOY4b0@G@tbAY2_<@VE@AAG(A@WAbO=!w?KoQ7iCugUbgAk zoVAFC#_?~4*wy2HntqP@#Xxl>VboXbpH43p9ANJZZwM@g(-NKUiqs3%Z+0vCwc1XI z#o&9@rjIhfzA`d0LW&i?Aln?xjs;c`Kurdz1BCOBew_*g(Xy4+P-et|?E%VjfvA{A z;1H7{K70A@z@s0v_Rb&DcDL`4*WON(Y~cMoE8yNs>~hfsgrkB?ck+V<7>+`^00-yY z#fAIrwsJt1X{>l?Tbsn>%l7{9Jh|f{ zWj_o=Tte#Etn>Q&P3{Q56soeaGNUNtgT=;%<30+f`|GV()M4O#_!?I{cq_{Xr+&p> z6hyE`wC-ZI%Pr4v#g32=IFq}N+isrEcSk*2ijknw&mpt8_(iF9CGx1)DVF=eqA*DZ z!caQ`VuRus|nk$dby6ypvs7!6i96L=~!ll#QOea zu(Gy}SSV+d50|F*yxA^r8Wq3%URz6y&?w)pfjSQJ&!~)2gH5WFhcx18fm4*v5*}|O z+IZ15aP)yMcU{hK3}XA?#pnZ!gZpzdc0nj(x?(?stnU(XDc!eKJ$4%=vjVPb0ZpGM>a-c$Sq@9B%PCbEw=H>>IQ(8JNZRxfs z4YEn6Y7-G#aZ^U){Tt3_Lz?W;c7FV!VS$dfF}00v7%}Q~`zFjWoocGl?d-x<-7R@7 z(e?|}1h~0B0c~G;N}R{x{0H@QbtWSj!YRBgPG*4-NgTSNfXcV}T*kDzgUkR^&%Pm| z_P;VlN%r3-jOMVgG2ry$jg2po9VpqhZm4CM#`zLdQLX zl_tFySh%>bHuwQW`bP_2uAi@HKY>XpqopP{)6NK3j3^F2KR?nZz-JIS5EpXGjmU={ zZKs(aOd~dhee=EmB9dPHWq~~HBNN&J-%d_fmx5?R`|I7DWp-}?RgQ|l=DfCG>;07w>`VWtUQ!m`31x7X7?7?+^QQvM1@-o<+{`fN`MMXtV zamCN@%yeWyXkI>!@t_52gXcR_p663q$C-z|=mpvS-j8g8@T9__sz{dfykYN$5@eeM z-R#9QxH8_Cgmq*1n;DIyEA)*<@HQ=}ZdcKeVZh+^5}*AXXcVj8x2VuY9xk5=*|%Ss z{COw9qV$5H?76-cB)S6zKd(UlzD3A|^ol(^eqY?K>0F^jS>Tq|G1N#YQs&8Xb{pNn zKk}b7Ot_wnaItwW{lM_lF8i*|tMkiFLqk+oHvd$j&|=novo0e+ z@7c5XF|i`*92FGTwUJmyp~$M2G@E}iW2UF;u5u2T_mFol!mq_g{{0)E=_5mt1kT;a zpOFyUVj!L+ue_WSc9WQxSXf#r?QvNdcABr49(w41*_;9ikTCGSHU~zI)QBer(%8`& zZU7n0!&N_zb=+ro6S59~F7pcu#~>aBl%kIlYw3Naxm`s^u-@ajQGWq@ulj{(pwzvV 
z_OZNEtNlg;0Ip|X*$|Pb(tZB}0(OB+7naKkklFnwu=&^LKUVaD=$lx{m*szB?YsZc z@U^b4?&6q5AnhwvqL%xc-3G7+9XArS`t0D4hihDF?|*a%8g{iQ*e+amh~PdIe)}J< zKrD@fDv4qPj{#IuTif>7oxQytu7K-zNF7TaE71VqFPR=Lxr~ZmEgkxYot!u?pgSdT z8ODG!whDB{`VhMm@p8G^uIND!K0?G=Edwv6>2m!qh}{RmtS4Yxj!SMcy{K#eRT|hX zH4#JLbEE5_gYY54`#{9YXgF+xJA&ieXIGz>_c;6dkEP@3uwZQK1)>J90$;v#1mVzV zV=&RQ9}iIQ+~ihx8_8h9bZ{vJ$yMcprOnY;eA_cYERYxwR$V@+n#!+5#G?6=(XPovfV{L4~qL3&{DiDUHNn@NC&6#kTc_9DUMFUh!pAJ1vDMo#Eex6fbPufrAK`E`;gUQwM^_LQWQ-F6%077MY@^=<*6_G+V64Zexo%r-S zYb&cK4`3r!dLElBx-Zx{S+|^K9`;OW8AhwHCY^hcMep4*r_c4h zUr1s3>Go1?Zv2S6jP!;)c&^5p5Y%WMR{JO%qA-_{I_&C~LPIb~Mb>ZBG&K=KFsG~x zdw+i)NHl=A+KtXkz&?uGZRfr1N*fi@(Pp}XiKizSONL|D><#dG?u(%k<#yRr0O8Ga zYb4Wix2*n`X+MrawggobI}AYzcPr75mX?+ff!qnIDA>EPG00ZY6Ty-deh0q>K+N*BH?|wRIs8HZhoqu-F_8so+aE(r+Timkr{r>dQoXuuuJ2 zgCx@9RY*8U;S>KB1+CluI#UmQq18?!jkBtmiDtzh5@QgKXMF0i`KNQip*?zfT9aL; zigEu$x85FcG7Y{q7#J}maq33`JD~s0KjM*t^oDpk-Pc>$4m!fl=O7iB3?*?`+1PYt zdMtz1a*j1Q5HoUc;6jn?03oYyGxpIZfU2)gw@g7Ebg~}K6eR_QeNNqzZU8lJPyCc; zY0T!e!~uHZpo+6@Kq|wt?fb&d&ksS=b7j5&mGhLN``hbNRRaSMIj0jOQ#v~(0pe%> zP{X^_X4{15@#{!7fDG6x_x85vneBr%%`_^Rp=5llpV;9`H_7|=Ikkl;*R zAdfnLjVv%~rIjZ*XgO74jYL+2_IYyvfjW@yyMgT+U@%A~k7__LWx|d^fPdXOTS<^G zheKXJq3hwsSpCOyKt1(JVRIJa&4gz*yx5&{3oOljr`|R=SQ8AuJ2%=xSxj zukYsD=lYGwI{2~Eia!rb`?YFKjemc&^Vkp%1qE3$IFRm1E|OyHL;QTN6dr1DH$KWO zUBi;|Ch?Pj)gSRnU1!_SV-O99EHm9mKyZ!YoSB&kL!=G>7CS+SQt0BQ8y6CVNFCrn z$=ccN&+bykB2u%+vhS%P(99V-aW-Qk!dUy6Y!FynoFeWc05ZA|B{hZnUNE6jM<_N0 zg3GgUeu^Lbr(&M>U>OB?C_Q;}{-QrEUO2_NEg!e0u5LOWV@YA*D?2+503Wyyy!{kB zJOrh3@LCKGXDNjU<>OEQBb}K^=2HeMEf1~BBWBYy8qbyS+$kzRFh-y)06qflYi795 zTYH|3%N-weCE{jnro-zGJ( z37lWUq4wawKDc}B$7g*!Ipl>m78zdEr|}={+6Vte2&ErKua2jm1&E6>iflD3FC(y_XcC zd}KQ9+qy;hB&=&y?5;I*hy&&*N9x@KUpw}sJWg=u0hN~VH$?mBqJ3Ee^?G#f98^kP zwYGCF7yaS;{~>*l>GEp%kC%%9+&8>kC$024wa{~$(lBksBs2Bgubnvp^=_(wyIn)V z=>-sX0i8gE%AuieWp)d7#sHKF0j7i?467hr@*_&zivbUJSKzCnAK>i|uQfD?5!>PF z?!LiIc*&e|>p(d%M%{{)7}IsIx)XQC42f55?!tYKyYYcXzF|sI2r)N_>1x?27qZm zq{;A@-(a7s>gs+0+%Cgo*{k7Z 
zr^E^HomB)J@>zic8VLf=dSDtr)SkD;{owg*Hq>0~%>`{`2TK#qg*)d_qNabOiRgz-cL^Kaks+NE+qtwbxnszX-!i01)?WmJ?3765a-d(eiuaU+j z(LMpaliAvjH13DCAv3!p3w2&`D}NLciIx#b8xW=SWuL=dP`yEv(~YM~b&&o;h>crW z-5}sQ4sq14=F!|)EjEG13a^3pHWQ)*+W`vXP80GhpLs$-<$T>C08W8mKJiK-MR>gv z=3pJccO*S%gWD^l5m7WP>`S=&1N6HV_l~m@a(|2~cKV0cbJS-!Pt{R^A~ ztF%!5kJGE4(WyH?oQmWf5n?bWci9^0b(_%-Td;4&o-qs%mpIG81)6)*@XP;UW!!zB zTD6_Bdh4~78HPYKLV$Q{mg_xLjlxFVIlV^w$F{S-9$9AlU1(=Uop_2NszCqskP!O{ z1fK|iS8_q+5P{kOcaH!SiOa%@3L4#l++1Xz%Y)vs`sGMm|DzxfZ6!g;`u;dJz-Tl} z9Pu-%UzjiigI&W#ziXv&9W!%33GS!YP0I)j6yJy zZ+Jf3{8qfjLF{r~!pTs(A8)vr;~2Xjl+wx`G#115JmP+RWViRUH#cZ*8ae&2zWZtS zQeBSq;CPa5t%H!_LdQ2gh8HK-`kx+0JyO8y==kLar*63zOjmi2&6(_4>XgV*x=E%q z_>qT<2UeyJ7}Y5&E4KBL;Hsc?)1^7l_*^vHgVg z2Y`1UjlITQL(~-gE|(At0d#j|R2`=@wAMh?NDin>OMEHP{rHj1|1^EY2$biwn_R62 zHD*MDq#MB&k8YFPeh}3(e{3$QZDB#$ja_7hc47`>)c8pfR8*`6LiY z5qeY^>xY#MU)<{3Tv*F61qCZ`FVgh%1QbSN{q{`1GCp0o9`QIz9rZ;6y#(OHarIkH zB5--x0Q9=-ZM~XxD@{;~OG=JGCo&d08x8oPkpki*qoqaq;lqdZP$>$xwGh&EK;ekn zZl)^M>lW<06P~MoFMg4)lo{y&UJ?Mp8?#lPUm*cr)UaFL+(Fij*kwCAHfNWY2#ACD zpd*O+n`Mijh8>AXLH@f^L%%=zt!n+ps)h%1J^QoEgrQiUSVd3qwV9_8zsww7u3oMf z1l7jcjV8Q?wmd*fjt)K&vTVY4l?^-N13aOVcEbjX!4G#EKD1Ac*Vt$UfP~R%wIL?tJct z{pp18_$!_29cxpgEB+<h{mE``2F!|&E; z+CD{d?V%EC8+flGy<#PjRCnPR5!yw0|2~0^fTw~qW{3$^R@QvHnLm3uVD@@TVJ{*l z`phDx1xnGn`Jg1a zJ5$jQ($yCbI}m+*KnG<3WI-Gm5QE4DT`aY5z2RZpALq>)maONg0oq3sLdT&AKX0(K zU2N^@5jg!eqZ2w4-MkuhEcpx?B)-w>o^9go`;uBRx~<$rx2n&-f=WfX0)a=}}7c;e>KDG&9o?apx7xQH$Dmc|4?9C;2q%%#Ps^W!{pmYZCvkB(Ia7 z#uQ#e&!&XPbV}w8=5tg>?t_NsBS@eKAoEcdz4*n@_+p9}9?UEZbP8E1JI|8jn;r{^ zw>}n{hlUlYr>PvAe#m$ohlTEVG=^_g9BR!GzfRquuw~^zbAukr?e>R&SI0`O zsfkDEUw>559C|sxT86Bj;+YVNzd-n-Rw#Z@PGEfVk#dy zfAa3u_LIxiLyKs^{-alS*_^*cJP9w+pp_ZKeGcpcoyqK6sLgHL(f&WRr($Nz`lpBuA;Jw1E@UfQCdUUi&h^s^kslt%B zXZ+3RZCZnaD`%2>tpX;+^#Du3-*`N>RIht-aY%;uJ}a76VRH+*gRKi?O6s@2)=WMc zaZI4y2pd)u1!AbH&XzO3M!ncQVdssNmY4o}vu(Ihl9z-~LY_X_CxK5P7C+U)X&!r# z@;-#GS8lNEwU%CdGj0mB{DCJe`DtcfN{B#`^>38d+cONbZWSV8P!v-!8r+Xti!(<- 
zrYlTK@RF}3Q{jCKD06x@}Q!T*k16jAXMx!bbc?U81Nxjo)iTx@B_Mz)Z=$Lq8qwl&O4gD4pm1^HGL z4B{yR-?e^H)0@ScZ*sVD%fEjuHDzZ;SCv#fSr5lL$2CMcl;L%5mZxfOcRPT$RQZ?| zcHVAq?7Ek6N$t_T{>3!ssChkKw*k3MF!7PEwlJ1pVNU)Pq1U!R^tH%qSiN(}o^*a5 z$F#d-0S_a1bznWhJo#PV%x>rMlAY&yBFB?3`mY<4uZfQt+wpmi_yTmAIo9<8X}`Dp z33C(*G!FPaHpesd6@nzA3Qit57IMRydn1^@!XK=(q#qdh1pydE#o`xw!pv^yyQDSn z56@QOtHZJ3kDtipvR+=hjuJ~hq5ILWac})9I{7JkNhDuq>fZq7(E-bc(H7bfrkQ9I z2emg-YGu5S!WM>k?kn~_FQo%>FbFiHnoZwjvRrSR1^c)#OBYHn@5irg*M z+lqy>7{X8LBu@q3pJfx0oHi`6o_cZfQGSBpvAen}@Cf|{-CwEHQA`gKm%#T4K6FC_ zH#cG5S&LDH8H>(vvFIpcz@x-WwsmAx;{Jb3eRW)v?-T9PU9uo4-Q7z|cSJ--W!uEY+p;5ja^3MP+3 zNA?+A-GMr-Rrt`mDf0vS;B(7KK8JTsA}-$%bliWS!!b~IJayoCZquoZpHdube0;Lk z7b9_KNBeWvG{Nqs{D|x14Eii8NNh%yQEl`5Bb)_wi`wRz@Jo&oamc)Jybh2_Jj^}n-4JKn7nReE^H}7t*Y7uj4y1TYGG4Kvavtt zYxl36oZ_;MS4A28IyZ?Lkf-WAfnolA{$*)kP#k8FE*dA%E+@Bd10`$V=w(RQ`j1G>KXqvWc5 z0QVz|wZ9W4Im-1PCyU!u$*>aGBrCm56%pLnc_Glxr7&bH3)xW>e;Ug@l^W4mUW+kXO0#Ffx=XUa_Y4iif=|XzO zFgUUMM+|YGfys&%@TsC%-M>LK*CO1Ac*lC$0*k5pzH8LI$V)An<2LD^A>#8#3&p*D zEVhBed`02o*ut(DDr9uSuJ5me?t|S{f5;Par$X1ey!G5BDuKH6%z9J>UT)na*9E(_ zM+*xv+eeCP9i}$JK<>_0nuh#1b;((-$6rr=o$DOOnDpFCjdT|T7W$#O+sOCPh`-YD zK4m&Ox2Bb&_#(>hazz7EGj@0dBbsG@ z+{6GS?|{z)G4F`Y@7%PLg{f8YHku=>Bg+~HF?)K%NKP*Rgs`}=3*EjIt%!V;F=2o| zyr>E}yqJLGo~lY(?dYMPqXRMZupPlcy4~z`7V-J)!bW`IE(p_|M5=uTE`J72H!ZqK zTSg@ITc;}hE3|q`Mqd%EvnnPvb9IMT@f)E>!|(6!F^xvadUq1Y@D4?ueCR{5_}!X_CGzGw@@giC0QbmIIjy}SDI8sRw=MfSLyu~ zjf`-ASksN~TH$@Q)nGupcKSnP_^JLPA$&3?QK9@lqjuTfyTcw-$c~5@4xAnC*x-$d zX+i?{K(Qp6wy&I3euouxH36p1R|IOOVve|iTc?UHEldmSgqzDatnWH#}*dh$u#nXJQ_7j^dKK#5DB@ z=4@wA5OUr^JHdH&ZY}LC7lNaiq5~R{e(f?$0Qgzd3fJD7Y2U*b*zOAd`4o=SL}=Dd zQ}cHCOu%2L1uzdi<}P&`Gct{<>Z*kq(Z|SgqiW8PG+hxyBRKw@`#!JmTRFgqcv1z8 zn0SY)?K6)ca(CA*TBTLnF^(W;lg5v{0dhQBFCrf)H@DEk6~eB5S8h#PVP>|uc^(Pu zIQ?13v|ktH>fyLCjgyQjDO~g+p=htXCt>}CoT_R9JnYF=sugw zTW8HbKU9BCyW-P_c4wyRVag>tIadn0oPR|`YJG=UB7V5Q@BrkP0KYV147eds+>MX1 zEePdA(k%MoZ$8D8mMaf*jJU^ix9m+4BDQ4Z7fe}d|7kOc`JHIjORJQph_a1!-7G+=z-w+66~d+Z;` 
z)lB_wf1KhAI~R!lz6yqe@>!29bJQl%FEGk^uMegMAA3qzQ9CB@PPz7Fhpf2W*U#rO zmUl*+ilk3&#*q>Tktwoe z$1*W_E#F%}UW(S*)aU6nA_mtlkFq*4+g*;FfZOtS!BT5-b>j}G?F7}dpjJ2FPh@vL z?tB`2CKB6#LVbI-b$5^WTHYI(Rxp#9eDvLYvW#tCER#$aCDAsvoq09w9zr_76tt9&Re*(&CxFirKO z6IBoPb+0g0H-3`Kw|{pKSTH!?$XoMoktkn~Bq$_MEZs&G?T)nmL2~Hc%5$Gf&)bar z^3(KJ!zJ~GR#<`3_yr;9)!<-3Y@Gnl{5kDmbY|vKaQUofKil&N6esW7tEh<2zVVb9d1eAVdX@Q$vEO zJY%t8l@)!_BxJIhF4u~N75~q;pU2PF9}M{`Y?4)eS#g1O;PQoOMB2GGl=Is!-pA0^ z4XHC*(6i~*B-mV=H;3avt-QA~ye7foJ-4Dg(--dzmABJF6jf(=d4h2lURO6Tj=u9V z?TqxjWEHu&TR8ba%Q6xOYk1XEdudz5>`Iw6F>a*!!wz_!fHCd4wm7&?PzE2tJ4UdS z*byt=9L4usQ@8ZYn_CM09Ss3o-OoveH6#T^)J+1NrNz|A3%BHY#;*z=!uIF@$Ku_Z zr02;UC!L1h@HO=pLPI_k=d;U{fiOkc=s1P{ej-T=h4(@YG;-8Y~-wyd*Y4h6N zk2gCHj{N4xf@ulPh&0dRdSF)-~T$9I=2nh)ZiVlD>Ns{c+J(@TO_OFtN z2t}}D7xD~eBj6g~FIB`Nbmc3Zc;s{!e1FlCpxu_RjU6aRQK?l_h24&GYG|bm9HvpQ z!$eh4?qC&tasbU$z@{sN(W4UbrTt2|bKlfG1p;|O!i!En72YxhV4#U9RF;a zY>z64jtrf*>s(h)I|&!x1ULj#Vy3z?F1X+MqF;EFU;*k?i?H5`qaCe0@>a~xEZ#qu zHWzG`bhxPQI40vi$rek@9h&_)>nnnCG_G>ss*2twNKR&a4FXDKi;JqzS#`sxqm7J! 
z8dv}y>@ypfe=rGeU`32a9Bp``-(IJ#oMiAaPkKKvP*^ z8^ijZi4fW=v3 zTlR_inyjuw0Xi&pY{k=rADczP&4I|RsD<$}A=h3@Ha8c;0a4r7&$4e)=&Y78Th8W+ zgMr$`rZ7W4?YMtYj&@}{kaQTz-4ui=9q*!iG0hIBj6R}WK~|x_I_Q!tzkdhDYh*U* zb82cZ+O=)ddj&a04yR&Ri4+HtO=gD%%>WG9YRHf2(ef*uOaNU=!GG&g2BJ!iO$!T~9ZJ0NiTQ(7TO>6hc zYU=>^s5NR&RPLB%6dRrB+z=ei+Ef z19(|fQBh?(rbUh)B~kV{4yKe*`21@TBr3}v=|2lP{;gez_s(ikU>#4#>)iSsuPZ}X z(9>P_+V{cd)%2O+%Gc9;oBOo;8&nRriU_X^ zWJlo@H!m{Dsmxup^Mr?153CG8N>!R5X_J#patR5-(a=oaZ|2h8Su0^qEDPnrsKsRI>b!w z?pyLQ<=Ztvvu`$K84AMMC=)MLbMPJdIWswvrP#MDpV93oXUN!NIN)Jn6mxk|qSL40 zpreeQF3iH@VC3Su9wH`X2qxR7ufP&h$Q2cOM@tr|7NKswd2OnFu{_WLQa-yi7rZbM zzn1nO5`fc$0_y$3mp@{}7P#Tz#2lx0Z`vr3y^*v>A12FKW&h3P0PUfd0Zb|^2~ohk zlUqTfdxveIXR-$GRM)xBoZ~+Ca}cA%xQz~#7A$dm`!=a#w8?f{;A2A!mfAY)UP7>M zs@So6RG~b|SV-1r6cX7B9oEKJxgKxEVTUvEyzl%D(273FqlY5@ zkjUV-L4CXujA(%iH$HgB^(wN+N*&I8G9xBxoHN3Nxi>*mAKw>=U?w0{*Brz>je{CC zJ2w~U=d)43xUNI`q1&&CpW)O0CV-3Jcsp4YW+EY$gW{j_{Wr}H32^5!gCS(U=GP+o zveBRA|A>dV#jqN_T|=iyw@yt$rd_QmXgLKzvtzKuZJE-sL~?P^WDp#XLF6QHM$0*s zmRXAsPK#K6h>1kiR2CP8jGzy8Bt7=vJ(+rr3t(l36pr@VXk66P)G*m&&JPByZaTYktxzgGumw#$zBL8vRB?tAdWck6g%tF=RG|CQ3Vi%q9NnoqCfL4H(n z5z`=N*l;{&@`Ce#2#Y>#i!W;!kbcg}I^O}1Mt`u*HQ`1{ww4S3 zXOY}22k{6R=N{^e2SQC`{DX~qYNd84dq#EQ5Ra0CS3A}c4_*Xg6d_^cHhMe)inR?> zA7{)1Q7mIcO;v9f0vE;DtNvYN1PA7_Dn=k_2lVg_;=eP`ScgLc`qUEtH_8%EuuBLn z9FUy|DD)w>7iuf?2*x#G0|fw7{h(@l5sd!PS+qulm?`_j6Z zud}LjlaZ#?)O1BSbq&whMCL5Pu}YsSsy|6{n|x%Cq~;SqnlBks0@(6IDsCi@XOn*p zc7(*?tcZj}#o-6c0Z6GH&w{G8K#SmSt#72-%)=#wODC(_;iq?J93H17DHFpS?5y(h z!}P-4p`7OZz8zuKRBL13>&*pP%&HTYZIf}tZEFaI@g~TRDa}_ZgCWQ(5bAXJ)(>H2 zx=l!ga6$%r1kjxvDoq&@%|m*oyZ-Y#6HCX{q^^Vmjk^AQH=u+_tVUqdQXm60su-;DwD6NyiWak^^E z@g3-_!`CD`uX2!{6Y7sdL>Bj#)_n5CN;H3kWWMH&`87QxVV=vqef+RPvt&hZ8Y)U1 z#(}6ghaD?vytMg{1D9wC&MC)AicX3M{7r$5fi96$+2sF`104gCY6mL@Bm6?v)gJf1 zw~=$WZ~KCDa@>GUF0Dz3K9LqS$!^EoRV{{hHiP|V^sbQ-4DpZXhaMi?hojFmh`X1 zT(@P?PJ(lH_(H{GxIsvqG#T?8%{<*)cc3&OaT{_^%_a{U77j~rLe`(cpoAPgHU6&a z+cy|Xz2J-^LUu9i0(u}{zZH6RCJ&Tv0X+{Vj*b}IVmJUjOIhkDfc#%j{0n*WO?qWH 
z;wAroOQ^0&NMs$=of$LCLl4G%AI62~l+gBe{q6K%XyfF&7OT<=oMbcMQpFj$`wR9D zQiS(%MA4(#{OWdeQW8Zh)hlV4lp#9Cw#c7rw0Zs1?!X1#W9O)+A~+6#@&sfAiIdZ_ z;Gzm_cysGspij-c>vv;IO9F3^06kVhI5-JUrzvS>eO@x)QUXOUibiXfA+V#RkGH92 z)BM0C-X`mPY;}jOOlw7l*Shqt{kxsAPK6-lK(hrji=U{3D=G~Eb#LPWnNNM@-V*6< zk(HsN4|czm&+EgD-=hgh5Ff3qTs*z3@eUGnHIAd|G;Jokkkr;@%gQ>#K@Xc$_>eZ+ z@rkrhl^|$Z&YfCqaA$|zK(7hrvs-FKWo;OB!Z^)Gpqyto$IS%tOI_-}xLH=3|D#knNSPv*SA@VWWRx}{_yCQNRG4K=pq;%s zJw_L%z+^XtE)1ZoP_?_v-zXy!iqX}cn(Fo&{b7InW-TEh!6K;^QdPxYI>sM9bKEyK zZP0vq6#(d*6#cHQBWDMp3k$=YoSg-i33?9fe>5W?Qyr`Kt@7}k*LEwh6bgX)o(Sjt zs6|IXh7G2Vz?M2c68T@u2abD&$uadHSro`7BD3n{osG)1_0q3CKZ#6o@rjz=Vr96} zRk~3%`6;k69! zXjpSPzUgSNIsVAavtoRdV>Qdo^74i)XW%diCzf%w`Gy4@3yHi_U$>Md3Km|?a3(jg zcI1~e^=8sNX>ecmQ>ka;>xDY2pKW6z0Z=1^|M|Rdgzd?c?#Ty%Z@;Si%={W_Zjxm+ zAMf)@T}4ElIo**h&x$Zy-qv%J6C;~R_6>g$*%#%V=AW;`=jFZm8JF&aB-)!|WC-e} zYj~3%j^%Ds)oD{HLF5szkmJ>-A0Hs|PTag#ifIzoV^l7cUBAb*NQ4{-8krAI+mYzV zo}R~j@>+_SJpTvgG^qE%LR`7nO-d*|TRaBa;aM8(x~84G^@vNK+Wrt@!XDP^3(n#V zjxFw(sT(j`EX1sHPD86FvBh&LRMQhv41MlJR*e&A`m#;Y@Y*bnEk9-M?t%=wPL*7n zJ%+u%Aj94dI}tz=%uwVthHTDTcP?8W-zJqf30s~FjOeZ1_i$Kz_-AHT4Z>bB)Cu&$ zk9r&6B%o$PpJ1uUKC)EL6*6~}Pnax5NlunD%kS1G4}2>QTNCyN(1uX-Vg|g|NGUAk z%n$&g&pNJEg37tS2^D??@drT`Y_k&O!~0Fe14kDhvSr z+nPxBG9YLOj<(e0!{^ju1y6bZ}KXzM4TrEOL4#AOh5B+$GI1b0jXmXczxF!CIYhkL;)wDU&-+zd%IcqsF^ zm+LK++tx*njd)EzzYIG#hZ}eh@8J0MllRtA3roD+zzW882AChrXa1Tpf~4cCZTAxfqZ2pqqt z?84~x-@-%@Ad#akZnS6|&;>|fME@YHs7?vY1Uu*uj>>ua+nHYvPln|9GM|n4hnbPQ zL*SjMU;&!+DdLFFGibat;|BBTHJLAzHGGw{;)sEvgktM)7A4YSUWlSAEl7JWIkK~} z2a17f(AE#sS@^XpHC6C&Y-U-<38Cmoq_Fk>9i2fikj_$;CNhj|=_}I{86w8Lif**K zn)38FQ-^YreUDCjBR@88gyIiLKzTUJaQ~Qn#RM!#4Q>wjb@l^leaON@MtPTDdsO4v zNLtFuy<{?7g+JGibki8Iv=RSb$8!Ek-3YlG&i8?7LAqx0xT^E5kwQ~agV8R0{~)40 zZg6iyHglNzLb5^G8^W^2qGP3`$IzVf5e~JOjJXQ|<_(&@<(I+bOs@Eajc+InQqbet zSY-oZvmp&a3CT3P%}k#0kzY2x-9qw>Vit>ST+TrH0GJypvxJWp3bzML0VvLo@WL*s zA=dq;pZeCYDi?#C$nFsHogq4%8^h)|T9T@v`h5VurA2*p(fk~qUU)(CVYN)6JQcCZ z^Pky~bt+zqvsJ#bt1RTT$8;ego|LDPYJ~dhRO(#4M3?Bf`*3Dsi!&Q9xlLojIC70k 
z=fjmUjQmm#$N1-!l@us;(6L!!ls7_*z+2)hYIS1u2-d7OVZ538Joec8-DzrhlugzC-+tjtyR091;Mkhjl&0XJg2rZin64k_hCO;ssh z#QHZhFQ5#KZkBM+sj;oFF{-^FWECbC8j?cNIsDdj=0Ur8g@tRlY3g;5DHT>~R1jt%Z&eDTkWH}|iI(0&>VqDi78c7fyf@?95l;8Ik&v!6 z{wBv!S+hmTcO9slK7M&?T`@G@RXg*Xgj|$Hx-=UV7KcYRDh_lP^KUMqPbMSL(eNl6 z+&kz65m4Jt8UCFV^~u{6xRD~1t7xq%@4d0=xt7==tC)ulh69e>`~A7o=M{>=2uy~i z%O>5B6;nC`B)J;}$kMIvw;Soma$+MK+~W2Rhr$o&OyXWQrlmFA^0Ts%Y6b zRzZ;Bz#J5|wb$`~QjfECo!pF?3u%o>KWOV&L(O&x!_wx|L`F0NB{(k5)J@^o@PAGDEQ199zrDs*w1qF=cf3#>xz@=M+~0Ebt?!FMKh7z^W#k|gmZ&`J zK^_aDI!k>ojM8AhkZXa!SWY9>sJ`xiKl-3cdbessw@!}MBK^Odj;wl123$Eo7#veY zA~`1XF*|tRHjj1xNV4Jio*p>n^4Oo{%+s9CCk>IZ`>TF03 zAu%Tfiva7tT)R>pDxLkZQHg?0Dp_OL@;c8e@Qao4-hj-jXM7@NP=`}@K zy_*AUQguq_^4Twg-aNiyIpSyJSxr*cJg{<{aFFH7KO|oqb^L&LydT~%svc*JQ}uZs%aQ%ehN5K7G>Zh~cQ$ z8=;G%X{IVr3uvcDsrPzYVP-~Xea5J!tBV;wf)R)Ua4ZLYg8(iG@EnEg z7J`6R8u0Q*JGA2l7}54!F1iZB-8T5R4#ihq{kuWae@znoaF`X-c#2uEvs#OkMU^3G zsJm%5gn0pT!k%*EbwI-ADurjVd-A?0S`D)Gp`a&MA^IXojxK~GkU26#*a9daw(kQX zp#)Q$sG>OZV#EL#SSS$B!~K$g)c?VL|NV1uXEj-)LJ)l;bYeZImh&CK{|8j-)DoF+03HqZPY4ZVpNTUqphPU>+|Z5~H=OsB1oaVMYk%5YNmL zy6-MotR}1e(Vkeb(wgg4;oQJ&@LAY@BS~lu5w{4?8NvB3!8=|O+i3CzmuNLdht2nb z?AXb&C4QGaNF2KeSEeuSIVQWAGb5{f2jf^)z%|`$DR<>phA)(%$%x!RUQ$c~ilLR; zf&PHOgA5-O<}(u#WDIn2!DD)DycQJ-BH&h1=oVLYvsl2w&|jO+^ax*!SNi@x+euUF zDC~F=X$OreDVKz08Xtt2R+L?z)2B3nfOyr(_{*WvVYime7a)>!FS=!qAXxY4BIAsR zR8&9P!_8!BGD1g_8R7}tF)U5B-N&}S;%+)2%?EF^3;PO?un-!}Icb^RnNPx=XBt zc;gev6<7J%-?S|vMEho+&L-*FEI1y2p+CqA?$aUlg{4Y zX1Zr{XX+UvpL!aSIe4Ic$*tB}Dr6Z4uUZ9Q{_pJ{8#^mdQ6^d{-z~EbW5-q{n88HH z=f@Z43IoK{Fy`|wegHg7tt&1SiiMXdGTNA9%76w7(Ix(VT78F>(qKo6wL|bjhr~m) zppqyo>?Z-yBNZ533>zLf$PY<}zUW=-e?!e+8A#t#x0f#hVg9{nQSq96#NxJ$K+z(A zNOPGh!yL` zQ`1n)nvdxCK?GFIB`?I84A)eycD7>ly+tmfFVjoo2m{JO#>Y8xydEPh^A-Vs!sPwi zF;r#IE;?`3%Bhkmhob-h`mTA>e!;e}O7l~u(#LCwib(z@PT{vk>%&M}!eipK{tO2+n z$`a)?#a1Mmnr0PMb&_RlR#jC170rwZ-*|kHHe?`iRfuJf6Idr!-Tp{A()h9?Z^@!L zaSJ&z0p@>>#n@<1Fm>``)TQ}|bTVnVUOBiQ*GJIH>ghJ#bYPDJz-;>N#NMsguc{xc11<)>=J0dsol_z{(NvVb`D*h0191QoN 
zz}pZrb@I}IIcI`i4`VtD8FJNDefQ<{eU?qF*FReVDeWhG zn!+j3_O4C82-4hzBrMAB^kIDfE2+>rM`WxQ#S8_&1<}7v3gPZc-ITjMxzDWR>*o)r zkHVHp^xi(m9OeN)7u_@#Va$J9K=CXALn1DZ@^$icY|>ilqQnBpkXHONvjoSYcFVo8 zBGidgH_f*(B)8)WVJ{D+=0Y#?MNoy+dILroIWjCDA}qd(3?FFEb1Mgyl%&J~h^K>v z-~RXMIMet}M&8}<6jrow^$GW-qT+IqsKy;!u$V;4KvO;`kvCVjdPTl(YI(2A7TCA- z6k|)LEwcF`VdA^g2aWo60s=5ilq>|+Tn#?l1;|wd+}$s+jlB|xcP;+p0eTZ3?Y(;t zNgt)EfhQB^N=2ke|3AnfC-CUYSVYg7pc-Z$^2WCK2iIN~bMZA&wLruS>_I0JH|m46 z$WVfEosAO=rpE#)&Qgsd-?WC_2g~TMmcctpybEn7KTs1{zjkT#^u?a&>0u)PobIP^ zxCCm3J5*iZee~;wYY0%x8h39u1roVNBO5+_eSlyZ^B5LC0LI3z1WR0oH@LhQ0OAf% zl-0fd|LZOcX9)_4H}ZJHS-F#ke@HNAm{oI7u32vr5@flP{Bnb#KGc|BnaCoVw8*9K zyYck)bFO%!*)s`ln?esDA4>2BwV<_}jLSlahv@?pur}dTCzpgMJ23_t?@TxMouJ2$ zTBEFlOj>V{@@LSb)NVJ4%XL_PQmKPw!0@t35zEWLp3kKi0IulRZZ_a%H*uHr&k&jX z0Zk8PM^bj;QDN>3f~0!*8{&p<^pr0hUTVIlk&`^{afgTP~&9lePzZ~3FwTR2PqV$<9BbrIJFKTvoI zef3d4$9})6Vr;edxVqwDZow+-;FvOTdHNW)vW>9w`;WEa4+&YC`N{YW8&z{_&Q+LA zXN|oh)f<=ng%bLfqMq<*_)|eu)@ApeIPfr0MJ88yypoh^ZFI4FYK{scP*4YRo)DWg2Eb?LS!& zEek*ME(<;4LymVyJXcaD9JlaBKID7-eYX-`8O4K?5nTnv6%!@yX{~-1WduRI!4e_T z@sv!*C&uqhhbV_eVPenC<5yEP-(#Y0t(vTqjfb#sTD)o92+jNU(e`U2FmAcAoDQTE{GL1D85MhfZHvzd=e6n%2+QGWGCjo+rwUOqG^I<$zXCb7n_hhViU`PH-Av#Es(Tb9 zyvw*UNs?{)tRw=9k=lkIaR&xq0G( zcrg>lPMXngIssuEBN8nSP#cOVrS!7HRsN$BhI0De1IN~v?} z#8Hogq|?2{WIxOfsBs3W`xiDD`Nzt>$MSECZ+AS)#vhzr2O9kRCdg(qwcoHieMYA_ z90nmlht{u%@=g|#qtaJ9Lg$-_Dx#uR3|3<=9h&!LeMCjB$KFwNlFr2>7+I2sh;DV_ zzj(Oz?I%0!t+~{DE^!YGNc;zCfJQ%L7Z&$;sqIjNGBcK%o;&}P1=mQPljC(Gg-$;^ zR@h%wF7BzDV|hs_z7-s76JN<3y4x~sLisvT{-J*M!8)+PMhFTpl>|MSi&|T;?v7># zCv{mWD(EFN)Cw5SR`mcO2mtb@mYZ!0qYCc?tL1%iB{n%roM@byJa6&0uob`4d4Z! 
zd(Y2sV0ocznc@~S4BTWO1@!{rfOB7$!nd|yn!nk|NI*%6&Z^Z6bP-!O5dMoe0p?bB zckqe8di3=48-PaEf0zWQRX=o5l*s-(r8}?xvF%XU(SzQoAZ`SSX=2rl`To@twQQy~ z+(!IU7M{iQ@FU;)P3M(R{4%LbABb6(2jebXi9r!&vKOR_zw4}!p#O?mm)SM$uhp2B z#83-c0SIw-b=7J6RE7|wRGyxa^F`jY{bn@$f~w_kB>#FMo`=J> zM>GrQc+9%c@s<$t*gM@Z1AxUKE^9nj?ji&v+_O!rI6N4u{=iFV(nx7$K%<&YZju#idr`IYn73+dv@77#!|hCBnP9$Khzk5GI{|=smtK|D3QSJhnNwW zxPkUeI+fH9F)s0NaX8@x0RpiVNNdjUdhRdyBWwCnr%ED>vG?fR-ZOD4kc_(?~|AX$X6WYSrz&UmjMX_ zbffwE_vjA@PHQqM{r6rNX|@VUQuJyVDniXTAZ$$OLiz>+wj%lf4kJ(YkmXLnxrQ&V zwYRk_7x3sOtXj;t`G!!@6odqHoj`mELPihYeQ1B>ee!prgRq1|nK@gCW(Xq) zjuDtcvwLTfLpsOaozRqC4sC1UcNJ@)DM>+OR|qq`5dO!b&+J}zoMI$l?u-;`dnAyy zPe^r%d91stU^Fd7LSpYp#;PKUSL(emi;L-ENmA$!NH{1=2{PXMLceDbxs_^%hqUt( z>Oa2Z+53K9_BUBc8=$Ygf2*nC7+VY+j`*2S0Ddtt1SQs^DjfonyBDu0#wZ7!y8Waz zS5l4FbG(7i8hZl#(ny3rAp=6USUhBeV#^%$R9F4~JP+|=5GI4y8t-JhgED=p^e;UK zKPajBbaV`Xvmzu){yqzGloyQmtKKR=sblD5QanuPy1~CuPH5uczQiWD!-YBz$S&0> z2=+f(zQ&6wvaG0jFurYmuh2R`fQ`w3O$}2$og2-y-HrWuD%JDy`K#w|vJbP{`{4i} z#eaQo$hW^ak=Cp2WZTB2Id>hu-4j;(>2u*nb(;P_dZ3Ht?SjQliU(_Njg%5(eJg(H z&quTmhJpc(?5J|GoU8|sbUh!~>2_NV?|97pW5Yje0`GlxZ$~p)T~S56N7{{I#N4AR z>J~(60B#=u3h3k!kJ1Oci)n;J$aeseFk4jMxw)a!T(?)ZSNE`lZv%5)S&SIZC(UKD zF>TzEjx`XH(diTBXo-X$uIRYiy@vp}QA2`Kbb2p95i6Tp1PCbm3ZhqvbwxOMibicm zE*5)H@%aLeKZ@{JbefA0zWd&66TCt0J4|zItHla`{BHCuX$(4XxjrAr6K^M_X-B9b z62LBKidPtLvJ>;U20OM(-MeG% z9^4)678^qNtOAZ2<8wmpK0!o`r?WLQq@o3{`H#7;{-&l(sKDED(_1h(V9z8nPE1Wj ztx3}UV&qJnQ4@%ef^Y;Jc;q9s6~qdOc+{pZnD6;V3~vWU^cRNLw%>@$Ne1}k4HWAv zC=^<3J)U2tM;3mX@d62(TrWK^Q@l37IRYevAjgo!b(^&o4b9;4$^bivQ^_2MAA-M~b2-i@pU&d2%o=`?fPuF!`56&CPZnjZ zaZ#2TPp;}|g#@VSr6F5#gLye`{ARskot$x+(#nk&CiRp*gY=*Mjq%x8rb;$wwmYovQISv)s z_f7FO!%3P89JBl3)F=@lctY1-kLO%WRXvCt90Vd(w67Z2(>VaX&{+p(1#0-gtJxLd znB`oVg*q2U2R@K;t;pZ!h2i}p)(*r>_O&{R?jhUtEX zIr!c>Jk8|u>1Jum;d4O6sKQ#HlRWFo~$d{NDK zFh=$F9l_S0xYd8UG3I3Vav-^vzdWz=mY#mw#TQjWu_^rFzxzF-2;GgEN8bq$E*$fb zgJS?cBl_=S8O$aW)ymg0!s1c!`L$@rK1bKho(Q{^#_Ny3Whi+veX1x^xxFtDw|M({ zJ-Q?Sa)PZW=WuwUS4VK}U86Xp(?&vLW8c^qr4wZ*5CrA2?|pI^+?j4vdH^$04H0$8 
ztqg?46rPQy-DFMp=pQd`Foa`dCg78%Ki<`0vC4c;6#IE-!4}9-P%|Gayn(r|z3qvp zBGQHO&V12ieDXc~FP!Gf)8Sn>ztt~y^eGilSz*p0!D{ID@H*z_CI}5b<}6g@NR} zU%JRX7z%?7+li}+`b5ED^eER)ra(gsbk~1KP{(K~EH*3NH|B&%zY zbYvglmLALi#Wly0ZZc9kzw6X?Ks3FNKYZjE_Lu}H#YMgk{SQOMXWY@oK;6MkVq1g@ zp5;iGdy;Ms(n?+GOLjd~vCU*2N|Q%==wYzzDxM8(jf!(NtMNdUTqC|D7Dojpc)dle zJUkhyQZ60WifJcAT`+Zn)M?aK12Ua@i8Kk_UH0 zQ6-7u&W?~0$bGXhYdFzXtyOzi$>R$#3)lG#@J2@gO{nZY0s0_dso)6~m>7UPYR;Aw z0(7#%yu2b+;L2^Yv(QQVYd>iVb{CwFVnA_r@9#;Xh+>;$+|o0*!t8S=)_NAi>P7|! zUnDp1o3C#LhNKF^tc%an0eVOD5Xf(6RT2+&0xpy397=#Q*p@|PuJpU`hcrgRR!Rd1SAAuRaV!4>PYE9Y<}cR?`U4UXniA#?xvtFyyMg0wrg#b zi^1!>TdZ7uMgN3s&I-MGlVfAd99X?x{^OI&`~4q({E+H;4sFq2f}Okw{eQj@T#2uZ zzF|}H^!xc4djCBv%vlBz4tR%OE$#VCIbzZxJGRs4Z)@_Tl2xl<)+cUpjHNG`xbfb$ zkk`jNlv$C?)xVmcf`Z~PJ|8(NK+Pa6c5kuY>G|>(Sij$d(>19w`y0>~p&j?`!r_1# zW_BB99_IeUPPt9&{T6w2{&~K?o&6+5ZJ$|UNS5SZ&VI;5gi)B*Id^5WXxuNfcw+^R zTWOTSPH=*(H~e^$8oJ(SKd;Wp2Hp~N zjhRrn6HD8PzkI4QbJ1snUL(HcCJ8NbQq0;6NUNNxsH+SC1WGQP0~-=S%JdJf4mQhiEU*8_2ySt^82I&wG5J5tv1gVj3>F$&e z5Tp^1kPfALfRP3P8M?cBka~{4|NZ7!Yu>QdteN3_&pEq3dtdZa-@lrDSF7!p!%zFB z_ufk#(boF=IVCRUzz5B;Ig!u2xeqHHA6vb(6U+5FwOndoJJr6MS1TeIE7uGPL|~VDa|B1Vx9zfqeV_lLUn71&3=0#hwBKK^D!D zhxJ$A2#@7_{oC@y?tZ+DMYs9Ho`S!G1>84g0?E+b`o~!EEl@iAC<@kjm1b+ zzYK?PMuE%<%o_B4XmZAEsGN&kO|0vk-jrftD12Jq^I5S<7ZI739PuRDPCX9e@YA*| z@m;~Co)S!1?=BD;ae|Q=_czFdFfpy79ib~z;)A6J3sSQ&2#Jd;I=Au{uaZTxy<5dV!0P78EAE1ENu1)g!E{2o#lpiqjIdDh;JkaglqE0 zg!kHTzpI1Ps4B6N3`s~2iwAe|7pmEJC7Pce|9x!mwIqERyMrZx6W~e=d9eW&D=X(L zCiAPibZ5-uV`%kPJ$@<-2uLZtJ0+j~dG_Cf`WC54oANyqIL!r7>US#^UxOwt|wBE!iqti$2qd0AZcD1x=eLHfgXP*T9$bLFfS(-=8zZP4`u>J4EE znDK!w4*bt8EDHC3jQqD)ky+w}_=D`!1rT+v4*qO7G>woaC@jT8Pd0Tfrw#KDdgosllfgx z;Pndi(WCzVx_NHFv}kZedD`wefTSezc##%ARqtThYp1_ypf>6a23_@@JRjs35Ck{| zJpII@m6@py@%sCg|Fs`|6C0b-949|G<`(+aNC-Olo|C*0m|msOHV(RsD#)lWPOf6o zP8o{OqM&~1gP(u;WlbT4@!P6ox!8dwPxlv0NleGoAaj>_69QT;q_lhxp(`K~Xe}!M z?j)J^6KTI;lrO6C_V384pw=41idtGm2e6fbQ&6)d#?n0MwE+ch0H|@<9O^C;A15LR z!_6vyT21}eaq-C+$N#YKORV6|9=MF2nqszXLIzB)*{1j%ziCYz$Yt84W@*vPL?J#w z36)QB!e 
z2mi`M;7o!Fqn4(?_2=qDGMKjrO6h!v8M7)NZXy!{OyYai>VHn#^aR!*Hk_ zKU=&|npDus-;H5=k0J&OZ3LmKi$DC(5p+W#Nr^0*KgS;DT+gEf~NOuXu##1Bu-J|oNn+eM`=*N@ZPt0v1acS2H8UNSU!zB}ET~VX2l(mQ& z!QR}LHSOY}*Jr^&1hIY8g#rIyRP)u6MSQS*8H?Rqq#}JQ{kfX(u~VMHV`X%QvnikH zCvN$QOv^PkwI2j1GBB03-{^^YNJo$i&E+c*DdG`c2}scevPS8chqE&CW3Z z&Yt0a2Id>p1F_YDM$@0ues#BEt-*VvpcHav={XsCzNcB}TV?eSW1w^>uJhS9u{f+g zc`if$GPC-D5XJF$^M$YUo2>%SO~g^Ri5jrdikwuzOnRs1*)E`mFxd8K6KBFKPk!qy z7ASqg_T3FczR^Z;sF2(QFi)H<4hIaW88}fe zLz9C*LEDLkX@ISyl##}YyemEE+6P7eeJyo^lh)!#gZI*CtV*ZmBu9T8N8CI8JK;9P z@)-Au@y~V(n$M$AW1psnk{eIp==FIKE>LOx3K?Efc%(Gdmzol#8b_jtl52wX?j7J| z4V^7e1~ZvBnNWqGDo}q}rvnvav6<a_I&iasn3Bnl!9i0GHJb@D`d z{jlT<$bXNkIut*Iq30XYdr52~!ACxE;F8&H&Dwn6)ZA2qj_UagMVSm?a{cCCT z-k{IZ_^*j0Raxr|S_BIgW?x43uGKPeF+Heg0+b$vfH;(2hYu;Dj7ye+B!aGts zPvbHaa3*F!p_@-gs780)zZSk4^VmE3)^4HBa+fvsmzS(4^S?(fpv*}G>XzxG%DPP18QYY6VIhQ z6;ApK?V&&?HM|=7=kr5?wY7rsMJpoAzbT@A^=qek!g|NywTj>pQ*oD?z5Ts~rH+hI z>9ndlrD&;!`V~%(U#F8UOc^s&Udm7m87QxU`fwbBBi;?t*4C{vBp9KC~$dk)hrL6%14sY$&5y{ zI?{LU^6mBhwe(oAf-}wWVka5|bK(#wVobAjtrxOe4vXI?nBtv#n2prIJnwQ0h+~UW zW$GD<4EY&3GQ}}#DMj-O+sa1u*+g_-S(5(LATc8_$X2Zz{0v(wg%kF@t+Al6@vbJ0 zz)RD?QHzIdtCfu&%W9DTf3WKK+@6!~G_R?UWVz}%cR~!4RV8?U2Rgp%2py?FC1N9H z*C>b5(Qk#mk9CPN;SAPf4R&G$($lZb>T+~u)PT<({ouV;D11K{yi`+gvv}m6r^Nj@ zH(hP=0|_G$(}BOJAoTgH>!mrRr?pi!ty31^TK46~-UF#N#IE1G|IN82-)e$!1)K}S zoL;(g@u18uZysA;MP2ti*O56|P6ZSke%ej?J0uORouD~(d5dtqHgY_y;i??w6%i@! z-&-RxQTciJie2cfN?bWNDOoXFO1c&uXOH>-ocXi;-{}$DSI-3Zn1nWK#*7ym?Gs!G z{>cX_WLkc>lj>yE;klhpMpEIefCd;aVjl{dMf_Oi;YazzN}Oqn@gcS0TVsSS!FT;O z=xIve!4TQn1aHA^)KFMH4n{;c=w9pWWX6&&)B3)orT=PASX<%w7!j7@QljGLYhSRi z@?W9xM9N;v#5Z$lgH;{$4Yb)C5I%c9SK9wlObOy-q@S4W2S2S?zI&0tp;gf3T^Z9Vx=- z!;!H(J~*v&_h(4iyW6f)lS&#j1dJ>o!G|iIaErvyFa?6?f@k;y%q5(~6`XuqE$>2! 
z{JbS1j*ePwk3mTeNHxa93U&|~XXH$xbS4iJ6n|2t_f{W7)mQDGix?gszdmZCRS)0G z8?pPK=8W#-DwU0Cr9_Sih2dYSif&qo1lr7H1OHJ*=_+e62uIC%y-d;!`ZoK zuvK==SL9X@FN84tjCmeT8$oafpHsN1U78+*+iS{lZKqkjH-pT2CZ>!v^L&{N3j%Fe zb^byGdxsr2=(U=Qnn1Jzk_4RHG{a)?=q|!9fwH$N=$77U-shk7`d3>~VH59uwV-Ne zbw}&t3@Z3qOk_$Pi;xaL`F!3Pd-0cs0IgG8+1`!qyqmeZx6aWHj=C7YP}y3AnLH`; zS-VLq*ckg1!YevchYgN2UE>m_bJ>gsr3t49zYoBAkEc2Q<_`E2BlG?7g3= zY2B3gyj2r>UqoJwH0sgTHY3`mFSm(mBt2FPGZCH~*|Ui^$&?b3p`Tcd{M;%1{Vs(N z?X|c+&(|TS=#Ggdc$#GR%O+$ApB}LNo9W_&Uu|>%WWi zpS7i~jxgw?C*n2y_a6+tMSPwTOy-}#`3e(OnfdJjM1Hp+ETXZ;=rPi0(t;DQ&op*OSKg=hg4v-Fg(XDa zH}R{f0Zt7Pz>if4M~|P?-r^>f@AU!|Ipo#U-A9&v)Z&s;atX=0?tgVVsy)KIn%^N| zkKgj!Y6+#kEzx{ufKjjeVDsrHqnvR)V+7%u*<}h0(Ynu1rl{anPMFBe?x)$mPNJp1 z3j^Yr<5fQCAcJ;40SjI$?x$QXx^_bfVL)A8R$jmO*Q|u zb3s>;xLe>D@yj)xja9bo0sWga^_FjB&jhqTPoK8FPb)*$|K*icHob&+o)#ty=he7~ z2&zGRMKpD9Fq^De4hUo_$ypkyH09&`!qBgJQj!ylqs&sxKtR^QEW!pfWUEvsn7~z! zjBWW@P2YaEAtdH)Mb!4eBexbR86yD3Sb{2Vs*9lzNMvzyi(Av|uxo#2I@g zrt{;N{_Vq-XEhhL0O$2U6c(+(*&e&p_Uvj6_+B^nl=&uNZ51N%wUEu&z*5s zI_^uk1+Dfp(yG{Gx`cKS&m4ANP@(2UM?ZfPq{_Qc$R(c%(*BV4+ds8di&LmzqN}i? 
zc3x*2B)v#yHT+(h@$pD%Sf7xMTRF&hjtZ6%@YL=&-EfT`$30Fln_qQY9>X*OlBgiK zDD$Tw6gd)2qwSvXNuQ%7LY>tOM`OJDh>GMCr;ciMpKQH;9s4@>UoqHnnLly9q47CMMB zHdb#Bj|9eFPyI=JKZep6{`!Ny&6}4BVkBWqqhEN;Jn+&B!Ms~dAT6Us-b$Aq{|OHP zH9Qlt)~NhPG!Kc?20YAZ=-&_`DZmGM|?i?Hxb99t7$yyV)_Ik`0s0E?PR7Cakc8Z*qYZy zg3eucLmV8Fm^nvD_A~@$ec!Wr<(l6aB#tG?a?npH_8njnGY@w@AyCv(bM%D**Rng& zW`YzAC3q=@om7s9uHKRABbf7{Jt=P)_9^ia!6>p+aK9Hgz$QnO<;GL}mi1f5dV+_mGFmVb;q-@%yYb3rHGGS>>@UlCt4qx*U;mx=GeW-`l4)?3x*mV z7Hhm>5@(Qf-6A%_!~AN!QOMON_zyk;y`IIwc%Ud-AIF3Rx@a(GXPm0a@uTzfD1L!N z3z;H+iostHl&2Sp^G2c$cQ*E6ax-^-0z#VOGvmc)V6*J;9_%)20Zeqi4WEoVywUS8 zmV<-9%_10-ux0$G&>Oa398W)Bk0zoy`F){xx^c>25jVG>) z#^F_+|L%LBPzR?9$>Ri=j^Vxa zgo5X+!OvOo9JE5^h@SoUN#8LN@PW@LU|-u)$|z(Pp6=qlKNp0_h@`)}&f(w}7RJ&~ zDgVXG{0(d0=Ne0-Vj4F{AYIa)PCM!xPwGCppkXbY^yI?`%fQ2m#(i!8b9t0E)}717 zrUKdQ*io*}t+z$|#WU~IQcegFG>`x{elvG_{i|79e?(*4X}0KN8m?O7xioIx_f2Et zzb|-I|NKplr(gL>c+pbz+HdVR^oL+}>dPkUFO2Q+FIMuBL!->=WKuFm=)bub6nGB4 zXPebD^B;9!wTK!Ddi{0?Gp0H-LPRx3#L48|U+NCg4pst{ zQyQw?Q0N+McmAvW?EkE5+4xA7{RYgGi%1ir!|kKT>=^t$obautSM!49-#y!EtsobpkRup$%y z30K8u)>BFlpwE=q4Rwg|!ichngqJcw&;J@b&3bLgELR0zNrC%)^crgNim-=*E z7=T&h%OC$!&M_MDQr_H|hQdri>%y=&K-hyb0Y>Bi(2dS|GAp%FC1xUP5UR6zwjO~T z5ooOo+e<`Y$Z%w*!h4#*KRvot!w^piCiTW(Yd#@IX_vidn);~8FQ!vTPEO9r#|L^N z0Ij3=s2?kA4(!6F5c6dxB7A&&ZfQ$tgF=NjWH64`9-BNeC{0j@N zM1~=3t&U=t4|6$z=CePPfh|Vh7cVhG4sISd>#aOcHj-!yASfY_$i%>tM|{t;B6q3# zs1(G=6iqn)K!3m92o{ZKw3#T52GI!bJ(7#Fl%GOR6jl~C)PS5xM#7|RNy2}UcI#!e z9Gg8>_uKS+E;ywB<`HM0P_P*>TfYSg*LW={r75SJ7s%8$Q^OxE2_#n}a9X>2{x%-B zY<=(kO@X|m;L|#h=C6(#%gfVgE~fZ|k(i)TJkIACmf|`h`;XABtGR$N@%-Ewsb;Dt)Tlz2Ywhw>U4gaGsU zvA^WeGvx)^twXrMKIZbdw?Kh;NiOrenK`EdCt25q=;BlGM!-r`P(UplNew!Q#wm`L zs4L?haB4I})K`*A*0tknk&TTaLK{yv0Cpx8-B}4$mG=uR<}889{oQTkvo7%69V=|A z0#d@EqKlrW0Pn2y(w^syc8_gkTlBtTJGvH`^N|UyPNfeupe&^dy4lD-PLF-gDs|y+Zcg6R9A~y0bPKXAU_P#rpC2mtH!vZZ+UI%;^A11g#RBV4 zuZeLmmfd~$&24ZkM$$eyagyO8_<;_YFu%EomgjY0FgD)x#X;xMC{c-xO_fQf{O2#a zW>!{JYuR0Q{T4sHX!2_Xm;ZqAU@WqCHH9G+#bIle2z=YlL%oJp4+Gq0hAU)5RXpR5Zc-e)Bj%V1aILGi8)E|)tO 
zWdqOoS|blC9NtX5S>3Uf4c?XUmVS1O7;gpqL`io$xbgiBKf~?j;_~IS0Ca=9I*zf&^BmbB)A8{{rjCRm?$>F)UuKN`*$jr^nk-IsJy*YPCA_pXr zV@%MEh6>uW@qIZHwtK!jn(KbE%NPKcWw@dx*mDd603rbwiebt7tcIIgU9&Ogj}QpB zHYc7*T_K>J7imRR%jZ)}eIzM-vM7vg7ZQe^W6j`1fKiR$3IrLCgXyBook2x4rM^2tV{ceJpjkCuAjy zz^I3ZD1MBC`7C&d^ZFdFgJTc%3b@!VI=WuGrKoIgV(@SOw_+SwzJe8P8xolTonZie zjTHgYYriwUSznHD>eb%_r%k8jXisQscUMt$K=|L!T?t`qqG5=qO?WQc9Fjm60>lbw=?<)AOmvY;XCEn+^9U~nsFoE`q)!;F#q8y z2Td>{Rw1q_+_a7O+}Vuo{eumhPb(C1&0n-@va1h#s^42Pg^GN%UegE)*B>DNi8=EI z#Umyw9(xwkksEKzT@eTu*f~OmB=Y{Hg&CQ?qpY9r)ovigCN1se}u75DS zRpndJ2)IFI)#@s8Y@%KU<9P`mWDmG9LHWPiWYhQ8%;Li-K+uCX`vm)1i+AjLpbEk7v>xH3N@JcEvhPdS6Pbgp0g zBULvz@H_~Ds2l#oNo1ANB73(^u1E}KF(Icl4tiu6uV~d}Pq}@UN(e#Kv*m85)YJTS zKLXc$46{_O*QwTUn zIp(FSLgLUqSCKUxZ+*;wqZG9+P{_??BP}eo$CzGJ+ly$FcV5eSJsNKDB=)~4;HWY^ zXW;L|*NSN!L8A~oOK0M5ISfw%qDWvzl$I}EK;oq%iEw(uHBYr01f){uN7J9~*{Zp{ zqt?k3f4nO^2ok^QQkDb^ZI1N9`w0GV2!!HVYPNQtU%l1jPS1}#T_HWlj4pYa3MjcO zh4MSCewZ#|bmBt_pEcT1bJ9j=Am{(c(Xs5tNHXky5F-y&K$Un_^w2hQ{2g7=1F|}5 z!E(Ked~LaW=T@hhp@_TX1t6FH(4UAuke`uty)^Bm+T928a{IDNj`x>@W{(FcWR7Z$*#-96yJM^-EZAIU|g&E5L z5NUbIJ9j@|Z1Ve?3UZ^qJpy;pi-9Vz3Wu)UPQ2~3+;N3cwiPIoH{r#lrEowz1^phE zl4Sww=7z^Xgb&FGz)r^h#751LCZrEe8G_Ee$wZ8aYgBLwxtE^fqwD#@3GQ6 zJOf~)nL5?M8!|`$Q%r7WCxYLUf$;8hZB?({ooB)4>e_j{yxw%VWm*#jh)@Ut=31}i zJ$86R2-MououRY5-VcSbd09zV^iRHr3D#oMHLd6JwB_+i`jb>~7m7It!kJmZmHJBn zor$M3ZaTGXxY#M}fN(;FB{u1NuMZr}0(RRFnUY{K*oWmE+saOt5w%_+ja$kA-HL7w z#})=Z1r$62f?$F0l9ETClfAz7YWze!(u0`0wlVZl8S?7d#nGC)r=?-|3qwUTn4-2h zUDG26U)JZ94C3Q=WEd<21NQ{Lf#2%`x*VqSJzqdj^vMg~^Njn50p4ab5(s%mRH=IJ z)WMDM%^F;``{oLFRLWWq9LzjU@X%nF0iKLHWl{_z0JJCJd$2KLwwp3UMbku=$gL3d zijPiyZ_CMmPvYVd^mn~{DN6FnX}w6j$IV!b;)ZdDaOGc+b4?oodL*=fcHB#qV|DN& zw3R;NZHe=4J#Vjv?4`z@?eh6T$8Ew?@2Bk4nqg%7kv+p`w^EH#Gorf>z%RASGq*sf1Ul1`Q*?KrHa!fOjxefE1JPhv|6l)bSPH9U-sprWc^QG47_Z zG#|AM@WE#$`Ie?Fvz072i6{O{=2d^khV$qLOEu*t zU5@^iR?3s0%^e+Um;ti5m@Dfa)o<0+#J~ye9bIDbHLpIwK$ROzlO&`x{y-E`*kt5D8qs9_IL^3^PSA9zS-o&kBRjCgr7$PmWQ6P9EN|TVh_-#0hix5RD7bStR;0{7l<^$j 
zMl?9^5gcqQy@Y!GUatYVS|%L5D!XWF>y5m!-?3K%MJDi!2$Bb4iK3S=Ez z#09h+)%qcMP%ZkmAv=@)HG6;F)WAw!3RgeeqOl8D_2GEi82jLt2cM#9v^D(VH70tP ztNEnTdbNrO-7n9f_122mo@Kip6)(2!EWZOUPzZ{@+huIHzV{;sNJlu^mv`ZpZTBzK zG9>)$!~xID$OuJSJC_p&ocEGqt88Ixdisc^%j-@w3~eodr?;`$HIsJiT23&c)dC3g ziODA&yXR(3Ul|je$jFlcGmu<&WteX<=!>yGajnHLh}0H3u4Rwp44v1LJ=ZD&I%j^C z9uMR4$?vxLSvh2fio0yaqT9{P&K~!S%#$!~K}IL%W8?kzyt$?Qx!&sscTj~#M1ZXV zvI;~5%DpN07|P!BF+Mv607Eo6859C_;s+u0vxio8HsP!)SYh%krTk#vB=Xi@SX3j} zfQ$$S9qwHMj%U+fZ_gGZtC{Wt;V9s6X=}6n3mA?EjBn`h&(EQU&cn>j@8?cebH3*0 z3iSLX3CF|~5EhFhf2>4BhOPgBA!t86itWLl`>^dL7io`r;&9ha1DF1vJ?>r3PCxX`m9g#e2 zWh)@V6Ls=}5Mr`;q3(O=a^F?KLl62)O@IH;8_f~`y$~xSvOdIMb#rbv!j4iJ1?(MA zf&E6wzMIWhvgS?3PkQ7$X{iBkHTHgx@TrYi_?!ei{JYDJ+Djn=-ulp#G&jw>sAS-( zfMz6?wRl$jq;Kn^?!~`xm%d#wB<%4-9O%y0+Rv816YNzEccf0RPf4VJ^P1BYnJBdB zOMjzLiPJ#dbCyhXy|`eunr!t50>qZPVa|^jegolZ1IY?A+OI6eyKZP-%rKwp5sACA zWO&D+PDC{}576oIAnxwn5L2|Zs76Z$GU} zl#=$Ku?)4#7c~r`4BXAm!%C&yxcp+nB`_}^gcS**V-kxFks{mA*iC&dJ|)9QI~Xs}euZ`~k75GXM5L_0kx8`~zs zjPRuzYvh9mFw4dAD{4`hqb}oJzLx6WG+Fzi=@$!rBiYF35EdP&0YdcleTgQ{}FY}bL5nk;kcwQD63QHB) zzpuYsFs-9o&$_Q4Ny|LUqWw(zf^z_eDU^sBFPx2<&m_dOb7ZE2#;GgyiIfqo&oK#} zEO9J8MhmyeC%cf&7fes(IhlCn14BvrMMqjaeyiMGLpL(F$BUYpOPWglmLFW^Te@1R zd$gbbF_?5~+MRVguLeonkH?4i7nKc1Q5sz?+1G}`H24!BkDtsJKrP#{ZjtFdV~zOX zB@{?Bq=f3?QtSCwC?|HH{lt9Eb@lRi&{CSR{lgu)p2H94Wzw8b$9FFHy!SipU&kGm ztu+EL*f}_=Yife|@u~r3qUCJ8U0}(eTnE;FB1k|l5$fIy_Gu7@)N)kA6{hG4DdRQg z!m;dP*#2xiDuluBjCgORk<>ENi{fqh8AsFVy0F1ZyZJhN4d35MPLs)t-CmoX+T8weC_^-V+GhCmcQOZ*zHMJ6HSPUaiv~@cY$QV4kw26BHsmET}sZ(feL3U-< z$elwOi46NoK8gEg$tSI(j4}F=OyGn>N4~W%TrjBhObQ4fB|m;I!^S1&XcCcmQ7}{( zyWP~L>G;v`j<`IN)co@cYCIMVBDAMOO9>H-T^<$JA{PhqA+H$&B>XSNb4(T&TE%W6 zS@^}p3npB?eEH&ZgUzv|-F{AUGvaZBeYa70_h1S{Ifxc>2102hQ(M68Wz~#cPierl zTPm2`9K}}a;^KM2rqQ`6d#yo5Pv4_&nG>5ft+F@W9Ei)?K*W*lD|olMdms0a&kH9` zYNxnO;>TN_z3EoT+KR`IAA4@p=7bzBI!RZwZrI!$Gq#{_X8SO_EvInbYrrpkT#B3* zZKodbJ8(IQl+U6qEfrh&#d2&=>CXdxyyQ_Xp+7{n;aG{zXU-YXY$@7ynYA0p)G@eM 
z_M=0qbh4_U0Z|W1F8l7ZS?;v$ceIt=<)Vwh0mwDD^JbPTY?U#9hJu1%DWZNMW++2s zHGc8fGIgS zEfE!kCGw|9xgVn9F4Mk+GLIU4z2Nftkp~Lxs7_bo5H+<#7C~=s?-6FeO%ZZ}LpHAR zcD8+arnItmZjQa>0KYwi^6m>|?)diCke^X??#(=8OcGh@hx5GP+*(^>kASBqATAyj zA8-ak?&!9~93MLiXS~exShzw~R{njNdHr~kW#+YH7D1f!g;=KB#k82`64S8Mt{pc| z#n(wIliieZvFl71=lx0D<=?6~;nUONJj~2^QzwP6p&*fpu^M+$}79=2S?>b8); zU@XXh`^!_*X=!U`?eDtH^9QGH;K#!me7NsMFz*TO|78nh?@wtP&NniM!abr0-=_uU?qJ3%v-jSTMj-d1S&EM2t867&Y~R_`GkQB@j9F%mQC=Y&ix8B97I4KV zg0-Lr?Kr>YBx9&%ssdr8foc?F76>k+Cb=s#D?b+aDx}HRb2VrwJ27-}?S{?oU$*47 z7``PT%YH!gbw9CKhG3V2_tEF+zPH<-vC-SVGwx#P4p~inu$p`LT8bmvm1N;A&SPac zf>Eh?vdc7rF>X7JIwmH@#LEjiwim~LmAtaMQs#b&zug>{fKCizVqfF$>PmKabLx>l zzHQ<#UMM55a)Zm;ty1Jm6)$sxMZnE%vU_2lnzIt~N5b+YxVOj z;)gXUFg9oqM6)BG(UGxHXp!OHH-q*wpKZTaSky)hot}EGJKj-b-v?8g-2<=wNe03l z;jX4JVq#~9he=4+J-EkxcT#yTDzO_3|NYyna#)$cjZEeyLBqt#v*D5;3w#Jap7TLv zX2hl6kpkiz0o5~n+r4q+WhcAG|09D(>e6cjHZkF<;e-;1n{)Kc_wL@0Iw>hh%=0Ml zgI@{-rSse|$I`B@QEXI{%HG^h%#N8gqx`XsWo5dk`{O#<4qM}UjDMz3fd!VjC>8%Q zL-f-3F#3gMmY>MN>B4;o*luK?@Y0iV#e=A^r?s%K_~t81p(`;v2fI%W4-c=| zY~4O`>Hd;j>(@fKcjFly8Bi`|=g34` z%ckJjgvbQy{`j6=C+US^mgt(0rPQ`U-Tdcj zzI}z%8X>LaXkI$*@M?v?r^^P_kJ$$YhfLia9ERkHzRo*bJ`2|=1r|f!E;~oLc6yK0 zD43}urQEX1Y}2-M z(#Uwh3U;Yp6q1%kJvclJ-ppDzwYJ6zp6aNB$LRJCA9GOjU9N(OtUP3?K@U9I{t#}u zCnzl%zCA4_4_`LV_wMYJXi9A}u#lL)9kS4HwXV3&7OCc|AxA?2 zwuUA)yn4Mc?=EsD=DaK&41AA|t<~COpV`N6&Qhm+8FWAIy&Wk!CP*8rq#^uq+@aoQ zR&+&@3SM#a`&|5cYtwAg%F8q4w+2=cG^_{NDP~ktgZ611dpE2zJVH>LRwEdy$6@Er z$;=5fUh>u{^LmYR)R3I7o(e!^O6^0l2(nLZ#s!+aNFYb~hl}yaJs37WLq8rsQm2K3 zO50jc7q4bCAP@xwH>s<}A~SdYhn74KTqT(OkCT*LW&~g>LE3XJa3~cG^iO0)i_3($k~7+RS2*$ME<9jIl)l%x!}ol zcA8QiSBYhB9b?Q00&eK`(j#D6ik4Q4rGHv({@Fd~?LV2hc1fh7G0Nz_>oQ;LfnDyS zSEZz}ytPpaV}gLYMz(cb4%(%|KykslQdBP;nK0JbQ%m!hhwiq4CpDY)afeVLmR^Tx zf6&M2+mgX=l~W0cPw9L8`Mz=Ky);dAJZFT`y(jjtZ$6tq{=1BdpYOc> zi^U8x)?gK`g@TZffreb8Qq%YE^^kXv@Z6r68t?pk7DdS}{$!|wKZ&xe14C&z8XW`} zyxw--6^y;<#*4d}e2ux_`qsntr8$nAz<*~-*FaCIDjI^aQU7waz3r+GIaDuIz@a&J 
z(dy$oq#F>^Ya=9%YMIrSrZwk3j*8r%bX2f9y5_}GkaAr#`u9|O?RHLT4H&z!;RM)W z-+Nz>a%6dl^|E{3l;PYE&=JqO}_hG4rO z!%FpBDcLVYW{Qv9onHw$+R1#gd+7CSQThcwlQJjr<*i<8>%QX0R~0`4{PLnf=Igkc zm`>uyn;(zyl3q|ffGN1e;GO;;gOPPsb2!4f&p2nw;}R~HCU#K{nr?}G?@$-J0({`L z{NmEl@T@xe!@CK`C7(qkwZHcOs@RXJXjuGo=*JwNM{$yF28W1u@pK)sH;1-PCX0ef zy*2Pqezc8xjn(LY8q;%jV7$48L{H>Ni)a-RcfkJ7+&SOb_ z83ANmhI?s9+7&jP&u@{=gh5D{24`weJ6H4PZ*tVipHj8=IO0`N;Ur`rN%EC3_}wSd z4+(#Yva;_5l*_)n+vwyeaOir8PH3m^{W|dHC8POI>h>7@a6u1Wm97a{mDQw5ZF5&o zG}2XIh>eu8z`|ku##WfHK`G=oX{!x3rFu8qwk4M7G-KLu4UaeT9&aGd$cHK*|H>{# z>qvGx-Y<`XTa7F{e5c`TN*!L%?%s5iD(T=4$+;=SG2ng1Q^hk4iTE=h#R?no>^0n@K80CFE$4XWH<`4UIQ#k%5 z6n=R~>kEGgp?j;>t2iP!Ui~8*O)$)?E*Wh^o zRhS)=?5VH``Z@Z^Ce~|dG9liT=4b*pK`pAB=Wb6VRkl05$BWg~BdP<2C=TaFM>Tsm z)Leui>kdQwZNT$OP0Q~7kRRk;6yhDLt8TeP6wXW(o|>83fuQ2Mhw&>qPhg*lU6rvM zB{AOoP7OpwqspP_5p6$G*z@;9(eN2d`)5i_-TfboyV#nCGFN$;pkLnmayT?E%iIib zICgaU4Wv%b@ooLteNx6NT78;}4i=rk%OoI2j5NGsZv)%+zF+cqi0_7I)7bkV5d`ys zZ?i-W#J#u8l*a+GH}oy%t$e@CRsU@jWcwFC%zS9bv?x~4dJp?>^OmYD}fhDy-7RXY zbg2qYq?V~UR-29Qxj;;9*7-E#El(5;(JLS++sJ;P^zVkLh@d>GcWk9Q_r~|};4vHj^Zl;tfr*_NmGRBi zdBa9&Jxg(Ul!9%>Sw(lq+d%dF*uy)x#&M`+b_|E~H5rd&lka-cB0Njh8!hQI3+flx zz(;&Kj~$WB({HPpf4j*Dam5}Y9cDE?v_5;9^k_JfG&AAhG6@Pt34)%w7j3vrLd{ph zOXR!#3O%n#-zfpDsY^wBv!%_SiPteb@_M($9pe$G;@ ztWTsqz=jcJ@pn&ji&+#EX|6s|rY>~)@2IbKL7nE?@6qyCVH<7Zs3z_MXy9)5*!QIJ zm^*>I9lA&_CV`yi()4mDSDhbGL#T)xAzyU}t7;}X;vE_YnFABXnAOzc&Uo^}iKswW zb2tAHaW(oN@F&}Z5P@Pgj9hHp1I4-Uo>wxC)<+lg493LwgdP3$4kJ)wbPQKnKK4vl<|_V44A84-@% z7BrG=0gN)oy1KcOR?NlcqR)uX#2Or_IkNq!vQKv{nN3inG7`fgg`DytE1>B3>PvDy z_mRCiES_sn_0G=ce-($FE(giqvAz3+G&*W_UilP>y-H~A9T8@t`9s8GJ>wlI*~nWXHRUr0!W(~j5?6W zrRL7J*H(jUkK@6U+6Do4499{gt1##9evg`u(fya$C-`K?tu~>z(!@oROe-@}+ z$7(ftPK~Y}D-H1Suw^36P$EqGu|_7wA12ak3x^t-a%u%VcID6()_krZR{S($t)Q5w zuSJvt%SP-rn8o>i#KY9P!l+#3_oK;VQz=#mztkJd4$>Vw82=#6Y+PoK?fsTDa%r|b zJXxWXieYuz%tWC(Eu9+X=W)=5d52s)f*me=y#HIs0rkIj<(>7Izr$)fOV~ZWwLN&& zJl%_2i_~lpuX3b@XVu_W2Ec^`1fHaqs`VNpW8SyOopp$D6`L27eH8oildj8*SbPfW 
zJ4fPUB|({06O!Z;DSNS_kEIWy5Y9pB7x+I1GsvJdQP#HC=vcu&zr=@j&U`rx8T-1OfI_uta3+%>ze4luSe?CW5jGZXTe@DPPR)x_n0 zA{t&0Qi}Y0-%9@PWDE!FM0pN7W`2Z{ebH`SO~E{3G)6M}-`rkTpLX6B6HW<;2sGHy zK+3qER9o+o8C@%nbYRCXI^;>L7kwi&;9lbIF|oCf-V}V=`!$d5*|SVuow+=ABSj{z z8)A|Vc|;CMO3!-G(^1wIcO4>H7G)El-+hzWR<>^G=RI7ajh;Z`&$dc`3=zW}I`t{( zV;K$I2t^!o@@{`08CF~w@~JFh>sAvQ-@O0tUt{I-gOOG{ZZaF5PttGwP?*?}5*lSe z>n&0mb%^j>0!8KXUnstZ8M3f3j=|L*r*T~>mh@4uxa|K&(^rQ@y?tMk0@5HQ9TFmq zbeGZ~NJ)ouNq2XNq@+koH_|yscXvt0NcVf@et++M_~Sl412c2ZK6|gd_FCuD{kKew zIGtFA7(pIIv&wvj8bDBr`@fGO?{s>y2ZCucA>u-h3I%ntv{gB_LIiXAM zgfR7b8|->yUlim1i)5ObDlW|*``V_f14pG$q&8Q)&R-#fBHHwrVgcTTn@q3#k1viy zr+qY9S7Q5C>^RHdXIT)2;D(tUX`JHU0Wv!UV|O=pOSfyY0hAtb#oC>a|yk@EJ*vn{E&$f72L<5P3w z7kdxtqtQ{tH|Q5|hI?{j&v=xC<0lM4@m>8BM);m7dvq29-s7G)y+QH2%O4$fn|Jdq z4?@NS#Uj!zR22}d|H85sYyjlsey@hT!2=H{J6W`Z8Y6Sob&Lz%e=MFxh>pf8Lt`E0 zc^NaaP$qu(NQH_(1!vW{@M!70Kn=coJq1S)?7(e)XA>rUOK49F@E~?JTlA=u6nxta zeIyEI_t`KBj)kN)5o+VFcXL;9YjJ_&{jB{rj|2yE4iYtAAbS0rjcu)jaG5whfy4f; z_@xhQTB3})PpwsW8?$~VvW+_of*>`xngHaxE{8*FP8+{__P@O#Fn2a4Zof$`TuV); zUvr|o=b?DmMhK+pIYm{b?L!N6iKp*VkmWrn(5P6SGT_Inc-S;w0|CgnV@kjXmHG&t z@w!G_@H*ImmkY1U*265weW}p{pAm|T_^Rc^?G>MgZXEoGWWD~60m)FyJFMwG=&h*V zvw8~tp!}eirQ;-u(NXOGRxy%h@0wKZ*}X=^Vk!MmweWhOZUQH? 
z1ME4oJf_VGPl&-o#m>g4$Uja(`&4)>0kZ*=?|sQ6^eDl}ALN5-zC`99mZ-QwKfmWa zWc16_7UMjiSE^;%IGVTgavxq~>n(h`{WRC$gp036o2R@W^e|$RJ!~|b zNZXbXEPb;l@BQD|9Nz#dvyn-8=c0R)Pq{wecY}NUFt2pD=z-Hs3owR+1MU-OrbKIXd&%W4s zatj(PT$S-jT-GW?Ia&T4({0)4e#SZC`H>p+;jKo<3zvb55%;O!r>diZ>2Kw04;PEvyZ+)IgMU z^!B0#6xbF_UR@6cibgM9pt562Ce-BMWWp91B*q)^v@rl|+U zZDBs~O5em?Jw-U37&r!@M~jw=hxX9Xma9*5&{hLui@ezW@8bcPy~hJ3>9>yORg+n<8MCZrS zasWu`qg?A$tS(-Lh8+--kamIgxZW|- z=^MIS^zrY9?3_j^+b_pot79umRWa(D3d?@uOX{`O@5e_l8hdKs<$GbD;Zn@L?v~#3 z`du|To#_r8Yi?+a+hr-50I4<~+?W-@WQ~*i;FAY@o68N>ecz66@Vn#1xQIR6Xyw?s zQZ!sJ!*jB?`CO+BArd8l?Ve+IL?LA;zOpC=TqWqPgq%RMSi6)!h{!H8-VB8!}a+6$IrIb}m?lg+DbOjYjQGYU<}IvcqN-lEi%om(h2D%%Y!>$G&T%6=Ap=`rXImPL(r zBvC34))7$!#Q;kvqaUZc49eUWl;B9)@e{Sns(!EFF1@Xmz3|H8B(TLQO}K5~CzabC#zr){4AEQ1xZ z9714gcNys!`q;VNTDk;!Ia-oL$un!|=yzIyw!UN-A(q}6L5UoOswpXZw|aE$=5aO% zQ<5Dm97zV5ng0jCTk#w~{S4g)uOaR1xh5_sWZ+P*AUb;4%^q!#=>CmSziw>}!C>5q zdR|s;DP`#^t$zTvaJm(WthUl{+B5ODga^b4 zwyhkQ>C!yK`&|89Yru)~<%S|3PLzY;C;My%lQ^ZVnzrEsZN71npJvWoBVw&-C5ms@ z-l|akGBLJ7ibBz-^ezhcK}e_Yql7v4d-La`uht}$sXaeblP%N?yqM;uDEbIS(zgUc zzsC8(ofB1s%X=9z|o%&)zJHNaGK8qfi{{D{Sj8 zmUQo_XOizRP~tFY7xwvyMt|zF-R?^bZA@@p9)Icxkd!ATp@5uEA<4W81>`=?3t#Cv zapF*mhDES!RSphlnM&wfY6)}PyixrT9JyvL0_2DD4gMiPVPh7x>~HyFp;a6h*wg&9 z5($94cWn+zl0ihldP9RMF{GeqbXK4p9rInAzIJ)f@MPt})w&-1NiJAI%SPS=%9dW&>)&a>pmm88_(dM!vHPZH>F}&D?PEw6QUd+k%%m(HLE$! 
zf_x04`IW}QrfJp^8#Ny`6JtE28%RAnoXZ(;@Sy`&Q4brH@4(M_$FAGKWD~Ngh41*Y zPg*Jihq^1ZsHT3<@OQ*d*?~)LU^7Kacxu8HDWu>nnxMO9(rm0XWUR@K)_+kj|9tX* zyV1j;JwQh5$xY1Wy}2Q8KL(d}y!`oh=TE^Kt0)*(^AtiAKerjs_{NntmH)j9c`4QK zF4DJ)>#qvsPq+2Drw;MMr5bG1fZ>{&)U=m;eV zx(t3EQcCx*7ps}?DQ~@V+#TbD^vQ4ta>YQQxER<_gje(yZUS0*wrd@o(r?K4g5%eX zY?hnQ)bKZdAM&Ir9p{zZNu;0|diy0SnzQ(1Gr~sqlYs5R=Eajs9Qv*A>Mq?wcA4U#vpx4ShB@9tf%Z8x zo`J%t)_mknDxYljrtIiU?!emU(?5{fLX!#g3_40vZ~3zH$Ed;&agnQuZln14KG!4ZGW~9&#lIZF+kKM{bvl3(2K{P`v9l?X;MHNT zN%Q3oLZsgZV^(!l)>pn{Iy#W4EpR8~d^EK(GTbkaxfP=0$|9J%CBfJfUeN7G+ICTh z9&*81oCfq0$17p^`DP$UjSueWPZmK^1}j(k5RJ82rRQ}zSKbPnt8uAGH}`>Bj1!1V zrYF?wew8egfY-KUimUPcmO>@*T(wysc=M~wlx?~|G{^1d7bG1M?CXQqx#CPE9|z=m z|L|Su1yi}C^16(?|E&1OcEv%CJDVJWobGUxVkpeTfQoci%FOSG%Mk4|^Psz>O^j*E zklQ9eS1V1t`pzL$eq*(jQ}2C!y6n*`J~~}frp(jGL~K{7KZ}vy(0#|kQeilI>_hzWm&x7C@CxfNw;&fXQb+mn zU6s?7KJa}S*{J*RgQz2{Y;1`xE_Sp@H-|##>!>>Z{wAS+3S7M zhWv)D!r0Rj@6!&iNzrlN7t3m}fN=1YPP6)lORd=886peDL;$($LDvL^xZkX$Qsr;D zbL7ZpX5f$L?;^YqjO>c3t@5&qbo?q-{HUGqq8usNonJqLG1~eYlALW3+9y_T4lQ%q z0*UpCkxGq%==By7qUGfohThzMHlwXG5w(MwNuQr1x7q|!$sP+h*0(i|JOeMn`K99) z9mNM-J&IkDu%+PgAR{{OU)`n*xG!=sG`UV#L{CXk`_x46n8h%?LX_t64!o!R{Ppw> zF0pNP#dtjbTzW;*)4ZTp&m1BUtTk5_m1`H85EwB$^gP27mtgQa$tIP*=eH@$bRh_s7I} zEQQA3*e{#=IL!aWLw|5@a%s=M=xtL04`gUEM>9RwyNMUTyxfe#Y&P1(;5U z(BhlC7sMcn+3z|-t)(M>h7J(c+fJkuiE%%t@w-4bH-3ta#Nm9Fi||KmsL7TD!KlmW z4HJnf;yg1ys|a2R{^bc*r7#N?$>OcXSNZt{FCE5L8Pj7a1$4R}SoG}%h>V=Id!$H% zwY2qmJA0*VvNAi*6Te?UUJM@&y&z0UaXcNP`3nd`5uRT*n`@oYwK}$B^)3{W_id!+ zfN@fEngo>|(%JiJXhgxpT}$K<1)DWz+0Nu_NnsB;X0KTG4QrXkyI6QTyPV9|nl zf8u*)R`YaO%+}`{QA55T%w*W4B%zN#tr+GBn&{cTHsR?Eg;$npbw59NZBf&Pk&|~B zy{pMN^<%D&=HlRac7%kI+f($gUa&D<{6PVzo{~}W#1t=~no3iE;4)+ur`Aqn3)^yrrV45eah9KU|9}E24lQK5wVN)c^jsG|^Re0xPjt2MZcUk_RfkM}9=rIBvA1X|c*@Z|@i2%xh^|{=g3{ zVn*dFDNzLKwV`2!9Q}z`jYVAi9Faj`gjLlONT0W2wY6=3PVXDqxwddhK(4PDAu{hO zt0%;oCyNzYL1(~Oe46nBoloI+#IlN~Q@NVP_ZQMgwAy@*MG*RcfV|`uu6NRA{gpo? 
zDYBj-RlXT+4Yx6IV(6F+l9?8LX!;yXqfABkIXJrlJ8Rh{W979n0yRo_62Wt%%%6@- zDv*JQe*NM*NL;@}lTJK&O?8@Qb7VBS2)bH5CK-BOIY)9DH9-|yw6fJp}r`doV^Yi$U_gvqAs(;(pV^k3;6xF#aJAX(P=AV`x9|U;Xfm4birt*%W`u zK~Ec7?Sb)!77hw)N5o6Bb<+sGR-5!cJ2oES~z(%xSO0m>Lb!*)hiVoKlfg>el6?6MDoGgq#R2wPp5zY~9#ZpaSH$oj*MQ4`v3;3s1UUkLYl zW#?al)68PCJk`FO8t$WNoa}Sxqj~9Tbz=z|n(s6#G2(41(;WpFPSU|WY3{Tf0k2Ku zt;+Q`(EP?fSdEKLa5~Ohe!5~n3j7g@vktj)#7Y0E{HIBqi{$di{JEKrZ)?|tM6|2; zzt1#wmb7IiOsM)k&6oVddFbj)X6HQ96F$2~DKDZu+z`dFssOEaA*>I~{Ayldh$!+q1N!yYE}3f$nv6ERBPdnx5W$TDjyU zo1BtTRaxo3zHT%$I$FX{M2pdVekmseQ6Svh*q{O70_p2>S{&Gje@D@&$h#bjufzvm znCY5E0B`Q%LI8xHKz@_7@bpgf78i(!RWrPff6vw1raSY4>#?OG>^==nHP71sJCbB- zMn=z{@LR7~kX zG*rV$(V6)f=k{uJ&zYfFVIdllw|lM5eiUh6`Mj&JV^P<`=rvs>2td5s$VxXA43{(PP$Krfiuv71SGZ|0K10 zREN9v9)7uE5%_qRMFExBsP|v4Mks_7n%;hs-grJCk8(K=M4#ntw^mDE23fB6l>H(- zTVvIf_>s*sT?n%-O3jVV{tinl%FD}NLwMT)PJFML8xP8x*#!lofPfZm)bH`kaU+6Z zp*+pe_g_Nx+FACK5HPGjFB{sIo5GzQE@n3|?cEVqps{Xi(Oe_!c7BeIxRejO@1Ql= z4;c2|o1~7%zjq3bDMP76fYo)adG&!+598|I8>{M} z9sE>t+Yzn9G{}8=29HDawao5G`3G)Wtw~@EXUO)m0F7nqKt9vnUm6KZZaz;fK1^TH zJ}cs6-~kMMnvi{uG^^)E?)!L_jcwBo*7&r!{jS;l$l4%^xx5jfBW?GruEGZ_W;f~S zOhX$%!>aFHha(C$MbF*Jl}O8FT}5v@5Xc$K`;^K0@UJrvFgXax^H7A~1vEhOKp^ZaLSzD|!L3zBie3XYx*OIA`4j`hh5u7?pHzQpy#4}r>UGSsA$u5XSs9XdQ=hNCN-pT%4|zs+XI zn&p2#d>B`3!0V~f^p=axrB}$r-`IF|S3VRVP>y*XQTuU(W+bouO3;-Sm!DM&+YmM4 zRg>|~uTe0q+bpy9VfMYy^dlhBcQ;zC2kS({=65p)9h1af*E{hfVr{!N&Tt0~zJ|rd zr^h>B`qJE(%zos4?0dSVyzXHT8Bh6mc4Y5Idg6291G3#k8M}> z(@pkf+udnrl=Y^Y;Pf$w7L6(NK33;|7FSilc=Cb)hqpp`%AM}70 z+M{)J@rcK+X%FZght@2Wy>xKPxd8ns>n@_47-Dmw)lnrY!yNvXQ+|bQ*yQwcfBLnW zTkU~CC?}JZz>!swf*9vZQs!qY)caghcY+Bq&DQTsKr$axO-Iye@#tCzs@vDt;r z*5lYm9tMIt^LnxU;nO|%n&d!44hP4(;!X=-!^-_%-m7G@MWE?jdAw}cOliM=<$Amn zp;59O<9`-o?~UlT>cgx5_%CJi_MoCq+4mp*;X)nyrv|$^je@9@xoq!${d_lD3Hg_= z>YNMx89>r%-1;Ve3fs~Ok74WEg}>>m_w?ZXPu4(P*~NFD@Z6;aX>PS+GV8LoZS=V<(Tc-5pE3uB>(W0C1GDkyJn8cCR) z9t@K5Z05gSTyjCp-Z-eILi4s#H+W3Xk2>k-g*EYB=BF^_IoQD;bQPK2TXXuTIQ!sg z8+y}{GPF5ujZ$4!6oSON7nOZX0*$VBJC8TLL0pRSj7M8ZVxyG6-x?0>fGf2`<& 
z)lln-RzgiZ7Xsg-SzpYK1yesDxBU0-%j%<=nwtAn^JX>^EVy37KFXguk6cNZ@;?I@DNIB z*J0bR97=`iu;kdc{(IDi)w1(wH#ZJ~Or-0t?6a@dc(vl?g`iY2-KFQY90JTp>2^)U zC#iQQ?Ub_ye!`C%Y>x<^>TUKGT~~ds)?w8R77t`T=i_{Lhc%A3dxhAEi+j$Mz}*B; zo8QB%U$5}hQc!=tY=$F*#C;+m%e0~k#ETi^?4ab0`ma7%{gt{C2xIedq#+o{406UsZ{m5Xw>^_(z@t`H*q zaIR4yd^m?LG={m7ugJ&kPrmo}JEE`~4T4}v0932)cOw6A?|1wj8T6lSXRlbM>snf( zL6}U=HP};*@NHuJhrWESmcsy+sTmPKxA(hSgtqJmSELM2*fl@_%6XZ6x3uGMP}V?$ zAvty6a^L57kD?@a#I2yDbZORhe;Ov*gCcSY-*Vl{24QJf3as#ZI4GL70^Nl${7%zB zdAHZ>zfPB<@-3<;#mdYe3=oL@56^;J}m>ut-eODrWyE$Q} zRdGHT1~&tm4j>kQs1(TwxRb`C^n-!lV9M%k^*rBBwx7sPTKZ2>1sYM#tpGr`#^YdqTG!_o9_4MK@rbpp7xwa7cs>PncHrCwN)!- zfqe=LGvMI>Ed4Jz!OAC4)(;9j0z}d?G=xFnecHKVPGsQCA#zcyysrGXrF>f7(28lE zh|jRC^+40r)*v>+V_QtoKwTRBmhO3XZ!FRH*R7b&Q&3T z%%fz#N`(rXXb~_Btfi&Z0SjhbZ)k7h!{s6rwzVf7qh6<=#N(cwox#nl4F4N(Yr_Xc z3*&Jw`wofqWJNJowBWTvN!tT% zZaj3IhTm%G=yU;F;>jlLbhcfT*{{E?@W6x>%BSnNrw(Z%yiMKb>ME8!Pa9)N`iB^Ui*u7R}&(gUAsa?lst7jura% zzO=M-4L|!D>2aO%5gq_X;90uO)sF_8L@I`Nd*y&wo#*|LQTnxF;I7fY+{vqxT)RKG z@5dEeiByh-@OA=rzsi}e6YKti=zKJ**7xq|$ zz>06ZWKyR1TfWq*R&|l`L3ADpas_p4l<6u^Y&2?(nb-0*jVa zfXE$&-_EY3S(kSvY3fJtf{ft=pevgxlZJEJZ@voB(4|M z0l-30m=qS^EB}%k)~pTgbPPyAS^>i#GCl{OJlnkjH*|wexk76s^xy0 z?Zn4-J%q^E+na0^GeEA;YXL;{9r^SpnIl*~eSa_HdN?6W$H*X_fK=+Ktq4*b#fZ?ELd&5UTX&)8!=4DdIcOCNvpip~JRG}i zev@b2^YMip^ot5V;kMHdwl#tZcjQ>XW4^+w|5BIuWfJjb2IulFL1ciK=Pcn0e@1Nl zesEPb-e!v@LAWJ7^pU`>)N}Lgcwy`B%>4dLvjmeC8s2=1+|d_wE6)8&z9dW3sL$fi zrRtjmAv{QEqO#jabr;!G4=5yyb-q$<^!%CX9EhUg3zunIzG$L39ZJUhfv)ctlx=9W z?jj&;Gp^ss5lI%Cn)7V2KRhLJe>;}PS*ACl*D_E5+?ziGQ zX*qk=lkRhOxjysBzIPtDXklCAvk2_j(CU;{jGCGnSWx31c#R5Qt&Iqt^bG1>R%f|S zZZj!+JO}Su8y!wC3Ik2?@woT9??k)G^_ovXvR=DC8|^zk=N=J#@&-2;(9YFa6P33= zdXl-$8MWrmz^w(KP;_)-i$-t8iBin!d$EW-LgO~82U)uTJEIE2)w^}8xp5*3f}f3y z;8S$J$Y#4E!U2$Q5aH{#$S>_YO8=X zHzKpU_YIy(T>adfrNk0+!@kWgY&SAzByCvZHC`ws%MbF?H)Cb$(qoNXlrGkJ>mZ ze$uMg9QI{(q{dVr9h*+E#tKRq4D#8y0Vsi#y z5df^h#Bir=SFIIS9ymxA+%M;TJ2901{`<0J4Vi7%)*5&ln_QSddvt3v4ElU`cj8Q? 
z!iLp>f#;vi%|pyahlh=fjS-Gqo3FHisS}vgpx_yHl~4QouQfC^tIP(I+e*_|@$vBi zGMa1m^E38INlu0FujJEj&OLl#AHG*Lowt?Tn1!{Mt=B!RxD7azMaI!#=&v{qnBvq-9 zYivfZ-flS(3Wa9r!DKC}A;1S{^vC3A=0~2fl8`<`nwHd9A=R}M&ZF_}GViJD~UXv)8SOB6IW?tfU=-qK5pc6Hn^1&%5y#5j z8~Fd7S7@AH3tKll9FP4~T zSMP^c*M60h@8di8*U3K6#pjatRaJ42`Q1Zdy+)P!Xx56CW7CqyP9~qjT1Q8-8gL{| zO-=bP+b-6x_uwn{yq(nzG5`%M-`Nn?UHz)(K^fS2zV%>yyaR8*;FuiDlwEgX`}&)U z-p%_g!f+*w#mH1L|G}mL4D4||uwx3NoZ>`o84C&uJmqurz>dMqB&Wfc9^bnLzvc$- zmvFGY3z$%0G4%*Q0Ws_gb+H50IN&}&ks~G{=^PqTI7uBJR~0(#Km=x0D&U6n$_A@O z`E=cUga{9 z`G`mSIxJmS$&|WbJWP|U#YkbRy4sasQTMPCM3=bWnXo0hY>)^$5|dGjU>SH}=S`=> z!Ls{Ew8*Nd@nJqlG-paeEik+GZC}@xt+@9v&+*%m`; zwy*3;%uW)F3!7U~rl6=Q@)y$7tes6|dD#oM%3r@k8+t7$m(}}v2Ojm9%=oDfL_S(% z<>^=2F7n(1P_|m-a-ebFpM8%8OH?K&E3Gh|8nDi=b#8yUYwybjXmA67)$Rj>r`xvv zgFfM7$41b%!a&+OfS@PjZZOynP(2LEUoAPImm2Z#E<5A;5$zB9 z4RC)I>VG{@FM+0^x3})TuvABy!)0Qj5OTBYQ5Wd%jfCz`yTK+wK-qw0Wbb{o?5?S) z7qF&{egRf3{(~hjln8|(im0e4*sEcAo!9S-*BxZ;ya%RlXQQH&CMG700L{X(z&TP1 zU=?8rt?{US5!{M#8E{w)Kqriff)Qp=7?A=17OXS@&+32Qnf$~Z1%@dYe%S&5scGQz z7KU74pk{Dz5Tpd0|M2zd&4}CSb)RU~svk@gY|%}^C*3iK$-&A_rOPi(Yu)Ys^IZhj zy9s)MGWAdDKMp_eMH3qYr+$Y67%UV^H3uNLvgV!JbD9Ml!9@>v_2k*(Ibzpj! 
zuA=4dlBw7mkFnF^sKrKCov33Kk3c(qoLBuu=W6Z8rxs(M1kSBr62PTBG4WybgWmr@ z9=Ly!O$40Ija!sz;a8#1Hj~7-F^C6Ppq$3KwICWi>}x_% zKnK`r0y4L!e7=h%n5*dM?3+=*AnkO8?^|EML9T;N=XrVav49W=>AoOrk?@DRD-%#- zVZ|EETD*9`l^Ds%$zwJ=A-?Uq(rjJiZc71vw}pOP^7{YqGF%rdK?b_M8TE?=cQC-d zgZ}D2>ynZZpEfXF8Fzm;Fo=6c>GR^my-)CTe$z?ZV1)i*ZsjcFD+!VoZ+ z7`%avDl(i#_<A7{W$75u72d_4_vG1d59x@F?h@EQRpH-`l_tOlh z8zGF&%?2ryV)SxZGgZ^5oV_xU;eT(3PvkOuFA{h1V>1C7^(^M+x9;qE?J6AUT!c(n z_xG3*xP=v{z17bddb8UUBYm%WrE?u+S(wbf0~QM8@692u(Zf%KzXk!H$PFX9t_L*R z0@Ll=NWj6BAP#MW)mvB^NUHG8KlNd{@Fe%UH1`XgR2C(Nl?AU8J$?Y^PJa@$qQb^E zwwf$Nps=t3a#a6rVHVa;5FCSNBV<9N&DYK+mHWE75a0{}Tfva+%~@|VA2>cf-uBP{ zsxKgFYV7;VVf|e;NDBWYFbJuA4d{fV2e_&aM$NVP zG>;X6aE0--t&Jy3zwtrq3PS_>AR;#bBs@8LN;Qe?{BE8V0sdRr8TA-(ayYE&`;RR; zZo9KPoUZ}b?mFW=F8bZsUjtkmM=t1kexw`^;F8cyzcOS6cxypTzDo8knFZ=^{4Cgo zFK{r*4rKvVYf$74OjiWG0n+-fUV;f93amjdF?g{0kMsep%=}_Q`uJG^Z!211#+T=h z_oGi_u+#`l{V;gE0k~pX%bagO1%QO^rIVUT`P%x!mvtl*A_0QmZ(b?L?EGz0MrT{3 z6`{7v!Gj>f4F@@Cmi>7~&-0RqFcJ@qFG5kSv`kiI_dpD zq_T=yhA#Q3Y$6)h+7$!^qW?VeYe24p)+N@r&`IN;-ZmStmXzodP9Lza)eg}VP16kq z%jI~#A8Ib;u$!$)p-lKt%xS*rW$f1-dT$laMB1F*73>)$a!5cP=S9rlhqUva7~R;u ze`tmJp)Yi<((M)*V7RDuASr}C#x8V{8!|WJ98jm1YE*czP_402$26a){PG?2jS?jvU`x~Kp8ryr~@$8KgTxbe9 zvJq1p`Z*D+8F2y*pAmX5REnt6qt2#lPPvzf4oLudRkr*~gpfmndbagTRzg>I|Hm&u zwX;S@i0x9MT6buHX%;B9j2ID0cBxI&iSvweZel5k?j5gj9;vAkIr$A46c$rIzv;|= z!+UqBxYuV1C1t1iO@vEtc--}H6x zK&e4IfrGbyQRhZpXDpwl+LPywxD|H?jJ9^rXK(`RI)?{!P3Jf2Kt75DuXAL)s3_e6 z|Fu2aZRXZvZLkAWwjj8eBrHPLhG~1T*X&cKi3UQ4f^wY#%p2WZLFYt`Rjo!u4khxlH9}I%5r=z20c|Hyyc6Y-=QLvGILMbIIFUCSkySBPZ)`*WA_qzDq>%&Z4Fo7 z@-i6a(Vt|Z3zq;r#6iJ^Y5A&O6xv<LzAZWYd_UY%hb$wfh)AET(YW7 zh#?FJrRA=ber|*9>AV}K4e$hX<)Xh&COZ%6E1l!MoAS)&rT;~0hDL~JVk#Xxw#Vfa zP-R|!qrc88{_v}B@3s>IlI!2-j10~KseDfuKJBvdTjmkC;%=IC4t%kvVBd2l-|r?# z`}EA`r*T8zq21T)|2BJeI#hMZl1%Cz1BM>;obPkQTQ$0;uKALaHNn6?!^i&}nT{@G z-wHng-(-M93^>M#U%u=L2#N|ZcT!IZ3QHcdYvABu%^v3D7YWVD5x}Al3XVmYSEC28 z5Hj)aIXyi@W%iFV(1kK3gN?=Khsv7nj1k$U&(y}-~nLG#b+e-g9D*43hv=ZWba;}vo2 
z!!mr*7XDd@;~0K<*#3ut&64I-q0w2pNDjWO4DiO~RQ1fMv;x60E_a$D__yjc>I8^Y zO+3oe^tl}S}0GMrT45xxCfb{j7U2|Y`|C!K2tD<7w zVEOE~`Gtgvv3jM)mXrI-EiCHl*xG`XW|R&euU zU7lb~Vg9+K^S5y4zlw=)1M)I5Pl^lWA%~sMq)97OOa0Z=Do*lf^@GF73fI$5*%42t2<}>RsSln)e~PN{Vm0RNuZTTxdgIeHTHsO=p0N&Vck(11$7{1myKOwn_m<_D zYhZ$-s+#yf%+Ee|?Q(QxL5g~Kaf>oe8bF0w>0w706~#Q)>LwZp2OPKM77exFhN;7a zW=f!xHvD3YQNuO45PAIP6zA_}^GfS0nC6rSTqLfcGZLj*IBjJzo7D=I0C#dyEj24m zy`DpWGw}C3)8RrZW!%w1)X^tim>DCmp|{#=)%S3v*&lSZHd*QGhoqrK_mb>(Wf6R~ zLw3a}cb(jW4$rWp@$3}%ECYE~8e#&T7e)7uto9lH6bfpi`;f!Nt?yV$hhx=oeP=Up zsh`d_6Fh+ccp9_Nu13q6Y)kMAL-8EU*>Qy}_Mnm5FlZMGWa3(}p;UaH>VQWEjnhsF z19RH(`qCpB1KVge%Lhb2yW0tlW!>W3Z*$ShqfZ^cOj^%E$D%DwJ`Z zJswl9ncr46^CZwm;&)6}oJ41sww>uMiCmA?>CKv5pzpUCmv8&8=bs5cm4)d#4euva z=BSJhGJAZ+y%?g4<|?^}8`-PvuQD(McNf{D!uzs$V|6lDru%_4SV84@=)^R<@ z*v`EI$IM*9SKA1a^J?j7_nuGD)!WSnudjQfcQ|+9+z|mwqy9<#A{{;CoeKKBb%RVz zM6cu^Ut?|b_BH`HzMGY{XT+o|2Clf>?A!Dh0pzo6mlPurY?4dEjAZ24>mpJ?)U0jpp0^P~EVaoC?;j$n$A~XRg-_Dn zw!Oi~d7h|LikriEQ*6hY?f6`q*YQ)iG+DRVHm+lyUO)2&@e2pfFzlf*Yl@U6r-kf2 z2AyC2RC6LKwZZPUT61s;MT24jMo0)BIG_<1m)3RQ3d@c$70k2rzwE-omg>ZxKz+%2 zKnHr?-rexHBSJ9ftX&`nw@?Mc@r){&*tv#s*%OCC@wDMl;2;phAv-;M#{r-jXN_GA z05%@ge{akv17}!F?Kbe&s^{D(#iYk=5lFw<&D4s;yEJWmJ!Y3;@){b@7xnihk7(u% z|Jl7Vja|9pyi}gjIS;-{-~HdZi1Yk>f;X%chIQjo&CByR5okD69iVK08z^nkv%0o= zQl}Mi`uiQhDBjScE}*yfRcR^Dn3nBN^4q#8G6E}9n6Jm#nb^r`GL@LJ`}FG#Qz7i- zu`z~b`M_=?4{RBRE_D&H3`c}iFz?i#KkW53S=~LJqC0LYpl?Ig(3E_0Xmue)CKKon zV+g}>2^{5UcM}U}9;wb$8k7>~6dg@p6@RsSd?Ot-LWTtgEmn&-Bt^#<65fxMC-Y-= z7v!~}l~kr77GL}PhPY_uHX*Ru%>1^Pq`ea-pIB#AyFLyXPNF|E|A5GeW!ihlA`p$~ z-EC|VXQCk$**&9NYHa{9sG`6$kxNSp1XU7qXtEK!dVqAY3aH5++P@Gepoeg9@Px7- zmYUm-M@3;H!QmhkA;ViGFWQ4u1SgFi@?gU|`;YsT=kt?}YB^Xt5D${$KPff0Tai6B&B=@fO@V z<^*e1XK#7>8=8E0U7t|*D|t^hs<-q>xl@L!<`Mo@V{Ra<(XuxLav693yGjkmK=4hc zyGB5D)mMVa4~ya>7PlPntN6~PE1y4dFwkm<=r2rLzxCgVxhtARfNSwivUg|(<=tIs z>!+p5+n&eevKY6GRMsDFN;M78aL1b}jpTCf^_D%|| z-HjO0^nz6N>ED)+(m>ktDxNsaS5SzC(?uu&`Q|gS>Un~oj*TXWPv-aslGkk&CnblG 
zhb64Wq4`*o(LToM{X#v7xvg9WFT$S=FV@A{1|&rBm7W#*W~?oCgh4TXlAFEM%wfs% z#=d=0ctg`!q`t)Ey5FRVZcLd+&fxV3Gk$`f58N9556z$Qn`Rqae*Zy;Wit)ud$oUX z?BI?6w=5iAy1FL5rF~jRHDk!fi?eeCr<*H)UWxHqxau_>3PFulnb(T%w_JOIiMNWE zM12M;mgQdFO$XwqEpemk_|6;z+T-$t`b~j4O<(^~b^VgA{QBRUV&p1yU8tELYlTPf zc^B6y(c!#VyOw%&JcItj|2qy_dNoyH43WI>_0)sO;fCY07_3)igyfvbxN^cH4((^v zndpr?Us)%TE8msnC@i<;D>Ye<{HdU%cgnL0O|_yQT!VBP+w4F}km3G%0U2pK0%g>TSVRakEt=CbmP!TQJRx(8EVMce=|pzxwkxyg%Z<2~fhbYq%e=(8R&=A%EwXw!K} zM07Ok^lef)(^Zcj6O44{hA{-CXdD%;yuY|(s<=lJ)4t$z-eKXE{Yn*`Krrq6+=x2t zf8UxH6AkwrN@U5uUtI{X%BJU`-zsWN!fI0mP=8)sd72$K@b7o+P^x{*xb%rb8D8S} zR@^KB@ADUfH9;eiV@zBWAM)NDZ*J2`0xdzl*wRMfXz@9-6|(rZ_+TF{H@OG3YuOfz z`Ryq>MXb4$MaR?8!_t?Iw)vdBbPsa6`Ydn=ZBd_Ls~;*Wb0$z(g=gGuJ_xeRD}3TD zdFAY}KRAZ>KVIPLn+21GFa`cn_`~^+E=Jm;uL#fg24iS%AWK=KuWLQk=*yRu6ZY@< zW1Pt!Ci7RfAKSYUZ^P*?&7{e+{aiM_HD>61BUdFsqZ#R#FvdgG5SBmh2G7ErIF@BC zpphL#OK%- zkp%dW0{^$UcHX#-x~8jeZ`Kki+P?pZ2nqc zQAU)(JaUwER+)$jey%>*;@%Md%S5XEiYu;o0+}*Dnzp-7o1vRyTY)~E0Q1&w8ZX*3 zZj#a8h7=}f#OPM(=%}0CmkUS6mLekpEeGxMn(3v!SASlrlZ_}O{cJ@Y+9hGpFjdI$ zpJ>4kbd>Mpl5`YKzN-*|NT*+$i<_pi;x-HDDgKz9l$zlWTZL*weW}&WIRB?EP=EF& zq@a>p+Z7TyBH%!bL!LpK5JKw>RW!wp*q*xOvCR9iWm|aon9@?3_U%Wm!`G@qzW<#y z%3|qq4orPQRYFHyv;S5BF)%ZAywEuzA32f_5;bC3>ufvd9%6CBMH4}`kCVZ`l?HDS z87wopS%Uo_=}EJij(qre8~Bu z3lx8HDPZ?TrPBq3-lcj-ZD_KZR3pE%Q~2n#b*I`d*y{XO?Bi)(`T0`fd3e^QT%IBh z5`Pl=Nd14ODpXZWP|$C)V;a`k+J^Ierm&gl0}W^gkgEZDvZ&5#`KsusTeQM zc4_X#Dyy1XjH{;p2bT8_?;qDnS&A_W)%pHQstVie4`cGWJ;it+z(Nwgw+tx%>;F+b z6}c&vTriCKF?eCZN)3HdXbNvA0k$9W9$o6JB?(W1N!tocfJOY7?ZcZj}Ir z;0Z?@O7ovF$Nq^98Rd-cEmD&iIy-wJ4BtIKP|*^mDa6Os4z5MEQ)tWgL1u;3~g%YIR9IEekQpdCYrlv0DAvZ-)j@W zZ`(2U4kXCj?=VplAfuE)8NG|??}=RTMKbwj&h-CCy6UK?zAh>$-6i!gLacMTyRA&nsTjlXX#*5VJ=V!e0Yx##Y4_P+aw&$`p> zq~)e%IYv)ARFu|xM|*08o;{j}ZSA0Cw#vWyElu+1VJ7066$KaZEn-jVsmZ=x z+yC?TgXiDZZVR&V>HU~*cvq!&~_hcQ9fDOq)ArPgxn6UgXZn=_pw9gQi%a$^S5 zMxg+b&n(gb2c#6=eArNCLJMQAdd)%;zL7Zq^Z`Kf{R zN8V6CNUWwV{upp=r3z19yC-I&Shr# zFKJl$|4+D6!1NV2wLM`5u>a(H22^!(Eel-;hycUm;z;dCqLqb&IFMqqO8t4IE5Iy= 
zVQ)-heAURPM*L%Dm;>m-t$%#~8vV!jle=6?nT>0N_O*idddWqPDq1MoTlM7UY;(7H zcqgA{w`@~E8tWT5#`#hzD2e{mYg7->n*H|#M(<6E|30a>_BJP0y0#$hhkHf^rof6{ zxYBsj2>~8Dqp*he>^7qg^M3pssRI8kA)IVA6vD%bUv~{!^V=fm%`5-jMfbk=sD;-g z14-ugOTn!aP&lr!l)Nz=5>)Rk8jvQVk#=d9Y-yKqTxXuG`17wM1BV9g|I>Q8xXIk- z@TsS};6P>yYVesNp5uy#UL^m95k|Bx1`UMBP+wSQHt8upq0=f8q1cpyNbGw07OgSj z?N~R=hW+(Q4{*lgOk2Ty)1YOtU~p$@N>)YBKde?dNozHiS|kal?wxTG@Q>L&NF3~i zpNKq=14n#&PZ^MA9;4f6B|5F{Mcm*g2dD>Ai9%2{f|Lw3|3?>$we=jWN$%ThV^;#< zwAs=GwPA)5foDPt(;`(fH7_MCHmUV2$m1&;N@v~aN$M_$o^(#Jr`k@K+f7@yJTm5ufFHq+y%zLkHEQ1_pJnPY;0NGyT!rWS9rjDS z=TWp2WH0C2#>QgSW}IJDZ)Mg1;BX)jEY0s$UpRRQOKiwHz zop=_mrnaW&7z9d64q2A%TaKidfdqcs6B0@rt0^NZ4;ej9eV8>WEniZ6<_2G925uix1KWP&4W2wX zVK02au>@>EW|h#-%5rxyXx?slD#73Xd@5I4V&SFQ51zC-Q!J|F-i2oT+7l?AqxPH2 zv>?RX_^!gh^a_~x4|!w}kokw*2QQOj0A9j?BZK=7_FFV+>d3-CLL?^0_yvNyQ2aAW zfr_h|amzuNb!Dzlyty=NM-#;_5Qba^kUj7Uim;RQIDO$paWfY$rPM!B+-FSd zc*#uW1EFBqOC*%Q3g}OqB|}q&zlqURGJ0vroY#+OOfpmREnkd3BCWk5QCT2|5R4#2 z^vP+ws}fM7*w5QCA`Rkjfq@mNE_yD(`kwdqS;RzUd=PO|6 z^G@p@2k$>5_Knw3y)RNn6long$$|khrg@U^R|Y9L_wuoH5M)eNQ*C*oNu>V3|52KV zZZ*B#L~*^H7DU4bb(|pzsaIpH`l605wTOCebf>=P-~+T20@f9cKJz;aO?Pwdt%H7b zzNaF-ylFj~n3KhQan}~R`wyeRq%yRBzUPcJRaTmWMb>+h6Fu;BQCt57uDRMzG;+>5 zh}A^gJntf5ePaze5Ou4w#;kGNJPb`}@cu6`@CeBc+^>+fqxNzl!!k&|;+$x-!|mFDg_(SEhoJm_5F^2h(bC2KN2Fd9la|vuhqWpX zdFzdY9}^=r7-}8ICf)L#XDfsko|(2o!mIwmS@T=Czp;Glw}CB^r-QiDTe8}dAM-&! 
z4!-VsYoLu1F~fJksV=!7S^fF-4nJS~|~hzZ2NbaR{2^#+MH3yp<;7z5|q>FL3g2_>yL3 z|Cwl^i;$%O1%{Goe zw|jpvGCU(dax1?T?3Y*9b2AnE9`#G&>Su_ApCbF#g5ujm)mL#9Ury@gF0BKj(+9`{ z0v8=VAm@0hL`0~2g|FbWnXg@UxHX6M*6%94&0|YwTOD#Kj*EUJr%#sSHE3Da%Lo17 zV0O$HBI|mj=`IdEdHPq0&BUCNFoo%^1MT-Hf2%|p{M4gx_3Vrx>c4y#H{})+c=!zv z8uY`hJ`f(>^m^(zo+{;UhSmj$G2|=J0yKAQ660-sO9fV8d(L zoB2lU5r3Y}Q)lD??{vHfAAY8}G!jeV7qa6ZJczJa`GDf3feb4|lGtHrdztk<87XO8 zoBLsv^+eZcrnJ7?;Te?|BsCgt6nHQI3)fzeXv%x+j#uxEQtub@w$spa*t|HR!Q3*G za9@&)f>q|c4F4~O(`lqc5_%lIX^B=PJWAYW?JJ!w$}jJHqREHF<;mgi0r2@qz22t< zAR&}a@DY~{-YMV@YsC?|&OXL0U>r)E6F7Q<+ z9cNG{WBZGA7pB~TeHjMv4YB+G^Q^3&plAZ7W(=vCVO$*c-HnXH}Khpi+6WNpISRk#$V`(fo*n)rNpZ0qCy0 zY16tr!}k$F!0k?C^_dMT$zN z4eGrdkhx9wL=NjE?s{#nbQ=mdQDz-6_874#T&3(84~mgX<_7xR=G_?g*c~Eo_CtuVZiD=$mg0!1U{PCwH0StP%TB$0H zj0s~~1|(Bi^G|I2WYiia>E%IXq}eU~0dMm+ZgZX+bqf3O`56dSsHc_0trc5vjrkKk z4rNUmd_Ei^8Fh9rU0Lcr*!HBS9{D?2j+kgJlexd8i zz^y3HXHHWCI;j9^dH!^>rveJ!NHDsq^x`){0wFAZ@D-N489{Tq#qKE=dNvr#qSO8P zAnrHCY&OtgWJNN(Ie&1VF(kU34uNdoTyU|D_s4%MuuUV2dQQa0$eVRC#cSvW=Q#z$ zeZ(KfjLMbnnf~^@>Er16Ce`QsZT*|4=f4%duj6X>hYROk31gZYFynFK!N4-JtKyr0 zTjlRxy-=aHYTf&kwzvV!xgqQqqW98cq1*x}vtucAB6iE_`hJDu=8T`Bl(7}iBO@QUgNv)QfO9e`EvB|B#R;VsaX1u~c0`pbkHlMEEJ@5`f_gx_ zJV{=9q1G?hV}Y2{E^fV*|}~ zv03*em3iTA96z17XtF^O84j1r&hNLO;w!hN^?0A0FlErWZ*38T z&s7lV{TafOmWgLknsR9mpZUWV{wT|rb%F=QPep<)B^1qwB6?d+e8_V8NcH>Q;40|U zH4~sVFC_$S(tLTCD=n$ch%VD0wNO`J*UAa@Cn_yh*r{IL7M{xaDAIM5fSf_t&z-PheuRrJTmc(*&Yvfiy zYt3Tare6NkL_3q>02(`t&TJh&B6dEVMkaa7RP<=`9kzk&rS^XdIPZ#l5RnRS)&^R1M4Q83}X89ZMGBUgl+UGefMVM?P z`uY<17nemTIY|ChVCj1p{>oH=ML_g@JV7-KQZ2Zl#hB4itd5~sWhMHv+RP<`8CUJQ zJZ932Ck3Yd$%*`tvl-dAWG34NULuq|?+rq!8z_$pK`SVjz6=|El40*No?T0XLaS-Y0Bo@{5BW22#R(_{iHo9zz!@W8hcNI`C zgZV{@<2}z(Utl39Yh^|@q?E;U zbtj7q{3gGh#z*{Z!g0#NQywBfkz>{ z4sKbSvMNf#uhjTLQA@w0>=*nLeAXYOqy>}0B1W(09l&Rj7}5OVnc){oOK;YT4AlRci9i)eb4Q;C1d9mSJ` zWI*&NxE76?(!aP*I^-7Cx3zy3jCGMzbs;8^8J4ou*fYoo*Dpk*3XGD%lEKFcfd7r53n^Dob?ZB$4@8K;-R-c5`Q(CQh8B? 
zBrq7t3Su#lmzSAm;G)yu0h&G_Rg1{K>*+YGg~AC^Jas8n%P~Xx1Dl3vxbkkzpvsjv z2n1V%m?fU{WS#(?bY(=mMvw_Mqm`T*HshoqIudRJw528e7rLGAnM=9?G*yuFjpPkN zzD5^?W}`#WYRoj=Gf5J@`k@$Hd-v}I&H;)IWjxzdEE|t70~{K@I)I==FL~!z0XT>* zwhF7)C57n`85rTWnpU+h)Qy>km85?W>J5&)E|s3)N>xLUhnkYONDR4(TQ%6a+yA)} zg=WavM4DacF%I=J{F6;cZ01SR*kHs?Y8JvXoG|CUG(_uf#D`jZ+__rPEY=E(oX0=qpztkTIqmFEQ%DY(X8EvcUgfuYs3 zeJV+kH8hd@>VmI|{NOw4@6+{nV)?N_ZH074Hv_=lCs85PJij>b^mhg5w zcv%{yP=QfDw7E);?yi^?GW0{W9etO5x@ocK+Czuoj_mKC2);J|RAvJcS(7)0B&TJI z$@`CwySjB@p%ys_do0AU24i?kVsfr327~y?g`STbDIf-g{M)|EFcw=BGY#}=I<3A! zZ_EM4Qu#7E`@=p#f>sC2Q)lMd{`6A21D1N4x2cUpSI{DnV7wNzgWz{$X=ONt36 ze2vt1?UCQ&Df6yZk!TY%_l56D8NPMQ*r%l1B=4K8lHyxk-Wlci!mc!}Fa~l0(S1~a z(3+aa4JVD3Dc%1(il`=iQ1p z=oWfG#BZ>!tD*xp@vA%rzd6Y7V@0={w_p)0T%=b(doe*Eh+^Sqew2}U_hgf*78;$p zyD6>@2Sd=>o|Y8v4fet?xPDmzloD{r$GOdoYt4gDPtcP&dB*L$iuW*6&)Ni3?KZa} zkwE5h`q`Y*n2$K6*^ii#Ej9MX^%Kdi@6|esDz@r_DV9(A=je(I zseMTC9SEhA-pnPs1h4DobF8TW@l-w>=IVYe;de+DU0D7Pl#UwUkPrM2iwWG|d|K<^ z`$2X)%!EPZ==mVENE(yxK7%s(KvNi&kq$D&+-?Y6>;$%KZ?|~{0Syb_JK^;zW)iyJp;G4 zL1O`Q=(WU-j843Wh@Gq<189MDe=DIT_eK%8zSzlAEiS=M^JsceDivs=QOAQq(~?=G z1ehdBSMhrEcEp=NNg*awoGJObegd=fG1i*Dj9U`z&iikWc2b_o9R8AnG91S6F_n~@ zn)zHRn&T|PZXo|eZE`W6JIB%p9}1ZSr+}P6r70MulZrWZ2N&?!|31Iq)Wg>%cd8ey zbEpSTb?AJ|S0PIz(Fx*uc|mF%ljFkfXb84K99!?AKVaOI?sFQ-cCU0z)Q}H@UxVMh zZ6}l2V4Z;x(ByPbV0&-e)FMf-lD`duF*xwMNWWT3-CPI!rUsG)`G@ojZ)9%N^facfgRq!HV`d}I1bj2Nm zZVcR4R6--&mO)<u+THLn8b5$J(!Vy>7J(bQQ(Z zLTV=TTG|wKfe+fzXxsQO?+se~G_~ZKh_T{wO`s9)(Vga|j+rJpw-|qAxr{QT^^SX0 z(;tK0DkSz#aWFLP#Po&(=K>eW63C`?sD*dB4VbyD=8KG2X}k~Fxdingg%g>SOv7z8-qw5Xb-yFOnu2DFMeBS?cK#${liUKp6>&m?Qg@j0p4@sx8>;qDb zkwuO{bzEyH|M!fD6)RVO6BM)RY6jWxjacEHtMZ8A4)Si;yogy;1*9#Lnmc_DwflX$ z&hV{l0{Y9;gMlEI11;tMiorKhcvbIO*`t>h`jMjNLzd|uit*#hC%|v}sicjbwdn)S z>etix1rwD1hR^XFP7G#Jb@~&XjY@P?Kv_2fvSod_HD^RAt`_}^EN7irF8c0T7a=*O z9s3l1oZxHWMX5ptKv%nM7f>(mUlu&p?|f9d6)jWcy_@WX9b=nT3o$fSv3wi}`6UrD z^47v~qQ;yGVMDIDsP-&kXQm{HH2SMHb8$S7bpQ&blp8OB!dr8TetU*Q7D$U|qQkme 
z?s4&A?oYBl`4`Dl`4YD4;lKeWgilakOJn34u=yS5jC*4V*IeXHf1+l5qOEQt>}qvz zVAy{~%GGi80w%>La;i~CU)q<;&Bi)lG-uFAG<{tZQNtGey(5Jr_^*y~?`8P;cQIDY zrs27^B>Y~2be_&+d&3TL8bl=a<21kdPkA#&_1jAa1X8aDau63*>)h-ePbj>KvJQ94 z{@Z%jEp%#D`q5&szF5LU#k#kD$SM1blPjcR3d_6om@*0B@LxDLHdI)cNm1DkOvbx3 zIC5zk=Y|tmQN0EvnbY}2nGa?Z4Jt{V zO&?!P?LV2X(9Kq3RGN@rcdqWsg6fusUOVy#ibMjNjp|*D`*K+pnZT?(r$Iu$dix5+yZ;hDy&{LHh%Mc2r) zaxqMk{2MQ<+nQlf>K=y!Z|w zj5U;i%baR4uFe_AN}az`pqHD{;G^c;e9H=gUsYaE_mw~Rtvr+ zy?|@Gb~J9p`ka~GPzR8WidyK;cjkJ_6Ls$AOzI<-d(6CwS;I|A*wmSjymEY;MFLo3 zb+c%op6#u&7_8`!c^fyjkdoGYsIjUdx$Sj-KxC44gTLkOQ`oV?BB47{s&~C(QCc?p zUn2DVWjtDWKtJOF4ZI(cAnP@vr2&f$QbykNc>q!7YW`}T@X2DS$Uyqm`>xM*GvQdYmCV^c#T5$bgDOBDZr9I)lYiOs>MLSBn{eK%ABz4o|%Y=dUa@?1zVO1s8ZhZ&is59Vw_@vAcAjMNEk{>`i-Lr z;l|Yx!ENahcNLKBo`cn(?*_|iNz*SQ(_433SGoNn{aJaHq^f_X zvFc+Q?>}run9G~fEZetO{|wa;ctzzfvu^qVJPfWc<^y$7ii^61Ct3v3RiB)als~|v zRSad^8K}W_136TX67ebFpx9Atxc8REN=(-D{>AgBgXt-qaEPR_^_|1x&WWzk?a#vx z*w8P)nijk5A+nD9*~sA3nZ%+z$_IK*JnX_OL5XGAgJ~l> zhxjV6Ax-)LD_teGw3NURF9>4jZ%z0wEq1~B){X+_*D0y-8!oVQF$mivRw*(de*VFfh5hag~d%Zz7>*3&j-Hs#`fA$t2x>EDaG8T5v|1@q}Aobo(}n)B(pRxD!9-Njf>$eZEw; zF0Rm0LZ>eI)gm{rz$YplX2^TbXFsLp8e#5lPp`&^)f{4Op!b{Xn}&eLu*TccBoPi& zNj62l*4G|&nA{wfAI?<>tq1Ny+#g%Dj-m4n?a_QA-^1%gNe*GT&H((Cm4c*X3*%yFhk)vfF}N|fa~~#7R`9U zi|9z7x0lO=e19Iye zm*@Eq$b7;<2;Rm;Fuu5JJ}~Qs9~fx8wW@62f+fRE!~CFzteWM2gUD3(gTz+PC$=+v zIX*N;Gc<`BaPPINst*GHLjrqj*@=uN-TWxBzQQp3iWaxz@^wgZXHg?s_O~9AWQ=V; z>n(sHi+{H$3NDhOD?@WDtJ_TC&HKlm!u?of?r}sGu6&}*+2lT=0TLYB7fI9d!s|ip zVDAj3eF&uSi{1+^{GV)NO5v67o_zV-xR~ZbdFSnCexE0W1*-&P&`d!?(3~^EBc*i4 ziXRVf@C4}onURvRM9^yx&q1UH8y1r+=H2p%!a=6Kn4E^Y!j$o$=dtE=pyvX)j*z{H`L%%m9qHwB^ni& z+tmq`hOzHQLI6pgCK1FKn$yjV^lofP8)ls3n#Jx+B3!1kMt3k)a?KbJSNT3;R~4ds zJUW}h-Dy$l3BTKr_#-nb7*r~COlm`$C8(u`qtJv61WMsts7-s3+;+t8vtK4kkQx%F zSm-WlMmCh7WIQP1r~>3Eg-Nh#g8%&531v%3^U)9f-9R;6j~5S;(9-JIx4UUqtu%ve z#Q%pmZ*SO8zp_i-_m#uBE$vhLoxbjDzj zO7l`5&)UHFcYl-M0TJv^pK);r98sH@!DQCx4b_Tf&Wgk$vO$&AYUrE9Ib&w*TurSi zU~QG8&4luEx0f><&VV!_7oV{NzATp67f3p4NaMhJ3Va!wapiO} 
zEi0}I`q3Z4*^hPjS=hIc{}oEl^xMo#md5zsO70gfm;rh$Q74#waeH4@9z`ozbn~>m zHHO~(Tv0H@DRaX6j-0>V%*tXMr1JKy=SLq$jjve%t^ zzQc!d8Z5hkGD9h-hMx%x`=UM$s}3GCaZ)era*ePN*p#S5nv9xhnXyBmhvN8BxQ7j~ z{FA05TQy*ll6e#&YN0;40Yq}DF-%Ho=aSn<#%*mM_4_fQ(C63UgXrmtZ2vgqcW4os zA_1AVc$|RH%&mY_g0Z`vFt6&z^?julk~IQpk>Ds*`7p0+X?1+**5PJOlnYaV3!pq? zJ=4f=X|*VsFRr-N$8K*%R0{^m5;A2AR z9rgN2v?FgAX-FmmKQomlX2_m1FIn~T&EqWJFflM&451{S*n8%W9UDA$M`sFS{dK>i z%4_p?K&C^Y&aXya$nrNzk*IB%mn7SdK2G2Az@A06JMyQ0H0pAl1UNRN22$R$J8P(o z%j{TkqHo}J;N&Z4aedAW)(uyr(@EjbJ*<}&5t%XfcQy-?JRw$Fv2rwUi7q&vaY8S{ zqdQ}i!Wm6VH*)ml9%5dQ2)Ws<>9#%%f$ue&UavmJA(Z)arz3AUwZm{i5##n$s(6w) zEbDx%oD*+qo#OeWR=lLVS1}0U6v7`Tjwpv;a?!tG!Wf8HEUL|j#-byRxDut|Sr*MS zoId3&)@663lTrGX23)3U0J}S(YEE2&<@fq$4}ZJQy>?vJrFssg#)d1>54NR{d$iSj z^8N01A(c>(ZHW{cKtMzs37yXfI^=Y75RK&e11cZ`>bTJ&Zb{1Maq3EUTt2_EHkSi& zzSkN_u${qM)#T{{mJdM1wx{nZ$Y~+y%IJ@V&tdHw@UU~5c@gbi$~tiet=>?yc6Z1Z z11-HAw~to(%8bbV>R((keA>a{HW?I(c+yKw982l-(Bh8v&E`glc^7pguvS@blw6_E zCAINfX1(>;N!;*_S1PK{({{d}W-YqlRW{M2Mo$^KzielvUw0qXI`(rQP``#y1~9)X z24!dE`sk+atY4dK=u0$O`18ZQ;e-_Vz-qf4<~b|gPJNiDZrX>vCPAUX{+q;IV(6?c zf_M9>m4oWdO8Bt|6{Uxd6mzmUaO%6~3G>$Y?$}zh(rb0ms5G)#C+?X9&peBQ9Vk+w*)jJB~ z`*~eG8R5S8UZcN_k^R)=W}WDeb!*gDS^a z%%+o<4CPGOjCL=Rg_40UyjSw!8d{ zWeG0NF2{-N?p#Ci^@9$UlU=pV#(FT)BFH)!MNuXzT6BNKta$`Q!CH=+bb{qsoUKUPehl`XtTnz{UxSi^Ee5JzfcE=oK&-T@Tg@R}d2YOnm{%;%{36MT4q zxs&nZh2?TN9>3$(1uTK0-*`A6L#D8^IDP6P`wl)+fE4tyiJ#UgFErX=8_N6RBAiji z*HZhfH>=aAg~lr7niXdp2OzKc^@2cac+~Gsor@u$`%w=jdcC>vq{E~l!%m{+ z=IO{N-?0qEzJBA{a$-V2<4t7es*)-W#41q77$`C?rpkj&ue!gj0}M4c0Z<%A1#l)? 
z4`)4i%n03HWJVOiVcHYPn1cUNvBXd?``chWyC7HP+3|%#1A=l4P zQG&|eFn;(eo2fX`nas^0o^1zEvzyaaONX_f?l6Q3?$!bN0YG0k>5k7T-C~hdD z6H&HmVAt!GNjc<&Ut!$>4dGX;SRUq6STu+Fv#Wqgz(#eH3)_&g{cyrjDW!jwL24U^ z=;>wZ5G%b<^&+`?@@_||BGQU7TY1m(N(8jhl~JK@W0m-E@uVFby_3d(2~X)NN)Ljo zCy@v$5G_0vvP6_%`6b_=bZ-XvSe-j_=5q9l)+VjuT9`9DpD=jP;jvkFUw<|rirec= zWm2UYXVXe3hbdAK{VCei0#~%;yRzSECdppuR@|tSeCGT{FRd$)d=C*e2jrWNlZC=) ze4EU2Mj4O4rbje*6TvCOCVR17MVo1cp!gX_qP0RCy7>3E33Rfhfeh5~hF9b2fTV8_ zio1eRcZ5U3aXnbg$o9#er*lPFpSlU$yGi$Ptc3c=kWe*aVm=?W}(9vWcO` zwuwS&KGN^vPG%Ku8Q9SgAu=rhc$ZCr=!$+;S#@+gWNgqmlhUBqvNi%8LtBLCFjrIV zK8=$fH#I_nC1HFDFwbU0JyaTf`;`Jrli0xC-s|)0mvwS~I8RjpeY$pjQ4@e& ztpajdalw-rN7jh*b0xF^<}Y(A!Q)_ghkktO%#t3PjgAYe!A;H?IzYr;*W5CGU<~$3 zEL~YdV5Z5|qgUEvPv(AFU%*59+Vx-6@H4SDXRjnt2q3aP6@WmU%H!T0$3^9aA$v1z z;OBUdPfp@frhOrLH?HGnMwxni5Ov1VdSba8y5RC~{jY{2L|A=7g3BXS>ca(b>5j7{ z?eDv}FVVmKXMDTUm@w}J>gcVSKlcUh#kJK7?#M(N5xp&> zn@iK=A8YsWiNxM{?QHNf_I?js0Ohw}dh`U1BSpM|H)vnt?47{5ur#aaCUKX|rCUi1 z#9fOMFzdmo@!W#!T;g7$naTz|Nt^L=iau4Bjmcp6t!vWR6Fw$!dsO0qfjgiLBR##= z;htNRkmobT$)|jrMd?;iFN5KNgtT~9IoE-NablHYx(b`uY*G5whHaoG8IcllwBLXd z(H84qkf%ZGu4y=WQmHHz>&Bz#{B2`pxlwGY^qXFKY)gyCJI|e8$GB;9So8sfKWrN| z=CG{WkIw2DIcyEoLmNQz}xKZ05 zsVpSAOmp(CRm4exIp>@jD(*R{D_d1D)S?*khF(@ap5~d8i)ais>8~Kb zml5oGhmOn9*DaVR+m{6?eS4K@R4zEZiuf}BLX~}>4@UDgMn?56;%k9Bf&`TG*7?e!(nNiR3oFPIIe!V?4;T?n?Wyac`mADZs>ioN2GP zgR1ngpWvMO6P2WlEY8Rt83%`15taKfvonJa-xBoY+n}B@J`9INt_zP@W6@RtMzno| zuT$wM5)@sZAJD9HTcL+a_Xz6}V1JH1-zqZrqsZt{@=2YS3~z_mgax3%+PL=Rb(Q{Q z$Ma3ucFFX?PlJxbHtOh4-e|F@;l9PhqiGHc5taP3=t7%!{%yvqwjL~xU#!O8J4k8iqs3dilQ><#!(O# z0YNak_(z6fL`WIY{7nCPxmw2>BC%NCeF_w_J15OBfa$tbwHt|oJ0e=T;>`;q>^ z(sgVhiyDDwuFkqZv%weKlA&8AVWY>Tt)y7wrdra<%-Zu0kI;xrcqvf)^TZw*bD)o9 zT1$^bK1a&ki0~CKhivoM3;*fMlEKb|s6izdYJ7E&To5a-oZ%Z_8wQ(OqW(Y1eK4?Ee<4 zQpUXKTyCz~2t9$7(u`_(%r65I!$=gIFvEUSi2YJ}vsleAB~58;YAT^+c_Eu)i?bS7$tmgyb}HCbiK(-r&6UNL^E% ze>}XE;FnTyOuorgwm_NfTf6|l$I;qwL$e|poK=+8v_lSp5#|`2Mp)rzU!q_ zG(LnnXAdf(hMyNsKaA7rui5Ecw67R{VUmjmr;qw!(`)V-Y%4rMciz||VWW(S(+Fk2 
z%m=8fRl-O5u{45IDf66U@A+m*kYOB=figLlPInzDz-6RXt<|9YH|MqslZlb_7tUSZ z!-aG{`FW?(P6zzpzkC=bpX?hFosKs*Ze=T>{cexPVxn;+A2I^5c9ueCy7+$pL884j z#qF&WhoUS#?+YOhb;NeUr$LTvtACf6<|9A{pQB z1*Kz7R0MO(DRuq*;`Sr;=F}$(XlM5iuJ+QdviWzhY>JIRu6W%T@4Gj-g|xG!1Z!6p z-2)sH%^zt+eX~i3a-?1$QFofV9M%`$`u=Q&F=C3)TdPUys)K?D*x*nCLDzHg-I?32h2yurts5bc}_LI68AwWq)d<&$@%%-jRG) zW-{Z7_}z<14t(iC7v?fws=AIyqSFV>GsJVY_YJFcz10MK`e^aT@ND3`Q)-pDkrE@J zjD<=JAM1cJ0|$+5!y(z!NgyO*5=e0$_ksW>v;Fyjn?Dg?W$9q=wwvf#44@MH{UnJ| zPK%0dq0`xn?o^*PSgtVmId_SDIXHb%?I0Xi3FfiZN zi8$1I@KPkl8Nq4N3|Tqf8opDAS`jNt>t)lpUo_ue2%<Ow!Ty*fmGQY3)(KMD+wqGt4;BbrFn?^gKQOi!-8-N71^% z@%>Fl?JDf)zrzFDBW0XjCzh^&hU-E7T zEk&awy5eDeOz2^(eKIL$DHOPwxIT&G0aFRmjOT+AT(%9lTZ^lr>lNr-Sgi{_cws?3y<1wM&}aJYc$hi@b2(!{d_>4J|^)AW%*v>sK5 za;07}o=EG!={(65M)wI#Tpn#$j>150FP=^ljn7xpddPh&eDoT}dSSoIUemk-MxiiT z>jF1M;5&pckU`v={I)9}!t4E5u8y&(4cbkXN~||*D3;llR6r?P&(TEa1Yit4c$7LA zwgZ0d(;#P;WHt*^pek&N2Mzbzp>K2%6$%$%znzom{i$zgBF2bgvime1k`<=}P@KyV zinaYbVN7qtYj$hHZNo#Ob{W?cqAOi)AL#-Z@Bg&R$RU8*R{X9{P7;M!)9gwJO6uB* z!_)lqnn#7!t}Q|@%@UP}jWc!)RQCHI=}JxbvHoLcRybaYEOtbjiOL|WIKc0oxB2ys zbkJFU>V}M??jU$_;w>4Z$Ue#3$BCHhNkAn2e=tWFr-yR#>Xch9l&p%jD6FCT&9Z+~ z0i)`07k%U#Ts3tbRD1Ya%HaFxN{#3$ZBmN(*6Dt;GK^wf`_f1gu?t87_pq~bxo;1j zVnW8tm{TAsBb$HBEHoB^Ge^3>#Zy(&o>|5fNQ_Sc}v7Z5bP36xmzN$2oAr|Wa542PN{3kW7qD-Jfg`C_Zif3}(TFv{ zO5C6kf1N!@&k4o7kE(Q8PxHx);fm!u$(1y>-QZL+8sh?B05vdZHuIK6kfwwdp zpgy|(YC@~&g(`WNd|(lJjd-T580KPLWs+|*@E34<>? 
z&4g)n*R5LPIis@BHw2n)8jBz;-xj40{B`nE;Be7s%>xRFAAU6nf$Y47fof*@b9xl` z=pyKC0y96kZ26*2ku0SpJIE)l8+yb)=)6^886Pb35Zz-@ zX#a~RuZ@_1BA%Tgc+T7ZB1$+tYzYg3HfoAH3Xgis2T&nX1JuOtF?<>^;ucb}L3@Yr zD~^3;jLT~V9R70sHgmA}##g>HRQa&%15REjxBS@W$P2aPZ$p8crvSP?K1yU@zgNTk zEUXLzE<$APvrKuBYq_PuYs2)U~L|9bk$s3^PdZMwT-h@rbFy8^=@?o{0g)V$5Cs)Ml*j*#zxBR+g0)z4&Y8W>*>&w}1Dns} z+A!cBZxTuqp{z^4a{3QT$X6$}yG8+j(`|%h_Sys98EhtgoN@)obH_Fu2s)EGGLMb) zg4gylK#~{Eb+O+Qa~PfY{ppOBhv*%YU@0E^Hiq-3OrniW6r%6CkvYEhf{(NiVI4ft zEA5kGM#>grrf?I^%8o77rYtJjG{#fSYAc&QHVW>Xfw7xM?EH?lA|5S5`d!6|o@f+~ z_^47|uyJbp*K*hA6wc9Uv36CHk|GVi=%-D!jJ|XTFHmY8T9v?UWQ-R2=}F9`$56(g znbFkSYq)4TMXgH8k0Itn>^ZJ|_1%=~@FJzJSZuD=G<0rNzFWEl{GCo#&B}Q7_Icjp zq}e4Hped2e^(!Elz^tJ2qrb{)cXC&ja%_DeA+spW{UAxuyIMpJ&w|Kk|7hC!#hyd z8q5+iaHSe>ooj&6`3LLvY$wydv06(k#iXWL^V`48 z^Jw%JEQ)Hi1IbAfko{1`zSopfW(tok zV`0AxG+TeE_(LX_5(ZKF(%?KVXaiGj|FM|7lfERlaePV0)Ix!L&rXd~}FHdrGs#Phu|I>gUW0Wv#|zR~Z|VK=J`JJ7rx@iG<)z{0NCi zCEa^f>W^}Leo`|ro1dtzjPmx&FU64GAlANge9@wh)+N z|D2tuIb<=2PXZgCU8=zV(zWIj9@5 z8yS(pG|wpMsq{(H_pQvK%FG9MvA*o37pBa|5Iu3`XXl6rTId&$U54>V9m10=UDf_OUS|Ry;CaI8$eAEtykyot92|-8`9OH4njSK2qCXO z@c&4yj56^F{BCqa)s#*blEK(v(M-5&FYKbR4-$$!&Nf_Af8#|kQ3B`v276)UWV@^T z>-njHRVLLcHq9&_#_E4IkBv#dp+6}gr`pb~T4NW~|B ztYS;4Eu&l|25dkZnZ+XSw{U3h)M|#%Q0#zeEDBOjO}LZKW;f_={9UW%T9B}itThF< zPiKHK0bvjIB*N|Y9fanLB=`Cb!qXXGHv48@KJhi5UFGB^)f2Ecpt`{^NX^H_M(7pFQ_ zL|Hf;6qMQb6tzlJgSv8x9_7HlK$;^8CHlXfxqeOSIDh%)$)osc((EMe_r=-BRn2K| ziR?{MIV+o(b}Qyvetj7uob%P0lwir_)y~QgWWtO@TW2tatP<70r@Fo|tk{X68Gy3M zgadvlN<|mEvX6;$2|wIzB#VHck zn#~-`wVC2tG0YBjTg{yr6hBCrsK8sv%GjQU4GK>EIC=5Yu}tMhMfd)H&(P&N?9wmDUc*}z}@}<+J*QIr^oFN5mV-D;G1v{~`#4JZV+I-(K7TqOaXiwDFfdZGe zWElip@|>n5F*c|-T)iF{S$TP@iJg+j0aglmiIwf75;r{82>qS5ep1JuIWgBrn_R!d zK$}F3ZNAs(V~bBK_Htydky2o(hge`r@|FjH?~rJc5J7??MF!beF@(+5IHNNPvE-ik zWJP`Zm~i+m*<_?cLAC0~MMXjhwOJ7a!}HL~-(q7rk5TPTZ-&-#m~cHu^+?(&Dl+f! 
zr%Wy)VAgeSF%@QUQRO-O8?G0MGXV@!=GnVu0!sAL*unNL9% zSjPfGqi3hUQUxY68K9yY>Qqe^#{0BlMzYPHZ19iQns}@@!e-qmZdZCe>|v)C&b*$K z(;T2-R$Zr(UQN=I-j;+F!ysM;}uD0{KnwvGv*aUAo*7zKkQXW1_m$fQ3 z2_4h4n&3CisKGZ9_9((JEO14eddMdq|OIgltjpC$r$(mR1|%h}Dv+(eM8h2F%a z*O##(Vy1>*U^^t5AY-`#|Ck4~z;+JYf>L)L+@@8X-JFhHVw*IZ=$KY>aib&I51P*% zRb&veMtC3Q{L{?-QaNj^fgQC)#o6Biv#keb)|;7tsOBRXsB9X_{H~R;Qp+wY;rpEC zuKz7BIuf$m091pCNEB+&TbR0_oj<0eaYUNLP`#zXlbZt18{Rh~MxDW2ZWCVtYuSkI z(`C1((dAQ~I8ao=dnfxe_0Wy}A}tNireQYUE!S%({+y$_GP+xTvgxbna=J@^O7^i( zNxm;GeT=fv_2JM{b25CD8o9C|Hf>C&Vc(lO2+>F0Xf1K8*yp1b&Z%}+mi*uDAi%t- zs$KI(ymW*=f_cAAXMK1Mq_~pUZ9Qu^6KnL;$jISz`+E{EifT-l1n88{Pq#9tI|$j# z7$FV{GZVJ*)i3-&o35n&p56I0zwL22ZM7%PZ1|voOcvxXe=aiXmWsA;RK{im_i)Jm ztPMZC$;v$ZrGB;*nU$5ZXTDE8NaKbJnL}0BMO)efU3TFu+lftSYUO5f? zESCM7<2vUmu@#zMQsD()7<^Et5eBgH|50?-$Ck#eW!k?8Yv0h_-%k`K*0e^6hqrqD zGGwnAC8oruxP`4*K1ZlXTVVjA+Mo+|f0h4Jx@kdI9gEaHm$)Z>_I_wOL#b-^c}LJ6%GmKeMa(`h@jstV)A^?d%h`t!HOzj4qaWI*L7t!T++kQxjl;c%58doW zX<5A(8xLDXL^)f1mrRm$T89x$fl1qG_1<2Kv zlZRqYGbym!!4u|U3(UHVbltp!uz7vvvq|*~RV0Y(h(#MbM3|@BpG^7c_p^DhpAQ7E zL`QcIV|@2LF#JJ1-ayImDX;Tq=YlEMJ0eIle$zXMRAc+{`O@iwm&ok$i*-^zI9XZ( zOZBIm4ba*|FHPi7w#Exl-a+Duu&jc!7q)x?=BR}NXz|L(Rm~8s*J00C z>2-w)p@&W|FmLdTp!pRojOTdE6|7B03!Vo4$y#aM2J=?{8Li{78WgAVSV_$F_EovN z5FsS*L=90#`HSU#Q+cK`@JRNc$nRz7c;lOg=5t~-3C3f8B5B4cPMn;qhO8|zj&2;U z>KW4#v2kO^P3#Do*S|sWOTCgjPoV)Nd$WhsQgS(2iL6e`Vf8L z@Ja0H?lZS~PV?4bEV-}K9j5--b*z|mW)1}I5A{5B<~Z-kO_oPDZnZO(f@Fg*@NLQF zf-BcZK9pEx8#~i6@|og=9)aWXgh1Zb?yIXLbsgNVPtZiiYYqCoi4Q+|qE#A)Hs8Y=h0%9T$BQ(HH2M+`x6vBqjR z{`3HK@3bYgy4@S$56@#R$Ff zcFoPRx>Nx6k;5VvI%>G4_MHYCIF%KF}ShP|6YxuM2}>ppc}I9LXJZ zsU*a3_75T{kZh%)DtY#^OIqyYN)Cp_YyoVE15)--#0E6oz5KC^lg$;fai~kw<~Ji$ zFTh5sB`<4y6S7ILWi(}3@^0?6AG=Qn*2JBUGwDDn;=Pg+3;5QN$j_Q@|2A=3kv#75 zMZs0x0JTKK0T5$eVw!t;tLmk-VUdv|^7k<$fs!UKMMhO3BLYi6o7z?x9J$1lN=O@m(4Pj>iUWAwl$@=Z5QLX3MTO0;=Ie4wlsV4Mf zU*!S}LzlSJo<1w)G5Kv|3Vm9%V+mC-$jH2$&;caY2Gr7*)7wlyi8-~n9k5PTiF3istn#eA)Dlcx)03-XW; 
zsiN}rgis{{*}p^mBz%gdwjo4mRjaOsv>uz@J`Tk{G6iUN31DbQk%?eVif#>g(F=p{)Hi-Q9y`?) zTxj^u9o~JSnXxseo+`>Bto-^*GgMaMNValt6m1AlbVap*YqUQmmcmKo+GZY9l+Kyv zr1Qo>t@6Hh7PpE5`Y~1ejdCgmMECK94#4JpSkgXMeHaz>(v-oob_I-qFNBv|HHO3Y z0h(3(s7j4XWE0#;tx;#_)-L9zBp<^k)&(hNQV$EDmRsuawFzRu`_mhpjo0y1ktep? z*3Fk8NWrFQ_$8PktL|?TD=%NVy%3?jvF_6X+LJ?TP+!kA%}VB2dbi3X|}k{=8tMxy44)|As=W#>3&$fYfT}q97JKlgB$-Gokg_e7W5;=7R!$taRtkP6oe>eN(2Z{{JY);U7{>^AHvEk{; z$ht&mvB*6)tCDGVEN2A1d&^Dbp6Uwl?0T@s$?kGbJA;UeN<;dkaMK|_UL2IyvdO?p zx9KcNb4}lXKdKnj-;@V~1BJfxkbbn`GaAzW{G}24c;8AutUt?afr6HG(8g-vg3Bu_ zR|_?BNWL68N@oNI`M&!lgyzb&Akl0qw+%mHf0BxuGMHQ`9PqnJpI#P*s{5qGBg`CK+(6t7#%A zQH4>}N?{)*zQ?-^K#n7F2Od&HUWB<^x7O+?S4CLqk)LZ92uM7@Ja4ams|i3(xIX-q z^9|T!4m$m$TTq?x*!(9=Fwc&LsT|bop;6;2cr^34m8*F*UG3Z0*I(-t&c8&?=L;+z z(SMDVL!M+hGHOh*`&$#Wj;qX}WGoS(;FLVoYQnsex8oaEu3H;EQaVYC)2%7gbnlX; zwCS=`ADrqn3lSAuVN(PNe8IYZX_eGqW0i86tG5#Tf*^Mf768aW$rLQ4@IzB>WUgTu zp8d?y(Q3hFv^st6Sp-=LcUvUq0DXJEo?T$lq8j6XgX&of-!m*~=)BE!jf@ckjr8i3 zzdkLmRBNt>t@JWJl$R5p{6qweftPP7_@jbQxxiBLn}ZH6TfQaswyMPI?#8EISEDT)zlu>R5kbu~XKr&Sn16j%r3;y7^TMl0aZ~TpCjHtV7SeVg?_G<|5cNPc4n*&=pgJ^ z!lqE%oNSt&zkD@6csAIPgw?g-BVjbX&nq+FUZ*W}*%#05tA05k;>B3oc~AHuQ<{3sV8B*q@-0 zkBiIZ4&-YGOupxI1(V1j*|0=zGPN*PCzzkMz5C#fMlTMS@*8xiuyq9+(9`o(^M%SF z(yP_JWptm061LS-^S2X7K9%b`&c!bdbB=#9=iYPHrIO})M1OtP(f$-tEh zOVqOZccbYy(RP-JrEwY9bRkp?@kqC@*(w~h#gV&2>qbQuFmK?-;JLbzP}32Gqgtj* zRHD6-i}YT*d~CwJ#@?3^WMYAN6*0aDEhm6y%qS8eoYSWj-oX$VXw16mPD=#l=PHIA08xyTL8LtvhlBqfA+S|JrI$U!_EA&{SXXh>&7@AKc$kGfp_rp;bYg316| z#PRK8=gn@8c>2)!m|AbjO$bgvI}UPYYKF*ok^8a4?sdiU%&MSvq6s{4;Ru6ws%uG% zus$6=U4dL1YJtP~r-!(V5)6&Doy)^`v)A6z@HEk;DIk4ugAnx$x=tV6K3LomDI6(i ze?p31lsdwJV6_v>+f1oB4tQzcZzWENe_?>CI28noPp5D{2(I0Vp}IL5NcH{L9kD~= zETNLU_*)eHhQ?n5LLEZVCYozo>hox@*^VPoxQS;q34XAakXRZ|9Q?Am_&iI=Kwu(y z(b5#`S?`7ak66p4(f379AfKHb2ra$j^M5Q3buYns>qfNda>Q_ z%7Vf#FdVs$f~QEqo!S<2oB@0{%QZf-=)@6K4_AO(>x(Ex_O$fJU{PZQ7!m&=ZEuwl z%E2K=K(x3eHcKqHSuhpgl+!?db3?MDp#0?yY6o>=!ZiK6@?=O2@!4er*LBE47p8B}(lqXUr4zxl4 zNZuhpEBZ~Q{LfUomQ|lP42sjmZ 
z5>Tq#IEm+zI5?$VS+?)Kz=#z8ZLUpJgmhnHBul)oyHr4S)91=O|8D`}9 zqKKv?=?6=T*#HQyc<&#|=|KyU6xop}d8X}GoLZw<7$W(W=JxD!lbtS&uxjDZx^@Gb zgHA3mvV5o1V(U?@?Al zxmN}}KX6qwBHMU?M@(zIz%119b$7v;*h~-)g6(E6@C$ck3hajZf z5n{#fS^@=gTmU00+Ppq8>sv)I*^mr$*00&?!#aaWm@K8;TpZ^gR5*N8C8Q4Sq9BjK z2P$|UIWz9WKE@_OwP@p`d6X2g7%SabzzpW1dl3gW3FK!_sNuU==A^e0fl}|&EhKY- z-C5$4rw%j)E>38!VoKTB={SJoRVBHiwbb2=dEV?0+jQ7#f!i36?^m~(uC_{B(cNd9 z7skh(4RRX{@lYUM)59xYPME-7LUY$Ko(BE(z%hADCE^>&KS&(GqQguN^A|g{fKVrI z=PXFv97$&Rl}gK7WQf(G>}3|NGe8oE=A4UYhyEJkti49nzX@<+88dL>ZKn_8^=BaP z=%4r6|KNimw+ZR}WKb{Cw`q31WvG%43TSB^&m~o=`aM&u z3d5XM?t3PQRd26ir8cs>S-$z=#eX`uU*VwLKj*G~J!Dw%h&F!{j_TblCR@HN@O6D&T_U)!YgZLUHE< z@8v!bu+uV7vI@C(>|hDFgh?&}I!9)B8MctxTSx|rYz}6@Lb8*s#A)vLuUveuqz_y1Y>aAbIna!_&2a?6|^QPFi zR~>6|CTG{dt{q)l=b=oWfZ&v$Gs-FuFsFc;K{#W_%zV(2L9x=j-#qrviin0N9x|1S znIoa7@AP6{BvUg$oOle3xf9Hk8U>d61Ao}~-sQwh%{A6UZy1Y+Cv9~C^rz~J`Id9O z`4=miTIt;HQs1U}piPQ6O*3eFVC!pRUVNO%7%~z(>0`xnOi8vtm8f&fvlmpUJ6}pK zq$@}xp0uyDND#z*zy)i{Im@raoaxR%3!F0|4zzoU2C0KF21aueH@^AY$@sI4pt2tf zo|pj%^MQ8^hjk2Dcyo5-`rBDepaT&>0mp- zW~gGI4vgK_U#bT+Za;OOlipm?=q1^d8E7uX|CBeoVyrdntJl@c1TRNx_5SU1+-;7P z`3}e-T&F+Z6RP;0i>A`4w*t0Kuy@tzBVI4`&vg)6=gGKZ80q@T`E*}JlF)re$aZ9p z!WDn~a2q2^>8R>}15uE>|7E}+=n2DkeI)hdtUS(V36oYs?F_=Eq(l(84sD2E{sSiP z^a@6?doSv8AhuI7o$Jq?=B%_Kcv?<;*g$DfP2j)cdV=#ce!G7_@Y?x{O@F$?Wjmf9 zWzkN{far5nc52oJPwdt}HAl|^Eux^(u>}@_poH*_j)|=#e{*?p)8h$Ido!FM;o})uNmHsbR%8%hE0qM5JBT&7^@%SW?dhW{4mt#Aj5!|BU>&0ERhkU%4b<&^zbB376@Qp%Y>tlVY5UkOr~>8jc+d$KJWvx!yCC zU|Z>>t$F?$TPxBr<23dbBCP>8)#!nC*m^u_mCQVml`-gSTL ztBO89@1$VB)i3NE_v~Y)((?as@#IF)UTAx%h$eeLyRbJ)VCd#7y*w9i^p(4U25vq- zp~^o+hqxet>}RdmCEne;S4uIw@(~4F)Z=A6)|ANKtgDNtEQpx4m3B7NGKAh(oLSok zpHN6xUZpWz-qFf%;F+EGaL}87dsy~gy$Wvsk<`OOaDSV8Mm!(6z+}d>)g0q6A`i)N?z}mDNs@ndWUxW_`@aU71&@@6s{sp^ZPvD^U~6Y@ zcxfcB;8r;V2AJU!oF&u!!RtQ2Xp`i{=DXNRkE9~2){Qe?y5MS*&Sz$$WEgs2`keQ1 zhh^;2H?=IHVO8nc&M~sU{`F%C2S|I2NjyFFoCNz0M)lMg;P!^7A*!eiDj7c24aCVvo=R3I;lyrl 
zypNV8V+3nUD82p=A7>o$KH~DW-hN-Cp1h_^PeSP7vC+)kD{P91EuBU_*JviNVgVHkQRhQb$qxTN6y&XxVCtZ~#V9pcUGFI60O^HJP%&gq_( zJV(=ef|6~|S(4xVA^GZE@^4@)8?huLgHyIh6DufI*p+(_b1*;_c8SMsz5C)?xk!-( zT4tK@oq|c=P2m!vMD0}WnMt`8QbZ352<_m*1U}j42hP!%QcqQ*`Fd>V|MPlVvc_Mx zpCP%MiSM)ETyATY2{!iFeHPelsja@-GT`wqZ;VrZp6Kd+=a3$G<4{I&9sV`wgZo>F z;OuYb{l)gg?Qmm}+P!IhmJ05j32`&yQ0tO!n~*FPLWPPi-LG=F zn6<6=eW~8R+Ol_hTbyMcDcf+|=@VlfmjUuTG4nRski2(0EJ&3Oi6?z- zU_;F$tsRYzZMwbcyipl41O7+)$2F@RFgn$rH7gc-kwtNurI=CA|6uKD$2vGe$sQjy zEX>cjX$+=vNLg;XsidkQ-qeiIsuwsih`#ATx^YNOYEvd}cluR`S#}Z#93tZqC3@V1vbj081NP9JA_t81_PA5q5>=P*K=C7gvT#(mnLqd z;=>CJh@p?1xEj2M{;p<>5&2%ZC@8HKZMi_pvpjKhD9T9A1|AOwH&%q>G;0KkO<05m6K%W{VPk zFqodBZlbmC@NOKo4tElld44jt(PM*<5<)Xfu3QV=-$u)uc~P3cv9?fbF9!~T)=g%=!~MaF>QV1BXxgAnY`vRO(0oj2DHVZyIcYI z6akxp1dE31^K&6f4JQ=86f^H9VjbeI17+@q?yW*zTj0*GW-!WW2p%Z4xc;Lo5Y>&> zA9%Z_D`{Hwg^YUKigiGwuEDQ@q81JyW!q5*f|1dO1{nA*vi2!JiG~iynBns0Rve{O zQ45Vf1>VdGtY}bKb)%90XVSO%g~WbjCZkiz%N+CcyxtunH_Q!Pjg-&TaHnZ@!lhY) zPF~iR#(guC%W-eInA!@eT%x6lsX^{BGrlB^e))6C`QDTgje9<=h@^}$XN1u3(7Yz- zW8!YkW?AVB0tyW$4;kcvosAViQX`+IV&BmEoq7R-<~)qDBi&Ggaw4tx`em7%{pnDU z$04%NK7HR!3vq>xg<~}qX1X=sNOAHQk)&-xF#0nQ&1OL4Pve--*%+>zT#NfSkb0UJU& zslf8&o`*X7Ero|oyWHcOXs!8&<&jmULqXO>GdyW`i)qNi_DUE{2*_!?s8S5f`)O_g zM!x)(*alT$(#^yT&=cN1x8^mc3lV}@^d}=za=zcfH6WP6+V`NX#&J%S{UBC-D&DjCf zLBaO?Q$+?1a%jd+^(T>KQqc8z-GLfnhKrXUTs6Y2er^gla6!0qYWulKKpm<@#lW=*3O~q<8|yYSIg0?k$H~cT)wNp_ zoGQR-f6N^NY3kCm|Lyn{|CzoSXaCD9)l?18rn-L_cB)!Zms)xy2AJ2_K%WDB@1c-n znKo?!r>hwc#l#1^5gd@)_x2Rd%y(*trJ(`_^Mz~x;M6s;k^)gVNd$##LZ;sFRVMVN zn9uTFa<(JY8zP|s9B3O_G09mGrwR@bA+(lhB@|qDG)4TsB0~5WoXyqUQE;G7uwD-M za2g8pfGI43kvLwR*guAx!aEbh7JL@(wbSm5MtP;LSuCZ-`9FbQI>SPR%n>_HWNjZ8RV*H0U#DS7|2HcYNixqctt^%`m z2z?h^g~CRt01%zZm^77@zh*?&E2qxCe51%eL_tI|ua`T!9?875?kmpFpZJ_{VsNr1$c^SmCN+|gD#X5KB9iN$pyu%unC{gg zhxvuX6K%VmLDOH&-T@h_D)<_Dn$r3L7?7wTh>hamc(Aw7~MaF`e$Q&zgd=7-c z{CJ!bfhjkbx2j{1A{YZ0MrHQF59F+ZGQDg{xtdp?xF7ebJY7>R%k55wE(A~Mv`*)ehsYG14BRc3+kMJ7 
zoNW@sCHJ1t9y0d-FW2u3P8L@M;17u#6UF&R^^V~@dsNmHNBc!~iCSkckz-n2Jyee^ zu@v|tj7VlXRtW872FJGbH@2MM@809%S0keZRuTW-DtP;*6+wN3M6n5rd?OrWl#s=r z0GA_k8}MGh>Jz5j0V)*&Lot9rx1LoeC5;%GiiK##luy3Jj?>Gg3&!{h2c=WbJElM| z2*IB0{BQEGlbo=oHX*G0e$9ISjsYz2?-={Bl4$^8J`SSGfhZ-gsCey(_;kcyMGf7- zR5hbYl)OXwwO&=WNr~!raVp%p#84w#uLac=7Y_gkbW+WOw-v%}{G0mL zQDo>1q>K=X!88LX4BXr;ntV4H?vdSTOM#i=05k!Jt zY6{l`sRcEkn>k0#z3m1_W*nlf%Neu)&PzOl;%~DJ$eg6qZ!H8+P5dOhH8`jUYR{Gk z0$O}C43CI8sBEA7fYWjvs?xFf2bG%l3M44}YQz#Aq#ND~(IqX@$Y zpvV*?tVHI4dP>5?nf{}Boo4@E!u&xMGUxq5055wED_=6b8lo1! zBo5Apyr_?0h22=%tQL#doPF>RLTmiHl3|hX|0fnwn+Y)9^m`EiZZxWoS(m;vBwq_y zb#Hnxs7qhF!Vzk*BwH_KB~;6|)d1ILz-`P(;|M!JHa`=AU#(pHTtQ;lm)4tJ0y=j}c=G8ay9YLrB}q^YfYl3f88E z$npbGCmFa&GEa}~bcP}UGmcgCoM=QFoZOvXAurw-9%9n;haE-{lJ>^vv6|8Hm|>{| z5p-&((B^+Hj9+B#{c%)-_82uG#L4WS(BJJTZnShEv+CKAtCNrV2neALoah0PV6e6) z8SvfJOEZC=cH4vZl)Z)3ABI>&#FsTU3Qc)}>vU53yHsuAAVDXib=rSVrWP41Kk#Il zV1UB3u<3#c#>6AAiJ^-ZMr)S~PQ>o1vZhfBQ{o-Rhs@+g)5FPV(- zNMj+i4ZC-#Sf1^u8sbsPFZKE7F%qSM^ASby-^&t&&?FasvB&F+;!>V)XkY+8x|&8B Jt!mEk{|8Z7;++5h literal 0 HcmV?d00001 diff --git a/test/e2e/autoops/BUILD.bazel b/test/e2e/autoops/BUILD.bazel new file mode 100644 index 000000000..44369d9fc --- /dev/null +++ b/test/e2e/autoops/BUILD.bazel @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_test") + +go_test( + name = "go_default_test", + srcs = ["auto_ops_test.go"], + deps = [ + "//pkg/autoops/client:go_default_library", + "//pkg/experiment/client:go_default_library", + "//pkg/feature/client:go_default_library", + "//pkg/gateway/client:go_default_library", + "//pkg/rpc/client:go_default_library", + "//pkg/uuid:go_default_library", + "//proto/autoops:go_default_library", + "//proto/event/client:go_default_library", + "//proto/experiment:go_default_library", + "//proto/feature:go_default_library", + "//proto/gateway:go_default_library", + "//proto/user:go_default_library", + "//test/e2e/util:go_default_library", + "@com_github_golang_protobuf//ptypes:go_default_library_gen", + 
"@io_bazel_rules_go//proto/wkt:wrappers_go_proto", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + "@org_golang_google_protobuf//encoding/protojson:go_default_library", + ], +) diff --git a/test/e2e/autoops/auto_ops_test.go b/test/e2e/autoops/auto_ops_test.go new file mode 100644 index 000000000..e420a5ad3 --- /dev/null +++ b/test/e2e/autoops/auto_ops_test.go @@ -0,0 +1,1183 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package autoops + +import ( + "bytes" + "context" + "encoding/json" + "flag" + "fmt" + "net/http" + "testing" + "time" + + "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/ptypes/wrappers" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/encoding/protojson" + + autoopsclient "github.com/bucketeer-io/bucketeer/pkg/autoops/client" + experimentclient "github.com/bucketeer-io/bucketeer/pkg/experiment/client" + featureclient "github.com/bucketeer-io/bucketeer/pkg/feature/client" + gatewayclient "github.com/bucketeer-io/bucketeer/pkg/gateway/client" + rpcclient "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + "github.com/bucketeer-io/bucketeer/pkg/uuid" + autoopsproto "github.com/bucketeer-io/bucketeer/proto/autoops" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/client" + experimentproto "github.com/bucketeer-io/bucketeer/proto/experiment" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" + gatewayproto "github.com/bucketeer-io/bucketeer/proto/gateway" + userproto "github.com/bucketeer-io/bucketeer/proto/user" + "github.com/bucketeer-io/bucketeer/test/e2e/util" +) + +const ( + goalEventType eventType = iota + 1 // eventType starts from 1 for validation. 
+ goalBatchEventType + evaluationEventType + metricsEventType + prefixTestName = "e2e-test" + retryTimes = 60 + timeout = 10 * time.Second + prefixID = "e2e-test" + version = "/v1" + service = "/gateway" + eventsAPI = "/events" + authorizationKey = "authorization" +) + +var ( + webGatewayAddr = flag.String("web-gateway-addr", "", "Web gateway endpoint address") + webGatewayPort = flag.Int("web-gateway-port", 443, "Web gateway endpoint port") + webGatewayCert = flag.String("web-gateway-cert", "", "Web gateway crt file") + apiKeyPath = flag.String("api-key", "", "Api key path for web gateway") + gatewayAddr = flag.String("gateway-addr", "", "Gateway endpoint address") + gatewayPort = flag.Int("gateway-port", 443, "Gateway endpoint port") + gatewayCert = flag.String("gateway-cert", "", "Gateway crt file") + serviceTokenPath = flag.String("service-token", "", "Service token path") + environmentNamespace = flag.String("environment-namespace", "", "Environment namespace") + testID = flag.String("test-id", "", "test ID") +) + +type eventType int + +type event struct { + ID string `json:"id,omitempty"` + Event json.RawMessage `json:"event,omitempty"` + EnvironmentNamespace string `json:"environment_namespace,omitempty"` + Type eventType `json:"type,omitempty"` +} + +type successResponse struct { + Data json.RawMessage `json:"data"` +} + +type registerEventsRequest struct { + Events []event `json:"events,omitempty"` +} + +type registerEventsResponse struct { + Errors map[string]*gatewayproto.RegisterEventsResponse_Error `json:"errors,omitempty"` +} + +func TestCreateAndListAutoOpsRule(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + autoOpsClient := newAutoOpsClient(t) + defer autoOpsClient.Close() + featureClient := newFeatureClient(t) + defer featureClient.Close() + experimentClient := newExperimentClient(t) + defer experimentClient.Close() + + featureID := createFeatureID(t) + createFeature(ctx, t, 
featureClient, featureID) + feature := getFeature(t, featureClient, featureID) + goalID := createGoal(ctx, t, experimentClient) + clause := createOpsEventRateClause(t, feature.Variations[0].Id, goalID) + createAutoOpsRule(ctx, t, autoOpsClient, featureID, []*autoopsproto.OpsEventRateClause{clause}, nil, nil) + autoOpsRules := listAutoOpsRulesByFeatureID(t, autoOpsClient, featureID) + if len(autoOpsRules) != 1 { + t.Fatal("not enough rules") + } + actual := autoOpsRules[0] + if actual.Id == "" { + t.Fatal("id is empty") + } + if actual.FeatureId != featureID { + t.Fatalf("different feature ID, expected: %v, actual: %v", featureID, actual.FeatureId) + } + if actual.OpsType != autoopsproto.OpsType_DISABLE_FEATURE { + t.Fatalf("different ops type, expected: %v, actual: %v", autoopsproto.OpsType_DISABLE_FEATURE, actual.OpsType) + } + oerc := unmarshalOpsEventRateClause(t, actual.Clauses[0]) + if oerc.VariationId != feature.Variations[0].Id { + t.Fatalf("different variation id, expected: %v, actual: %v", feature.Variations[0].Id, oerc.VariationId) + } + if oerc.GoalId != goalID { + t.Fatalf("different goal id, expected: %v, actual: %v", "gid", oerc.GoalId) + } + if oerc.MinCount != int64(5) { + t.Fatalf("different goal id, expected: %v, actual: %v", "gid", oerc.GoalId) + } + if oerc.ThreadsholdRate != float64(0.5) { + t.Fatalf("different goal id, expected: %v, actual: %v", "gid", oerc.GoalId) + } + if oerc.Operator != autoopsproto.OpsEventRateClause_GREATER_OR_EQUAL { + t.Fatalf("different goal id, expected: %v, actual: %v", "gid", oerc.GoalId) + } +} + +func TestGetAutoOpsRule(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + autoOpsClient := newAutoOpsClient(t) + defer autoOpsClient.Close() + featureClient := newFeatureClient(t) + defer featureClient.Close() + experimentClient := newExperimentClient(t) + defer experimentClient.Close() + + featureID := createFeatureID(t) + createFeature(ctx, t, 
featureClient, featureID) + feature := getFeature(t, featureClient, featureID) + goalID := createGoal(ctx, t, experimentClient) + clause := createOpsEventRateClause(t, feature.Variations[0].Id, goalID) + createAutoOpsRule(ctx, t, autoOpsClient, featureID, []*autoopsproto.OpsEventRateClause{clause}, nil, nil) + autoOpsRules := listAutoOpsRulesByFeatureID(t, autoOpsClient, featureID) + if len(autoOpsRules) != 1 { + t.Fatal("not enough rules") + } + actual := getAutoOpsRules(t, autoOpsRules[0].Id) + if actual.Id == "" { + t.Fatal("id is empty") + } + if actual.FeatureId != featureID { + t.Fatalf("different feature ID, expected: %v, actual: %v", featureID, actual.FeatureId) + } + if actual.OpsType != autoopsproto.OpsType_DISABLE_FEATURE { + t.Fatalf("different ops type, expected: %v, actual: %v", autoopsproto.OpsType_DISABLE_FEATURE, actual.OpsType) + } + oerc := unmarshalOpsEventRateClause(t, actual.Clauses[0]) + if oerc.VariationId != feature.Variations[0].Id { + t.Fatalf("different variation id, expected: %v, actual: %v", feature.Variations[0].Id, oerc.VariationId) + } + if oerc.GoalId != goalID { + t.Fatalf("different goal id, expected: %v, actual: %v", "gid", oerc.GoalId) + } + if oerc.MinCount != int64(5) { + t.Fatalf("different goal id, expected: %v, actual: %v", "gid", oerc.GoalId) + } + if oerc.ThreadsholdRate != float64(0.5) { + t.Fatalf("different goal id, expected: %v, actual: %v", "gid", oerc.GoalId) + } + if oerc.Operator != autoopsproto.OpsEventRateClause_GREATER_OR_EQUAL { + t.Fatalf("different goal id, expected: %v, actual: %v", "gid", oerc.GoalId) + } +} + +func TestDeleteAutoOpsRule(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + autoOpsClient := newAutoOpsClient(t) + defer autoOpsClient.Close() + featureClient := newFeatureClient(t) + defer featureClient.Close() + experimentClient := newExperimentClient(t) + defer experimentClient.Close() + + featureID := createFeatureID(t) + 
createFeature(ctx, t, featureClient, featureID) + feature := getFeature(t, featureClient, featureID) + goalID := createGoal(ctx, t, experimentClient) + clause := createOpsEventRateClause(t, feature.Variations[0].Id, goalID) + createAutoOpsRule(ctx, t, autoOpsClient, featureID, []*autoopsproto.OpsEventRateClause{clause}, nil, nil) + autoOpsRules := listAutoOpsRulesByFeatureID(t, autoOpsClient, featureID) + if len(autoOpsRules) != 1 { + t.Fatal("not enough rules") + } + deleteAutoOpsRules(t, autoOpsClient, autoOpsRules[0].Id) + resp, err := autoOpsClient.GetAutoOpsRule(ctx, &autoopsproto.GetAutoOpsRuleRequest{ + EnvironmentNamespace: *environmentNamespace, + Id: autoOpsRules[0].Id, + }) + if resp != nil { + t.Fatal("autoOpsRules is not deleted") + } + if err == nil { + t.Fatal("err is empty") + } + if status.Code(err) != codes.NotFound { + t.Fatalf("different error code, expected: %s, actual: %s", codes.NotFound, status.Code(err)) + } +} + +func TestExecuteAutoOpsRule(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + autoOpsClient := newAutoOpsClient(t) + defer autoOpsClient.Close() + featureClient := newFeatureClient(t) + defer featureClient.Close() + experimentClient := newExperimentClient(t) + defer experimentClient.Close() + + featureID := createFeatureID(t) + createFeature(ctx, t, featureClient, featureID) + feature := getFeature(t, featureClient, featureID) + goalID := createGoal(ctx, t, experimentClient) + clause := createOpsEventRateClause(t, feature.Variations[0].Id, goalID) + createAutoOpsRule(ctx, t, autoOpsClient, featureID, []*autoopsproto.OpsEventRateClause{clause}, nil, nil) + autoOpsRules := listAutoOpsRulesByFeatureID(t, autoOpsClient, featureID) + if len(autoOpsRules) != 1 { + t.Fatal("not enough rules") + } + _, err := autoOpsClient.ExecuteAutoOps(ctx, &autoopsproto.ExecuteAutoOpsRequest{ + EnvironmentNamespace: *environmentNamespace, + Id: autoOpsRules[0].Id, + 
ChangeAutoOpsRuleTriggeredAtCommand: &autoopsproto.ChangeAutoOpsRuleTriggeredAtCommand{}, + }) + if err != nil { + t.Fatalf("failed to execute auto ops: %s", err.Error()) + } + feature = getFeature(t, featureClient, featureID) + if feature.Enabled == true { + t.Fatalf("feature is enabled") + } + autoOpsRules = listAutoOpsRulesByFeatureID(t, autoOpsClient, featureID) + if autoOpsRules[0].TriggeredAt == 0 { + t.Fatalf("triggered at is empty") + } +} + +// Test for old SDK client. Tag is not set in the EvaluationEvent and GoalEvent +// Evaluation field in the GoalEvent is deprecated. +func TestOpsEventRateBatchWithoutTag(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + autoOpsClient := newAutoOpsClient(t) + defer autoOpsClient.Close() + experimentClient := newExperimentClient(t) + defer experimentClient.Close() + featureClient := newFeatureClient(t) + defer featureClient.Close() + + featureID := createFeatureID(t) + createFeature(ctx, t, featureClient, featureID) + feature := getFeature(t, featureClient, featureID) + goalID := createGoal(ctx, t, experimentClient) + clause := createOpsEventRateClause(t, feature.Variations[0].Id, goalID) + createAutoOpsRule(ctx, t, autoOpsClient, featureID, []*autoopsproto.OpsEventRateClause{clause}, nil, nil) + autoOpsRules := listAutoOpsRulesByFeatureID(t, autoOpsClient, featureID) + if len(autoOpsRules) != 1 { + t.Fatal("not enough rules") + } + // Wait until trasformer and watcher's targetstores are refreshed. 
+ time.Sleep(40 * time.Second) + + userIDs := createUserIDs(t, 10) + for _, uid := range userIDs { + grpcRegisterEvaluationEvent(t, featureID, feature.Version, uid, feature.Variations[0].Id, "") + } + for _, uid := range userIDs[:6] { + registerGoalEventWithEvaluations(t, featureID, feature.Version, goalID, uid, feature.Variations[0].Id) + } + for i := 0; i < retryTimes; i++ { + feature = getFeature(t, featureClient, featureID) + if !feature.Enabled { + autoOpsRules = listAutoOpsRulesByFeatureID(t, autoOpsClient, featureID) + if autoOpsRules[0].TriggeredAt == 0 { + t.Fatalf("triggered at must not be zero") + } + break + } + if i == retryTimes-1 { + t.Fatalf("retry timeout") + } + time.Sleep(time.Second) + } +} + +func TestGrpcOpsEventRateBatch(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + autoOpsClient := newAutoOpsClient(t) + defer autoOpsClient.Close() + experimentClient := newExperimentClient(t) + defer experimentClient.Close() + featureClient := newFeatureClient(t) + defer featureClient.Close() + + featureID := createFeatureID(t) + createFeature(ctx, t, featureClient, featureID) + feature := getFeature(t, featureClient, featureID) + goalID := createGoal(ctx, t, experimentClient) + clause := createOpsEventRateClause(t, feature.Variations[0].Id, goalID) + createAutoOpsRule(ctx, t, autoOpsClient, featureID, []*autoopsproto.OpsEventRateClause{clause}, nil, nil) + autoOpsRules := listAutoOpsRulesByFeatureID(t, autoOpsClient, featureID) + if len(autoOpsRules) != 1 { + t.Fatal("not enough rules") + } + // Wait until trasformer and watcher's targetstores are refreshed. 
+ time.Sleep(40 * time.Second) + + userIDs := createUserIDs(t, 10) + for _, uid := range userIDs { + grpcRegisterEvaluationEvent(t, featureID, feature.Version, uid, feature.Variations[0].Id, feature.Tags[0]) + } + for _, uid := range userIDs[:6] { + grpcRegisterGoalEvent(t, goalID, uid, feature.Tags[0]) + } + for i := 0; i < retryTimes; i++ { + feature = getFeature(t, featureClient, featureID) + if !feature.Enabled { + autoOpsRules = listAutoOpsRulesByFeatureID(t, autoOpsClient, featureID) + if autoOpsRules[0].TriggeredAt == 0 { + t.Fatalf("triggered at must not be zero") + } + break + } + if i == retryTimes-1 { + t.Fatalf("retry timeout") + } + time.Sleep(time.Second) + } +} + +func TestOpsEventRateBatch(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + autoOpsClient := newAutoOpsClient(t) + defer autoOpsClient.Close() + experimentClient := newExperimentClient(t) + defer experimentClient.Close() + featureClient := newFeatureClient(t) + defer featureClient.Close() + + featureID := createFeatureID(t) + createFeature(ctx, t, featureClient, featureID) + feature := getFeature(t, featureClient, featureID) + goalID := createGoal(ctx, t, experimentClient) + clause := createOpsEventRateClause(t, feature.Variations[0].Id, goalID) + createAutoOpsRule(ctx, t, autoOpsClient, featureID, []*autoopsproto.OpsEventRateClause{clause}, nil, nil) + autoOpsRules := listAutoOpsRulesByFeatureID(t, autoOpsClient, featureID) + if len(autoOpsRules) != 1 { + t.Fatal("not enough rules") + } + // Wait until trasformer and watcher's targetstores are refreshed. 
+ time.Sleep(40 * time.Second) + + userIDs := createUserIDs(t, 10) + for _, uid := range userIDs { + registerEvaluationEvent(t, featureID, feature.Version, uid, feature.Variations[0].Id, feature.Tags[0]) + } + for _, uid := range userIDs[:6] { + registerGoalEvent(t, goalID, uid, feature.Tags[0]) + } + for i := 0; i < retryTimes; i++ { + feature = getFeature(t, featureClient, featureID) + if !feature.Enabled { + autoOpsRules = listAutoOpsRulesByFeatureID(t, autoOpsClient, featureID) + if autoOpsRules[0].TriggeredAt == 0 { + t.Fatalf("triggered at must not be zero") + } + break + } + if i == retryTimes-1 { + t.Fatalf("retry timeout") + } + time.Sleep(time.Second) + } +} + +func TestDatetimeBatch(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + autoOpsClient := newAutoOpsClient(t) + defer autoOpsClient.Close() + featureClient := newFeatureClient(t) + defer featureClient.Close() + + featureID := createFeatureID(t) + createFeature(ctx, t, featureClient, featureID) + clause := createDatetimeClause(t) + createAutoOpsRule(ctx, t, autoOpsClient, featureID, nil, []*autoopsproto.DatetimeClause{clause}, nil) + autoOpsRules := listAutoOpsRulesByFeatureID(t, autoOpsClient, featureID) + if len(autoOpsRules) != 1 { + t.Fatal("not enough rules") + } + // Wait until watcher's targetstore is refreshed and autoOps is executed. 
+ time.Sleep(50 * time.Second) + + feature := getFeature(t, featureClient, featureID) + if feature.Enabled { + t.Fatalf("feature must be disabled") + } + autoOpsRules = listAutoOpsRulesByFeatureID(t, autoOpsClient, featureID) + if autoOpsRules[0].TriggeredAt == 0 { + t.Fatalf("triggered at must not be zero") + } +} + +func TestCreateAndListWebhook(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + autoOpsClient := newAutoOpsClient(t) + defer autoOpsClient.Close() + + name := newWebhookName(t) + description := newUUID(t) + resp := createWebhook(ctx, t, autoOpsClient, name, description) + webhooks := listWebhooks(ctx, t, autoOpsClient) + var actual *autoopsproto.Webhook + for _, w := range webhooks { + if w.Id == resp.Webhook.Id { + actual = w + break + } + } + if actual == nil { + t.Fatal("webhook is nil") + } + if actual.Id == "" { + t.Fatal("id is empty") + } + if actual.Name != name { + t.Fatalf("diffrent name, expected: %v, actual: %v", name, actual.Name) + } + if actual.Description != description { + t.Fatalf("diffrent description, expected: %v, actual: %v", description, actual.Description) + } +} + +func TestGetWebhook(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + autoOpsClient := newAutoOpsClient(t) + defer autoOpsClient.Close() + + name := newWebhookName(t) + description := newUUID(t) + resp := createWebhook(ctx, t, autoOpsClient, name, description) + actual := getWebhook(ctx, t, autoOpsClient, resp.Webhook.Id) + if actual == nil { + t.Fatal("webhook is nil") + } + if actual.Id == "" { + t.Fatal("id is empty") + } + if actual.Name != name { + t.Fatalf("diffrent name, expected: %v, actual: %v", name, actual.Name) + } + if actual.Description != description { + t.Fatalf("diffrent description, expected: %v, actual: %v", description, actual.Description) + } +} + +func TestUpdateWebhook(t *testing.T) { + t.Parallel() + ctx, 
cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + autoOpsClient := newAutoOpsClient(t) + defer autoOpsClient.Close() + + name := newWebhookName(t) + description := newUUID(t) + resp := createWebhook(ctx, t, autoOpsClient, name, description) + webhook := getWebhook(ctx, t, autoOpsClient, resp.Webhook.Id) + if webhook == nil { + t.Fatal("webhook is nil") + } + newDesc := newUUID(t) + newName := newWebhookName(t) + updateWebhookDescription(ctx, t, autoOpsClient, resp.Webhook.Id, newDesc) + updateWebhookName(ctx, t, autoOpsClient, resp.Webhook.Id, newName) + actual := getWebhook(ctx, t, autoOpsClient, resp.Webhook.Id) + if actual.Id == "" { + t.Fatal("id is empty") + } + if actual.Name != newName { + t.Fatalf("diffrent name, expected: %v, actual: %v", name, actual.Name) + } + if actual.Description != newDesc { + t.Fatalf("diffrent description, expected: %v, actual: %v", description, actual.Description) + } +} + +func TestDeleteWebhook(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + autoOpsClient := newAutoOpsClient(t) + defer autoOpsClient.Close() + + name := newWebhookName(t) + description := newUUID(t) + resp := createWebhook(ctx, t, autoOpsClient, name, description) + webhook := getWebhook(ctx, t, autoOpsClient, resp.Webhook.Id) + if webhook == nil { + t.Fatal("webhook is nil") + } + deleteWebhook(ctx, t, autoOpsClient, resp.Webhook.Id) + getResp, err := autoOpsClient.GetWebhook(ctx, &autoopsproto.GetWebhookRequest{ + Id: resp.Webhook.Id, + EnvironmentNamespace: *environmentNamespace, + }) + if getResp != nil { + t.Fatal("webhook is not deleted") + } + if status.Code(err) != codes.NotFound { + t.Fatalf("different error code, expected: %s, actual: %s", codes.NotFound, status.Code(err)) + } +} + +func TestHttpWebhook(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + featureClient := newFeatureClient(t) 
+ defer featureClient.Close() + autoOpsClient := newAutoOpsClient(t) + defer autoOpsClient.Close() + experimentClient := newExperimentClient(t) + defer experimentClient.Close() + + name := newWebhookName(t) + description := newUUID(t) + resp := createWebhook(ctx, t, autoOpsClient, name, description) + webhook := getWebhook(ctx, t, autoOpsClient, resp.Webhook.Id) + if webhook == nil { + t.Fatal("webhook is nil") + } + featureID := createFeatureID(t) + createFeature(ctx, t, featureClient, featureID) + condition := createWebhookClause_Condition(autoopsproto.WebhookClause_Condition_EQUAL, `.body."Alert id"`, `123`) + clause := createWebhookClause(resp.Webhook.Id, []*autoopsproto.WebhookClause_Condition{condition}) + createAutoOpsRule(ctx, t, autoOpsClient, featureID, nil, nil, []*autoopsproto.WebhookClause{clause}) + autoOpsRules := listAutoOpsRulesByFeatureID(t, autoOpsClient, featureID) + expectedNum := 1 + if len(autoOpsRules) != expectedNum { + t.Fatal("not enough rules") + } + + sendHttpWebhook(t, resp.Url, `{"body":{"Alert id": 123}}`) + feature := getFeature(t, featureClient, featureID) + if feature.Enabled == true { + t.Fatalf("feature is enabled") + } + autoOpsRules = listAutoOpsRulesByFeatureID(t, autoOpsClient, featureID) + if autoOpsRules[0].TriggeredAt == 0 { + t.Fatalf("triggered at is empty") + } +} + +func sendHttpWebhook(t *testing.T, url, payload string) { + t.Helper() + resp, err := http.Post(url, "application/json", bytes.NewBuffer([]byte(payload))) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + t.Fatalf("Send HTTP webhook request failed: %d, %s", resp.StatusCode, url) + } +} + +func unmarshalOpsEventRateClause(t *testing.T, clause *autoopsproto.Clause) *autoopsproto.OpsEventRateClause { + c := &autoopsproto.OpsEventRateClause{} + if err := ptypes.UnmarshalAny(clause.Clause, c); err != nil { + t.Fatal(err) + } + return c +} + +func createGoal(ctx context.Context, t *testing.T, client 
experimentclient.Client) string { + t.Helper() + goalID := createGoalID(t) + cmd := &experimentproto.CreateGoalCommand{ + Id: goalID, + Name: goalID, + Description: goalID, + } + _, err := client.CreateGoal(ctx, &experimentproto.CreateGoalRequest{ + Command: cmd, + EnvironmentNamespace: *environmentNamespace, + }) + if err != nil { + t.Fatal(err) + } + return cmd.Id +} + +func createOpsEventRateClause(t *testing.T, variationID string, goalID string) *autoopsproto.OpsEventRateClause { + return &autoopsproto.OpsEventRateClause{ + VariationId: variationID, + GoalId: goalID, + MinCount: int64(5), + ThreadsholdRate: float64(0.5), + Operator: autoopsproto.OpsEventRateClause_GREATER_OR_EQUAL, + } +} + +func createDatetimeClause(t *testing.T) *autoopsproto.DatetimeClause { + return &autoopsproto.DatetimeClause{ + Time: time.Now().Add(5 * time.Second).Unix(), + } +} + +func createWebhookClause(webhookID string, condition []*autoopsproto.WebhookClause_Condition) *autoopsproto.WebhookClause { + return &autoopsproto.WebhookClause{ + WebhookId: webhookID, + Conditions: condition, + } +} + +func createWebhookClause_Condition(operator autoopsproto.WebhookClause_Condition_Operator, filter, value string) *autoopsproto.WebhookClause_Condition { + return &autoopsproto.WebhookClause_Condition{ + Filter: filter, + Value: value, + Operator: operator, + } +} + +func createAutoOpsRule(ctx context.Context, t *testing.T, client autoopsclient.Client, featureID string, oercs []*autoopsproto.OpsEventRateClause, dcs []*autoopsproto.DatetimeClause, wc []*autoopsproto.WebhookClause) { + cmd := &autoopsproto.CreateAutoOpsRuleCommand{ + FeatureId: featureID, + OpsType: autoopsproto.OpsType_DISABLE_FEATURE, + OpsEventRateClauses: oercs, + DatetimeClauses: dcs, + WebhookClauses: wc, + } + _, err := client.CreateAutoOpsRule(ctx, &autoopsproto.CreateAutoOpsRuleRequest{ + EnvironmentNamespace: *environmentNamespace, + Command: cmd, + }) + if err != nil { + t.Fatal(err) + } +} + +func createWebhook(ctx 
context.Context, t *testing.T, client autoopsclient.Client, name, description string) *autoopsproto.CreateWebhookResponse { + t.Helper() + cmd := &autoopsproto.CreateWebhookCommand{ + Name: name, + Description: description, + } + resp, err := client.CreateWebhook(ctx, &autoopsproto.CreateWebhookRequest{ + EnvironmentNamespace: *environmentNamespace, + Command: cmd, + }) + if err != nil { + t.Fatal(err) + } + return resp +} + +func newAutoOpsClient(t *testing.T) autoopsclient.Client { + t.Helper() + creds, err := rpcclient.NewPerRPCCredentials(*serviceTokenPath) + if err != nil { + t.Fatal("Failed to create RPC credentials:", err) + } + client, err := autoopsclient.NewClient( + fmt.Sprintf("%s:%d", *webGatewayAddr, *webGatewayPort), + *webGatewayCert, + rpcclient.WithPerRPCCredentials(creds), + rpcclient.WithDialTimeout(30*time.Second), + rpcclient.WithBlock(), + ) + if err != nil { + t.Fatal("Failed to create auto ops client:", err) + } + return client +} + +func newUUID(t *testing.T) string { + t.Helper() + id, err := uuid.NewUUID() + if err != nil { + t.Fatal(err) + } + return id.String() +} + +func newWebhookName(t *testing.T) string { + if *testID != "" { + return fmt.Sprintf("%s-%s-webhook-name-%s", prefixID, *testID, newUUID(t)) + } + return fmt.Sprintf("%s-webhook-name-%s", prefixID, newUUID(t)) +} + +func createFeature(ctx context.Context, t *testing.T, client featureclient.Client, featureID string) { + t.Helper() + cmd := newCreateFeatureCommand(featureID) + createReq := &featureproto.CreateFeatureRequest{ + Command: cmd, + EnvironmentNamespace: *environmentNamespace, + } + if _, err := client.CreateFeature(ctx, createReq); err != nil { + t.Fatal(err) + } + enableFeature(t, featureID, client) +} + +func getFeature(t *testing.T, client featureclient.Client, featureID string) *featureproto.Feature { + t.Helper() + getReq := &featureproto.GetFeatureRequest{ + Id: featureID, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := 
context.WithTimeout(context.Background(), timeout) + defer cancel() + response, err := client.GetFeature(ctx, getReq) + if err != nil { + t.Fatal("Failed to get feature:", err) + } + return response.Feature +} + +func newFeatureClient(t *testing.T) featureclient.Client { + t.Helper() + creds, err := rpcclient.NewPerRPCCredentials(*serviceTokenPath) + if err != nil { + t.Fatal("Failed to create RPC credentials:", err) + } + featureClient, err := featureclient.NewClient( + fmt.Sprintf("%s:%d", *webGatewayAddr, *webGatewayPort), + *webGatewayCert, + rpcclient.WithPerRPCCredentials(creds), + rpcclient.WithDialTimeout(30*time.Second), + rpcclient.WithBlock(), + ) + if err != nil { + t.Fatal("Failed to create feature client:", err) + } + return featureClient +} + +func newCreateFeatureCommand(featureID string) *featureproto.CreateFeatureCommand { + return &featureproto.CreateFeatureCommand{ + Id: featureID, + Name: "e2e-test-gateway-feature-name", + Description: "e2e-test-gateway-feature-description", + Variations: []*featureproto.Variation{ + { + Value: "A", + Name: "Variation A", + Description: "Thing does A", + }, + { + Value: "B", + Name: "Variation B", + Description: "Thing does B", + }, + }, + Tags: []string{ + "e2e-test-tag-1", + "e2e-test-tag-2", + "e2e-test-tag-3", + }, + DefaultOnVariationIndex: &wrappers.Int32Value{Value: int32(0)}, + DefaultOffVariationIndex: &wrappers.Int32Value{Value: int32(1)}, + } +} + +func enableFeature(t *testing.T, featureID string, client featureclient.Client) { + t.Helper() + enableReq := &featureproto.EnableFeatureRequest{ + Id: featureID, + Command: &featureproto.EnableFeatureCommand{}, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + if _, err := client.EnableFeature(ctx, enableReq); err != nil { + t.Fatalf("Failed to enable feature id: %s. 
Error: %v", featureID, err) + } +} + +func listAutoOpsRulesByFeatureID(t *testing.T, client autoopsclient.Client, featureID string) []*autoopsproto.AutoOpsRule { + t.Helper() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + resp, err := client.ListAutoOpsRules(ctx, &autoopsproto.ListAutoOpsRulesRequest{ + EnvironmentNamespace: *environmentNamespace, + PageSize: int64(500), + FeatureIds: []string{featureID}, + }) + if err != nil { + t.Fatal("failed to list auto ops rules", err) + } + return resp.AutoOpsRules +} + +func listWebhooks(ctx context.Context, t *testing.T, client autoopsclient.Client) []*autoopsproto.Webhook { + t.Helper() + resp, err := client.ListWebhooks(ctx, &autoopsproto.ListWebhooksRequest{ + PageSize: int64(500), + EnvironmentNamespace: *environmentNamespace, + }) + if err != nil { + t.Fatal("failed to list webhooks", err) + } + return resp.Webhooks +} + +func getWebhook(ctx context.Context, t *testing.T, client autoopsclient.Client, id string) *autoopsproto.Webhook { + t.Helper() + resp, err := client.GetWebhook(ctx, &autoopsproto.GetWebhookRequest{ + Id: id, + EnvironmentNamespace: *environmentNamespace, + }) + if err != nil { + t.Fatal("failed to get webhook", err) + } + return resp.Webhook +} + +func getAutoOpsRules(t *testing.T, id string) *autoopsproto.AutoOpsRule { + t.Helper() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + c := newAutoOpsClient(t) + defer c.Close() + resp, err := c.GetAutoOpsRule(ctx, &autoopsproto.GetAutoOpsRuleRequest{ + EnvironmentNamespace: *environmentNamespace, + Id: id, + }) + if err != nil { + t.Fatal("failed to list auto ops rules", err) + } + return resp.AutoOpsRule +} + +func updateWebhookDescription(ctx context.Context, t *testing.T, client autoopsclient.Client, id, desc string) { + t.Helper() + _, err := client.UpdateWebhook(ctx, &autoopsproto.UpdateWebhookRequest{ + Id: id, + EnvironmentNamespace: *environmentNamespace, + 
ChangeWebhookDescriptionCommand: &autoopsproto.ChangeWebhookDescriptionCommand{ + Description: desc, + }, + }) + if err != nil { + t.Fatal(err) + } +} + +func updateWebhookName(ctx context.Context, t *testing.T, client autoopsclient.Client, id, name string) { + t.Helper() + _, err := client.UpdateWebhook(ctx, &autoopsproto.UpdateWebhookRequest{ + Id: id, + EnvironmentNamespace: *environmentNamespace, + ChangeWebhookNameCommand: &autoopsproto.ChangeWebhookNameCommand{ + Name: name, + }, + }) + if err != nil { + t.Fatal(err) + } +} + +func deleteAutoOpsRules(t *testing.T, client autoopsclient.Client, id string) { + t.Helper() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + _, err := client.DeleteAutoOpsRule(ctx, &autoopsproto.DeleteAutoOpsRuleRequest{ + EnvironmentNamespace: *environmentNamespace, + Id: id, + Command: &autoopsproto.DeleteAutoOpsRuleCommand{}, + }) + if err != nil { + t.Fatal("failed to list auto ops rules", err) + } +} + +func deleteWebhook(ctx context.Context, t *testing.T, client autoopsclient.Client, id string) { + t.Helper() + _, err := client.DeleteWebhook(ctx, &autoopsproto.DeleteWebhookRequest{ + Id: id, + EnvironmentNamespace: *environmentNamespace, + Command: &autoopsproto.DeleteWebhookCommand{}, + }) + if err != nil { + t.Fatal(err) + } +} + +// Test for old SDK client +// Evaluation field in the GoalEvent is deprecated. 
+func registerGoalEventWithEvaluations( + t *testing.T, + featureID string, + featureVersion int32, + goalID, userID, variationID string, +) { + t.Helper() + c := newGatewayClient(t) + defer c.Close() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + goal, err := ptypes.MarshalAny(&eventproto.GoalEvent{ + Timestamp: time.Now().Unix(), + GoalId: goalID, + UserId: userID, + Value: 0.3, + User: &userproto.User{}, + Evaluations: []*featureproto.Evaluation{ + { + Id: fmt.Sprintf("%s-evaluation-id-%s", prefixTestName, newUUID(t)), + FeatureId: featureID, + FeatureVersion: featureVersion, + UserId: userID, + VariationId: variationID, + }, + }, + }) + if err != nil { + t.Fatal(err) + } + req := &gatewayproto.RegisterEventsRequest{ + Events: []*eventproto.Event{ + { + Id: newUUID(t), + Event: goal, + }, + }, + } + response, err := c.RegisterEvents(ctx, req) + if err != nil { + t.Fatal(err) + } + if len(response.Errors) > 0 { + t.Fatalf("Failed to register events. Error: %v", response.Errors) + } +} + +func grpcRegisterGoalEvent( + t *testing.T, + goalID, userID, tag string, +) { + t.Helper() + c := newGatewayClient(t) + defer c.Close() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + goal, err := ptypes.MarshalAny(&eventproto.GoalEvent{ + Timestamp: time.Now().Unix(), + GoalId: goalID, + UserId: userID, + Value: 0.3, + User: &userproto.User{}, + Tag: tag, + }) + if err != nil { + t.Fatal(err) + } + req := &gatewayproto.RegisterEventsRequest{ + Events: []*eventproto.Event{ + { + Id: newUUID(t), + Event: goal, + }, + }, + } + response, err := c.RegisterEvents(ctx, req) + if err != nil { + t.Fatal(err) + } + if len(response.Errors) > 0 { + t.Fatalf("Failed to register events. 
Error: %v", response.Errors) + } +} + +func registerGoalEvent( + t *testing.T, + goalID, userID, tag string, +) { + t.Helper() + c := newGatewayClient(t) + defer c.Close() + goal, err := protojson.Marshal(&eventproto.GoalEvent{ + Timestamp: time.Now().Unix(), + GoalId: goalID, + UserId: userID, + Value: 0.3, + User: &userproto.User{}, + Tag: tag, + }) + if err != nil { + t.Fatal(err) + } + events := []util.Event{ + { + ID: newUUID(t), + Event: goal, + Type: util.GoalEventType, + }, + } + response := util.RegisterEvents(t, events, *gatewayAddr, *apiKeyPath) + if len(response.Errors) > 0 { + t.Fatalf("Failed to register events. Error: %v", response.Errors) + } +} + +func newGatewayClient(t *testing.T) gatewayclient.Client { + t.Helper() + creds, err := gatewayclient.NewPerRPCCredentials(*apiKeyPath) + if err != nil { + t.Fatal("Failed to create RPC credentials:", err) + } + client, err := gatewayclient.NewClient( + fmt.Sprintf("%s:%d", *gatewayAddr, *gatewayPort), + *gatewayCert, + rpcclient.WithPerRPCCredentials(creds), + rpcclient.WithDialTimeout(30*time.Second), + rpcclient.WithBlock(), + ) + if err != nil { + t.Fatal("Failed to create gateway client:", err) + } + return client +} + +func newExperimentClient(t *testing.T) experimentclient.Client { + t.Helper() + creds, err := rpcclient.NewPerRPCCredentials(*serviceTokenPath) + if err != nil { + t.Fatal("Failed to create RPC credentials:", err) + } + client, err := experimentclient.NewClient( + fmt.Sprintf("%s:%d", *webGatewayAddr, *webGatewayPort), + *webGatewayCert, + rpcclient.WithPerRPCCredentials(creds), + rpcclient.WithDialTimeout(30*time.Second), + rpcclient.WithBlock(), + ) + if err != nil { + t.Fatal("Failed to create experiment client:", err) + } + return client +} + +func grpcRegisterEvaluationEvent( + t *testing.T, + featureID string, + featureVersion int32, + userID, variationID, tag string, +) { + t.Helper() + c := newGatewayClient(t) + defer c.Close() + ctx, cancel := 
context.WithTimeout(context.Background(), timeout) + defer cancel() + evaluation, err := ptypes.MarshalAny(&eventproto.EvaluationEvent{ + Timestamp: time.Now().Unix(), + FeatureId: featureID, + FeatureVersion: featureVersion, + UserId: userID, + VariationId: variationID, + User: &userproto.User{}, + Reason: &featureproto.Reason{}, + Tag: tag, + }) + if err != nil { + t.Fatal(err) + } + req := &gatewayproto.RegisterEventsRequest{ + Events: []*eventproto.Event{ + { + Id: newUUID(t), + Event: evaluation, + }, + }, + } + response, err := c.RegisterEvents(ctx, req) + if err != nil { + t.Fatal(err) + } + if len(response.Errors) > 0 { + t.Fatalf("Failed to register events. Error: %v", response.Errors) + } +} + +func registerEvaluationEvent( + t *testing.T, + featureID string, + featureVersion int32, + userID, variationID, tag string, +) { + t.Helper() + evaluation, err := protojson.Marshal(&eventproto.EvaluationEvent{ + Timestamp: time.Now().Unix(), + FeatureId: featureID, + FeatureVersion: featureVersion, + UserId: userID, + VariationId: variationID, + User: &userproto.User{}, + Reason: &featureproto.Reason{}, + Tag: tag, + }) + if err != nil { + t.Fatal(err) + } + events := []util.Event{ + { + ID: newUUID(t), + Event: evaluation, + Type: util.EvaluationEventType, + }, + } + response := util.RegisterEvents(t, events, *gatewayAddr, *apiKeyPath) + if len(response.Errors) > 0 { + t.Fatalf("Failed to register events. 
Error: %v", response.Errors) + } +} + +func createUserIDs(t *testing.T, total int) []string { + userIDs := make([]string, 0) + for i := 0; i < total; i++ { + id := newUUID(t) + userID := fmt.Sprintf("%s-user-%s", prefixTestName, id) + userIDs = append(userIDs, userID) + } + return userIDs +} + +func createFeatureID(t *testing.T) string { + if *testID != "" { + return fmt.Sprintf("%s-%s-feature-id-%s", prefixTestName, *testID, newUUID(t)) + } + return fmt.Sprintf("%s-feature-id-%s", prefixTestName, newUUID(t)) +} + +func createGoalID(t *testing.T) string { + if *testID != "" { + return fmt.Sprintf("%s-%s-goal-id-%s", prefixTestName, *testID, newUUID(t)) + } + return fmt.Sprintf("%s-goal-id-%s", prefixTestName, newUUID(t)) +} diff --git a/test/e2e/environment/BUILD.bazel b/test/e2e/environment/BUILD.bazel new file mode 100644 index 000000000..c849c422e --- /dev/null +++ b/test/e2e/environment/BUILD.bazel @@ -0,0 +1,14 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_test") + +go_test( + name = "go_default_test", + srcs = [ + "environment_test.go", + "project_test.go", + ], + deps = [ + "//pkg/environment/client:go_default_library", + "//pkg/rpc/client:go_default_library", + "//proto/environment:go_default_library", + ], +) diff --git a/test/e2e/environment/environment_test.go b/test/e2e/environment/environment_test.go new file mode 100644 index 000000000..d49c7b2a7 --- /dev/null +++ b/test/e2e/environment/environment_test.go @@ -0,0 +1,172 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package environment + +import ( + "context" + "flag" + "fmt" + "testing" + "time" + + environmentclient "github.com/bucketeer-io/bucketeer/pkg/environment/client" + rpcclient "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + environmentproto "github.com/bucketeer-io/bucketeer/proto/environment" +) + +const ( + timeout = 10 * time.Second +) + +var ( + // FIXME: To avoid compiling the test many times, webGatewayAddr, webGatewayPort & apiKey has been also added here to prevent from getting: "flag provided but not defined" error during the test. These 3 are being use in the Gateway test + webGatewayAddr = flag.String("web-gateway-addr", "", "Web gateway endpoint address") + webGatewayPort = flag.Int("web-gateway-port", 443, "Web gateway endpoint port") + webGatewayCert = flag.String("web-gateway-cert", "", "Web gateway crt file") + apiKeyPath = flag.String("api-key", "", "Api key path for web gateway") + gatewayAddr = flag.String("gateway-addr", "", "Gateway endpoint address") + gatewayPort = flag.Int("gateway-port", 443, "Gateway endpoint port") + gatewayCert = flag.String("gateway-cert", "", "Gateway crt file") + serviceTokenPath = flag.String("service-token", "", "Service token path") + environmentNamespace = flag.String("environment-namespace", "", "Environment namespace") + testID = flag.String("test-id", "", "test ID") +) + +func TestGetEnvironment(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + c := newEnvironmentClient(t) + defer c.Close() + id := getEnvironmentID(t) + resp, err := c.GetEnvironment(ctx, &environmentproto.GetEnvironmentRequest{Id: id}) + if err != nil { + t.Fatal(err) + } + if resp.Environment.Id != id { + t.Fatalf("different ids, expected: %v, actual: %v", id, resp.Environment.Id) + } + if resp.Environment.Namespace != *environmentNamespace { + t.Fatalf("different namespaces, 
expected: %v, actual: %v", *environmentNamespace, resp.Environment.Namespace) + } +} + +func TestGetEnvironmentByNamespace(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + c := newEnvironmentClient(t) + defer c.Close() + resp, err := c.GetEnvironmentByNamespace(ctx, &environmentproto.GetEnvironmentByNamespaceRequest{Namespace: *environmentNamespace}) + if err != nil { + t.Fatal(err) + } + id := getEnvironmentID(t) + if resp.Environment.Id != id { + t.Fatalf("different ids, expected: %v, actual: %v", id, resp.Environment.Id) + } + if resp.Environment.Namespace != *environmentNamespace { + t.Fatalf("different namespaces, expected: %v, actual: %v", *environmentNamespace, resp.Environment.Namespace) + } +} + +func TestListEnvironmentsByProject(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + c := newEnvironmentClient(t) + defer c.Close() + resp, err := c.ListEnvironments(ctx, &environmentproto.ListEnvironmentsRequest{ProjectId: defaultProjectID}) + if err != nil { + t.Fatal(err) + } + if len(resp.Environments) == 0 { + t.Fatal("environments is empty, expected at least 1") + } + for _, env := range resp.Environments { + if env.ProjectId != defaultProjectID { + t.Fatalf("different project id, expected: %s, actual: %s", defaultProjectID, env.ProjectId) + } + } +} + +func TestListEnvironments(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + c := newEnvironmentClient(t) + defer c.Close() + pageSize := int64(1) + resp, err := c.ListEnvironments(ctx, &environmentproto.ListEnvironmentsRequest{PageSize: pageSize}) + if err != nil { + t.Fatal(err) + } + responseSize := int64(len(resp.Environments)) + if responseSize != pageSize { + t.Fatalf("different sizes, expected: %d actual: %d", pageSize, responseSize) + } +} + +func TestUpdateEnvironment(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 
timeout) + defer cancel() + c := newEnvironmentClient(t) + defer c.Close() + id := getEnvironmentID(t) + newDesc := fmt.Sprintf("Description %v", time.Now().Unix()) + _, err := c.UpdateEnvironment(ctx, &environmentproto.UpdateEnvironmentRequest{ + Id: id, + ChangeDescriptionCommand: &environmentproto.ChangeDescriptionEnvironmentCommand{Description: newDesc}, + }) + if err != nil { + t.Fatal(err) + } + getResp, err := c.GetEnvironment(ctx, &environmentproto.GetEnvironmentRequest{Id: id}) + if err != nil { + t.Fatal(err) + } + if getResp.Environment.Id != id { + t.Fatalf("different ids, expected: %v, actual: %v", id, getResp.Environment.Id) + } + if getResp.Environment.Namespace != *environmentNamespace { + t.Fatalf("different namespaces, expected: %v, actual: %v", *environmentNamespace, getResp.Environment.Namespace) + } + if getResp.Environment.Description != newDesc { + t.Fatalf("different descriptions, expected: %v, actual: %v", newDesc, getResp.Environment.Description) + } +} + +func getEnvironmentID(t *testing.T) string { + t.Helper() + if *environmentNamespace == "" { + return "production" + } + return *environmentNamespace +} + +func newEnvironmentClient(t *testing.T) environmentclient.Client { + t.Helper() + creds, err := rpcclient.NewPerRPCCredentials(*serviceTokenPath) + if err != nil { + t.Fatal("Failed to create RPC credentials:", err) + } + client, err := environmentclient.NewClient( + fmt.Sprintf("%s:%d", *webGatewayAddr, *webGatewayPort), + *webGatewayCert, + rpcclient.WithPerRPCCredentials(creds), + rpcclient.WithDialTimeout(30*time.Second), + rpcclient.WithBlock(), + ) + if err != nil { + t.Fatal("Failed to create environment client:", err) + } + return client +} diff --git a/test/e2e/environment/project_test.go b/test/e2e/environment/project_test.go new file mode 100644 index 000000000..784e83637 --- /dev/null +++ b/test/e2e/environment/project_test.go @@ -0,0 +1,85 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package environment + +import ( + "context" + "fmt" + "testing" + "time" + + environmentproto "github.com/bucketeer-io/bucketeer/proto/environment" +) + +const ( + defaultProjectID = "default" +) + +func TestGetProject(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + c := newEnvironmentClient(t) + defer c.Close() + id := defaultProjectID + resp, err := c.GetProject(ctx, &environmentproto.GetProjectRequest{Id: id}) + if err != nil { + t.Fatal(err) + } + if resp.Project.Id != id { + t.Fatalf("different ids, expected: %v, actual: %v", id, resp.Project.Id) + } +} + +func TestListProjects(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + c := newEnvironmentClient(t) + defer c.Close() + pageSize := int64(1) + resp, err := c.ListProjects(ctx, &environmentproto.ListProjectsRequest{PageSize: pageSize}) + if err != nil { + t.Fatal(err) + } + responseSize := int64(len(resp.Projects)) + if responseSize != pageSize { + t.Fatalf("different sizes, expected: %d actual: %d", pageSize, responseSize) + } +} + +func TestUpdateProject(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + c := newEnvironmentClient(t) + defer c.Close() + id := defaultProjectID + newDesc := fmt.Sprintf("Description %v", time.Now().Unix()) + _, err := c.UpdateProject(ctx, 
&environmentproto.UpdateProjectRequest{ + Id: id, + ChangeDescriptionCommand: &environmentproto.ChangeDescriptionProjectCommand{Description: newDesc}, + }) + if err != nil { + t.Fatal(err) + } + getResp, err := c.GetProject(ctx, &environmentproto.GetProjectRequest{Id: id}) + if err != nil { + t.Fatal(err) + } + if getResp.Project.Id != id { + t.Fatalf("different ids, expected: %v, actual: %v", id, getResp.Project.Id) + } + if getResp.Project.Description != newDesc { + t.Fatalf("different descriptions, expected: %v, actual: %v", newDesc, getResp.Project.Description) + } +} diff --git a/test/e2e/eventcounter/BUILD.bazel b/test/e2e/eventcounter/BUILD.bazel new file mode 100644 index 000000000..7dae3c1ad --- /dev/null +++ b/test/e2e/eventcounter/BUILD.bazel @@ -0,0 +1,24 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_test") + +go_test( + name = "go_default_test", + srcs = ["eventcounter_test.go"], + deps = [ + "//pkg/eventcounter/client:go_default_library", + "//pkg/experiment/client:go_default_library", + "//pkg/feature/client:go_default_library", + "//pkg/gateway/client:go_default_library", + "//pkg/rpc/client:go_default_library", + "//pkg/uuid:go_default_library", + "//proto/event/client:go_default_library", + "//proto/eventcounter:go_default_library", + "//proto/experiment:go_default_library", + "//proto/feature:go_default_library", + "//proto/gateway:go_default_library", + "//proto/user:go_default_library", + "//test/e2e/util:go_default_library", + "@com_github_golang_protobuf//ptypes:go_default_library_gen", + "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", + "@org_golang_google_protobuf//encoding/protojson:go_default_library", + ], +) diff --git a/test/e2e/eventcounter/eventcounter_test.go b/test/e2e/eventcounter/eventcounter_test.go new file mode 100644 index 000000000..1f4682d9a --- /dev/null +++ b/test/e2e/eventcounter/eventcounter_test.go @@ -0,0 +1,2102 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package eventcounter + +import ( + "context" + "flag" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + "time" + + "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/ptypes/wrappers" + "google.golang.org/protobuf/encoding/protojson" + + ecclient "github.com/bucketeer-io/bucketeer/pkg/eventcounter/client" + experimentclient "github.com/bucketeer-io/bucketeer/pkg/experiment/client" + featureclient "github.com/bucketeer-io/bucketeer/pkg/feature/client" + gatewayclient "github.com/bucketeer-io/bucketeer/pkg/gateway/client" + rpcclient "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + "github.com/bucketeer-io/bucketeer/pkg/uuid" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/client" + ecproto "github.com/bucketeer-io/bucketeer/proto/eventcounter" + experimentproto "github.com/bucketeer-io/bucketeer/proto/experiment" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" + gatewayproto "github.com/bucketeer-io/bucketeer/proto/gateway" + userproto "github.com/bucketeer-io/bucketeer/proto/user" + "github.com/bucketeer-io/bucketeer/test/e2e/util" +) + +const ( + prefixTestName = "e2e-test" + timeout = 20 * time.Second + retryTimes = 360 +) + +var ( + webGatewayAddr = flag.String("web-gateway-addr", "", "Web gateway endpoint address") + webGatewayPort = flag.Int("web-gateway-port", 443, "Web gateway endpoint port") + webGatewayCert = flag.String("web-gateway-cert", 
"", "Web gateway crt file") + apiKeyPath = flag.String("api-key", "", "Api key path for web gateway") + gatewayAddr = flag.String("gateway-addr", "", "Gateway endpoint address") + gatewayPort = flag.Int("gateway-port", 443, "Gateway endpoint port") + gatewayCert = flag.String("gateway-cert", "", "Gateway crt file") + serviceTokenPath = flag.String("service-token", "", "Service token path") + environmentNamespace = flag.String("environment-namespace", "", "Environment namespace") + testID = flag.String("test-id", "", "test ID") +) + +func TestGrpcGoalCountV2(t *testing.T) { + t.Parallel() + featureClient := newFeatureClient(t) + defer featureClient.Close() + experimentClient := newExperimentClient(t) + defer experimentClient.Close() + ecClient := newEventCounterClient(t) + defer ecClient.Close() + uuid := newUUID(t) + + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + + tag := fmt.Sprintf("%s-tag-%s", prefixTestName, uuid) + userID := createUserID(t, uuid) + featureID := createFeatureID(t, uuid) + + variationVarA := "a" + variationVarB := "b" + cmd := newCreateFeatureCommand(featureID, []string{variationVarA, variationVarB}) + createFeature(t, featureClient, cmd) + addTag(t, tag, featureID, featureClient) + enableFeature(t, featureID, featureClient) + f := getFeature(t, featureClient, featureID) + goalIDs := createGoals(ctx, t, experimentClient, 1) + variations := make(map[string]*featureproto.Variation) + variationIDs := []string{} + for _, v := range f.Variations { + variationIDs = append(variationIDs, v.Id) + variations[v.Value] = v + } + grpcRegisterEvaluationEvent(t, featureID, f.Version, userID, f.Variations[0].Id, tag) + + grpcRegisterGoalEvent(t, goalIDs[0], userID, tag, float64(0.2)) + grpcRegisterGoalEvent(t, goalIDs[0], userID, tag, float64(0.3)) + + for i := 0; i < retryTimes; i++ { + if i == retryTimes-1 { + t.Fatalf("retry timeout") + } + time.Sleep(time.Second) + + resp := getGoalCountV2(t, ecClient, 
goalIDs[0], featureID, f.Version, variationIDs) + if len(resp.GoalCounts.RealtimeCounts) == 0 { + t.Fatalf("no count returned") + } + if resp.GoalCounts.GoalId != goalIDs[0] { + t.Fatalf("goal ID is not correct: %s", resp.GoalCounts.GoalId) + } + + vcA := getVariationCount(resp.GoalCounts.RealtimeCounts, variations[variationVarA].Id) + if vcA == nil { + t.Fatalf("variation a is missing") + } + if vcA.UserCount != 1 { + continue + } + if vcA.EventCount != 2 { + continue + } + if vcA.ValueSum != float64(0.5) { + continue + } + if vcA.ValueSumPerUserMean != float64(0.5) { + continue + } + if vcA.ValueSumPerUserVariance != float64(0) { + continue + } + + vcB := getVariationCount(resp.GoalCounts.RealtimeCounts, variations[variationVarB].Id) + if vcB == nil { + t.Fatalf("variation b is missing") + } + if vcB.UserCount != 0 { + continue + } + if vcB.EventCount != 0 { + continue + } + if vcB.ValueSum != float64(0) { + continue + } + if vcB.ValueSumPerUserMean != float64(0.0) { + continue + } + if vcB.ValueSumPerUserVariance != float64(0.0) { + continue + } + break + } +} + +func TestGoalCountV2(t *testing.T) { + t.Parallel() + featureClient := newFeatureClient(t) + defer featureClient.Close() + experimentClient := newExperimentClient(t) + defer experimentClient.Close() + ecClient := newEventCounterClient(t) + defer ecClient.Close() + uuid := newUUID(t) + + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + + tag := fmt.Sprintf("%s-tag-%s", prefixTestName, uuid) + userID := createUserID(t, uuid) + featureID := createFeatureID(t, uuid) + + variationVarA := "a" + variationVarB := "b" + cmd := newCreateFeatureCommand(featureID, []string{variationVarA, variationVarB}) + createFeature(t, featureClient, cmd) + addTag(t, tag, featureID, featureClient) + enableFeature(t, featureID, featureClient) + f := getFeature(t, featureClient, featureID) + goalIDs := createGoals(ctx, t, experimentClient, 1) + variations := 
make(map[string]*featureproto.Variation) + variationIDs := []string{} + for _, v := range f.Variations { + variationIDs = append(variationIDs, v.Id) + variations[v.Value] = v + } + registerEvaluationEvent(t, featureID, f.Version, userID, f.Variations[0].Id, tag) + + registerGoalEvent(t, goalIDs[0], userID, tag, float64(0.2)) + registerGoalEvent(t, goalIDs[0], userID, tag, float64(0.3)) + + for i := 0; i < retryTimes; i++ { + if i == retryTimes-1 { + t.Fatalf("retry timeout") + } + time.Sleep(time.Second) + + resp := getGoalCountV2(t, ecClient, goalIDs[0], featureID, f.Version, variationIDs) + if len(resp.GoalCounts.RealtimeCounts) == 0 { + t.Fatalf("no count returned") + } + if resp.GoalCounts.GoalId != goalIDs[0] { + t.Fatalf("goal ID is not correct: %s", resp.GoalCounts.GoalId) + } + + vcA := getVariationCount(resp.GoalCounts.RealtimeCounts, variations[variationVarA].Id) + if vcA == nil { + t.Fatalf("variation a is missing") + } + if vcA.UserCount != 1 { + continue + } + if vcA.EventCount != 2 { + continue + } + if vcA.ValueSum != float64(0.5) { + continue + } + if vcA.ValueSumPerUserMean != float64(0.5) { + continue + } + if vcA.ValueSumPerUserVariance != float64(0) { + continue + } + + vcB := getVariationCount(resp.GoalCounts.RealtimeCounts, variations[variationVarB].Id) + if vcB == nil { + t.Fatalf("variation b is missing") + } + if vcB.UserCount != 0 { + continue + } + if vcB.EventCount != 0 { + continue + } + if vcB.ValueSum != float64(0) { + continue + } + if vcB.ValueSumPerUserMean != float64(0.0) { + continue + } + if vcB.ValueSumPerUserVariance != float64(0.0) { + continue + } + break + } +} + +// Test for old SDK client. Tag is not set in the EvaluationEvent and GoalEvent +// Evaluation field in the GoalEvent is deprecated. 
+func TestExperimentResultWithoutTag(t *testing.T) { + t.Parallel() + featureClient := newFeatureClient(t) + defer featureClient.Close() + experimentClient := newExperimentClient(t) + defer experimentClient.Close() + ecClient := newEventCounterClient(t) + defer ecClient.Close() + uuid := newUUID(t) + + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + + tag := fmt.Sprintf("%s-tag-%s", prefixTestName, uuid) + userIDs := []string{} + for i := 0; i < 5; i++ { + userIDs = append(userIDs, fmt.Sprintf("%s-%d", createUserID(t, uuid), i)) + } + featureID := createFeatureID(t, uuid) + + cmd := newCreateFeatureCommand(featureID, []string{"a", "b"}) + createFeature(t, featureClient, cmd) + addTag(t, tag, featureID, featureClient) + enableFeature(t, featureID, featureClient) + f := getFeature(t, featureClient, featureID) + goalIDs := createGoals(ctx, t, experimentClient, 1) + startAt := time.Now().Local().Add(-1 * time.Hour) + stopAt := startAt.Local().Add(time.Hour * 2) + experiment := createExperimentWithMultiGoals(ctx, t, experimentClient, "ExperimentResult", featureID, goalIDs, f.Variations[0].Id, startAt, stopAt) + + // CVRs is 3/4 + // Register 3 events and 2 user counts for user 1, 2 and 3 + // Register variation a + grpcRegisterEvaluationEvent(t, featureID, f.Version, userIDs[0], experiment.Variations[0].Id, "") + grpcRegisterEvaluationEvent(t, featureID, f.Version, userIDs[1], experiment.Variations[0].Id, "") + grpcRegisterEvaluationEvent(t, featureID, f.Version, userIDs[2], experiment.Variations[0].Id, "") + // Increment evaluation event count + grpcRegisterEvaluationEvent(t, featureID, f.Version, userIDs[0], experiment.Variations[0].Id, "") + // Register goal variation + registerGoalEventWithEvaluations(t, featureID, f.Version, goalIDs[0], userIDs[0], experiment.Variations[0].Id, float64(0.3)) + registerGoalEventWithEvaluations(t, featureID, f.Version, goalIDs[0], userIDs[1], experiment.Variations[0].Id, float64(0.2)) + 
registerGoalEventWithEvaluations(t, featureID, f.Version, goalIDs[0], userIDs[2], experiment.Variations[0].Id, float64(0.1)) + + // CVRs is 2/3 + // Register 3 events and 2 user counts for user 4 and 5 + // Register variation + grpcRegisterEvaluationEvent(t, featureID, f.Version, userIDs[3], experiment.Variations[1].Id, "") + grpcRegisterEvaluationEvent(t, featureID, f.Version, userIDs[4], experiment.Variations[1].Id, "") + // Increment evaluation event count + grpcRegisterEvaluationEvent(t, featureID, f.Version, userIDs[3], experiment.Variations[1].Id, "") + // Register goal + registerGoalEventWithEvaluations(t, featureID, f.Version, goalIDs[0], userIDs[3], experiment.Variations[1].Id, float64(0.1)) + registerGoalEventWithEvaluations(t, featureID, f.Version, goalIDs[0], userIDs[4], experiment.Variations[1].Id, float64(0.15)) + + for i := 0; i < retryTimes; i++ { + resp := getExperimentResult(t, ecClient, experiment.Id) + if resp != nil { + er := resp.ExperimentResult + if er.Id != experiment.Id { + t.Fatalf("experiment ID is not correct: %s", er.Id) + } + if len(er.GoalResults) == 0 { + continue + } + if len(er.GoalResults) != 1 { + t.Fatalf("the number of goal results is not correct: %d", len(er.GoalResults)) + } + gr := er.GoalResults[0] + if gr.GoalId != goalIDs[0] { + t.Fatalf("goal ID is not correct: %s", gr.GoalId) + } + if len(gr.VariationResults) != 2 { + t.Fatalf("the number of variation results is not correct: %d", len(gr.VariationResults)) + } + for _, vr := range gr.VariationResults { + // variation a + if vr.VariationId == experiment.Variations[0].Id { + vv := experiment.Variations[0].Value + // Evaluation + if vr.EvaluationCount.EventCount != 4 { + t.Fatalf("variation: %s: evaluation event count is not correct: %d", vv, vr.EvaluationCount.EventCount) + } + if vr.EvaluationCount.UserCount != 3 { + t.Fatalf("variation: %s: evaluation user count is not correct: %d", vv, vr.EvaluationCount.UserCount) + } + // Experiment + if vr.ExperimentCount.EventCount 
!= 3 { + t.Fatalf("variation: %s: experiment event count is not correct: %d", vv, vr.ExperimentCount.EventCount) + } + if vr.ExperimentCount.UserCount != 3 { + t.Fatalf("variation: %s: experiment user count is not correct: %d", vv, vr.ExperimentCount.UserCount) + } + if vr.ExperimentCount.ValueSum != float64(0.6) { + t.Fatalf("variation: %s: experiment value sum is not correct: %f", vv, vr.ExperimentCount.ValueSum) + } + // cvr prob best + if vr.CvrProbBest.Mean <= float64(0.0) { + t.Fatalf("variation: %s: cvr prob best mean is not correct: %f", vv, vr.CvrProbBest.Mean) + } + if vr.CvrProbBest.Sd <= float64(0.0) { + t.Fatalf("variation: %s: cvr prob best sd is not correct: %f", vv, vr.CvrProbBest.Sd) + } + if vr.CvrProbBest.Rhat <= float64(0.0) { + t.Fatalf("variation: %s: cvr prob best rhat is not correct: %f", vv, vr.CvrProbBest.Rhat) + } + // cvr prob beat baseline + if vr.CvrProbBeatBaseline.Mean != float64(0.0) { + t.Fatalf("variation: %s: cvr prob beat baseline mean is not correct: %f", vv, vr.CvrProbBeatBaseline.Mean) + } + if vr.CvrProbBeatBaseline.Sd != float64(0.0) { + t.Fatalf("variation: %s: cvr prob beat baseline best sd is not correct: %f", vv, vr.CvrProbBeatBaseline.Sd) + } + if vr.CvrProbBeatBaseline.Rhat != float64(0.0) { + t.Fatalf("variation: %s: cvr prob beat baseline best rhat is not correct: %f", vv, vr.CvrProbBeatBaseline.Rhat) + } + // value sum per user prob best + if vr.GoalValueSumPerUserProbBest.Mean <= float64(0.0) { + t.Fatalf("variation: %s: value sum per user prob best mean is not correct: %f", vv, vr.GoalValueSumPerUserProbBest.Mean) + } + // value sum per user prob beat baseline + if vr.GoalValueSumPerUserProbBeatBaseline.Mean != float64(0.0) { + t.Fatalf("variation: %s: value sum per user prob beat baseline mean is not correct: %f", vv, vr.GoalValueSumPerUserProbBeatBaseline.Mean) + } + continue + } + // variation b + if vr.VariationId == experiment.Variations[1].Id { + vv := experiment.Variations[1].Value + // Evaluation + if 
vr.EvaluationCount.EventCount != 3 { + t.Fatalf("variation: %s: evaluation event count is not correct: %d", vv, vr.EvaluationCount.EventCount) + } + if vr.EvaluationCount.UserCount != 2 { + t.Fatalf("variation: %s: evaluation user count is not correct: %d", vv, vr.EvaluationCount.UserCount) + } + // Experiment + if vr.ExperimentCount.EventCount != 2 { + t.Fatalf("variation: %s: experiment event count is not correct: %d", vv, vr.ExperimentCount.EventCount) + } + if vr.ExperimentCount.UserCount != 2 { + t.Fatalf("variation: %s: experiment user count is not correct: %d", vv, vr.ExperimentCount.UserCount) + } + if vr.ExperimentCount.ValueSum != float64(0.25) { + t.Fatalf("variation: %s: experiment value sum is not correct: %f", vv, vr.ExperimentCount.ValueSum) + } + // cvr prob best + if vr.CvrProbBest.Mean <= float64(0.0) { + t.Fatalf("variation: %s: cvr prob best mean is not correct: %f", vv, vr.CvrProbBest.Mean) + } + if vr.CvrProbBest.Sd <= float64(0.0) { + t.Fatalf("variation: %s: cvr prob best sd is not correct: %f", vv, vr.CvrProbBest.Sd) + } + if vr.CvrProbBest.Rhat <= float64(0.0) { + t.Fatalf("variation: %s: cvr prob best rhat is not correct: %f", vv, vr.CvrProbBest.Rhat) + } + // cvr prob beat baseline + if vr.CvrProbBeatBaseline.Mean <= float64(0.0) { + t.Fatalf("variation: %s: cvr prob beat baseline mean is not correct: %f", vv, vr.CvrProbBeatBaseline.Mean) + } + if vr.CvrProbBeatBaseline.Sd <= float64(0.0) { + t.Fatalf("variation: %s: cvr prob beat baseline best sd is not correct: %f", vv, vr.CvrProbBeatBaseline.Sd) + } + if vr.CvrProbBeatBaseline.Rhat <= float64(0.0) { + t.Fatalf("variation: %s: cvr prob beat baseline best rhat is not correct: %f", vv, vr.CvrProbBeatBaseline.Rhat) + } + // value sum per user prob best + if vr.GoalValueSumPerUserProbBest.Mean <= float64(0.0) { + t.Fatalf("variation: %s: value sum per user prob best mean is not correct: %f", vv, vr.GoalValueSumPerUserProbBest.Mean) + } + // value sum per user prob beat baseline + if 
vr.GoalValueSumPerUserProbBeatBaseline.Mean <= float64(0.0) { + t.Fatalf("variation: %s: value sum per user prob beat baseline mean is not correct: %f", vv, vr.GoalValueSumPerUserProbBeatBaseline.Mean) + } + continue + } + t.Fatalf("unknown variation results: %s", vr.VariationId) + } + break + } + if i == retryTimes-1 { + t.Fatalf("retry timeout") + } + time.Sleep(time.Second) + } +} + +func TestGrpcExperimentResult(t *testing.T) { + t.Parallel() + featureClient := newFeatureClient(t) + defer featureClient.Close() + experimentClient := newExperimentClient(t) + defer experimentClient.Close() + ecClient := newEventCounterClient(t) + defer ecClient.Close() + uuid := newUUID(t) + + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + + tag := fmt.Sprintf("%s-tag-%s", prefixTestName, uuid) + userIDs := []string{} + for i := 0; i < 5; i++ { + userIDs = append(userIDs, fmt.Sprintf("%s-%d", createUserID(t, uuid), i)) + } + featureID := createFeatureID(t, uuid) + + cmd := newCreateFeatureCommand(featureID, []string{"a", "b"}) + createFeature(t, featureClient, cmd) + addTag(t, tag, featureID, featureClient) + enableFeature(t, featureID, featureClient) + f := getFeature(t, featureClient, featureID) + goalIDs := createGoals(ctx, t, experimentClient, 1) + startAt := time.Now().Local().Add(-1 * time.Hour) + stopAt := startAt.Local().Add(time.Hour * 2) + experiment := createExperimentWithMultiGoals(ctx, t, experimentClient, "ExperimentResult", featureID, goalIDs, f.Variations[0].Id, startAt, stopAt) + + // CVRs is 3/4 + // Register 3 events and 2 user counts for user 1, 2 and 3 + // Register variation a + grpcRegisterEvaluationEvent(t, featureID, f.Version, userIDs[0], experiment.Variations[0].Id, tag) + grpcRegisterEvaluationEvent(t, featureID, f.Version, userIDs[1], experiment.Variations[0].Id, tag) + grpcRegisterEvaluationEvent(t, featureID, f.Version, userIDs[2], experiment.Variations[0].Id, tag) + // Increment evaluation event count + 
grpcRegisterEvaluationEvent(t, featureID, f.Version, userIDs[0], experiment.Variations[0].Id, tag) + // Register goal variation + grpcRegisterGoalEvent(t, goalIDs[0], userIDs[0], tag, float64(0.3)) + grpcRegisterGoalEvent(t, goalIDs[0], userIDs[1], tag, float64(0.2)) + grpcRegisterGoalEvent(t, goalIDs[0], userIDs[2], tag, float64(0.1)) + // Increment experiment event count + grpcRegisterGoalEvent(t, goalIDs[0], userIDs[0], tag, float64(0.3)) + + // CVRs is 2/3 + // Register 3 events and 2 user counts for user 4 and 5 + // Register variation + grpcRegisterEvaluationEvent(t, featureID, f.Version, userIDs[3], experiment.Variations[1].Id, tag) + grpcRegisterEvaluationEvent(t, featureID, f.Version, userIDs[4], experiment.Variations[1].Id, tag) + // Increment evaluation event count + grpcRegisterEvaluationEvent(t, featureID, f.Version, userIDs[3], experiment.Variations[1].Id, tag) + // Register goal + grpcRegisterGoalEvent(t, goalIDs[0], userIDs[3], tag, float64(0.1)) + grpcRegisterGoalEvent(t, goalIDs[0], userIDs[4], tag, float64(0.15)) + // Increment experiment event count + grpcRegisterGoalEvent(t, goalIDs[0], userIDs[3], tag, float64(0.1)) + + for i := 0; i < retryTimes; i++ { + resp := getExperimentResult(t, ecClient, experiment.Id) + if resp != nil { + er := resp.ExperimentResult + if er.Id != experiment.Id { + t.Fatalf("experiment ID is not correct: %s", er.Id) + } + if len(er.GoalResults) == 0 { + continue + } + if len(er.GoalResults) != 1 { + t.Fatalf("the number of goal results is not correct: %d", len(er.GoalResults)) + } + gr := er.GoalResults[0] + if gr.GoalId != goalIDs[0] { + t.Fatalf("goal ID is not correct: %s", gr.GoalId) + } + if len(gr.VariationResults) != 2 { + t.Fatalf("the number of variation results is not correct: %d", len(gr.VariationResults)) + } + for _, vr := range gr.VariationResults { + // variation a + if vr.VariationId == experiment.Variations[0].Id { + vv := experiment.Variations[0].Value + // Evaluation + if 
vr.EvaluationCount.EventCount != 4 { + t.Fatalf("variation: %s: evaluation event count is not correct: %d", vv, vr.EvaluationCount.EventCount) + } + if vr.EvaluationCount.UserCount != 3 { + t.Fatalf("variation: %s: evaluation user count is not correct: %d", vv, vr.EvaluationCount.UserCount) + } + // Experiment + if vr.ExperimentCount.EventCount != 4 { + t.Fatalf("variation: %s: experiment event count is not correct: %d", vv, vr.ExperimentCount.EventCount) + } + if vr.ExperimentCount.UserCount != 3 { + t.Fatalf("variation: %s: experiment user count is not correct: %d", vv, vr.ExperimentCount.UserCount) + } + if vr.ExperimentCount.ValueSum != float64(0.9) { + t.Fatalf("variation: %s: experiment value sum is not correct: %f", vv, vr.ExperimentCount.ValueSum) + } + // cvr prob best + if vr.CvrProbBest.Mean <= float64(0.0) { + t.Fatalf("variation: %s: cvr prob best mean is not correct: %f", vv, vr.CvrProbBest.Mean) + } + if vr.CvrProbBest.Sd <= float64(0.0) { + t.Fatalf("variation: %s: cvr prob best sd is not correct: %f", vv, vr.CvrProbBest.Sd) + } + if vr.CvrProbBest.Rhat <= float64(0.0) { + t.Fatalf("variation: %s: cvr prob best rhat is not correct: %f", vv, vr.CvrProbBest.Rhat) + } + // cvr prob beat baseline + if vr.CvrProbBeatBaseline.Mean != float64(0.0) { + t.Fatalf("variation: %s: cvr prob beat baseline mean is not correct: %f", vv, vr.CvrProbBeatBaseline.Mean) + } + if vr.CvrProbBeatBaseline.Sd != float64(0.0) { + t.Fatalf("variation: %s: cvr prob beat baseline best sd is not correct: %f", vv, vr.CvrProbBeatBaseline.Sd) + } + if vr.CvrProbBeatBaseline.Rhat != float64(0.0) { + t.Fatalf("variation: %s: cvr prob beat baseline best rhat is not correct: %f", vv, vr.CvrProbBeatBaseline.Rhat) + } + // value sum per user prob best + if vr.GoalValueSumPerUserProbBest.Mean <= float64(0.0) { + t.Fatalf("variation: %s: value sum per user prob best mean is not correct: %f", vv, vr.GoalValueSumPerUserProbBest.Mean) + } + // value sum per user prob beat baseline + if 
vr.GoalValueSumPerUserProbBeatBaseline.Mean != float64(0.0) { + t.Fatalf("variation: %s: value sum per user prob beat baseline mean is not correct: %f", vv, vr.GoalValueSumPerUserProbBeatBaseline.Mean) + } + continue + } + // variation b + if vr.VariationId == experiment.Variations[1].Id { + vv := experiment.Variations[1].Value + // Evaluation + if vr.EvaluationCount.EventCount != 3 { + t.Fatalf("variation: %s: evaluation event count is not correct: %d", vv, vr.EvaluationCount.EventCount) + } + if vr.EvaluationCount.UserCount != 2 { + t.Fatalf("variation: %s: evaluation user count is not correct: %d", vv, vr.EvaluationCount.UserCount) + } + // Experiment + if vr.ExperimentCount.EventCount != 3 { + t.Fatalf("variation: %s: experiment event count is not correct: %d", vv, vr.ExperimentCount.EventCount) + } + if vr.ExperimentCount.UserCount != 2 { + t.Fatalf("variation: %s: experiment user count is not correct: %d", vv, vr.ExperimentCount.UserCount) + } + if vr.ExperimentCount.ValueSum != float64(0.35) { + t.Fatalf("variation: %s: experiment value sum is not correct: %f", vv, vr.ExperimentCount.ValueSum) + } + // cvr prob best + if vr.CvrProbBest.Mean <= float64(0.0) { + t.Fatalf("variation: %s: cvr prob best mean is not correct: %f", vv, vr.CvrProbBest.Mean) + } + if vr.CvrProbBest.Sd <= float64(0.0) { + t.Fatalf("variation: %s: cvr prob best sd is not correct: %f", vv, vr.CvrProbBest.Sd) + } + if vr.CvrProbBest.Rhat <= float64(0.0) { + t.Fatalf("variation: %s: cvr prob best rhat is not correct: %f", vv, vr.CvrProbBest.Rhat) + } + // cvr prob beat baseline + if vr.CvrProbBeatBaseline.Mean <= float64(0.0) { + t.Fatalf("variation: %s: cvr prob beat baseline mean is not correct: %f", vv, vr.CvrProbBeatBaseline.Mean) + } + if vr.CvrProbBeatBaseline.Sd <= float64(0.0) { + t.Fatalf("variation: %s: cvr prob beat baseline best sd is not correct: %f", vv, vr.CvrProbBeatBaseline.Sd) + } + if vr.CvrProbBeatBaseline.Rhat <= float64(0.0) { + t.Fatalf("variation: %s: cvr prob beat 
baseline best rhat is not correct: %f", vv, vr.CvrProbBeatBaseline.Rhat) + } + // value sum per user prob best + if vr.GoalValueSumPerUserProbBest.Mean <= float64(0.0) { + t.Fatalf("variation: %s: value sum per user prob best mean is not correct: %f", vv, vr.GoalValueSumPerUserProbBest.Mean) + } + // value sum per user prob beat baseline + if vr.GoalValueSumPerUserProbBeatBaseline.Mean <= float64(0.0) { + t.Fatalf("variation: %s: value sum per user prob beat baseline mean is not correct: %f", vv, vr.GoalValueSumPerUserProbBeatBaseline.Mean) + } + continue + } + t.Fatalf("unknown variation results: %s", vr.VariationId) + } + break + } + if i == retryTimes-1 { + t.Fatalf("retry timeout") + } + time.Sleep(time.Second) + } + res := getExperiment(t, experimentClient, experiment.Id) + if res.Experiment.Status != experimentproto.Experiment_RUNNING { + expected, _ := experimentproto.Experiment_Status_name[int32(experimentproto.Experiment_RUNNING)] + actual, _ := experimentproto.Experiment_Status_name[int32(res.Experiment.Status)] + t.Fatalf("the status of experiment is not correct. 
expected: %s, but got %s", expected, actual) + } +} + +func TestExperimentResult(t *testing.T) { + t.Parallel() + featureClient := newFeatureClient(t) + defer featureClient.Close() + experimentClient := newExperimentClient(t) + defer experimentClient.Close() + ecClient := newEventCounterClient(t) + defer ecClient.Close() + uuid := newUUID(t) + + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + + tag := fmt.Sprintf("%s-tag-%s", prefixTestName, uuid) + userIDs := []string{} + for i := 0; i < 5; i++ { + userIDs = append(userIDs, fmt.Sprintf("%s-%d", createUserID(t, uuid), i)) + } + featureID := createFeatureID(t, uuid) + + cmd := newCreateFeatureCommand(featureID, []string{"a", "b"}) + createFeature(t, featureClient, cmd) + addTag(t, tag, featureID, featureClient) + enableFeature(t, featureID, featureClient) + f := getFeature(t, featureClient, featureID) + goalIDs := createGoals(ctx, t, experimentClient, 1) + startAt := time.Now().Local().Add(-1 * time.Hour) + stopAt := startAt.Local().Add(time.Hour * 2) + experiment := createExperimentWithMultiGoals(ctx, t, experimentClient, "ExperimentResult", featureID, goalIDs, f.Variations[0].Id, startAt, stopAt) + + // CVRs is 3/4 + // Register 3 events and 2 user counts for user 1, 2 and 3 + // Register variation a + registerEvaluationEvent(t, featureID, f.Version, userIDs[0], experiment.Variations[0].Id, tag) + registerEvaluationEvent(t, featureID, f.Version, userIDs[1], experiment.Variations[0].Id, tag) + registerEvaluationEvent(t, featureID, f.Version, userIDs[2], experiment.Variations[0].Id, tag) + // Increment evaluation event count + registerEvaluationEvent(t, featureID, f.Version, userIDs[0], experiment.Variations[0].Id, tag) + // Register goal variation + registerGoalEvent(t, goalIDs[0], userIDs[0], tag, float64(0.3)) + registerGoalEvent(t, goalIDs[0], userIDs[1], tag, float64(0.2)) + registerGoalEvent(t, goalIDs[0], userIDs[2], tag, float64(0.1)) + // Increment experiment 
event count + registerGoalEvent(t, goalIDs[0], userIDs[0], tag, float64(0.3)) + + // CVRs is 2/3 + // Register 3 events and 2 user counts for user 4 and 5 + // Register variation + registerEvaluationEvent(t, featureID, f.Version, userIDs[3], experiment.Variations[1].Id, tag) + registerEvaluationEvent(t, featureID, f.Version, userIDs[4], experiment.Variations[1].Id, tag) + // Increment evaluation event count + registerEvaluationEvent(t, featureID, f.Version, userIDs[3], experiment.Variations[1].Id, tag) + // Register goal + registerGoalEvent(t, goalIDs[0], userIDs[3], tag, float64(0.1)) + registerGoalEvent(t, goalIDs[0], userIDs[4], tag, float64(0.15)) + // Increment experiment event count + registerGoalEvent(t, goalIDs[0], userIDs[3], tag, float64(0.1)) + + for i := 0; i < retryTimes; i++ { + resp := getExperimentResult(t, ecClient, experiment.Id) + if resp != nil { + er := resp.ExperimentResult + if er.Id != experiment.Id { + t.Fatalf("experiment ID is not correct: %s", er.Id) + } + if len(er.GoalResults) == 0 { + continue + } + if len(er.GoalResults) != 1 { + t.Fatalf("the number of goal results is not correct: %d", len(er.GoalResults)) + } + gr := er.GoalResults[0] + if gr.GoalId != goalIDs[0] { + t.Fatalf("goal ID is not correct: %s", gr.GoalId) + } + if len(gr.VariationResults) != 2 { + t.Fatalf("the number of variation results is not correct: %d", len(gr.VariationResults)) + } + for _, vr := range gr.VariationResults { + // variation a + if vr.VariationId == experiment.Variations[0].Id { + vv := experiment.Variations[0].Value + // Evaluation + if vr.EvaluationCount.EventCount != 4 { + t.Fatalf("variation: %s: evaluation event count is not correct: %d", vv, vr.EvaluationCount.EventCount) + } + if vr.EvaluationCount.UserCount != 3 { + t.Fatalf("variation: %s: evaluation user count is not correct: %d", vv, vr.EvaluationCount.UserCount) + } + // Experiment + if vr.ExperimentCount.EventCount != 4 { + t.Fatalf("variation: %s: experiment event count is not correct: 
%d", vv, vr.ExperimentCount.EventCount) + } + if vr.ExperimentCount.UserCount != 3 { + t.Fatalf("variation: %s: experiment user count is not correct: %d", vv, vr.ExperimentCount.UserCount) + } + if vr.ExperimentCount.ValueSum != float64(0.9) { + t.Fatalf("variation: %s: experiment value sum is not correct: %f", vv, vr.ExperimentCount.ValueSum) + } + // cvr prob best + if vr.CvrProbBest.Mean <= float64(0.0) { + t.Fatalf("variation: %s: cvr prob best mean is not correct: %f", vv, vr.CvrProbBest.Mean) + } + if vr.CvrProbBest.Sd <= float64(0.0) { + t.Fatalf("variation: %s: cvr prob best sd is not correct: %f", vv, vr.CvrProbBest.Sd) + } + if vr.CvrProbBest.Rhat <= float64(0.0) { + t.Fatalf("variation: %s: cvr prob best rhat is not correct: %f", vv, vr.CvrProbBest.Rhat) + } + // cvr prob beat baseline + if vr.CvrProbBeatBaseline.Mean != float64(0.0) { + t.Fatalf("variation: %s: cvr prob beat baseline mean is not correct: %f", vv, vr.CvrProbBeatBaseline.Mean) + } + if vr.CvrProbBeatBaseline.Sd != float64(0.0) { + t.Fatalf("variation: %s: cvr prob beat baseline best sd is not correct: %f", vv, vr.CvrProbBeatBaseline.Sd) + } + if vr.CvrProbBeatBaseline.Rhat != float64(0.0) { + t.Fatalf("variation: %s: cvr prob beat baseline best rhat is not correct: %f", vv, vr.CvrProbBeatBaseline.Rhat) + } + // value sum per user prob best + if vr.GoalValueSumPerUserProbBest.Mean <= float64(0.0) { + t.Fatalf("variation: %s: value sum per user prob best mean is not correct: %f", vv, vr.GoalValueSumPerUserProbBest.Mean) + } + // value sum per user prob beat baseline + if vr.GoalValueSumPerUserProbBeatBaseline.Mean != float64(0.0) { + t.Fatalf("variation: %s: value sum per user prob beat baseline mean is not correct: %f", vv, vr.GoalValueSumPerUserProbBeatBaseline.Mean) + } + continue + } + // variation b + if vr.VariationId == experiment.Variations[1].Id { + vv := experiment.Variations[1].Value + // Evaluation + if vr.EvaluationCount.EventCount != 3 { + t.Fatalf("variation: %s: evaluation 
event count is not correct: %d", vv, vr.EvaluationCount.EventCount) + } + if vr.EvaluationCount.UserCount != 2 { + t.Fatalf("variation: %s: evaluation user count is not correct: %d", vv, vr.EvaluationCount.UserCount) + } + // Experiment + if vr.ExperimentCount.EventCount != 3 { + t.Fatalf("variation: %s: experiment event count is not correct: %d", vv, vr.ExperimentCount.EventCount) + } + if vr.ExperimentCount.UserCount != 2 { + t.Fatalf("variation: %s: experiment user count is not correct: %d", vv, vr.ExperimentCount.UserCount) + } + if vr.ExperimentCount.ValueSum != float64(0.35) { + t.Fatalf("variation: %s: experiment value sum is not correct: %f", vv, vr.ExperimentCount.ValueSum) + } + // cvr prob best + if vr.CvrProbBest.Mean <= float64(0.0) { + t.Fatalf("variation: %s: cvr prob best mean is not correct: %f", vv, vr.CvrProbBest.Mean) + } + if vr.CvrProbBest.Sd <= float64(0.0) { + t.Fatalf("variation: %s: cvr prob best sd is not correct: %f", vv, vr.CvrProbBest.Sd) + } + if vr.CvrProbBest.Rhat <= float64(0.0) { + t.Fatalf("variation: %s: cvr prob best rhat is not correct: %f", vv, vr.CvrProbBest.Rhat) + } + // cvr prob beat baseline + if vr.CvrProbBeatBaseline.Mean <= float64(0.0) { + t.Fatalf("variation: %s: cvr prob beat baseline mean is not correct: %f", vv, vr.CvrProbBeatBaseline.Mean) + } + if vr.CvrProbBeatBaseline.Sd <= float64(0.0) { + t.Fatalf("variation: %s: cvr prob beat baseline best sd is not correct: %f", vv, vr.CvrProbBeatBaseline.Sd) + } + if vr.CvrProbBeatBaseline.Rhat <= float64(0.0) { + t.Fatalf("variation: %s: cvr prob beat baseline best rhat is not correct: %f", vv, vr.CvrProbBeatBaseline.Rhat) + } + // value sum per user prob best + if vr.GoalValueSumPerUserProbBest.Mean <= float64(0.0) { + t.Fatalf("variation: %s: value sum per user prob best mean is not correct: %f", vv, vr.GoalValueSumPerUserProbBest.Mean) + } + // value sum per user prob beat baseline + if vr.GoalValueSumPerUserProbBeatBaseline.Mean <= float64(0.0) { + 
t.Fatalf("variation: %s: value sum per user prob beat baseline mean is not correct: %f", vv, vr.GoalValueSumPerUserProbBeatBaseline.Mean) + } + continue + } + t.Fatalf("unknown variation results: %s", vr.VariationId) + } + break + } + if i == retryTimes-1 { + t.Fatalf("retry timeout") + } + time.Sleep(time.Second) + } + res := getExperiment(t, experimentClient, experiment.Id) + if res.Experiment.Status != experimentproto.Experiment_RUNNING { + expected, _ := experimentproto.Experiment_Status_name[int32(experimentproto.Experiment_RUNNING)] + actual, _ := experimentproto.Experiment_Status_name[int32(res.Experiment.Status)] + t.Fatalf("the status of experiment is not correct. expected: %s, but got %s", expected, actual) + } +} + +func TestGrpcMultiGoalsEventCounterRealtime(t *testing.T) { + t.Parallel() + featureClient := newFeatureClient(t) + defer featureClient.Close() + experimentClient := newExperimentClient(t) + defer experimentClient.Close() + ecClient := newEventCounterClient(t) + defer ecClient.Close() + uuid := newUUID(t) + + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + + tag := fmt.Sprintf("%s-tag-%s", prefixTestName, uuid) + userID := createUserID(t, uuid) + featureID := createFeatureID(t, uuid) + + variationVarA := "a" + variationVarB := "b" + cmd := newCreateFeatureCommand(featureID, []string{variationVarA, variationVarB}) + createFeature(t, featureClient, cmd) + addTag(t, tag, featureID, featureClient) + enableFeature(t, featureID, featureClient) + f := getFeature(t, featureClient, featureID) + goalIDs := createGoals(ctx, t, experimentClient, 3) + variations := make(map[string]*featureproto.Variation) + variationIDs := []string{} + for _, v := range f.Variations { + variationIDs = append(variationIDs, v.Id) + variations[v.Value] = v + } + grpcRegisterEvaluationEvent(t, featureID, f.Version, userID, f.Variations[0].Id, tag) + grpcRegisterEvaluationEvent(t, featureID, f.Version+1, userID, f.Variations[1].Id, 
tag) + + grpcRegisterGoalEvent(t, goalIDs[0], userID, tag, float64(0.3)) + grpcRegisterGoalEvent(t, goalIDs[1], userID, tag, float64(0.2)) + + for i := 0; i < retryTimes; i++ { + if i == retryTimes-1 { + t.Fatalf("retry timeout") + } + time.Sleep(time.Second) + + // Goal 0. + resp := getGoalCountV2(t, ecClient, goalIDs[0], featureID, f.Version, variationIDs) + if len(resp.GoalCounts.RealtimeCounts) == 0 { + t.Fatalf("no count returned") + } + if resp.GoalCounts.GoalId != goalIDs[0] { + t.Fatalf("goal ID is not correct: %s", resp.GoalCounts.GoalId) + } + + vcA := getVariationCount(resp.GoalCounts.RealtimeCounts, variations[variationVarA].Id) + if vcA == nil { + t.Fatalf("variation a is missing") + } + if vcA.UserCount != 1 { + continue + } + if vcA.EventCount != 1 { + continue + } + if vcA.ValueSum != float64(0.3) { + continue + } + + vcB := getVariationCount(resp.GoalCounts.RealtimeCounts, variations[variationVarB].Id) + if vcB == nil { + t.Fatalf("variation b is missing") + } + if vcB.UserCount != 0 { + continue + } + if vcB.EventCount != 0 { + continue + } + if vcB.ValueSum != float64(0) { + continue + } + + // Goal 1. 
+ resp = getGoalCountV2(t, ecClient, goalIDs[1], featureID, f.Version+1, variationIDs) + if len(resp.GoalCounts.RealtimeCounts) == 0 { + t.Fatalf("no count returned") + } + if resp.GoalCounts.GoalId != goalIDs[1] { + t.Fatalf("goal ID is not correct: %s", resp.GoalCounts.GoalId) + } + + vcA = getVariationCount(resp.GoalCounts.RealtimeCounts, variations[variationVarA].Id) + if vcA == nil { + t.Fatalf("variation a is missing") + } + if vcA.UserCount != 0 { + continue + } + if vcA.EventCount != 0 { + continue + } + if vcA.ValueSum != float64(0) { + continue + } + + vcB = getVariationCount(resp.GoalCounts.RealtimeCounts, variations[variationVarB].Id) + if vcB == nil { + t.Fatalf("variation b is missing") + } + if vcB.UserCount != 1 { + continue + } + if vcB.EventCount != 1 { + continue + } + if vcB.ValueSum != float64(0.2) { + continue + } + + // Goal 2. + resp = getGoalCountV2(t, ecClient, goalIDs[2], featureID, f.Version, variationIDs) + if len(resp.GoalCounts.RealtimeCounts) == 0 { + t.Fatalf("no count returned") + } + if resp.GoalCounts.GoalId != goalIDs[2] { + t.Fatalf("goal ID is not correct: %s", resp.GoalCounts.GoalId) + } + + vcA = getVariationCount(resp.GoalCounts.RealtimeCounts, variations[variationVarA].Id) + if vcA == nil { + t.Fatalf("variation a is missing") + } + if vcA.UserCount != 0 { + continue + } + if vcA.EventCount != 0 { + continue + } + if vcA.ValueSum != float64(0) { + continue + } + + vcB = getVariationCount(resp.GoalCounts.RealtimeCounts, variations[variationVarB].Id) + if vcB == nil { + t.Fatalf("variation b is missing") + } + if vcB.UserCount != 0 { + continue + } + if vcB.EventCount != 0 { + continue + } + if vcB.ValueSum != float64(0) { + continue + } + break + } +} + +func TestMultiGoalsEventCounterRealtime(t *testing.T) { + t.Parallel() + featureClient := newFeatureClient(t) + defer featureClient.Close() + experimentClient := newExperimentClient(t) + defer experimentClient.Close() + ecClient := newEventCounterClient(t) + defer 
ecClient.Close() + uuid := newUUID(t) + + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + + tag := fmt.Sprintf("%s-tag-%s", prefixTestName, uuid) + userID := createUserID(t, uuid) + featureID := createFeatureID(t, uuid) + + variationVarA := "a" + variationVarB := "b" + cmd := newCreateFeatureCommand(featureID, []string{variationVarA, variationVarB}) + createFeature(t, featureClient, cmd) + addTag(t, tag, featureID, featureClient) + enableFeature(t, featureID, featureClient) + f := getFeature(t, featureClient, featureID) + goalIDs := createGoals(ctx, t, experimentClient, 3) + variations := make(map[string]*featureproto.Variation) + variationIDs := []string{} + for _, v := range f.Variations { + variationIDs = append(variationIDs, v.Id) + variations[v.Value] = v + } + registerEvaluationEvent(t, featureID, f.Version, userID, f.Variations[0].Id, tag) + registerEvaluationEvent(t, featureID, f.Version+1, userID, f.Variations[1].Id, tag) + + registerGoalEvent(t, goalIDs[0], userID, tag, float64(0.3)) + registerGoalEvent(t, goalIDs[1], userID, tag, float64(0.2)) + + for i := 0; i < retryTimes; i++ { + if i == retryTimes-1 { + t.Fatalf("retry timeout") + } + time.Sleep(time.Second) + + // Goal 0. 
+ resp := getGoalCountV2(t, ecClient, goalIDs[0], featureID, f.Version, variationIDs) + if len(resp.GoalCounts.RealtimeCounts) == 0 { + t.Fatalf("no count returned") + } + if resp.GoalCounts.GoalId != goalIDs[0] { + t.Fatalf("goal ID is not correct: %s", resp.GoalCounts.GoalId) + } + + vcA := getVariationCount(resp.GoalCounts.RealtimeCounts, variations[variationVarA].Id) + if vcA == nil { + t.Fatalf("variation a is missing") + } + if vcA.UserCount != 1 { + continue + } + if vcA.EventCount != 1 { + continue + } + if vcA.ValueSum != float64(0.3) { + continue + } + + vcB := getVariationCount(resp.GoalCounts.RealtimeCounts, variations[variationVarB].Id) + if vcB == nil { + t.Fatalf("variation b is missing") + } + if vcB.UserCount != 0 { + continue + } + if vcB.EventCount != 0 { + continue + } + if vcB.ValueSum != float64(0) { + continue + } + + // Goal 1. + resp = getGoalCountV2(t, ecClient, goalIDs[1], featureID, f.Version+1, variationIDs) + if len(resp.GoalCounts.RealtimeCounts) == 0 { + t.Fatalf("no count returned") + } + if resp.GoalCounts.GoalId != goalIDs[1] { + t.Fatalf("goal ID is not correct: %s", resp.GoalCounts.GoalId) + } + + vcA = getVariationCount(resp.GoalCounts.RealtimeCounts, variations[variationVarA].Id) + if vcA == nil { + t.Fatalf("variation a is missing") + } + if vcA.UserCount != 0 { + continue + } + if vcA.EventCount != 0 { + continue + } + if vcA.ValueSum != float64(0) { + continue + } + + vcB = getVariationCount(resp.GoalCounts.RealtimeCounts, variations[variationVarB].Id) + if vcB == nil { + t.Fatalf("variation b is missing") + } + if vcB.UserCount != 1 { + continue + } + if vcB.EventCount != 1 { + continue + } + if vcB.ValueSum != float64(0.2) { + continue + } + + // Goal 2. 
+ resp = getGoalCountV2(t, ecClient, goalIDs[2], featureID, f.Version, variationIDs) + if len(resp.GoalCounts.RealtimeCounts) == 0 { + t.Fatalf("no count returned") + } + if resp.GoalCounts.GoalId != goalIDs[2] { + t.Fatalf("goal ID is not correct: %s", resp.GoalCounts.GoalId) + } + + vcA = getVariationCount(resp.GoalCounts.RealtimeCounts, variations[variationVarA].Id) + if vcA == nil { + t.Fatalf("variation a is missing") + } + if vcA.UserCount != 0 { + continue + } + if vcA.EventCount != 0 { + continue + } + if vcA.ValueSum != float64(0) { + continue + } + + vcB = getVariationCount(resp.GoalCounts.RealtimeCounts, variations[variationVarB].Id) + if vcB == nil { + t.Fatalf("variation b is missing") + } + if vcB.UserCount != 0 { + continue + } + if vcB.EventCount != 0 { + continue + } + if vcB.ValueSum != float64(0) { + continue + } + break + } +} + +func TestGoalBatchEventCounter(t *testing.T) { + t.Parallel() + featureClient := newFeatureClient(t) + defer featureClient.Close() + experimentClient := newExperimentClient(t) + defer experimentClient.Close() + ecClient := newEventCounterClient(t) + defer ecClient.Close() + uuid := newUUID(t) + + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + + tag := fmt.Sprintf("%s-tag-%s", prefixTestName, uuid) + userID := createUserID(t, uuid) + featureID := createFeatureID(t, uuid) + + variationVarA := "a" + variationVarB := "b" + cmd := newCreateFeatureCommand(featureID, []string{variationVarA, variationVarB}) + createFeature(t, featureClient, cmd) + addTag(t, tag, featureID, featureClient) + enableFeature(t, featureID, featureClient) + f := getFeature(t, featureClient, featureID) + goalIDs := createGoals(ctx, t, experimentClient, 1) + variations := make(map[string]*featureproto.Variation) + variationIDs := []string{} + for _, v := range f.Variations { + variationIDs = append(variationIDs, v.Id) + variations[v.Value] = v + } + + // Get user evaluations, which creates user in redis. 
+ for i := 0; i < retryTimes; i++ { + resp := getEvaluation(t, tag, userID) + if len(resp.Evaluations.Evaluations) == 1 { + break + } + if i == retryTimes-1 { + t.Fatalf("State did not change. Expected: %v, actual: %v", featureproto.UserEvaluations_FULL, resp.State) + } + time.Sleep(time.Second) + } + time.Sleep(5 * time.Second) + + registerEvaluationEvent(t, featureID, f.Version, userID, f.Variations[0].Id, tag) + // Send goal batch events. + registerGoalBatchEvent(t, tag, goalIDs[0], userID) + + // Check the count + for i := 0; i < retryTimes; i++ { + if i == retryTimes-1 { + t.Fatalf("retry timeout") + } + time.Sleep(time.Second) + + resp := getGoalCountV2(t, ecClient, goalIDs[0], featureID, f.Version, variationIDs) + if len(resp.GoalCounts.RealtimeCounts) == 0 { + t.Fatalf("no count returned") + } + if resp.GoalCounts.GoalId != goalIDs[0] { + t.Fatalf("goal ID is not correct: %s", resp.GoalCounts.GoalId) + } + + vcA := getVariationCount(resp.GoalCounts.RealtimeCounts, variations[variationVarA].Id) + if vcA == nil { + t.Fatalf("variation a is missing") + } + if vcA.UserCount != 1 { + continue + } + if vcA.EventCount != 1 { + continue + } + if vcA.ValueSum != float64(0.3) { + continue + } + if vcA.ValueSumPerUserMean != float64(0.3) { + continue + } + if vcA.ValueSumPerUserVariance != float64(0) { + continue + } + + vcB := getVariationCount(resp.GoalCounts.RealtimeCounts, variations[variationVarB].Id) + if vcB == nil { + t.Fatalf("variation b is missing") + } + if vcB.UserCount != 0 { + continue + } + if vcB.EventCount != 0 { + continue + } + if vcB.ValueSum != float64(0) { + continue + } + if vcB.ValueSumPerUserMean != float64(0.0) { + continue + } + if vcB.ValueSumPerUserVariance != float64(0.0) { + continue + } + break + } +} + +func TestHTTPTrack(t *testing.T) { + t.Parallel() + featureClient := newFeatureClient(t) + defer featureClient.Close() + experimentClient := newExperimentClient(t) + defer experimentClient.Close() + ecClient := newEventCounterClient(t) 
+ defer ecClient.Close() + uuid := newUUID(t) + + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + + tag := fmt.Sprintf("%s-tag-%s", prefixTestName, uuid) + userID := createUserID(t, uuid) + featureID := createFeatureID(t, uuid) + value := float64(1.23) + + variationVarA := "a" + variationVarB := "b" + cmd := newCreateFeatureCommand(featureID, []string{variationVarA, variationVarB}) + createFeature(t, featureClient, cmd) + addTag(t, tag, featureID, featureClient) + enableFeature(t, featureID, featureClient) + f := getFeature(t, featureClient, featureID) + goalIDs := createGoals(ctx, t, experimentClient, 1) + variations := make(map[string]*featureproto.Variation) + variationIDs := []string{} + for _, v := range f.Variations { + variationIDs = append(variationIDs, v.Id) + variations[v.Value] = v + } + + // Get user evaluations, which creates user in redis. + for i := 0; i < retryTimes; i++ { + resp := getEvaluation(t, tag, userID) + if len(resp.Evaluations.Evaluations) == 1 { + break + } + if i == retryTimes-1 { + t.Fatalf("State did not change. Expected: %v, actual: %v", featureproto.UserEvaluations_FULL, resp.State) + } + time.Sleep(time.Second) + } + time.Sleep(5 * time.Second) + + registerEvaluationEvent(t, featureID, f.Version, userID, f.Variations[0].Id, tag) + // Send track events. 
+ sendHTTPTrack(t, userID, goalIDs[0], tag, value) + + // Check the count + for i := 0; i < retryTimes; i++ { + if i == retryTimes-1 { + t.Fatalf("retry timeout") + } + time.Sleep(time.Second) + + resp := getGoalCountV2(t, ecClient, goalIDs[0], featureID, f.Version, variationIDs) + if len(resp.GoalCounts.RealtimeCounts) == 0 { + t.Fatalf("no count returned") + } + if resp.GoalCounts.GoalId != goalIDs[0] { + t.Fatalf("goal ID is not correct: %s", resp.GoalCounts.GoalId) + } + + // variation a + vcA := getVariationCount(resp.GoalCounts.RealtimeCounts, variations[variationVarA].Id) + if vcA == nil { + t.Fatalf("variation a is missing") + } + if vcA.UserCount != 1 { + continue + } + if vcA.EventCount != 1 { + continue + } + if vcA.ValueSum != value { + continue + } + // variation b + vcB := getVariationCount(resp.GoalCounts.RealtimeCounts, variations[variationVarB].Id) + if vcB == nil { + t.Fatalf("variation a is missing") + } + if vcB.UserCount != 0 { + continue + } + if vcB.EventCount != 0 { + continue + } + if vcB.ValueSum != float64(0) { + continue + } + break + } +} + +func TestGrpcEvaluationEventCountV2(t *testing.T) { + t.Parallel() + featureClient := newFeatureClient(t) + defer featureClient.Close() + ecClient := newEventCounterClient(t) + defer ecClient.Close() + uuid := newUUID(t) + + tag := fmt.Sprintf("%s-tag-%s", prefixTestName, uuid) + userID := createUserID(t, uuid) + featureID := createFeatureID(t, uuid) + variationVarA := "a" + variationVarB := "b" + + cmd := newCreateFeatureCommand(featureID, []string{variationVarA, variationVarB}) + createFeature(t, featureClient, cmd) + addTag(t, tag, featureID, featureClient) + enableFeature(t, featureID, featureClient) + f := getFeature(t, featureClient, featureID) + variations := make(map[string]*featureproto.Variation) + variationIDs := []string{} + for _, v := range f.Variations { + variationIDs = append(variationIDs, v.Id) + variations[v.Value] = v + } + + grpcRegisterEvaluationEvent(t, featureID, f.Version, 
		userID, variations[variationVarA].Id, tag)

	// Poll until variation a reports exactly 1 user / 1 event; variation b
	// must remain untouched. Count mismatches are retried (eventual
	// consistency); identity mismatches fail immediately.
	for i := 0; i < retryTimes; i++ {
		if i == retryTimes-1 {
			t.Fatalf("retry timeout")
		}
		time.Sleep(time.Second)

		resp := getEvaluationCountV2(t, ecClient, featureID, f.Version, variationIDs)
		if resp == nil {
			continue
		}
		ec := resp.Count
		if ec.FeatureId != featureID {
			t.Fatalf("feature ID is not correct: %s", ec.FeatureId)
		}
		if ec.FeatureVersion != f.Version {
			t.Fatalf("feature version is not correct: %d", ec.FeatureVersion)
		}

		vcA := getVariationCount(ec.RealtimeCounts, variations[variationVarA].Id)
		if vcA == nil {
			t.Fatalf("variation a is missing")
		}
		if vcA.UserCount != 1 {
			continue
		}
		if vcA.EventCount != 1 {
			continue
		}
		// Evaluation events carry no value; the sum must stay zero.
		if vcA.ValueSum != float64(0) {
			continue
		}

		vcB := getVariationCount(ec.RealtimeCounts, variations[variationVarB].Id)
		if vcB == nil {
			t.Fatalf("variation b is missing")
		}
		if vcB.UserCount != 0 {
			continue
		}
		if vcB.EventCount != 0 {
			continue
		}
		if vcB.ValueSum != float64(0) {
			continue
		}
		break
	}
}

// TestEvaluationEventCountV2 is the REST-gateway twin of
// TestGrpcEvaluationEventCountV2: one evaluation event for variation a,
// then poll GetEvaluationCountV2 until the counts converge.
func TestEvaluationEventCountV2(t *testing.T) {
	t.Parallel()
	featureClient := newFeatureClient(t)
	defer featureClient.Close()
	ecClient := newEventCounterClient(t)
	defer ecClient.Close()
	uuid := newUUID(t)

	tag := fmt.Sprintf("%s-tag-%s", prefixTestName, uuid)
	userID := createUserID(t, uuid)
	featureID := createFeatureID(t, uuid)
	variationVarA := "a"
	variationVarB := "b"

	cmd := newCreateFeatureCommand(featureID, []string{variationVarA, variationVarB})
	createFeature(t, featureClient, cmd)
	addTag(t, tag, featureID, featureClient)
	enableFeature(t, featureID, featureClient)
	f := getFeature(t, featureClient, featureID)
	variations := make(map[string]*featureproto.Variation)
	variationIDs := []string{}
	for _, v := range f.Variations {
		variationIDs = append(variationIDs, v.Id)
		variations[v.Value] = v
	}

	registerEvaluationEvent(t, featureID, f.Version, userID,
		variations[variationVarA].Id, tag)

	for i := 0; i < retryTimes; i++ {
		if i == retryTimes-1 {
			t.Fatalf("retry timeout")
		}
		time.Sleep(time.Second)

		resp := getEvaluationCountV2(t, ecClient, featureID, f.Version, variationIDs)
		if resp == nil {
			continue
		}
		ec := resp.Count
		if ec.FeatureId != featureID {
			t.Fatalf("feature ID is not correct: %s", ec.FeatureId)
		}
		if ec.FeatureVersion != f.Version {
			t.Fatalf("feature version is not correct: %d", ec.FeatureVersion)
		}

		vcA := getVariationCount(ec.RealtimeCounts, variations[variationVarA].Id)
		if vcA == nil {
			t.Fatalf("variation a is missing")
		}
		if vcA.UserCount != 1 {
			continue
		}
		if vcA.EventCount != 1 {
			continue
		}
		if vcA.ValueSum != float64(0) {
			continue
		}

		vcB := getVariationCount(ec.RealtimeCounts, variations[variationVarB].Id)
		if vcB == nil {
			t.Fatalf("variation b is missing")
		}
		if vcB.UserCount != 0 {
			continue
		}
		if vcB.EventCount != 0 {
			continue
		}
		if vcB.ValueSum != float64(0) {
			continue
		}
		break
	}
}

// getVariationCount returns the VariationCount whose VariationId matches id,
// or nil when it is absent from vcs.
func getVariationCount(vcs []*ecproto.VariationCount, id string) *ecproto.VariationCount {
	for _, vc := range vcs {
		if vc.VariationId == id {
			return vc
		}
	}
	return nil
}

// newExperimentClient dials the experiment service through the web gateway
// using service-token credentials; fails the test on any setup error.
func newExperimentClient(t *testing.T) experimentclient.Client {
	t.Helper()
	creds, err := rpcclient.NewPerRPCCredentials(*serviceTokenPath)
	if err != nil {
		t.Fatal("Failed to create RPC credentials:", err)
	}
	client, err := experimentclient.NewClient(
		fmt.Sprintf("%s:%d", *webGatewayAddr, *webGatewayPort),
		*webGatewayCert,
		rpcclient.WithPerRPCCredentials(creds),
		rpcclient.WithDialTimeout(30*time.Second),
		rpcclient.WithBlock(),
	)
	if err != nil {
		t.Fatal("Failed to create experiment client:", err)
	}
	return client
}

// createGoals creates `total` goals with generated IDs and returns those IDs.
// (Definition continues past this chunk boundary.)
func createGoals(ctx context.Context, t *testing.T, client experimentclient.Client, total int) []string {
	t.Helper()
	goalIDs := make([]string, 0)
	for i := 0; i < total; i++ {
		uuid := newUUID(t)
		cmd
:= &experimentproto.CreateGoalCommand{
			Id:          createGoalID(t, uuid),
			Name:        createGoalID(t, uuid),
			Description: fmt.Sprintf("%s-goal-description", prefixTestName),
		}
		_, err := client.CreateGoal(ctx, &experimentproto.CreateGoalRequest{
			Command:              cmd,
			EnvironmentNamespace: *environmentNamespace,
		})
		if err != nil {
			t.Fatal(err)
		}
		goalIDs = append(goalIDs, cmd.Id)
	}
	return goalIDs
}

// createExperimentWithMultiGoals creates an experiment over featureID bound
// to all of goalIDs, running from startAt to stopAt, and returns it.
func createExperimentWithMultiGoals(
	ctx context.Context,
	t *testing.T,
	client experimentclient.Client,
	name string,
	featureID string,
	goalIDs []string,
	baseVariationID string,
	startAt, stopAt time.Time,
) *experimentproto.Experiment {
	t.Helper() // added for consistency with the other test helpers
	cmd := &experimentproto.CreateExperimentCommand{
		Name:            name + strings.Join(goalIDs, ","),
		FeatureId:       featureID,
		GoalIds:         goalIDs,
		StartAt:         startAt.Unix(),
		StopAt:          stopAt.Unix(),
		BaseVariationId: baseVariationID,
	}
	resp, err := client.CreateExperiment(ctx, &experimentproto.CreateExperimentRequest{
		Command:              cmd,
		EnvironmentNamespace: *environmentNamespace,
	})
	if err != nil {
		t.Fatal(err)
	}
	return resp.Experiment
}

// grpcRegisterGoalEvent registers a single goal event via the gRPC gateway.
func grpcRegisterGoalEvent(
	t *testing.T,
	goalID, userID, tag string,
	value float64,
) {
	t.Helper()
	c := newGatewayClient(t)
	defer c.Close()
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	goal, err := ptypes.MarshalAny(&eventproto.GoalEvent{
		Timestamp: time.Now().Unix(),
		GoalId:    goalID,
		UserId:    userID,
		Value:     value,
		User:      &userproto.User{},
		Tag:       tag,
	})
	if err != nil {
		t.Fatal(err)
	}
	req := &gatewayproto.RegisterEventsRequest{
		Events: []*eventproto.Event{
			{
				Id:    newUUID(t),
				Event: goal,
			},
		},
	}
	response, err := c.RegisterEvents(ctx, req)
	if err != nil {
		t.Fatal(err)
	}
	if len(response.Errors) > 0 {
		t.Fatalf("Failed to register events. Error: %v", response.Errors)
	}
}

// registerGoalEvent registers a single goal event via the REST gateway
// (util.RegisterEvents).
func registerGoalEvent(
	t *testing.T,
	goalID, userID, tag string,
	value float64,
) {
	t.Helper()
	// Removed an unused gRPC gateway client that was dialed here but never
	// used — this helper sends through util.RegisterEvents only.
	goal, err := protojson.Marshal(&eventproto.GoalEvent{
		Timestamp: time.Now().Unix(),
		GoalId:    goalID,
		UserId:    userID,
		Value:     value,
		User:      &userproto.User{},
		Tag:       tag,
	})
	if err != nil {
		t.Fatal(err)
	}
	events := []util.Event{
		{
			ID:    newUUID(t),
			Event: goal,
			Type:  util.GoalEventType,
		},
	}
	response := util.RegisterEvents(t, events, *gatewayAddr, *apiKeyPath)
	if len(response.Errors) > 0 {
		t.Fatalf("Failed to register events. Error: %v", response.Errors)
	}
}

// Test for old SDK client
// Evaluation field in the GoalEvent is deprecated.
func registerGoalEventWithEvaluations(
	t *testing.T,
	featureID string,
	featureVersion int32,
	goalID, userID, variationID string,
	value float64,
) {
	t.Helper()
	c := newGatewayClient(t)
	defer c.Close()
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	goal, err := ptypes.MarshalAny(&eventproto.GoalEvent{
		Timestamp: time.Now().Unix(),
		GoalId:    goalID,
		UserId:    userID,
		Value:     value,
		User:      &userproto.User{},
		Evaluations: []*featureproto.Evaluation{
			{
				Id:             fmt.Sprintf("%s-evaluation-id-%s", prefixTestName, newUUID(t)),
				FeatureId:      featureID,
				FeatureVersion: featureVersion,
				UserId:         userID,
				VariationId:    variationID,
			},
		},
	})
	if err != nil {
		t.Fatal(err)
	}
	req := &gatewayproto.RegisterEventsRequest{
		Events: []*eventproto.Event{
			{
				Id:    newUUID(t),
				Event: goal,
			},
		},
	}
	response, err := c.RegisterEvents(ctx, req)
	if err != nil {
		t.Fatal(err)
	}
	if len(response.Errors) > 0 {
		t.Fatalf("Failed to register events. 
Error: %v", response.Errors)
	}
}

// registerGoalBatchEvent sends a GoalBatchEvent for userID containing a
// single goal event (value fixed at 0.3) under the given tag, via gRPC.
func registerGoalBatchEvent(t *testing.T, tag, goalID, userID string) {
	t.Helper()
	c := newGatewayClient(t)
	defer c.Close()
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	gb, err := ptypes.MarshalAny(&eventproto.GoalBatchEvent{
		UserId: userID,
		UserGoalEventsOverTags: []*eventproto.UserGoalEventsOverTag{
			{
				Tag: tag,
				UserGoalEvents: []*eventproto.UserGoalEvent{
					{
						Timestamp: time.Now().Unix(),
						GoalId:    goalID,
						Value:     0.3,
					},
				},
			},
		},
	})
	if err != nil {
		t.Fatal(err)
	}
	req := &gatewayproto.RegisterEventsRequest{
		Events: []*eventproto.Event{
			{
				Id:    newUUID(t),
				Event: gb,
			},
		},
	}
	response, err := c.RegisterEvents(ctx, req)
	if err != nil {
		t.Fatal(err)
	}
	if len(response.Errors) > 0 {
		t.Fatalf("Failed to register a goal batch event. Error: %v", response.Errors)
	}
}

// sendHTTPTrack fires a GET against the gateway's /track endpoint with the
// API key read from *apiKeyPath, registering one goal event for userID.
func sendHTTPTrack(t *testing.T, userID, goalID, tag string, value float64) {
	t.Helper() // added: report failures at the caller's line, like sibling helpers
	data, err := ioutil.ReadFile(*apiKeyPath)
	if err != nil {
		t.Fatal(err)
	}
	// NOTE(review): query values are not URL-escaped (url.QueryEscape); fine
	// for the generated IDs/tags used in these tests, but would break on
	// special characters — confirm before reusing with arbitrary input.
	url := fmt.Sprintf("https://%s/track?timestamp=%d&apikey=%s&userid=%s&goalid=%s&tag=%s&value=%f",
		*gatewayAddr,
		time.Now().Unix(),
		strings.TrimSpace(string(data)),
		userID, goalID, tag, value)
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		t.Fatal(err)
	}
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("Send HTTP track request failed: %d", resp.StatusCode)
	}
}

// grpcRegisterEvaluationEvent registers a single evaluation event via the
// gRPC gateway.
func grpcRegisterEvaluationEvent(
	t *testing.T,
	featureID string,
	featureVersion int32,
	userID, variationID, tag string,
) {
	t.Helper()
	c := newGatewayClient(t)
	defer c.Close()
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	evaluation, err := ptypes.MarshalAny(&eventproto.EvaluationEvent{
		Timestamp:      time.Now().Unix(),
		FeatureId:      featureID,
		FeatureVersion: featureVersion,
		UserId:         userID,
		VariationId:    variationID,
		User:           &userproto.User{Data: map[string]string{"appVersion": "0.1.0"}},
		Reason:         &featureproto.Reason{},
		Tag:            tag,
	})
	if err != nil {
		t.Fatal(err)
	}
	req := &gatewayproto.RegisterEventsRequest{
		Events: []*eventproto.Event{
			{
				Id:    newUUID(t),
				Event: evaluation,
			},
		},
	}
	response, err := c.RegisterEvents(ctx, req)
	if err != nil {
		t.Fatal(err)
	}
	if len(response.Errors) > 0 {
		t.Fatalf("Failed to register events. Error: %v", response.Errors)
	}
}

// registerEvaluationEvent registers a single evaluation event via the REST
// gateway (util.RegisterEvents). (Definition continues past this chunk.)
func registerEvaluationEvent(
	t *testing.T,
	featureID string,
	featureVersion int32,
	userID, variationID, tag string,
) {
	t.Helper()
	evaluation, err := protojson.Marshal(&eventproto.EvaluationEvent{
		Timestamp:      time.Now().Unix(),
		FeatureId:      featureID,
		FeatureVersion: featureVersion,
		UserId:         userID,
		VariationId:    variationID,
		User:           &userproto.User{},
		Reason:         &featureproto.Reason{},
		Tag:            tag,
	})
	if err != nil {
		t.Fatal(err)
	}
	events := []util.Event{
		{
			ID:    newUUID(t),
			Event: evaluation,
			Type:  util.EvaluationEventType,
		},
	}
	response := util.RegisterEvents(t, events, *gatewayAddr, *apiKeyPath)
	if len(response.Errors) > 0 {
		t.Fatalf("Failed to register events. 
Error: %v", response.Errors) + } +} + +func newGatewayClient(t *testing.T) gatewayclient.Client { + t.Helper() + creds, err := gatewayclient.NewPerRPCCredentials(*apiKeyPath) + if err != nil { + t.Fatal("Failed to create RPC credentials:", err) + } + client, err := gatewayclient.NewClient( + fmt.Sprintf("%s:%d", *gatewayAddr, *gatewayPort), + *gatewayCert, + rpcclient.WithPerRPCCredentials(creds), + rpcclient.WithDialTimeout(30*time.Second), + rpcclient.WithBlock(), + ) + if err != nil { + t.Fatal("Failed to create gateway client:", err) + } + return client +} + +func newUUID(t *testing.T) string { + t.Helper() + id, err := uuid.NewUUID() + if err != nil { + t.Fatal(err) + } + return id.String() +} + +func newFeatureClient(t *testing.T) featureclient.Client { + t.Helper() + creds, err := rpcclient.NewPerRPCCredentials(*serviceTokenPath) + if err != nil { + t.Fatal("Failed to create RPC credentials:", err) + } + featureClient, err := featureclient.NewClient( + fmt.Sprintf("%s:%d", *webGatewayAddr, *webGatewayPort), + *webGatewayCert, + rpcclient.WithPerRPCCredentials(creds), + rpcclient.WithDialTimeout(30*time.Second), + rpcclient.WithBlock(), + ) + if err != nil { + t.Fatal("Failed to create feature client:", err) + } + return featureClient +} + +func newEventCounterClient(t *testing.T) ecclient.Client { + t.Helper() + creds, err := rpcclient.NewPerRPCCredentials(*serviceTokenPath) + if err != nil { + t.Fatal("Failed to create RPC credentials:", err) + } + client, err := ecclient.NewClient( + fmt.Sprintf("%s:%d", *webGatewayAddr, *webGatewayPort), + *webGatewayCert, + rpcclient.WithPerRPCCredentials(creds), + rpcclient.WithDialTimeout(30*time.Second), + rpcclient.WithBlock(), + ) + if err != nil { + t.Fatal("Failed to create experiment client:", err) + } + return client +} + +func newCreateFeatureCommand(featureID string, variations []string) *featureproto.CreateFeatureCommand { + cmd := &featureproto.CreateFeatureCommand{ + Id: featureID, + Name: featureID, + 
Description: "e2e-test-eventcounter-feature-description", + Variations: []*featureproto.Variation{}, + Tags: []string{ + "e2e-test-tag-1", + "e2e-test-tag-2", + "e2e-test-tag-3", + }, + DefaultOnVariationIndex: &wrappers.Int32Value{Value: int32(0)}, + DefaultOffVariationIndex: &wrappers.Int32Value{Value: int32(1)}, + } + for _, v := range variations { + cmd.Variations = append(cmd.Variations, &featureproto.Variation{ + Value: v, + Name: "Variation " + v, + Description: "Thing does " + v, + }) + } + return cmd +} + +func createFeature(t *testing.T, client featureclient.Client, cmd *featureproto.CreateFeatureCommand) { + t.Helper() + createReq := &featureproto.CreateFeatureRequest{ + Command: cmd, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + if _, err := client.CreateFeature(ctx, createReq); err != nil { + t.Fatal(err) + } +} + +func addTag(t *testing.T, tag string, featureID string, client featureclient.Client) { + t.Helper() + addReq := &featureproto.UpdateFeatureDetailsRequest{ + Id: featureID, + AddTagCommands: []*featureproto.AddTagCommand{ + {Tag: tag}, + }, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + if _, err := client.UpdateFeatureDetails(ctx, addReq); err != nil { + t.Fatal(err) + } +} + +func enableFeature(t *testing.T, featureID string, client featureclient.Client) { + t.Helper() + enableReq := &featureproto.EnableFeatureRequest{ + Id: featureID, + Command: &featureproto.EnableFeatureCommand{}, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + if _, err := client.EnableFeature(ctx, enableReq); err != nil { + t.Fatalf("Failed to enable feature id: %s. 
Error: %v", featureID, err) + } +} + +func getEvaluation(t *testing.T, tag string, userID string) *gatewayproto.GetEvaluationsResponse { + t.Helper() + c := newGatewayClient(t) + defer c.Close() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + req := &gatewayproto.GetEvaluationsRequest{ + Tag: tag, + User: &userproto.User{Id: userID}, + } + response, err := c.GetEvaluations(ctx, req) + if err != nil { + t.Fatal(err) + } + return response +} + +func getExperiment(t *testing.T, c experimentclient.Client, id string) *experimentproto.GetExperimentResponse { + t.Helper() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + req := &experimentproto.GetExperimentRequest{ + EnvironmentNamespace: *environmentNamespace, + Id: id, + } + res, err := c.GetExperiment(ctx, req) + if err != nil { + // pass not found error + if err.Error() != "rpc error: code = NotFound desc = eventcounter: not found" { + t.Fatal(err) + } + } + return res +} + +func getExperimentResult(t *testing.T, c ecclient.Client, experimentID string) *ecproto.GetExperimentResultResponse { + t.Helper() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + req := &ecproto.GetExperimentResultRequest{ + ExperimentId: experimentID, + EnvironmentNamespace: *environmentNamespace, + } + response, err := c.GetExperimentResult(ctx, req) + if err != nil { + // pass not found error + if err.Error() != "rpc error: code = NotFound desc = eventcounter: not found" { + t.Fatal(err) + } + } + return response +} + +func getEvaluationCountV2(t *testing.T, c ecclient.Client, featureID string, featureVersion int32, variationIDs []string) *ecproto.GetEvaluationCountV2Response { + t.Helper() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + now := time.Now() + req := &ecproto.GetEvaluationCountV2Request{ + EnvironmentNamespace: *environmentNamespace, + StartAt: now.Add(-30 * 24 * 
time.Hour).Unix(), + EndAt: now.Unix(), + FeatureId: featureID, + FeatureVersion: featureVersion, + VariationIds: variationIDs, + } + response, err := c.GetEvaluationCountV2(ctx, req) + if err != nil { + t.Fatal(err) + } + return response +} + +func getGoalCountV2(t *testing.T, c ecclient.Client, goalID, featureID string, featureVersion int32, variationIDs []string) *ecproto.GetGoalCountV2Response { + t.Helper() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + now := time.Now() + req := &ecproto.GetGoalCountV2Request{ + EnvironmentNamespace: *environmentNamespace, + StartAt: now.Add(-30 * 24 * time.Hour).Unix(), + EndAt: now.Unix(), + GoalId: goalID, + FeatureId: featureID, + FeatureVersion: featureVersion, + VariationIds: variationIDs, + } + response, err := c.GetGoalCountV2(ctx, req) + if err != nil { + t.Fatal(err) + } + return response +} + +func getFeature(t *testing.T, client featureclient.Client, featureID string) *featureproto.Feature { + t.Helper() + getReq := &featureproto.GetFeatureRequest{ + Id: featureID, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + response, err := client.GetFeature(ctx, getReq) + if err != nil { + t.Fatal("Failed to get feature:", err) + } + return response.Feature +} + +func createFeatureID(t *testing.T, uuid string) string { + if *testID != "" { + return fmt.Sprintf("%s-%s-feature-id-%s", prefixTestName, *testID, uuid) + } + return fmt.Sprintf("%s-feature-id-%s", prefixTestName, uuid) +} + +func createGoalID(t *testing.T, uuid string) string { + if *testID != "" { + return fmt.Sprintf("%s-%s-goal-id-%s", prefixTestName, *testID, uuid) + } + return fmt.Sprintf("%s-goal-id-%s", prefixTestName, uuid) +} + +func createUserID(t *testing.T, uuid string) string { + if *testID != "" { + return fmt.Sprintf("%s-%s-user-id-%s", prefixTestName, *testID, uuid) + } + return fmt.Sprintf("%s-user-id-%s", 
prefixTestName, uuid) +} diff --git a/test/e2e/experiment/BUILD.bazel b/test/e2e/experiment/BUILD.bazel new file mode 100644 index 000000000..9b5aa762c --- /dev/null +++ b/test/e2e/experiment/BUILD.bazel @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_test") + +go_test( + name = "go_default_test", + srcs = ["experiment_test.go"], + deps = [ + "//pkg/experiment/client:go_default_library", + "//pkg/feature/client:go_default_library", + "//pkg/rpc/client:go_default_library", + "//pkg/uuid:go_default_library", + "//proto/experiment:go_default_library", + "//proto/feature:go_default_library", + "@com_github_golang_protobuf//proto:go_default_library", + "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", + ], +) diff --git a/test/e2e/experiment/experiment_test.go b/test/e2e/experiment/experiment_test.go new file mode 100644 index 000000000..4800d2e7d --- /dev/null +++ b/test/e2e/experiment/experiment_test.go @@ -0,0 +1,775 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package experiment + +import ( + "context" + "flag" + "fmt" + "strings" + "testing" + "time" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/wrappers" + + experimentclient "github.com/bucketeer-io/bucketeer/pkg/experiment/client" + featureclient "github.com/bucketeer-io/bucketeer/pkg/feature/client" + rpcclient "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + "github.com/bucketeer-io/bucketeer/pkg/uuid" + experimentproto "github.com/bucketeer-io/bucketeer/proto/experiment" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +const ( + prefixTestName = "e2e-test" + timeout = 10 * time.Second + retryTimes = 250 +) + +var ( + // FIXME: To avoid compiling the test many times, webGatewayAddr, webGatewayPort & apiKey has been also added here to prevent from getting: "flag provided but not defined" error during the test. These 3 are being use in the Gateway test + webGatewayAddr = flag.String("web-gateway-addr", "", "Web gateway endpoint address") + webGatewayPort = flag.Int("web-gateway-port", 443, "Web gateway endpoint port") + webGatewayCert = flag.String("web-gateway-cert", "", "Web gateway crt file") + apiKeyPath = flag.String("api-key", "", "Api key path for web gateway") + gatewayAddr = flag.String("gateway-addr", "", "Gateway endpoint address") + gatewayPort = flag.Int("gateway-port", 443, "Gateway endpoint port") + gatewayCert = flag.String("gateway-cert", "", "Gateway crt file") + serviceTokenPath = flag.String("service-token", "", "Service token path") + environmentNamespace = flag.String("environment-namespace", "", "Environment namespace") + testID = flag.String("test-id", "", "test ID") +) + +func TestCreateAndGetExperiment(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + c := newExperimentClient(t) + defer c.Close() + featureID := createFeatureID(t) + createFeature(ctx, t, featureID) + goalIDs := createGoals(ctx, t, c, 2) + startAt := 
time.Now() + stopAt := startAt.Local().Add(time.Hour * 1) + feature := getFeature(ctx, t, featureID) + expected := createExperimentWithMultiGoals(ctx, t, c, featureID, feature.Variations[0].Id, goalIDs, startAt, stopAt) + getResp, err := c.GetExperiment(ctx, &experimentproto.GetExperimentRequest{ + Id: expected.Id, + EnvironmentNamespace: *environmentNamespace, + }) + if err != nil { + t.Fatal(err) + } + actual := getResp.Experiment + if !proto.Equal(expected, actual) { + t.Fatalf("Experiment is not equal. Expected: %v, actual: %v", expected, actual) + } +} + +func TestListExperiments(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + c := newExperimentClient(t) + defer c.Close() + featureID := createFeatureID(t) + createFeature(ctx, t, featureID) + feature := getFeature(ctx, t, featureID) + goalIDs := createGoals(ctx, t, c, 5) + startAt := time.Now() + stopAt := startAt.Local().Add(time.Hour * 1) + expectedExps := createExperimentsWithMultiGoals(ctx, t, c, featureID, feature.Variations[0].Id, goalIDs, startAt, stopAt, 5) + actualExps := []*experimentproto.Experiment{} + for _, e := range expectedExps { + getResp, err := c.GetExperiment(ctx, &experimentproto.GetExperimentRequest{ + Id: e.Id, + EnvironmentNamespace: *environmentNamespace, + }) + if err != nil { + t.Fatal(err) + } + actualExps = append(actualExps, getResp.Experiment) + } + compareExperiments(t, expectedExps, actualExps) +} + +func TestStopExperiment(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + c := newExperimentClient(t) + defer c.Close() + featureID := createFeatureID(t) + createFeature(ctx, t, featureID) + goalIDs := createGoals(ctx, t, c, 1) + feature := getFeature(ctx, t, featureID) + startAt := time.Now() + stopAt := startAt.Local().Add(time.Hour * 1) + e := createExperimentWithMultiGoals(ctx, t, c, featureID, feature.Variations[0].Id, goalIDs, startAt, 
stopAt) + if _, err := c.StopExperiment(ctx, &experimentproto.StopExperimentRequest{ + Id: e.Id, + Command: &experimentproto.StopExperimentCommand{}, + EnvironmentNamespace: *environmentNamespace, + }); err != nil { + t.Fatal(err) + } + getResp, err := c.GetExperiment(ctx, &experimentproto.GetExperimentRequest{ + Id: e.Id, + EnvironmentNamespace: *environmentNamespace, + }) + if err != nil { + t.Fatal(err) + } + if !getResp.Experiment.Stopped { + t.Fatal("Experiment was not stopped") + } +} + +func TestArchiveExperiment(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + c := newExperimentClient(t) + defer c.Close() + featureID := createFeatureID(t) + createFeature(ctx, t, featureID) + goalIDs := createGoals(ctx, t, c, 1) + feature := getFeature(ctx, t, featureID) + startAt := time.Now() + stopAt := startAt.Local().Add(time.Hour * 1) + e := createExperimentWithMultiGoals(ctx, t, c, featureID, feature.Variations[0].Id, goalIDs, startAt, stopAt) + if _, err := c.ArchiveExperiment(ctx, &experimentproto.ArchiveExperimentRequest{ + Id: e.Id, + Command: &experimentproto.ArchiveExperimentCommand{}, + EnvironmentNamespace: *environmentNamespace, + }); err != nil { + t.Fatal(err) + } + getResp, err := c.GetExperiment(ctx, &experimentproto.GetExperimentRequest{ + Id: e.Id, + EnvironmentNamespace: *environmentNamespace, + }) + if err != nil { + t.Fatal(err) + } + if !getResp.Experiment.Archived { + t.Fatal("Experiment was not archived") + } +} + +func TestDeleteExperiment(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + c := newExperimentClient(t) + defer c.Close() + featureID := createFeatureID(t) + createFeature(ctx, t, featureID) + goalIDs := createGoals(ctx, t, c, 1) + feature := getFeature(ctx, t, featureID) + startAt := time.Now() + stopAt := startAt.Local().Add(time.Hour * 1) + e := createExperimentWithMultiGoals(ctx, t, c, featureID, 
feature.Variations[0].Id, goalIDs, startAt, stopAt) + if _, err := c.DeleteExperiment(ctx, &experimentproto.DeleteExperimentRequest{ + Id: e.Id, + Command: &experimentproto.DeleteExperimentCommand{}, + EnvironmentNamespace: *environmentNamespace, + }); err != nil { + t.Fatal(err) + } + getResp, err := c.GetExperiment(ctx, &experimentproto.GetExperimentRequest{ + Id: e.Id, + EnvironmentNamespace: *environmentNamespace, + }) + if err != nil { + t.Fatal(err) + } + if !getResp.Experiment.Deleted { + t.Fatal("Experiment was not deleted") + } +} + +func TestUpdateExperiment(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + c := newExperimentClient(t) + defer c.Close() + featureID := createFeatureID(t) + createFeature(ctx, t, featureID) + goalIDs := createGoals(ctx, t, c, 1) + now := time.Now() + feature := getFeature(ctx, t, featureID) + startAt := time.Now() + stopAt := startAt.Local().Add(time.Hour * 1) + e := createExperimentWithMultiGoals(ctx, t, c, featureID, feature.Variations[0].Id, goalIDs, startAt, stopAt) + startAt = now.Local().Add(time.Minute * 30) + stopAt = now.Local().Add(time.Minute * 60) + if _, err := c.UpdateExperiment(ctx, &experimentproto.UpdateExperimentRequest{ + Id: e.Id, + ChangeExperimentPeriodCommand: &experimentproto.ChangeExperimentPeriodCommand{StartAt: startAt.Unix(), StopAt: stopAt.Unix()}, + EnvironmentNamespace: *environmentNamespace, + }); err != nil { + t.Fatal(err) + } + getResp, err := c.GetExperiment(ctx, &experimentproto.GetExperimentRequest{ + Id: e.Id, + EnvironmentNamespace: *environmentNamespace, + }) + if err != nil { + t.Fatal(err) + } + if startAt.Unix() != getResp.Experiment.StartAt { + t.Fatalf("StartAt is not equal. Expected: %d, actual: %d", startAt.Unix(), getResp.Experiment.StartAt) + } + if stopAt.Unix() != getResp.Experiment.StopAt { + t.Fatalf("StopAt is not equal. 
Expected: %d, actual: %d", stopAt.Unix(), getResp.Experiment.StopAt) + } +} + +func TestCreateAndGetGoal(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + c := newExperimentClient(t) + defer c.Close() + goalID := createGoal(ctx, t, c) + expectedName := fmt.Sprintf("%s-goal-name", goalID) + expectedDescription := fmt.Sprintf("%s-goal-description", goalID) + getResp, err := c.GetGoal(ctx, &experimentproto.GetGoalRequest{ + Id: goalID, + EnvironmentNamespace: *environmentNamespace, + }) + if err != nil { + t.Fatal(err) + } + actual := getResp.Goal + if goalID != actual.Id { + t.Fatalf("Goal id is not equal. Expected: %v, actual: %v", goalID, actual.Id) + } + if expectedName != actual.Name { + t.Fatalf("Goal name is not equal. Expected: %v, actual: %v", expectedName, actual.Name) + } + if expectedDescription != actual.Description { + t.Fatalf("Goal description is not equal. Expected: %v, actual: %v", expectedDescription, actual.Description) + } + if actual.Deleted { + t.Fatal("Goal deleted flag is true") + } +} + +func TestListGoalsCursor(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + c := newExperimentClient(t) + defer c.Close() + createGoals(ctx, t, c, 2) + expectedSize := 1 + listResp, err := c.ListGoals(ctx, &experimentproto.ListGoalsRequest{ + PageSize: int64(expectedSize), + EnvironmentNamespace: *environmentNamespace, + }) + if err != nil { + t.Fatal(err) + } + if listResp.Cursor == "" { + t.Fatal("Cursor is empty") + } + actualSize := len(listResp.Goals) + if expectedSize != actualSize { + t.Fatalf("Different sizes. 
Expected: %v, actual: %v", expectedSize, actualSize) + } + listResp, err = c.ListGoals(ctx, &experimentproto.ListGoalsRequest{ + PageSize: int64(expectedSize), + Cursor: listResp.Cursor, + EnvironmentNamespace: *environmentNamespace, + }) + if err != nil { + t.Fatal(err) + } + actualSize = len(listResp.Goals) + if expectedSize != actualSize { + t.Fatalf("Different sizes. Expected: %v, actual: %v", expectedSize, actualSize) + } +} + +func TestListGoalsPageSize(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + c := newExperimentClient(t) + defer c.Close() + createGoals(ctx, t, c, 3) + expectedSize := 3 + listResp, err := c.ListGoals(ctx, &experimentproto.ListGoalsRequest{ + PageSize: int64(expectedSize), + EnvironmentNamespace: *environmentNamespace, + }) + if err != nil { + t.Fatal(err) + } + actualSize := len(listResp.Goals) + if expectedSize != actualSize { + t.Fatalf("Different sizes. Expected: %v, actual: %v", expectedSize, actualSize) + } +} + +func TestUpdateGoal(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + c := newExperimentClient(t) + defer c.Close() + goalID := createGoal(ctx, t, c) + expectedName := fmt.Sprintf("%s-goal-new-name", prefixTestName) + expectedDescription := fmt.Sprintf("%s-goal-new-description", prefixTestName) + _, err := c.UpdateGoal(ctx, &experimentproto.UpdateGoalRequest{ + Id: goalID, + RenameCommand: &experimentproto.RenameGoalCommand{Name: expectedName}, + ChangeDescriptionCommand: &experimentproto.ChangeDescriptionGoalCommand{Description: expectedDescription}, + EnvironmentNamespace: *environmentNamespace, + }) + if err != nil { + t.Fatal(err) + } + getResp, err := c.GetGoal(ctx, &experimentproto.GetGoalRequest{ + Id: goalID, + EnvironmentNamespace: *environmentNamespace, + }) + if err != nil { + t.Fatal(err) + } + actual := getResp.Goal + if goalID != actual.Id { + t.Fatalf("Goal id is not 
equal. Expected: %v, actual: %v", goalID, actual.Id) + } + if expectedName != actual.Name { + t.Fatalf("Goal name is not equal. Expected: %v, actual: %v", expectedName, actual.Name) + } + if expectedDescription != actual.Description { + t.Fatalf("Goal description is not equal. Expected: %v, actual: %v", expectedDescription, actual.Description) + } + if actual.Deleted { + t.Fatal("Goal deleted flag is true") + } +} + +func TestArchiveGoal(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + c := newExperimentClient(t) + defer c.Close() + goalID := createGoal(ctx, t, c) + _, err := c.ArchiveGoal(ctx, &experimentproto.ArchiveGoalRequest{ + Id: goalID, + Command: &experimentproto.ArchiveGoalCommand{}, + EnvironmentNamespace: *environmentNamespace, + }) + if err != nil { + t.Fatal(err) + } + getResp, err := c.GetGoal(ctx, &experimentproto.GetGoalRequest{ + Id: goalID, + EnvironmentNamespace: *environmentNamespace, + }) + if err != nil { + t.Fatal(err) + } + if !getResp.Goal.Archived { + t.Fatal("Goal archived flag is false") + } +} + +func TestDeleteGoal(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + c := newExperimentClient(t) + defer c.Close() + goalID := createGoal(ctx, t, c) + _, err := c.DeleteGoal(ctx, &experimentproto.DeleteGoalRequest{ + Id: goalID, + Command: &experimentproto.DeleteGoalCommand{}, + EnvironmentNamespace: *environmentNamespace, + }) + if err != nil { + t.Fatal(err) + } + getResp, err := c.GetGoal(ctx, &experimentproto.GetGoalRequest{ + Id: goalID, + EnvironmentNamespace: *environmentNamespace, + }) + if err != nil { + t.Fatal(err) + } + if !getResp.Goal.Deleted { + t.Fatal("Goal deleted flag is false") + } +} + +func TestStatusUpdateFromWaitingToRunning(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), 150*time.Second) + defer cancel() + c := newExperimentClient(t) + defer 
c.Close() + featureID := createFeatureID(t) + createFeature(ctx, t, featureID) + goalIDs := createGoals(ctx, t, c, 1) + startAt := time.Now() + stopAt := startAt.Local().Add(time.Hour) + feature := getFeature(ctx, t, featureID) + expected := createExperimentWithMultiGoals(ctx, t, c, featureID, feature.Variations[0].Id, goalIDs, startAt, stopAt) + resp, err := c.GetExperiment(ctx, &experimentproto.GetExperimentRequest{ + Id: expected.Id, + EnvironmentNamespace: *environmentNamespace, + }) + if err != nil { + t.Fatal(err) + } + if resp.Experiment.Status != experimentproto.Experiment_WAITING { + t.Fatalf("Experiment status is not waiting. actual: %d", resp.Experiment.Status) + } + for i := 0; i < retryTimes; i++ { + resp, err = c.GetExperiment(ctx, &experimentproto.GetExperimentRequest{ + Id: expected.Id, + EnvironmentNamespace: *environmentNamespace, + }) + if err != nil { + t.Fatal(err) + } + if resp.Experiment.Status == experimentproto.Experiment_RUNNING { + break + } + if i == retryTimes-1 { + t.Fatalf("retry timeout") + } + time.Sleep(time.Second) + } +} + +func TestStatusUpdateFromRunningToStopped(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second) + defer cancel() + c := newExperimentClient(t) + defer c.Close() + featureID := createFeatureID(t) + createFeature(ctx, t, featureID) + goalIDs := createGoals(ctx, t, c, 1) + now := time.Now() + startAt := now.Local().Add(-4 * 24 * time.Hour) + stopAt := now.Local().Add(-3 * 24 * time.Hour) + feature := getFeature(ctx, t, featureID) + expected := createExperimentWithMultiGoals(ctx, t, c, featureID, feature.Variations[0].Id, goalIDs, startAt, stopAt) + resp, err := c.GetExperiment(ctx, &experimentproto.GetExperimentRequest{ + Id: expected.Id, + EnvironmentNamespace: *environmentNamespace, + }) + if err != nil { + t.Fatal(err) + } + if resp.Experiment.Status != experimentproto.Experiment_WAITING { + t.Fatalf("Experiment status is not waiting. 
actual: %d", resp.Experiment.Status) + } + if _, err = c.StartExperiment(ctx, &experimentproto.StartExperimentRequest{ + Id: expected.Id, + Command: &experimentproto.StartExperimentCommand{}, + EnvironmentNamespace: *environmentNamespace, + }); err != nil { + t.Fatal(err) + } + resp, err = c.GetExperiment(ctx, &experimentproto.GetExperimentRequest{ + Id: expected.Id, + EnvironmentNamespace: *environmentNamespace, + }) + if resp.Experiment.Status != experimentproto.Experiment_RUNNING { + t.Fatalf("Experiment status is not running. actual: %d", resp.Experiment.Status) + } + for i := 0; i < retryTimes; i++ { + resp, err = c.GetExperiment(ctx, &experimentproto.GetExperimentRequest{ + Id: expected.Id, + EnvironmentNamespace: *environmentNamespace, + }) + if err != nil { + t.Fatal(err) + } + if resp.Experiment.Status == experimentproto.Experiment_STOPPED { + break + } + if i == retryTimes-1 { + t.Fatalf(fmt.Sprintf("retry timeout: %s", resp.Experiment.Name)) + } + time.Sleep(time.Second) + } +} + +func TestStatusUpdateFromWaitingToStopped(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second) + defer cancel() + c := newExperimentClient(t) + defer c.Close() + featureID := createFeatureID(t) + createFeature(ctx, t, featureID) + goalIDs := createGoals(ctx, t, c, 1) + now := time.Now() + startAt := now.Local().Add(-4 * 24 * time.Hour) + stopAt := now.Local().Add(-3 * 24 * time.Hour) + feature := getFeature(ctx, t, featureID) + expected := createExperimentWithMultiGoals(ctx, t, c, featureID, feature.Variations[0].Id, goalIDs, startAt, stopAt) + resp, err := c.GetExperiment(ctx, &experimentproto.GetExperimentRequest{ + Id: expected.Id, + EnvironmentNamespace: *environmentNamespace, + }) + if err != nil { + t.Fatal(err) + } + if resp.Experiment.Status != experimentproto.Experiment_WAITING { + t.Fatalf("Experiment status is not waiting. 
actual: %d", resp.Experiment.Status) + } + for i := 0; i < retryTimes; i++ { + resp, err = c.GetExperiment(ctx, &experimentproto.GetExperimentRequest{ + Id: expected.Id, + EnvironmentNamespace: *environmentNamespace, + }) + if err != nil { + t.Fatal(err) + } + if resp.Experiment.Status == experimentproto.Experiment_STOPPED { + break + } + if i == retryTimes-1 { + t.Fatalf(fmt.Sprintf("retry timeout: %s", resp.Experiment.Name)) + } + time.Sleep(time.Second) + } +} + +func createGoal(ctx context.Context, t *testing.T, client experimentclient.Client) string { + t.Helper() + goalID := createGoalID(t) + cmd := &experimentproto.CreateGoalCommand{ + Id: goalID, + Name: fmt.Sprintf("%s-goal-name", goalID), + Description: fmt.Sprintf("%s-goal-description", goalID), + } + _, err := client.CreateGoal(ctx, &experimentproto.CreateGoalRequest{ + Command: cmd, + EnvironmentNamespace: *environmentNamespace, + }) + if err != nil { + t.Fatal(err) + } + return goalID +} + +func createGoals(ctx context.Context, t *testing.T, client experimentclient.Client, total int) []string { + t.Helper() + goalIDs := make([]string, 0, total) + for i := 0; i < total; i++ { + goalIDs = append(goalIDs, createGoal(ctx, t, client)) + } + return goalIDs +} + +func createExperimentsWithMultiGoals( + ctx context.Context, + t *testing.T, + client experimentclient.Client, + featureID, baseVariationID string, + goalIDs []string, + startAt, stopAt time.Time, + total int, +) []*experimentproto.Experiment { + e := []*experimentproto.Experiment{} + for i := 0; i < total; i++ { + e = append(e, createExperimentWithMultiGoals(ctx, t, client, featureID, baseVariationID, goalIDs, startAt, stopAt)) + } + return e +} + +func createExperimentWithMultiGoals( + ctx context.Context, + t *testing.T, + client experimentclient.Client, + featureID, baseVariationID string, + goalIDs []string, + startAt, stopAt time.Time, +) *experimentproto.Experiment { + cmd := &experimentproto.CreateExperimentCommand{ + FeatureId: featureID, + 
StartAt: startAt.Unix(), + StopAt: stopAt.Unix(), + GoalIds: goalIDs, + Name: strings.Join(goalIDs, ","), + BaseVariationId: baseVariationID, + } + resp, err := client.CreateExperiment(ctx, &experimentproto.CreateExperimentRequest{ + Command: cmd, + EnvironmentNamespace: *environmentNamespace, + }) + if err != nil { + t.Fatal(err) + } + return resp.Experiment +} + +func newExperimentClient(t *testing.T) experimentclient.Client { + t.Helper() + creds, err := rpcclient.NewPerRPCCredentials(*serviceTokenPath) + if err != nil { + t.Fatal("Failed to create RPC credentials:", err) + } + client, err := experimentclient.NewClient( + fmt.Sprintf("%s:%d", *webGatewayAddr, *webGatewayPort), + *webGatewayCert, + rpcclient.WithPerRPCCredentials(creds), + rpcclient.WithDialTimeout(30*time.Second), + rpcclient.WithBlock(), + ) + if err != nil { + t.Fatal("Failed to create experiment client:", err) + } + return client +} + +func newUUID(t *testing.T) string { + t.Helper() + id, err := uuid.NewUUID() + if err != nil { + t.Fatal(err) + } + return id.String() +} + +func getFeature(ctx context.Context, t *testing.T, featureID string) *featureproto.Feature { + t.Helper() + client := newFeatureClient(t) + defer client.Close() + req := &featureproto.GetFeatureRequest{ + Id: featureID, + EnvironmentNamespace: *environmentNamespace, + } + resp, err := client.GetFeature(ctx, req) + if err != nil { + t.Fatal(err) + } + return resp.Feature +} + +func createFeature(ctx context.Context, t *testing.T, featureID string) { + t.Helper() + client := newFeatureClient(t) + defer client.Close() + cmd := newCreateFeatureCommand(featureID) + createReq := &featureproto.CreateFeatureRequest{ + Command: cmd, + EnvironmentNamespace: *environmentNamespace, + } + if _, err := client.CreateFeature(ctx, createReq); err != nil { + t.Fatal(err) + } + enableFeature(t, featureID, client) +} + +func newFeatureClient(t *testing.T) featureclient.Client { + t.Helper() + creds, err := 
rpcclient.NewPerRPCCredentials(*serviceTokenPath) + if err != nil { + t.Fatal("Failed to create RPC credentials:", err) + } + featureClient, err := featureclient.NewClient( + fmt.Sprintf("%s:%d", *webGatewayAddr, *webGatewayPort), + *webGatewayCert, + rpcclient.WithPerRPCCredentials(creds), + rpcclient.WithDialTimeout(30*time.Second), + rpcclient.WithBlock(), + ) + if err != nil { + t.Fatal("Failed to create feature client:", err) + } + return featureClient +} + +func newCreateFeatureCommand(featureID string) *featureproto.CreateFeatureCommand { + return &featureproto.CreateFeatureCommand{ + Id: featureID, + Name: featureID, + Description: "e2e-test-gateway-feature-description", + Variations: []*featureproto.Variation{ + { + Value: "A", + Name: "Variation A", + Description: "Thing does A", + }, + { + Value: "B", + Name: "Variation B", + Description: "Thing does B", + }, + }, + Tags: []string{ + "e2e-test-tag-1", + "e2e-test-tag-2", + "e2e-test-tag-3", + }, + DefaultOnVariationIndex: &wrappers.Int32Value{Value: int32(0)}, + DefaultOffVariationIndex: &wrappers.Int32Value{Value: int32(1)}, + } +} + +func enableFeature(t *testing.T, featureID string, client featureclient.Client) { + t.Helper() + enableReq := &featureproto.EnableFeatureRequest{ + Id: featureID, + Command: &featureproto.EnableFeatureCommand{}, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + if _, err := client.EnableFeature(ctx, enableReq); err != nil { + t.Fatalf("Failed to enable feature id: %s. Error: %v", featureID, err) + } +} + +func compareExperiments(t *testing.T, expected []*experimentproto.Experiment, actual []*experimentproto.Experiment) { + t.Helper() + if len(actual) != len(expected) { + t.Fatalf("Different sizes. Expected: %d, actual: %d", len(expected), len(actual)) + } + for i := 0; i < len(expected); i++ { + if !proto.Equal(actual[i], expected[i]) { + t.Fatalf("Experiments do not match. 
Expected: %v, actual: %v", expected[i], actual[i]) + } + } +} + +func createFeatureID(t *testing.T) string { + if *testID != "" { + return fmt.Sprintf("%s-%s-feature-id-%s", prefixTestName, *testID, newUUID(t)) + } + return fmt.Sprintf("%s-feature-id-%s", prefixTestName, newUUID(t)) +} + +func createGoalID(t *testing.T) string { + if *testID != "" { + return fmt.Sprintf("%s-%s-goal-id-%s", prefixTestName, *testID, newUUID(t)) + } + return fmt.Sprintf("%s-goal-id-%s", prefixTestName, newUUID(t)) +} diff --git a/test/e2e/feature/BUILD.bazel b/test/e2e/feature/BUILD.bazel new file mode 100644 index 000000000..fb76a6345 --- /dev/null +++ b/test/e2e/feature/BUILD.bazel @@ -0,0 +1,33 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_test") + +go_test( + name = "go_default_test", + srcs = [ + "feature_last_used_info_test.go", + "feature_test.go", + "segment_test.go", + "segment_user_test.go", + "tag_test.go", + "user_evaluations_test.go", + ], + deps = [ + "//pkg/feature/client:go_default_library", + "//pkg/feature/domain:go_default_library", + "//pkg/gateway/client:go_default_library", + "//pkg/rpc/client:go_default_library", + "//pkg/uuid:go_default_library", + "//proto/event/client:go_default_library", + "//proto/feature:go_default_library", + "//proto/gateway:go_default_library", + "//proto/user:go_default_library", + "//test/e2e/util:go_default_library", + "//test/util:go_default_library", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_golang_protobuf//ptypes:go_default_library_gen", + "@com_github_stretchr_testify//assert:go_default_library", + "@com_github_stretchr_testify//require:go_default_library", + "@io_bazel_rules_go//proto/wkt:any_go_proto", + "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", + "@org_golang_google_protobuf//encoding/protojson:go_default_library", + ], +) diff --git a/test/e2e/feature/feature_last_used_info_test.go b/test/e2e/feature/feature_last_used_info_test.go new file mode 100644 index 000000000..0e9539c81 --- 
/dev/null +++ b/test/e2e/feature/feature_last_used_info_test.go @@ -0,0 +1,187 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package feature + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/golang/protobuf/ptypes" + "google.golang.org/protobuf/encoding/protojson" + + gatewayclient "github.com/bucketeer-io/bucketeer/pkg/gateway/client" + rpcclient "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/client" + "github.com/bucketeer-io/bucketeer/proto/feature" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" + gatewayproto "github.com/bucketeer-io/bucketeer/proto/gateway" + userproto "github.com/bucketeer-io/bucketeer/proto/user" + "github.com/bucketeer-io/bucketeer/test/e2e/util" +) + +const ( + featureRecorderRetryTimes = 60 +) + +func TestGprcGetFeatureLastUsedInfo(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + cmd := newCreateFeatureCommand(newFeatureID(t)) + createFeature(t, client, cmd) + enableFeature(t, cmd.Id, client) + f := getFeature(t, cmd.Id, client) + lastUsedAt := time.Now() + grpcRegisterEvaluationEvents(t, []*feature.Feature{f}, f.Tags[0], lastUsedAt) + for i := 0; i < featureRecorderRetryTimes; i++ { + actual := getFeature(t, cmd.Id, client) + if actual.LastUsedInfo != nil { + if actual.LastUsedInfo.FeatureId != f.Id { + t.Fatalf("feature ID is not correct: expected: %s, actual: %s", 
f.Id, actual.LastUsedInfo.FeatureId) + } + if actual.LastUsedInfo.Version != f.Version { + t.Fatalf("feature version is not correct: expected: %d, actual: %d", f.Version, actual.LastUsedInfo.Version) + } + if actual.LastUsedInfo.CreatedAt != lastUsedAt.Unix() { + t.Fatalf("created at is not correct: expected: %d, actual: %d", lastUsedAt.Unix(), actual.LastUsedInfo.CreatedAt) + } + if actual.LastUsedInfo.LastUsedAt != lastUsedAt.Unix() { + t.Fatalf("lastUsedAt at is not correct: expected: %d, actual: %d", lastUsedAt.Unix(), actual.LastUsedInfo.LastUsedAt) + } + break + } + if i == featureRecorderRetryTimes-1 { + t.Fatalf("LastUsedInfo cannot be fetched.") + } + time.Sleep(time.Second) + } +} + +func TestGetFeatureLastUsedInfo(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + cmd := newCreateFeatureCommand(newFeatureID(t)) + createFeature(t, client, cmd) + enableFeature(t, cmd.Id, client) + f := getFeature(t, cmd.Id, client) + lastUsedAt := time.Now() + registerEvaluationEvents(t, []*feature.Feature{f}, f.Tags[0], lastUsedAt) + for i := 0; i < featureRecorderRetryTimes; i++ { + actual := getFeature(t, cmd.Id, client) + if actual.LastUsedInfo != nil { + if actual.LastUsedInfo.FeatureId != f.Id { + t.Fatalf("feature ID is not correct: expected: %s, actual: %s", f.Id, actual.LastUsedInfo.FeatureId) + } + if actual.LastUsedInfo.Version != f.Version { + t.Fatalf("feature version is not correct: expected: %d, actual: %d", f.Version, actual.LastUsedInfo.Version) + } + if actual.LastUsedInfo.CreatedAt != lastUsedAt.Unix() { + t.Fatalf("created at is not correct: expected: %d, actual: %d", lastUsedAt.Unix(), actual.LastUsedInfo.CreatedAt) + } + if actual.LastUsedInfo.LastUsedAt != lastUsedAt.Unix() { + t.Fatalf("lastUsedAt at is not correct: expected: %d, actual: %d", lastUsedAt.Unix(), actual.LastUsedInfo.LastUsedAt) + } + break + } + if i == featureRecorderRetryTimes-1 { + t.Fatalf("LastUsedInfo cannot be fetched.") + } + time.Sleep(time.Second) + } +} + 
+func grpcRegisterEvaluationEvents(t *testing.T, features []*feature.Feature, tag string, now time.Time) { + t.Helper() + c := newGatewayClient(t) + defer c.Close() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + events := make([]*eventproto.Event, 0) + for _, f := range features { + evaluation, err := ptypes.MarshalAny(&eventproto.EvaluationEvent{ + Timestamp: now.Unix(), + FeatureId: f.Id, + FeatureVersion: f.Version, + UserId: "user-id", + VariationId: "variation-id", + User: &userproto.User{}, + Reason: &featureproto.Reason{}, + Tag: tag, + }) + if err != nil { + t.Fatal(err) + } + events = append(events, &eventproto.Event{ + Id: newUUID(t), + Event: evaluation, + }) + } + req := &gatewayproto.RegisterEventsRequest{Events: events} + _, err := c.RegisterEvents(ctx, req) + if err != nil { + t.Fatal(err) + } +} + +func registerEvaluationEvents(t *testing.T, features []*feature.Feature, tag string, now time.Time) { + t.Helper() + c := newGatewayClient(t) + defer c.Close() + events := make([]util.Event, 0) + for _, f := range features { + evaluation, err := protojson.Marshal(&eventproto.EvaluationEvent{ + Timestamp: now.Unix(), + FeatureId: f.Id, + FeatureVersion: f.Version, + UserId: "user-id", + VariationId: "variation-id", + User: &userproto.User{}, + Reason: &featureproto.Reason{}, + Tag: tag, + }) + if err != nil { + t.Fatal(err) + } + events = append(events, util.Event{ + ID: newUUID(t), + Event: evaluation, + Type: util.EvaluationEventType, + }) + } + response := util.RegisterEvents(t, events, *gatewayAddr, *apiKeyPath) + if len(response.Errors) > 0 { + t.Fatalf("Failed to register events. 
Error: %v", response.Errors) + } +} + +func newGatewayClient(t *testing.T) gatewayclient.Client { + t.Helper() + creds, err := gatewayclient.NewPerRPCCredentials(*apiKeyPath) + if err != nil { + t.Fatal("Failed to create RPC credentials:", err) + } + client, err := gatewayclient.NewClient( + fmt.Sprintf("%s:%d", *gatewayAddr, *gatewayPort), + *gatewayCert, + rpcclient.WithPerRPCCredentials(creds), + rpcclient.WithDialTimeout(30*time.Second), + rpcclient.WithBlock(), + ) + if err != nil { + t.Fatal("Failed to create gateway client:", err) + } + return client +} diff --git a/test/e2e/feature/feature_test.go b/test/e2e/feature/feature_test.go new file mode 100644 index 000000000..fcde35223 --- /dev/null +++ b/test/e2e/feature/feature_test.go @@ -0,0 +1,1823 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package feature + +import ( + "context" + "flag" + "fmt" + "reflect" + "sort" + "testing" + "time" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" + "github.com/golang/protobuf/ptypes/wrappers" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + featureclient "github.com/bucketeer-io/bucketeer/pkg/feature/client" + "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + "github.com/bucketeer-io/bucketeer/pkg/uuid" + "github.com/bucketeer-io/bucketeer/proto/feature" + userproto "github.com/bucketeer-io/bucketeer/proto/user" + "github.com/bucketeer-io/bucketeer/test/util" +) + +const ( + prefixID = "e2e-test" + timeout = 10 * time.Second +) + +var ( + // FIXME: To avoid compiling the test many times, webGatewayAddr, webGatewayPort & apiKey has been also added here to prevent from getting: "flag provided but not defined" error during the test. These 3 are being use in the Gateway test + webGatewayAddr = flag.String("web-gateway-addr", "", "Web gateway endpoint address") + webGatewayPort = flag.Int("web-gateway-port", 443, "Web gateway endpoint port") + webGatewayCert = flag.String("web-gateway-cert", "", "Web gateway crt file") + apiKeyPath = flag.String("api-key", "", "Api key path for web gateway") + gatewayAddr = flag.String("gateway-addr", "", "Gateway endpoint address") + gatewayPort = flag.Int("gateway-port", 443, "Gateway endpoint port") + gatewayCert = flag.String("gateway-cert", "", "Gateway crt file") + serviceTokenPath = flag.String("service-token", "", "Service token path") + environmentNamespace = flag.String("environment-namespace", "", "Environment namespace") + testID = flag.String("test-id", "", "test ID") + + tags = []string{"e2e-test-tag-1", "e2e-test-tag-2", "e2e-test-tag-3"} +) + +func TestCreateFeature(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + cmd := newCreateFeatureCommand(newFeatureID(t)) + createFeature(t, client, cmd) + f := getFeature(t, cmd.Id, client) + 
if cmd.Id != f.Id { + t.Fatalf("Different ids. Expected: %s actual: %s", cmd.Id, f.Id) + } + if cmd.Name != f.Name { + t.Fatalf("Different names. Expected: %s actual: %s", cmd.Name, f.Name) + } + if cmd.Description != f.Description { + t.Fatalf("Different descriptions. Expected: %s actual: %s", cmd.Description, f.Description) + } + if f.Enabled { + t.Fatalf("Enabled flag is true") + } + for i := range f.Variations { + compareVariation(t, cmd.Variations[i], f.Variations[i]) + } + if !reflect.DeepEqual(cmd.Tags, f.Tags) { + t.Fatalf("Different tags. Expected: %v actual: %v: ", cmd.Tags, f.Tags) + } + defaultOnVariation := findVariation(f.DefaultStrategy.FixedStrategy.Variation, f.Variations) + cmdDefaultOnVariation := cmd.Variations[int(cmd.DefaultOnVariationIndex.Value)] + if cmdDefaultOnVariation.Value != defaultOnVariation.Value { + t.Fatalf("Different default on variation value. Expected: %s actual: %s", cmdDefaultOnVariation.Value, defaultOnVariation.Value) + } + defaultOffVariation := findVariation(f.OffVariation, f.Variations) + cmdDefaultOffVariation := cmd.Variations[int(cmd.DefaultOffVariationIndex.Value)] + if cmdDefaultOffVariation.Value != defaultOffVariation.Value { + t.Fatalf("Different default off variation value. Expected: %s actual: %s", cmdDefaultOffVariation.Value, defaultOffVariation.Value) + } +} + +func TestArchiveFeature(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + featureID := newFeatureID(t) + cmd := newCreateFeatureCommand(featureID) + createFeature(t, client, cmd) + req := &feature.ArchiveFeatureRequest{ + Id: featureID, + Command: &feature.ArchiveFeatureCommand{}, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + if _, err := client.ArchiveFeature(ctx, req); err != nil { + t.Fatal(err) + } + f := getFeature(t, featureID, client) + if cmd.Id != f.Id { + t.Fatalf("Different ids. 
Expected: %s actual: %s", cmd.Id, f.Id) + } + if !f.Archived { + t.Fatal("Delete flag is false") + } +} + +func TestUnarchiveFeature(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + featureID := newFeatureID(t) + cmd := newCreateFeatureCommand(featureID) + createFeature(t, client, cmd) + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + req := &feature.ArchiveFeatureRequest{ + Id: featureID, + Command: &feature.ArchiveFeatureCommand{}, + EnvironmentNamespace: *environmentNamespace, + } + if _, err := client.ArchiveFeature(ctx, req); err != nil { + t.Fatal(err) + } + reqUnarchive := &feature.UnarchiveFeatureRequest{ + Id: featureID, + Command: &feature.UnarchiveFeatureCommand{}, + EnvironmentNamespace: *environmentNamespace, + } + if _, err := client.UnarchiveFeature(ctx, reqUnarchive); err != nil { + t.Fatal(err) + } + f := getFeature(t, featureID, client) + if cmd.Id != f.Id { + t.Fatalf("Different ids. Expected: %s actual: %s", cmd.Id, f.Id) + } + if f.Archived { + t.Fatal("Delete flag is true") + } +} + +func TestDeleteFeature(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + featureID := newFeatureID(t) + cmd := newCreateFeatureCommand(featureID) + createFeature(t, client, cmd) + deleteReq := &feature.DeleteFeatureRequest{ + Id: featureID, + Command: &feature.DeleteFeatureCommand{}, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + if _, err := client.DeleteFeature(ctx, deleteReq); err != nil { + t.Fatal(err) + } + f := getFeature(t, featureID, client) + if cmd.Id != f.Id { + t.Fatalf("Different ids. 
Expected: %s actual: %s", cmd.Id, f.Id) + } + if !f.Deleted { + t.Fatal("Delete flag is false") + } +} + +func TestEnableFeature(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + cmd := newCreateFeatureCommand(newFeatureID(t)) + createFeature(t, client, cmd) + enableFeature(t, cmd.Id, client) + f := getFeature(t, cmd.Id, client) + if cmd.Id != f.Id { + t.Fatalf("Different ids. Expected: %s actual: %s", cmd.Id, f.Id) + } + if !f.Enabled { + t.Fatal("Enabled flag is false") + } +} + +func TestDisableFeature(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + cmd := newCreateFeatureCommand(newFeatureID(t)) + createFeature(t, client, cmd) + enableFeature(t, cmd.Id, client) + disableReq := &feature.DisableFeatureRequest{ + Id: cmd.Id, + Command: &feature.DisableFeatureCommand{}, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + if _, err := client.DisableFeature(ctx, disableReq); err != nil { + t.Fatal(err) + } + f := getFeature(t, cmd.Id, client) + if cmd.Id != f.Id { + t.Fatalf("Different ids. 
Expected: %s actual: %s", cmd.Id, f.Id) + } + if f.Enabled { + t.Fatal("Enabled flag is true") + } +} + +func TestListArchivedFeatures(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + size := int64(1) + featureID := newFeatureID(t) + cmd := newCreateFeatureCommand(featureID) + createFeature(t, client, cmd) + req := &feature.ArchiveFeatureRequest{ + Id: featureID, + Command: &feature.ArchiveFeatureCommand{}, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + if _, err := client.ArchiveFeature(ctx, req); err != nil { + t.Fatal(err) + } + listReq := &feature.ListFeaturesRequest{ + PageSize: size, + Archived: &wrappers.BoolValue{Value: true}, + EnvironmentNamespace: *environmentNamespace, + } + response, err := client.ListFeatures(ctx, listReq) + if err != nil { + t.Fatal(err) + } + responseSize := int64(len(response.Features)) + if responseSize != size { + t.Fatalf("Different sizes. Expected: %d actual: %d", size, responseSize) + } +} + +func TestListFeaturesPageSize(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + size := int64(1) + createRandomIDFeatures(t, 2, client) + listReq := &feature.ListFeaturesRequest{ + PageSize: size, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + response, err := client.ListFeatures(ctx, listReq) + if err != nil { + t.Fatal(err) + } + responseSize := int64(len(response.Features)) + if responseSize != size { + t.Fatalf("Different sizes. 
Expected: %d actual: %d", size, responseSize) + } +} + +func TestListFeaturesCursor(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + createRandomIDFeatures(t, 3, client) + size := int64(1) + listReq := &feature.ListFeaturesRequest{ + PageSize: size, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + response, err := client.ListFeatures(ctx, listReq) + if err != nil { + t.Fatal(err) + } + if response.Cursor == "" { + t.Fatal("Cursor is empty") + } + features := response.Features + listReq = &feature.ListFeaturesRequest{ + PageSize: size, + Cursor: response.Cursor, + EnvironmentNamespace: *environmentNamespace, + } + response, err = client.ListFeatures(ctx, listReq) + if err != nil { + t.Fatal(err) + } + require.EqualValues(t, size, len(features)) + require.EqualValues(t, size, len(response.Features)) + require.NotEqual(t, features[0].Id, response.Features[0].Id) +} + +func TestListFeaturesOrderByName(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + size := int64(3) + createRandomIDFeatures(t, 3, client) + + testcases := []struct { + orderDirection feature.ListFeaturesRequest_OrderDirection + checkSortedFunc func(a []string) bool + }{ + { + orderDirection: feature.ListFeaturesRequest_ASC, + checkSortedFunc: sort.StringsAreSorted, + }, + { + orderDirection: feature.ListFeaturesRequest_DESC, + checkSortedFunc: util.StringsAreReverseSorted, + }, + } + + for _, tc := range testcases { + listReq := &feature.ListFeaturesRequest{ + PageSize: size, + OrderBy: feature.ListFeaturesRequest_NAME, + OrderDirection: tc.orderDirection, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + response, err := client.ListFeatures(ctx, listReq) + if err != nil { + t.Fatal(err) + } + names := make([]string, 0, len(response.Features)) + for _, f := range response.Features { + names = 
append(names, f.Name) + } + if !tc.checkSortedFunc(names) { + t.Fatalf("Features aren't sorted by Name %s. Features: %v", tc.orderDirection, response.Features) + } + } +} + +func TestListFeaturesOrderByCreatedAt(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + size := int64(3) + createRandomIDFeatures(t, 3, client) + + testcases := []struct { + orderDirection feature.ListFeaturesRequest_OrderDirection + checkSortedFunc func(a []int64) bool + }{ + { + orderDirection: feature.ListFeaturesRequest_ASC, + checkSortedFunc: util.Int64sAreSorted, + }, + { + orderDirection: feature.ListFeaturesRequest_DESC, + checkSortedFunc: util.Int64sAreReverseSorted, + }, + } + + for _, tc := range testcases { + listReq := &feature.ListFeaturesRequest{ + PageSize: size, + OrderBy: feature.ListFeaturesRequest_CREATED_AT, + OrderDirection: tc.orderDirection, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + response, err := client.ListFeatures(ctx, listReq) + if err != nil { + t.Fatal(err) + } + createdAts := make([]int64, 0, len(response.Features)) + for _, f := range response.Features { + createdAts = append(createdAts, f.CreatedAt) + } + if !tc.checkSortedFunc(createdAts) { + t.Fatalf("Features aren't sorted by CreatedAt %s. 
Features: %v", tc.orderDirection, response.Features) + } + } +} + +func TestListFeaturesOrderByUpdatedAt(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + size := int64(3) + createRandomIDFeatures(t, 3, client) + + testcases := []struct { + orderDirection feature.ListFeaturesRequest_OrderDirection + checkSortedFunc func(a []int64) bool + }{ + { + orderDirection: feature.ListFeaturesRequest_ASC, + checkSortedFunc: util.Int64sAreSorted, + }, + { + orderDirection: feature.ListFeaturesRequest_DESC, + checkSortedFunc: util.Int64sAreReverseSorted, + }, + } + + for _, tc := range testcases { + listReq := &feature.ListFeaturesRequest{ + PageSize: size, + OrderBy: feature.ListFeaturesRequest_UPDATED_AT, + OrderDirection: tc.orderDirection, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + response, err := client.ListFeatures(ctx, listReq) + if err != nil { + t.Fatal(err) + } + updatedAts := make([]int64, 0, len(response.Features)) + for _, f := range response.Features { + updatedAts = append(updatedAts, f.UpdatedAt) + } + if !tc.checkSortedFunc(updatedAts) { + t.Fatalf("Features aren't sorted by UpdatedAt %s. Features: %v", tc.orderDirection, response.Features) + } + } +} + +func TestListEnabledFeaturesPageSize(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + ids := []string{newFeatureID(t), newFeatureID(t), newFeatureID(t)} + createFeatures(t, ids, client) + enableFeatures(t, ids, client) + size := int64(2) + listReq := &feature.ListEnabledFeaturesRequest{ + PageSize: size, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + response, err := client.ListEnabledFeatures(ctx, listReq) + if err != nil { + t.Fatal(err) + } + responseSize := int64(len(response.Features)) + if responseSize != size { + t.Fatalf("Different sizes. 
Expected: %d actual: %d", size, responseSize) + } + checkEnabledFlag(t, response.Features) +} + +func TestListEnabledFeaturesCursor(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + ids := []string{newFeatureID(t), newFeatureID(t), newFeatureID(t), newFeatureID(t)} + createFeatures(t, ids, client) + enableFeatures(t, ids, client) + size := int64(2) + listReq := &feature.ListEnabledFeaturesRequest{ + PageSize: size, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + response, err := client.ListEnabledFeatures(ctx, listReq) + if err != nil { + t.Fatal(err) + } + if response.Cursor == "" { + t.Fatal("Cursor is empty") + } + features := response.Features + firstPageIds := make([]string, 0, len(features)) + for _, feature := range features { + firstPageIds = append(firstPageIds, feature.Id) + } + checkEnabledFlag(t, features) + listReq = &feature.ListEnabledFeaturesRequest{ + PageSize: size, + Cursor: response.Cursor, + EnvironmentNamespace: *environmentNamespace, + } + response, err = client.ListEnabledFeatures(ctx, listReq) + if err != nil { + t.Fatal(err) + } + checkEnabledFlag(t, features) + for _, feature := range response.Features { + // TODO: Features should be tagged while creating and then check the returned features are in that created list. 
+ // assert.Contains(t, ids, feature.Id) + assert.NotContains(t, firstPageIds, feature.Id) + } +} + +func TestRename(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + cmd := newCreateFeatureCommand(newFeatureID(t)) + createFeature(t, client, cmd) + expected := "new-feature-name" + updateReq := &feature.UpdateFeatureDetailsRequest{ + Id: cmd.Id, + RenameFeatureCommand: &feature.RenameFeatureCommand{Name: expected}, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + if _, err := client.UpdateFeatureDetails(ctx, updateReq); err != nil { + t.Fatal(err) + } + f := getFeature(t, cmd.Id, client) + if cmd.Id != f.Id { + t.Fatalf("Different ids. Expected: %s actual: %s", cmd.Id, f.Id) + } + if expected != f.Name { + t.Fatalf("Different names. Expected: %s actual: %s", expected, f.Name) + } +} + +func TestChangeDescription(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + cmd := newCreateFeatureCommand(newFeatureID(t)) + createFeature(t, client, cmd) + expected := "new-feature-description" + updateReq := &feature.UpdateFeatureDetailsRequest{ + Id: cmd.Id, + ChangeDescriptionCommand: &feature.ChangeDescriptionCommand{Description: expected}, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + if _, err := client.UpdateFeatureDetails(ctx, updateReq); err != nil { + t.Fatal(err) + } + f := getFeature(t, cmd.Id, client) + if cmd.Id != f.Id { + t.Fatalf("Different ids. Expected: %s actual: %s", cmd.Id, f.Id) + } + if expected != f.Description { + t.Fatalf("Different names. 
Expected: %s actual: %s", expected, f.Description) + } +} + +func TestAddTags(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + cmd := newCreateFeatureCommand(newFeatureID(t)) + createFeature(t, client, cmd) + newTags := []string{"e2e-test-tag-4", "e2e-test-tag-5", "e2e-test-tag-6"} + addReq := &feature.UpdateFeatureDetailsRequest{ + Id: cmd.Id, + AddTagCommands: []*feature.AddTagCommand{ + {Tag: newTags[0]}, + {Tag: newTags[1]}, + {Tag: newTags[2]}, + }, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + if _, err := client.UpdateFeatureDetails(ctx, addReq); err != nil { + t.Fatal(err) + } + f := getFeature(t, cmd.Id, client) + if cmd.Id != f.Id { + t.Fatalf("Different ids. Expected: %s actual: %s", cmd.Id, f.Id) + } + cmd.Tags = append(cmd.Tags, newTags...) + if !reflect.DeepEqual(cmd.Tags, f.Tags) { + t.Fatalf("Different tags. Expected: %v actual: %v: ", cmd.Tags, f.Tags) + } +} + +func TestRemoveTags(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + cmd := newCreateFeatureCommand(newFeatureID(t)) + createFeature(t, client, cmd) + removeTargetTags := []*feature.RemoveTagCommand{ + {Tag: cmd.Tags[0]}, + {Tag: cmd.Tags[2]}, + } + removeReq := &feature.UpdateFeatureDetailsRequest{ + Id: cmd.Id, + RemoveTagCommands: removeTargetTags, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + if _, err := client.UpdateFeatureDetails(ctx, removeReq); err != nil { + t.Fatal(err) + } + f := getFeature(t, cmd.Id, client) + if cmd.Id != f.Id { + t.Fatalf("Different ids. Expected: %s actual: %s", cmd.Id, f.Id) + } + if len(f.Tags) != 1 { + t.Fatalf("Tags should have only 1 element. Expected: %s actual: %v", cmd.Tags[1], f.Tags) + } + if f.Tags[0] != cmd.Tags[1] { + t.Fatalf("The wrong tag might has been deleted. 
Expected to be deleted: %v actual: %v", removeTargetTags, f.Tags) + } +} + +func TestAddVariation(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + featureID := newFeatureID(t) + cmd := newCreateFeatureCommand(featureID) + createFeature(t, client, cmd) + targetVariationValues := []string{newUUID(t), newUUID(t)} + targetVariations := newVariations(targetVariationValues) + addCmd := newAddVariationsCommand(t, targetVariations) + updateVariations(t, featureID, addCmd, client) + feature := getFeature(t, featureID, client) + if feature.Id != cmd.Id { + t.Fatalf("Different ids. Expected: %s actual: %s", cmd.Id, feature.Id) + } + var matched int + for _, e := range targetVariations { + for _, g := range feature.Variations { + if e.Value == g.Value { + compareVariation(t, e, g) + matched++ + } + } + } + size := len(targetVariations) + if matched != size { + t.Fatalf("The number of variations added does not match. Expected: %d actual: %d", size, matched) + } +} + +func TestRemoveVariation(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + featureID := newFeatureID(t) + cmd := newCreateFeatureCommand(featureID) + createFeature(t, client, cmd) + targetVariationID := getFeature(t, featureID, client).Variations[2].Id + removeCmd := newRemoveVariationsCommand(t, []string{targetVariationID}) + updateVariations(t, featureID, removeCmd, client) + feature := getFeature(t, featureID, client) + if feature.Id != cmd.Id { + t.Fatalf("Different ids. Expected: %s actual: %s", cmd.Id, feature.Id) + } + if len(feature.Variations) != 3 { + t.Fatal("Variations should have 3 elements. Actual:", feature.Variations) + } + if findVariation(targetVariationID, feature.Variations) != nil { + t.Fatalf("The wrong variation might has been deleted. 
Expected: %s actual: %v", targetVariationID, feature.Variations) + } +} + +func TestRemoveVariations(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + featureID := newFeatureID(t) + cmd := newCreateFeatureCommand(featureID) + createFeature(t, client, cmd) + feature := getFeature(t, featureID, client) + targetVariationIDS := []string{feature.Variations[2].Id, feature.Variations[3].Id} + cmds := newRemoveVariationsCommand(t, targetVariationIDS) + updateVariations(t, featureID, cmds, client) + feature = getFeature(t, featureID, client) + if feature.Id != cmd.Id { + t.Fatalf("Different ids. Expected: %s actual: %s", cmd.Id, feature.Id) + } + if len(feature.Variations) != 2 { + t.Fatal("Variations should have only 2 elements. Actual:", feature.Variations) + } + if variation := findOneOfVariations(targetVariationIDS, feature.Variations); variation != nil { + t.Fatalf("The wrong variation might has been deleted. Expected: %v actual: %v", targetVariationIDS, feature.Variations) + } +} + +func TestChangeVariationValue(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + featureID := newFeatureID(t) + cmd := newCreateFeatureCommand(featureID) + createFeature(t, client, cmd) + targetVariationID := getFeature(t, featureID, client).Variations[1].Id + targetVariationValue := "new-variation-value" + changeCmd, err := util.MarshalCommand(&feature.ChangeVariationValueCommand{ + Id: targetVariationID, + Value: targetVariationValue, + }) + if err != nil { + t.Fatal(err) + } + updateVariations(t, featureID, []*feature.Command{{Command: changeCmd}}, client) + f := getFeature(t, featureID, client) + if f.Id != cmd.Id { + t.Fatalf("Different ids. 
Expected: %s actual: %s", cmd.Id, f.Id) + } + expected := &feature.Variation{ + Value: targetVariationValue, + Name: cmd.Variations[1].Name, + Description: cmd.Variations[1].Description, + } + compareVariation(t, expected, f.Variations[1]) +} + +func TestChangeVariationName(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + featureID := newFeatureID(t) + cmd := newCreateFeatureCommand(featureID) + createFeature(t, client, cmd) + targetVariationID := getFeature(t, featureID, client).Variations[1].Id + targetVariationName := "new-variation-name" + changeCmd, err := util.MarshalCommand(&feature.ChangeVariationNameCommand{ + Id: targetVariationID, + Name: targetVariationName, + }) + if err != nil { + t.Fatal(err) + } + updateVariations(t, featureID, []*feature.Command{{Command: changeCmd}}, client) + f := getFeature(t, featureID, client) + if f.Id != cmd.Id { + t.Fatalf("Different ids. Expected: %s actual: %s", cmd.Id, f.Id) + } + expected := &feature.Variation{ + Value: cmd.Variations[1].Value, + Name: targetVariationName, + Description: cmd.Variations[1].Description, + } + compareVariation(t, expected, f.Variations[1]) +} + +func TestChangeVariationDescription(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + featureID := newFeatureID(t) + cmd := newCreateFeatureCommand(featureID) + createFeature(t, client, cmd) + targetVariationID := getFeature(t, featureID, client).Variations[1].Id + targetVariationDescription := "new-variation-description" + changeCmd, err := util.MarshalCommand(&feature.ChangeVariationDescriptionCommand{ + Id: targetVariationID, + Description: targetVariationDescription, + }) + if err != nil { + t.Fatal(err) + } + updateVariations(t, featureID, []*feature.Command{{Command: changeCmd}}, client) + f := getFeature(t, featureID, client) + if f.Id != cmd.Id { + t.Fatalf("Different ids. 
Expected: %s actual: %s", cmd.Id, f.Id) + } + expected := &feature.Variation{ + Value: cmd.Variations[1].Value, + Name: cmd.Variations[1].Name, + Description: targetVariationDescription, + } + compareVariation(t, expected, f.Variations[1]) +} + +func TestChangeFixedStrategy(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + featureID := newFeatureID(t) + cmd := newCreateFeatureCommand(featureID) + createFeature(t, client, cmd) + f := getFeature(t, featureID, client) + rule := newFixedStrategyRule(f.Variations[0].Id) + addCmd, _ := util.MarshalCommand(&feature.AddRuleCommand{Rule: rule}) + updateFeatureTargeting(t, client, addCmd, featureID) + expected := f.Variations[1].Id + changeCmd, err := util.MarshalCommand(&feature.ChangeFixedStrategyCommand{ + Id: featureID, + RuleId: rule.Id, + Strategy: &feature.FixedStrategy{Variation: expected}, + }) + require.NoError(t, err) + updateFeatureTargeting(t, client, changeCmd, featureID) + f = getFeature(t, featureID, client) + if f.Id != cmd.Id { + t.Fatalf("Different ids. Expected: %s actual: %s", cmd.Id, f.Id) + } + actual := f.Rules[0].Strategy.FixedStrategy.Variation + if expected != actual { + t.Fatalf("Variation id is not equal. 
Expected: %s actual: %s", expected, actual) + } +} + +func TestChangeRolloutStrategy(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + featureID := newFeatureID(t) + cmd := newCreateFeatureCommand(featureID) + createFeature(t, client, cmd) + f := getFeature(t, featureID, client) + rule := newRolloutStrategyRule(f.Variations) + addCmd, _ := util.MarshalCommand(&feature.AddRuleCommand{Rule: rule}) + updateFeatureTargeting(t, client, addCmd, featureID) + expected := &feature.RolloutStrategy{ + Variations: []*feature.RolloutStrategy_Variation{ + { + Variation: f.Variations[0].Id, + Weight: 12000, + }, + { + Variation: f.Variations[1].Id, + Weight: 30000, + }, + { + Variation: f.Variations[2].Id, + Weight: 50000, + }, + { + Variation: f.Variations[3].Id, + Weight: 8000, + }, + }, + } + changeCmd, err := util.MarshalCommand(&feature.ChangeRolloutStrategyCommand{ + Id: featureID, + RuleId: rule.Id, + Strategy: expected, + }) + require.NoError(t, err) + updateFeatureTargeting(t, client, changeCmd, featureID) + f = getFeature(t, featureID, client) + if f.Id != cmd.Id { + t.Fatalf("Different ids. Expected: %s actual: %s", cmd.Id, f.Id) + } + actual := f.Rules[0].Strategy.RolloutStrategy + if !proto.Equal(expected, actual) { + t.Fatalf("Strategy is not equal. Expected: %s actual: %s", expected, actual) + } +} + +func TestChangeOffVariation(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + featureID := newFeatureID(t) + cmd := newCreateFeatureCommand(featureID) + createFeature(t, client, cmd) + expected := getFeature(t, featureID, client).Variations[1].Id + changeCmd, err := util.MarshalCommand(&feature.ChangeOffVariationCommand{Id: expected}) + if err != nil { + t.Fatal(err) + } + updateFeatureTargeting(t, client, changeCmd, featureID) + f := getFeature(t, featureID, client) + if f.Id != cmd.Id { + t.Fatalf("Different ids. Expected: %s actual: %s", cmd.Id, f.Id) + } + if expected != f.OffVariation { + t.Fatalf("Off variation does not match. 
Expected: %s actual: %s", expected, f.OffVariation) + } +} + +func TestAddUserToVariation(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + featureID := newFeatureID(t) + cmd := newCreateFeatureCommand(featureID) + createFeature(t, client, cmd) + expected := "new-user" + addCmd, err := util.MarshalCommand(&feature.AddUserToVariationCommand{ + Id: getFeature(t, featureID, client).Variations[1].Id, + User: expected, + }) + if err != nil { + t.Fatal(err) + } + updateFeatureTargeting(t, client, addCmd, featureID) + f := getFeature(t, featureID, client) + if f.Id != cmd.Id { + t.Fatalf("Different ids. Expected: %s actual: %s", cmd.Id, f.Id) + } + if expected != f.Targets[1].Users[0] { + t.Fatalf("User does not match. Expected to be deleted: %s actual: %s", expected, f.Targets[1].Users[0]) + } +} + +func TestRemoveUserToVariation(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + featureID := newFeatureID(t) + cmd := newCreateFeatureCommand(featureID) + createFeature(t, client, cmd) + variationID := getFeature(t, featureID, client).Variations[1].Id + expected := "new-user" + addCmd, err := util.MarshalCommand(&feature.AddUserToVariationCommand{ + Id: variationID, + User: expected, + }) + if err != nil { + t.Fatal(err) + } + updateFeatureTargeting(t, client, addCmd, featureID) + removeCmd, err := util.MarshalCommand(&feature.RemoveUserFromVariationCommand{ + Id: variationID, + User: expected, + }) + if err != nil { + t.Fatal(err) + } + updateFeatureTargeting(t, client, removeCmd, featureID) + f := getFeature(t, featureID, client) + if f.Id != cmd.Id { + t.Fatalf("Different ids. Expected: %s actual: %s", cmd.Id, f.Id) + } + if len(f.Targets[1].Users) > 0 { + t.Fatalf("User was not deleted. 
Expected: %s actual: %s", expected, f.Targets[0].Users[0]) + } +} + +func TestAddRule(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + featureID := newFeatureID(t) + cmd := newCreateFeatureCommand(featureID) + createFeature(t, client, cmd) + rule := newFixedStrategyRule(getFeature(t, featureID, client).Variations[1].Id) + addCmd, _ := util.MarshalCommand(&feature.AddRuleCommand{Rule: rule}) + updateFeatureTargeting(t, client, addCmd, featureID) + f := getFeature(t, featureID, client) + r := f.Rules[0] + if f.Id != cmd.Id { + t.Fatalf("Different ids. Expected: %s actual: %s", cmd.Id, f.Id) + } + if !proto.Equal(rule.Strategy, r.Strategy) { + t.Fatalf("Strategy is not equal. Expected: %v actual: %v", rule.Strategy, r.Strategy) + } + expectedSize := len(rule.Clauses) + actualSize := len(r.Clauses) + if expectedSize != actualSize { + t.Fatalf("Clauses have different sizes. Expected: %d actual: %d", expectedSize, actualSize) + } + for i := range rule.Clauses { + if r.Clauses[i].Id == "" { + t.Fatalf("ID is empty") + } + compareClause(t, rule.Clauses[i], r.Clauses[i]) + } +} + +func TestChangeRuleToFixedStrategy(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + featureID := newFeatureID(t) + cmd := newCreateFeatureCommand(featureID) + createFeature(t, client, cmd) + f := getFeature(t, featureID, client) + rule := newRolloutStrategyRule(f.Variations) + addCmd, _ := util.MarshalCommand(&feature.AddRuleCommand{Rule: rule}) + updateFeatureTargeting(t, client, addCmd, featureID) + expected := &feature.Strategy{ + Type: feature.Strategy_FIXED, + FixedStrategy: &feature.FixedStrategy{Variation: f.Variations[1].Id}, + } + changeCmd, err := util.MarshalCommand(&feature.ChangeRuleStrategyCommand{ + Id: featureID, + RuleId: rule.Id, + Strategy: expected, + }) + require.NoError(t, err) + updateFeatureTargeting(t, client, changeCmd, featureID) + f = getFeature(t, featureID, client) + assert.Equal(t, cmd.Id, f.Id, fmt.Sprintf("Different ids. 
Expected: %s actual: %s", cmd.Id, f.Id)) + actual := f.Rules[0].Strategy + if !proto.Equal(expected, actual) { + t.Fatalf("Strategy is not equal. Expected: %s actual: %s", expected, actual) + } +} + +func TestChangeRuleToRolloutStrategy(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + featureID := newFeatureID(t) + cmd := newCreateFeatureCommand(featureID) + createFeature(t, client, cmd) + f := getFeature(t, featureID, client) + rule := newFixedStrategyRule(f.Variations[0].Id) + addCmd, _ := util.MarshalCommand(&feature.AddRuleCommand{Rule: rule}) + updateFeatureTargeting(t, client, addCmd, featureID) + expected := &feature.Strategy{ + Type: feature.Strategy_ROLLOUT, + RolloutStrategy: &feature.RolloutStrategy{ + Variations: []*feature.RolloutStrategy_Variation{ + { + Variation: f.Variations[0].Id, + Weight: 12000, + }, + { + Variation: f.Variations[1].Id, + Weight: 30000, + }, + { + Variation: f.Variations[2].Id, + Weight: 50000, + }, + { + Variation: f.Variations[3].Id, + Weight: 8000, + }, + }, + }, + } + changeCmd, err := util.MarshalCommand(&feature.ChangeRuleStrategyCommand{ + Id: featureID, + RuleId: rule.Id, + Strategy: expected, + }) + require.NoError(t, err) + updateFeatureTargeting(t, client, changeCmd, featureID) + f = getFeature(t, featureID, client) + assert.Equal(t, cmd.Id, f.Id, fmt.Sprintf("Different ids. Expected: %s actual: %s", cmd.Id, f.Id)) + actual := f.Rules[0].Strategy + if !proto.Equal(expected, actual) { + t.Fatalf("Strategy is not equal. 
Expected: %s actual: %s", expected, actual) + } +} + +func TestDeleteRule(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + featureID := newFeatureID(t) + cmd := newCreateFeatureCommand(featureID) + createFeature(t, client, cmd) + rule := newFixedStrategyRule(getFeature(t, featureID, client).Variations[1].Id) + addCmd, _ := util.MarshalCommand(&feature.AddRuleCommand{Rule: rule}) + updateFeatureTargeting(t, client, addCmd, featureID) + f := getFeature(t, featureID, client) + rule = f.Rules[0] + removeCmd, _ := util.MarshalCommand(&feature.DeleteRuleCommand{Id: rule.Id}) + updateFeatureTargeting(t, client, removeCmd, featureID) + r := getFeature(t, featureID, client).Rules + if f.Id != cmd.Id { + t.Fatalf("Different ids. Expected: %s actual: %s", cmd.Id, f.Id) + } + if len(r) > 0 { + t.Fatalf("The wrong rule might has been delete. Expected: %v actual: %v", rule, r) + } +} + +func TestAddClause(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + featureID := newFeatureID(t) + cmd := newCreateFeatureCommand(featureID) + createFeature(t, client, cmd) + rule := newFixedStrategyRule(getFeature(t, featureID, client).Variations[1].Id) + addCmd, _ := util.MarshalCommand(&feature.AddRuleCommand{Rule: rule}) + updateFeatureTargeting(t, client, addCmd, featureID) + r := getFeature(t, featureID, client).Rules[0] + clause := newClause() + addCmd, _ = util.MarshalCommand(&feature.AddClauseCommand{ + RuleId: r.Id, + Clause: clause, + }) + updateFeatureTargeting(t, client, addCmd, featureID) + f := getFeature(t, featureID, client) + r = f.Rules[0] + if f.Id != cmd.Id { + t.Fatalf("Different ids. Expected: %s actual: %s", cmd.Id, f.Id) + } + expectedSize := 3 + actualSize := len(r.Clauses) + if expectedSize != actualSize { + t.Fatalf("Clauses have different sizes. 
Expected: %d actual: %d", expectedSize, actualSize) + } + compareClause(t, clause, r.Clauses[2]) +} + +func TestDeleteClause(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + featureID := newFeatureID(t) + cmd := newCreateFeatureCommand(featureID) + createFeature(t, client, cmd) + rule := newFixedStrategyRule(getFeature(t, featureID, client).Variations[1].Id) + addCmd, _ := util.MarshalCommand(&feature.AddRuleCommand{Rule: rule}) + updateFeatureTargeting(t, client, addCmd, featureID) + r := getFeature(t, featureID, client).Rules[0] + expected := r.Clauses[1] + removeCmd, _ := util.MarshalCommand(&feature.DeleteClauseCommand{ + Id: expected.Id, + RuleId: r.Id, + }) + updateFeatureTargeting(t, client, removeCmd, featureID) + f := getFeature(t, featureID, client) + r = f.Rules[0] + if f.Id != cmd.Id { + t.Fatalf("Different ids. Expected: %s actual: %s", cmd.Id, f.Id) + } + expectedSize := 1 + actualSize := len(r.Clauses) + if expectedSize != actualSize { + t.Fatalf("Clauses have different sizes. Expected: %d actual: %d", expectedSize, actualSize) + } + if proto.Equal(expected, r.Clauses[0]) { + t.Fatalf("The wrong clause might has been delete. 
Expected: %v actual: %v", expected, r.Clauses[0]) + } +} + +func TestChangeClauseAttribute(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + featureID := newFeatureID(t) + cmd := newCreateFeatureCommand(featureID) + createFeature(t, client, cmd) + rule := newFixedStrategyRule(getFeature(t, featureID, client).Variations[1].Id) + addCmd, _ := util.MarshalCommand(&feature.AddRuleCommand{Rule: rule}) + updateFeatureTargeting(t, client, addCmd, featureID) + r := getFeature(t, featureID, client).Rules[0] + c := r.Clauses[1] + expected := &feature.Clause{ + Attribute: "change-clause-attribute", + Operator: c.Operator, + Values: c.Values, + } + changeCmd, _ := util.MarshalCommand(&feature.ChangeClauseAttributeCommand{ + Id: c.Id, + RuleId: r.Id, + Attribute: expected.Attribute, + }) + updateFeatureTargeting(t, client, changeCmd, featureID) + f := getFeature(t, featureID, client) + r = f.Rules[0] + if f.Id != cmd.Id { + t.Fatalf("Different ids. Expected: %s actual: %s", cmd.Id, f.Id) + } + compareClause(t, expected, r.Clauses[1]) +} + +func TestChangeClauseOperator(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + featureID := newFeatureID(t) + cmd := newCreateFeatureCommand(featureID) + createFeature(t, client, cmd) + rule := newFixedStrategyRule(getFeature(t, featureID, client).Variations[1].Id) + addCmd, _ := util.MarshalCommand(&feature.AddRuleCommand{Rule: rule}) + updateFeatureTargeting(t, client, addCmd, featureID) + r := getFeature(t, featureID, client).Rules[0] + c := r.Clauses[1] + expected := &feature.Clause{ + Attribute: c.Attribute, + Operator: feature.Clause_EQUALS, + Values: c.Values, + } + changeCmd, _ := util.MarshalCommand(&feature.ChangeClauseOperatorCommand{ + Id: c.Id, + RuleId: r.Id, + Operator: expected.Operator, + }) + updateFeatureTargeting(t, client, changeCmd, featureID) + f := getFeature(t, featureID, client) + r = f.Rules[0] + if f.Id != cmd.Id { + t.Fatalf("Different ids. 
Expected: %s actual: %s", cmd.Id, f.Id) + } + compareClause(t, expected, r.Clauses[1]) +} + +func TestAddClauseValue(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + featureID := newFeatureID(t) + cmd := newCreateFeatureCommand(featureID) + createFeature(t, client, cmd) + rule := newFixedStrategyRule(getFeature(t, featureID, client).Variations[1].Id) + addCmd, _ := util.MarshalCommand(&feature.AddRuleCommand{Rule: rule}) + updateFeatureTargeting(t, client, addCmd, featureID) + r := getFeature(t, featureID, client).Rules[0] + c := r.Clauses[1] + values := append(c.Values, "new-value") + expected := &feature.Clause{ + Attribute: c.Attribute, + Operator: c.Operator, + Values: values, + } + changeCmd, _ := util.MarshalCommand(&feature.AddClauseValueCommand{ + Id: c.Id, + RuleId: r.Id, + Value: expected.Values[2], + }) + updateFeatureTargeting(t, client, changeCmd, featureID) + f := getFeature(t, featureID, client) + r = f.Rules[0] + if f.Id != cmd.Id { + t.Fatalf("different ids. expected: %s actual: %s", cmd.Id, f.Id) + } + compareClause(t, expected, r.Clauses[1]) +} + +func TestRemoveClauseValue(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + featureID := newFeatureID(t) + cmd := newCreateFeatureCommand(featureID) + createFeature(t, client, cmd) + rule := newFixedStrategyRule(getFeature(t, featureID, client).Variations[1].Id) + addCmd, _ := util.MarshalCommand(&feature.AddRuleCommand{Rule: rule}) + updateFeatureTargeting(t, client, addCmd, featureID) + r := getFeature(t, featureID, client).Rules[0] + c := r.Clauses[0] + expected := &feature.Clause{ + Attribute: c.Attribute, + Operator: c.Operator, + Values: []string{c.Values[0]}, + } + removeCmd, _ := util.MarshalCommand(&feature.RemoveClauseValueCommand{ + Id: c.Id, + RuleId: r.Id, + Value: c.Values[1], + }) + updateFeatureTargeting(t, client, removeCmd, featureID) + f := getFeature(t, featureID, client) + r = f.Rules[0] + if f.Id != cmd.Id { + t.Fatalf("different ids. 
expected: %s actual: %s", cmd.Id, f.Id) + } + expectedSize := 1 + actualSize := len(r.Clauses[0].Values) + if expectedSize != actualSize { + t.Fatalf("Values have different sizes. Expected: %d actual: %d", expectedSize, actualSize) + } + compareClause(t, expected, r.Clauses[0]) +} + +func TestEvaluateFeatures(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + featureID1 := newFeatureID(t) + cmd1 := newCreateFeatureCommand(featureID1) + createFeature(t, client, cmd1) + featureID2 := newFeatureID(t) + cmd2 := newCreateFeatureCommand(featureID2) + createFeature(t, client, cmd2) + enableFeature(t, cmd2.Id, client) + userID := "user-id-01" + tag := tags[0] + res := evaluateFeatures(t, client, userID, tag) + if len(res.UserEvaluations.Evaluations) < 2 { + t.Fatalf("length of user evaluations is not enough. Expected: >=%d, Actual: %d", 2, len(res.UserEvaluations.Evaluations)) + } +} + +// TODO: implement the process to delete new environments so that we can run "TestCloneFeature" +/* +func TestCloneFeature(t *testing.T) { + client := newFeatureClient(t) + featureID := newFeatureID(t) + cmd := newCreateFeatureCommand(featureID) + createFeature(t, client, cmd) + enableFeature(t, featureID, client) + f := getFeature(t, featureID, client) + expected := "new-user" + addCmd, err := util.MarshalCommand(&feature.AddUserToVariationCommand{ + Id: f.Variations[1].Id, + User: expected, + }) + require.NoError(t, err) + updateFeatureTargeting(t, client, addCmd, featureID) + targetEnvironmentNamespace := newUUID(t) + c := newEnvironmentClient(t) + defer c.Close() + envCmd := newEnvironmentCommand(targetEnvironmentNamespace) + createEnvironment(t, c, envCmd) + req := &feature.CloneFeatureRequest{ + Id: featureID, + Command: &feature.CloneFeatureCommand{ + EnvironmentNamespace: targetEnvironmentNamespace, + }, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + if _, err := 
client.CloneFeature(ctx, req); err != nil { + t.Fatal(err) + } + f = getFeature(t, featureID, client) + cf := getClonedFeature(t, featureID, targetEnvironmentNamespace, client) + if cf.Id != f.Id { + t.Fatalf("Different ids. Expected: %s actual: %s", f.Id, cf.Id) + } + if cf.Name != f.Name { + t.Fatalf("Different names. Expected: %s actual: %s", f.Name, cf.Name) + } + if cf.Description != f.Description { + t.Fatalf("Different descriptions. Expected: %s actual: %s", f.Description, cf.Description) + } + if cf.Enabled { + t.Fatalf("Enabled flag is true") + } + if cf.Targets[1].Users[0] != f.Targets[1].Users[0] { + t.Fatalf("User does not match. Expected to be deleted: %s actual: %s", f.Targets[1].Users[0], cf.Targets[1].Users[0]) + } + expectedVersion := int32(1) + if cf.Version != expectedVersion { + t.Fatalf("Different version. Expected: %d actual %d", expectedVersion, f.Version) + } + for i := range cf.Variations { + compareVariation(t, f.Variations[i], cf.Variations[i]) + } + if !reflect.DeepEqual(f.Tags, cf.Tags) { + t.Fatalf("Different tags. Expected: %v actual: %v: ", f.Tags, cf.Tags) + } + featureDefaultOnVariation := findVariation(f.DefaultStrategy.FixedStrategy.Variation, f.Variations) + clonedFeatureDefaultOnVariation := findVariation(cf.DefaultStrategy.FixedStrategy.Variation, cf.Variations) + if clonedFeatureDefaultOnVariation.Value != featureDefaultOnVariation.Value { + t.Fatalf("Different default on variation value. Expected: %s actual: %s", featureDefaultOnVariation.Value, clonedFeatureDefaultOnVariation.Value) + } + featureDefaultOffVariation := findVariation(f.OffVariation, f.Variations) + clonedFeatureDefaultOffVariation := findVariation(cf.OffVariation, cf.Variations) + if clonedFeatureDefaultOffVariation.Value != featureDefaultOffVariation.Value { + t.Fatalf("Different default off variation value. 
Expected: %s actual: %s", featureDefaultOffVariation.Value, clonedFeatureDefaultOffVariation.Value) + } + for i := range cf.Rules { + if cf.Rules[i].Strategy.FixedStrategy.Variation != f.Rules[i].Strategy.FixedStrategy.Variation { + t.Fatalf("Different variation in rules. Expected: %s actual %s", f.Rules[i].Strategy.FixedStrategy.Variation, cf.Rules[i].Strategy.FixedStrategy.Variation) + } + } + rule := newRolloutStrategyRule(f.Variations) + addCmd, err = util.MarshalCommand(&feature.AddRuleCommand{Rule: rule}) + require.NoError(t, err) + updateFeatureTargeting(t, client, addCmd, featureID) + strategy := &feature.RolloutStrategy{ + Variations: []*feature.RolloutStrategy_Variation{ + { + Variation: f.Variations[0].Id, + Weight: 12000, + }, + { + Variation: f.Variations[1].Id, + Weight: 30000, + }, + { + Variation: f.Variations[2].Id, + Weight: 50000, + }, + { + Variation: f.Variations[3].Id, + Weight: 8000, + }, + }, + } + changeCmd, err := util.MarshalCommand(&feature.ChangeRolloutStrategyCommand{ + Id: featureID, + RuleId: rule.Id, + Strategy: strategy, + }) + require.NoError(t, err) + updateFeatureTargeting(t, client, changeCmd, featureID) + changeCmd, err = util.MarshalCommand(&feature.ChangeDefaultStrategyCommand{ + Strategy: &feature.Strategy{ + Type: feature.Strategy_ROLLOUT, + RolloutStrategy: strategy, + }, + }) + require.NoError(t, err) + updateFeatureTargeting(t, client, changeCmd, featureID) + anotherTargetEnvironmentNamespace := newUUID(t) + c = newEnvironmentClient(t) + defer c.Close() + envCmd = newEnvironmentCommand(anotherTargetEnvironmentNamespace) + createEnvironment(t, c, envCmd) + req = &feature.CloneFeatureRequest{ + Id: featureID, + Command: &feature.CloneFeatureCommand{ + EnvironmentNamespace: anotherTargetEnvironmentNamespace, + }, + EnvironmentNamespace: *environmentNamespace, + } + if _, err := client.CloneFeature(ctx, req); err != nil { + t.Fatal(err) + } + f = getFeature(t, featureID, client) + cf = getClonedFeature(t, featureID, 
anotherTargetEnvironmentNamespace, client) + for i := range cf.Rules { + for idx := range cf.Rules[i].Strategy.RolloutStrategy.Variations { + if cf.Rules[i].Strategy.RolloutStrategy.Variations[idx].Weight != f.Rules[i].Strategy.RolloutStrategy.Variations[idx].Weight { + t.Fatalf("Diffrent strategy on variation weight. Expected: %d actual: %d", f.Rules[i].Strategy.RolloutStrategy.Variations[idx].Weight, cf.Rules[i].Strategy.RolloutStrategy.Variations[idx].Weight) + } + } + } + for i := range cf.DefaultStrategy.RolloutStrategy.Variations { + if cf.DefaultStrategy.RolloutStrategy.Variations[i].Weight != f.DefaultStrategy.RolloutStrategy.Variations[i].Weight { + t.Fatalf("Different default on variation weight. Expected: %d actual %d", f.DefaultStrategy.RolloutStrategy.Variations[i].Weight, cf.DefaultStrategy.RolloutStrategy.Variations[i].Weight) + } + } +} +*/ + +func newFeatureID(t *testing.T) string { + if *testID != "" { + return fmt.Sprintf("%s-%s-feature-id-%s", prefixID, *testID, newUUID(t)) + } + return fmt.Sprintf("%s-feature-id-%s", prefixID, newUUID(t)) +} + +func newUUID(t *testing.T) string { + t.Helper() + id, err := uuid.NewUUID() + if err != nil { + t.Fatal(err) + } + return id.String() +} + +func newFeatureClient(t *testing.T) featureclient.Client { + t.Helper() + creds, err := client.NewPerRPCCredentials(*serviceTokenPath) + if err != nil { + t.Fatal("Failed to create RPC credentials:", err) + } + featureClient, err := featureclient.NewClient( + fmt.Sprintf("%s:%d", *webGatewayAddr, *webGatewayPort), + *webGatewayCert, + client.WithPerRPCCredentials(creds), + client.WithDialTimeout(30*time.Second), + client.WithBlock(), + ) + if err != nil { + t.Fatal("Failed to create feature client:", err) + } + return featureClient +} + +func newCreateFeatureCommand(featureID string) *feature.CreateFeatureCommand { + return &feature.CreateFeatureCommand{ + Id: featureID, + Name: "e2e-test-feature-name", + Description: "e2e-test-feature-description", + Variations: 
[]*feature.Variation{ + { + Value: "A", + Name: "Variation A", + Description: "Thing does A", + }, + { + Value: "B", + Name: "Variation B", + Description: "Thing does B", + }, + { + Value: "C", + Name: "Variation C", + Description: "Thing does C", + }, + { + Value: "D", + Name: "Variation D", + Description: "Thing does D", + }, + }, + Tags: []string{ + "e2e-test-tag-1", + "e2e-test-tag-2", + "e2e-test-tag-3", + }, + DefaultOnVariationIndex: &wrappers.Int32Value{Value: int32(0)}, + DefaultOffVariationIndex: &wrappers.Int32Value{Value: int32(1)}, + } +} + +func newAddVariationsCommand(t *testing.T, vs []*feature.Variation) []*feature.Command { + var cmds []*feature.Command + for _, v := range vs { + cmd, err := util.MarshalCommand(&feature.AddVariationCommand{ + Value: v.Value, + Name: v.Name, + Description: v.Description, + }) + if err != nil { + t.Fatal(err) + } + cmds = append(cmds, &feature.Command{Command: cmd}) + } + return cmds +} + +func newRemoveVariationsCommand(t *testing.T, featureIDS []string) []*feature.Command { + var cmds []*feature.Command + for _, id := range featureIDS { + cmd, err := util.MarshalCommand(&feature.RemoveVariationCommand{Id: id}) + if err != nil { + t.Fatal(err) + } + cmds = append(cmds, &feature.Command{Command: cmd}) + } + return cmds +} + +func newVariations(randomValues []string) []*feature.Variation { + var vs []*feature.Variation + for _, value := range randomValues { + v := &feature.Variation{ + Value: fmt.Sprintf("%s", value), + Name: fmt.Sprintf("Variation %s", value), + Description: fmt.Sprintf("Thing does %s", value), + } + vs = append(vs, v) + } + return vs +} + +func createRandomIDFeatures(t *testing.T, size int, client featureclient.Client) { + t.Helper() + for i := 0; i < size; i++ { + createFeature(t, client, newCreateFeatureCommand(newFeatureID(t))) + } +} + +func createFeatures(t *testing.T, featureIDS []string, client featureclient.Client) { + t.Helper() + for _, id := range featureIDS { + createFeature(t, client, 
newCreateFeatureCommand(id)) + } +} + +func createFeature(t *testing.T, client featureclient.Client, cmd *feature.CreateFeatureCommand) { + t.Helper() + createReq := &feature.CreateFeatureRequest{ + Command: cmd, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + if _, err := client.CreateFeature(ctx, createReq); err != nil { + t.Fatal(err) + } +} + +func getFeature(t *testing.T, featureID string, client featureclient.Client) *feature.Feature { + t.Helper() + getReq := &feature.GetFeatureRequest{ + Id: featureID, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + response, err := client.GetFeature(ctx, getReq) + if err != nil { + t.Fatal("Failed to get feature:", err) + } + return response.Feature +} + +/* +func getClonedFeature(t *testing.T, featureID, en string, client featureclient.Client) *feature.Feature { + t.Helper() + getReq := &feature.GetFeatureRequest{ + Id: featureID, + EnvironmentNamespace: en, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + response, err := client.GetFeature(ctx, getReq) + if err != nil { + t.Fatal("Failed to get feature:", err) + } + return response.Feature +} +*/ + +func enableFeatures(t *testing.T, featureIDS []string, client featureclient.Client) { + t.Helper() + for _, featureID := range featureIDS { + enableFeature(t, featureID, client) + } +} + +func enableFeature(t *testing.T, featureID string, client featureclient.Client) { + t.Helper() + enableReq := &feature.EnableFeatureRequest{ + Id: featureID, + Command: &feature.EnableFeatureCommand{}, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + if _, err := client.EnableFeature(ctx, enableReq); err != nil { + t.Fatalf("Failed to enable feature id: %s. 
Error: %v", featureID, err) + } +} + +func checkEnabledFlag(t *testing.T, features []*feature.Feature) { + t.Helper() + for _, feature := range features { + if !feature.Enabled { + t.Fatal("Feature enabled flag is false. ID:", feature.Id) + } + } +} + +func compareVariation(t *testing.T, expected *feature.Variation, actual *feature.Variation) { + t.Helper() + if expected.Value != actual.Value { + t.Fatalf("Different values. Expected: %s actual: %s", expected.Value, actual.Value) + } + if expected.Name != actual.Name { + t.Fatalf("Different names. Expected: %s actual: %s", expected.Name, actual.Name) + } + if expected.Description != actual.Description { + t.Fatalf("Different descriptions. Expected: %s actual: %s", expected.Description, actual.Description) + } +} + +func updateVariations(t *testing.T, featureID string, commands []*feature.Command, client featureclient.Client) { + t.Helper() + updateReq := &feature.UpdateFeatureVariationsRequest{ + Id: featureID, + Commands: commands, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + if _, err := client.UpdateFeatureVariations(ctx, updateReq); err != nil { + t.Fatal(err) + } +} + +func findOneOfVariations(ids []string, vs []*feature.Variation) *feature.Variation { + for _, id := range ids { + if variation := findVariation(id, vs); variation != nil { + return variation + } + } + return nil +} + +func findVariation(id string, vs []*feature.Variation) *feature.Variation { + for i := range vs { + if vs[i].Id == id { + return vs[i] + } + } + return nil +} + +func newFixedStrategyRule(variationID string) *feature.Rule { + uuid, _ := uuid.NewUUID() + return &feature.Rule{ + Id: uuid.String(), + Strategy: &feature.Strategy{ + Type: feature.Strategy_FIXED, + FixedStrategy: &feature.FixedStrategy{ + Variation: variationID, + }, + }, + Clauses: []*feature.Clause{ + { + Attribute: "attribute-1", + Operator: feature.Clause_EQUALS, + Values: 
[]string{"value-1", "value-2"}, + }, + { + Attribute: "attribute-2", + Operator: feature.Clause_IN, + Values: []string{"value-1", "value-2"}, + }, + }, + } +} + +func newRolloutStrategyRule(variations []*feature.Variation) *feature.Rule { + uuid, _ := uuid.NewUUID() + return &feature.Rule{ + Id: uuid.String(), + Strategy: &feature.Strategy{ + Type: feature.Strategy_ROLLOUT, + RolloutStrategy: &feature.RolloutStrategy{ + Variations: []*feature.RolloutStrategy_Variation{ + { + Variation: variations[0].Id, + Weight: 70000, + }, + { + Variation: variations[1].Id, + Weight: 12000, + }, + { + Variation: variations[2].Id, + Weight: 10000, + }, + { + Variation: variations[3].Id, + Weight: 8000, + }, + }, + }, + }, + Clauses: []*feature.Clause{ + { + Attribute: "attribute-1", + Operator: feature.Clause_EQUALS, + Values: []string{"value-1", "value-2"}, + }, + { + Attribute: "attribute-2", + Operator: feature.Clause_IN, + Values: []string{"value-1", "value-2"}, + }, + }, + } +} + +func newClause() *feature.Clause { + return &feature.Clause{ + Attribute: "attribute-3", + Operator: feature.Clause_EQUALS, + Values: []string{"value-3-a", "value-3-b"}, + } +} + +func compareClause(t *testing.T, expected *feature.Clause, actual *feature.Clause) { + t.Helper() + if expected.Attribute != actual.Attribute { + t.Fatalf("Attribute does not match. Expected: %s actual %s", expected.Attribute, actual.Attribute) + } + if expected.Operator != actual.Operator { + t.Fatalf("Operator does not match. Expected: %v actual %v", expected.Operator, actual.Operator) + } + if !reflect.DeepEqual(expected.Values, actual.Values) { + t.Fatalf("Values does not match. 
Expected: %v actual %v", expected.Values, actual.Values) + } +} + +func updateFeatureTargeting(t *testing.T, client featureclient.Client, cmd *any.Any, featureID string) { + t.Helper() + updateReq := &feature.UpdateFeatureTargetingRequest{ + Id: featureID, + Commands: []*feature.Command{ + {Command: cmd}, + }, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + if _, err := client.UpdateFeatureTargeting(ctx, updateReq); err != nil { + t.Fatal(err) + } +} + +func compareFeatures(t *testing.T, expected []*feature.Feature, actual []*feature.Feature) { + t.Helper() + if len(actual) != len(expected) { + t.Fatalf("Different sizes. Expected: %d, actual: %d", len(expected), len(actual)) + } + for i := 0; i < len(expected); i++ { + if !proto.Equal(actual[i], expected[i]) { + t.Fatalf("Features do not match. Expected: %v, actual: %v", expected[i], actual[i]) + } + } +} + +func evaluateFeatures(t *testing.T, client featureclient.Client, userID, tag string) *feature.EvaluateFeaturesResponse { + t.Helper() + req := &feature.EvaluateFeaturesRequest{ + User: &userproto.User{Id: userID}, + EnvironmentNamespace: *environmentNamespace, + Tag: tag, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + res, err := client.EvaluateFeatures(ctx, req) + if err != nil { + t.Fatal(err) + return nil + } + return res +} + +/* +func newEnvironmentClient(t *testing.T) environmentclient.Client { + t.Helper() + creds, err := rpcclient.NewPerRPCCredentials(*serviceTokenPath) + if err != nil { + t.Fatal("Failed to create RPC credentials:", err) + } + client, err := environmentclient.NewClient( + fmt.Sprintf("%s:%d", *webGatewayAddr, *webGatewayPort), + *webGatewayCert, + rpcclient.WithPerRPCCredentials(creds), + rpcclient.WithDialTimeout(30*time.Second), + rpcclient.WithBlock(), + ) + if err != nil { + t.Fatal("Failed to create environment client:", err) + } + return client +} 
+ +func newEnvironmentCommand(id string) *environment.CreateEnvironmentCommand { + namespace := strings.Replace(id, "-", "", -1) + return &environment.CreateEnvironmentCommand{ + Namespace: namespace, + Name: "e2e-test-environment-namespace", + Description: "e2e-test-environment-namespace-description", + Id: id, + ProjectId: defaultProjectID, + } +} + +func createEnvironment(t *testing.T, client environmentclient.Client, cmd *environment.CreateEnvironmentCommand) { + t.Helper() + createReq := &environment.CreateEnvironmentRequest{ + Command: cmd, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + if _, err := client.CreateEnvironment(ctx, createReq); err != nil { + t.Fatal(err) + } +} +*/ diff --git a/test/e2e/feature/segment_test.go b/test/e2e/feature/segment_test.go new file mode 100644 index 000000000..63d10cd20 --- /dev/null +++ b/test/e2e/feature/segment_test.go @@ -0,0 +1,219 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package feature + +import ( + "context" + "fmt" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "github.com/stretchr/testify/assert" + + featureclient "github.com/bucketeer-io/bucketeer/pkg/feature/client" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +const ( + prefixSegment = "e2e-test" +) + +func TestCreateSegment(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + client := newFeatureClient(t) + cmd := &featureproto.CreateSegmentCommand{ + Name: newSegmentName(t), + Description: fmt.Sprintf("%s-description", prefixSegment), + } + req := &featureproto.CreateSegmentRequest{ + Command: cmd, + EnvironmentNamespace: *environmentNamespace, + } + res, err := client.CreateSegment(ctx, req) + assert.NoError(t, err) + assert.NotEmpty(t, res.Segment.Id) + assert.Equal(t, cmd.Name, res.Segment.Name) + assert.Equal(t, cmd.Description, res.Segment.Description) + assert.Zero(t, res.Segment.Rules) + assert.NotZero(t, res.Segment.CreatedAt) + assert.Zero(t, res.Segment.UpdatedAt) + assert.Equal(t, int64(1), res.Segment.Version) + assert.Equal(t, false, res.Segment.Deleted) +} + +func TestGetSegment(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + client := newFeatureClient(t) + expected := createSegment(ctx, t, client) + actual := getSegment(ctx, t, client, expected.Id) + if !proto.Equal(expected, actual) { + t.Fatalf("Different segments. 
Expected: %v, actual: %v", expected, actual) + } +} + +func TestChangeSegmentName(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + client := newFeatureClient(t) + id := createSegment(ctx, t, client).Id + cmd := &featureproto.ChangeSegmentNameCommand{ + Name: fmt.Sprintf("%s-change-name", prefixSegment), + } + cmdChange, err := ptypes.MarshalAny(cmd) + assert.NoError(t, err) + res, err := client.UpdateSegment( + ctx, + &featureproto.UpdateSegmentRequest{ + Id: id, + Commands: []*featureproto.Command{ + {Command: cmdChange}, + }, + EnvironmentNamespace: *environmentNamespace, + }, + ) + assert.NotNil(t, res) + assert.NoError(t, err) + segment := getSegment(ctx, t, client, id) + assert.Equal(t, cmd.Name, segment.Name) +} + +func TestChangeSegmentDescription(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + client := newFeatureClient(t) + id := createSegment(ctx, t, client).Id + cmd := &featureproto.ChangeSegmentDescriptionCommand{ + Description: fmt.Sprintf("%s-change-description", prefixSegment), + } + cmdChange, err := ptypes.MarshalAny(cmd) + assert.NoError(t, err) + res, err := client.UpdateSegment( + ctx, + &featureproto.UpdateSegmentRequest{ + Id: id, + Commands: []*featureproto.Command{ + {Command: cmdChange}, + }, + EnvironmentNamespace: *environmentNamespace, + }, + ) + assert.NotNil(t, res) + assert.NoError(t, err) + segment := getSegment(ctx, t, client, id) + assert.Equal(t, cmd.Description, segment.Description) +} + +func TestDeleteSegment(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + client := newFeatureClient(t) + id := createSegment(ctx, t, client).Id + res, err := client.DeleteSegment( + ctx, + &featureproto.DeleteSegmentRequest{ + Id: id, + Command: &featureproto.DeleteSegmentCommand{}, + EnvironmentNamespace: *environmentNamespace, + }, + ) + assert.NotNil(t, res) + assert.NoError(t, err) 
+ segment := getSegment(ctx, t, client, id) + assert.Equal(t, true, segment.Deleted) +} + +func TestListSegmentsPageSize(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + client := newFeatureClient(t) + for i := 0; i < 2; i++ { + createSegment(ctx, t, client) + } + pageSize := int64(1) + res, err := client.ListSegments(ctx, &featureproto.ListSegmentsRequest{ + PageSize: pageSize, + EnvironmentNamespace: *environmentNamespace, + }) + assert.NoError(t, err) + assert.Equal(t, pageSize, int64(len(res.Segments))) +} + +func TestListSegmentsCursor(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + client := newFeatureClient(t) + for i := 0; i < 4; i++ { + createSegment(ctx, t, client) + } + pageSize := int64(2) + res, err := client.ListSegments(ctx, &featureproto.ListSegmentsRequest{ + PageSize: pageSize, + EnvironmentNamespace: *environmentNamespace, + }) + assert.NoError(t, err) + assert.NotEmpty(t, res.Cursor) + resCursor, err := client.ListSegments( + ctx, + &featureproto.ListSegmentsRequest{ + PageSize: pageSize, + Cursor: res.Cursor, + EnvironmentNamespace: *environmentNamespace, + }, + ) + assert.NoError(t, err) + segmentsSize := len(res.Segments) + assert.Equal(t, segmentsSize, len(resCursor.Segments)) + for i := 0; i < segmentsSize; i++ { + if proto.Equal(res.Segments[i], resCursor.Segments[i]) { + t.Fatalf("Equal segments. 
Expected: %v, actual: %v", res.Segments, resCursor.Segments) + } + } +} + +func getSegment(ctx context.Context, t *testing.T, client featureclient.Client, id string) *featureproto.Segment { + t.Helper() + req := &featureproto.GetSegmentRequest{ + Id: id, + EnvironmentNamespace: *environmentNamespace, + } + res, err := client.GetSegment(ctx, req) + assert.NoError(t, err) + return res.Segment +} + +func createSegment(ctx context.Context, t *testing.T, client featureclient.Client) *featureproto.Segment { + t.Helper() + cmd := &featureproto.CreateSegmentCommand{ + Name: newSegmentName(t), + Description: fmt.Sprintf("%s-%s", "description", prefixSegment), + } + req := &featureproto.CreateSegmentRequest{ + Command: cmd, + EnvironmentNamespace: *environmentNamespace, + } + res, err := client.CreateSegment(ctx, req) + assert.NoError(t, err) + return res.Segment +} + +func newSegmentName(t *testing.T) string { + if *testID != "" { + return fmt.Sprintf("%s-%s-name-%s", prefixSegment, *testID, newUUID(t)) + } + return fmt.Sprintf("%s-name-%s", prefixSegment, newUUID(t)) +} diff --git a/test/e2e/feature/segment_user_test.go b/test/e2e/feature/segment_user_test.go new file mode 100644 index 000000000..234a89ccb --- /dev/null +++ b/test/e2e/feature/segment_user_test.go @@ -0,0 +1,296 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package feature + +import ( + "context" + "fmt" + "sort" + "testing" + "time" + + "github.com/golang/protobuf/proto" + wrappersproto "github.com/golang/protobuf/ptypes/wrappers" + "github.com/stretchr/testify/assert" + + featureclient "github.com/bucketeer-io/bucketeer/pkg/feature/client" + "github.com/bucketeer-io/bucketeer/pkg/feature/domain" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +const ( + segmentUserRetryTimes = 20 +) + +func TestAddSegmentUserCommand(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + client := newFeatureClient(t) + segmentID := createSegment(ctx, t, client).Id + userID := newUserID(t) + testcases := []struct { + userID string + state featureproto.SegmentUser_State + }{ + { + userID: userID, + state: featureproto.SegmentUser_INCLUDED, + }, + } + for _, tc := range testcases { + addSegmentUser(ctx, t, client, segmentID, []string{tc.userID}, tc.state) + user := getSegmentUser(ctx, t, client, segmentID, tc.userID, tc.state) + id := domain.SegmentUserID(segmentID, tc.userID, tc.state) + assert.Equal(t, id, user.Id) + assert.Equal(t, segmentID, user.SegmentId) + assert.Equal(t, tc.userID, user.UserId) + assert.Equal(t, tc.state, user.State) + assert.Equal(t, false, user.Deleted) + } +} + +func TestDeleteSegmentUserCommand(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + client := newFeatureClient(t) + segmentID := createSegment(ctx, t, client).Id + userID := newUserID(t) + testcases := []struct { + userID string + state featureproto.SegmentUser_State + }{ + { + userID: userID, + state: featureproto.SegmentUser_INCLUDED, + }, + } + for _, tc := range testcases { + addSegmentUser(ctx, t, client, segmentID, []string{tc.userID}, tc.state) + deleteSegmentUser(ctx, t, client, segmentID, []string{tc.userID}, tc.state) + listRes := listSegmentUsers( + ctx, + t, + client, + segmentID, + 
&wrappersproto.Int32Value{Value: int32(tc.state)}, + ) + assert.Empty(t, len(listRes.Users)) + } +} + +func TestListSegmentUsersPageSize(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + client := newFeatureClient(t) + segmentID := createSegment(ctx, t, client).Id + userIDs := []string{newUserID(t), newUserID(t)} + addSegmentUser(ctx, t, client, segmentID, userIDs, featureproto.SegmentUser_INCLUDED) + pageSize := int64(1) + res, err := client.ListSegmentUsers(ctx, &featureproto.ListSegmentUsersRequest{ + PageSize: pageSize, + SegmentId: segmentID, + State: &wrappersproto.Int32Value{Value: int32(featureproto.SegmentUser_INCLUDED)}, + EnvironmentNamespace: *environmentNamespace, + }) + assert.NoError(t, err) + assert.Equal(t, pageSize, int64(len(res.Users))) +} + +func TestListSegmentUsersCursor(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + client := newFeatureClient(t) + segmentID := createSegment(ctx, t, client).Id + userIDs := []string{newUserID(t), newUserID(t), newUserID(t), newUserID(t)} + addSegmentUser(ctx, t, client, segmentID, userIDs, featureproto.SegmentUser_INCLUDED) + var lastUsers []*featureproto.SegmentUser + pageSize := int64(2) + state := &wrappersproto.Int32Value{Value: int32(featureproto.SegmentUser_INCLUDED)} + cursor := "" + for i := 0; i < 3; i++ { + res, err := client.ListSegmentUsers(ctx, &featureproto.ListSegmentUsersRequest{ + PageSize: pageSize, + Cursor: cursor, + SegmentId: segmentID, + State: state, + EnvironmentNamespace: *environmentNamespace, + }) + assert.NoError(t, err) + assert.NotEmpty(t, res.Cursor) + cursor = res.Cursor + switch i { + case 0: + assert.Equal(t, int(pageSize), len(res.Users)) + copySegmentUsers(lastUsers, res.Users) + break + case 1: + assert.Equal(t, int(pageSize), len(res.Users)) + if containsSegmentUser(lastUsers, res.Users) { + t.Fatalf("Segment user from the last 
response was found in the actual response. Last response: %v, actual response: %v", lastUsers, res.Users) + } + break + case 2: + assert.Zero(t, len(res.Users)) + break + } + } +} + +func TestListSegmentUsersWithoutState(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + client := newFeatureClient(t) + segmentID := createSegment(ctx, t, client).Id + userIDs := []string{newUserID(t)} + addSegmentUser(ctx, t, client, segmentID, userIDs, featureproto.SegmentUser_INCLUDED) + res := listSegmentUsers(ctx, t, client, segmentID, nil) + assert.Equal(t, 1, len(res.Users)) + assert.Equal(t, segmentID, res.Users[0].SegmentId) + assert.Equal(t, userIDs[0], res.Users[0].UserId) + assert.Equal(t, featureproto.SegmentUser_INCLUDED, res.Users[0].State) +} + +func TestBulkUploadAndDownloadSegmentUsers(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + client := newFeatureClient(t) + segmentID := createSegment(ctx, t, client).Id + uids := []string{newUserID(t), newUserID(t), newUserID(t)} + sort.Strings(uids) + userIDs := []byte(fmt.Sprintf("%s\n%s\n%s\n", uids[0], uids[1], uids[2])) + uploadRes, err := client.BulkUploadSegmentUsers(ctx, &featureproto.BulkUploadSegmentUsersRequest{ + EnvironmentNamespace: *environmentNamespace, + SegmentId: segmentID, + Command: &featureproto.BulkUploadSegmentUsersCommand{ + Data: userIDs, + State: featureproto.SegmentUser_INCLUDED, + }, + }) + assert.NoError(t, err) + assert.NotNil(t, uploadRes) + for i := 0; i < segmentUserRetryTimes; i++ { + downloadRes, err := bulkDownloadSegmentUsers(t, client, segmentID) + if err == nil { + assert.Equal(t, string(userIDs), string(downloadRes.Data)) + break + } + if i == segmentUserRetryTimes-1 { + t.Fatalf("SegmentUsers cannot be downloaded.") + } + time.Sleep(time.Second) + } +} + +func addSegmentUser(ctx context.Context, t *testing.T, client featureclient.Client, segmentID 
string, userIDs []string, state featureproto.SegmentUser_State) { + t.Helper() + req := &featureproto.AddSegmentUserRequest{ + Id: segmentID, + Command: &featureproto.AddSegmentUserCommand{ + UserIds: userIDs, + State: state, + }, + EnvironmentNamespace: *environmentNamespace, + } + res, err := client.AddSegmentUser(ctx, req) + assert.NotNil(t, res) + assert.NoError(t, err) +} + +func deleteSegmentUser(ctx context.Context, t *testing.T, client featureclient.Client, segmentID string, userIDs []string, state featureproto.SegmentUser_State) { + req := &featureproto.DeleteSegmentUserRequest{ + Id: segmentID, + Command: &featureproto.DeleteSegmentUserCommand{ + UserIds: userIDs, + State: state, + }, + EnvironmentNamespace: *environmentNamespace, + } + res, err := client.DeleteSegmentUser(ctx, req) + assert.NotNil(t, res) + assert.NoError(t, err) +} + +func getSegmentUser(ctx context.Context, t *testing.T, client featureclient.Client, segmentID string, userID string, state featureproto.SegmentUser_State) *featureproto.SegmentUser { + t.Helper() + req := &featureproto.GetSegmentUserRequest{ + SegmentId: segmentID, + UserId: userID, + State: state, + EnvironmentNamespace: *environmentNamespace, + } + res, err := client.GetSegmentUser(ctx, req) + assert.NoError(t, err) + return res.User +} + +func listSegmentUsers(ctx context.Context, t *testing.T, client featureclient.Client, segmentID string, state *wrappersproto.Int32Value) *featureproto.ListSegmentUsersResponse { + t.Helper() + req := &featureproto.ListSegmentUsersRequest{ + SegmentId: segmentID, + State: state, + EnvironmentNamespace: *environmentNamespace, + } + res, err := client.ListSegmentUsers(ctx, req) + assert.NoError(t, err) + return res +} + +func copySegmentUsers(dst []*featureproto.SegmentUser, src []*featureproto.SegmentUser) { + dst = make([]*featureproto.SegmentUser, 0, len(src)) + for _, s := range src { + dst = append(dst, &featureproto.SegmentUser{ + Id: s.Id, + SegmentId: s.SegmentId, + UserId: 
s.UserId, + State: s.State, + Deleted: s.Deleted, + }) + } +} + +func containsSegmentUser(lastUsers []*featureproto.SegmentUser, actualUsers []*featureproto.SegmentUser) bool { + for _, user := range lastUsers { + for _, u := range actualUsers { + if proto.Equal(user, u) { + return true + } + } + } + return false +} + +func bulkDownloadSegmentUsers(t *testing.T, client featureclient.Client, segmentID string) (*featureproto.BulkDownloadSegmentUsersResponse, error) { + t.Helper() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + return client.BulkDownloadSegmentUsers(ctx, &featureproto.BulkDownloadSegmentUsersRequest{ + EnvironmentNamespace: *environmentNamespace, + SegmentId: segmentID, + State: featureproto.SegmentUser_INCLUDED, + }) +} + +func newUserID(t *testing.T) string { + if *testID != "" { + return fmt.Sprintf("%s-%s-user-id-%s", prefixID, *testID, newUUID(t)) + } + return fmt.Sprintf("%s-user-id-%s", prefixID, newUUID(t)) +} diff --git a/test/e2e/feature/tag_test.go b/test/e2e/feature/tag_test.go new file mode 100644 index 000000000..50ac9c9ec --- /dev/null +++ b/test/e2e/feature/tag_test.go @@ -0,0 +1,111 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package feature + +import ( + "context" + "testing" + "time" + + featureclient "github.com/bucketeer-io/bucketeer/pkg/feature/client" + "github.com/bucketeer-io/bucketeer/proto/feature" +) + +func TestCreateAndListTag(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + client := newFeatureClient(t) + cmd := newCreateFeatureCommand(newFeatureID(t)) + createFeature(t, client, cmd) + actual := listTags(ctx, t, client) + tags := findTags(actual, cmd.Tags) + if len(tags) != len(cmd.Tags) { + t.Fatalf("Different sizes. Expected: %d, Actual: %d", len(cmd.Tags), len(tags)) + } +} + +func TestUpdateTag(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + client := newFeatureClient(t) + featureID := newFeatureID(t) + cmd := newCreateFeatureCommand(featureID) + createFeature(t, client, cmd) + actual := listTags(ctx, t, client) + tags := findTags(actual, cmd.Tags) + if len(tags) != len(cmd.Tags) { + t.Fatalf("Different sizes. Expected: %d, Actual: %d", len(cmd.Tags), len(tags)) + } + + newTag := "tag-1" + addTag(t, newTag, featureID, client) + expected := append(cmd.Tags, newTag) + time.Sleep(time.Second * 3) + actual = listTags(ctx, t, client) + tags = findTags(actual, expected) + if len(tags) != len(expected) { + t.Fatalf("Different sizes. 
// existTag reports whether target is present in tags.
func existTag(tags []string, target string) bool {
	for i := range tags {
		if tags[i] == target {
			return true
		}
	}
	return false
}
+// See the License for the specific language governing permissions and +// limitations under the License. + +package feature + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/golang/protobuf/proto" + "github.com/stretchr/testify/assert" + + "github.com/bucketeer-io/bucketeer/pkg/feature/domain" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" +) + +func TestUpsertAndGetUserEvaluations(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + client := newFeatureClient(t) + timestamp := time.Now().Unix() + userID := fmt.Sprintf("e2e-test-ue-user-%s", newUUID(t)) + reqUpsert := &featureproto.UpsertUserEvaluationRequest{ + EnvironmentNamespace: *environmentNamespace, + Tag: tags[0], + Evaluation: createEvaluation(t, userID, int32(timestamp)), + } + respUpsert, err := client.UpsertUserEvaluation(ctx, reqUpsert) + assert.NoError(t, err) + assert.NotNil(t, respUpsert) + req := &featureproto.GetUserEvaluationsRequest{ + EnvironmentNamespace: *environmentNamespace, + Tag: tags[0], + UserId: userID, + } + resp, err := client.GetUserEvaluations(ctx, req) + assert.NoError(t, err) + assert.True(t, containsEvaluation(t, reqUpsert.Evaluation, resp.Evaluations)) +} + +func createEvaluation(t *testing.T, userID string, featureVersion int32) *featureproto.Evaluation { + t.Helper() + return &featureproto.Evaluation{ + Id: domain.EvaluationID( + "feature-id", + featureVersion, + userID, + ), + FeatureId: "feature-id", + FeatureVersion: featureVersion, + UserId: userID, + VariationId: "variation-id", + VariationValue: "variation-value", + Reason: &featureproto.Reason{ + Type: featureproto.Reason_DEFAULT, + }, + } +} + +func containsEvaluation( + t *testing.T, + evaluation *featureproto.Evaluation, + evaluations []*featureproto.Evaluation, +) bool { + t.Helper() + for _, e := range evaluations { + if proto.Equal(e, evaluation) { + return true + } + } + return false +} diff --git 
a/test/e2e/gateway/BUILD.bazel b/test/e2e/gateway/BUILD.bazel new file mode 100644 index 000000000..af3593432 --- /dev/null +++ b/test/e2e/gateway/BUILD.bazel @@ -0,0 +1,27 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_test") + +go_test( + name = "go_default_test", + srcs = [ + "api_grpc_test.go", + "api_test.go", + ], + data = glob(["testdata/**"]), + deps = [ + "//pkg/feature/client:go_default_library", + "//pkg/gateway/client:go_default_library", + "//pkg/rpc/client:go_default_library", + "//pkg/uuid:go_default_library", + "//proto/event/client:go_default_library", + "//proto/feature:go_default_library", + "//proto/gateway:go_default_library", + "//proto/user:go_default_library", + "//test/e2e/util:go_default_library", + "@com_github_golang_protobuf//ptypes:go_default_library_gen", + "@com_github_stretchr_testify//assert:go_default_library", + "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + "@org_golang_google_protobuf//encoding/protojson:go_default_library", + ], +) diff --git a/test/e2e/gateway/api_grpc_test.go b/test/e2e/gateway/api_grpc_test.go new file mode 100644 index 000000000..1128ce9c0 --- /dev/null +++ b/test/e2e/gateway/api_grpc_test.go @@ -0,0 +1,444 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package gateway + +import ( + "context" + "flag" + "fmt" + "testing" + "time" + + "github.com/golang/protobuf/ptypes/wrappers" + "github.com/stretchr/testify/assert" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/golang/protobuf/ptypes" + + featureclient "github.com/bucketeer-io/bucketeer/pkg/feature/client" + gatewayclient "github.com/bucketeer-io/bucketeer/pkg/gateway/client" + rpcclient "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + "github.com/bucketeer-io/bucketeer/pkg/uuid" + eventproto "github.com/bucketeer-io/bucketeer/proto/event/client" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" + gatewayproto "github.com/bucketeer-io/bucketeer/proto/gateway" + userproto "github.com/bucketeer-io/bucketeer/proto/user" +) + +const ( + prefixTestName = "e2e-test" + timeout = 20 * time.Second +) + +var ( + webGatewayAddr = flag.String("web-gateway-addr", "", "Web gateway endpoint address") + webGatewayPort = flag.Int("web-gateway-port", 443, "Web gateway endpoint port") + webGatewayCert = flag.String("web-gateway-cert", "", "Web gateway crt file") + apiKeyPath = flag.String("api-key", "", "Api key path for web gateway") + gatewayAddr = flag.String("gateway-addr", "", "Gateway endpoint address") + gatewayPort = flag.Int("gateway-port", 443, "Gateway endpoint port") + gatewayCert = flag.String("gateway-cert", "", "Gateway crt file") + serviceTokenPath = flag.String("service-token", "", "Service token path") + environmentNamespace = flag.String("environment-namespace", "", "Environment namespace") + testID = flag.String("test-id", "", "test ID") +) + +func TestAPIKey(t *testing.T) { + t.Parallel() + creds, err := gatewayclient.NewPerRPCCredentials("testdata/invalid-apikey") + if err != nil { + t.Fatal("Failed to create RPC credentials:", err) + } + c, err := gatewayclient.NewClient( + fmt.Sprintf("%s:%d", *gatewayAddr, *gatewayPort), + *gatewayCert, + rpcclient.WithPerRPCCredentials(creds), + 
rpcclient.WithDialTimeout(timeout), + rpcclient.WithBlock(), + ) + if err != nil { + t.Fatal("Failed to create gateway client:", err) + } + defer c.Close() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + req := &gatewayproto.GetEvaluationsRequest{ + Tag: "tag", + User: &userproto.User{Id: "userID"}, + } + response, err := c.GetEvaluations(ctx, req) + assert.Nil(t, response) + st, ok := status.FromError(err) + if !ok { + t.Fatalf("not ok") + } + assert.Equal(t, st.Code(), codes.PermissionDenied) +} + +func TestGrpcGetEvaluationsWithoutCreatingFeature(t *testing.T) { + t.Parallel() + uuid := newUUID(t) + tag := fmt.Sprintf("%s-tag-%s", prefixTestName, uuid) + userID := newUserID(t, uuid) + response := grpcGetEvaluations(t, tag, userID) + if response.State != featureproto.UserEvaluations_FULL { + t.Fatalf("Different states. Expected: %v, actual: %v", featureproto.UserEvaluations_FULL, response.State) + } + if response.Evaluations != nil { + evaluationSize := len(response.Evaluations.Evaluations) + if evaluationSize > 0 { + t.Fatalf("Different sizes. Expected: 0, actual: %v", evaluationSize) + } + } +} + +func TestGrpcGetEvaluationsFeatureFlagEnabled(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + defer client.Close() + uuid := newUUID(t) + tag := fmt.Sprintf("%s-tag-%s", prefixTestName, uuid) + userID := newUserID(t, uuid) + featureID := newFeatureID(t, uuid) + cmd := newCreateFeatureCommand(featureID) + createFeature(t, client, cmd) + addTag(t, tag, featureID, client) + enableFeature(t, featureID, client) + time.Sleep(3 * time.Second) + response := grpcGetEvaluations(t, tag, userID) + if response.State != featureproto.UserEvaluations_FULL { + t.Fatalf("Different states. 
Expected: %v, actual: %v", featureproto.UserEvaluations_FULL, response.State) + } + if response.Evaluations == nil { + t.Fatal("Evaluations field is nil") + } + evaluationSize := len(response.Evaluations.Evaluations) + if evaluationSize != 1 { + t.Fatalf("Wrong evaluation size. Expected 1, actual: %d", evaluationSize) + } + reason := response.Evaluations.Evaluations[0].Reason.Type + if reason != featureproto.Reason_DEFAULT { + t.Fatalf("Reason doesn't match. Expected: %v, actual: %v", featureproto.Reason_DEFAULT, reason) + } +} + +func TestGrpcGetEvaluationsFeatureFlagDisabled(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + defer client.Close() + uuid := newUUID(t) + tag := fmt.Sprintf("%s-tag-%s", prefixTestName, uuid) + userID := newUserID(t, uuid) + featureID := newFeatureID(t, uuid) + cmd := newCreateFeatureCommand(featureID) + createFeature(t, client, cmd) + addTag(t, tag, featureID, client) + time.Sleep(3 * time.Second) + response := grpcGetEvaluations(t, tag, userID) + if response.State != featureproto.UserEvaluations_FULL { + t.Fatalf("Different states. Expected: %v, actual: %v", featureproto.UserEvaluations_FULL, response.State) + } + if response.Evaluations == nil { + t.Fatal("Evaluations field is nil") + } + evaluationSize := len(response.Evaluations.Evaluations) + if evaluationSize != 1 { + t.Fatalf("Wrong evaluation size. Expected 1, actual: %d", evaluationSize) + } + reason := response.Evaluations.Evaluations[0].Reason.Type + if reason != featureproto.Reason_OFF_VARIATION { + t.Fatalf("Reason doesn't match. 
Expected: %v, actual: %v", featureproto.Reason_OFF_VARIATION, reason) + } +} + +func TestGrpcGetEvaluationsFullState(t *testing.T) { + t.Parallel() + c := newGatewayClient(t) + defer c.Close() + uuid := newUUID(t) + tag := fmt.Sprintf("%s-tag-%s", prefixTestName, uuid) + userID := newUserID(t, uuid) + featureID := newFeatureID(t, uuid) + createFeatureWithTag(t, tag, featureID) + featureID2 := fmt.Sprintf("%s-feature-id-%s", prefixTestName, newUUID(t)) + createFeatureWithTag(t, tag, featureID2) + time.Sleep(3 * time.Second) + response := grpcGetEvaluations(t, tag, userID) + if response.State != featureproto.UserEvaluations_FULL { + t.Fatalf("Different states. Expected: %v, actual: %v", featureproto.UserEvaluations_FULL, response.State) + } + if response.Evaluations == nil { + t.Fatal("Evaluations field is nil") + } + evaluationSize := len(response.Evaluations.Evaluations) + if evaluationSize != 2 { + t.Fatalf("Wrong evaluation size. Expected 2, actual: %d", evaluationSize) + } +} + +func TestGrpcGetEvaluation(t *testing.T) { + t.Parallel() + c := newGatewayClient(t) + defer c.Close() + uuid := newUUID(t) + tag := fmt.Sprintf("%s-tag-%s", prefixTestName, uuid) + userID := newUserID(t, uuid) + featureID := newFeatureID(t, uuid) + createFeatureWithTag(t, tag, featureID) + featureID2 := fmt.Sprintf("%s-feature-id-%s", prefixTestName, newUUID(t)) + createFeatureWithTag(t, tag, featureID2) + time.Sleep(3 * time.Second) + response := grpcGetEvaluation(t, tag, featureID2, userID) + if response.Evaluation == nil { + t.Fatal("Evaluation field is nil") + } + targetFeatureID := response.Evaluation.FeatureId + if targetFeatureID != featureID2 { + t.Fatalf("Wrong feature id. 
Expected: %s, actual: %s", featureID2, targetFeatureID) + } +} + +func TestGrpcRegisterEvents(t *testing.T) { + t.Parallel() + c := newGatewayClient(t) + defer c.Close() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + evaluation, err := ptypes.MarshalAny(&eventproto.EvaluationEvent{ + Timestamp: time.Now().Unix(), + FeatureId: "feature-id", + FeatureVersion: 1, + UserId: "user-id", + VariationId: "variation-id", + User: &userproto.User{ + Id: "user-id", + }, + Reason: &featureproto.Reason{}, + Tag: "tag", + }) + if err != nil { + t.Fatal(err) + } + goal, err := ptypes.MarshalAny(&eventproto.GoalEvent{ + Timestamp: time.Now().Unix(), + GoalId: "goal-id", + UserId: "user-id", + Value: 0.3, + User: &userproto.User{ + Id: "user-id", + }, + Tag: "tag", + }) + if err != nil { + t.Fatal(err) + } + req := &gatewayproto.RegisterEventsRequest{ + Events: []*eventproto.Event{ + { + Id: newUUID(t), + Event: evaluation, + EnvironmentNamespace: "", + }, + { + Id: newUUID(t), + Event: goal, + EnvironmentNamespace: "", + }, + }, + } + response, err := c.RegisterEvents(ctx, req) + if err != nil { + t.Fatal(err) + } + if len(response.Errors) > 0 { + t.Fatalf("Failed to register events. 
Error: %v", response.Errors) + } +} + +func newGatewayClient(t *testing.T) gatewayclient.Client { + t.Helper() + creds, err := gatewayclient.NewPerRPCCredentials(*apiKeyPath) + if err != nil { + t.Fatal("Failed to create RPC credentials:", err) + } + client, err := gatewayclient.NewClient( + fmt.Sprintf("%s:%d", *gatewayAddr, *gatewayPort), + *gatewayCert, + rpcclient.WithPerRPCCredentials(creds), + rpcclient.WithDialTimeout(30*time.Second), + rpcclient.WithBlock(), + ) + if err != nil { + t.Fatal("Failed to create gateway client:", err) + } + return client +} + +func newUUID(t *testing.T) string { + t.Helper() + id, err := uuid.NewUUID() + if err != nil { + t.Fatal(err) + } + return id.String() +} + +func createFeatureWithTag(t *testing.T, tag, featureID string) { + client := newFeatureClient(t) + defer client.Close() + cmd := newCreateFeatureCommand(featureID) + createFeature(t, client, cmd) + addTag(t, tag, cmd.Id, client) + enableFeature(t, featureID, client) +} + +func newFeatureClient(t *testing.T) featureclient.Client { + t.Helper() + creds, err := rpcclient.NewPerRPCCredentials(*serviceTokenPath) + if err != nil { + t.Fatal("Failed to create RPC credentials:", err) + } + featureClient, err := featureclient.NewClient( + fmt.Sprintf("%s:%d", *webGatewayAddr, *webGatewayPort), + *webGatewayCert, + rpcclient.WithPerRPCCredentials(creds), + rpcclient.WithDialTimeout(30*time.Second), + rpcclient.WithBlock(), + ) + if err != nil { + t.Fatal("Failed to create feature client:", err) + } + return featureClient +} + +func newCreateFeatureCommand(featureID string) *featureproto.CreateFeatureCommand { + return &featureproto.CreateFeatureCommand{ + Id: featureID, + Name: featureID, + Description: "e2e-test-gateway-feature-description", + Variations: []*featureproto.Variation{ + { + Value: "A", + Name: "Variation A", + Description: "Thing does A", + }, + { + Value: "B", + Name: "Variation B", + Description: "Thing does B", + }, + }, + Tags: []string{ + "e2e-test-tag-1", + 
"e2e-test-tag-2", + "e2e-test-tag-3", + }, + DefaultOnVariationIndex: &wrappers.Int32Value{Value: int32(0)}, + DefaultOffVariationIndex: &wrappers.Int32Value{Value: int32(1)}, + } +} + +func createFeature(t *testing.T, client featureclient.Client, cmd *featureproto.CreateFeatureCommand) { + t.Helper() + createReq := &featureproto.CreateFeatureRequest{ + Command: cmd, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + if _, err := client.CreateFeature(ctx, createReq); err != nil { + t.Fatal(err) + } +} + +func addTag(t *testing.T, tag string, featureID string, client featureclient.Client) { + t.Helper() + addReq := &featureproto.UpdateFeatureDetailsRequest{ + Id: featureID, + AddTagCommands: []*featureproto.AddTagCommand{ + {Tag: tag}, + }, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + if _, err := client.UpdateFeatureDetails(ctx, addReq); err != nil { + t.Fatal(err) + } +} + +func enableFeature(t *testing.T, featureID string, client featureclient.Client) { + t.Helper() + enableReq := &featureproto.EnableFeatureRequest{ + Id: featureID, + Command: &featureproto.EnableFeatureCommand{}, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + if _, err := client.EnableFeature(ctx, enableReq); err != nil { + t.Fatalf("Failed to enable feature id: %s. 
Error: %v", featureID, err) + } +} + +func grpcGetEvaluations(t *testing.T, tag, userID string) *gatewayproto.GetEvaluationsResponse { + t.Helper() + c := newGatewayClient(t) + defer c.Close() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + req := &gatewayproto.GetEvaluationsRequest{ + Tag: tag, + User: &userproto.User{Id: userID}, + } + response, err := c.GetEvaluations(ctx, req) + if err != nil { + t.Fatal(err) + } + return response +} + +func grpcGetEvaluation(t *testing.T, tag, featureID, userID string) *gatewayproto.GetEvaluationResponse { + t.Helper() + c := newGatewayClient(t) + defer c.Close() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + req := &gatewayproto.GetEvaluationRequest{ + Tag: tag, + User: &userproto.User{Id: userID}, + FeatureId: featureID, + } + response, err := c.GetEvaluation(ctx, req) + if err != nil { + t.Fatal(err) + } + return response +} + +func newUserID(t *testing.T, uuid string) string { + if *testID != "" { + return fmt.Sprintf("%s-%s-user-%s", prefixTestName, *testID, uuid) + } + return fmt.Sprintf("%s-user-%s", prefixTestName, uuid) +} + +func newFeatureID(t *testing.T, uuid string) string { + if *testID != "" { + return fmt.Sprintf("%s-%s-feature-id-%s", prefixTestName, *testID, uuid) + } + return fmt.Sprintf("%s-feature-id-%s", prefixTestName, uuid) +} diff --git a/test/e2e/gateway/api_test.go b/test/e2e/gateway/api_test.go new file mode 100644 index 000000000..fb3fb87f5 --- /dev/null +++ b/test/e2e/gateway/api_test.go @@ -0,0 +1,198 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gateway + +import ( + "encoding/json" + "fmt" + "testing" + "time" + + "google.golang.org/protobuf/encoding/protojson" + + eventproto "github.com/bucketeer-io/bucketeer/proto/event/client" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" + userproto "github.com/bucketeer-io/bucketeer/proto/user" + "github.com/bucketeer-io/bucketeer/test/e2e/util" +) + +func TestGetEvaluationsWithoutCreatingFeature(t *testing.T) { + t.Parallel() + uuid := newUUID(t) + tag := fmt.Sprintf("%s-tag-%s", prefixTestName, uuid) + userID := newUserID(t, uuid) + response := util.GetEvaluations(t, tag, userID, *gatewayAddr, *apiKeyPath) + + if response.Evaluations != nil { + evaluationSize := len(response.Evaluations.Evaluations) + if evaluationSize > 0 { + t.Fatalf("Different sizes. 
Expected: 0, actual: %v", evaluationSize) + } + } +} + +func TestGetEvaluationsFeatureFlagEnabled(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + defer client.Close() + uuid := newUUID(t) + tag := fmt.Sprintf("%s-tag-%s", prefixTestName, uuid) + userID := newUserID(t, uuid) + featureID := newFeatureID(t, uuid) + cmd := newCreateFeatureCommand(featureID) + createFeature(t, client, cmd) + addTag(t, tag, featureID, client) + enableFeature(t, featureID, client) + time.Sleep(3 * time.Second) + response := util.GetEvaluations(t, tag, userID, *gatewayAddr, *apiKeyPath) + + if response.Evaluations == nil { + t.Fatal("Evaluations field is nil") + } + evaluationSize := len(response.Evaluations.Evaluations) + if evaluationSize != 1 { + t.Fatalf("Wrong evaluation size. Expected 1, actual: %d", evaluationSize) + } + reason := response.Evaluations.Evaluations[0].Reason.Type + if reason != featureproto.Reason_DEFAULT { + t.Fatalf("Reason doesn't match. Expected: %v, actual: %v", featureproto.Reason_DEFAULT, reason) + } +} + +func TestGetEvaluationsFeatureFlagDisabled(t *testing.T) { + t.Parallel() + client := newFeatureClient(t) + defer client.Close() + uuid := newUUID(t) + tag := fmt.Sprintf("%s-tag-%s", prefixTestName, uuid) + userID := newUserID(t, uuid) + featureID := newFeatureID(t, uuid) + cmd := newCreateFeatureCommand(featureID) + createFeature(t, client, cmd) + addTag(t, tag, featureID, client) + time.Sleep(3 * time.Second) + response := util.GetEvaluations(t, tag, userID, *gatewayAddr, *apiKeyPath) + + if response.Evaluations == nil { + t.Fatal("Evaluations field is nil") + } + evaluationSize := len(response.Evaluations.Evaluations) + if evaluationSize != 1 { + t.Fatalf("Wrong evaluation size. Expected 1, actual: %d", evaluationSize) + } + reason := response.Evaluations.Evaluations[0].Reason.Type + if reason != featureproto.Reason_OFF_VARIATION { + t.Fatalf("Reason doesn't match. 
Expected: %v, actual: %v", featureproto.Reason_OFF_VARIATION, reason) + } +} + +func TestGetEvaluationsFullState(t *testing.T) { + t.Parallel() + c := newGatewayClient(t) + defer c.Close() + uuid := newUUID(t) + tag := fmt.Sprintf("%s-tag-%s", prefixTestName, uuid) + userID := newUserID(t, uuid) + featureID := newFeatureID(t, uuid) + createFeatureWithTag(t, tag, featureID) + featureID2 := fmt.Sprintf("%s-feature-id-%s", prefixTestName, newUUID(t)) + createFeatureWithTag(t, tag, featureID2) + time.Sleep(3 * time.Second) + response := util.GetEvaluations(t, tag, userID, *gatewayAddr, *apiKeyPath) + + if response.Evaluations == nil { + t.Fatal("Evaluations field is nil") + } + evaluationSize := len(response.Evaluations.Evaluations) + if evaluationSize != 2 { + t.Fatalf("Wrong evaluation size. Expected 2, actual: %d", evaluationSize) + } +} + +func TestGetEvaluation(t *testing.T) { + t.Parallel() + c := newGatewayClient(t) + defer c.Close() + uuid := newUUID(t) + tag := fmt.Sprintf("%s-tag-%s", prefixTestName, uuid) + userID := newUserID(t, uuid) + featureID := newFeatureID(t, uuid) + createFeatureWithTag(t, tag, featureID) + featureID2 := fmt.Sprintf("%s-feature-id-%s", prefixTestName, newUUID(t)) + createFeatureWithTag(t, tag, featureID2) + time.Sleep(3 * time.Second) + response := util.GetEvaluation(t, tag, featureID2, userID, *gatewayAddr, *apiKeyPath) + if response.Evaluation == nil { + t.Fatal("Evaluation field is nil") + } + targetFeatureID := response.Evaluation.FeatureId + if targetFeatureID != featureID2 { + t.Fatalf("Wrong feature id. 
Expected: %s, actual: %s", featureID2, targetFeatureID) + } +} + +func TestRegisterEvents(t *testing.T) { + t.Parallel() + evaluation, err := protojson.Marshal(&eventproto.EvaluationEvent{ + Timestamp: time.Now().Unix(), + FeatureId: "feature-id", + FeatureVersion: 1, + UserId: "user-id", + VariationId: "variation-id", + User: &userproto.User{ + Id: "user-id", + }, + Reason: &featureproto.Reason{}, + Tag: "tag", + }) + if err != nil { + t.Fatal(err) + } + goal, err := protojson.Marshal(&eventproto.GoalEvent{ + Timestamp: time.Now().Unix(), + GoalId: "goal-id", + UserId: "user-id", + Value: 0.3, + User: &userproto.User{ + Id: "user-id", + }, + Tag: "tag", + }) + if err != nil { + t.Fatal(err) + } + response := util.RegisterEvents( + t, + []util.Event{ + { + ID: newUUID(t), + Event: json.RawMessage(evaluation), + EnvironmentNamespace: "", + Type: util.EvaluationEventType, + }, + { + ID: newUUID(t), + Event: json.RawMessage(goal), + EnvironmentNamespace: "", + Type: util.GoalEventType, + }, + }, + *gatewayAddr, + *apiKeyPath, + ) + if len(response.Errors) > 0 { + t.Fatalf("Failed to register events. 
Error: %v", response.Errors) + } +} diff --git a/test/e2e/gateway/testdata/invalid-apikey b/test/e2e/gateway/testdata/invalid-apikey new file mode 100644 index 000000000..7f26568c5 --- /dev/null +++ b/test/e2e/gateway/testdata/invalid-apikey @@ -0,0 +1 @@ +e2e-invalid-apikey \ No newline at end of file diff --git a/test/e2e/notification/BUILD.bazel b/test/e2e/notification/BUILD.bazel new file mode 100644 index 000000000..8d09da342 --- /dev/null +++ b/test/e2e/notification/BUILD.bazel @@ -0,0 +1,18 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_test") + +go_test( + name = "go_default_test", + srcs = [ + "admin_subscription_test.go", + "subscription_test.go", + ], + deps = [ + "//pkg/notification/client:go_default_library", + "//pkg/notification/domain:go_default_library", + "//pkg/rpc/client:go_default_library", + "//pkg/uuid:go_default_library", + "//proto/notification:go_default_library", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + ], +) diff --git a/test/e2e/notification/admin_subscription_test.go b/test/e2e/notification/admin_subscription_test.go new file mode 100644 index 000000000..ffb24d38f --- /dev/null +++ b/test/e2e/notification/admin_subscription_test.go @@ -0,0 +1,293 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package autoops + +import ( + "context" + "fmt" + "testing" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + notificationclient "github.com/bucketeer-io/bucketeer/pkg/notification/client" + "github.com/bucketeer-io/bucketeer/pkg/notification/domain" + proto "github.com/bucketeer-io/bucketeer/proto/notification" +) + +func TestCreateGetDeleteAdminSubscription(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + notificationClient := newNotificationClient(t) + defer notificationClient.Close() + + name := fmt.Sprintf("%s-name-%s", prefixTestName, newUUID(t)) + sourceTypes := []proto.Subscription_SourceType{ + proto.Subscription_DOMAIN_EVENT_ADMIN_ACCOUNT, + } + webhookURL := fmt.Sprintf("%s-webhook-url-%s", prefixTestName, newUUID(t)) + recipient := &proto.Recipient{ + Type: proto.Recipient_SlackChannel, + SlackChannelRecipient: &proto.SlackChannelRecipient{WebhookUrl: webhookURL}, + } + id, err := domain.ID(recipient) + if err != nil { + t.Fatal(err) + } + createAdminSubscription(ctx, t, notificationClient, name, sourceTypes, recipient) + resp, err := notificationClient.GetAdminSubscription(ctx, &proto.GetAdminSubscriptionRequest{ + Id: id, + }) + if err != nil { + t.Fatal(err) + } + subscription := resp.Subscription + if subscription == nil { + t.Fatalf("Subscription not found") + } + if subscription.Name != name { + t.Fatalf("Incorrect name. Expected: %s actual: %s", name, subscription.Name) + } + if len(subscription.SourceTypes) != 1 { + t.Fatalf("The number of notification types is incorrect. Expected: %d actual: %d", 1, len(subscription.SourceTypes)) + } + if subscription.SourceTypes[0] != sourceTypes[0] { + t.Fatalf("Incorrect notification type. Expected: %s actual: %s", sourceTypes[0], subscription.SourceTypes[0]) + } + if subscription.Recipient.Type != proto.Recipient_SlackChannel { + t.Fatalf("Incorrect recipient type. 
Expected: %s actual: %s", proto.Recipient_SlackChannel, subscription.Recipient.Type) + } + if subscription.Recipient.SlackChannelRecipient.WebhookUrl != webhookURL { + t.Fatalf("Incorrect webhook URL. Expected: %s actual: %s", webhookURL, subscription.Recipient.SlackChannelRecipient.WebhookUrl) + } + if subscription.Disabled != false { + t.Fatalf("Incorrect deleted. Expected: %t actual: %t", false, subscription.Disabled) + } + _, err = notificationClient.DeleteAdminSubscription(ctx, &proto.DeleteAdminSubscriptionRequest{ + Id: id, + Command: &proto.DeleteAdminSubscriptionCommand{}, + }) + if err != nil { + t.Fatal(err) + } + _, err = notificationClient.GetAdminSubscription(ctx, &proto.GetAdminSubscriptionRequest{ + Id: id, + }) + if err != nil { + st, _ := status.FromError(err) + if st.Code() != codes.NotFound { + t.Fatal(err) + } + } +} + +func TestCreateListDeleteAdminSubscription(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + notificationClient := newNotificationClient(t) + defer notificationClient.Close() + + name := fmt.Sprintf("%s-name-%s", prefixTestName, newUUID(t)) + sourceTypes := []proto.Subscription_SourceType{ + proto.Subscription_DOMAIN_EVENT_ACCOUNT, + } + webhookURL := fmt.Sprintf("%s-webhook-url-%s", prefixTestName, newUUID(t)) + recipient := &proto.Recipient{ + Type: proto.Recipient_SlackChannel, + SlackChannelRecipient: &proto.SlackChannelRecipient{WebhookUrl: webhookURL}, + } + id, err := domain.ID(recipient) + if err != nil { + t.Fatal(err) + } + createAdminSubscription(ctx, t, notificationClient, name, sourceTypes, recipient) + subscriptions := listAdminSubscriptions(t, notificationClient, []proto.Subscription_SourceType{proto.Subscription_DOMAIN_EVENT_ACCOUNT}) + var subscription *proto.Subscription + for _, s := range subscriptions { + if s.Id == id { + subscription = s + break + } + } + if subscription == nil { + t.Fatalf("Subscription not found") + } + if 
subscription.Name != name { + t.Fatalf("Incorrect name. Expected: %s actual: %s", name, subscription.Name) + } + if len(subscription.SourceTypes) != 1 { + t.Fatalf("The number of notification types is incorrect. Expected: %d actual: %d", 1, len(subscription.SourceTypes)) + } + if subscription.SourceTypes[0] != sourceTypes[0] { + t.Fatalf("Incorrect notification type. Expected: %s actual: %s", sourceTypes[0], subscription.SourceTypes[0]) + } + if subscription.Recipient.Type != proto.Recipient_SlackChannel { + t.Fatalf("Incorrect recipient type. Expected: %s actual: %s", proto.Recipient_SlackChannel, subscription.Recipient.Type) + } + if subscription.Recipient.SlackChannelRecipient.WebhookUrl != webhookURL { + t.Fatalf("Incorrect webhook URL. Expected: %s actual: %s", webhookURL, subscription.Recipient.SlackChannelRecipient.WebhookUrl) + } + if subscription.Disabled != false { + t.Fatalf("Incorrect deleted. Expected: %t actual: %t", false, subscription.Disabled) + } + _, err = notificationClient.DeleteAdminSubscription(ctx, &proto.DeleteAdminSubscriptionRequest{ + Id: id, + Command: &proto.DeleteAdminSubscriptionCommand{}, + }) + if err != nil { + t.Fatal(err) + } + _, err = notificationClient.GetAdminSubscription(ctx, &proto.GetAdminSubscriptionRequest{ + Id: id, + }) + if err != nil { + st, _ := status.FromError(err) + if st.Code() != codes.NotFound { + t.Fatal(err) + } + } +} + +func TestUpdateAdminSubscription(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + notificationClient := newNotificationClient(t) + defer notificationClient.Close() + + name := fmt.Sprintf("%s-name-%s", prefixTestName, newUUID(t)) + sourceTypes := []proto.Subscription_SourceType{ + proto.Subscription_DOMAIN_EVENT_ACCOUNT, + } + webhookURL := fmt.Sprintf("%s-webhook-url-%s", prefixTestName, newUUID(t)) + recipient := &proto.Recipient{ + Type: proto.Recipient_SlackChannel, + SlackChannelRecipient: 
&proto.SlackChannelRecipient{WebhookUrl: webhookURL}, + } + id, err := domain.ID(recipient) + if err != nil { + t.Fatal(err) + } + createAdminSubscription(ctx, t, notificationClient, name, sourceTypes, recipient) + _, err = notificationClient.UpdateAdminSubscription(ctx, &proto.UpdateAdminSubscriptionRequest{ + Id: id, + AddSourceTypesCommand: &proto.AddAdminSubscriptionSourceTypesCommand{ + SourceTypes: []proto.Subscription_SourceType{ + proto.Subscription_DOMAIN_EVENT_ADMIN_ACCOUNT, + }, + }, + DeleteSourceTypesCommand: &proto.DeleteAdminSubscriptionSourceTypesCommand{ + SourceTypes: []proto.Subscription_SourceType{ + proto.Subscription_DOMAIN_EVENT_ACCOUNT, + }, + }, + }) + if err != nil { + t.Fatal(err) + } + resp, err := notificationClient.GetAdminSubscription(ctx, &proto.GetAdminSubscriptionRequest{ + Id: id, + }) + if err != nil { + t.Fatal(err) + } + subscription := resp.Subscription + if subscription == nil { + t.Fatalf("Subscription not found") + } + if subscription.Name != name { + t.Fatalf("Incorrect name. Expected: %s actual: %s", name, subscription.Name) + } + if len(subscription.SourceTypes) != 1 { + t.Fatalf("The number of notification types is incorrect. Expected: %d actual: %d", 1, len(subscription.SourceTypes)) + } + if subscription.SourceTypes[0] != proto.Subscription_DOMAIN_EVENT_ADMIN_ACCOUNT { + t.Fatalf("Incorrect notification type. Expected: %s actual: %s", sourceTypes[0], subscription.SourceTypes[0]) + } + if subscription.Recipient.Type != proto.Recipient_SlackChannel { + t.Fatalf("Incorrect recipient type. Expected: %s actual: %s", proto.Recipient_SlackChannel, subscription.Recipient.Type) + } + if subscription.Recipient.SlackChannelRecipient.WebhookUrl != webhookURL { + t.Fatalf("Incorrect webhook URL. Expected: %s actual: %s", webhookURL, subscription.Recipient.SlackChannelRecipient.WebhookUrl) + } + if subscription.Disabled != false { + t.Fatalf("Incorrect deleted. 
Expected: %t actual: %t", false, subscription.Disabled) + } + _, err = notificationClient.DeleteAdminSubscription(ctx, &proto.DeleteAdminSubscriptionRequest{ + Id: id, + Command: &proto.DeleteAdminSubscriptionCommand{}, + }) + if err != nil { + t.Fatal(err) + } + _, err = notificationClient.GetSubscription(ctx, &proto.GetSubscriptionRequest{ + Id: id, + }) + if err != nil { + st, _ := status.FromError(err) + if st.Code() != codes.NotFound { + t.Fatal(err) + } + } +} + +func createAdminSubscription( + ctx context.Context, + t *testing.T, + client notificationclient.Client, + name string, + sourceTypes []proto.Subscription_SourceType, + recipient *proto.Recipient) { + + t.Helper() + cmd := newCreateAdminSubscriptionCommand(name, sourceTypes, recipient) + createReq := &proto.CreateAdminSubscriptionRequest{ + Command: cmd, + } + if _, err := client.CreateAdminSubscription(ctx, createReq); err != nil { + t.Fatal(err) + } +} + +func newCreateAdminSubscriptionCommand( + name string, + sourceTypes []proto.Subscription_SourceType, + recipient *proto.Recipient) *proto.CreateAdminSubscriptionCommand { + + return &proto.CreateAdminSubscriptionCommand{ + Name: name, + SourceTypes: sourceTypes, + Recipient: recipient, + } +} + +func listAdminSubscriptions( + t *testing.T, + client notificationclient.Client, + sourceTypes []proto.Subscription_SourceType) []*proto.Subscription { + + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + resp, err := client.ListAdminSubscriptions(ctx, &proto.ListAdminSubscriptionsRequest{ + PageSize: int64(500), + SourceTypes: sourceTypes, + }) + if err != nil { + t.Fatal("failed to list subscriptions", err) + } + return resp.Subscriptions +} diff --git a/test/e2e/notification/subscription_test.go b/test/e2e/notification/subscription_test.go new file mode 100644 index 000000000..4820a3f76 --- /dev/null +++ b/test/e2e/notification/subscription_test.go @@ -0,0 +1,426 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package autoops + +import ( + "context" + "flag" + "fmt" + "testing" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + notificationclient "github.com/bucketeer-io/bucketeer/pkg/notification/client" + "github.com/bucketeer-io/bucketeer/pkg/notification/domain" + rpcclient "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + "github.com/bucketeer-io/bucketeer/pkg/uuid" + proto "github.com/bucketeer-io/bucketeer/proto/notification" +) + +const ( + prefixTestName = "e2e-test" + timeout = 10 * time.Second +) + +var ( + webGatewayAddr = flag.String("web-gateway-addr", "", "Web gateway endpoint address") + webGatewayPort = flag.Int("web-gateway-port", 443, "Web gateway endpoint port") + webGatewayCert = flag.String("web-gateway-cert", "", "Web gateway crt file") + apiKeyPath = flag.String("api-key", "", "Api key path for web gateway") + gatewayAddr = flag.String("gateway-addr", "", "Gateway endpoint address") + gatewayPort = flag.Int("gateway-port", 443, "Gateway endpoint port") + gatewayCert = flag.String("gateway-cert", "", "Gateway crt file") + serviceTokenPath = flag.String("service-token", "", "Service token path") + environmentNamespace = flag.String("environment-namespace", "", "Environment namespace") + testID = flag.String("test-id", "", "test ID") +) + +func TestCreateGetDeleteSubscription(t *testing.T) { + t.Parallel() + ctx, cancel := 
context.WithTimeout(context.Background(), timeout) + defer cancel() + notificationClient := newNotificationClient(t) + defer notificationClient.Close() + + name := fmt.Sprintf("%s-name-%s", prefixTestName, newUUID(t)) + sourceTypes := []proto.Subscription_SourceType{ + proto.Subscription_DOMAIN_EVENT_ACCOUNT, + } + webhookURL := fmt.Sprintf("%s-webhook-url-%s", prefixTestName, newUUID(t)) + recipient := &proto.Recipient{ + Type: proto.Recipient_SlackChannel, + SlackChannelRecipient: &proto.SlackChannelRecipient{WebhookUrl: webhookURL}, + } + id, err := domain.ID(recipient) + if err != nil { + t.Fatal(err) + } + createSubscription(ctx, t, notificationClient, name, sourceTypes, recipient) + resp, err := notificationClient.GetSubscription(ctx, &proto.GetSubscriptionRequest{ + EnvironmentNamespace: *environmentNamespace, + Id: id, + }) + if err != nil { + t.Fatal(err) + } + subscription := resp.Subscription + if subscription == nil { + t.Fatalf("Subscription not found") + } + if subscription.Name != name { + t.Fatalf("Incorrect name. Expected: %s actual: %s", name, subscription.Name) + } + if len(subscription.SourceTypes) != 1 { + t.Fatalf("The number of notification types is incorrect. Expected: %d actual: %d", 1, len(subscription.SourceTypes)) + } + if subscription.SourceTypes[0] != sourceTypes[0] { + t.Fatalf("Incorrect notification type. Expected: %s actual: %s", sourceTypes[0], subscription.SourceTypes[0]) + } + if subscription.Recipient.Type != proto.Recipient_SlackChannel { + t.Fatalf("Incorrect recipient type. Expected: %s actual: %s", proto.Recipient_SlackChannel, subscription.Recipient.Type) + } + if subscription.Recipient.SlackChannelRecipient.WebhookUrl != webhookURL { + t.Fatalf("Incorrect webhook URL. Expected: %s actual: %s", webhookURL, subscription.Recipient.SlackChannelRecipient.WebhookUrl) + } + if subscription.Disabled != false { + t.Fatalf("Incorrect deleted. 
Expected: %t actual: %t", false, subscription.Disabled) + } + _, err = notificationClient.DeleteSubscription(ctx, &proto.DeleteSubscriptionRequest{ + EnvironmentNamespace: *environmentNamespace, + Id: id, + Command: &proto.DeleteSubscriptionCommand{}, + }) + if err != nil { + t.Fatal(err) + } + _, err = notificationClient.GetSubscription(ctx, &proto.GetSubscriptionRequest{ + EnvironmentNamespace: *environmentNamespace, + Id: id, + }) + if err != nil { + st, _ := status.FromError(err) + if st.Code() != codes.NotFound { + t.Fatal(err) + } + } +} + +func TestCreateListDeleteSubscription(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + notificationClient := newNotificationClient(t) + defer notificationClient.Close() + + name := fmt.Sprintf("%s-name-%s", prefixTestName, newUUID(t)) + sourceTypes := []proto.Subscription_SourceType{ + proto.Subscription_DOMAIN_EVENT_ACCOUNT, + } + webhookURL := fmt.Sprintf("%s-webhook-url-%s", prefixTestName, newUUID(t)) + recipient := &proto.Recipient{ + Type: proto.Recipient_SlackChannel, + SlackChannelRecipient: &proto.SlackChannelRecipient{WebhookUrl: webhookURL}, + } + id, err := domain.ID(recipient) + if err != nil { + t.Fatal(err) + } + createSubscription(ctx, t, notificationClient, name, sourceTypes, recipient) + subscriptions := listSubscriptions(t, notificationClient, []proto.Subscription_SourceType{proto.Subscription_DOMAIN_EVENT_ACCOUNT}) + var subscription *proto.Subscription + for _, s := range subscriptions { + if s.Id == id { + subscription = s + break + } + } + if subscription == nil { + t.Fatalf("Subscription not found") + } + if subscription.Name != name { + t.Fatalf("Incorrect name. Expected: %s actual: %s", name, subscription.Name) + } + if len(subscription.SourceTypes) != 1 { + t.Fatalf("The number of notification types is incorrect. 
Expected: %d actual: %d", 1, len(subscription.SourceTypes)) + } + if subscription.SourceTypes[0] != sourceTypes[0] { + t.Fatalf("Incorrect notification type. Expected: %s actual: %s", sourceTypes[0], subscription.SourceTypes[0]) + } + if subscription.Recipient.Type != proto.Recipient_SlackChannel { + t.Fatalf("Incorrect recipient type. Expected: %s actual: %s", proto.Recipient_SlackChannel, subscription.Recipient.Type) + } + if subscription.Recipient.SlackChannelRecipient.WebhookUrl != webhookURL { + t.Fatalf("Incorrect webhook URL. Expected: %s actual: %s", webhookURL, subscription.Recipient.SlackChannelRecipient.WebhookUrl) + } + if subscription.Disabled != false { + t.Fatalf("Incorrect deleted. Expected: %t actual: %t", false, subscription.Disabled) + } + _, err = notificationClient.DeleteSubscription(ctx, &proto.DeleteSubscriptionRequest{ + EnvironmentNamespace: *environmentNamespace, + Id: id, + Command: &proto.DeleteSubscriptionCommand{}, + }) + if err != nil { + t.Fatal(err) + } + _, err = notificationClient.GetSubscription(ctx, &proto.GetSubscriptionRequest{ + EnvironmentNamespace: *environmentNamespace, + Id: id, + }) + if err != nil { + st, _ := status.FromError(err) + if st.Code() != codes.NotFound { + t.Fatal(err) + } + } +} + +func TestUpdateSubscription(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + notificationClient := newNotificationClient(t) + defer notificationClient.Close() + + name := fmt.Sprintf("%s-name-%s", prefixTestName, newUUID(t)) + sourceTypes := []proto.Subscription_SourceType{ + proto.Subscription_DOMAIN_EVENT_ACCOUNT, + } + webhookURL := fmt.Sprintf("%s-webhook-url-%s", prefixTestName, newUUID(t)) + recipient := &proto.Recipient{ + Type: proto.Recipient_SlackChannel, + SlackChannelRecipient: &proto.SlackChannelRecipient{WebhookUrl: webhookURL}, + } + id, err := domain.ID(recipient) + if err != nil { + t.Fatal(err) + } + createSubscription(ctx, t, 
notificationClient, name, sourceTypes, recipient) + _, err = notificationClient.UpdateSubscription(ctx, &proto.UpdateSubscriptionRequest{ + EnvironmentNamespace: *environmentNamespace, + Id: id, + AddSourceTypesCommand: &proto.AddSourceTypesCommand{ + SourceTypes: []proto.Subscription_SourceType{ + proto.Subscription_DOMAIN_EVENT_ADMIN_ACCOUNT, + }, + }, + DeleteSourceTypesCommand: &proto.DeleteSourceTypesCommand{ + SourceTypes: []proto.Subscription_SourceType{ + proto.Subscription_DOMAIN_EVENT_ACCOUNT, + }, + }, + }) + if err != nil { + t.Fatal(err) + } + resp, err := notificationClient.GetSubscription(ctx, &proto.GetSubscriptionRequest{ + EnvironmentNamespace: *environmentNamespace, + Id: id, + }) + if err != nil { + t.Fatal(err) + } + subscription := resp.Subscription + if subscription == nil { + t.Fatalf("Subscription not found") + } + if subscription.Name != name { + t.Fatalf("Incorrect name. Expected: %s actual: %s", name, subscription.Name) + } + if len(subscription.SourceTypes) != 1 { + t.Fatalf("The number of notification types is incorrect. Expected: %d actual: %d", 1, len(subscription.SourceTypes)) + } + if subscription.SourceTypes[0] != proto.Subscription_DOMAIN_EVENT_ADMIN_ACCOUNT { + t.Fatalf("Incorrect notification type. Expected: %s actual: %s", sourceTypes[0], subscription.SourceTypes[0]) + } + if subscription.Recipient.Type != proto.Recipient_SlackChannel { + t.Fatalf("Incorrect recipient type. Expected: %s actual: %s", proto.Recipient_SlackChannel, subscription.Recipient.Type) + } + if subscription.Recipient.SlackChannelRecipient.WebhookUrl != webhookURL { + t.Fatalf("Incorrect webhook URL. Expected: %s actual: %s", webhookURL, subscription.Recipient.SlackChannelRecipient.WebhookUrl) + } + if subscription.Disabled != false { + t.Fatalf("Incorrect deleted. 
Expected: %t actual: %t", false, subscription.Disabled) + } + _, err = notificationClient.DeleteSubscription(ctx, &proto.DeleteSubscriptionRequest{ + EnvironmentNamespace: *environmentNamespace, + Id: id, + Command: &proto.DeleteSubscriptionCommand{}, + }) + if err != nil { + t.Fatal(err) + } + _, err = notificationClient.GetSubscription(ctx, &proto.GetSubscriptionRequest{ + EnvironmentNamespace: *environmentNamespace, + Id: id, + }) + if err != nil { + st, _ := status.FromError(err) + if st.Code() != codes.NotFound { + t.Fatal(err) + } + } +} + +func TestListEnabledSubscriptions(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + notificationClient := newNotificationClient(t) + defer notificationClient.Close() + + name := fmt.Sprintf("%s-name-%s", prefixTestName, newUUID(t)) + sourceTypes := []proto.Subscription_SourceType{ + proto.Subscription_MAU_COUNT, + } + webhookURL := fmt.Sprintf("%s-webhook-url-%s", prefixTestName, newUUID(t)) + recipient := &proto.Recipient{ + Type: proto.Recipient_SlackChannel, + SlackChannelRecipient: &proto.SlackChannelRecipient{WebhookUrl: webhookURL}, + } + id, err := domain.ID(recipient) + if err != nil { + t.Fatal(err) + } + createSubscription(ctx, t, notificationClient, name, sourceTypes, recipient) + _, err = notificationClient.DisableSubscription(ctx, &proto.DisableSubscriptionRequest{ + EnvironmentNamespace: *environmentNamespace, + Id: id, + Command: &proto.DisableSubscriptionCommand{}, + }) + if err != nil { + t.Fatal(err) + } + subscriptions := listEnabledSubscriptions( + t, + notificationClient, + []proto.Subscription_SourceType{proto.Subscription_MAU_COUNT}, + ) + var contains bool + for _, s := range subscriptions { + if s.Id == id { + contains = true + break + } + } + if contains { + t.Fatal("List enabled subscriptions include disabled subscription") + } + _, err = notificationClient.DeleteSubscription(ctx, &proto.DeleteSubscriptionRequest{ + 
EnvironmentNamespace: *environmentNamespace, + Id: id, + Command: &proto.DeleteSubscriptionCommand{}, + }) + if err != nil { + t.Fatal(err) + } +} + +func newNotificationClient(t *testing.T) notificationclient.Client { + t.Helper() + creds, err := rpcclient.NewPerRPCCredentials(*serviceTokenPath) + if err != nil { + t.Fatal("Failed to create RPC credentials:", err) + } + client, err := notificationclient.NewClient( + fmt.Sprintf("%s:%d", *webGatewayAddr, *webGatewayPort), + *webGatewayCert, + rpcclient.WithPerRPCCredentials(creds), + rpcclient.WithDialTimeout(30*time.Second), + rpcclient.WithBlock(), + ) + if err != nil { + t.Fatal("Failed to create notification client:", err) + } + return client +} + +func newUUID(t *testing.T) string { + t.Helper() + id, err := uuid.NewUUID() + if err != nil { + t.Fatal(err) + } + return id.String() +} + +func createSubscription( + ctx context.Context, + t *testing.T, + client notificationclient.Client, + name string, + sourceTypes []proto.Subscription_SourceType, + recipient *proto.Recipient) { + + t.Helper() + cmd := newCreateSubscriptionCommand(name, sourceTypes, recipient) + createReq := &proto.CreateSubscriptionRequest{ + EnvironmentNamespace: *environmentNamespace, + Command: cmd, + } + if _, err := client.CreateSubscription(ctx, createReq); err != nil { + t.Fatal(err) + } +} + +func newCreateSubscriptionCommand( + name string, + sourceTypes []proto.Subscription_SourceType, + recipient *proto.Recipient) *proto.CreateSubscriptionCommand { + + return &proto.CreateSubscriptionCommand{ + Name: name, + SourceTypes: sourceTypes, + Recipient: recipient, + } +} + +func listSubscriptions( + t *testing.T, + client notificationclient.Client, + sourceTypes []proto.Subscription_SourceType) []*proto.Subscription { + + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + resp, err := client.ListSubscriptions(ctx, &proto.ListSubscriptionsRequest{ + EnvironmentNamespace: *environmentNamespace, + PageSize: 
int64(500), + SourceTypes: sourceTypes, + }) + if err != nil { + t.Fatal("failed to list subscriptions", err) + } + return resp.Subscriptions +} + +func listEnabledSubscriptions( + t *testing.T, + client notificationclient.Client, + sourceTypes []proto.Subscription_SourceType) []*proto.Subscription { + + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + resp, err := client.ListEnabledSubscriptions(ctx, &proto.ListEnabledSubscriptionsRequest{ + EnvironmentNamespace: *environmentNamespace, + PageSize: int64(500), + SourceTypes: sourceTypes, + }) + if err != nil { + t.Fatal("failed to list enabled subscriptions", err) + } + return resp.Subscriptions +} diff --git a/test/e2e/push/BUILD.bazel b/test/e2e/push/BUILD.bazel new file mode 100644 index 000000000..317846549 --- /dev/null +++ b/test/e2e/push/BUILD.bazel @@ -0,0 +1,15 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_test") + +go_test( + name = "go_default_test", + srcs = ["push_test.go"], + deps = [ + "//pkg/feature/client:go_default_library", + "//pkg/push/client:go_default_library", + "//pkg/rpc/client:go_default_library", + "//pkg/uuid:go_default_library", + "//proto/feature:go_default_library", + "//proto/push:go_default_library", + "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", + ], +) diff --git a/test/e2e/push/push_test.go b/test/e2e/push/push_test.go new file mode 100644 index 000000000..7dd604594 --- /dev/null +++ b/test/e2e/push/push_test.go @@ -0,0 +1,233 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package autoops + +import ( + "context" + "flag" + "fmt" + "testing" + "time" + + "github.com/golang/protobuf/ptypes/wrappers" + + featureclient "github.com/bucketeer-io/bucketeer/pkg/feature/client" + pushclient "github.com/bucketeer-io/bucketeer/pkg/push/client" + rpcclient "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + "github.com/bucketeer-io/bucketeer/pkg/uuid" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" + pushproto "github.com/bucketeer-io/bucketeer/proto/push" +) + +const ( + prefixTestName = "e2e-test" + timeout = 10 * time.Second +) + +var ( + webGatewayAddr = flag.String("web-gateway-addr", "", "Web gateway endpoint address") + webGatewayPort = flag.Int("web-gateway-port", 443, "Web gateway endpoint port") + webGatewayCert = flag.String("web-gateway-cert", "", "Web gateway crt file") + apiKeyPath = flag.String("api-key", "", "Api key path for web gateway") + gatewayAddr = flag.String("gateway-addr", "", "Gateway endpoint address") + gatewayPort = flag.Int("gateway-port", 443, "Gateway endpoint port") + gatewayCert = flag.String("gateway-cert", "", "Gateway crt file") + serviceTokenPath = flag.String("service-token", "", "Service token path") + environmentNamespace = flag.String("environment-namespace", "", "Environment namespace") + testID = flag.String("test-id", "", "test ID") +) + +func TestCreateAndListPush(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + featureClient := newFeatureClient(t) + defer featureClient.Close() + pushClient := newPushClient(t) + defer pushClient.Close() + + featureID := newFeatureID(t) + tag := fmt.Sprintf("%s-tag-%s", prefixTestName, newUUID(t)) + fcmAPIKey := fmt.Sprintf("%s-fcm-api-key-%s", prefixTestName, newUUID(t)) + createFeature(ctx, t, featureClient, featureID, tag) + createPush(ctx, t, pushClient, fcmAPIKey, tag) 
+ pushes := listPushes(t, pushClient) + var push *pushproto.Push + for _, p := range pushes { + if p.FcmApiKey == fcmAPIKey { + push = p + break + } + } + if push == nil { + t.Fatalf("Push not found") + } + if push.FcmApiKey != fcmAPIKey { + t.Fatalf("Incorrect FcmApiKey. Expected: %s actual: %s", fcmAPIKey, push.FcmApiKey) + } + if len(push.Tags) != 1 { + t.Fatalf("The number of tags is incorrect. Expected: %d actual: %d", 1, len(push.Tags)) + } + if push.Tags[0] != tag { + t.Fatalf("Incorrect tag. Expected: %s actual: %s", tag, push.Tags[0]) + } + if push.Deleted != false { + t.Fatalf("Incorrect deleted. Expected: %t actual: %t", false, push.Deleted) + } +} + +func newFeatureClient(t *testing.T) featureclient.Client { + t.Helper() + creds, err := rpcclient.NewPerRPCCredentials(*serviceTokenPath) + if err != nil { + t.Fatal("Failed to create RPC credentials:", err) + } + featureClient, err := featureclient.NewClient( + fmt.Sprintf("%s:%d", *webGatewayAddr, *webGatewayPort), + *webGatewayCert, + rpcclient.WithPerRPCCredentials(creds), + rpcclient.WithDialTimeout(30*time.Second), + rpcclient.WithBlock(), + ) + if err != nil { + t.Fatal("Failed to create feature client:", err) + } + return featureClient +} + +func createFeature(ctx context.Context, t *testing.T, client featureclient.Client, featureID, tag string) { + t.Helper() + cmd := newCreateFeatureCommand(featureID, tag) + createReq := &featureproto.CreateFeatureRequest{ + Command: cmd, + EnvironmentNamespace: *environmentNamespace, + } + if _, err := client.CreateFeature(ctx, createReq); err != nil { + t.Fatal(err) + } + enableFeature(t, featureID, client) +} + +func enableFeature(t *testing.T, featureID string, client featureclient.Client) { + t.Helper() + enableReq := &featureproto.EnableFeatureRequest{ + Id: featureID, + Command: &featureproto.EnableFeatureCommand{}, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + if _, 
err := client.EnableFeature(ctx, enableReq); err != nil { + t.Fatalf("Failed to enable feature id: %s. Error: %v", featureID, err) + } +} + +func newCreateFeatureCommand(featureID, tag string) *featureproto.CreateFeatureCommand { + return &featureproto.CreateFeatureCommand{ + Id: featureID, + Name: "e2e-test-push-feature-name", + Description: "e2e-test-push-feature-description", + Variations: []*featureproto.Variation{ + { + Value: "A", + Name: "Variation A", + Description: "Thing does A", + }, + { + Value: "B", + Name: "Variation B", + Description: "Thing does B", + }, + }, + Tags: []string{tag}, + DefaultOnVariationIndex: &wrappers.Int32Value{Value: int32(0)}, + DefaultOffVariationIndex: &wrappers.Int32Value{Value: int32(1)}, + } +} + +func newPushClient(t *testing.T) pushclient.Client { + t.Helper() + creds, err := rpcclient.NewPerRPCCredentials(*serviceTokenPath) + if err != nil { + t.Fatal("Failed to create RPC credentials:", err) + } + client, err := pushclient.NewClient( + fmt.Sprintf("%s:%d", *webGatewayAddr, *webGatewayPort), + *webGatewayCert, + rpcclient.WithPerRPCCredentials(creds), + rpcclient.WithDialTimeout(30*time.Second), + rpcclient.WithBlock(), + ) + if err != nil { + t.Fatal("Failed to create push client:", err) + } + return client +} + +func newUUID(t *testing.T) string { + t.Helper() + id, err := uuid.NewUUID() + if err != nil { + t.Fatal(err) + } + return id.String() +} + +func createPush(ctx context.Context, t *testing.T, client pushclient.Client, fcmAPIKey, tag string) { + t.Helper() + cmd := newCreatePushCommand(t, fcmAPIKey, []string{tag}) + createReq := &pushproto.CreatePushRequest{ + EnvironmentNamespace: *environmentNamespace, + Command: cmd, + } + if _, err := client.CreatePush(ctx, createReq); err != nil { + t.Fatal(err) + } +} + +func newCreatePushCommand(t *testing.T, fcmAPIKey string, tags []string) *pushproto.CreatePushCommand { + return &pushproto.CreatePushCommand{ + Name: newPushName(t), + FcmApiKey: fcmAPIKey, + Tags: 
tags, + } +} + +func listPushes(t *testing.T, client pushclient.Client) []*pushproto.Push { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + resp, err := client.ListPushes(ctx, &pushproto.ListPushesRequest{ + EnvironmentNamespace: *environmentNamespace, + PageSize: int64(500), + }) + if err != nil { + t.Fatal("failed to list pushes", err) + } + return resp.Pushes +} + +func newFeatureID(t *testing.T) string { + if *testID != "" { + return fmt.Sprintf("%s-%s-feature-id-%s", prefixTestName, *testID, newUUID(t)) + } + return fmt.Sprintf("%s-feature-id-%s", prefixTestName, newUUID(t)) +} + +func newPushName(t *testing.T) string { + if *testID != "" { + return fmt.Sprintf("%s-%s-push-name", prefixTestName, *testID) + } + return fmt.Sprintf("%s-push-name", prefixTestName) +} diff --git a/test/e2e/user/BUILD.bazel b/test/e2e/user/BUILD.bazel new file mode 100644 index 000000000..540a13a19 --- /dev/null +++ b/test/e2e/user/BUILD.bazel @@ -0,0 +1,20 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_test") + +go_test( + name = "go_default_test", + srcs = ["user_test.go"], + deps = [ + "//pkg/feature/client:go_default_library", + "//pkg/gateway/client:go_default_library", + "//pkg/rpc/client:go_default_library", + "//pkg/user/client:go_default_library", + "//pkg/uuid:go_default_library", + "//proto/feature:go_default_library", + "//proto/gateway:go_default_library", + "//proto/user:go_default_library", + "//test/util:go_default_library", + "@com_github_stretchr_testify//assert:go_default_library", + "@io_bazel_rules_go//proto/wkt:any_go_proto", + "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", + ], +) diff --git a/test/e2e/user/user_test.go b/test/e2e/user/user_test.go new file mode 100644 index 000000000..04392da14 --- /dev/null +++ b/test/e2e/user/user_test.go @@ -0,0 +1,444 @@ +// Copyright 2022 The Bucketeer Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package feature + +import ( + "context" + "flag" + "fmt" + "testing" + "time" + + "github.com/golang/protobuf/ptypes/any" + "github.com/golang/protobuf/ptypes/wrappers" + "github.com/stretchr/testify/assert" + + featureclient "github.com/bucketeer-io/bucketeer/pkg/feature/client" + gatewayclient "github.com/bucketeer-io/bucketeer/pkg/gateway/client" + rpcclient "github.com/bucketeer-io/bucketeer/pkg/rpc/client" + userclient "github.com/bucketeer-io/bucketeer/pkg/user/client" + "github.com/bucketeer-io/bucketeer/pkg/uuid" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" + gatewayproto "github.com/bucketeer-io/bucketeer/proto/gateway" + userproto "github.com/bucketeer-io/bucketeer/proto/user" + "github.com/bucketeer-io/bucketeer/test/util" +) + +const ( + prefixTestName = "e2e-test" + retryTimes = 60 + timeout = 10 * time.Second +) + +var ( + // FIXME: To avoid compiling the test many times, webGatewayAddr, webGatewayPort & apiKey has been also added here to prevent from getting: "flag provided but not defined" error during the test. 
These 3 are being used in the Gateway test + webGatewayAddr = flag.String("web-gateway-addr", "", "Web gateway endpoint address") + webGatewayPort = flag.Int("web-gateway-port", 443, "Web gateway endpoint port") + webGatewayCert = flag.String("web-gateway-cert", "", "Web gateway crt file") + apiKeyPath = flag.String("api-key", "", "Api key path for web gateway") + gatewayAddr = flag.String("gateway-addr", "", "Gateway endpoint address") + gatewayPort = flag.Int("gateway-port", 443, "Gateway endpoint port") + gatewayCert = flag.String("gateway-cert", "", "Gateway crt file") + serviceTokenPath = flag.String("service-token", "", "Service token path") + environmentNamespace = flag.String("environment-namespace", "", "Environment namespace") + testID = flag.String("test-id", "", "test ID") +) + +func TestGetUser(t *testing.T) { + t.Parallel() + uuid := newUUID(t) + tag := fmt.Sprintf("%s-tag-%s", prefixTestName, uuid) + userID := newUserID(t, uuid) + now := time.Now() + featureID := newFeatureID(t, uuid) + featureclient := newFeatureClient(t) + defer featureclient.Close() + userClient := newUserClient(t) + defer userClient.Close() + feature := createFeatureWithTag(t, featureclient, featureID, tag) + time.Sleep(3 * time.Second) + user := &userproto.User{ + Id: userID, + } + + // Check evaluations + for i := 0; i < retryTimes; i++ { + resp := getEvaluations(t, tag, user) + if resp.State == featureproto.UserEvaluations_FULL { + evaluationsSize := len(resp.Evaluations.Evaluations) + if evaluationsSize != 1 { + t.Fatalf("The number of evaluations is not correct. Expected: 1, actual: %d", evaluationsSize) + } + if resp.Evaluations == nil { + t.Fatal("Evaluations field is nil") + } + variationID := resp.Evaluations.Evaluations[0].Variation.Id + if feature.Variations[0].Id != variationID { + t.Fatalf("Variation doesn't match. Expected: %s, actual: %s", feature.Variations[0].Id, variationID) + } + break + } + if i == retryTimes-1 { + t.Fatalf("State did not change. 
Expected: %v, actual: %v", featureproto.UserEvaluations_FULL, resp.State) + } + time.Sleep(time.Second) + } + + // Check user + var latestSeen int64 + for i := 0; i < retryTimes; i++ { + actual, _ := getUser(t, userClient, userID) + if actual != nil { + if actual.Id != userID { + t.Fatalf("User ID is not correct: expected: %s, actual: %s", userID, actual.Id) + } + if len(actual.TaggedData[tag].Value) != 0 { + t.Fatalf("The user metadata should be zero. Actual: %v", actual.TaggedData[tag].Value) + } + if actual.LastSeen < now.Unix() { + t.Fatalf("Last seen is not correct: expected: %d, actual: %d", now.Unix(), actual.LastSeen) + } + latestSeen = actual.LastSeen + break + } + if i == retryTimes-1 { + t.Fatalf("User cannot be fetched.") + } + time.Sleep(time.Second) + } + + // Change user attribute + user.Data = map[string]string{"k-0": "v-0"} + + // Check evaluations change + for i := 0; i < retryTimes; i++ { + resp := getEvaluations(t, tag, user) + if resp.State == featureproto.UserEvaluations_FULL { + evaluationsSize := len(resp.Evaluations.Evaluations) + if evaluationsSize != 1 { + t.Fatalf("The evaluations size is not correct. Expected: 1, actual: %d. Data: %v", evaluationsSize, resp.Evaluations.Evaluations) + } + if resp.Evaluations == nil { + t.Fatal("Evaluations field is nil") + } + variationID := resp.Evaluations.Evaluations[0].VariationId + if feature.Variations[1].Id == variationID { + break + } + } + if i == retryTimes-1 { + t.Fatalf("Evaluations did not change. 
Variation Expected: %s, actual: %s", feature.Variations[1].Id, resp.Evaluations.Evaluations[0].VariationId) + } + time.Sleep(time.Second) + } + + // Check user changes + for i := 0; i < retryTimes; i++ { + actual, _ := getUser(t, userClient, userID) + if actual != nil { + if actual.Id != userID { + t.Fatalf("User ID is not correct: expected: %s, actual: %s", userID, actual.Id) + } + if len(actual.TaggedData[tag].Value) == 1 { + for _, data := range actual.TaggedData { + for k, v := range data.Value { + if k != "k-0" { + t.Fatalf("Data key is different. Expected: %s, Actual: %s", k, "k-0") + } + if v != "v-0" { + t.Fatalf("Data value is different. Expected: %s, Actual: %s", v, "v-0") + } + if actual.LastSeen < latestSeen { + t.Fatalf("Last seen is not correct: expected: %d, actual: %d", now.Unix(), actual.LastSeen) + } + } + } + break + } + } + if i == retryTimes-1 { + t.Fatalf("User did not change when adding the user data.") + } + time.Sleep(time.Second) + } + + // Use different tag + tagServer := fmt.Sprintf("%s-tag-server-%s", prefixTestName, uuid) + resp := getEvaluations(t, tagServer, user) + assert.NotNil(t, resp) + time.Sleep(time.Second) + + // Check if user's data has changed + for i := 0; i < retryTimes; i++ { + actual, _ := getUser(t, userClient, userID) + if actual != nil { + if actual.Id != userID { + t.Fatalf("User ID is not correct: expected: %s, actual: %s", userID, actual.Id) + } + // At this point it has one tagged data waiting for the second one be persisted + if len(actual.TaggedData) == 1 { + continue + } + if len(actual.TaggedData[tagServer].Value) != 1 { + t.Fatalf("User data size should not be different than one at this point. Actual: %d. Data: %v", + len(actual.TaggedData[tagServer].Value), + actual.TaggedData[tagServer].Value, + ) + } + for _, data := range actual.TaggedData { + for k, v := range data.Value { + if k != "k-0" { + t.Fatalf("Data key is different. 
Expected: %s, Actual: %s", k, "k-0") + } + if v != "v-0" { + t.Fatalf("Data value is different. Expected: %s, Actual: %s", v, "v-0") + } + if actual.LastSeen < latestSeen { + t.Fatalf("Last seen is not correct: expected: %d, actual: %d", now.Unix(), actual.LastSeen) + } + } + } + break + } + if i == retryTimes-1 { + t.Fatalf("User did not change when using different tags.") + } + time.Sleep(time.Second) + } +} + +func newGatewayClient(t *testing.T) gatewayclient.Client { + t.Helper() + creds, err := gatewayclient.NewPerRPCCredentials(*apiKeyPath) + if err != nil { + t.Fatal("Failed to create RPC credentials:", err) + } + client, err := gatewayclient.NewClient( + fmt.Sprintf("%s:%d", *gatewayAddr, *gatewayPort), + *gatewayCert, + rpcclient.WithPerRPCCredentials(creds), + rpcclient.WithDialTimeout(30*time.Second), + rpcclient.WithBlock(), + ) + if err != nil { + t.Fatal("Failed to create gateway client:", err) + } + return client +} + +func createFeatureWithTag(t *testing.T, client featureclient.Client, featureID, tag string) *featureproto.Feature { + cmd := newCreateFeatureCommand(featureID, "a", "b", []string{tag}) + createFeature(t, client, cmd) + f := getFeature(t, featureID, client) + rule := newFixedStrategyRule(f.Variations[1].Id, "k-0", "v-0") + addCmd, _ := util.MarshalCommand(&featureproto.AddRuleCommand{Rule: rule}) + updateFeatureTargeting(t, client, addCmd, featureID) + enableFeature(t, featureID, client) + return getFeature(t, featureID, client) +} + +func enableFeature(t *testing.T, featureID string, client featureclient.Client) { + t.Helper() + enableReq := &featureproto.EnableFeatureRequest{ + Id: featureID, + Command: &featureproto.EnableFeatureCommand{}, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + if _, err := client.EnableFeature(ctx, enableReq); err != nil { + t.Fatalf("Failed to enable feature id: %s. 
Error: %v", featureID, err) + } +} + +func newFeatureClient(t *testing.T) featureclient.Client { + t.Helper() + creds, err := rpcclient.NewPerRPCCredentials(*serviceTokenPath) + if err != nil { + t.Fatal("Failed to create RPC credentials:", err) + } + featureClient, err := featureclient.NewClient( + fmt.Sprintf("%s:%d", *webGatewayAddr, *webGatewayPort), + *webGatewayCert, + rpcclient.WithPerRPCCredentials(creds), + rpcclient.WithDialTimeout(30*time.Second), + rpcclient.WithBlock(), + ) + if err != nil { + t.Fatal("Failed to create feature client:", err) + } + return featureClient +} + +func newCreateFeatureCommand(featureID string, varA, varB string, tags []string) *featureproto.CreateFeatureCommand { + return &featureproto.CreateFeatureCommand{ + Id: featureID, + Name: "e2e-test-gateway-feature-name", + Description: "e2e-test-gateway-feature-description", + Variations: []*featureproto.Variation{ + { + Value: "A", + Name: "Variation A", + Description: "Thing does A", + }, + { + Value: "B", + Name: "Variation B", + Description: "Thing does B", + }, + }, + Tags: tags, + DefaultOnVariationIndex: &wrappers.Int32Value{Value: int32(0)}, + DefaultOffVariationIndex: &wrappers.Int32Value{Value: int32(1)}, + } +} + +func createFeature(t *testing.T, client featureclient.Client, cmd *featureproto.CreateFeatureCommand) { + t.Helper() + createReq := &featureproto.CreateFeatureRequest{ + Command: cmd, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + if _, err := client.CreateFeature(ctx, createReq); err != nil { + t.Fatal(err) + } +} + +func newUserClient(t *testing.T) userclient.Client { + t.Helper() + creds, err := rpcclient.NewPerRPCCredentials(*serviceTokenPath) + if err != nil { + t.Fatal("Failed to create RPC credentials:", err) + } + userClient, err := userclient.NewClient( + fmt.Sprintf("%s:%d", *webGatewayAddr, *webGatewayPort), + *webGatewayCert, + 
rpcclient.WithPerRPCCredentials(creds), + rpcclient.WithDialTimeout(30*time.Second), + rpcclient.WithBlock(), + ) + if err != nil { + t.Fatal("Failed to create user client:", err) + } + return userClient +} + +func getUser(t *testing.T, client userclient.Client, userID string) (*userproto.User, error) { + t.Helper() + req := &userproto.GetUserRequest{ + UserId: userID, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + resp, err := client.GetUser(ctx, req) + if err != nil { + return nil, err + } + return resp.User, nil +} + +func newUUID(t *testing.T) string { + t.Helper() + id, err := uuid.NewUUID() + if err != nil { + t.Fatal(err) + } + return id.String() +} + +func getEvaluations(t *testing.T, tag string, user *userproto.User) *gatewayproto.GetEvaluationsResponse { + t.Helper() + c := newGatewayClient(t) + defer c.Close() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + req := &gatewayproto.GetEvaluationsRequest{ + Tag: tag, + User: user, + } + response, err := c.GetEvaluations(ctx, req) + if err != nil { + t.Fatal(err) + } + return response +} + +func newFixedStrategyRule(variationID string, attr string, value string) *featureproto.Rule { + uuid, _ := uuid.NewUUID() + return &featureproto.Rule{ + Id: uuid.String(), + Strategy: &featureproto.Strategy{ + Type: featureproto.Strategy_FIXED, + FixedStrategy: &featureproto.FixedStrategy{ + Variation: variationID, + }, + }, + Clauses: []*featureproto.Clause{ + { + Attribute: attr, + Operator: featureproto.Clause_EQUALS, + Values: []string{value}, + }, + }, + } +} + +func updateFeatureTargeting(t *testing.T, client featureclient.Client, cmd *any.Any, featureID string) { + t.Helper() + updateReq := &featureproto.UpdateFeatureTargetingRequest{ + Id: featureID, + Commands: []*featureproto.Command{ + {Command: cmd}, + }, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := 
context.WithTimeout(context.Background(), timeout) + defer cancel() + if _, err := client.UpdateFeatureTargeting(ctx, updateReq); err != nil { + t.Fatal(err) + } +} + +func getFeature(t *testing.T, featureID string, client featureclient.Client) *featureproto.Feature { + t.Helper() + getReq := &featureproto.GetFeatureRequest{ + Id: featureID, + EnvironmentNamespace: *environmentNamespace, + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + response, err := client.GetFeature(ctx, getReq) + if err != nil { + t.Fatal("Failed to get feature:", err) + } + return response.Feature +} + +func newFeatureID(t *testing.T, uuid string) string { + if *testID != "" { + return fmt.Sprintf("%s-%s-feature-id-%s", prefixTestName, *testID, uuid) + } + return fmt.Sprintf("%s-feature-id-%s", prefixTestName, uuid) +} + +func newUserID(t *testing.T, uuid string) string { + if *testID != "" { + return fmt.Sprintf("%s-%s-user-%s", prefixTestName, *testID, uuid) + } + return fmt.Sprintf("%s-user-%s", prefixTestName, uuid) +} diff --git a/test/e2e/util/BUILD.bazel b/test/e2e/util/BUILD.bazel new file mode 100644 index 000000000..5b61e2f51 --- /dev/null +++ b/test/e2e/util/BUILD.bazel @@ -0,0 +1,14 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["rest.go"], + importpath = "github.com/bucketeer-io/bucketeer/test/e2e/util", + visibility = ["//visibility:public"], + deps = [ + "//proto/event/client:go_default_library", + "//proto/feature:go_default_library", + "//proto/gateway:go_default_library", + "//proto/user:go_default_library", + ], +) diff --git a/test/e2e/util/rest.go b/test/e2e/util/rest.go new file mode 100644 index 000000000..b0ebd64e8 --- /dev/null +++ b/test/e2e/util/rest.go @@ -0,0 +1,183 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "testing" + + eventproto "github.com/bucketeer-io/bucketeer/proto/event/client" + featureproto "github.com/bucketeer-io/bucketeer/proto/feature" + gwproto "github.com/bucketeer-io/bucketeer/proto/gateway" + userproto "github.com/bucketeer-io/bucketeer/proto/user" +) + +const ( + version = "/v1" + service = "/gateway" + evaluationsAPI = "/evaluations" + evaluationAPI = "/evaluation" + eventsAPI = "/events" + authorizationKey = "authorization" +) + +type eventType int + +const ( + GoalEventType eventType = iota + 1 // eventType starts from 1 for validation. 
+ GoalBatchEventType + EvaluationEventType + MetricsEventType +) + +type successResponse struct { + Data json.RawMessage `json:"data"` +} + +type registerEventsRequest struct { + Events []Event `json:"events,omitempty"` +} + +type registerEventsResponse struct { + Errors map[string]*gwproto.RegisterEventsResponse_Error `json:"errors,omitempty"` +} + +type getEvaluationsRequest struct { + Tag string `json:"tag,omitempty"` + User *userproto.User `json:"user,omitempty"` + UserEvaluationsID string `json:"user_evaluations_id,omitempty"` + SourceID eventproto.SourceId `json:"source_id,omitempty"` +} + +type getEvaluationsResponse struct { + Evaluations *featureproto.UserEvaluations `json:"evaluations,omitempty"` + UserEvaluationsID string `json:"user_evaluations_id,omitempty"` +} + +type getEvaluationRequest struct { + Tag string `json:"tag,omitempty"` + User *userproto.User `json:"user,omitempty"` + FeatureID string `json:"feature_id,omitempty"` + SourceId eventproto.SourceId `json:"source_id,omitempty"` +} + +type getEvaluationResponse struct { + Evaluation *featureproto.Evaluation `json:"evaluations,omitempty"` +} + +type Event struct { + ID string `json:"id,omitempty"` + Event json.RawMessage `json:"event,omitempty"` + EnvironmentNamespace string `json:"environment_namespace,omitempty"` + Type eventType `json:"type,omitempty"` +} + +func GetEvaluations(t *testing.T, tag, userID, gatewayAddr, apiKeyPath string) *getEvaluationsResponse { + t.Helper() + url := fmt.Sprintf("https://%s%s%s%s", + gatewayAddr, + version, + service, + evaluationsAPI, + ) + req := &getEvaluationsRequest{ + Tag: tag, + User: &userproto.User{ + Id: userID, + }, + } + resp := SendHTTPRequest(t, url, req, apiKeyPath) + var ger getEvaluationsResponse + if err := json.Unmarshal(resp.Data, &ger); err != nil { + t.Fatal(err) + } + return &ger +} + +func GetEvaluation(t *testing.T, tag, featureID, userID, gatewayAddr, apiKeyPath string) *getEvaluationResponse { + t.Helper() + url := 
fmt.Sprintf("https://%s%s%s%s", + gatewayAddr, + version, + service, + evaluationAPI, + ) + req := &getEvaluationRequest{ + Tag: tag, + User: &userproto.User{Id: userID}, + FeatureID: featureID, + } + resp := SendHTTPRequest(t, url, req, apiKeyPath) + var ger getEvaluationResponse + if err := json.Unmarshal(resp.Data, &ger); err != nil { + t.Fatal(err) + } + return &ger +} + +func RegisterEvents(t *testing.T, events []Event, gatewayAddr, apiKeyPath string) *registerEventsResponse { + t.Helper() + url := fmt.Sprintf("https://%s%s%s%s", + gatewayAddr, + version, + service, + eventsAPI, + ) + req := &registerEventsRequest{ + Events: events, + } + resp := SendHTTPRequest(t, url, req, apiKeyPath) + var rer registerEventsResponse + if err := json.Unmarshal(resp.Data, &rer); err != nil { + t.Fatal(err) + } + return &rer +} + +func SendHTTPRequest(t *testing.T, url string, body interface{}, apiKeyPath string) *successResponse { + data, err := ioutil.ReadFile(apiKeyPath) + if err != nil { + t.Fatal(err) + } + encoded, err := json.Marshal(body) + if err != nil { + t.Fatal(err) + } + req, err := http.NewRequest("POST", url, bytes.NewBuffer(encoded)) + if err != nil { + t.Fatal(err) + } + req.Header.Add(authorizationKey, string(data)) + req.Header.Add("Content-Type", "application/json") + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + t.Fatalf("Send HTTP request failed: %d", resp.StatusCode) + } + var sr successResponse + err = json.NewDecoder(resp.Body).Decode(&sr) + if err != nil { + t.Fatal(err) + } + return &sr +} diff --git a/test/util/BUILD.bazel b/test/util/BUILD.bazel new file mode 100644 index 000000000..b3bc12b53 --- /dev/null +++ b/test/util/BUILD.bazel @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "command.go", + "sort.go", + ], + importpath = 
"github.com/bucketeer-io/bucketeer/test/util", + visibility = ["//visibility:public"], + deps = [ + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_golang_protobuf//ptypes:go_default_library_gen", + "@io_bazel_rules_go//proto/wkt:any_go_proto", + ], +) diff --git a/test/util/command.go b/test/util/command.go new file mode 100644 index 000000000..3cc878c97 --- /dev/null +++ b/test/util/command.go @@ -0,0 +1,25 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "github.com/golang/protobuf/proto" // nolint:staticcheck + "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/ptypes/any" +) + +func MarshalCommand(pb proto.Message) (*any.Any, error) { + return ptypes.MarshalAny(pb) +} diff --git a/test/util/sort.go b/test/util/sort.go new file mode 100644 index 000000000..f582cf9cb --- /dev/null +++ b/test/util/sort.go @@ -0,0 +1,44 @@ +// Copyright 2022 The Bucketeer Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import "sort" + +// Int64Slice attaches the methods of sort.Interface to []int64, sorting in increasing order. +type Int64Slice []int64 + +func (p Int64Slice) Len() int { + return len(p) +} + +func (p Int64Slice) Less(i, j int) bool { + return p[i] < p[j] +} + +func (p Int64Slice) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} + +func Int64sAreSorted(a []int64) bool { + return sort.IsSorted(Int64Slice(a)) +} + +func Int64sAreReverseSorted(a []int64) bool { + return sort.IsSorted(sort.Reverse(Int64Slice(a))) +} + +func StringsAreReverseSorted(a []string) bool { + return sort.IsSorted(sort.Reverse(sort.StringSlice(a))) +} diff --git a/tools/build/status.sh b/tools/build/status.sh new file mode 100755 index 000000000..c05d60d18 --- /dev/null +++ b/tools/build/status.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +echo HASH $(git rev-parse --verify HEAD) +echo BUILDDATE $(date '+%Y/%m/%d %H:%M:%S %Z') diff --git a/tools/bzl/nodejs/BUILD.bazel b/tools/bzl/nodejs/BUILD.bazel new file mode 100644 index 000000000..e69de29bb diff --git a/tools/bzl/nodejs/defs.bzl b/tools/bzl/nodejs/defs.bzl new file mode 100644 index 000000000..41a8389d1 --- /dev/null +++ b/tools/bzl/nodejs/defs.bzl @@ -0,0 +1,3 @@ +load("//tools/bzl/nodejs/protobufjs:ts_proto_library.bzl", _ts_proto_library = "ts_proto_library") + +ts_proto_library = _ts_proto_library diff --git a/tools/bzl/nodejs/protobufjs/BUILD.bazel b/tools/bzl/nodejs/protobufjs/BUILD.bazel new file mode 100644 index 000000000..e69de29bb diff --git a/tools/bzl/nodejs/protobufjs/ts_proto_library.bzl b/tools/bzl/nodejs/protobufjs/ts_proto_library.bzl new file mode 100644 index 000000000..1f0cd72f7 --- /dev/null +++ b/tools/bzl/nodejs/protobufjs/ts_proto_library.bzl @@ -0,0 +1,41 @@ +load("@build_bazel_rules_nodejs//:providers.bzl", "DeclarationInfo") + +def _ts_proto_library(ctx): + srcs_files = [f for t in ctx.attr.srcs for f 
in t.files.to_list()] + dts = None + js = None + for src_file in srcs_files: + is_dts = src_file.short_path.endswith(".d.ts") + if is_dts: + dts = src_file + else: + js = src_file + + return struct( + files = depset([dts]), + typescript = struct( + declarations = depset([dts]), + es5_sources = depset([js]), + es6_sources = depset([js]), + transitive_declarations = depset([dts]), + transitive_es5_sources = depset([js]), + transitive_es6_sources = depset([js]), + type_blacklisted_declarations = depset(), + ), + providers = [ + DeclarationInfo( + declarations = depset([dts]), + transitive_declarations = depset([dts]), + type_blacklisted_declarations = depset([]), + ), + ], + ) + +ts_proto_library = rule( + implementation = _ts_proto_library, + attrs = { + "srcs": attr.label_list( + allow_files = ["js", "d.ts"], + ), + }, +) diff --git a/tools/gen/gen.sh b/tools/gen/gen.sh new file mode 100644 index 000000000..6417037d3 --- /dev/null +++ b/tools/gen/gen.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +set -eu + +function usage() { + cat <<_EOT_ +Usage: + $0 Args + +Description: + script for creating PR. 
+ +Environment Variables: + - DIR + - DESCRIPTOR_PATH +_EOT_ + exit 1 +} + +# validations +[[ -z $DIR ]] && usage +[[ -z $DESCRIPTOR_PATH ]] && usage + +cd $DIR + +make proto-go + +descriptor_file="proto_descriptor.pb" + +# api-gateway +api_gateway_values_path="./manifests/bucketeer/charts/api-gateway/values.yaml" +encoded_descriptor=$(cat ${DESCRIPTOR_PATH}/gateway/${descriptor_file} | base64 | tr -d \\n | sed -E "s|\/|\\\/|") +sed -i -E "s|(descriptor): .+|\1: \"${encoded_descriptor}\"|" ${api_gateway_values_path} + +# web-gateway +web_gateway_values_path="./manifests/bucketeer/charts/web-gateway/values.yaml" +proto_descriptor_dirnames=$(find ${DESCRIPTOR_PATH} -name "$descriptor_file" -not -path "**/gateway/*" -print0 | xargs -0 -n1 dirname | awk -F/ '{print $NF}') +for service_name in $proto_descriptor_dirnames +do + encoded_descriptor=$(cat ${DESCRIPTOR_PATH}/${service_name}/${descriptor_file} | base64 | tr -d \\n | sed -E "s|\/|\\\/|") + sed -i -E "s|(${service_name}Descriptor): .+|\1: \"${encoded_descriptor}\"|" ${web_gateway_values_path} +done \ No newline at end of file diff --git a/tools/runner/Dockerfile b/tools/runner/Dockerfile new file mode 100644 index 000000000..3f5ac758f --- /dev/null +++ b/tools/runner/Dockerfile @@ -0,0 +1,100 @@ +FROM ubuntu:18.04 + +ENV GO_VERSION go1.17.2 +ENV PYTHON2_VERSION 2.7.17 +ENV PYTHON3_VERSION 3.7.6 + +ENV GOLANGCILINT_VERSION v1.39.0 +ENV MOCKGEN_VERSION v1.3.1 + +ENV PROTO_VERSION 3.18.1 +ENV PROTOLOCK_VERSION v0.15.0 +ENV GOPROTOGEN_VERSION v1.3.2 +ENV GOOGLEAPIS_VERSION 83e756a66b80b072bd234abcfe89edf459090974 + +ENV BAZELISK_VERSION v1.3.0 +ENV BUILDIFIER_VERSION 3.3.0 + +ENV GCLOUD_VERSION 366.0.0 +ENV DOCKER_VERSION 5:18.09.3~3-0~ubuntu-bionic + +RUN apt update && apt -qq -y install \ + pkg-config \ + zip \ + g++ \ + zlib1g-dev \ + wget \ + git \ + curl \ + jq \ + gcc \ + make \ + whois \ + clang-format-9 \ + build-essential \ + libffi-dev \ + libssl-dev \ + zlib1g-dev \ + liblzma-dev \ + libbz2-dev \ + 
libreadline-dev \ + libsqlite3-dev +RUN mv /usr/bin/clang-format-9 /usr/bin/clang-format + +RUN apt install -y -qq lsb-core software-properties-common +RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - +RUN add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" +RUN apt update +RUN apt install -y docker-ce=${DOCKER_VERSION} +RUN curl -sSL https://get.docker.com/ | sh + +RUN curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTO_VERSION}/protoc-${PROTO_VERSION}-linux-x86_64.zip +RUN unzip protoc-${PROTO_VERSION}-linux-x86_64.zip -d protoc3 +RUN mv protoc3/bin/* /usr/local/bin/ +RUN mv protoc3/include/* /usr/local/include/ + +ENV GOPATH /go +ENV PATH $GOPATH/bin:/usr/local/go/bin:$PATH +RUN wget -P /tmp https://dl.google.com/go/$GO_VERSION.linux-amd64.tar.gz \ + && tar -C /usr/local -xzf /tmp/$GO_VERSION.linux-amd64.tar.gz \ + && rm /tmp/$GO_VERSION.linux-amd64.tar.gz \ + && mkdir -p "$GOPATH/src" "$GOPATH/bin" && chmod -R 777 "$GOPATH" + +RUN git clone https://github.com/googleapis/googleapis.git $GOPATH/src/github.com/googleapis/googleapis +RUN cd $GOPATH/src/github.com/googleapis/googleapis && \ + git checkout ${GOOGLEAPIS_VERSION} + +RUN go install golang.org/x/tools/cmd/goimports@latest +RUN go install github.com/golangci/golangci-lint/cmd/golangci-lint@${GOLANGCILINT_VERSION} +RUN go install github.com/golang/mock/mockgen@${MOCKGEN_VERSION} +RUN go install github.com/golang/protobuf/protoc-gen-go@${GOPROTOGEN_VERSION} +RUN go install github.com/nilslice/protolock/...@${PROTOLOCK_VERSION} +RUN go install github.com/bazelbuild/buildtools/buildifier@${BUILDIFIER_VERSION} +RUN go install github.com/bazelbuild/bazelisk@${BAZELISK_VERSION} + +ENV PYENV_ROOT /root/.pyenv +ENV PATH $PYENV_ROOT/bin:$PATH +RUN git clone https://github.com/pyenv/pyenv.git ~/.pyenv +RUN pyenv init - +RUN pyenv install -f ${PYTHON2_VERSION} +RUN pyenv install -f ${PYTHON3_VERSION} +RUN ln 
-sf /root/.pyenv/versions/${PYTHON2_VERSION}/bin/python /usr/bin/python +RUN ln -sf /root/.pyenv/versions/${PYTHON3_VERSION}/bin/python /usr/bin/python3 +RUN ln -sf /root/.pyenv/versions/${PYTHON2_VERSION}/bin/pip /usr/bin/pip +RUN ln -sf /root/.pyenv/versions/${PYTHON3_VERSION}/bin/pip /usr/bin/pip3 + +RUN rm /usr/bin/lsb_release +RUN pip3 install yq pyyaml requests +RUN curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/install-poetry.py | python3 - +ENV PATH /root/.local/bin:$PATH + +ENV PATH /google-cloud-sdk/bin:$PATH +RUN curl -Lo /tmp/google-cloud-sdk.tar.gz https://dl.google.com/dl/cloudsdk/release/google-cloud-sdk.tar.gz && \ + tar -C / -xzf /tmp/google-cloud-sdk.tar.gz && \ + rm /tmp/google-cloud-sdk.tar.gz && \ + /google-cloud-sdk/install.sh --usage-reporting=true && \ + gcloud --quiet components update --version=$GCLOUD_VERSION + +RUN curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg | dd of=/usr/share/keyrings/githubcli-archive-keyring.gpg +RUN echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | tee /etc/apt/sources.list.d/github-cli.list > /dev/null +RUN apt update -y && apt install -y gh diff --git a/ui/web-v2/.editorconfig b/ui/web-v2/.editorconfig new file mode 100644 index 000000000..6e87a003d --- /dev/null +++ b/ui/web-v2/.editorconfig @@ -0,0 +1,13 @@ +# Editor configuration, see http://editorconfig.org +root = true + +[*] +charset = utf-8 +indent_style = space +indent_size = 2 +insert_final_newline = true +trim_trailing_whitespace = true + +[*.md] +max_line_length = off +trim_trailing_whitespace = false diff --git a/ui/web-v2/.eslintignore b/ui/web-v2/.eslintignore new file mode 100644 index 000000000..40d7f7e64 --- /dev/null +++ b/ui/web-v2/.eslintignore @@ -0,0 +1 @@ +apps/**/proto diff --git a/ui/web-v2/.eslintrc.json b/ui/web-v2/.eslintrc.json new file mode 100644 index 000000000..a6f91167f --- 
/dev/null +++ b/ui/web-v2/.eslintrc.json @@ -0,0 +1,38 @@ +{ + "root": true, + "ignorePatterns": ["**/*"], + "plugins": ["@nrwl/nx"], + "overrides": [ + { + "files": ["*.ts", "*.tsx", "*.js", "*.jsx"], + "rules": { + "@nrwl/nx/enforce-module-boundaries": [ + "error", + { + "enforceBuildableLibDependency": true, + "allow": [], + "depConstraints": [ + { + "sourceTag": "*", + "onlyDependOnLibsWithTags": ["*"] + } + ] + } + ] + } + }, + { + "files": ["*.ts", "*.tsx"], + "extends": ["plugin:@nrwl/nx/typescript"], + "parserOptions": { + "project": "./tsconfig.*?.json" + }, + "rules": {} + }, + { + "files": ["*.js", "*.jsx"], + "extends": ["plugin:@nrwl/nx/javascript"], + "rules": {} + } + ] +} diff --git a/ui/web-v2/.gitignore b/ui/web-v2/.gitignore new file mode 100644 index 000000000..2cc883764 --- /dev/null +++ b/ui/web-v2/.gitignore @@ -0,0 +1,48 @@ +# See http://help.github.com/ignore-files/ for more about ignoring files. + +# compiled output +/dist +/tmp +/out-tsc + +# dependencies +/node_modules + +# IDEs and editors +/.idea +.project +.classpath +.c9/ +*.launch +.settings/ +*.sublime-workspace + +# IDE - VSCode +.vscode/* +!.vscode/settings.json +!.vscode/tasks.json +!.vscode/launch.json +!.vscode/extensions.json + +# misc +/.sass-cache +/connect.lock +/coverage +/libpeerconnection.log +npm-debug.log +yarn-error.log +testem.log +/typings +extracted-messages.json + +# System Files +.DS_Store +Thumbs.db + +**/proto/**/*.js +**/proto/**/*.ts +**/certs/tls.crt +**/certs/tls.key + +**/proto/**/*.js +**/proto/**/*.ts diff --git a/ui/web-v2/.prettierignore b/ui/web-v2/.prettierignore new file mode 100644 index 000000000..d0b804da2 --- /dev/null +++ b/ui/web-v2/.prettierignore @@ -0,0 +1,4 @@ +# Add files here to ignore them from prettier formatting + +/dist +/coverage diff --git a/ui/web-v2/.prettierrc b/ui/web-v2/.prettierrc new file mode 100644 index 000000000..92cde390a --- /dev/null +++ b/ui/web-v2/.prettierrc @@ -0,0 +1,3 @@ +{ + "singleQuote": true +} \ No newline 
at end of file diff --git a/ui/web-v2/BUILD.bazel b/ui/web-v2/BUILD.bazel new file mode 100644 index 000000000..ab162677e --- /dev/null +++ b/ui/web-v2/BUILD.bazel @@ -0,0 +1,31 @@ +load("@io_bazel_rules_docker//container:container.bzl", "container_image") +load("@npm-v2//nx:index.bzl", "nx") + +package(default_visibility = ["//visibility:public"]) + +nx( + name = "bundle", + args = [ + "build", + "--outputPath=../../$(@D)", + ], + chdir = "ui/web-v2", + configuration_env_vars = ["RELEASE_CHANNEL"], + data = [ + "babel.config.json", + "nx.json", + "tsconfig.base.json", + "package.json", + "workspace.json", + "@npm-v2//:node_modules", + ] + glob(["apps/admin/**/*"]), + output_dir = True, +) + +container_image( + name = "bundle-image", + base = "@bucketeer-web-nginx//image", + data_path = "/ui/web-v2/bundle", + directory = "/var/www", + files = [":bundle"], +) diff --git a/ui/web-v2/Dockerfile b/ui/web-v2/Dockerfile new file mode 100644 index 000000000..cc7f3c8d7 --- /dev/null +++ b/ui/web-v2/Dockerfile @@ -0,0 +1,12 @@ +FROM nginx:1.13.11-alpine + +RUN apk update \ + && apk add ca-certificates \ + && update-ca-certificates \ + && ln -sf /var/nginx/nginx.conf /etc/nginx/nginx.conf + +WORKDIR /etc/nginx + +CMD ["nginx", "-g", "daemon off;"] + +EXPOSE 80 443 diff --git a/ui/web-v2/Makefile b/ui/web-v2/Makefile new file mode 100644 index 000000000..0956a207d --- /dev/null +++ b/ui/web-v2/Makefile @@ -0,0 +1,43 @@ +#################################### +# Yarn +#################################### + +.PHONY: install +install: + yarn install + +.PHONY: build +build: + yarn build + +.PHONY: start +start: + yarn start + +.PHONY: lint +lint: + yarn lint + +#################################### +# Generate proto definition files +#################################### + +ROOT_DIR := $(CURDIR)/../.. 
+SRC_DIR := $(CURDIR)/apps/admin/src +PROTOBUF_INCLUDE_DIR := $(ROOT_DIR)/proto/external/protocolbuffers/protobuf/v3.18.1 +NPM_BIN_DIR := $(CURDIR)/node_modules/.bin +BIN_DIR := bazel-out/darwin-fastbuild/bin + +.PHONY: gen_proto +gen_proto: clean_proto + protoc \ + --plugin=protoc-gen-ts=$(NPM_BIN_DIR)/protoc-gen-ts \ + --js_out=import_style=commonjs,binary:$(SRC_DIR) \ + --ts_out=service=grpc-web:$(SRC_DIR) \ + -I $(ROOT_DIR) \ + -I $(PROTOBUF_INCLUDE_DIR) \ + $(shell find $(ROOT_DIR)/proto -type f -name "*.proto" -not -path "**/gateway/*.proto" -not -path "**/google/protobuf/*.proto") + +.PHONY: clean_proto +clean_proto: + rm -rf $(SRC_DIR)/proto/**/*.{js,ts} diff --git a/ui/web-v2/README.md b/ui/web-v2/README.md new file mode 100644 index 000000000..8aac316d4 --- /dev/null +++ b/ui/web-v2/README.md @@ -0,0 +1,57 @@ +# Web Console + +This console is built using [Tailwind CSS](https://tailwindcss.com). + +## Installation + +First, ensure that the node and yarn are installed in your local environment, using the same version configured [here](https://github.com/bucketeer-io/bucketeer/blob/master/WORKSPACE). + +### Install dependencies + +```sh +yarn install +``` + +### Configure the TLS certificate + +Place the TLS cert and key files under the following directory. + +- `./apps/admin/certs/tls.crt` +- `./apps/admin/certs/tls.key` + +## Local Development + +### Set the API and Web endpoint + +```sh +export NX_DEV_WEB_API_ENDPOINT=https://example.com +export NX_DEV_AUTH_REDIRECT_ENDPOINT=https://local.example.com +``` + +### Serve locally + +```sh +yarn start +``` + +### Build + +```sh +yarn build +``` + +### Lint codes + +```sh +yarn lint +``` + +### Internationalization + +You need to run the following command when [the message file](https://github.com/bucketeer-io/bucketeer/blob/master/ui/web-v2/apps/admin/src/lang/messages.ts) is modified. 
+ +```sh +yarn translate +``` + +It will generate [en.json](https://github.com/bucketeer-io/bucketeer/blob/master/ui/web-v2/apps/admin/src/assets/lang/en.json). Then, you need to manually do the same modifications on the other language files, including the translation under the same directory. diff --git a/ui/web-v2/apps/.gitkeep b/ui/web-v2/apps/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/ui/web-v2/apps/admin/.babelrc b/ui/web-v2/apps/admin/.babelrc new file mode 100644 index 000000000..d4fb1a5fe --- /dev/null +++ b/ui/web-v2/apps/admin/.babelrc @@ -0,0 +1,4 @@ +{ + "presets": ["@nrwl/react/babel", "@emotion/babel-preset-css-prop"], + "plugins": [] +} diff --git a/ui/web-v2/apps/admin/.browserslistrc b/ui/web-v2/apps/admin/.browserslistrc new file mode 100644 index 000000000..f1d12df4f --- /dev/null +++ b/ui/web-v2/apps/admin/.browserslistrc @@ -0,0 +1,16 @@ +# This file is used by: +# 1. autoprefixer to adjust CSS to support the below specified browsers +# 2. babel preset-env to adjust included polyfills +# +# For additional information regarding the format and rule options, please see: +# https://github.com/browserslist/browserslist#queries +# +# If you need to support different browsers in production, you may tweak the list below. + +last 1 Chrome version +last 1 Firefox version +last 2 Edge major versions +last 2 Safari major version +last 2 iOS major versions +Firefox ESR +not IE 9-11 # For IE 9-11 support, remove 'not'. 
\ No newline at end of file diff --git a/ui/web-v2/apps/admin/.eslintrc.json b/ui/web-v2/apps/admin/.eslintrc.json new file mode 100644 index 000000000..daf85130e --- /dev/null +++ b/ui/web-v2/apps/admin/.eslintrc.json @@ -0,0 +1,55 @@ +{ + "extends": [ + "plugin:@nrwl/nx/react", + "../../.eslintrc.json" + ], + "ignorePatterns": [ + "!**/*" + ], + "rules": {}, + "overrides": [ + { + "files": [ + "*.ts", + "*.tsx", + "*.js", + "*.jsx" + ], + "rules": { + "react/react-in-jsx-scope": "off" + } + }, + { + "files": [ + "*.ts", + "*.tsx" + ], + "extends": [ + "plugin:@nrwl/nx/typescript", + "plugin:import/warnings", + "plugin:import/typescript" + ], + "rules": { + "eqeqeq": "off", + "no-restricted-globals": "off", + "array-callback-return": "off", + "react/jsx-no-useless-fragment": "off", + "react-hooks/exhaustive-deps": "off", + "@typescript-eslint/no-explicit-any": "off", + "@typescript-eslint/ban-types": "off", + "@typescript-eslint/ban-ts-comment": "off", + "@typescript-eslint/no-empty-interface": "off", + "@typescript-eslint/no-empty-function": "off", + "import/order": [ + "warn", + { + "alphabetize": { + "order": "asc" + }, + "newlines-between": "always" + } + ] + } + } + ] +} diff --git a/ui/web-v2/apps/admin/babel-jest.config.json b/ui/web-v2/apps/admin/babel-jest.config.json new file mode 100644 index 000000000..bf04d5f81 --- /dev/null +++ b/ui/web-v2/apps/admin/babel-jest.config.json @@ -0,0 +1,14 @@ +{ + "presets": [ + [ + "@babel/preset-env", + { + "targets": { + "node": "current" + } + } + ], + "@babel/preset-typescript", + "@babel/preset-react" + ] +} diff --git a/ui/web-v2/apps/admin/certs/.gitkeep b/ui/web-v2/apps/admin/certs/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/ui/web-v2/apps/admin/jest.config.js b/ui/web-v2/apps/admin/jest.config.js new file mode 100644 index 000000000..20a2dc72a --- /dev/null +++ b/ui/web-v2/apps/admin/jest.config.js @@ -0,0 +1,13 @@ +module.exports = { + displayName: 'admin', + preset: 
'../../jest.preset.js', + transform: { + '^(?!.*\\.(js|jsx|ts|tsx|css|json)$)': '@nrwl/react/plugins/jest', + '^.+\\.[tj]sx?$': [ + 'babel-jest', + { cwd: __dirname, configFile: './babel-jest.config.json' }, + ], + }, + moduleFileExtensions: ['ts', 'tsx', 'js', 'jsx'], + coverageDirectory: '../../coverage/apps/admin', +}; diff --git a/ui/web-v2/apps/admin/src/assets/.gitkeep b/ui/web-v2/apps/admin/src/assets/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/ui/web-v2/apps/admin/src/assets/lang/en.json b/ui/web-v2/apps/admin/src/assets/lang/en.json new file mode 100644 index 000000000..f8e76ae11 --- /dev/null +++ b/ui/web-v2/apps/admin/src/assets/lang/en.json @@ -0,0 +1,455 @@ +{ + "account.add.header.description": "The account is required to access the admin console. The account has three roles: viewer, editor, and owner.", + "account.add.header.title": "Create an account", + "account.confirm.disable.description": "Are you sure you want to disable this account?", + "account.confirm.disable.title": "Disable account", + "account.confirm.enable.description": "Are you sure you want to enable this account?", + "account.confirm.enable.title": "Enable account", + "account.filter.enabled": "Enabled", + "account.filter.role": "Role", + "account.list.header.description": "On this page, you can check all accounts for this environment. 
Select an account to manage the role settings or click on the Add button to add a new one.", + "account.list.header.title": "Accounts", + "account.list.noData.description": "You can add new team members, disable, or manage access controls for members by setting roles.", + "account.list.noResult.searchKeyword": "email", + "account.role.editor": "Editor", + "account.role.owner": "Owner", + "account.role.viewer": "Viewer", + "account.search.placeholder": "Email", + "account.sort.emailAz": "Email A-Z", + "account.sort.emailZa": "Email Z-A", + "account.sort.newest": "Newest", + "account.sort.oldest": "Oldest", + "account.update.header.description": "The account is required to access the admin console. The account has three roles: viewer, editor, and owner.", + "account.update.header.title": "Update the account", + "adminAccount.add.header.description": "The admin account has access to all projects and environments.", + "adminAccount.add.header.title": "Create an account", + "adminAccount.confirm.disable.description": "Are you sure you want to disable the {accountId} account?", + "adminAccount.confirm.disable.title": "Disable account", + "adminAccount.confirm.enable.description": "Are you sure you want to enable the {accountId} account?", + "adminAccount.confirm.enable.title": "Enable account", + "adminAccount.filter.enabled": "Enabled", + "adminAccount.list.header.description": "On this tab, you can check all admin accounts, disable or add a new one.", + "adminAccount.list.header.title": "Accounts", + "adminAccount.list.noData.description": "You can add new admin accounts.", + "adminAccount.list.noResult.searchKeyword": "Email", + "adminAccount.search.placeholder": "Email", + "adminAccount.sort.emailAz": "Email A-Z", + "adminAccount.sort.emailZa": "Email Z-A", + "adminAccount.sort.newest": "Newest", + "adminAccount.sort.oldest": "Oldest", + "adminAuditLog.list.header.description": "On this tab you can check all admin audit logs.", + 
"adminEnvironment.add.header.description": "You can manage your feature flag's development lifecycle, from local development through production.", + "adminEnvironment.add.header.title": "Create an environment", + "adminEnvironment.filter.project": "Project", + "adminEnvironment.list.header.description": "On this tab, you can check all environments. Select an environment to update or click on the Add button to add a new one.", + "adminEnvironment.list.header.title": "Environments", + "adminEnvironment.list.noResult.searchKeyword": "ID and description", + "adminEnvironment.search.placeholder": "ID and description", + "adminEnvironment.sort.idAz": "ID A-Z", + "adminEnvironment.sort.idZa": "ID Z-A", + "adminEnvironment.sort.newest": "Newest", + "adminEnvironment.sort.oldest": "Oldest", + "adminEnvironment.update.header.description": "You can manage your feature flag's development lifecycle, from local development through production.", + "adminEnvironment.update.header.title": "Update the environment", + "adminProject.action.convertProject": "Convert to paid version", + "adminProject.add.header.description": "You can manage multiple different user projects by using projects.", + "adminProject.add.header.title": "Create a project", + "adminProject.confirm.convertProject.description": "Are you sure you want to convert the {projectId} project to paid version?", + "adminProject.confirm.convertProject.title": "Convert project", + "adminProject.confirm.disable.description": "Are you sure you want to disable the {projectId} project?", + "adminProject.confirm.disable.title": "Disable project", + "adminProject.confirm.enable.description": "Are you sure you want to enable the {projectId} project?", + "adminProject.confirm.enable.title": "Enable project", + "adminProject.creator": "Creator", + "adminProject.filter.enabled": "Enabled", + "adminProject.list.header.description": "On this page, you can check all projects. 
Select a project to update or click on the Add button to add a new one.", + "adminProject.list.header.title": "Projects", + "adminProject.list.noResult.searchKeyword": "ID and email", + "adminProject.search.placeholder": "ID and email", + "adminProject.sort.idAz": "ID A-Z", + "adminProject.sort.idZa": "ID Z-A", + "adminProject.sort.newest": "Newest", + "adminProject.sort.oldest": "Oldest", + "adminProject.trialPeriod": "Trial period", + "adminProject.update.header.description": "You can manage multiple different user projects by using projects.", + "adminProject.update.header.title": "Update the project", + "adminSettings.list.header.description": "On this page, you can check all admin settings. Select a tab to manage the settings.", + "adminSettings.list.header.title": "Admin Settings", + "adminSettings.tab.account": "Accounts", + "adminSettings.tab.auditLogs": "Audit Logs", + "adminSettings.tab.environments": "Environments", + "adminSettings.tab.notifications": "Notifications", + "adminSettings.tab.projects": "Projects", + "analysis.clientData": "Client data", + "analysis.conversionRate": "Conversion rate", + "analysis.evaluationTotal": "Evaluation total", + "analysis.evaluationUser": "Evaluation user", + "analysis.goalTotal": "Goal total", + "analysis.goalUser": "Goal user", + "analysis.goalValueMean": "GoalValue mean", + "analysis.goalValueTotal": "GoalValue total", + "analysis.goalValueVariance": "GoalValue variance", + "analysis.header.description": "On this page, you can get insight by querying goal counts by feature flags and user metadata.", + "analysis.header.title": "Analysis", + "analysis.segment": "Segment", + "analysis.variation": "Variation", + "apiKey.add.header.description": "The API key is required for the client SDK to access the server API.", + "apiKey.add.header.title": "Create an API Key", + "apiKey.confirm.disable.description": "Are you sure you want to disable this API Key?", + "apiKey.confirm.disable.title": "Disable API Key", + 
"apiKey.confirm.enable.description": "Are you sure you want to enable this API Key?", + "apiKey.confirm.enable.title": "Enable API Key", + "apiKey.filter.enabled": "Enabled", + "apiKey.list.header.description": "On this page, you can check all API Keys for this environment. Select an API Key to manage the settings or click on the Add button to add a new one.", + "apiKey.list.header.title": "API Keys", + "apiKey.list.noData.description": "You can add an API Key to allow requests from the client SDK.", + "apiKey.list.noResult.searchKeyword": "name", + "apiKey.search.placeholder": "Name", + "apiKey.update.header.description": "The API key is required for the client SDK to access the server API.", + "apiKey.update.header.title": "Update the API Key", + "auditLog.filter.dates": "Dates", + "auditLog.filter.type": "Type", + "auditLog.list.header.description": "On this page, you can check all audit logs for this environment.", + "auditLog.list.header.title": "Audit Logs", + "auditLog.list.noData.description": "The history will be created when you add or edit something on the Admin Console.", + "auditLog.list.noResult.searchKeyword": "email", + "auditLog.search.placeholder": "Email", + "auditLog.sort.newest": "Newest", + "auditLog.sort.oldest": "Oldest", + "autoOps.clauseType": "Rule type", + "autoOps.datetime.datetime": "Date time", + "autoOps.datetimeClauseType": "Schedule", + "autoOps.disableFeatureType": "Disable feature", + "autoOps.enableFeatureType": "Enable feature", + "autoOps.eventRateClauseType": "Event rate", + "autoOps.operation": "Operation", + "autoOps.operationType": "Operation type", + "autoOps.opsEventRateClause.featureVersion": "Feature version", + "autoOps.opsEventRateClause.goal": "Goal", + "autoOps.opsEventRateClause.minCount": "Minimum count", + "autoOps.rule": "Rule", + "button.add": "Add", + "button.addCondition": "Add condition", + "button.addOperation": "Add operation", + "button.addRule": "Add rule", + "button.addVariation": "Add variation", + 
"button.archive": "Archive", + "button.cancel": "Cancel", + "button.clearAll": "Clear All", + "button.copyFlags": "Copy flags", + "button.edit": "Edit", + "button.result": "Result", + "button.saveWithComment": "Save with comment", + "button.submit": "Submit", + "copy.copied": "Copied!", + "copy.copyToClipboard": "Copy to clipboard", + "created": "Created", + "description": "Description", + "disabled": "Disabled", + "enabled": "Enabled", + "environment.select.label": "(Project) Environment", + "error": "Error", + "experiment.action.archive": "Archive", + "experiment.add.header.description": "Get started by filling in the information below to create your new experiment.", + "experiment.add.header.title": "Create a experiment", + "experiment.baselineVariation": "Baseline variation", + "experiment.confirm.archive.description": "Are you sure you want to archive the {experimentName} experiment?", + "experiment.confirm.archive.title": "Archive experiment", + "experiment.feature": "Feature flag", + "experiment.filter.archived": "Archived", + "experiment.filter.maintainer": "Maintainer", + "experiment.filter.status": "Status", + "experiment.goalIds": "Goals", + "experiment.list.header.description": "Use this page to see all experiments in this environment. 
Select an experiment to manage settings and display the results.", + "experiment.list.header.title": "Experiments", + "experiment.list.noData.description": "By using Experiments, you can improve web page loading time, test new features, etc.", + "experiment.list.noResult.searchKeyword": "name and description", + "experiment.maintainer": "Maintainer", + "experiment.period": "Period", + "experiment.result.conversionRate.helpText": "Calculated as (number of unique users who fired the goal event / number of unique users for whom a variation was returned).", + "experiment.result.conversionRate.label": "Conversion rate", + "experiment.result.evaluationUser.helpText": "The number of unique users for which variations have been returned. The number of users actually assigned to the feature flag variation. It does not include offline users or potential new users.", + "experiment.result.evaluationUser.label": "Evaluation user", + "experiment.result.goalUser.helpText": "The number of unique users who fired the goal event. The count will not increase if the same user reaches the goal event multiple times.", + "experiment.result.goalUser.label": "Goal user", + "experiment.result.goals.helpText": "The total number of goal events fired by the client.", + "experiment.result.goals.label": "Goal total", + "experiment.result.improvement.helpText": "A measure of improvement in an indicator related to variation compared to a baseline (also called a control group). It is calculated by comparing the range of values for variation with the range of values for baseline.", + "experiment.result.improvement.label": "Improvement", + "experiment.result.noData.description": "The result is created when the experiment starts.", + "experiment.result.noData.errorMessage": "The data is not ready. 
Please come back later.", + "experiment.result.noData.experimentResult": "Experiment result", + "experiment.result.probabilityToBeatBaseline.helpText": "Estimated likelihood of exceeding baseline (also known as a control group). A criterion of at least 95% is recommended.", + "experiment.result.probabilityToBeatBaseline.label": "Probability to beat baseline", + "experiment.result.probabilityToBest.helpText": "Possibility of being the best variation. Possibility of being presumed to outperform all other variations. We recommend a criterion of at least 95%.", + "experiment.result.probabilityToBest.label": "Probability to best", + "experiment.result.valuePerUser.helpText": "The total number of values assigned to the goal event per user. It is calculated as (the sum of the numbers assigned to the goal event / the number of unique users who fired the goal event).", + "experiment.result.valuePerUser.label": "Value/User", + "experiment.result.valueSum.helpText": "The total number of values assigned to a goal event. 
This value is different for each goal.", + "experiment.result.valueSum.label": "Value total", + "experiment.result.variation.label": "Variation", + "experiment.search.placeholder": "Name, Description", + "experiment.startAt": "Start at", + "experiment.status.forceStopped": "Stopped", + "experiment.status.running": "Running", + "experiment.status.status": "Status", + "experiment.status.stopped": "Finished", + "experiment.status.waiting": "Waiting", + "experiment.stop.dialog.description": "Do you really stop an experiment?", + "experiment.stop.dialog.title": "Confirm", + "experiment.stop.stopExperiment": "Stop experiment", + "experiment.stopAt": "Stop at", + "experiment.update.header.description": "Fill in the information below to update your experiment.", + "experiment.update.header.title": "Update a experiment", + "feature.action.archive": "Archive", + "feature.action.clone": "Clone", + "feature.action.unarchive": "Unarchive", + "feature.add.header.description": "Get started by filling in the information below to create your new feature flag.", + "feature.add.header.title": "Create a feature flag", + "feature.clause.operator.after": "after", + "feature.clause.operator.before": "before", + "feature.clause.operator.endWith": "ends with", + "feature.clause.operator.equal": "=", + "feature.clause.operator.greater": ">", + "feature.clause.operator.greaterOrEqual": ">=", + "feature.clause.operator.in": "contains", + "feature.clause.operator.less": "<", + "feature.clause.operator.lessOrEqual": "<=", + "feature.clause.operator.segment": "is included in", + "feature.clause.operator.startWith": "starts with", + "feature.clause.type.compare": "Compare", + "feature.clause.type.date": "Date", + "feature.clause.type.segment": "User segment", + "feature.clone.header.description": "It will copy the full targeting configuration, including on/off variation from the original flag to the new flag.", + "feature.clone.header.title": "Clone feature flag", + 
"feature.confirm.archiveDescription": "This will archive and return the default value defined in your code for all users. We recommend removing the code references to \"{featureId}\" from your application before archiving.", + "feature.confirm.archiveTitle": "Archive Feature Flag", + "feature.confirm.description": "This will make changes to the flag and increment the version.", + "feature.confirm.title": "Confirmation required", + "feature.confirm.unarchiveDescription": "Are you sure you want to unarchive the feature flag \"{featureId}\"?", + "feature.confirm.unarchiveTitle": "Unarchive Feature Flag", + "feature.defaultStrategy": "Default strategy", + "feature.filter.archived": "Archived", + "feature.filter.enabled": "Enabled", + "feature.filter.hasExperiment": "Has experiment", + "feature.filter.maintainer": "Maintainer", + "feature.flagStatus.inactive": "Inactive", + "feature.flagStatus.new": "New", + "feature.flagStatus.receivingRequests": "Receiving requests", + "feature.id": "ID", + "feature.list.active": "Active", + "feature.list.archive": "Archive", + "feature.list.header.description": "Use this page to see all feature flags in this project. 
Select a flag to manage the environment-specific targeting and rollout rules.", + "feature.list.header.title": "Feature Flags", + "feature.list.noData.description": "Create feature flags to manage who sees your features.", + "feature.list.noResult.searchKeyword": "ID, name and description", + "feature.offVariation": "off variation", + "feature.onVariation": "on variation", + "feature.resetRandomSampling": "Reset random sampling", + "feature.rule": "Rule rollout percentage", + "feature.search.placeholder": "ID, Name, Description", + "feature.sort.nameAz": "Name A-Z", + "feature.sort.nameZa": "Name Z-A", + "feature.sort.newest": "Newest", + "feature.sort.oldest": "Oldest", + "feature.status": "status", + "feature.strategy.selectRolloutPercentage": "Select rollout percentage", + "feature.tab.autoOps": "Auto Ops Rules", + "feature.tab.evaluation": "Evaluation", + "feature.tab.experiments": "Experiments", + "feature.tab.history": "History", + "feature.tab.settings": "Settings", + "feature.tab.targeting": "Targeting", + "feature.tab.variations": "Variations", + "feature.targetingDescription": "Enable targeting settings. 
You can configure targeting users, complex rules, default strategy, and off variation.", + "feature.targetings": "Targetings", + "feature.type.boolean": "boolean", + "feature.type.json": "json", + "feature.type.number": "number", + "feature.type.string": "string", + "feature.updateComment": "Comment for update", + "feature.variation": "variation", + "feature.variationType": "Flag type", + "filter.add": "Add filter", + "filter.filter": "Filter", + "goal.action.archive": "Archive", + "goal.add.header.description": "The goal lets you measure user behaviors affected by your feature flags in experiments.", + "goal.add.header.title": "Create a goal", + "goal.confirm.archive.description": "We recommend removing the code references to \"{goalId}\" from your application before archiving.", + "goal.confirm.archive.title": "Archive goal", + "goal.filter.archived": "Archived", + "goal.filter.status": "Status", + "goal.list.header.description": "Use this page to see all goals in this environment. Select a goal to manage settings.", + "goal.list.header.title": "Goals", + "goal.list.noData.description": "Goals are the metrics used to measure the effectiveness of a Feature Flag.", + "goal.list.noResult.searchKeyword": "name and description", + "goal.status.inUse": "in use", + "goal.status.notInUse": "not in use", + "goal.status.status": "Status", + "goal.update.header.description": "The goal lets you measure user behaviors affected by your feature flags in experiments.", + "goal.update.header.title": "Update the goal", + "id": "ID", + "input.destinationEnvironment": "Destination environment", + "input.email": "Email", + "input.error.invalidEmailAddress": "Invalid email address.", + "input.error.invalidEmailDomain": "Invalid email domain.", + "input.error.invalidId": "Invalid ID. 
ID must only contain lowercase letters, numbers or '-', and must start with an alphanumeric.", + "input.error.maxLength": "The maximum length for this field is {max} characters.", + "input.error.minSelectOptionLength": "Must select at least one option.", + "input.error.mustBeUnique": "This must be unique.", + "input.error.not100Percentage": "Total should be 100%.", + "input.error.notJson": "Invalid JSON.", + "input.error.notLaterThanCurrentTime": "This must be later than the current time.", + "input.error.notLaterThanOrEqualDays": "This must be later than or equal to {days} days ago.", + "input.error.notLaterThanStartAt": "This must be later than the start at.", + "input.error.notLessThanOrEquals30Days": "The period must be less than or equals to 30 days.", + "input.error.notNumber": "This must be a number.", + "input.error.required": "This is required.", + "input.featureFlag": "Feature Flag", + "input.optional": "(optional)", + "input.originEnvironment": "Origin environment", + "input.projectId": "Project ID", + "input.role": "Role", + "maintainer": "Maintainer", + "name": "Name", + "no": "No", + "noData.title": "There are no {title} yet.", + "noResult.changeFilterSelection": "Change your filter selection", + "noResult.checkTypos": "Check for typos", + "noResult.searchByKeyword": "Search by {keyword}", + "noResult.title": "No {title} match. 
You can try this:", + "notification.add.header.description": "A notification lets you know when someone adds or updates something on the admin console and operational tasks status.", + "notification.add.header.title": "Create a notification", + "notification.confirm.delete.description": "The {notificationName} notification will be deleted permanently.", + "notification.confirm.delete.title": "Delete notification", + "notification.confirm.disable.description": "Are you sure you want to disable the {notificationName} notification?", + "notification.confirm.disable.title": "Disable notification", + "notification.confirm.enable.description": "Are you sure you want to enable the {notificationName} notification?", + "notification.confirm.enable.title": "Enable notification", + "notification.filter.enabled": "Enabled", + "notification.filterOptions.disabled": "Disabled", + "notification.filterOptions.enabled": "Enabled", + "notification.list.header.description": "Select a notification to manage the settings or click on the Add button to add a new one.", + "notification.list.header.title": "Notification", + "notification.list.noData.description": "You can receive notifications when operations such as additions and changes are made on the admin console, or the status of operational tasks.", + "notification.list.noResult.searchKeyword": "Name", + "notification.search.placeholder": "Name", + "notification.slackIncomingWebhookUrl": "Slack incoming webhook URL", + "notification.sort.nameAz": "Name A-Z", + "notification.sort.nameZa": "Name Z-A", + "notification.sort.newest": "Newest", + "notification.sort.oldest": "Oldest", + "notification.update.header.description": "A notification lets you know when someone adds or updates something on the admin console and operational tasks status.", + "notification.update.header.title": "Update the notification", + "push.add.header.description": "By using the push feature, the SDK can be updated in real-time. 
Every time a feature flag is updated, a notification is sent to the client.", + "push.add.header.title": "Create a push", + "push.confirm.delete.description": "The {pushName} push will be deleted permanently.", + "push.confirm.delete.title": "Delete push", + "push.input.fcmApiKey": "Firebase Cloud Messaging API Key", + "push.list.header.description": "Select a push to manage the settings or click on the Add button to add a new one.", + "push.list.header.title": "Push", + "push.list.noData.description": "You can create a push to update the SDK client in real-time. Every time a feature flag is updated, a notification is sent to the client.", + "push.list.noResult.searchKeyword": "Name", + "push.search.placeholder": "Name", + "push.sort.nameAz": "Name A-Z", + "push.sort.nameZa": "Name Z-A", + "push.sort.newest": "Newest", + "push.sort.oldest": "Oldest", + "push.update.header.description": "By using the push feature, the SDK can be updated in real-time. Every time a feature flag is updated, a notification is sent to the client.", + "push.update.header.title": "Update the push", + "readMore": "Read more", + "reason.client": "Client", + "reason.offVariation": "Off variation", + "reason.reason": "Evaluation reason", + "reason.rule": "Rule", + "reason.target": "Target", + "segment.action.delete": "Delete segment", + "segment.action.download": "Download user list", + "segment.add.header.description": "User segment allows you to manage all user targets for a single feature flag variation. 
You can use it to make changes to a large number of users or to test beta features on a small number of users.", + "segment.add.header.title": "Create a segment", + "segment.confirm.delete.cannotDelete": "The 「{segmentName}」 segment is being used, and you cannot delete it.", + "segment.confirm.delete.description": "The 「{segmentName}」 segment will be deleted permanently.", + "segment.confirm.delete.title": "Delete segment", + "segment.fileUpload.browseFiles": "Browse files", + "segment.fileUpload.fileFormat": "Accepted file type: .csv and .txt (Max size: 2MB)", + "segment.fileUpload.fileMaxSize": "The maximum size of the file is 1MB", + "segment.fileUpload.fileSize": "{fileSize} bytes", + "segment.fileUpload.segmentInUse": "This segment is in use and should remove from the feature flag before updating it", + "segment.fileUpload.unsupportedType": "The file format is not supported", + "segment.fileUpload.uploadInProgress": "The file cannot be updated due to upload in progress", + "segment.fileUpload.userList": "List of user IDs", + "segment.filter.status": "Status of use", + "segment.filterOptions.inUse": "In use", + "segment.filterOptions.notInUse": "Not in use", + "segment.list.header.description": "On this page, you can check all segments for this environment. 
Select a segment to manage the settings or click on the Add button to add a new one.", + "segment.list.header.title": "Segments", + "segment.list.noData.description": "You can create a user segment to manage all user targets for a single feature flag variation.", + "segment.list.noResult.searchKeyword": "Name and description", + "segment.search.placeholder": "Name and Description", + "segment.select.noData.description": "Please add user segments on the user segment list page.", + "segment.sort.nameAz": "Name A-Z", + "segment.sort.nameZa": "Name Z-A", + "segment.sort.newest": "Newest", + "segment.sort.oldest": "Oldest", + "segment.status.uploadFailed": "UPLOAD FAILED", + "segment.status.uploading": "UPLOADING", + "segment.update.header.description": "User segment allows you to manage all user targets for a single feature flag variation. You can use it to make changes to a large number of users or to test beta features on a small number of users.", + "segment.update.header.title": "Update the segment", + "segment.userCount": "users", + "settings.list.header.description": "On this page, you can check all settings for this environment. 
Select a tab to manage the settings.", + "settings.list.header.title": "Settings", + "settings.tab.notifications": "Notifications", + "settings.tab.pushes": "Pushes", + "sideMenu.accounts": "Accounts", + "sideMenu.adminSettings": "Admin Settings", + "sideMenu.analysis": "Analysis", + "sideMenu.apiKeys": "API Keys", + "sideMenu.auditLog": "Audit Logs", + "sideMenu.documentation": "Documentation", + "sideMenu.experiments": "Experiments", + "sideMenu.featureFlags": "Feature Flags", + "sideMenu.goals": "Goals", + "sideMenu.logout": "Logout", + "sideMenu.settings": "Settings", + "sideMenu.user": "Users", + "sideMenu.userSegments": "User Segments", + "sort": "Sort", + "sourceType.account": "Account", + "sourceType.accountDescription": "Get notified when someone adds or updates an account", + "sourceType.adminAccount": "Account", + "sourceType.adminAccountDescription": "Get notified when someone adds or updates an account", + "sourceType.adminNotification": "Notification", + "sourceType.adminNotificationDescription": "Get notified when someone adds or updates a notification", + "sourceType.apiKey": "API Key", + "sourceType.apiKeyDescription": "Get notified when someone adds or updates an API Key", + "sourceType.autoOps": "Auto-Ops", + "sourceType.autoOpsDescription": "Get notified when the Auto-Ops is triggered", + "sourceType.environment": "Environment", + "sourceType.environmentDescription": "Get notified when someone adds or updates an environment", + "sourceType.experiment": "Experiment", + "sourceType.experimentDescription": "Get notified when someone adds or updates an experiment", + "sourceType.featureFlag": "Feature Flag", + "sourceType.featureFlagDescription": "Get notified when someone adds or updates a feature flag", + "sourceType.goal": "Goal", + "sourceType.goalDescription": "Get notified when someone adds or updates a goal", + "sourceType.mauCount": "Monthly Active Users count", + "sourceType.mauCountDescription": "Get notified the monthly active users count 
on the first day of every month", + "sourceType.notification": "Notification", + "sourceType.notificationDescription": "Get notified when someone adds or updates a notification", + "sourceType.project": "Project", + "sourceType.projectDescription": "Get notified when someone adds or updates a project", + "sourceType.push": "Push", + "sourceType.pushDescription": "Get notified when someone adds or updates a push", + "sourceType.runningExperiments": "Running Experiments", + "sourceType.runningExperimentsDescription": "Get notified daily of the list of running experiments", + "sourceType.segment": "User Segment", + "sourceType.segmentDescription": "Get notified when someone adds or updates a user segment", + "sourceType.staleFeatureFlag": "Stale feature flag", + "sourceType.staleFeatureFlagDescription": "Get notified when a feature flag becomes stale", + "success": "Success", + "tags": "Tags", + "total": "Total", + "type": "Type", + "warning": "Warning", + "yes": "Yes" +} diff --git a/ui/web-v2/apps/admin/src/assets/lang/ja.json b/ui/web-v2/apps/admin/src/assets/lang/ja.json new file mode 100644 index 000000000..82036fac8 --- /dev/null +++ b/ui/web-v2/apps/admin/src/assets/lang/ja.json @@ -0,0 +1,456 @@ +{ + "account.add.header.description": "管理コンソールにアクセスするには、アカウントが必要です。 アカウントは、閲覧者、編集者、オーナーの3つロールがあります。", + "account.add.header.title": "アカウントの作成", + "account.confirm.disable.description": "を無効にしますか?", + "account.confirm.disable.title": "アカウントの無効", + "account.confirm.enable.description": "を有効にしますか?", + "account.confirm.enable.title": "アカウントの有効", + "account.filter.enabled": "有効 / 無効", + "account.filter.role": "権限", + "account.list.header.description": "このページでは、環境にあるすべてのアカウントを確認できます。アカウントを選択すると権限の変更や新規作成ができます。", + "account.list.header.title": "アカウント", + "account.list.noData.description": "新しいチームメンバーの追加、アクセス制御の無効化及び管理ができます。", + "account.list.noResult.searchKeyword": "メール", + "account.role.editor": "編集者", + "account.role.owner": "オーナー", + "account.role.viewer": "閲覧者", + 
"account.search.placeholder": "メール", + "account.sort.emailAz": "メール A-Z", + "account.sort.emailZa": "メール Z-A", + "account.sort.newest": "新しい順", + "account.sort.oldest": "古い順", + "account.update.header.description": "管理コンソールにアクセスするには、アカウントが必要です。 アカウントは、閲覧者、編集者、オーナーの3つロールがあります。", + "account.update.header.title": "アカウントの更新", + "adminAccount.add.header.description": "管理者アカウントは、すべてのプロジェクトと環境にアクセスできます。", + "adminAccount.add.header.title": "アカウントの作成", + "adminAccount.confirm.disable.description": "「{accountId}」を無効にしますか?", + "adminAccount.confirm.disable.title": "アカウントの無効", + "adminAccount.confirm.enable.description": "「{accountId}」を有効にしますか?", + "adminAccount.confirm.enable.title": "アカウントの有効", + "adminAccount.filter.enabled": "有効 / 無効", + "adminAccount.list.header.description": "このタブでは、すべての管理者アカウントを確認できます。必要に応じて無効や新規作成ができます。", + "adminAccount.list.header.title": "アカウント", + "adminAccount.list.noData.description": "新しい管理者アカウントを追加できます。", + "adminAccount.list.noResult.searchKeyword": "メール", + "adminAccount.search.placeholder": "メール", + "adminAccount.sort.emailAz": "メール A-Z", + "adminAccount.sort.emailZa": "メール Z-A", + "adminAccount.sort.newest": "新しい順", + "adminAccount.sort.oldest": "古い順", + "adminAuditLog.list.header.description": "このタブでは、管理者監査ログを確認できます。", + "adminEnvironment.add.header.description": "ローカル開発から本番まで、フィーチャーフラグの開発ライフサイクルを管理できます。", + "adminEnvironment.add.header.title": "環境の作成", + "adminEnvironment.filter.project": "プロジェクト", + "adminEnvironment.list.header.description": "このタブでは、すべての環境を確認できます。環境を選択すると変更や新規作成ができます。", + "adminEnvironment.list.header.title": "環境", + "adminEnvironment.list.noResult.searchKeyword": "ID、説明", + "adminEnvironment.search.placeholder": "ID、説明", + "adminEnvironment.sort.idAz": "ID A-Z", + "adminEnvironment.sort.idZa": "ID Z-A", + "adminEnvironment.sort.newest": "新しい順", + "adminEnvironment.sort.oldest": "古い順", + "adminEnvironment.update.header.description": "ローカル開発から本番まで、フィーチャーフラグの開発ライフサイクルを管理できます。", + "adminEnvironment.update.header.title": 
"環境の更新", + "adminProject.action.convertProject": "有償版に変換", + "adminProject.add.header.description": "プロジェクトを使用することによって、複数の異なるユーザープロジェクトを管理できます。", + "adminProject.add.header.title": "プロジェクトの作成", + "adminProject.confirm.convertProject.description": "「{projectId}」を有償版に変換しますか?", + "adminProject.confirm.convertProject.title": "プロジェクトの変換", + "adminProject.confirm.disable.description": "「{projectId}」を無効にしますか?", + "adminProject.confirm.disable.title": "プロジェクトの無効", + "adminProject.confirm.enable.description": "「{projectId}」を有効にしますか?", + "adminProject.confirm.enable.title": "プロジェクトの有効", + "adminProject.creator": "作成者", + "adminProject.filter.enabled": "有効 / 無効", + "adminProject.list.header.description": "このタブでは、すべてのプロジェクトを確認できます。プロジェクトを選択すると変更や新規作成ができます。", + "adminProject.list.header.title": "プロジェクト", + "adminProject.list.noResult.searchKeyword": "ID、メール", + "adminProject.search.placeholder": "ID、メール", + "adminProject.sort.idAz": "ID A-Z", + "adminProject.sort.idZa": "ID Z-A", + "adminProject.sort.newest": "新しい順", + "adminProject.sort.oldest": "古い順", + "adminProject.trialPeriod": "トライアル期間", + "adminProject.update.header.description": "プロジェクトを使用することによって、複数の異なるユーザープロジェクトを管理できます。", + "adminProject.update.header.title": "プロジェクトの更新", + "adminSettings.list.header.description": "このページでは、管理者の設定を確認できます。タブを選択すると設定の管理ができます。", + "adminSettings.list.header.title": "管理者設定", + "adminSettings.tab.account": "アカウント", + "adminSettings.tab.auditLogs": "監査ログ", + "adminSettings.tab.environments": "環境", + "adminSettings.tab.notifications": "通知", + "adminSettings.tab.projects": "プロジェクト", + "analysis.clientData": "クライアントデータ", + "analysis.conversionRate": "コンバージョン率", + "analysis.evaluationTotal": "エバリュエーション数", + "analysis.evaluationUser": "エバリュエーションユーザー", + "analysis.goalTotal": "ゴール数", + "analysis.goalUser": "ゴールユーザー", + "analysis.goalValueMean": "付与された値の平均値", + "analysis.goalValueTotal": "付与された値の総和", + "analysis.goalValueVariance": "付与された値の分散", + "analysis.header.description": 
"このページではフィーチャーフラグやユーザーデータといった条件ごとにゴール数を取得することで、インサイトを得ることができます。", + "analysis.header.title": "分析", + "analysis.segment": "セグメント", + "analysis.variation": "バリエーション", + "apiKey.add.header.description": "APIキーはクライアントSDKがサーバーAPIにアクセスするために必要です。", + "apiKey.add.header.title": "APIキーの作成", + "apiKey.confirm.disable.description": "を無効にしますか?", + "apiKey.confirm.disable.title": "APIキーの無効", + "apiKey.confirm.enable.description": "を有効にしますか?", + "apiKey.confirm.enable.title": "APIキーの有効", + "apiKey.filter.enabled": "有効 / 無効", + "apiKey.list.header.description": "このページでは、この環境にあるすべてのAPIキーを確認できます。", + "apiKey.list.header.title": "APIキー", + "apiKey.list.noData.description": "APIキーを作成して、クライアントSDKからのリクエストを許可できます。", + "apiKey.list.noResult.searchKeyword": "名前", + "apiKey.search.placeholder": "名前", + "apiKey.update.header.description": "APIキーはクライアントSDKがサーバーAPIにアクセスするために必要です。", + "apiKey.update.header.title": "APIキーの更新", + "auditLog.filter.dates": "表示期間", + "auditLog.filter.type": "タイプ", + "auditLog.list.header.description": "このページでは、環境にあるすべての監視ログを確認できます。", + "auditLog.list.header.title": "監視ログ", + "auditLog.list.noData.description": "管理コンソールで追加・変更などを行うと履歴が作成されます。", + "auditLog.list.noResult.searchKeyword": "メール", + "auditLog.search.placeholder": "メール", + "auditLog.sort.newest": "新しい順", + "auditLog.sort.oldest": "古い順", + "autoOps.clauseType": "ルールタイプ", + "autoOps.datetime.datetime": "日時", + "autoOps.datetimeClauseType": "スケジュール", + "autoOps.disableFeatureType": "フィーチャーフラグ無効化", + "autoOps.enableFeatureType": "フィーチャーフラグ有効化", + "autoOps.eventRateClauseType": "イベントレート", + "autoOps.operation": "オペレーション", + "autoOps.operationType": "オペレーションタイプ", + "autoOps.opsEventRateClause.featureVersion": "フィーチャーフラグバージョン", + "autoOps.opsEventRateClause.goal": "ゴール", + "autoOps.opsEventRateClause.minCount": "最低カウント", + "autoOps.rule": "ルール", + "button.add": "追加", + "button.addCondition": "条件を追加", + "button.addOperation": "オペレーションを追加", + "button.addRule": "ルールを追加", + "button.addVariation": "バリエーションを追加", + 
"button.archive": "フラグのアーカイブ", + "button.cancel": "キャンセル", + "button.clearAll": "すべてクリア", + "button.copyFlags": "フラグのコピー", + "button.edit": "編集", + "button.result": "結果", + "button.saveWithComment": "コメントを付けて保存", + "button.submit": "送信", + "copy.copied": "コピーしました", + "copy.copyToClipboard": "クリップボードにコピー", + "created": "作成日", + "description": "説明", + "disabled": "無効", + "enabled": "有効", + "environment.select.label": "(プロジェクト) 環境", + "error": "エラー", + "experiment.action.archive": "アーカイブ", + "experiment.add.header.description": "以下の情報を入力して、新しいエクスペリメントを始めましょう。", + "experiment.add.header.title": "エクスペリメントの作成", + "experiment.baselineVariation": "ベースラインバリエーション", + "experiment.confirm.archive.description": "「{experimentName}」をアーカイブしますか?", + "experiment.confirm.archive.title": "エクスペリメントのアーカイブ", + "experiment.feature": "フィーチャーフラグ", + "experiment.filter.archived": "アーカイブ", + "experiment.filter.maintainer": "管理者", + "experiment.filter.status": "ステータス", + "experiment.goalIds": "ゴール", + "experiment.list.header.description": "このページでは、この環境におけるすべてのエクスペリメントを見ることができます。エクスペリメントを選択すると、設定の管理や結果の表示ができます。", + "experiment.list.header.title": "エクスペリメント", + "experiment.list.noData.description": "エクスペリメントを使用することによって、Webページの読み込み時間の改善、新規機能などのテストができます。", + "experiment.list.noResult.searchKeyword": "名前、説明", + "experiment.maintainer": "管理者", + "experiment.period": "実施期間", + "experiment.result.conversionRate.helpText": "コンバージョン率。(ゴールイベントを発火させたユニークユーザー数 / バリエーションが返されたユニークユーザー数)で計算されます。", + "experiment.result.conversionRate.label": "コンバージョン率", + "experiment.result.evaluationUser.helpText": "バリエーションが返されたユニークユーザー数。実際にフィーチャーフラグのバリエーションに割り当てられたユーザーの数。オフラインユーザーやこれから新規で割り当てられる潜在的なユーザーは含まれません。", + "experiment.result.evaluationUser.label": "ターゲットユーザー", + "experiment.result.goals.helpText": "クライアントによりゴールイベントが発火された総数", + "experiment.result.goals.label": "ゴール", + "experiment.result.goalUser.helpText": "ゴールイベントを発火させたユニークユーザー数。同じユーザーが複数回ゴールイベントに到達していても、カウントは増加しません。", + "experiment.result.goalUser.label": 
"ゴールユーザー", + "experiment.result.improvement.helpText": "ベースライン(コントロール グループとも呼ばれます)と比較した場合の、バリエーションに関する指標の改善の測定結果。バリエーションの値の範囲をベースラインの値の範囲と比較することで計算されます。", + "experiment.result.improvement.label": "改善", + "experiment.result.noData.description": "結果はエクスペリメントが開始すると作成されます。", + "experiment.result.noData.errorMessage": "まだデータが作成されていません。もう少したってからお試しください。", + "experiment.result.noData.experimentResult": "エクスペリメント結果", + "experiment.result.probabilityToBeatBaseline.helpText": "ベースライン(コントロール グループとも呼ばれます)を上回ることが推定される可能性。判断基準は95%以上を推奨しています。", + "experiment.result.probabilityToBeatBaseline.label": "ベースラインを上回る可能性", + "experiment.result.probabilityToBest.helpText": "他のすべてのバリエーションを上回ることが推定される可能性。判断基準は95%以上を推奨しています。", + "experiment.result.probabilityToBest.label": "最良のバリエーションである可能性", + "experiment.result.valuePerUser.helpText": "1ユーザーあたりのゴールイベントに付与された数値の合算値。(ゴールイベントに付与された数値の合算値 / ゴールイベントを発火させたユニークユーザー数)で計算されます。", + "experiment.result.valuePerUser.label": "付与された値/ユーザー", + "experiment.result.valueSum.helpText": "ゴールイベントに付与された数値の合算値。この数値はゴールごとに異なります。", + "experiment.result.valueSum.label": "付与された値", + "experiment.result.variation.label": "バリエーション", + "experiment.search.placeholder": "名前、 説明", + "experiment.startAt": "開始日時", + "experiment.status.forceStopped": "停止", + "experiment.status.running": "実施中", + "experiment.status.status": "ステータス", + "experiment.status.stopped": "終了", + "experiment.status.waiting": "実施待ち", + "experiment.stop.dialog.description": "本当にエクスペリメントを停止しますか?", + "experiment.stop.dialog.title": "確認", + "experiment.stop.stopExperiment": "エクスペリメントを停止する", + "experiment.stopAt": "終了日時", + "experiment.update.header.description": "以下の情報を入力して、エクスペリメントを更新してください。", + "experiment.update.header.title": "エクスペリメントの更新", + "feature.action.archive": "アーカイブ", + "feature.action.clone": "クローン", + "feature.action.unarchive": "アーカイブを解除", + "feature.add.header.description": "以下の情報を入力して、新しいフィーチャーフラグを作成してください。", + "feature.add.header.title": "フィーチャーフラグの追加", + "feature.clause.operator.after": 
"次の値より後", + "feature.clause.operator.before": "次の値より前",
"feature.resetRandomSampling": "ランダムサンプリングをリセットする", + "feature.rule": "ルール", + "feature.search.placeholder": "ID、 名前、 説明", + "feature.sort.nameAz": "名前 A-Z", + "feature.sort.nameZa": "名前 Z-A", + "feature.sort.newest": "新しい順", + "feature.sort.oldest": "古い順", + "feature.status": "ステータス", + "feature.strategy.selectRolloutPercentage": "割合で選択", + "feature.tab.autoOps": "自動オペレーション", + "feature.tab.evaluation": "エバリュエーション", + "feature.tab.experiments": "エクスペリメント", + "feature.tab.history": "履歴", + "feature.tab.settings": "設定", + "feature.tab.targeting": "ターゲティング", + "feature.tab.variations": "バリエーション", + "feature.targetingDescription": "ターゲティングを設定します。 ユーザーターゲティング、複雑なルール設定、デフォルトストラテジー、オフバリエーションを設定できます。", + "feature.targetings": "ターゲティング・ユーザー", + "feature.type.boolean": "ブーリアン", + "feature.type.json": "JSON", + "feature.type.number": "数値", + "feature.type.string": "文字列", + "feature.updateComment": "変更コメント", + "feature.variation": "バリエーション", + "feature.variationType": "フラグの型", + "filter.add": "フィルターを追加", + "filter.filter": "フィルター", + "goal.action.archive": "アーカイブ", + "goal.add.header.description": "ゴールを使用することで、エクスペリメントでフィーチャーフラグの影響を受けるユーザーの行動を計測できます。", + "goal.add.header.title": "ゴールの作成", + "goal.confirm.archive.description": "アーカイブする前に、アプリケーションから「{goalId}」を使用した部分のコードを削除することをお勧めします。", + "goal.confirm.archive.title": "ゴールのアーカイブ", + "goal.filter.archived": "アーカイブ", + "goal.filter.status": "ステータス", + "goal.list.header.description": "このページでは、この環境にあるすべてのゴールを確認できます。設定を管理するゴールを選択します。", + "goal.list.header.title": "ゴール", + "goal.list.noData.description": "ゴールは、フィーチャーフラグの効果を測るための指標です。", + "goal.list.noResult.searchKeyword": "名前、説明", + "goal.status.inUse": "使用", + "goal.status.notInUse": "未使用", + "goal.status.status": "ステータス", + "goal.update.header.description": "ゴールを使用することで、エクスペリメントでフィーチャーフラグの影響を受けるユーザーの行動を計測できます。", + "goal.update.header.title": "ゴールの更新", + "id": "ID", + "input.destinationEnvironment": "環境先", + "input.email": "メール", + "input.error.invalidEmailAddress": "無効なメールアドレスです。", 
+ "input.error.invalidEmailDomain": "無効なドメインのメールアドレスです。", + "input.error.invalidId": "無効なIDです。 IDには、小文字、数字、または「-」のみを含める必要があり、英数字で始める必要があります。", + "input.error.maxLength": "{max}文字以内で入力してください。", + "input.error.minSelectOptionLength": "少なくとも1つのオプションを選択する必要があります。", + "input.error.mustBeUnique": "同じ値は入力できません。", + "input.error.not100Percentage": "合計は100%である必要があります。", + "input.error.notJson": "不正なJSONです。", + "input.error.notLaterThanOrEqualDays": "{days}日前以降を指定してください。", + "input.error.notLaterThanCurrentTime": "現在時刻より後を指定してください。", + "input.error.notLaterThanStartAt": "開始日時より後を指定してください。", + "input.error.notLessThanOrEquals30Days": "期間は30日以内を指定してください。", + "input.error.notNumber": "数値を入力してください。", + "input.error.required": "必須項目です。", + "input.featureFlag": "フィーチャーフラグ", + "input.optional": "(任意)", + "input.originEnvironment": "環境元", + "input.projectId": "プロジェクトID", + "input.role": "権限", + "maintainer": "フラグ管理者", + "name": "名前", + "no": "いいえ", + "noData.title": "まだ{title}がありません。", + "noResult.changeFilterSelection": "フィルター選択を変更する", + "noResult.checkTypos": "誤字脱字をチェックする", + "noResult.searchByKeyword": "{keyword}で検索する", + "noResult.title": "{title}が一致しません。以下を試してください。", + "notification.add.header.description": "通知機能を使用することによって、管理コンソールでの追加・更新をした時や、運用タスクの状況を確認することができます。", + "notification.add.header.title": "通知の作成", + "notification.confirm.delete.description": "「{notificationName}」が永久に削除されます。", + "notification.confirm.delete.title": "通知の削除", + "notification.confirm.disable.description": "「{notificationName}」を無効にしますか?", + "notification.confirm.disable.title": "通知の無効", + "notification.confirm.enable.description": "「{notificationName}」を有効にしますか?", + "notification.confirm.enable.title": "通知の有効", + "notification.filter.enabled": "有効 / 無効", + "notification.filterOptions.disabled": "無効", + "notification.filterOptions.enabled": "有効", + "notification.list.header.description": "通知を選択すると、設定の管理や新規作成ができます。", + "notification.list.header.title": "通知", + "notification.list.noData.description": 
"管理コンソールで追加や変更などの操作が行われた時、もしくは運用タスクの状況を通知で受け取ることができます。", + "notification.list.noResult.searchKeyword": "名前", + "notification.search.placeholder": "名前", + "notification.slackIncomingWebhookUrl": "Slack incoming webhook URL", + "notification.sort.nameAz": "名前 A-Z", + "notification.sort.nameZa": "名前 Z-A", + "notification.sort.newest": "新しい順", + "notification.sort.oldest": "古い順", + "notification.update.header.description": "通知機能を使用することによって、管理コンソールでの追加・更新をした時や、運用タスクの状況を確認することができます。", + "notification.update.header.title": "通知の更新", + "push.add.header.description": "プッシュ機能を使用することによって、リアルタイムでSDKを更新することができます。フィーチャーフラグが更新されるたびに、プッシュ通知がクライアントに送信されます。", + "push.add.header.title": "プッシュの作成", + "push.confirm.delete.description": "「{pushName}」が永久に削除されます。", + "push.confirm.delete.title": "プッシュの削除", + "push.input.fcmApiKey": "Firebase Cloud Messaging APIキー", + "push.list.header.description": "プッシュを選択すると、設定の管理や新規作成ができます。", + "push.list.header.title": "プッシュ", + "push.list.noData.description": "プッシュを作成し、リアルタイムでSDKクライアントを更新できます。フィーチャーフラグが更新されるたびに、通知がクライアントに送信されます。", + "push.list.noResult.searchKeyword": "名前", + "push.search.placeholder": "名前", + "push.sort.nameAz": "名前 A-Z", + "push.sort.nameZa": "名前 Z-A", + "push.sort.newest": "新しい順", + "push.sort.oldest": "古い順", + "push.update.header.description": "プッシュ機能を使用することによって、リアルタイムでSDKを更新することができます。フィーチャーフラグが更新されるたびに、プッシュ通知がクライアントに送信されます。", + "push.update.header.title": "プッシュの更新", + "readMore": "もっと読む", + "reason.client": "クライアント", + "reason.offVariation": "オフバリエーション", + "reason.reason": "エバリュエーション理由", + "reason.rule": "ルール", + "reason.target": "ターゲット", + "segment.action.delete": "セグメントを削除", + "segment.action.download": "ユーザー一覧をダウンロード", + "segment.add.header.description": "ユーザーセグメントを使用すると、単一のフィーチャーフラグバリエーションのすべてのユーザーターゲットを管理できます。例えば、既存のベータユーザー群に対して、機能をテスト可能になります。", + "segment.add.header.title": "ユーザーセグメントの作成", + "segment.list.noData.description": "ユーザーセグメントを作成し、単一のフィーチャーフラグバリエーションのすべてのユーザーターゲットを管理できます。", + "segment.list.noResult.searchKeyword": 
"名前、説明", + "segment.confirm.delete.cannotDelete": "この「{segmentName}」セグメントが使用中のため、削除できません。", + "segment.confirm.delete.description": "この「{segmentName}」セグメントが永久に削除されます。", + "segment.confirm.delete.title": "ユーザーセグメントの削除", + "segment.fileUpload.browseFiles": "ファイルを選択", + "segment.fileUpload.fileFormat": "アップロード可能な形式: .csv, .txt (最大サイズ: 2MB)", + "segment.fileUpload.fileMaxSize": "ファイルの最大サイズは2MBです", + "segment.fileUpload.fileSize": "{fileSize} バイト", + "segment.fileUpload.segmentInUse": "このセグメントは使用中のため、ファイルを更新する前にフィーチャーフラグから削除してください", + "segment.fileUpload.unsupportedType": "対応していないファイル形式です", + "segment.fileUpload.uploadInProgress": "アップロード中のためファイルの更新ができません", + "segment.fileUpload.userList": "ユーザーID一覧", + "segment.filter.status": "使用状況", + "segment.filterOptions.inUse": "使用中", + "segment.filterOptions.notInUse": "未使用", + "segment.list.header.description": "このページでは、環境にあるすべてのユーザーセグメントを確認できます。セグメントを選択すると設定変更、または追加ボタンをクリックして新規作成ができます。", + "segment.list.header.title": "ユーザーセグメント", + "segment.search.placeholder": "名前、 説明", + "segment.select.noData.description": "ユーザーセグメント一覧ページからユーザーセグメントを追加してください。", + "segment.sort.nameAz": "名前 A-Z", + "segment.sort.nameZa": "名前 Z-A", + "segment.sort.newest": "新しい順", + "segment.sort.oldest": "古い順", + "segment.status.uploadFailed": "アップロードに失敗", + "segment.status.uploading": "アップロード中", + "segment.update.header.description": "ユーザーセグメントを使用すると、単一のフィーチャーフラグバリエーションのすべてのユーザーターゲットを管理できます。これを使用して、多数のユーザーターゲットに対して一度に変更を加えることができます。例えば、既存のベータユーザー群に対して、機能をテスト可能になります。", + "segment.update.header.title": "ユーザーセグメントの更新", + "segment.userCount": "ユーザー数", + "settings.list.header.description": "このページでは、環境にあるすべての設定を確認できます。タブを選択すると設定の管理ができます。", + "settings.list.header.title": "設定", + "settings.tab.notifications": "通知", + "settings.tab.pushes": "プッシュ", + "sideMenu.accounts": "アカウント", + "sideMenu.adminSettings": "管理者設定", + "sideMenu.analysis": "分析", + "sideMenu.apiKeys": "APIキー", + "sideMenu.auditLog": "監査ログ", + "sideMenu.documentation": "ドキュメント", + 
"sideMenu.experiments": "エクスペリメント", + "sideMenu.featureFlags": "フィーチャーフラグ", + "sideMenu.goals": "ゴール", + "sideMenu.logout": "ログアウト", + "sideMenu.settings": "設定", + "sideMenu.user": "ユーザー", + "sideMenu.userSegments": "ユーザーセグメント", + "sort": "並び替え", + "sourceType.account": "アカウント", + "sourceType.accountDescription": "アカウントを追加・更新すると通知されます", + "sourceType.adminAccount": "アカウント", + "sourceType.adminAccountDescription": "アカウントを追加・更新すると通知されます", + "sourceType.adminNotification": "通知", + "sourceType.adminNotificationDescription": "通知を追加・更新すると通知されます", + "sourceType.apiKey": "APIキー", + "sourceType.apiKeyDescription": "APIキーを追加・更新すると通知されます", + "sourceType.autoOps": "自動オペレーション", + "sourceType.autoOpsDescription": "自動オペレーションが実行されると通知されます", + "sourceType.environment": "環境", + "sourceType.environmentDescription": "環境を追加・更新すると通知されます", + "sourceType.experiment": "エクスペリメント", + "sourceType.experimentDescription": "エクスペリメントを追加・更新すると通知されます", + "sourceType.featureFlag": "フィーチャーフラグ", + "sourceType.featureFlagDescription": "フィーチャーフラグを追加・更新すると通知されます", + "sourceType.goal": "ゴール", + "sourceType.goalDescription": "ゴールを追加・更新すると通知されます", + "sourceType.mauCount": "月間アクティブユーザー数", + "sourceType.mauCountDescription": "毎月1日に月間アクティブユーザー数が通知されます", + "sourceType.notification": "通知", + "sourceType.notificationDescription": "通知を追加・更新すると通知されます", + "sourceType.project": "プロジェクト", + "sourceType.projectDescription": "プロジェクトを追加・更新すると通知されます", + "sourceType.push": "プッシュ", + "sourceType.pushDescription": "プッシュを追加・更新すると通知されます", + "sourceType.runningExperiments": "実施中のエクスペリメント", + "sourceType.runningExperimentsDescription": "毎日実行中のエクスペリメントがある場合は通知されます。", + "sourceType.segment": "ユーザーセグメント", + "sourceType.segmentDescription": "ユーザーセグメントを追加・更新すると通知されます", + "sourceType.staleFeatureFlag": "使用されていないフィーチャーフラグ", + "sourceType.staleFeatureFlagDescription": "フィーチャーフラグが使用されなくなると通知されます", + "success": "成功", + "tags": "タグ", + "total": "合計", + "type": "タイプ", + "warning": "警告", + "yes": "はい" +} diff --git 
a/ui/web-v2/apps/admin/src/assets/logo.png b/ui/web-v2/apps/admin/src/assets/logo.png new file mode 100644 index 0000000000000000000000000000000000000000..897aacd22c400b91747edf5dd28e8ad5a3bac9e3 GIT binary patch literal 10804 zcmZvC1z1#HyDuFA0|(!jKXYQi2LdN(czU5CaUI64DIv^Q`9=uC1v|c8~rZ78Vwns*0ix78W+%ovchm zaQD4hE#ZCFVY}!k%VU*|FmB%slCAVrKWb=TJ-(BPu<)_(W8wZax%*&Y(_`WPBV%Ey zVKe-ftb;9tg>$E;yld}6ZL$8wzq7`&ziWRlmAm$L{7?VF5^Qe8stWpGZKY%N!P4VS zkDHDC-#M~z{^O60myP=$8B6)j4y&RXtLd&Ic2qHN!NMY;`rELvzGghUi$84pMjxWD zp$@bFg9JYOO(FmVIsWy+l7a&7M35Ea11l6{@8AN2O0)lC0lbs{ss-6u|1p8sNwe!~ zXtOGSovm2K1fB{!WtX|f%E~I`Z21wWqxkaQ?sp?;b{h!95hy6=?(Qz&E+PPSwiXnU zkdP33Dl8~0%ztOW@8aPA`2gj2aN+n@kpIL{v~sa zU)R6tgxG%k?@SIZ|Auu3Q1I_BfMQF+s-jM=gliiUl-(tycq!+kwgf>^k1UhX`h}_4uucGVS|yhw`KGV9`JlDRaK3PXGbEy-iJiopElfve;6TdjL0r zg@96jlB3RCJKHU1N8$_V3;qkTCYY8!k@05i0W+6*tbn(oAskyWc%F`8g~X2e-WguA zhPi?JS-OAoRcQwk8erBIgMBa`BI7Uxq(6`A%487`E^ET9MJ`Jk(DRJ-MWp4k5JElw}VWMhZTauj2X_u zBAFuT;2o^{fEt(I;*|1$lIK_;g)BuX9w05(h!IX2TNWx?!-~s_>Gka3((Ss;A>-M< zOKgShKFt~K$E0%Z|D-c3XkH@Nbs`q%LSLfPaE6ud{nqmEOUq%vwLgSs_U&gPS3ceQ z-gUC>1Qq0^nFaP{WainX-9B#mp^$yYne1iA_fG+Hj4dl{he;j7lJ1*&xU}7$gHx)n zub@x4I-ma-J+5iTj`GZDB!My)u~@adLbB`7yKK)6(_hmX6zQF$$3;)$!ptD%=Re(8 z`afO5^XUz3stR!Nc@X(kQQBKVw`L5&>%*W6iD7c3Ph{Vk98P*B&|2 zp-i~z6tey6PX6?3v$>@e?XwiV_4XUTghLz5}!Nv*5vid{nyw^EaBh4gVHW+X9N z#9Q5ug{NElUBA{`Lw&t*1D)O+00w>&6Is!JU1?Y0KMvv$q|OZMbET-WHIbDyuu!q) z(BeLNtNvxr(bcr?jc}(WD(SoC&S&^-2io9yRakUh($sNn5dbNL#I=OR3;RX6-W$(! 
zB)@XzbBt`I^(4IzxMa!BGMO&Z*wUBT2o{K*OeKqLi3^SJC2tI%$M1V+Q=s-i-5%J) zoYTBAm!wXcQk6+6z&HsABWdu%Zj9}W#dwF@d~VD(~3(tsoGaqxaV2OQP18oLfCUy7vtu4hk#{V5rS0pW2?` zGBc%2LLKHl)I8x&fT<8c#LSq`qs-KO*;^8~_o!#Wk@QyrYYX==qO;_dl$HVU&qAuS z`zhkJC#rn$T^)K8eiV*YCtlo9Gj0(U_!_s9}6&k#KdTJB-7fwMN@ZVzT!3KY=o zzRL9d;_@1{qN;K*QTJST2vGEMPb%(1S_%vpXkN_?YEEW~yhVq*jk4cZUiLL(!%tCfx(g zU)W@SUto9^3qRaF!}ZF=MLj7j|Y%cFaa&rse2UV6WL^VzPS z&PW(MF65%T>FxYuHR`;R+AW;!Q87PH_8P zj~*z$+kLB_0zFA@t56kJ9u>FPr{$Y3pH1z>Jl`EjyIyrgM$AMnJNLhI`NN;xS}tH( zp%OYI$H#b7L*bNIoW~+nO8)1qwsq#NTyDwYFEh-dlgA9(T>VM;PhIE%0bIaa%Xl&Iy}rtY3evQcWD<*S06L(YSg;@@ljsfH(0_BFz3Z?AH_r)JBROmtI%hN zwog*`Ev|?wAjX2JafQEMT}QSuhjEBUT(CRRS4F7L8*5(KvYKRWMGkbLnhE`O;e+t~ zfj0ccKh}jjK#n0i$9U*hC)6dBVpCUszA+Ic81Dyp`?98`OeE6cNT&Nr=A&W=LUTGG zx?ylR zBImXGM=G5e3GRDG_qHO{r^;?C$${jX@y-a>(x2a8$IgpQF9ehl*;lh=*AXNM=rpp0}`%feq#Ja`rixQNC(#J&C+vUj0AFj(9P zAz;92PxZz}jdCW2$-v7wK;18%F;+W{`(b+=XwnAy4jp`JHNrahB+%g$QNe4+6dE|7 z>%I`lSZ(+3l?8Q6n?6jzxy*%uF1NO z3BG9e*Nt;|mc!&2&x{YicY8T8EHOLDCAqf&!PzFgd0Z)uyOCZl)bF8*vQ`7Yy)A){ z&RWGF1Uqz5+6x-8s4zs__|iPnZ{0Byxy=0^RGUUazUl^Emns>2o1irP@{rBf9jd$0 z7U}b-rZyn(JaTh#Rf2jCyQcP(QAA6FmI_9o8HhP3h;nvUTz->?Zp?-dL?d$J9y}>y zf=Qkq`U;u zIlrc1rUhqo#QtPQyBd{fs#pZe`T`41?nQs|nV8p+{GxsGb7Zd%qF^{%T`C0=^G>g$ zJgN_*4eAe!3P9DQrAL{r!&DoH&?}Vgjzg-STZ9ySeb#)5KZgbs;`G0ew%2QAY+s>- z1^!G3u!tkysL@Iz51w}FUZihW1rgNz!r=M=mI)x{(MMPC`#X&OE#Kl^EN|fDsXbu# z6(*7p0%?EtMnnAXQLd0(taIBc3EHgthUI;*14aec?<>LN!@$HccyHg}mq__87h_4{a_ zc>`?NP+je8N3W~`&As+-Na=_(u|k~M`lM<41r&Mms~^qf6|cx}Bw_6+e1guRLvAiZSQe29!4SLM21wybf9#Ox2G6bOWsobW)x|oK0sGO;gH(kS(&TK1I7r z9Jen%=UgT+=`QnipPZx5M~}~+S1pPT|54M=5YC}3b_iiZJP|qW@$TH0vy$pJw6!|K@TQ;kqFDb+5*=>B! 
zwZHQ3WEBkY#R@C#%bHk%K|F9(!2`>R$+i$^4%z*(coCZ&fHTplVwzgxiPEhMp?5GZ z@Yx6Zi{+QGRD21m31nJ!?n7PW3Jn#-7N%V8yL{1$qqEb^eD(w3##$X4JP>T42dCCO z=Cq{OT0Qe@i<}%AlqPNUNpB8gEYj*6Ovc6WFY{lUJkw$3K}COu?axHZ5gz7`JRE); z;}a)<`VbIrEkq|6t(es7rqaX zuvkQuAx#vx%(%->Zi}(-w}23d(*^Ggo=o=v4xk1twF+S(SN9Ny*lCG$5lML*9C7Eb zt@t-&I`ra^$<|(7`~DH?Rc%yyKw3spW9_uX;w|qdB3}T~%=(lsJ#0#|y#{I!hEpuBVpj%7AaW*`b z#^E>97d_9Wi{ZaJA|jJjb}g4l+sHpz#PU(G;#zsW<$dYc?^zs0CBtexHjw(0P29lV zox+IX>{&t&QVsb$YOyx#hv{C~Z9X68x~@dnFUytO3RK^=!1K06fYEX{IYn3RIgCw$aD3# zttb2NMJOZtNc`3Jn_M}=oN(>3)fwz#If?hvQXA4N-uK1zZ!MMWg3eP$MKe1ag1@9e zdlROCzeJ-;)JuXMe?I8~TtNogrXh)*v8$Gm`ztd~EzLAWrq~+NsYA5~F%BH{u&_{3 zghw4eN%7$I>+US{Z9vWQ+Z{szGGe<>vz4w_HB7po#3Sh>D9o!`Zl38&RTh>iT7AoU z;g2F-*0FBYeoX?2$fi?9_kv~M2JuQt;;W{U_Hb)PwA#l<$Ai$u#Z1MN^8vi=SOpA) zuW%jy@Yle|p!DwP)opU#1%i#K#QrLhfdzq<9^(xsB4D3*v`APzZOf{b%AC0M7(KhX zhK)tD=utV+Gq5QyJ^rld0~7u>BdJgCx+K)}JWL+xL0QWJr9^ss+rtT-Akrt!Al>KW zlwsK5{fIwHc_ZT4LbEjG^|N_n&Zfw=pXove>1Kwp(A_uBYIXMjh!f4~UGtZ?GkW^4 z+-6KAFA;tWLe2A6iSbk!CWPsl%3m33z#`tHHx{8jO`^BnI1tFVJzTz*zhzKwA$8md zrD^i11s4W&Z~dT}uGv?ee^pq?pJTz@D>HxmbV!Qbu6(MJX|mq`=aXZ;`lbFL3|{=D zPM68|b9Z&-{cMwKhc=_3v){**t3ewef`gs#N6xj|(yU@`fIeow2_#)Zf!k9Ygp3N) za+Hy4y;fzZSQXjx+SxfN)&84@NJ1*@C2~aKuL$J&Nq@g0h~3B?b+tl%@m11!aOX8Q zc!Ibz@#T;bdlFb>H9PiYJts!1bJH$4c&!&9YCu$*y*v=^;4M z2NPY^)aqF=Qzj6&sg^QEeo)UpVi+wMkU*oc-Xy@m|CIty<$Ppg&XR0X$r;yhUV8a@tZkID~1| zvpOZn zgD1cHn^S9pTehjrP5s4#kG^vzGzywz4;CWxfDXK8s^Vz^$;8(zGS{(}4_o;}WQD`) zn(L2zt74{wNP8tXr5{B~?|KGZ%(tk=$B&b`$$a)EOq0b)#}#})PB?>0s6+nkx%;MS zo+63CL5Fz9V{-;N{akYTxiVaf1t5Nm3L#FW{B`g0&Qzb$XYbV%TX!n73=8+yL(r&4h76x%4~gvsNe;(*T<-mRD2Yxh zq4VqR?zn{K4IKHh#&8cMd}2zNc|zCh7@wleG@0^qlQFGZ}K0y=X;iS5VYIE zU@x{MD!k{jGBnDBchV2E_@BkgCN_$V2!82RZ1GN3DYPrdFD~G6(4kwTa^@-%l;KU(iPT`46frzpo#JE(m{) z;jj+_4YkwU_^gcvb#|_hLEn!(b^Ak)B&=XG!p4*ae=b^=Q`3xu4m@n9e&gqyAIagU zqrP#wXEvWAD;XfX_lls%;R(T;12(uCe6+UJc0@pF8kc?HG>?z)ZR-^wJC$WtLDETg z{@n>D#ZI{$+D+qwjANyr{Ph7=9kym_z`82h74Dp^;J9Zb+V*Y%U(!f@G2)3})|Z1Q z5P%7PMCt!jUWHDI@n-sa_#;k%@Xh~1qNX$#eN 
z*P1I#jT9a``r3%rtd^_*Kf)CPU&`hbM6gLn!!A9hTuz6aEvlJ_Tky=&b#?1haNUu^ zLsAqq%~5vH`%z$Y{!K4sfcqH!4w&(_^TYPvP4$wJryAa*|d}^ zbDqI%)0bqi4!igW@QzY+8u-)B){;n^(M4KE@p_k<3Ft!gI?0z)v5z&BBGgR+0mcgT z@9x~C-`YUacvC5c^39)MI5cF3iiz4<{i)F>>*lCsJQzM~sVCD>ha1{8&(?rzyAY`i zBEQ6l`Pg67u^pNQST4-Jx$@S9eQtT;(Jsg(y_r-g5hbO?UbmeqfvYl9d006YiijI{ z0;VM*$7Z``G0c=(Oi*~0pP7D$7qsOhtiu}9Tz&LhEb>f3%=HLbyu0XH=>S-xxCz_5 z$J$HKXACu|i(x~-$OQ;)r)1j>K@S=8PDYVFWZgZ)Sbbs zO=H)LI1!t9`V9)9-x_B!9#vB#7|-koYNzcuwDTBCocG7drj_F8Mlk7tSq z&}f&rqBw_8DCg=9sCMF;x2hNU7434$Bv2U8jRl2=3d^Z#(R@!B*{S=o&FadU>3goF z=>Bw9C20Qpey%s0fgqq0pRxbah=gH+*OD(@FLY>x6Ju_#1Z%d{WD+ca3OBkEd7KQ4 zV4P9HG&@`7DN}6Cjpq;N*N9ogoJsR=#{=?VtLF{OOB0*3Q&j$LaT%dOThF5ar#H9n zx@{PC;(bZKmoL_^tt{W|FExkn|5d6o0T9zD8w3u0J$m7$El56PKTQLcAhUT z-)`wLFun6w?hmQHpR3<$gYn8MqS~^G%H1CqCewK*`MEzfdh?j<1sm#p<69H%A3UFR z{ZC|npa?CgEr03JtTH@k`2vwXmBF1SkzXrSu-ULiiNjg24K@_}$J^tM-@xN)v(Pnh zw94NL(+mz>Ar~)+4wOq$E7joZ9 zi@+3r?ol`(pC#UKT3uIAQ!HA~&{#VAPc=PSIHa#IS7=DHx8*V7QJUaIT^{-yoX;dl zD&0G3-!A#VtusHz%8V#Ih$m1(6;q|EbunZ;7|*r4*LHnyzI-+rQLleaw*ZoKKQO{< z3q{@fn+mdX+>Y%s-@E=SkCZiH)hB4C|bRK>ZvDmyWWIrQdgVg$(UY@Rc5A^mw6a zicmJ%LX`#ZC%`a(*4R2Ip~#B`(R&b#aIunK(P6~)&fKz70d3_^zst%V`Gyo1^gc8; zp^9G)sG+(JH)CMMM48cDJh#$I$iNfEi3(QDg8BY zo}Zuh+hGBB_tWw0-sT%olcMAVDFu{an#Md?my8U+p4bRE6A;lM=)dBS=&Ccr1#@yV zX`!1H1X*0UIksGNocnLeRxyhgw+uZBN6N8!P(xUM4!?#h=H~uf`ZkrM$8H?0(mFNU zZjC%d#|Rj0S<)*^(eZeZc9Y((=6WSzmQhywST`xuT{x2Q)yoe4M%xzUm&YVpRh2=2;mRnz-aCP6xz0Xxw8a{7w6>-$Fpr=rEomB2 zpj37T2|H->Ojtce7kEz)3#o{m4h4!ZzHwN71JZf}uDThVXLth!0@XtclcjahQ<<8r z+ek$i+s!0hD6hw-H1c7nMXGqvX8eeYqfhfS6{=qXA2r{raRa3Q3T1NTNp*iM%j}%M z?`LuU)spZMY~YC6s*0lV5xN<6YgqZf34W_EKZ2FitKr$S8XN(BY|L#_oivm9LocG( zvcMbX)^Ki>fm$Sh^By8*XZ_y2vx>xCeSB>|`}1e5M-AEYUQ6 zdFymsA>cQ#ww-#9Vuh-QZsX;w_0?h-Iu)7`Sl&OcjJL*v#oN*+n;uP>B?&wUtW7Wx z!vwE8=p#RmzuNTsn22Gr>E%DxVm1c)jq~!F3fDmAX`d8Ne$K>awxvm(;27y)<(s$? 
z?i>Doi0kem*tmngZ50oSUQ0_vFzo zRxiJHM=CVN<&0n7oud}!N^d%KZ`!3Oc+T>Ye7W0s{a-H2JWll= z3jKloZGQqcoEV(8__QLjWBwE=jg8aMgjbq}h2=n;%n-{Kk5lE6@1!Jsk?=Frj)T9lt(jxb!p zfA20s#<{ym{*aL${lr=&g-XaeUx#$>f}Jn7T%RMAx|M&PL&l_qZNl$3Dx`mzFL=Gx zMXgwT!r|=+CF}lRvKEDbR_yV3lN4>j%634oC*p_Lny*%yV8ear>K36Oj1mhw$Xar<8v0Ki%bE-2u83G%l_JEHz$`Kc-EwW{<4`--Re>E z8qo;OreLOfm&li!HBu=i$kZLZr9LO>* z(#HG{W_ibix_#Xjg zZ&f9iG-ggIUfA-cZw&+=+^W64lAi7R8jLQ>$@_~Ot*<*ebD&nkGq-2=IiJx&kW?jXQ92(8TX4`K#kBrM84hKVuLt* zS64?LTqaD!c1WARuJ%V?-a;;+TOL`hX8O~o>Kv%voY#PG&x literal 0 HcmV?d00001 diff --git a/ui/web-v2/apps/admin/src/components/APIKeyAddForm/index.tsx b/ui/web-v2/apps/admin/src/components/APIKeyAddForm/index.tsx new file mode 100644 index 000000000..54d36b7fe --- /dev/null +++ b/ui/web-v2/apps/admin/src/components/APIKeyAddForm/index.tsx @@ -0,0 +1,97 @@ +import { Dialog } from '@headlessui/react'; +import React, { FC, memo } from 'react'; +import { useFormContext } from 'react-hook-form'; +import { useIntl } from 'react-intl'; + +import { messages } from '../../lang/messages'; + +export interface APIKeyAddFormProps { + onSubmit: () => void; + onCancel: () => void; +} + +export const APIKeyAddForm: FC = memo( + ({ onSubmit, onCancel }) => { + const { formatMessage: f } = useIntl(); + const methods = useFormContext(); + const { + register, + formState: { errors, isSubmitting, isValid }, + } = methods; + + return ( +
+
+
+
+
+ + {f(messages.apiKey.add.header.title)} + +
+
+

+ {f(messages.apiKey.add.header.description)} +

+
+
+
+
+
+ +
+ +

+ {errors.name && ( + {errors.name.message} + )} +

+
+
+
+
+
+
+
+ +
+ +
+
+
+ ); + } +); diff --git a/ui/web-v2/apps/admin/src/components/APIKeyList/index.tsx b/ui/web-v2/apps/admin/src/components/APIKeyList/index.tsx new file mode 100644 index 000000000..63ba2fa36 --- /dev/null +++ b/ui/web-v2/apps/admin/src/components/APIKeyList/index.tsx @@ -0,0 +1,182 @@ +import { FC, memo } from 'react'; +import { useIntl } from 'react-intl'; +import { shallowEqual, useSelector } from 'react-redux'; + +import { APIKEY_LIST_PAGE_SIZE } from '../../constants/apiKey'; +import { messages } from '../../lang/messages'; +import { AppState } from '../../modules'; +import { selectAll } from '../../modules/apiKeys'; +import { useCurrentEnvironment, useIsOwner } from '../../modules/me'; +import { APIKey } from '../../proto/account/api_key_pb'; +import { APIKeySearchOptions } from '../../types/apiKey'; +import { classNames } from '../../utils/css'; +import { APIKeySearch } from '../APIKeySearch'; +import { CopyChip } from '../CopyChip'; +import { ListSkeleton } from '../ListSkeleton'; +import { Pagination } from '../Pagination'; +import { RelativeDateText } from '../RelativeDateText'; +import { Switch } from '../Switch'; + +export interface APIKeyListProps { + searchOptions: APIKeySearchOptions; + onChangePage: (page: number) => void; + onSwitchEnabled: ( + apiKeyId: string, + apiKeyName: string, + enabled: boolean + ) => void; + onChangeSearchOptions: (options: APIKeySearchOptions) => void; + onAdd: () => void; + onUpdate: (a: APIKey.AsObject) => void; +} + +export const APIKeyList: FC = memo( + ({ + searchOptions, + onChangePage, + onSwitchEnabled, + onChangeSearchOptions, + onAdd, + onUpdate, + }) => { + const { formatMessage: f, formatDate, formatTime } = useIntl(); + const editable = useIsOwner(); + const currentEnvironment = useCurrentEnvironment(); + const apiKeys = useSelector( + (state) => selectAll(state.apiKeys), + shallowEqual + ); + const isLoading = useSelector( + (state) => state.apiKeys.loading, + shallowEqual + ); + const totalCount = 
useSelector( + (state) => state.apiKeys.totalCount, + shallowEqual + ); + + return ( +
+
+ +
+ {isLoading ? ( + + ) : apiKeys.length == 0 ? ( + searchOptions.q || searchOptions.enabled ? ( +
+
+

+ {f(messages.noResult.title, { + title: f(messages.apiKey.list.header.title), + })} +

+
+
    +
  • + {f(messages.noResult.searchByKeyword, { + keyword: f(messages.apiKey.list.noResult.searchKeyword), + })} +
  • +
  • {f(messages.noResult.changeFilterSelection)}
  • +
  • {f(messages.noResult.checkTypos)}
  • +
+
+
+
+ ) : ( +
+ ) + ) : ( +
+ + + {apiKeys.map((apiKey) => { + return ( + + + + + + ); + })} + +
+
+ +
+ {f(messages.created)} + +
+
+
+ + + {apiKey.id} + + + + + onSwitchEnabled( + apiKey.id, + apiKey.name, + apiKey.disabled + ) + } + size={'small'} + readOnly={!editable} + /> +
+ +
+ )} +
+ ); + } +); diff --git a/ui/web-v2/apps/admin/src/components/APIKeySearch/index.tsx b/ui/web-v2/apps/admin/src/components/APIKeySearch/index.tsx new file mode 100644 index 000000000..b4f5c67f1 --- /dev/null +++ b/ui/web-v2/apps/admin/src/components/APIKeySearch/index.tsx @@ -0,0 +1,191 @@ +import { PlusIcon } from '@heroicons/react/solid'; +import { FC, memo, useCallback, useState } from 'react'; +import { useIntl } from 'react-intl'; +import { shallowEqual, useSelector } from 'react-redux'; + +import { intl } from '../../lang'; +import { messages } from '../../lang/messages'; +import { AppState } from '../../modules'; +import { selectAll } from '../../modules/apiKeys'; +import { useIsOwner } from '../../modules/me'; +import { APIKey } from '../../proto/account/api_key_pb'; +import { APIKeySearchOptions } from '../../types/apiKey'; +import { + SORT_OPTIONS_CREATED_AT_ASC, + SORT_OPTIONS_CREATED_AT_DESC, + SORT_OPTIONS_NAME_ASC, + SORT_OPTIONS_NAME_DESC, +} from '../../types/list'; +import { classNames } from '../../utils/css'; +import { FilterChip } from '../FilterChip'; +import { FilterPopover, Option } from '../FilterPopover'; +import { FilterRemoveAllButtonProps } from '../FilterRemoveAllButton'; +import { SearchInput } from '../SearchInput'; +import { SortItem, SortSelect } from '../SortSelect'; + +const sortItems: SortItem[] = [ + { + key: SORT_OPTIONS_CREATED_AT_DESC, + message: intl.formatMessage(messages.feature.sort.newest), + }, + { + key: SORT_OPTIONS_CREATED_AT_ASC, + message: intl.formatMessage(messages.feature.sort.oldest), + }, + { + key: SORT_OPTIONS_NAME_ASC, + message: intl.formatMessage(messages.feature.sort.nameAz), + }, + { + key: SORT_OPTIONS_NAME_DESC, + message: intl.formatMessage(messages.feature.sort.nameZa), + }, +]; + +export enum FilterTypes { + ENABLED = 'enabled', +} + +export const filterOptions: Option[] = [ + { + value: FilterTypes.ENABLED, + label: intl.formatMessage(messages.apiKey.filter.enabled), + }, +]; + +export const 
enabledOptions: Option[] = [ + { + value: 'true', + label: intl.formatMessage(messages.enabled), + }, + { + value: 'false', + label: intl.formatMessage(messages.disabled), + }, +]; + +export interface APIKeySearchProps { + options: APIKeySearchOptions; + onChange: (options: APIKeySearchOptions) => void; + onAdd: () => void; +} + +export const APIKeySearch: FC = memo( + ({ options, onChange, onAdd }) => { + const { formatMessage: f } = useIntl(); + const editable = useIsOwner(); + const isLoading = useSelector( + (state) => state.apiKeys.loading, + shallowEqual + ); + const apiKeys = useSelector( + (state) => selectAll(state.apiKeys), + shallowEqual + ); + const [filterValues, setFilterValues] = useState([]); + + const handleFilterKeyChange = useCallback( + (key: string): void => { + switch (key) { + case FilterTypes.ENABLED: + setFilterValues(enabledOptions); + return; + } + }, + [setFilterValues, apiKeys] + ); + + const handleUpdateOption = ( + optionPart: Partial + ): void => { + onChange({ ...options, ...optionPart }); + }; + + const handleFilterAdd = (key: string, value?: string): void => { + switch (key) { + case FilterTypes.ENABLED: + handleUpdateOption({ + enabled: value, + }); + return; + } + }; + return ( +
+
+
+ + handleUpdateOption({ + q: query, + }) + } + /> +
+
+ +
+
+
+ + handleUpdateOption({ + sort: sort, + }) + } + /> +
+ {editable && ( +
+ +
+ )} +
+ {options.enabled && ( +
+ {options.enabled && ( + option.value === options.enabled + ).label + }`} + onRemove={() => + handleUpdateOption({ + enabled: null, + }) + } + /> + )} + {options.enabled && ( + + handleUpdateOption({ + enabled: null, + }) + } + /> + )} +
+ )} +
+ ); + } +); diff --git a/ui/web-v2/apps/admin/src/components/APIKeyUpdateForm/index.tsx b/ui/web-v2/apps/admin/src/components/APIKeyUpdateForm/index.tsx new file mode 100644 index 000000000..2134fdc12 --- /dev/null +++ b/ui/web-v2/apps/admin/src/components/APIKeyUpdateForm/index.tsx @@ -0,0 +1,102 @@ +import { Dialog } from '@headlessui/react'; +import React, { FC, memo } from 'react'; +import { useFormContext } from 'react-hook-form'; +import { useIntl } from 'react-intl'; + +import { messages } from '../../lang/messages'; +import { useIsEditable, useIsOwner } from '../../modules/me'; + +export interface APIKeyUpdateFormProps { + onSubmit: () => void; + onCancel: () => void; +} + +export const APIKeyUpdateForm: FC = memo( + ({ onSubmit, onCancel }) => { + const { formatMessage: f } = useIntl(); + const editable = useIsOwner(); + const methods = useFormContext(); + const { + register, + formState: { errors, isSubmitting, isDirty, isValid }, + } = methods; + + return ( +
+
+
+
+
+ + {f(messages.apiKey.update.header.title)} + +
+
+

+ {f(messages.apiKey.update.header.description)} +

+
+
+
+
+
+ +
+ +

+ {errors.name && ( + {errors.name.message} + )} +

+
+
+
+
+
+
+
+ +
+ {editable && ( + + )} +
+
+
+ ); + } +); diff --git a/ui/web-v2/apps/admin/src/components/AccountAddForm/index.tsx b/ui/web-v2/apps/admin/src/components/AccountAddForm/index.tsx new file mode 100644 index 000000000..0afe26364 --- /dev/null +++ b/ui/web-v2/apps/admin/src/components/AccountAddForm/index.tsx @@ -0,0 +1,150 @@ +import { Dialog } from '@headlessui/react'; +import React, { FC, memo } from 'react'; +import { useFormContext, Controller } from 'react-hook-form'; +import { useIntl } from 'react-intl'; + +import { intl } from '../../lang'; +import { messages } from '../../lang/messages'; +import { Account } from '../../proto/account/account_pb'; +import { Select } from '../Select'; + +export interface AccountAddFormProps { + onSubmit: () => void; + onCancel: () => void; +} + +export interface Option { + value: string; + label: string; +} + +export const roleOptions: Option[] = [ + { + value: Account.Role.VIEWER.toString(), + label: intl.formatMessage(messages.account.role.viewer), + }, + { + value: Account.Role.EDITOR.toString(), + label: intl.formatMessage(messages.account.role.editor), + }, + { + value: Account.Role.OWNER.toString(), + label: intl.formatMessage(messages.account.role.owner), + }, +]; + +export const AccountAddForm: FC = memo( + ({ onSubmit, onCancel }) => { + const { formatMessage: f } = useIntl(); + const methods = useFormContext(); + const { + register, + control, + formState: { errors, isSubmitting, isValid }, + } = methods; + + return ( +
+
+
+
+
+ + {f(messages.account.add.header.title)} + +
+
+

+ {f(messages.account.add.header.description)} +

+
+
+
+
+
+ +
+ +

+ {errors.email && ( + {errors.email.message} + )} +

+
+
+
+ +
+ { + return ( + +

+ {errors.email && ( + {errors.email.message} + )} +

+
+
+
+ +
+ { + return ( + +

+ {errors.email && ( + {errors.email.message} + )} +

+
+
+
+
+
+
+
+ +
+ +
+
+
+ ); + } +); diff --git a/ui/web-v2/apps/admin/src/components/AdminAccountList/index.tsx b/ui/web-v2/apps/admin/src/components/AdminAccountList/index.tsx new file mode 100644 index 000000000..f55557d85 --- /dev/null +++ b/ui/web-v2/apps/admin/src/components/AdminAccountList/index.tsx @@ -0,0 +1,161 @@ +import { FC, memo } from 'react'; +import { useIntl } from 'react-intl'; +import { shallowEqual, useSelector } from 'react-redux'; + +import { ACCOUNT_LIST_PAGE_SIZE } from '../../constants/account'; +import { messages } from '../../lang/messages'; +import { AppState } from '../../modules'; +import { selectAll } from '../../modules/accounts'; +import { useIsOwner } from '../../modules/me'; +import { Account } from '../../proto/account/account_pb'; +import { AdminAccountSearchOptions } from '../../types/adminAccount'; +import { classNames } from '../../utils/css'; +import { AdminAccountSearch } from '../AdminAccountSearch'; +import { ListSkeleton } from '../ListSkeleton'; +import { Pagination } from '../Pagination'; +import { RelativeDateText } from '../RelativeDateText'; +import { Switch } from '../Switch'; + +export interface AdminAccountListProps { + searchOptions: AdminAccountSearchOptions; + onChangePage: (page: number) => void; + onSwitchEnabled: (accountId: string, enabled: boolean) => void; + onChangeSearchOptions: (options: AdminAccountSearchOptions) => void; + onAdd: () => void; +} + +export const AdminAccountList: FC = memo( + ({ + searchOptions, + onChangePage, + onSwitchEnabled, + onChangeSearchOptions, + onAdd, + }) => { + const { formatMessage: f } = useIntl(); + const editable = useIsOwner(); + const accounts = useSelector( + (state) => selectAll(state.accounts), + shallowEqual + ); + const isLoading = useSelector( + (state) => state.accounts.loading, + shallowEqual + ); + const totalCount = useSelector( + (state) => state.accounts.totalCount, + shallowEqual + ); + + return ( +
+
+ +
+ {isLoading ? ( + + ) : accounts.length == 0 ? ( + searchOptions.q || searchOptions.enabled ? ( +
+
+

+ {f(messages.noResult.title, { + title: f(messages.account.list.header.title), + })} +

+
+
    +
  • + {f(messages.noResult.searchByKeyword, { + keyword: f( + messages.account.list.noResult.searchKeyword + ), + })} +
  • +
  • {f(messages.noResult.changeFilterSelection)}
  • +
  • {f(messages.noResult.checkTypos)}
  • +
+
+
+
+ ) : ( +
+
+

+ {f(messages.noData.title, { + title: f(messages.account.list.header.title), + })} +

+

+ {f(messages.account.list.noData.description)} +

+ + {f(messages.readMore)} + +
+
+ ) + ) : ( +
+ + + {accounts.map((account) => { + return ( + + + + + ); + })} + +
+
+ + {account.id} + +
+ {f(messages.created)} + +
+
+
+ + onSwitchEnabled(account.id, account.disabled) + } + size={'small'} + readOnly={!editable} + /> +
+ +
+ )} +
+ ); + } +); diff --git a/ui/web-v2/apps/admin/src/components/AdminAccountSearch/index.tsx b/ui/web-v2/apps/admin/src/components/AdminAccountSearch/index.tsx new file mode 100644 index 000000000..d50fe3797 --- /dev/null +++ b/ui/web-v2/apps/admin/src/components/AdminAccountSearch/index.tsx @@ -0,0 +1,196 @@ +import { PlusIcon } from '@heroicons/react/solid'; +import { FC, memo, useCallback, useState } from 'react'; +import { useIntl } from 'react-intl'; +import { shallowEqual, useSelector } from 'react-redux'; + +import { intl } from '../../lang'; +import { messages } from '../../lang/messages'; +import { AppState } from '../../modules'; +import { selectAll } from '../../modules/accounts'; +import { useIsOwner } from '../../modules/me'; +import { Account } from '../../proto/account/account_pb'; +import { AdminAccountSearchOptions } from '../../types/adminAccount'; +import { + SORT_OPTIONS_CREATED_AT_ASC, + SORT_OPTIONS_CREATED_AT_DESC, + SORT_OPTIONS_NAME_ASC, + SORT_OPTIONS_NAME_DESC, +} from '../../types/list'; +import { classNames } from '../../utils/css'; +import { FilterChip } from '../FilterChip'; +import { FilterPopover, Option } from '../FilterPopover'; +import { FilterRemoveAllButtonProps } from '../FilterRemoveAllButton'; +import { SearchInput } from '../SearchInput'; +import { SortItem, SortSelect } from '../SortSelect'; + +const sortItems: SortItem[] = [ + { + key: SORT_OPTIONS_CREATED_AT_DESC, + message: intl.formatMessage(messages.account.sort.newest), + }, + { + key: SORT_OPTIONS_CREATED_AT_ASC, + message: intl.formatMessage(messages.account.sort.oldest), + }, + { + key: SORT_OPTIONS_NAME_ASC, + message: intl.formatMessage(messages.account.sort.emailAz), + }, + { + key: SORT_OPTIONS_NAME_DESC, + message: intl.formatMessage(messages.account.sort.emailZa), + }, +]; + +export enum FilterTypes { + ENABLED = 'enabled', +} + +export const filterOptions: Option[] = [ + { + value: FilterTypes.ENABLED, + label: 
intl.formatMessage(messages.account.filter.enabled), + }, +]; + +export const enabledOptions: Option[] = [ + { + value: 'true', + label: intl.formatMessage(messages.enabled), + }, + { + value: 'false', + label: intl.formatMessage(messages.disabled), + }, +]; + +export interface AdminAccountSearchProps { + options: AdminAccountSearchOptions; + onChange: (options: AdminAccountSearchOptions) => void; + onAdd: () => void; +} + +export const AdminAccountSearch: FC = memo( + ({ options, onChange, onAdd }) => { + const { formatMessage: f } = useIntl(); + const editable = useIsOwner(); + const isLoading = useSelector( + (state) => state.accounts.loading, + shallowEqual + ); + const accounts = useSelector( + (state) => selectAll(state.accounts), + shallowEqual + ); + const [filterValues, setFilterValues] = useState([]); + + const handleFilterKeyChange = useCallback( + (key: string): void => { + switch (key) { + case FilterTypes.ENABLED: + setFilterValues(enabledOptions); + return; + } + }, + [setFilterValues, accounts] + ); + + const handleUpdateOption = ( + optionPart: Partial + ): void => { + onChange({ ...options, ...optionPart }); + }; + + const handleFilterAdd = (key: string, value?: string): void => { + switch (key) { + case FilterTypes.ENABLED: + handleUpdateOption({ + enabled: value, + }); + return; + } + }; + return ( +
+
+
+ + handleUpdateOption({ + q: query, + }) + } + /> +
+
+ +
+
+
+ + handleUpdateOption({ + sort: sort, + }) + } + /> +
+ {editable && ( +
+ +
+ )} +
+ {options.enabled && ( +
+ {options.enabled && ( + option.value === options.enabled + ).label + }`} + onRemove={() => + handleUpdateOption({ + enabled: null, + }) + } + /> + )} + {options.enabled && ( + + handleUpdateOption({ + enabled: null, + }) + } + /> + )} +
+ )} +
+ ); + } +); diff --git a/ui/web-v2/apps/admin/src/components/AdminNotificationAddForm/index.tsx b/ui/web-v2/apps/admin/src/components/AdminNotificationAddForm/index.tsx new file mode 100644 index 000000000..dbb787721 --- /dev/null +++ b/ui/web-v2/apps/admin/src/components/AdminNotificationAddForm/index.tsx @@ -0,0 +1,147 @@ +import { Dialog } from '@headlessui/react'; +import { FC, memo } from 'react'; +import { Controller, useFormContext } from 'react-hook-form'; +import { useIntl } from 'react-intl'; + +import { SOURCE_TYPE_ITEMS } from '../../constants/adminNotification'; +import { messages } from '../../lang/messages'; +import { CheckBoxList } from '../CheckBoxList'; + +export interface AdminNotificationAddFormProps { + onSubmit: () => void; + onCancel: () => void; +} + +export const AdminNotificationAddForm: FC = memo( + ({ onSubmit, onCancel }) => { + const { formatMessage: f } = useIntl(); + const methods = useFormContext(); + const { + register, + control, + formState: { errors, isValid, isSubmitted }, + } = methods; + + return ( +
+
+
+
+
+ + {f(messages.notification.add.header.title)} + +
+
+

+ {f(messages.notification.add.header.description)} +

+
+
+
+
+
+ +
+ +

+ {errors.name && ( + {errors.name.message} + )} +

+
+
+
+
+
+
+
+ +
+ { + return ( + { + field.onChange(values); + }} + disabled={isSubmitted} + /> + ); + }} + /> +

+ {errors.sourceTypes && ( + {errors.sourceTypes.message} + )} +

+
+
+
+
+
+
+ +
+ +

+ {errors.webhookUrl && ( + {errors.webhookUrl.message} + )} +

+
+
+
+
+
+
+
+
+ +
+ +
+
+
+ ); + } +); diff --git a/ui/web-v2/apps/admin/src/components/AdminNotificationList/index.tsx b/ui/web-v2/apps/admin/src/components/AdminNotificationList/index.tsx new file mode 100644 index 000000000..88b797484 --- /dev/null +++ b/ui/web-v2/apps/admin/src/components/AdminNotificationList/index.tsx @@ -0,0 +1,185 @@ +import MUDeleteIcon from '@material-ui/icons/Delete'; +import { FC, memo } from 'react'; +import { useIntl } from 'react-intl'; +import { shallowEqual, useSelector } from 'react-redux'; + +import { NOTIFICATION_LIST_PAGE_SIZE } from '../../constants/notification'; +import { messages } from '../../lang/messages'; +import { AppState } from '../../modules'; +import { useIsEditable } from '../../modules/me'; +import { selectAll } from '../../modules/notifications'; +import { Subscription } from '../../proto/notification/subscription_pb'; +import { NotificationSearchOptions } from '../../types/notification'; +import { classNames } from '../../utils/css'; +import { ListSkeleton } from '../ListSkeleton'; +import { NotificationSearch } from '../NotificationSearch'; +import { Pagination } from '../Pagination'; +import { RelativeDateText } from '../RelativeDateText'; +import { Switch } from '../Switch'; + +export interface AdminNotificationListProps { + searchOptions: NotificationSearchOptions; + onChangePage: (page: number) => void; + onChangeSearchOptions: (options: NotificationSearchOptions) => void; + onAdd: () => void; + onUpdate: (notification: Subscription.AsObject) => void; + onSwitch: (notification: Subscription.AsObject) => void; + onDelete: (notification: Subscription.AsObject) => void; +} + +export const AdminNotificationList: FC = memo( + ({ + searchOptions, + onChangePage, + onChangeSearchOptions, + onAdd, + onUpdate, + onSwitch, + onDelete, + }) => { + const editable = useIsEditable(); + const { formatMessage: f } = useIntl(); + const notificationList = useSelector( + (state) => selectAll(state.adminNotification), + shallowEqual + ); + const 
isLoading = useSelector( + (state) => state.adminNotification.loading, + shallowEqual + ); + const totalCount = useSelector( + (state) => state.adminNotification.totalCount, + shallowEqual + ); + + return ( +
+
+

+ {f(messages.notification.list.header.description)} +

+ + {f(messages.readMore)} + +
+
+
+ +
+ {isLoading ? ( + + ) : notificationList.length == 0 ? ( + searchOptions.q || searchOptions.enabled ? ( +
+
+

+ {f(messages.noResult.title, { + title: f(messages.notification.list.header.title), + })} +

+
+
    +
  • + {f(messages.noResult.searchByKeyword, { + keyword: f( + messages.notification.list.noResult.searchKeyword + ), + })} +
  • +
  • {f(messages.noResult.changeFilterSelection)}
  • +
  • {f(messages.noResult.checkTypos)}
  • +
+
+
+
+ ) : ( +
+
+

+ {f(messages.noData.title, { + title: f(messages.notification.list.header.title), + })} +

+

+ {f(messages.notification.list.noData.description)} +

+
+
+ ) + ) : ( +
+ + + {notificationList.map((notification) => ( + + + + {editable && ( + + )} + + ))} + +
+
+ +
+ {f(messages.created)} + +
+
+
+ onSwitch(notification)} + size={'small'} + readOnly={!editable} + /> + + +
+ +
+ )} +
+
+ ); + } +); diff --git a/ui/web-v2/apps/admin/src/components/AdminNotificationUpdateForm/index.tsx b/ui/web-v2/apps/admin/src/components/AdminNotificationUpdateForm/index.tsx new file mode 100644 index 000000000..804c7e888 --- /dev/null +++ b/ui/web-v2/apps/admin/src/components/AdminNotificationUpdateForm/index.tsx @@ -0,0 +1,161 @@ +import { Dialog } from '@headlessui/react'; +import { FC, memo, useState } from 'react'; +import { Controller, useFormContext } from 'react-hook-form'; +import { useIntl } from 'react-intl'; + +import { SOURCE_TYPE_ITEMS } from '../../constants/adminNotification'; +import { messages } from '../../lang/messages'; +import { useIsEditable } from '../../modules/me'; +import { CheckBoxList } from '../CheckBoxList'; + +export interface AdminNotificationUpdateFormProps { + onSubmit: () => void; + onCancel: () => void; +} + +export const AdminNotificationUpdateForm: FC = + memo(({ onSubmit, onCancel }) => { + const { formatMessage: f } = useIntl(); + const methods = useFormContext(); + const editable = useIsEditable(); + const { + register, + control, + getValues, + formState: { errors, isValid, isDirty, isSubmitted }, + } = methods; + + const [defaultValues] = useState(() => + SOURCE_TYPE_ITEMS.filter((item) => + getValues().sourceTypes.includes(Number(item.value)) + ) + ); + + return ( +
+
+
+
+
+ + {f(messages.notification.update.header.title)} + +
+
+

+ {f(messages.notification.update.header.description)} +

+
+
+
+
+
+ +
+ +

+ {errors.name && ( + {errors.name.message} + )} +

+
+
+
+
+
+
+
+ +
+ { + return ( + { + const convList = values.map((value) => + Number(value) + ); + field.onChange(convList.sort()); + }} + disabled={!editable || isSubmitted} + defaultValues={defaultValues} + /> + ); + }} + /> +

+ {errors.sourceTypes && ( + {errors.sourceTypes.message} + )} +

+
+
+
+
+
+
+ +
+ +

+ {errors.webhookUrl && ( + {errors.webhookUrl.message} + )} +

+
+
+
+
+
+
+
+
+ +
+ {editable && ( + + )} +
+
+
+ ); + }); diff --git a/ui/web-v2/apps/admin/src/components/AnalysisForm/index.tsx b/ui/web-v2/apps/admin/src/components/AnalysisForm/index.tsx new file mode 100644 index 000000000..ac9667ba0 --- /dev/null +++ b/ui/web-v2/apps/admin/src/components/AnalysisForm/index.tsx @@ -0,0 +1,311 @@ +import { FC, memo, useCallback, useEffect } from 'react'; +import { Controller, useFormContext } from 'react-hook-form'; +import { useIntl } from 'react-intl'; +import { shallowEqual, useDispatch, useSelector } from 'react-redux'; + +import { ANALYSIS_USER_METADATA_REGEX } from '../../constants/analysis'; +import { intl } from '../../lang'; +import { messages } from '../../lang/messages'; +import { AppState } from '../../modules'; +import { + selectAll as selectAllFeatures, + listFeatures, +} from '../../modules/features'; +import { listGoals, selectAll as selectAllGoals } from '../../modules/goals'; +import { useCurrentEnvironment } from '../../modules/me'; +import { + listUserMetadata, + selectAll as selectAllUserMetadata, +} from '../../modules/userMetadata'; +import { Goal } from '../../proto/experiment/goal_pb'; +import { ListGoalsRequest } from '../../proto/experiment/service_pb'; +import { Feature } from '../../proto/feature/feature_pb'; +import { Reason } from '../../proto/feature/reason_pb'; +import { ListFeaturesRequest } from '../../proto/feature/service_pb'; +import { AppDispatch } from '../../store'; +import { classNames } from '../../utils/css'; +import { DatetimePicker } from '../DatetimePicker'; +import { DetailSkeleton } from '../DetailSkeleton'; +import { Option, Select } from '../Select'; + +function getReasonKey(val: number): string { + const keys = [...Object.keys(Reason.Type)]; + const values = [...Object.values(Reason.Type)]; + return keys[values.findIndex((v) => v == val)]; +} + +export const reasonOptions: Option[] = [ + { + value: getReasonKey(Reason.Type.TARGET), + label: intl.formatMessage(messages.reason.target), + }, + { + value: 
getReasonKey(Reason.Type.RULE), + label: intl.formatMessage(messages.reason.rule), + }, + { + value: getReasonKey(Reason.Type.OFF_VARIATION), + label: intl.formatMessage(messages.reason.offVariation), + }, + { + value: getReasonKey(Reason.Type.CLIENT), + label: intl.formatMessage(messages.reason.client), + }, +]; + +export interface AnalysisFormProps { + onSubmit: () => void; +} + +export const AnalysisForm: FC = memo(({ onSubmit }) => { + const { formatMessage: f } = useIntl(); + const dispatch = useDispatch(); + const currentEnvironment = useCurrentEnvironment(); + const features = useSelector( + (state) => selectAllFeatures(state.features), + shallowEqual + ); + const isFeatureLoading = useSelector( + (state) => state.features.loading, + shallowEqual + ); + const goals = useSelector( + (state) => selectAllGoals(state.goals), + shallowEqual + ); + const isGoalLoading = useSelector( + (state) => state.goals.loading, + shallowEqual + ); + const userMetadata = useSelector( + (state) => selectAllUserMetadata(state.userMetadata), + shallowEqual + ); + const isUserMetadataLoading = useSelector( + (state) => state.userMetadata.loading, + shallowEqual + ); + const isLoading = isFeatureLoading || isGoalLoading || isUserMetadataLoading; + const featureOptions = features.map((feature) => { + return { + value: feature.id, + label: feature.name, + }; + }); + const goalOptions = goals.map((goal) => { + return { + value: goal.id, + label: goal.name, + }; + }); + const segmentOptions = userMetadata.map((data) => { + return { + value: data, + label: getSegmentLabel(data), + }; + }); + segmentOptions.push({ + value: 'tag', + label: f(messages.tags), + }); + const methods = useFormContext(); + const { + control, + formState: { errors, isSubmitting, isDirty, isValid, dirtyFields }, + setValue, + watch, + } = methods; + const featureId = watch('featureId'); + + const handleOnChangeFeature = useCallback( + (featureId: string) => { + const feature = features?.find((f) => f.id === 
featureId); + setValue('featureVersion', feature ? feature.version : 0); + !feature && setValue('reason', ''); + }, + [features, setValue] + ); + + useEffect(() => { + dispatch( + listGoals({ + environmentNamespace: currentEnvironment.namespace, + pageSize: 99999, + cursor: '', + searchKeyword: null, + status: null, + orderBy: ListGoalsRequest.OrderBy.DEFAULT, + orderDirection: ListGoalsRequest.OrderDirection.ASC, + }) + ); + dispatch( + listFeatures({ + environmentNamespace: currentEnvironment.namespace, + pageSize: 99999, + cursor: '', + tags: [], + searchKeyword: null, + enabled: null, + hasExperiment: null, + maintainerId: null, + orderBy: ListFeaturesRequest.OrderBy.DEFAULT, + orderDirection: ListFeaturesRequest.OrderDirection.ASC, + }) + ); + dispatch( + listUserMetadata({ + environmentNamespace: currentEnvironment.namespace, + }) + ); + }, [dispatch]); + + return isLoading ? ( +
+ +
+ ) : ( +
+
+
+
+
+ + +
+
+ + +
+
+

+ {errors.startAt?.message && ( + {errors.startAt?.message} + )} +

+

+ {errors.endAt?.message && ( + {errors.endAt?.message} + )} +

+
+
+ + { + return ( + { + field.onChange(o ? o.value : ''); + handleOnChangeFeature(o?.value.toString()); + }} + value={featureOptions.find((o) => o.value === field.value)} + clearable={true} + /> + ); + }} + /> +

+ {errors.featureid?.message && ( + {errors.featureid?.message} + )} +

+
+ {featureId && ( +
+ + { + return ( + { + field.onChange(ops.map((o) => o.value)); + }} + clearable={true} + /> + ); + }} + /> +

+ {errors.userMetadata?.message && ( + {errors.userMetadata?.message} + )} +

+
+
+
+ +
+
+ ); +}); + +const getSegmentLabel = (segment: string): string => { + const match = segment.match(ANALYSIS_USER_METADATA_REGEX); + if (match && match?.length > 1) { + return `${match[1]} (${intl.formatMessage(messages.analysis.clientData)})`; + } + return segment; +}; diff --git a/ui/web-v2/apps/admin/src/components/AnalysisTable/index.tsx b/ui/web-v2/apps/admin/src/components/AnalysisTable/index.tsx new file mode 100644 index 000000000..fd85593d9 --- /dev/null +++ b/ui/web-v2/apps/admin/src/components/AnalysisTable/index.tsx @@ -0,0 +1,241 @@ +import { SerializedError } from '@reduxjs/toolkit'; +import { FC, useState } from 'react'; +import { useIntl } from 'react-intl'; +import { shallowEqual, useSelector } from 'react-redux'; + +import { messages } from '../..//lang/messages'; +import { + ANALYSIS_LIST_PAGE_SIZE, + ANALYSIS_USER_METADATA_REGEX, +} from '../../constants/analysis'; +import { intl } from '../../lang'; +import { AppState } from '../../modules'; +import { selectById as selectFeatureById } from '../../modules/features'; +import { Cell, Row } from '../../proto/eventcounter/table_pb'; +import { Feature } from '../../proto/feature/feature_pb'; +import { classNames } from '../../utils/css'; +import { ListSkeleton } from '../ListSkeleton'; +import { Pagination } from '../Pagination'; + +interface AnalysisTableProps { + featureId?: string; +} + +export const AnalysisTable: FC = ({ featureId }) => { + const [page, setPage] = useState(1); + const [feature, getFeatureError] = useSelector< + AppState, + [Feature.AsObject | undefined, SerializedError | null] + >( + (state) => [ + selectFeatureById(state.features, featureId), + state.features.getFeatureError, + ], + shallowEqual + ); + const headers = useSelector( + (state) => state.goalCounts.headers, + shallowEqual + ); + const rows = useSelector( + (state) => state.goalCounts.rows, + shallowEqual + ); + const loading = useSelector( + (state) => state.goalCounts.loading, + shallowEqual + ); + const variationIdx 
= headers?.cellsList?.findIndex( + (cell) => cell.value == 'Variation' + ); + const variations = new Map(); + feature?.variationsList.forEach((v) => { + variations.set(v.id, v.value); + }); + if (loading) { + return ( +
+ +
+ ); + } + if (!(rows && headers)) { + return null; + } + return ( +
+ + + + {rows + .slice( + (page - 1) * ANALYSIS_LIST_PAGE_SIZE, + page * ANALYSIS_LIST_PAGE_SIZE + ) + .map((row, i) => { + return ( + + {row.cellsList.map((cell, j) => { + let textLeft = false; + let value: string; + if (cell.type == Cell.Type.DOUBLE) { + value = Number.isNaN(cell.valuedouble) + ? 'n/a' + : cell.valuedouble.toFixed(1); + } else { + textLeft = true; + // Replace variation ID with variation value. + if (variationIdx == j) { + const variationValue = variations.get(cell.value); + if (!variationValue) { + value = `(deleted) ${cell.value}`; + } else { + value = variationValue; + } + } else { + value = cell.value; + } + } + return ( + + {' '} + {value}{' '} + + ); + })} + + ); + })} + +
+ +
+ ); +}; + +interface TableHeaderProps { + headers: Row.AsObject; +} + +const TableHeader: FC = ({ headers }) => { + const { formatMessage: f } = useIntl(); + return ( + + + {headers.cellsList.map((header) => { + let value = header.value; + const match = value.match(ANALYSIS_USER_METADATA_REGEX); + if (match && match?.length > 1) { + value = match[1]; + } + if (value === 'tag') { + value = f(messages.tags); + } + return ( + +
+ {headerMessage(value)} +
+ + ); + })} + + + ); +}; + +function headerMessage(name: string): string { + let msg = ''; + switch (name) { + case 'Variation': + msg = intl.formatMessage(messages.analysis.variation); + break; + case 'Evaluation user': + msg = intl.formatMessage(messages.analysis.evaluationUser); + break; + case 'Evaluation total': + msg = intl.formatMessage(messages.analysis.evaluationTotal); + break; + case 'Goal user': + msg = intl.formatMessage(messages.analysis.goalUser); + break; + case 'Goal total': + msg = intl.formatMessage(messages.analysis.goalTotal); + break; + case 'Goal value total': + msg = intl.formatMessage(messages.analysis.goalValueTotal); + break; + case 'Goal value mean': + msg = intl.formatMessage(messages.analysis.goalValueMean); + break; + case 'Goal value variance': + msg = intl.formatMessage(messages.analysis.goalValueVariance); + break; + case 'Conversion rate': + msg = intl.formatMessage(messages.analysis.conversionRate); + break; + default: + msg = name; + } + return msg; +} + +export interface TableProps {} + +export const Table: FC = ({ children }) => { + return ( +
+ + {children} +
+
+ ); +}; + +export interface TableBodyProps {} + +export const TableBody: FC = ({ children }) => { + return {children}; +}; + +export interface TableRowProps {} + +export const TableRow: FC = ({ children }) => { + return {children}; +}; + +export interface TableCellProps { + textLeft?: boolean; +} + +export const TableCell: FC = ({ children, textLeft }) => { + return ( + + {children} + + ); +}; diff --git a/ui/web-v2/apps/admin/src/components/AuditLogList/index.tsx b/ui/web-v2/apps/admin/src/components/AuditLogList/index.tsx new file mode 100644 index 000000000..87b8f429b --- /dev/null +++ b/ui/web-v2/apps/admin/src/components/AuditLogList/index.tsx @@ -0,0 +1,142 @@ +import { FC, memo } from 'react'; +import { useIntl } from 'react-intl'; +import { shallowEqual, useSelector } from 'react-redux'; + +import { AUDITLOG_LIST_PAGE_SIZE } from '../../constants/auditLog'; +import { messages } from '../../lang/messages'; +import { AppState } from '../../modules'; +import { selectAll } from '../../modules/auditLogs'; +import { AuditLog } from '../../proto/auditlog/auditlog_pb'; +import { AuditLogSearchOptions } from '../../types/auditLog'; +import { classNames } from '../../utils/css'; +import { AuditLogSearch } from '../AuditLogSearch'; +import { ListSkeleton } from '../ListSkeleton'; +import { Pagination } from '../Pagination'; +import { RelativeDateText } from '../RelativeDateText'; + +export interface AuditLogListProps { + searchOptions: AuditLogSearchOptions; + onChangePage: (page: number) => void; + onChangeSearchOptions: (options: AuditLogSearchOptions) => void; +} + +export const AuditLogList: FC = memo( + ({ searchOptions, onChangePage, onChangeSearchOptions }) => { + const { formatMessage: f } = useIntl(); + const auditLogs = useSelector( + (state) => selectAll(state.auditLog), + shallowEqual + ); + const isLoading = useSelector( + (state) => state.auditLog.loading, + shallowEqual + ); + const totalCount = useSelector( + (state) => state.auditLog.totalCount, + 
shallowEqual + ); + return ( +
+
+ +
+ {isLoading ? ( + + ) : auditLogs.length == 0 ? ( + searchOptions.q || searchOptions.resource ? ( +
+
+

+ {f(messages.noResult.title, { + title: f(messages.auditLog.list.header.title), + })} +

+
+
    +
  • + {f(messages.noResult.searchByKeyword, { + keyword: f( + messages.auditLog.list.noResult.searchKeyword + ), + })} +
  • +
  • {f(messages.noResult.checkTypos)}
  • +
+
+
+
+ ) : ( +
+
+

+ {f(messages.noData.title, { + title: f(messages.auditLog.list.header.title), + })} +

+

+ {f(messages.auditLog.list.noData.description)} +

+ + {f(messages.readMore)} + +
+
+ ) + ) : ( +
+ + + {auditLogs.map((auditLog) => ( + + + + ))} + +
+ + {auditLog.editor.email} + + + {auditLog.localizedMessage.message} + +
+ +
+ {auditLog.options && ( +
+ {nl2br(auditLog.options.comment)} +
+ )} +
+ +
+ )} +
+ ); + } +); + +const nl2br = (text: string): Array => { + const regex = /(\n)/g; + return text.split(regex).map((line, i) => { + if (line.match(regex)) { + return
; + } else { + return line; + } + }); +}; diff --git a/ui/web-v2/apps/admin/src/components/AuditLogSearch/index.tsx b/ui/web-v2/apps/admin/src/components/AuditLogSearch/index.tsx new file mode 100644 index 000000000..12de5dcba --- /dev/null +++ b/ui/web-v2/apps/admin/src/components/AuditLogSearch/index.tsx @@ -0,0 +1,98 @@ +import { FC, memo, useCallback, useState } from 'react'; +import { useIntl } from 'react-intl'; +import { shallowEqual, useSelector } from 'react-redux'; + +import { intl } from '../../lang'; +import { messages } from '../../lang/messages'; +import { AppState } from '../../modules'; +import { AuditLogSearchOptions } from '../../types/auditLog'; +import { + SORT_OPTIONS_CREATED_AT_ASC, + SORT_OPTIONS_CREATED_AT_DESC, +} from '../../types/list'; +import { classNames } from '../../utils/css'; +import { Option } from '../FilterPopover'; +import { SearchInput } from '../SearchInput'; +import { SortItem, SortSelect } from '../SortSelect'; + +const sortItems: SortItem[] = [ + { + key: SORT_OPTIONS_CREATED_AT_DESC, + message: intl.formatMessage(messages.auditLog.sort.newest), + }, + { + key: SORT_OPTIONS_CREATED_AT_ASC, + message: intl.formatMessage(messages.auditLog.sort.oldest), + }, +]; + +export enum FilterTypes { + DATES = 'dates', + TYPE = 'type', +} + +export const filterOptions: Option[] = [ + { + value: FilterTypes.DATES, + label: intl.formatMessage(messages.auditLog.filter.dates), + }, + { + value: FilterTypes.TYPE, + label: intl.formatMessage(messages.auditLog.filter.type), + }, +]; + +export interface AuditLogSearchProps { + options: AuditLogSearchOptions; + onChange: (options: AuditLogSearchOptions) => void; +} + +export const AuditLogSearch: FC = memo( + ({ options, onChange }) => { + const { formatMessage: f } = useIntl(); + const isLoading = useSelector( + (state) => state.auditLog.loading, + shallowEqual + ); + const handleUpdateOption = ( + optionPart: Partial + ): void => { + onChange({ ...options, ...optionPart }); + }; + + return ( +
+
+
+ + handleUpdateOption({ + q: query, + }) + } + /> +
+
+
+ + handleUpdateOption({ + sort: sort, + }) + } + /> +
+
+
+ ); + } +); diff --git a/ui/web-v2/apps/admin/src/components/Breadcrumbs/index.tsx b/ui/web-v2/apps/admin/src/components/Breadcrumbs/index.tsx new file mode 100644 index 000000000..0cfab8223 --- /dev/null +++ b/ui/web-v2/apps/admin/src/components/Breadcrumbs/index.tsx @@ -0,0 +1,57 @@ +import { HomeIcon } from '@heroicons/react/solid'; +import React, { FC, memo } from 'react'; +import { Link } from 'react-router-dom'; + +import { PAGE_PATH_ROOT } from '../../constants/routing'; +import { classNames } from '../../utils/css'; + +export interface Pages { + name: string; + path: string; + current: boolean; +} + +export interface BreadcrumbsProps { + pages: Pages[]; +} + +export const Breadcrumbs: FC = memo(({ pages }) => { + return ( + + ); +}); diff --git a/ui/web-v2/apps/admin/src/components/CheckBox/index.tsx b/ui/web-v2/apps/admin/src/components/CheckBox/index.tsx new file mode 100644 index 000000000..d1bf6632f --- /dev/null +++ b/ui/web-v2/apps/admin/src/components/CheckBox/index.tsx @@ -0,0 +1,29 @@ +import { FC, memo } from 'react'; + +export interface CheckBoxProps { + id: string; + value: string; + onChange: (value: string, checked: boolean) => void; + defaultChecked?: boolean; + disabled?: boolean; +} + +export const CheckBox: FC = memo( + ({ id, value, defaultChecked, onChange, disabled }) => { + const handleOnChange = (e: React.ChangeEvent) => { + onChange(e.target.value, e.target.checked); + }; + + return ( + + ); + } +); diff --git a/ui/web-v2/apps/admin/src/components/CheckBoxList/index.tsx b/ui/web-v2/apps/admin/src/components/CheckBoxList/index.tsx new file mode 100644 index 000000000..5ce25a9ee --- /dev/null +++ b/ui/web-v2/apps/admin/src/components/CheckBoxList/index.tsx @@ -0,0 +1,81 @@ +import { FC, memo, useState } from 'react'; + +import { CheckBox } from '../CheckBox'; + +export interface Option { + value: string; + label: string; + description?: string; +} + +export interface CheckBoxListProps { + onChange: (values: string[]) => void; + 
options: Option[]; + defaultValues?: Option[]; + disabled?: boolean; +} + +export const CheckBoxList: FC = memo( + ({ onChange, options, defaultValues, disabled }) => { + const [checkedItems] = useState(() => { + const items = new Map(); + defaultValues && + defaultValues.forEach((item, _) => { + items.set(item.value, item.value); + }); + return items; + }); + + const handleOnChange = (value: string, checked: boolean) => { + if (checked) { + checkedItems.set(value, value); + } else { + checkedItems.delete(value); + } + const valueList = []; + checkedItems.forEach((v, _) => { + valueList.push(v); + }); + onChange(valueList); + }; + + return ( +
+
+
+ {options.map((item, index) => { + return ( +
+
+ +
+
+ +
+
+ ); + })} +
+
+
+ ); + } +); diff --git a/ui/web-v2/apps/admin/src/components/ConfirmDialog/index.tsx b/ui/web-v2/apps/admin/src/components/ConfirmDialog/index.tsx new file mode 100644 index 000000000..6b6023ef4 --- /dev/null +++ b/ui/web-v2/apps/admin/src/components/ConfirmDialog/index.tsx @@ -0,0 +1,59 @@ +import { Dialog, Transition } from '@headlessui/react'; +import { Fragment, FC } from 'react'; +import { useIntl } from 'react-intl'; + +import { Modal } from '../Modal'; + +interface ConfirmDialogProps { + open: boolean; + onConfirm: () => void; + onClose: () => void; + title: string; + description: string; + onCloseButton: string; + onConfirmButton: string; +} + +export const ConfirmDialog: FC = ({ + open, + onConfirm, + onClose, + title, + description, + onCloseButton, + onConfirmButton, +}) => { + return ( + + + {title} + +
+

{description}

+
+
+
+ + +
+
+
+ ); +}; diff --git a/ui/web-v2/apps/admin/src/components/CopyChip/index.tsx b/ui/web-v2/apps/admin/src/components/CopyChip/index.tsx new file mode 100644 index 000000000..bcb45fa81 --- /dev/null +++ b/ui/web-v2/apps/admin/src/components/CopyChip/index.tsx @@ -0,0 +1,49 @@ +import { FC, memo, useCallback, useState } from 'react'; +import { useIntl } from 'react-intl'; + +import { messages } from '../../lang/messages'; +import { classNames } from '../../utils/css'; +import { HoverPopover } from '../HoverPopover'; + +export interface CopyChipProps { + text: string; +} + +export const CopyChip: FC = memo(({ text, children }) => { + const { formatMessage: f } = useIntl(); + const [textClicked, setTextClicked] = useState(false); + + const handleTextClick = useCallback( + (text: string) => { + navigator.clipboard.writeText(text); + setTextClicked(true); + }, + [setTextClicked] + ); + return ( +
+ handleTextClick(text)} + onMouseLeave={() => setTextClicked(false)} + render={() => { + return ( +
+ {textClicked ? ( + {f(messages.copy.copied)} + ) : ( + {f(messages.copy.copyToClipboard)} + )} +
+ ); + }} + > + {children} +
+
+ ); +}); diff --git a/ui/web-v2/apps/admin/src/components/CountResultBarChart/index.tsx b/ui/web-v2/apps/admin/src/components/CountResultBarChart/index.tsx new file mode 100644 index 000000000..d279f5229 --- /dev/null +++ b/ui/web-v2/apps/admin/src/components/CountResultBarChart/index.tsx @@ -0,0 +1,57 @@ +import { jsx } from '@emotion/react'; +import { FC } from 'react'; +import { Bar } from 'react-chartjs-2'; + +import { COLORS } from '../../constants/colorPattern'; + +interface CountResultBarChartProps { + label: string; + variationValues: string[]; + data: number[]; +} + +export const CountResultBarChart: FC = ({ + label, + variationValues, + data, +}) => { + const chartData = { + labels: variationValues, + datasets: [ + { + label: '', + backgroundColor: COLORS.slice( + 0, + variationValues.length % COLORS.length + ), + borderWidth: 1, + data: data, + }, + ], + }; + const options = { + legend: { + display: false, + }, + title: { + display: true, + text: label, + fontStyle: 'normal', + }, + scales: { + yAxes: [ + { + ticks: { + beginAtZero: true, + }, + }, + ], + }, + }; + + return ( +
+ +
+ ); +}; diff --git a/ui/web-v2/apps/admin/src/components/CountResultPieChart/index.tsx b/ui/web-v2/apps/admin/src/components/CountResultPieChart/index.tsx new file mode 100644 index 000000000..dadd3d2e4 --- /dev/null +++ b/ui/web-v2/apps/admin/src/components/CountResultPieChart/index.tsx @@ -0,0 +1,43 @@ +import { jsx } from '@emotion/react'; +import { FC } from 'react'; +import { Pie } from 'react-chartjs-2'; + +import { COLORS } from '../../constants/colorPattern'; + +interface CountResultPieChartProps { + label: string; + variationValues: string[]; + data: number[]; +} + +export const CountResultPieChart: FC = ({ + label, + variationValues, + data, +}) => { + const chartData = { + labels: variationValues, + datasets: [ + { + data: data, + backgroundColor: COLORS.slice( + 0, + variationValues.length % COLORS.length + ), + }, + ], + }; + const options = { + title: { + display: true, + text: label, + fontStyle: 'normal', + }, + }; + + return ( +
+ +
+ ); +}; diff --git a/ui/web-v2/apps/admin/src/components/CreatableSelect/index.tsx b/ui/web-v2/apps/admin/src/components/CreatableSelect/index.tsx new file mode 100644 index 000000000..10fb9d744 --- /dev/null +++ b/ui/web-v2/apps/admin/src/components/CreatableSelect/index.tsx @@ -0,0 +1,88 @@ +import React, { FC, memo } from 'react'; +import ReactCreatableSelect from 'react-select/creatable'; + +export interface Option { + value: string; + label: string; +} + +export interface CreatableSelectProps { + options?: Option[]; + disabled?: boolean; + isSearchable?: boolean; + defaultValues?: Option[]; + className?: string; + onChange: (options: Option[]) => void; +} + +export const CreatableSelect: FC = memo( + ({ disabled, isSearchable, className, onChange, options, defaultValues }) => { + const textColor = '#3F3F46'; + const textColorDisabled = '#6B7280'; + const backgroundColor = 'white'; + const backgroundColorDisabled = '#F3F4F6'; + const borderColor = '#D1D5DB'; + const fontSize = '0.875rem'; + const lineHeight = '1.25rem'; + const minHeight = '2.5rem'; + const colourStyles = { + control: (styles, { isDisabled }) => ({ + ...styles, + backgroundColor: isDisabled ? backgroundColorDisabled : backgroundColor, + borderColor: borderColor, + '&:hover': { + borderColor: borderColor, + }, + fontSize: fontSize, + lineHeight: lineHeight, + minHeight: minHeight, + '*': { + boxShadow: 'none !important', + }, + }), + option: (styles, { isFocused }) => { + return { + ...styles, + backgroundColor: isFocused ? backgroundColor : null, + color: textColor, + }; + }, + menu: (base) => ({ + ...base, + fontSize: fontSize, + lineHeight: lineHeight, + color: textColor, + }), + multiValueLabel: (base, { isDisabled }) => ({ + ...base, + color: isDisabled ? textColorDisabled : textColor, + }), + singleValue: (styles, { isDisabled }) => { + return { + ...styles, + color: isDisabled ? 
textColorDisabled : textColor, + }; + }, + multiValueRemove: (base) => { + return { ...base, display: 'none' }; + }, + }; + return ( + + ); + } +); diff --git a/ui/web-v2/apps/admin/src/components/DatetimePicker/index.tsx b/ui/web-v2/apps/admin/src/components/DatetimePicker/index.tsx new file mode 100644 index 000000000..f00533a14 --- /dev/null +++ b/ui/web-v2/apps/admin/src/components/DatetimePicker/index.tsx @@ -0,0 +1,38 @@ +import React, { FC, memo } from 'react'; +import ReactDatePicker from 'react-datepicker'; +import { Controller, useFormContext } from 'react-hook-form'; + +import { classNames } from '../../utils/css'; +import 'react-datepicker/dist/react-datepicker.css'; + +export interface DatetimePickerProps { + name: string; + disabled?: boolean; +} + +export const DatetimePicker: FC = memo( + ({ name, disabled }) => { + const methods = useFormContext(); + const { control } = methods; + + return ( + ( + + )} + /> + ); + } +); diff --git a/ui/web-v2/apps/admin/src/components/DetailSkeleton/index.tsx b/ui/web-v2/apps/admin/src/components/DetailSkeleton/index.tsx new file mode 100644 index 000000000..efe7410a8 --- /dev/null +++ b/ui/web-v2/apps/admin/src/components/DetailSkeleton/index.tsx @@ -0,0 +1,19 @@ +import { FC, memo } from 'react'; + +import { classNames } from '../../utils/css'; + +export const DetailSkeleton: FC = memo(() => { + return ( +
+
+
+
+
+ ); +}); diff --git a/ui/web-v2/apps/admin/src/components/EnvironmentAddForm/index.tsx b/ui/web-v2/apps/admin/src/components/EnvironmentAddForm/index.tsx new file mode 100644 index 000000000..311272ee0 --- /dev/null +++ b/ui/web-v2/apps/admin/src/components/EnvironmentAddForm/index.tsx @@ -0,0 +1,169 @@ +import { Dialog } from '@headlessui/react'; +import { FC, memo, useEffect } from 'react'; +import { Controller, useFormContext } from 'react-hook-form'; +import { useIntl } from 'react-intl'; +import { shallowEqual, useDispatch, useSelector } from 'react-redux'; + +import { messages } from '../../lang/messages'; +import { AppState } from '../../modules'; +import { + selectAll as selectAllProjects, + listProjects, +} from '../../modules/projects'; +import { Project } from '../../proto/environment/project_pb'; +import { AppDispatch } from '../../store'; +import { Select } from '../Select'; + +export interface EnvironmentAddFormProps { + onSubmit: () => void; + onCancel: () => void; +} + +export const EnvironmentAddForm: FC = memo( + ({ onSubmit, onCancel }) => { + const { formatMessage: f } = useIntl(); + const methods = useFormContext(); + const { + control, + register, + formState: { errors, isValid, isSubmitted }, + } = methods; + const projects = useSelector( + (state) => selectAllProjects(state.projects), + shallowEqual + ); + const isLoadingProjects = useSelector( + (state) => state.projects.loading, + shallowEqual + ); + const projectIdOptions = projects.map((project) => { + return { + value: project.id, + label: project.id, + }; + }); + const dispatch = useDispatch(); + useEffect(() => { + dispatch( + listProjects({ + pageSize: 0, + cursor: '', + }) + ); + }, [dispatch]); + return ( +
+
+
+
+
+ + {f(messages.adminEnvironment.add.header.title)} + +
+
+

+ {f(messages.adminEnvironment.add.header.description)} +

+
+
+
+
+
+ +
+ +

+ {errors.id && ( + {errors.id.message} + )} +

+
+
+
+ + { + return ( +