diff --git a/provider/cmd/pulumi-resource-confluentcloud/bridge-metadata.json b/provider/cmd/pulumi-resource-confluentcloud/bridge-metadata.json index b787ba91..70642ff2 100644 --- a/provider/cmd/pulumi-resource-confluentcloud/bridge-metadata.json +++ b/provider/cmd/pulumi-resource-confluentcloud/bridge-metadata.json @@ -195,6 +195,14 @@ "resource_name": {} } }, + "confluent_flink_compute_pool": { + "current": "confluentcloud:index/flinkComputePool:FlinkComputePool", + "fields": { + "environment": { + "maxItemsOne": true + } + } + }, "confluent_identity_pool": { "current": "confluentcloud:index/identityPool:IdentityPool", "fields": { @@ -849,6 +857,30 @@ "version": {} } }, + "confluent_schema_exporter": { + "current": "confluentcloud:index/schemaExporter:SchemaExporter", + "fields": { + "credentials": { + "maxItemsOne": true + }, + "destination_schema_registry_cluster": { + "maxItemsOne": true, + "elem": { + "fields": { + "credentials": { + "maxItemsOne": true + } + } + } + }, + "schema_registry_cluster": { + "maxItemsOne": true + }, + "subjects": { + "maxItemsOne": false + } + } + }, "confluent_schema_registry_cluster": { "current": "confluentcloud:index/schemaRegistryCluster:SchemaRegistryCluster", "fields": { @@ -1109,6 +1141,14 @@ } } }, + "confluent_flink_compute_pool": { + "current": "confluentcloud:index/getFlinkComputePool:getFlinkComputePool", + "fields": { + "environment": { + "maxItemsOne": true + } + } + }, "confluent_identity_pool": { "current": "confluentcloud:index/getIdentityPool:getIdentityPool", "fields": { @@ -1548,6 +1588,7 @@ "confluentcloud:index/clusterLink:ClusterLink": "confluent_cluster_link", "confluentcloud:index/connector:Connector": "confluent_connector", "confluentcloud:index/environment:Environment": "confluent_environment", + "confluentcloud:index/flinkComputePool:FlinkComputePool": "confluent_flink_compute_pool", "confluentcloud:index/identityPool:IdentityPool": "confluent_identity_pool", 
"confluentcloud:index/identityProvider:IdentityProvider": "confluent_identity_provider", "confluentcloud:index/invitation:Invitation": "confluent_invitation", @@ -1567,6 +1608,7 @@ "confluentcloud:index/privateLinkAttachmentConnection:PrivateLinkAttachmentConnection": "confluent_private_link_attachment_connection", "confluentcloud:index/roleBinding:RoleBinding": "confluent_role_binding", "confluentcloud:index/schema:Schema": "confluent_schema", + "confluentcloud:index/schemaExporter:SchemaExporter": "confluent_schema_exporter", "confluentcloud:index/schemaRegistryCluster:SchemaRegistryCluster": "confluent_schema_registry_cluster", "confluentcloud:index/schemaRegistryClusterConfig:SchemaRegistryClusterConfig": "confluent_schema_registry_cluster_config", "confluentcloud:index/schemaRegistryClusterMode:SchemaRegistryClusterMode": "confluent_schema_registry_cluster_mode", @@ -1584,6 +1626,7 @@ "confluentcloud:index/getByokKey:getByokKey": "confluent_byok_key", "confluentcloud:index/getEnvironment:getEnvironment": "confluent_environment", "confluentcloud:index/getEnvironments:getEnvironments": "confluent_environments", + "confluentcloud:index/getFlinkComputePool:getFlinkComputePool": "confluent_flink_compute_pool", "confluentcloud:index/getIdentityPool:getIdentityPool": "confluent_identity_pool", "confluentcloud:index/getIdentityProvider:getIdentityProvider": "confluent_identity_provider", "confluentcloud:index/getInvitation:getInvitation": "confluent_invitation", @@ -1710,6 +1753,9 @@ "confluentcloud:index/PrivateLinkAttachmentGcp:PrivateLinkAttachmentGcp": { "privateServiceConnectServiceAttachment": "private_service_connect_service_attachment" }, + "confluentcloud:index/SchemaExporterDestinationSchemaRegistryCluster:SchemaExporterDestinationSchemaRegistryCluster": { + "restEndpoint": "rest_endpoint" + }, "confluentcloud:index/SchemaSchemaReference:SchemaSchemaReference": { "subjectName": "subject_name" }, @@ -1753,6 +1799,14 @@ "displayName": "display_name", 
"resourceName": "resource_name" }, + "confluentcloud:index/flinkComputePool:FlinkComputePool": { + "apiVersion": "api_version", + "currentCfu": "current_cfu", + "displayName": "display_name", + "maxCfu": "max_cfu", + "resourceName": "resource_name", + "restEndpoint": "rest_endpoint" + }, "confluentcloud:index/getBusinessMetadata:getBusinessMetadata": { "attributeDefinitions": "attribute_definition", "restEndpoint": "rest_endpoint", @@ -1785,6 +1839,14 @@ "displayName": "display_name", "resourceName": "resource_name" }, + "confluentcloud:index/getFlinkComputePool:getFlinkComputePool": { + "apiVersion": "api_version", + "currentCfu": "current_cfu", + "displayName": "display_name", + "maxCfu": "max_cfu", + "resourceName": "resource_name", + "restEndpoint": "rest_endpoint" + }, "confluentcloud:index/getIdentityPool:getIdentityPool": { "displayName": "display_name", "identityClaim": "identity_claim", @@ -2146,6 +2208,14 @@ "schemaRegistryCluster": "schema_registry_cluster", "subjectName": "subject_name" }, + "confluentcloud:index/schemaExporter:SchemaExporter": { + "contextType": "context_type", + "destinationSchemaRegistryCluster": "destination_schema_registry_cluster", + "resetOnUpdate": "reset_on_update", + "restEndpoint": "rest_endpoint", + "schemaRegistryCluster": "schema_registry_cluster", + "subjectRenameFormat": "subject_rename_format" + }, "confluentcloud:index/schemaRegistryCluster:SchemaRegistryCluster": { "apiVersion": "api_version", "displayName": "display_name", diff --git a/provider/cmd/pulumi-resource-confluentcloud/schema.json b/provider/cmd/pulumi-resource-confluentcloud/schema.json index 16a4cb10..98341e5c 100644 --- a/provider/cmd/pulumi-resource-confluentcloud/schema.json +++ b/provider/cmd/pulumi-resource-confluentcloud/schema.json @@ -392,7 +392,7 @@ }, "secret": { "type": "string", - "description": "The Kafka API Secret.\n\n\u003e **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are 
in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).\n", + "description": "The Kafka API Secret.\n", "secret": true } }, @@ -438,7 +438,7 @@ }, "secret": { "type": "string", - "description": "The Kafka API Secret.\n\n\u003e **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).\n", + "description": "The Kafka API Secret.\n", "secret": true } }, @@ -484,7 +484,7 @@ }, "secret": { "type": "string", - "description": "The Kafka API Secret.\n\n\u003e **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).\n", + "description": "The Kafka API Secret.\n", "secret": true } }, @@ -530,7 +530,7 @@ }, "secret": { "type": "string", - "description": "The Kafka API Secret.\n\n\u003e **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).\n", + "description": "The Kafka API Secret.\n", "secret": true } }, @@ -566,6 +566,19 @@ "id" ] }, + "confluentcloud:index/FlinkComputePoolEnvironment:FlinkComputePoolEnvironment": { + "properties": { + "id": { + "type": "string", + "description": "The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`.\n", + "willReplaceOnChanges": true + } + }, + "type": "object", + "required": [ + "id" + ] + }, "confluentcloud:index/IdentityPoolIdentityProvider:IdentityPoolIdentityProvider": { "properties": { "id": { @@ -1472,6 +1485,74 @@ "secret" ] }, + 
"confluentcloud:index/SchemaExporterCredentials:SchemaExporterCredentials": { + "properties": { + "key": { + "type": "string", + "description": "The Schema Registry API Key.\n", + "secret": true + }, + "secret": { + "type": "string", + "description": "The Schema Registry API Secret.\n", + "secret": true + } + }, + "type": "object", + "required": [ + "key", + "secret" + ] + }, + "confluentcloud:index/SchemaExporterDestinationSchemaRegistryCluster:SchemaExporterDestinationSchemaRegistryCluster": { + "properties": { + "credentials": { + "$ref": "#/types/confluentcloud:index/SchemaExporterDestinationSchemaRegistryClusterCredentials:SchemaExporterDestinationSchemaRegistryClusterCredentials", + "secret": true + }, + "restEndpoint": { + "type": "string", + "description": "The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`.\n" + } + }, + "type": "object", + "required": [ + "credentials", + "restEndpoint" + ] + }, + "confluentcloud:index/SchemaExporterDestinationSchemaRegistryClusterCredentials:SchemaExporterDestinationSchemaRegistryClusterCredentials": { + "properties": { + "key": { + "type": "string", + "description": "The Schema Registry API Key.\n", + "secret": true + }, + "secret": { + "type": "string", + "description": "The Schema Registry API Secret.\n", + "secret": true + } + }, + "type": "object", + "required": [ + "key", + "secret" + ] + }, + "confluentcloud:index/SchemaExporterSchemaRegistryCluster:SchemaExporterSchemaRegistryCluster": { + "properties": { + "id": { + "type": "string", + "description": "The ID of the Schema Registry cluster, for example, `lsrc-abc123`.\n", + "willReplaceOnChanges": true + } + }, + "type": "object", + "required": [ + "id" + ] + }, + "confluentcloud:index/SchemaRegistryClusterConfigCredentials:SchemaRegistryClusterConfigCredentials": { + "properties": { + "key": { @@ -1953,6 +2034,18 @@ } } }, + 
"confluentcloud:index/getFlinkComputePoolEnvironment:getFlinkComputePoolEnvironment": { + "properties": { + "id": { + "type": "string", + "description": "The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`.\n\n\u003e **Note:** Exactly one from the `id` and `display_name` attributes must be specified.\n" + } + }, + "type": "object", + "required": [ + "id" + ] + }, "confluentcloud:index/getIdentityPoolIdentityProvider:getIdentityPoolIdentityProvider": { "properties": { "id": { @@ -4102,6 +4195,143 @@ "type": "object" } }, + "confluentcloud:index/flinkComputePool:FlinkComputePool": { + "description": "{{% examples %}}\n## Example Usage\n{{% example %}}\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as confluentcloud from \"@pulumi/confluentcloud\";\n\nconst development = new confluentcloud.Environment(\"development\", {});\nconst main = new confluentcloud.FlinkComputePool(\"main\", {\n displayName: \"standard_compute_pool\",\n cloud: \"AWS\",\n region: \"us-east-1\",\n maxCfu: 5,\n environment: {\n id: development.id,\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_confluentcloud as confluentcloud\n\ndevelopment = confluentcloud.Environment(\"development\")\nmain = confluentcloud.FlinkComputePool(\"main\",\n display_name=\"standard_compute_pool\",\n cloud=\"AWS\",\n region=\"us-east-1\",\n max_cfu=5,\n environment=confluentcloud.FlinkComputePoolEnvironmentArgs(\n id=development.id,\n ))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing ConfluentCloud = Pulumi.ConfluentCloud;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var development = new ConfluentCloud.Environment(\"development\");\n\n var main = new ConfluentCloud.FlinkComputePool(\"main\", new()\n {\n DisplayName = \"standard_compute_pool\",\n Cloud = \"AWS\",\n Region = \"us-east-1\",\n MaxCfu = 5,\n Environment = new ConfluentCloud.Inputs.FlinkComputePoolEnvironmentArgs\n {\n Id = 
development.Id,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-confluentcloud/sdk/go/confluentcloud\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tdevelopment, err := confluentcloud.NewEnvironment(ctx, \"development\", nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = confluentcloud.NewFlinkComputePool(ctx, \"main\", \u0026confluentcloud.FlinkComputePoolArgs{\n\t\t\tDisplayName: pulumi.String(\"standard_compute_pool\"),\n\t\t\tCloud: pulumi.String(\"AWS\"),\n\t\t\tRegion: pulumi.String(\"us-east-1\"),\n\t\t\tMaxCfu: pulumi.Int(5),\n\t\t\tEnvironment: \u0026confluentcloud.FlinkComputePoolEnvironmentArgs{\n\t\t\t\tId: development.ID(),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.confluentcloud.Environment;\nimport com.pulumi.confluentcloud.FlinkComputePool;\nimport com.pulumi.confluentcloud.FlinkComputePoolArgs;\nimport com.pulumi.confluentcloud.inputs.FlinkComputePoolEnvironmentArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var development = new Environment(\"development\");\n\n var main = new FlinkComputePool(\"main\", FlinkComputePoolArgs.builder() \n .displayName(\"standard_compute_pool\")\n .cloud(\"AWS\")\n .region(\"us-east-1\")\n .maxCfu(5)\n .environment(FlinkComputePoolEnvironmentArgs.builder()\n .id(development.id())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n development:\n type: confluentcloud:Environment\n main:\n type: confluentcloud:FlinkComputePool\n 
properties:\n displayName: standard_compute_pool\n cloud: AWS\n region: us-east-1\n maxCfu: 5\n environment:\n id: ${development.id}\n```\n{{% /example %}}\n{{% /examples %}}\n\n## Import\n\nYou can import a Flink Compute Pool by using Environment ID and Flink Compute Pool ID, in the format `\u003cEnvironment ID\u003e/\u003cFlink Compute Pool ID\u003e`. The following example shows how to import a Flink Compute Pool$ export CONFLUENT_CLOUD_API_KEY=\"\u003ccloud_api_key\u003e\" $ export CONFLUENT_CLOUD_API_SECRET=\"\u003ccloud_api_secret\u003e\"\n\n```sh\n $ pulumi import confluentcloud:index/flinkComputePool:FlinkComputePool main env-abc123/lfcp-abc123\n```\n\n !\u003e **Warning:** Do not forget to delete terminal command history afterwards for security purposes. ", + "properties": { + "apiVersion": { + "type": "string", + "description": "(Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`.\n" + }, + "cloud": { + "type": "string", + "description": "The cloud service provider that runs the Flink Compute Pool.\n" + }, + "currentCfu": { + "type": "integer", + "description": "(Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool.\n" + }, + "displayName": { + "type": "string", + "description": "The name of the Flink Compute Pool.\n" + }, + "environment": { + "$ref": "#/types/confluentcloud:index/FlinkComputePoolEnvironment:FlinkComputePoolEnvironment", + "description": "Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.\n" + }, + "kind": { + "type": "string", + "description": "(Required String) The kind of the Flink Compute Pool, for example, `ComputePool`.\n" + }, + "maxCfu": { + "type": "integer", + "description": "Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. 
The accepted values are: `5` and `10`.\n" + }, + "region": { + "type": "string", + "description": "The cloud service provider region that hosts the Flink Compute Pool.\n" + }, + "resourceName": { + "type": "string", + "description": "(Required String) The Confluent Resource Name of the Flink Compute Pool.\n" + }, + "restEndpoint": { + "type": "string", + "description": "(Required String) The API endpoint of the Flink Compute Pool.\n" + } + }, + "required": [ + "apiVersion", + "cloud", + "currentCfu", + "displayName", + "environment", + "kind", + "maxCfu", + "region", + "resourceName", + "restEndpoint" + ], + "inputProperties": { + "cloud": { + "type": "string", + "description": "The cloud service provider that runs the Flink Compute Pool.\n", + "willReplaceOnChanges": true + }, + "displayName": { + "type": "string", + "description": "The name of the Flink Compute Pool.\n" + }, + "environment": { + "$ref": "#/types/confluentcloud:index/FlinkComputePoolEnvironment:FlinkComputePoolEnvironment", + "description": "Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.\n", + "willReplaceOnChanges": true + }, + "maxCfu": { + "type": "integer", + "description": "Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. 
The accepted values are: `5` and `10`.\n" + }, + "region": { + "type": "string", + "description": "The cloud service provider region that hosts the Flink Compute Pool.\n", + "willReplaceOnChanges": true + } + }, + "requiredInputs": [ + "cloud", + "displayName", + "environment", + "region" + ], + "stateInputs": { + "description": "Input properties used for looking up and filtering FlinkComputePool resources.\n", + "properties": { + "apiVersion": { + "type": "string", + "description": "(Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`.\n" + }, + "cloud": { + "type": "string", + "description": "The cloud service provider that runs the Flink Compute Pool.\n", + "willReplaceOnChanges": true + }, + "currentCfu": { + "type": "integer", + "description": "(Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool.\n" + }, + "displayName": { + "type": "string", + "description": "The name of the Flink Compute Pool.\n" + }, + "environment": { + "$ref": "#/types/confluentcloud:index/FlinkComputePoolEnvironment:FlinkComputePoolEnvironment", + "description": "Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.\n", + "willReplaceOnChanges": true + }, + "kind": { + "type": "string", + "description": "(Required String) The kind of the Flink Compute Pool, for example, `ComputePool`.\n" + }, + "maxCfu": { + "type": "integer", + "description": "Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. 
The accepted values are: `5` and `10`.\n" + }, + "region": { + "type": "string", + "description": "The cloud service provider region that hosts the Flink Compute Pool.\n", + "willReplaceOnChanges": true + }, + "resourceName": { + "type": "string", + "description": "(Required String) The Confluent Resource Name of the Flink Compute Pool.\n" + }, + "restEndpoint": { + "type": "string", + "description": "(Required String) The API endpoint of the Flink Compute Pool.\n" + } + }, + "type": "object" + } + }, "confluentcloud:index/identityPool:IdentityPool": { "description": "[![General Availability](https://img.shields.io/badge/Lifecycle%20Stage-General%20Availability-%2345c6e8)](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy)\n\n`confluentcloud.IdentityPool` provides an Identity Pool resource that enables creating, editing, and deleting identity pools on Confluent Cloud.\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n### Example Identity Pool to be used with Azure AD\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as confluentcloud from \"@pulumi/confluentcloud\";\n\nconst azure = new confluentcloud.IdentityProvider(\"azure\", {\n displayName: \"My OIDC Provider: Azure AD\",\n description: \"My description\",\n issuer: \"https://login.microsoftonline.com/{tenant_id}/v2.0\",\n jwksUri: \"https://login.microsoftonline.com/common/discovery/v2.0/keys\",\n});\nconst example = new confluentcloud.IdentityPool(\"example\", {\n identityProvider: {\n id: azure.id,\n },\n displayName: \"My Identity Pool\",\n description: \"Prod Access to Kafka clusters to Release Engineering\",\n identityClaim: \"claims.sub\",\n filter: \"claims.aud==\\\"confluent\\\" \u0026\u0026 claims.group!=\\\"invalid_group\\\"\",\n});\n```\n```python\nimport pulumi\nimport pulumi_confluentcloud as confluentcloud\n\nazure = confluentcloud.IdentityProvider(\"azure\",\n display_name=\"My OIDC Provider: Azure AD\",\n description=\"My 
description\",\n issuer=\"https://login.microsoftonline.com/{tenant_id}/v2.0\",\n jwks_uri=\"https://login.microsoftonline.com/common/discovery/v2.0/keys\")\nexample = confluentcloud.IdentityPool(\"example\",\n identity_provider=confluentcloud.IdentityPoolIdentityProviderArgs(\n id=azure.id,\n ),\n display_name=\"My Identity Pool\",\n description=\"Prod Access to Kafka clusters to Release Engineering\",\n identity_claim=\"claims.sub\",\n filter=\"claims.aud==\\\"confluent\\\" \u0026\u0026 claims.group!=\\\"invalid_group\\\"\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing ConfluentCloud = Pulumi.ConfluentCloud;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var azure = new ConfluentCloud.IdentityProvider(\"azure\", new()\n {\n DisplayName = \"My OIDC Provider: Azure AD\",\n Description = \"My description\",\n Issuer = \"https://login.microsoftonline.com/{tenant_id}/v2.0\",\n JwksUri = \"https://login.microsoftonline.com/common/discovery/v2.0/keys\",\n });\n\n var example = new ConfluentCloud.IdentityPool(\"example\", new()\n {\n IdentityProvider = new ConfluentCloud.Inputs.IdentityPoolIdentityProviderArgs\n {\n Id = azure.Id,\n },\n DisplayName = \"My Identity Pool\",\n Description = \"Prod Access to Kafka clusters to Release Engineering\",\n IdentityClaim = \"claims.sub\",\n Filter = \"claims.aud==\\\"confluent\\\" \u0026\u0026 claims.group!=\\\"invalid_group\\\"\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-confluentcloud/sdk/go/confluentcloud\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tazure, err := confluentcloud.NewIdentityProvider(ctx, \"azure\", \u0026confluentcloud.IdentityProviderArgs{\n\t\t\tDisplayName: pulumi.String(\"My OIDC Provider: Azure AD\"),\n\t\t\tDescription: pulumi.String(\"My description\"),\n\t\t\tIssuer: 
pulumi.String(\"https://login.microsoftonline.com/{tenant_id}/v2.0\"),\n\t\t\tJwksUri: pulumi.String(\"https://login.microsoftonline.com/common/discovery/v2.0/keys\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = confluentcloud.NewIdentityPool(ctx, \"example\", \u0026confluentcloud.IdentityPoolArgs{\n\t\t\tIdentityProvider: \u0026confluentcloud.IdentityPoolIdentityProviderArgs{\n\t\t\t\tId: azure.ID(),\n\t\t\t},\n\t\t\tDisplayName: pulumi.String(\"My Identity Pool\"),\n\t\t\tDescription: pulumi.String(\"Prod Access to Kafka clusters to Release Engineering\"),\n\t\t\tIdentityClaim: pulumi.String(\"claims.sub\"),\n\t\t\tFilter: pulumi.String(\"claims.aud==\\\"confluent\\\" \u0026\u0026 claims.group!=\\\"invalid_group\\\"\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.confluentcloud.IdentityProvider;\nimport com.pulumi.confluentcloud.IdentityProviderArgs;\nimport com.pulumi.confluentcloud.IdentityPool;\nimport com.pulumi.confluentcloud.IdentityPoolArgs;\nimport com.pulumi.confluentcloud.inputs.IdentityPoolIdentityProviderArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var azure = new IdentityProvider(\"azure\", IdentityProviderArgs.builder() \n .displayName(\"My OIDC Provider: Azure AD\")\n .description(\"My description\")\n .issuer(\"https://login.microsoftonline.com/{tenant_id}/v2.0\")\n .jwksUri(\"https://login.microsoftonline.com/common/discovery/v2.0/keys\")\n .build());\n\n var example = new IdentityPool(\"example\", IdentityPoolArgs.builder() \n 
.identityProvider(IdentityPoolIdentityProviderArgs.builder()\n .id(azure.id())\n .build())\n .displayName(\"My Identity Pool\")\n .description(\"Prod Access to Kafka clusters to Release Engineering\")\n .identityClaim(\"claims.sub\")\n .filter(\"claims.aud==\\\"confluent\\\" \u0026\u0026 claims.group!=\\\"invalid_group\\\"\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n azure:\n type: confluentcloud:IdentityProvider\n properties:\n displayName: 'My OIDC Provider: Azure AD'\n description: My description\n issuer: https://login.microsoftonline.com/{tenant_id}/v2.0\n jwksUri: https://login.microsoftonline.com/common/discovery/v2.0/keys\n example:\n type: confluentcloud:IdentityPool\n properties:\n identityProvider:\n id: ${azure.id}\n displayName: My Identity Pool\n description: Prod Access to Kafka clusters to Release Engineering\n identityClaim: claims.sub\n filter: claims.aud==\"confluent\" \u0026\u0026 claims.group!=\"invalid_group\"\n```\n{{% /example %}}\n{{% example %}}\n### Example Identity Pool to be used with Okta\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as confluentcloud from \"@pulumi/confluentcloud\";\n\nconst okta = new confluentcloud.IdentityProvider(\"okta\", {\n displayName: \"My OIDC Provider: Okta\",\n description: \"My description\",\n issuer: \"https://mycompany.okta.com/oauth2/default\",\n jwksUri: \"https://mycompany.okta.com/oauth2/default/v1/keys\",\n});\nconst example = new confluentcloud.IdentityPool(\"example\", {\n identityProvider: {\n id: okta.id,\n },\n displayName: \"My Identity Pool\",\n description: \"Prod Access to Kafka clusters to Release Engineering\",\n identityClaim: \"claims.sub\",\n filter: \"claims.aud==\\\"confluent\\\" \u0026\u0026 claims.group!=\\\"invalid_group\\\"\",\n});\n```\n```python\nimport pulumi\nimport pulumi_confluentcloud as confluentcloud\n\nokta = confluentcloud.IdentityProvider(\"okta\",\n display_name=\"My OIDC Provider: Okta\",\n description=\"My description\",\n 
issuer=\"https://mycompany.okta.com/oauth2/default\",\n jwks_uri=\"https://mycompany.okta.com/oauth2/default/v1/keys\")\nexample = confluentcloud.IdentityPool(\"example\",\n identity_provider=confluentcloud.IdentityPoolIdentityProviderArgs(\n id=okta.id,\n ),\n display_name=\"My Identity Pool\",\n description=\"Prod Access to Kafka clusters to Release Engineering\",\n identity_claim=\"claims.sub\",\n filter=\"claims.aud==\\\"confluent\\\" \u0026\u0026 claims.group!=\\\"invalid_group\\\"\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing ConfluentCloud = Pulumi.ConfluentCloud;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var okta = new ConfluentCloud.IdentityProvider(\"okta\", new()\n {\n DisplayName = \"My OIDC Provider: Okta\",\n Description = \"My description\",\n Issuer = \"https://mycompany.okta.com/oauth2/default\",\n JwksUri = \"https://mycompany.okta.com/oauth2/default/v1/keys\",\n });\n\n var example = new ConfluentCloud.IdentityPool(\"example\", new()\n {\n IdentityProvider = new ConfluentCloud.Inputs.IdentityPoolIdentityProviderArgs\n {\n Id = okta.Id,\n },\n DisplayName = \"My Identity Pool\",\n Description = \"Prod Access to Kafka clusters to Release Engineering\",\n IdentityClaim = \"claims.sub\",\n Filter = \"claims.aud==\\\"confluent\\\" \u0026\u0026 claims.group!=\\\"invalid_group\\\"\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-confluentcloud/sdk/go/confluentcloud\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tokta, err := confluentcloud.NewIdentityProvider(ctx, \"okta\", \u0026confluentcloud.IdentityProviderArgs{\n\t\t\tDisplayName: pulumi.String(\"My OIDC Provider: Okta\"),\n\t\t\tDescription: pulumi.String(\"My description\"),\n\t\t\tIssuer: pulumi.String(\"https://mycompany.okta.com/oauth2/default\"),\n\t\t\tJwksUri: 
pulumi.String(\"https://mycompany.okta.com/oauth2/default/v1/keys\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = confluentcloud.NewIdentityPool(ctx, \"example\", \u0026confluentcloud.IdentityPoolArgs{\n\t\t\tIdentityProvider: \u0026confluentcloud.IdentityPoolIdentityProviderArgs{\n\t\t\t\tId: okta.ID(),\n\t\t\t},\n\t\t\tDisplayName: pulumi.String(\"My Identity Pool\"),\n\t\t\tDescription: pulumi.String(\"Prod Access to Kafka clusters to Release Engineering\"),\n\t\t\tIdentityClaim: pulumi.String(\"claims.sub\"),\n\t\t\tFilter: pulumi.String(\"claims.aud==\\\"confluent\\\" \u0026\u0026 claims.group!=\\\"invalid_group\\\"\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.confluentcloud.IdentityProvider;\nimport com.pulumi.confluentcloud.IdentityProviderArgs;\nimport com.pulumi.confluentcloud.IdentityPool;\nimport com.pulumi.confluentcloud.IdentityPoolArgs;\nimport com.pulumi.confluentcloud.inputs.IdentityPoolIdentityProviderArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var okta = new IdentityProvider(\"okta\", IdentityProviderArgs.builder() \n .displayName(\"My OIDC Provider: Okta\")\n .description(\"My description\")\n .issuer(\"https://mycompany.okta.com/oauth2/default\")\n .jwksUri(\"https://mycompany.okta.com/oauth2/default/v1/keys\")\n .build());\n\n var example = new IdentityPool(\"example\", IdentityPoolArgs.builder() \n .identityProvider(IdentityPoolIdentityProviderArgs.builder()\n .id(okta.id())\n .build())\n .displayName(\"My Identity Pool\")\n .description(\"Prod Access to Kafka clusters to Release 
Engineering\")\n .identityClaim(\"claims.sub\")\n .filter(\"claims.aud==\\\"confluent\\\" \u0026\u0026 claims.group!=\\\"invalid_group\\\"\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n okta:\n type: confluentcloud:IdentityProvider\n properties:\n displayName: 'My OIDC Provider: Okta'\n description: My description\n issuer: https://mycompany.okta.com/oauth2/default\n jwksUri: https://mycompany.okta.com/oauth2/default/v1/keys\n example:\n type: confluentcloud:IdentityPool\n properties:\n identityProvider:\n id: ${okta.id}\n displayName: My Identity Pool\n description: Prod Access to Kafka clusters to Release Engineering\n identityClaim: claims.sub\n filter: claims.aud==\"confluent\" \u0026\u0026 claims.group!=\"invalid_group\"\n```\n\n{{% /example %}}\n{{% /examples %}}\n## External Documentation\n\n* [Use identity pools with your OAuth provider](https://docs.confluent.io/cloud/current/access-management/authenticate/oauth/identity-pools.html).\n\n\n## Import\n\nYou can import an Identity Pool by using Identity Provider ID and Identity Pool ID, in the format `\u003cIdentity Provider ID\u003e/\u003cIdentity Pool ID\u003e`. The following example shows how to import an Identity Pool$ export CONFLUENT_CLOUD_API_KEY=\"\u003ccloud_api_key\u003e\" $ export CONFLUENT_CLOUD_API_SECRET=\"\u003ccloud_api_secret\u003e\"\n\n```sh\n $ pulumi import confluentcloud:index/identityPool:IdentityPool example op-abc123/pool-xyz456\n```\n\n !\u003e **Warning:** Do not forget to delete terminal command history afterwards for security purposes. 
", "properties": { @@ -4372,7 +4602,7 @@ } }, "confluentcloud:index/kafkaAcl:KafkaAcl": { - "description": "\n\n\n## Import\n\nYou can import Kafka ACLs by using the Kafka cluster ID and attributes of `confluent_kafka_acl` resource in the format `\u003cKafka cluster ID\u003e/\u003cKafka ACL resource type\u003e#\u003cKafka ACL resource name\u003e#\u003cKafka ACL pattern type\u003e#\u003cKafka ACL principal\u003e#\u003cKafka ACL host\u003e#\u003cKafka ACL operation\u003e#\u003cKafka ACL permission\u003e`, for exampleOption #1Manage multiple Kafka clusters in the same Terraform workspace $ export CONFLUENT_CLOUD_API_KEY=\"\u003ccloud_api_key\u003e\" $ export CONFLUENT_CLOUD_API_SECRET=\"\u003ccloud_api_secret\u003e\" $ export IMPORT_KAFKA_API_KEY=\"\u003ckafka_api_key\u003e\" $ export IMPORT_KAFKA_API_SECRET=\"\u003ckafka_api_secret\u003e\" $ export IMPORT_KAFKA_REST_ENDPOINT=\"\u003ckafka_rest_endpoint\u003e\"\n\n```sh\n $ pulumi import confluentcloud:index/kafkaAcl:KafkaAcl describe-cluster \"lkc-12345/CLUSTER#kafka-cluster#LITERAL#User:sa-xyz123#*#DESCRIBE#ALLOW\"\n```\n\n Option #2Manage a single Kafka cluster in the same Terraform workspace $ export CONFLUENT_CLOUD_API_KEY=\"\u003ccloud_api_key\u003e\" $ export CONFLUENT_CLOUD_API_SECRET=\"\u003ccloud_api_secret\u003e\"\n\n```sh\n $ pulumi import confluentcloud:index/kafkaAcl:KafkaAcl describe-cluster \"lkc-12345/CLUSTER#kafka-cluster#LITERAL#User:sa-xyz123#*#DESCRIBE#ALLOW\"\n```\n\n !\u003e **Warning:** Do not forget to delete terminal command history afterwards for security purposes. 
", + "description": "\n\n\n## Import\n\nYou can import Kafka ACLs by using the Kafka cluster ID and attributes of `confluent_kafka_acl` resource in the format `\u003cKafka cluster ID\u003e/\u003cKafka ACL resource type\u003e#\u003cKafka ACL resource name\u003e#\u003cKafka ACL pattern type\u003e#\u003cKafka ACL principal\u003e#\u003cKafka ACL host\u003e#\u003cKafka ACL operation\u003e#\u003cKafka ACL permission\u003e`, for exampleOption #1Manage multiple Kafka clusters in the same Terraform workspace $ export IMPORT_KAFKA_API_KEY=\"\u003ckafka_api_key\u003e\" $ export IMPORT_KAFKA_API_SECRET=\"\u003ckafka_api_secret\u003e\" $ export IMPORT_KAFKA_REST_ENDPOINT=\"\u003ckafka_rest_endpoint\u003e\"\n\n```sh\n $ pulumi import confluentcloud:index/kafkaAcl:KafkaAcl describe-cluster \"lkc-12345/CLUSTER#kafka-cluster#LITERAL#User:sa-xyz123#*#DESCRIBE#ALLOW\"\n```\n\n Option #2Manage a single Kafka cluster in the same Terraform workspace $ export CONFLUENT_CLOUD_API_KEY=\"\u003ccloud_api_key\u003e\" $ export CONFLUENT_CLOUD_API_SECRET=\"\u003ccloud_api_secret\u003e\"\n\n```sh\n $ pulumi import confluentcloud:index/kafkaAcl:KafkaAcl describe-cluster \"lkc-12345/CLUSTER#kafka-cluster#LITERAL#User:sa-xyz123#*#DESCRIBE#ALLOW\"\n```\n\n !\u003e **Warning:** Do not forget to delete terminal command history afterwards for security purposes. 
", "properties": { "credentials": { "$ref": "#/types/confluentcloud:index/KafkaAclCredentials:KafkaAclCredentials", @@ -6468,6 +6698,197 @@ "type": "object" } }, + "confluentcloud:index/schemaExporter:SchemaExporter": { + "description": "\n\n\n## Import\n\nYou can import a Schema Exporter by using the Schema Registry cluster ID, Schema Exporter name in the format `\u003cSchema Registry cluster ID\u003e/\u003cSchema Exporter name\u003e`, for example$ export IMPORT_SCHEMA_REGISTRY_API_KEY=\"\u003cschema_registry_api_key\u003e\" $ export IMPORT_SCHEMA_REGISTRY_API_SECRET=\"\u003cschema_registry_api_secret\u003e\" $ export IMPORT_SCHEMA_REGISTRY_REST_ENDPOINT=\"\u003cschema_registry_rest_endpoint\u003e\"\n\n```sh\n $ pulumi import confluentcloud:index/schemaExporter:SchemaExporter main lsrc-8wrx70/test-exporter\n```\n\n !\u003e **Warning:** Do not forget to delete terminal command history afterwards for security purposes. ", + "properties": { + "config": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "Block for custom *nonsensitive* configuration properties:\n" + }, + "context": { + "type": "string", + "description": "Customized context of the exporter if `context_type` is set to `CUSTOM`.\n" + }, + "contextType": { + "type": "string", + "description": "Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. 
Defaults to `AUTO`.\n" + }, + "credentials": { + "$ref": "#/types/confluentcloud:index/SchemaExporterCredentials:SchemaExporterCredentials", + "description": "The Cluster API Credentials.\n", + "secret": true + }, + "destinationSchemaRegistryCluster": { + "$ref": "#/types/confluentcloud:index/SchemaExporterDestinationSchemaRegistryCluster:SchemaExporterDestinationSchemaRegistryCluster" + }, + "name": { + "type": "string", + "description": "The configuration setting name.\n" + }, + "resetOnUpdate": { + "type": "boolean", + "description": "The flag to control whether to reset the exporter when updating configs. Defaults to `false`.\n" + }, + "restEndpoint": { + "type": "string", + "description": "The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`).\n" + }, + "schemaRegistryCluster": { + "$ref": "#/types/confluentcloud:index/SchemaExporterSchemaRegistryCluster:SchemaExporterSchemaRegistryCluster" + }, + "status": { + "type": "string", + "description": "The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`.\n" + }, + "subjectRenameFormat": { + "type": "string", + "description": "Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. 
For example, `dc_${subject}` for the subject orders will map to the destination subject name `dc_orders`.\n" + }, + "subjects": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Name of each exporter subject.\n" + } + }, + "required": [ + "config", + "context", + "contextType", + "destinationSchemaRegistryCluster", + "name", + "status", + "subjectRenameFormat", + "subjects" + ], + "inputProperties": { + "config": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "Block for custom *nonsensitive* configuration properties:\n" + }, + "context": { + "type": "string", + "description": "Customized context of the exporter if `context_type` is set to `CUSTOM`.\n" + }, + "contextType": { + "type": "string", + "description": "Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`.\n" + }, + "credentials": { + "$ref": "#/types/confluentcloud:index/SchemaExporterCredentials:SchemaExporterCredentials", + "description": "The Cluster API Credentials.\n", + "secret": true + }, + "destinationSchemaRegistryCluster": { + "$ref": "#/types/confluentcloud:index/SchemaExporterDestinationSchemaRegistryCluster:SchemaExporterDestinationSchemaRegistryCluster" + }, + "name": { + "type": "string", + "description": "The configuration setting name.\n", + "willReplaceOnChanges": true + }, + "resetOnUpdate": { + "type": "boolean", + "description": "The flag to control whether to reset the exporter when updating configs. 
Defaults to `false`.\n" + }, + "restEndpoint": { + "type": "string", + "description": "The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`).\n", + "willReplaceOnChanges": true + }, + "schemaRegistryCluster": { + "$ref": "#/types/confluentcloud:index/SchemaExporterSchemaRegistryCluster:SchemaExporterSchemaRegistryCluster", + "willReplaceOnChanges": true + }, + "status": { + "type": "string", + "description": "The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`.\n" + }, + "subjectRenameFormat": { + "type": "string", + "description": "Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dc_orders`.\n" + }, + "subjects": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Name of each exporter subject.\n" + } + }, + "requiredInputs": [ + "destinationSchemaRegistryCluster" + ], + "stateInputs": { + "description": "Input properties used for looking up and filtering SchemaExporter resources.\n", + "properties": { + "config": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "Block for custom *nonsensitive* configuration properties:\n" + }, + "context": { + "type": "string", + "description": "Customized context of the exporter if `context_type` is set to `CUSTOM`.\n" + }, + "contextType": { + "type": "string", + "description": "Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. 
Defaults to `AUTO`.\n" + }, + "credentials": { + "$ref": "#/types/confluentcloud:index/SchemaExporterCredentials:SchemaExporterCredentials", + "description": "The Cluster API Credentials.\n", + "secret": true + }, + "destinationSchemaRegistryCluster": { + "$ref": "#/types/confluentcloud:index/SchemaExporterDestinationSchemaRegistryCluster:SchemaExporterDestinationSchemaRegistryCluster" + }, + "name": { + "type": "string", + "description": "The configuration setting name.\n", + "willReplaceOnChanges": true + }, + "resetOnUpdate": { + "type": "boolean", + "description": "The flag to control whether to reset the exporter when updating configs. Defaults to `false`.\n" + }, + "restEndpoint": { + "type": "string", + "description": "The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`).\n", + "willReplaceOnChanges": true + }, + "schemaRegistryCluster": { + "$ref": "#/types/confluentcloud:index/SchemaExporterSchemaRegistryCluster:SchemaExporterSchemaRegistryCluster", + "willReplaceOnChanges": true + }, + "status": { + "type": "string", + "description": "The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`.\n" + }, + "subjectRenameFormat": { + "type": "string", + "description": "Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. 
For example, `dc_${subject}` for the subject orders will map to the destination subject name `dc_orders`.\n" + }, + "subjects": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Name of each exporter subject.\n" + } + }, + "type": "object" + } + }, "confluentcloud:index/schemaRegistryCluster:SchemaRegistryCluster": { "description": "{{% examples %}}\n## Example Usage\n{{% example %}}\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as confluentcloud from \"@pulumi/confluentcloud\";\n\nconst development = new confluentcloud.Environment(\"development\", {});\nconst example = confluentcloud.getSchemaRegistryRegion({\n cloud: \"AWS\",\n region: \"us-east-2\",\n \"package\": \"ESSENTIALS\",\n});\nconst essentials = new confluentcloud.SchemaRegistryCluster(\"essentials\", {\n \"package\": example.then(example =\u003e example[\"package\"]),\n environment: {\n id: development.id,\n },\n region: {\n id: example.then(example =\u003e example.id),\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_confluentcloud as confluentcloud\n\ndevelopment = confluentcloud.Environment(\"development\")\nexample = confluentcloud.get_schema_registry_region(cloud=\"AWS\",\n region=\"us-east-2\",\n package=\"ESSENTIALS\")\nessentials = confluentcloud.SchemaRegistryCluster(\"essentials\",\n package=example.package,\n environment=confluentcloud.SchemaRegistryClusterEnvironmentArgs(\n id=development.id,\n ),\n region=confluentcloud.SchemaRegistryClusterRegionArgs(\n id=example.id,\n ))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing ConfluentCloud = Pulumi.ConfluentCloud;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var development = new ConfluentCloud.Environment(\"development\");\n\n var example = ConfluentCloud.GetSchemaRegistryRegion.Invoke(new()\n {\n Cloud = \"AWS\",\n Region = \"us-east-2\",\n Package = \"ESSENTIALS\",\n });\n\n var essentials = new 
ConfluentCloud.SchemaRegistryCluster(\"essentials\", new()\n {\n Package = example.Apply(getSchemaRegistryRegionResult =\u003e getSchemaRegistryRegionResult.Package),\n Environment = new ConfluentCloud.Inputs.SchemaRegistryClusterEnvironmentArgs\n {\n Id = development.Id,\n },\n Region = new ConfluentCloud.Inputs.SchemaRegistryClusterRegionArgs\n {\n Id = example.Apply(getSchemaRegistryRegionResult =\u003e getSchemaRegistryRegionResult.Id),\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-confluentcloud/sdk/go/confluentcloud\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tdevelopment, err := confluentcloud.NewEnvironment(ctx, \"development\", nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\texample, err := confluentcloud.GetSchemaRegistryRegion(ctx, \u0026confluentcloud.GetSchemaRegistryRegionArgs{\n\t\t\tCloud: \"AWS\",\n\t\t\tRegion: \"us-east-2\",\n\t\t\tPackage: \"ESSENTIALS\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = confluentcloud.NewSchemaRegistryCluster(ctx, \"essentials\", \u0026confluentcloud.SchemaRegistryClusterArgs{\n\t\t\tPackage: *pulumi.String(example.Package),\n\t\t\tEnvironment: \u0026confluentcloud.SchemaRegistryClusterEnvironmentArgs{\n\t\t\t\tId: development.ID(),\n\t\t\t},\n\t\t\tRegion: \u0026confluentcloud.SchemaRegistryClusterRegionArgs{\n\t\t\t\tId: *pulumi.String(example.Id),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.confluentcloud.Environment;\nimport com.pulumi.confluentcloud.ConfluentcloudFunctions;\nimport com.pulumi.confluentcloud.inputs.GetSchemaRegistryRegionArgs;\nimport com.pulumi.confluentcloud.SchemaRegistryCluster;\nimport com.pulumi.confluentcloud.SchemaRegistryClusterArgs;\nimport 
com.pulumi.confluentcloud.inputs.SchemaRegistryClusterEnvironmentArgs;\nimport com.pulumi.confluentcloud.inputs.SchemaRegistryClusterRegionArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var development = new Environment(\"development\");\n\n final var example = ConfluentcloudFunctions.getSchemaRegistryRegion(GetSchemaRegistryRegionArgs.builder()\n .cloud(\"AWS\")\n .region(\"us-east-2\")\n .package_(\"ESSENTIALS\")\n .build());\n\n var essentials = new SchemaRegistryCluster(\"essentials\", SchemaRegistryClusterArgs.builder() \n .package_(example.applyValue(getSchemaRegistryRegionResult -\u003e getSchemaRegistryRegionResult.package()))\n .environment(SchemaRegistryClusterEnvironmentArgs.builder()\n .id(development.id())\n .build())\n .region(SchemaRegistryClusterRegionArgs.builder()\n .id(example.applyValue(getSchemaRegistryRegionResult -\u003e getSchemaRegistryRegionResult.id()))\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n development:\n type: confluentcloud:Environment\n essentials:\n type: confluentcloud:SchemaRegistryCluster\n properties:\n package: ${example.package}\n environment:\n id: ${development.id}\n region:\n id: ${example.id}\nvariables:\n example:\n fn::invoke:\n Function: confluentcloud:getSchemaRegistryRegion\n Arguments:\n cloud: AWS\n region: us-east-2\n package: ESSENTIALS\n```\n{{% /example %}}\n{{% /examples %}}\n\n## Import\n\nYou can import a Schema Registry cluster by using Environment ID and Schema Registry cluster ID, in the format `\u003cEnvironment ID\u003e/\u003cSchema Registry cluster ID\u003e`, for example$ export CONFLUENT_CLOUD_API_KEY=\"\u003ccloud_api_key\u003e\" $ export CONFLUENT_CLOUD_API_SECRET=\"\u003ccloud_api_secret\u003e\"\n\n```sh\n $ pulumi import 
confluentcloud:index/schemaRegistryCluster:SchemaRegistryCluster example env-abc123/lsrc-abc123\n```\n\n !\u003e **Warning:** Do not forget to delete terminal command history afterwards for security purposes. ", "properties": { @@ -7536,6 +7957,93 @@ ] } }, + "confluentcloud:index/getFlinkComputePool:getFlinkComputePool": { + "description": "[![Early Access](https://img.shields.io/badge/Lifecycle%20Stage-Early%20Access-%2300afba)](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy)\n\n\u003e **Note:** `confluentcloud.FlinkComputePool` data source is available in **Early Access** for early adopters. Early Access features are introduced to gather customer feedback. This feature should be used only for evaluation and non-production testing purposes or to provide feedback to Confluent, particularly as it becomes more widely available in follow-on editions. \n**Early Access** features are intended for evaluation use in development and testing environments only, and not for production use. The warranty, SLA, and Support Services provisions of your agreement with Confluent do not apply to Early Access features. Early Access features are considered to be a Proof of Concept as defined in the Confluent Cloud Terms of Service. 
Confluent may discontinue providing early access releases of the Early Access features at any time in Confluent’s sole discretion.\n\n`confluentcloud.FlinkComputePool` describes a Flink Compute Pool data source.\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as confluentcloud from \"@pulumi/confluentcloud\";\n\nconst exampleUsingIdFlinkComputePool = confluentcloud.getFlinkComputePool({\n id: \"lfcp-abc123\",\n environment: {\n id: \"env-xyz456\",\n },\n});\nexport const exampleUsingId = exampleUsingIdFlinkComputePool;\nconst exampleUsingNameFlinkComputePool = confluentcloud.getFlinkComputePool({\n displayName: \"my_compute_pool\",\n environment: {\n id: \"env-xyz456\",\n },\n});\nexport const exampleUsingName = exampleUsingNameFlinkComputePool;\n```\n```python\nimport pulumi\nimport pulumi_confluentcloud as confluentcloud\n\nexample_using_id_flink_compute_pool = confluentcloud.get_flink_compute_pool(id=\"lfcp-abc123\",\n environment=confluentcloud.GetFlinkComputePoolEnvironmentArgs(\n id=\"env-xyz456\",\n ))\npulumi.export(\"exampleUsingId\", example_using_id_flink_compute_pool)\nexample_using_name_flink_compute_pool = confluentcloud.get_flink_compute_pool(display_name=\"my_compute_pool\",\n environment=confluentcloud.GetFlinkComputePoolEnvironmentArgs(\n id=\"env-xyz456\",\n ))\npulumi.export(\"exampleUsingName\", example_using_name_flink_compute_pool)\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing ConfluentCloud = Pulumi.ConfluentCloud;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var exampleUsingIdFlinkComputePool = ConfluentCloud.GetFlinkComputePool.Invoke(new()\n {\n Id = \"lfcp-abc123\",\n Environment = new ConfluentCloud.Inputs.GetFlinkComputePoolEnvironmentInputArgs\n {\n Id = \"env-xyz456\",\n },\n });\n\n var exampleUsingNameFlinkComputePool = ConfluentCloud.GetFlinkComputePool.Invoke(new()\n {\n DisplayName = 
\"my_compute_pool\",\n Environment = new ConfluentCloud.Inputs.GetFlinkComputePoolEnvironmentInputArgs\n {\n Id = \"env-xyz456\",\n },\n });\n\n return new Dictionary\u003cstring, object?\u003e\n {\n [\"exampleUsingId\"] = exampleUsingIdFlinkComputePool,\n [\"exampleUsingName\"] = exampleUsingNameFlinkComputePool,\n };\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-confluentcloud/sdk/go/confluentcloud\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\texampleUsingIdFlinkComputePool, err := confluentcloud.LookupFlinkComputePool(ctx, \u0026confluentcloud.LookupFlinkComputePoolArgs{\n\t\t\tId: pulumi.StringRef(\"lfcp-abc123\"),\n\t\t\tEnvironment: confluentcloud.GetFlinkComputePoolEnvironment{\n\t\t\t\tId: \"env-xyz456\",\n\t\t\t},\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tctx.Export(\"exampleUsingId\", exampleUsingIdFlinkComputePool)\n\t\texampleUsingNameFlinkComputePool, err := confluentcloud.LookupFlinkComputePool(ctx, \u0026confluentcloud.LookupFlinkComputePoolArgs{\n\t\t\tDisplayName: pulumi.StringRef(\"my_compute_pool\"),\n\t\t\tEnvironment: confluentcloud.GetFlinkComputePoolEnvironment{\n\t\t\t\tId: \"env-xyz456\",\n\t\t\t},\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tctx.Export(\"exampleUsingName\", exampleUsingNameFlinkComputePool)\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.confluentcloud.ConfluentcloudFunctions;\nimport com.pulumi.confluentcloud.inputs.GetFlinkComputePoolArgs;\nimport com.pulumi.confluentcloud.inputs.GetFlinkComputePoolEnvironmentArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n 
Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var exampleUsingIdFlinkComputePool = ConfluentcloudFunctions.getFlinkComputePool(GetFlinkComputePoolArgs.builder()\n .id(\"lfcp-abc123\")\n .environment(GetFlinkComputePoolEnvironmentArgs.builder()\n .id(\"env-xyz456\")\n .build())\n .build());\n\n ctx.export(\"exampleUsingId\", exampleUsingIdFlinkComputePool.applyValue(getFlinkComputePoolResult -\u003e getFlinkComputePoolResult));\n final var exampleUsingNameFlinkComputePool = ConfluentcloudFunctions.getFlinkComputePool(GetFlinkComputePoolArgs.builder()\n .displayName(\"my_compute_pool\")\n .environment(GetFlinkComputePoolEnvironmentArgs.builder()\n .id(\"env-xyz456\")\n .build())\n .build());\n\n ctx.export(\"exampleUsingName\", exampleUsingNameFlinkComputePool.applyValue(getFlinkComputePoolResult -\u003e getFlinkComputePoolResult));\n }\n}\n```\n```yaml\nvariables:\n exampleUsingIdFlinkComputePool:\n fn::invoke:\n Function: confluentcloud:getFlinkComputePool\n Arguments:\n id: lfcp-abc123\n environment:\n id: env-xyz456\n exampleUsingNameFlinkComputePool:\n fn::invoke:\n Function: confluentcloud:getFlinkComputePool\n Arguments:\n displayName: my_compute_pool\n environment:\n id: env-xyz456\noutputs:\n exampleUsingId: ${exampleUsingIdFlinkComputePool}\n exampleUsingName: ${exampleUsingNameFlinkComputePool}\n```\n{{% /example %}}\n{{% /examples %}}", + "inputs": { + "description": "A collection of arguments for invoking getFlinkComputePool.\n", + "properties": { + "displayName": { + "type": "string", + "description": "A human-readable name for the Flink Compute Pool.\n" + }, + "environment": { + "$ref": "#/types/confluentcloud:index/getFlinkComputePoolEnvironment:getFlinkComputePoolEnvironment", + "description": "(Required Configuration Block) supports the following:\n" + }, + "id": { + "type": "string", + "description": "The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`.\n\n\u003e **Note:** 
Exactly one from the `id` and `display_name` attributes must be specified.\n" + } + }, + "type": "object", + "required": [ + "environment" + ] + }, + "outputs": { + "description": "A collection of values returned by getFlinkComputePool.\n", + "properties": { + "apiVersion": { + "type": "string", + "description": "(Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`.\n" + }, + "cloud": { + "type": "string", + "description": "(Required String) The cloud service provider that runs the Flink Compute Pool.\n" + }, + "currentCfu": { + "type": "integer", + "description": "(Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool.\n" + }, + "displayName": { + "type": "string", + "description": "(Required String) The name of the Flink Compute Pool.\n" + }, + "environment": { + "$ref": "#/types/confluentcloud:index/getFlinkComputePoolEnvironment:getFlinkComputePoolEnvironment", + "description": "(Required Configuration Block) supports the following:\n" + }, + "id": { + "type": "string", + "description": "(Required String) The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`.\n" + }, + "kind": { + "type": "string", + "description": "(Required String) The kind of the Flink Compute Pool, for example, `ComputePool`.\n" + }, + "maxCfu": { + "type": "integer", + "description": "(Required Integer) Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to.\n" + }, + "region": { + "type": "string", + "description": "(Required String) The cloud service provider region that hosts the Flink Compute Pool.\n" + }, + "resourceName": { + "type": "string", + "description": "(Required String) The Confluent Resource Name of the Flink Compute Pool.\n" + }, + "restEndpoint": { + "type": "string", + "description": "(Required String) The API endpoint of the Flink Compute Pool.\n" + } + }, + "type": "object", + 
"required": [ + "apiVersion", + "cloud", + "currentCfu", + "displayName", + "environment", + "id", + "kind", + "maxCfu", + "region", + "resourceName", + "restEndpoint" + ] + } + }, "confluentcloud:index/getIdentityPool:getIdentityPool": { "description": "[![General Availability](https://img.shields.io/badge/Lifecycle%20Stage-General%20Availability-%2345c6e8)](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy)\n\n`confluentcloud.IdentityPool` describes an Identity Pool data source.\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as confluentcloud from \"@pulumi/confluentcloud\";\n\nconst exampleUsingIdIdentityPool = confluentcloud.getIdentityPool({\n id: \"pool-xyz456\",\n identityProvider: {\n id: \"op-abc123\",\n },\n});\nexport const exampleUsingId = exampleUsingIdIdentityPool;\nconst exampleUsingNameIdentityPool = confluentcloud.getIdentityPool({\n displayName: \"My Identity Pool\",\n identityProvider: {\n id: \"op-abc123\",\n },\n});\nexport const exampleUsingName = exampleUsingNameIdentityPool;\n```\n```python\nimport pulumi\nimport pulumi_confluentcloud as confluentcloud\n\nexample_using_id_identity_pool = confluentcloud.get_identity_pool(id=\"pool-xyz456\",\n identity_provider=confluentcloud.GetIdentityPoolIdentityProviderArgs(\n id=\"op-abc123\",\n ))\npulumi.export(\"exampleUsingId\", example_using_id_identity_pool)\nexample_using_name_identity_pool = confluentcloud.get_identity_pool(display_name=\"My Identity Pool\",\n identity_provider=confluentcloud.GetIdentityPoolIdentityProviderArgs(\n id=\"op-abc123\",\n ))\npulumi.export(\"exampleUsingName\", example_using_name_identity_pool)\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing ConfluentCloud = Pulumi.ConfluentCloud;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var exampleUsingIdIdentityPool = 
ConfluentCloud.GetIdentityPool.Invoke(new()\n {\n Id = \"pool-xyz456\",\n IdentityProvider = new ConfluentCloud.Inputs.GetIdentityPoolIdentityProviderInputArgs\n {\n Id = \"op-abc123\",\n },\n });\n\n var exampleUsingNameIdentityPool = ConfluentCloud.GetIdentityPool.Invoke(new()\n {\n DisplayName = \"My Identity Pool\",\n IdentityProvider = new ConfluentCloud.Inputs.GetIdentityPoolIdentityProviderInputArgs\n {\n Id = \"op-abc123\",\n },\n });\n\n return new Dictionary\u003cstring, object?\u003e\n {\n [\"exampleUsingId\"] = exampleUsingIdIdentityPool,\n [\"exampleUsingName\"] = exampleUsingNameIdentityPool,\n };\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-confluentcloud/sdk/go/confluentcloud\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\texampleUsingIdIdentityPool, err := confluentcloud.LookupIdentityPool(ctx, \u0026confluentcloud.LookupIdentityPoolArgs{\n\t\t\tId: pulumi.StringRef(\"pool-xyz456\"),\n\t\t\tIdentityProvider: confluentcloud.GetIdentityPoolIdentityProvider{\n\t\t\t\tId: \"op-abc123\",\n\t\t\t},\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tctx.Export(\"exampleUsingId\", exampleUsingIdIdentityPool)\n\t\texampleUsingNameIdentityPool, err := confluentcloud.LookupIdentityPool(ctx, \u0026confluentcloud.LookupIdentityPoolArgs{\n\t\t\tDisplayName: pulumi.StringRef(\"My Identity Pool\"),\n\t\t\tIdentityProvider: confluentcloud.GetIdentityPoolIdentityProvider{\n\t\t\t\tId: \"op-abc123\",\n\t\t\t},\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tctx.Export(\"exampleUsingName\", exampleUsingNameIdentityPool)\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.confluentcloud.ConfluentcloudFunctions;\nimport com.pulumi.confluentcloud.inputs.GetIdentityPoolArgs;\nimport 
com.pulumi.confluentcloud.inputs.GetIdentityPoolIdentityProviderArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var exampleUsingIdIdentityPool = ConfluentcloudFunctions.getIdentityPool(GetIdentityPoolArgs.builder()\n .id(\"pool-xyz456\")\n .identityProvider(GetIdentityPoolIdentityProviderArgs.builder()\n .id(\"op-abc123\")\n .build())\n .build());\n\n ctx.export(\"exampleUsingId\", exampleUsingIdIdentityPool.applyValue(getIdentityPoolResult -\u003e getIdentityPoolResult));\n final var exampleUsingNameIdentityPool = ConfluentcloudFunctions.getIdentityPool(GetIdentityPoolArgs.builder()\n .displayName(\"My Identity Pool\")\n .identityProvider(GetIdentityPoolIdentityProviderArgs.builder()\n .id(\"op-abc123\")\n .build())\n .build());\n\n ctx.export(\"exampleUsingName\", exampleUsingNameIdentityPool.applyValue(getIdentityPoolResult -\u003e getIdentityPoolResult));\n }\n}\n```\n```yaml\nvariables:\n exampleUsingIdIdentityPool:\n fn::invoke:\n Function: confluentcloud:getIdentityPool\n Arguments:\n id: pool-xyz456\n identityProvider:\n id: op-abc123\n exampleUsingNameIdentityPool:\n fn::invoke:\n Function: confluentcloud:getIdentityPool\n Arguments:\n displayName: My Identity Pool\n identityProvider:\n id: op-abc123\noutputs:\n exampleUsingId: ${exampleUsingIdIdentityPool}\n exampleUsingName: ${exampleUsingNameIdentityPool}\n```\n{{% /example %}}\n{{% /examples %}}", "inputs": { @@ -8750,7 +9258,7 @@ } }, "confluentcloud:index/getRoleBinding:getRoleBinding": { - "description": "[![General Availability](https://img.shields.io/badge/Lifecycle%20Stage-General%20Availability-%2345c6e8)](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy)\n\n`confluentcloud.RoleBinding` describes a 
Role Binding.\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as confluentcloud from \"@pulumi/confluentcloud\";\n\nconst exampleRoleBinding = confluentcloud.getRoleBinding({\n id: \"rb-abc123\",\n});\nexport const example = exampleRoleBinding;\n```\n```python\nimport pulumi\nimport pulumi_confluentcloud as confluentcloud\n\nexample_role_binding = confluentcloud.get_role_binding(id=\"rb-abc123\")\npulumi.export(\"example\", example_role_binding)\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing ConfluentCloud = Pulumi.ConfluentCloud;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var exampleRoleBinding = ConfluentCloud.GetRoleBinding.Invoke(new()\n {\n Id = \"rb-abc123\",\n });\n\n return new Dictionary\u003cstring, object?\u003e\n {\n [\"example\"] = exampleRoleBinding,\n };\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-confluentcloud/sdk/go/confluentcloud\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\texampleRoleBinding, err := confluentcloud.LookupRoleBinding(ctx, \u0026confluentcloud.LookupRoleBindingArgs{\n\t\t\tId: \"rb-abc123\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tctx.Export(\"example\", exampleRoleBinding)\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.confluentcloud.ConfluentcloudFunctions;\nimport com.pulumi.confluentcloud.inputs.GetRoleBindingArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var exampleRoleBinding = 
ConfluentcloudFunctions.getRoleBinding(GetRoleBindingArgs.builder()\n .id(\"rb-abc123\")\n .build());\n\n ctx.export(\"example\", exampleRoleBinding.applyValue(getRoleBindingResult -\u003e getRoleBindingResult));\n }\n}\n```\n```yaml\nvariables:\n exampleRoleBinding:\n fn::invoke:\n Function: confluentcloud:getRoleBinding\n Arguments:\n id: rb-abc123\noutputs:\n example: ${exampleRoleBinding}\n```\n{{% /example %}}\n{{% /examples %}}", + "description": "[![General Availability](https://img.shields.io/badge/Lifecycle%20Stage-General%20Availability-%2345c6e8)](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy)\n\n`confluentcloud.RoleBinding` describes a Role Binding.\n\n\u003e **Note:** For more information on the Role Bindings, see [Predefined RBAC roles in Confluent Cloud](https://docs.confluent.io/cloud/current/access-management/access-control/rbac/predefined-rbac-roles.html).\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as confluentcloud from \"@pulumi/confluentcloud\";\n\nconst exampleRoleBinding = confluentcloud.getRoleBinding({\n id: \"rb-abc123\",\n});\nexport const example = exampleRoleBinding;\n```\n```python\nimport pulumi\nimport pulumi_confluentcloud as confluentcloud\n\nexample_role_binding = confluentcloud.get_role_binding(id=\"rb-abc123\")\npulumi.export(\"example\", example_role_binding)\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing ConfluentCloud = Pulumi.ConfluentCloud;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var exampleRoleBinding = ConfluentCloud.GetRoleBinding.Invoke(new()\n {\n Id = \"rb-abc123\",\n });\n\n return new Dictionary\u003cstring, object?\u003e\n {\n [\"example\"] = exampleRoleBinding,\n };\n});\n```\n```go\npackage main\n\nimport 
(\n\t\"github.com/pulumi/pulumi-confluentcloud/sdk/go/confluentcloud\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\texampleRoleBinding, err := confluentcloud.LookupRoleBinding(ctx, \u0026confluentcloud.LookupRoleBindingArgs{\n\t\t\tId: \"rb-abc123\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tctx.Export(\"example\", exampleRoleBinding)\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.confluentcloud.ConfluentcloudFunctions;\nimport com.pulumi.confluentcloud.inputs.GetRoleBindingArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var exampleRoleBinding = ConfluentcloudFunctions.getRoleBinding(GetRoleBindingArgs.builder()\n .id(\"rb-abc123\")\n .build());\n\n ctx.export(\"example\", exampleRoleBinding.applyValue(getRoleBindingResult -\u003e getRoleBindingResult));\n }\n}\n```\n```yaml\nvariables:\n exampleRoleBinding:\n fn::invoke:\n Function: confluentcloud:getRoleBinding\n Arguments:\n id: rb-abc123\noutputs:\n example: ${exampleRoleBinding}\n```\n{{% /example %}}\n{{% /examples %}}", "inputs": { "description": "A collection of arguments for invoking getRoleBinding.\n", "properties": { diff --git a/provider/go.mod b/provider/go.mod index 5b77ff27..ab4fcf61 100644 --- a/provider/go.mod +++ b/provider/go.mod @@ -10,7 +10,7 @@ replace ( require ( github.com/confluentinc/terraform-provider-confluent v1.32.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1 - github.com/pulumi/pulumi-terraform-bridge/v3 v3.60.0 + github.com/pulumi/pulumi-terraform-bridge/v3 v3.60.1 github.com/pulumi/pulumi/sdk/v3 
v3.81.0 ) @@ -92,7 +92,8 @@ require ( github.com/confluentinc/ccloud-sdk-go-v2/byok v0.0.1 // indirect github.com/confluentinc/ccloud-sdk-go-v2/cmk v0.10.0 // indirect github.com/confluentinc/ccloud-sdk-go-v2/connect v0.2.0 // indirect - github.com/confluentinc/ccloud-sdk-go-v2/data-catalog v0.1.0 // indirect + github.com/confluentinc/ccloud-sdk-go-v2/data-catalog v0.2.0 // indirect + github.com/confluentinc/ccloud-sdk-go-v2/flink v0.5.0 // indirect github.com/confluentinc/ccloud-sdk-go-v2/iam v0.10.0 // indirect github.com/confluentinc/ccloud-sdk-go-v2/identity-provider v0.2.0 // indirect github.com/confluentinc/ccloud-sdk-go-v2/kafka-quotas v0.4.0 // indirect @@ -102,7 +103,7 @@ require ( github.com/confluentinc/ccloud-sdk-go-v2/networking v0.7.0 // indirect github.com/confluentinc/ccloud-sdk-go-v2/networking-privatelink v0.1.0 // indirect github.com/confluentinc/ccloud-sdk-go-v2/org v0.4.0 // indirect - github.com/confluentinc/ccloud-sdk-go-v2/schema-registry v0.2.0 // indirect + github.com/confluentinc/ccloud-sdk-go-v2/schema-registry v0.3.0 // indirect github.com/confluentinc/ccloud-sdk-go-v2/srcm v0.2.0 // indirect github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81 // indirect github.com/containerd/containerd v1.6.19 // indirect diff --git a/provider/go.sum b/provider/go.sum index 643becbc..8e918266 100644 --- a/provider/go.sum +++ b/provider/go.sum @@ -982,8 +982,10 @@ github.com/confluentinc/ccloud-sdk-go-v2/cmk v0.10.0 h1:YQEcSvhX5ODllg0mhxLivckK github.com/confluentinc/ccloud-sdk-go-v2/cmk v0.10.0/go.mod h1:357Zo3HvVAe5iQgUFxUbQPAKJasGm8vFMkOB+krVmR8= github.com/confluentinc/ccloud-sdk-go-v2/connect v0.2.0 h1:rEb3sxzKCZvZCnEZ10WyGqkVIdlqxJGbmP85/4C4YdE= github.com/confluentinc/ccloud-sdk-go-v2/connect v0.2.0/go.mod h1:lF4AfDtxoL0V7ZIMOULWiAycPwlfyt9UG659adRNdOM= -github.com/confluentinc/ccloud-sdk-go-v2/data-catalog v0.1.0 h1:HuU7SAId+hUi/SWbIXXXu6khDyGW/RANsNQxEwMNWmE= -github.com/confluentinc/ccloud-sdk-go-v2/data-catalog v0.1.0/go.mod 
h1:27GwI+j82LDFydahgmKVroqw6oFxzbvIj+ZOnksaKGw= +github.com/confluentinc/ccloud-sdk-go-v2/data-catalog v0.2.0 h1:ySx0jYNGK0XLcSkgPz+hxcH05v1LI5GVb3Rg+TCqBqk= +github.com/confluentinc/ccloud-sdk-go-v2/data-catalog v0.2.0/go.mod h1:27GwI+j82LDFydahgmKVroqw6oFxzbvIj+ZOnksaKGw= +github.com/confluentinc/ccloud-sdk-go-v2/flink v0.5.0 h1:5lh7TY2aUlZA0wUL0wAebZeGPp8uEnEhZawjYhnRUxo= +github.com/confluentinc/ccloud-sdk-go-v2/flink v0.5.0/go.mod h1:x+8kpYsJHRlvGuIB/tV0afPNyjKst3MsNOE6XsjgAl0= github.com/confluentinc/ccloud-sdk-go-v2/iam v0.10.0 h1:AV0bGk01bGfKzNq5IVqRi2iEc6YTeBbl//IYvQ/j8ag= github.com/confluentinc/ccloud-sdk-go-v2/iam v0.10.0/go.mod h1:2Lm82ly9Yh5LLhp8OTnUGqjz4JdIXAZ5a0/u9T+rGGU= github.com/confluentinc/ccloud-sdk-go-v2/identity-provider v0.2.0 h1:9TT8UCFRc5zUdsE7UgMz7hqN+2KYnIkBcAKCaiZJrXw= @@ -1002,8 +1004,8 @@ github.com/confluentinc/ccloud-sdk-go-v2/networking-privatelink v0.1.0 h1:CA+3m6 github.com/confluentinc/ccloud-sdk-go-v2/networking-privatelink v0.1.0/go.mod h1:uj/ybBJPQbmuuBdSoznMiMGEwW3z/g0Uko8uKWg36I8= github.com/confluentinc/ccloud-sdk-go-v2/org v0.4.0 h1:WcJs6RbY8nU5HapaG0ZCH9ftFBtZyuKMIuNAkdVmc2o= github.com/confluentinc/ccloud-sdk-go-v2/org v0.4.0/go.mod h1:zREJ+OOZz0rEXCaPx0JbCVj2EfNnYs/c6qhPDfhldI0= -github.com/confluentinc/ccloud-sdk-go-v2/schema-registry v0.2.0 h1:qR8cm4OmT/B2g3pINSBxt+dDMFep5wgVJGu6oHIrbdk= -github.com/confluentinc/ccloud-sdk-go-v2/schema-registry v0.2.0/go.mod h1:uTE8K5/jg75ubJY1Flh6TfBIwVFVOchkLWqVsamwLYc= +github.com/confluentinc/ccloud-sdk-go-v2/schema-registry v0.3.0 h1:AOqLmXM4nK41OaMaJpYXy5JKaJSZYs0mciFadvdBOQY= +github.com/confluentinc/ccloud-sdk-go-v2/schema-registry v0.3.0/go.mod h1:uTE8K5/jg75ubJY1Flh6TfBIwVFVOchkLWqVsamwLYc= github.com/confluentinc/ccloud-sdk-go-v2/srcm v0.2.0 h1:ezCzDCOOavjTPVKvdET0QjAXm1u9iRJjg2s5lkSMrps= github.com/confluentinc/ccloud-sdk-go-v2/srcm v0.2.0/go.mod h1:qY4Y/QCDKI0eR+HLVJWGFstsTAiI83+sowKOyoRFhF0= github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod 
h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= @@ -2227,8 +2229,8 @@ github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40T github.com/pulumi/pulumi-java/pkg v0.9.6 h1:UJrOAsYHRchwb4QlfI9Q224qg1TOI3rIsI6DDTUnn30= github.com/pulumi/pulumi-java/pkg v0.9.6/go.mod h1:c6rSw/+q4O0IImgJ9axxoC6QesbPYWBaG5gimbHouUQ= github.com/pulumi/pulumi-terraform-bridge/testing v0.0.1 h1:SCg1gjfY9N4yn8U8peIUYATifjoDABkyR7H9lmefsfc= -github.com/pulumi/pulumi-terraform-bridge/v3 v3.60.0 h1:MPhSwNLJJlqLFHGfrXIRXZHzFIu05YLQldAJRYpOHRs= -github.com/pulumi/pulumi-terraform-bridge/v3 v3.60.0/go.mod h1:o0Vfch2UXtHOnGYpNElzGg4htT6B8X8hS9fa5AguP7g= +github.com/pulumi/pulumi-terraform-bridge/v3 v3.60.1 h1:+W2JHLi4y+G57jLPLJbDLv1xvm/9L2NO0gWXrtR8MDM= +github.com/pulumi/pulumi-terraform-bridge/v3 v3.60.1/go.mod h1:o0Vfch2UXtHOnGYpNElzGg4htT6B8X8hS9fa5AguP7g= github.com/pulumi/pulumi-terraform-bridge/x/muxer v0.0.4 h1:rIzMmtcVpPX8ynaz6/nW5AHNY63DiNfCohqmxWvMpM4= github.com/pulumi/pulumi-terraform-bridge/x/muxer v0.0.4/go.mod h1:Kt8RIZWa/N8rW3+0g6NrqCBmF3o+HuIhFaZpssEkG6w= github.com/pulumi/pulumi-yaml v1.2.2 h1:W6BeUBLhDrJ2GSU0em1AUVelG9PBI4ABY61DdhJOO3E= diff --git a/sdk/dotnet/FlinkComputePool.cs b/sdk/dotnet/FlinkComputePool.cs new file mode 100644 index 00000000..8b4071c4 --- /dev/null +++ b/sdk/dotnet/FlinkComputePool.cs @@ -0,0 +1,262 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.ConfluentCloud +{ + /// + /// ## Example Usage + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using ConfluentCloud = Pulumi.ConfluentCloud; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var development = new ConfluentCloud.Environment("development"); + /// + /// var main = new ConfluentCloud.FlinkComputePool("main", new() + /// { + /// DisplayName = "standard_compute_pool", + /// Cloud = "AWS", + /// Region = "us-east-1", + /// MaxCfu = 5, + /// Environment = new ConfluentCloud.Inputs.FlinkComputePoolEnvironmentArgs + /// { + /// Id = development.Id, + /// }, + /// }); + /// + /// }); + /// ``` + /// + /// ## Import + /// + /// You can import a Flink Compute Pool by using Environment ID and Flink Compute Pool ID, in the format `<Environment ID>/<Flink Compute Pool ID>`. The following example shows how to import a Flink Compute Pool$ export CONFLUENT_CLOUD_API_KEY="<cloud_api_key>" $ export CONFLUENT_CLOUD_API_SECRET="<cloud_api_secret>" + /// + /// ```sh + /// $ pulumi import confluentcloud:index/flinkComputePool:FlinkComputePool main env-abc123/lfcp-abc123 + /// ``` + /// + /// !> **Warning:** Do not forget to delete terminal command history afterwards for security purposes. + /// + [ConfluentCloudResourceType("confluentcloud:index/flinkComputePool:FlinkComputePool")] + public partial class FlinkComputePool : global::Pulumi.CustomResource + { + /// + /// (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`. + /// + [Output("apiVersion")] + public Output ApiVersion { get; private set; } = null!; + + /// + /// The cloud service provider that runs the Flink Compute Pool. 
+ /// + [Output("cloud")] + public Output Cloud { get; private set; } = null!; + + /// + /// (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool. + /// + [Output("currentCfu")] + public Output CurrentCfu { get; private set; } = null!; + + /// + /// The name of the Flink Compute Pool. + /// + [Output("displayName")] + public Output DisplayName { get; private set; } = null!; + + /// + /// Environment objects represent an isolated namespace for your Confluent resources for organizational purposes. + /// + [Output("environment")] + public Output Environment { get; private set; } = null!; + + /// + /// (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`. + /// + [Output("kind")] + public Output Kind { get; private set; } = null!; + + /// + /// Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`. + /// + [Output("maxCfu")] + public Output MaxCfu { get; private set; } = null!; + + /// + /// The cloud service provider region that hosts the Flink Compute Pool. + /// + [Output("region")] + public Output Region { get; private set; } = null!; + + /// + /// (Required String) The Confluent Resource Name of the Flink Compute Pool. + /// + [Output("resourceName")] + public Output ResourceName { get; private set; } = null!; + + /// + /// (Required String) The API endpoint of the Flink Compute Pool. + /// + [Output("restEndpoint")] + public Output RestEndpoint { get; private set; } = null!; + + + /// + /// Create a FlinkComputePool resource with the given unique name, arguments, and options. + /// + /// + /// The unique name of the resource + /// The arguments used to populate this resource's properties + /// A bag of options that control this resource's behavior + public FlinkComputePool(string name, FlinkComputePoolArgs args, CustomResourceOptions? 
options = null) + : base("confluentcloud:index/flinkComputePool:FlinkComputePool", name, args ?? new FlinkComputePoolArgs(), MakeResourceOptions(options, "")) + { + } + + private FlinkComputePool(string name, Input id, FlinkComputePoolState? state = null, CustomResourceOptions? options = null) + : base("confluentcloud:index/flinkComputePool:FlinkComputePool", name, state, MakeResourceOptions(options, id)) + { + } + + private static CustomResourceOptions MakeResourceOptions(CustomResourceOptions? options, Input? id) + { + var defaultOptions = new CustomResourceOptions + { + Version = Utilities.Version, + }; + var merged = CustomResourceOptions.Merge(defaultOptions, options); + // Override the ID if one was specified for consistency with other language SDKs. + merged.Id = id ?? merged.Id; + return merged; + } + /// + /// Get an existing FlinkComputePool resource's state with the given name, ID, and optional extra + /// properties used to qualify the lookup. + /// + /// + /// The unique name of the resulting resource. + /// The unique provider ID of the resource to lookup. + /// Any extra arguments used during the lookup. + /// A bag of options that control this resource's behavior + public static FlinkComputePool Get(string name, Input id, FlinkComputePoolState? state = null, CustomResourceOptions? options = null) + { + return new FlinkComputePool(name, id, state, options); + } + } + + public sealed class FlinkComputePoolArgs : global::Pulumi.ResourceArgs + { + /// + /// The cloud service provider that runs the Flink Compute Pool. + /// + [Input("cloud", required: true)] + public Input Cloud { get; set; } = null!; + + /// + /// The name of the Flink Compute Pool. + /// + [Input("displayName", required: true)] + public Input DisplayName { get; set; } = null!; + + /// + /// Environment objects represent an isolated namespace for your Confluent resources for organizational purposes. 
+ /// + [Input("environment", required: true)] + public Input Environment { get; set; } = null!; + + /// + /// Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`. + /// + [Input("maxCfu")] + public Input? MaxCfu { get; set; } + + /// + /// The cloud service provider region that hosts the Flink Compute Pool. + /// + [Input("region", required: true)] + public Input Region { get; set; } = null!; + + public FlinkComputePoolArgs() + { + } + public static new FlinkComputePoolArgs Empty => new FlinkComputePoolArgs(); + } + + public sealed class FlinkComputePoolState : global::Pulumi.ResourceArgs + { + /// + /// (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`. + /// + [Input("apiVersion")] + public Input? ApiVersion { get; set; } + + /// + /// The cloud service provider that runs the Flink Compute Pool. + /// + [Input("cloud")] + public Input? Cloud { get; set; } + + /// + /// (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool. + /// + [Input("currentCfu")] + public Input? CurrentCfu { get; set; } + + /// + /// The name of the Flink Compute Pool. + /// + [Input("displayName")] + public Input? DisplayName { get; set; } + + /// + /// Environment objects represent an isolated namespace for your Confluent resources for organizational purposes. + /// + [Input("environment")] + public Input? Environment { get; set; } + + /// + /// (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`. + /// + [Input("kind")] + public Input? Kind { get; set; } + + /// + /// Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`. + /// + [Input("maxCfu")] + public Input? MaxCfu { get; set; } + + /// + /// The cloud service provider region that hosts the Flink Compute Pool. 
+ /// + [Input("region")] + public Input? Region { get; set; } + + /// + /// (Required String) The Confluent Resource Name of the Flink Compute Pool. + /// + [Input("resourceName")] + public Input? ResourceName { get; set; } + + /// + /// (Required String) The API endpoint of the Flink Compute Pool. + /// + [Input("restEndpoint")] + public Input? RestEndpoint { get; set; } + + public FlinkComputePoolState() + { + } + public static new FlinkComputePoolState Empty => new FlinkComputePoolState(); + } +} diff --git a/sdk/dotnet/GetFlinkComputePool.cs b/sdk/dotnet/GetFlinkComputePool.cs new file mode 100644 index 00000000..77be0f40 --- /dev/null +++ b/sdk/dotnet/GetFlinkComputePool.cs @@ -0,0 +1,260 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.ConfluentCloud +{ + public static class GetFlinkComputePool + { + /// + /// [![Early Access](https://img.shields.io/badge/Lifecycle%20Stage-Early%20Access-%2300afba)](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy) + /// + /// > **Note:** `confluentcloud.FlinkComputePool` data source is available in **Early Access** for early adopters. Early Access features are introduced to gather customer feedback. This feature should be used only for evaluation and non-production testing purposes or to provide feedback to Confluent, particularly as it becomes more widely available in follow-on editions. + /// **Early Access** features are intended for evaluation use in development and testing environments only, and not for production use. The warranty, SLA, and Support Services provisions of your agreement with Confluent do not apply to Early Access features. 
Early Access features are considered to be a Proof of Concept as defined in the Confluent Cloud Terms of Service. Confluent may discontinue providing early access releases of the Early Access features at any time in Confluent’s sole discretion. + /// + /// `confluentcloud.FlinkComputePool` describes a Flink Compute Pool data source. + /// + /// {{% examples %}} + /// ## Example Usage + /// {{% example %}} + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using ConfluentCloud = Pulumi.ConfluentCloud; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var exampleUsingIdFlinkComputePool = ConfluentCloud.GetFlinkComputePool.Invoke(new() + /// { + /// Id = "lfcp-abc123", + /// Environment = new ConfluentCloud.Inputs.GetFlinkComputePoolEnvironmentInputArgs + /// { + /// Id = "env-xyz456", + /// }, + /// }); + /// + /// var exampleUsingNameFlinkComputePool = ConfluentCloud.GetFlinkComputePool.Invoke(new() + /// { + /// DisplayName = "my_compute_pool", + /// Environment = new ConfluentCloud.Inputs.GetFlinkComputePoolEnvironmentInputArgs + /// { + /// Id = "env-xyz456", + /// }, + /// }); + /// + /// return new Dictionary<string, object?> + /// { + /// ["exampleUsingId"] = exampleUsingIdFlinkComputePool, + /// ["exampleUsingName"] = exampleUsingNameFlinkComputePool, + /// }; + /// }); + /// ``` + /// {{% /example %}} + /// {{% /examples %}} + /// + public static Task InvokeAsync(GetFlinkComputePoolArgs args, InvokeOptions? options = null) + => global::Pulumi.Deployment.Instance.InvokeAsync("confluentcloud:index/getFlinkComputePool:getFlinkComputePool", args ?? 
new GetFlinkComputePoolArgs(), options.WithDefaults()); + + /// + /// [![Early Access](https://img.shields.io/badge/Lifecycle%20Stage-Early%20Access-%2300afba)](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy) + /// + /// > **Note:** `confluentcloud.FlinkComputePool` data source is available in **Early Access** for early adopters. Early Access features are introduced to gather customer feedback. This feature should be used only for evaluation and non-production testing purposes or to provide feedback to Confluent, particularly as it becomes more widely available in follow-on editions. + /// **Early Access** features are intended for evaluation use in development and testing environments only, and not for production use. The warranty, SLA, and Support Services provisions of your agreement with Confluent do not apply to Early Access features. Early Access features are considered to be a Proof of Concept as defined in the Confluent Cloud Terms of Service. Confluent may discontinue providing early access releases of the Early Access features at any time in Confluent’s sole discretion. + /// + /// `confluentcloud.FlinkComputePool` describes a Flink Compute Pool data source. 
+ /// + /// {{% examples %}} + /// ## Example Usage + /// {{% example %}} + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using ConfluentCloud = Pulumi.ConfluentCloud; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var exampleUsingIdFlinkComputePool = ConfluentCloud.GetFlinkComputePool.Invoke(new() + /// { + /// Id = "lfcp-abc123", + /// Environment = new ConfluentCloud.Inputs.GetFlinkComputePoolEnvironmentInputArgs + /// { + /// Id = "env-xyz456", + /// }, + /// }); + /// + /// var exampleUsingNameFlinkComputePool = ConfluentCloud.GetFlinkComputePool.Invoke(new() + /// { + /// DisplayName = "my_compute_pool", + /// Environment = new ConfluentCloud.Inputs.GetFlinkComputePoolEnvironmentInputArgs + /// { + /// Id = "env-xyz456", + /// }, + /// }); + /// + /// return new Dictionary<string, object?> + /// { + /// ["exampleUsingId"] = exampleUsingIdFlinkComputePool, + /// ["exampleUsingName"] = exampleUsingNameFlinkComputePool, + /// }; + /// }); + /// ``` + /// {{% /example %}} + /// {{% /examples %}} + /// + public static Output Invoke(GetFlinkComputePoolInvokeArgs args, InvokeOptions? options = null) + => global::Pulumi.Deployment.Instance.Invoke("confluentcloud:index/getFlinkComputePool:getFlinkComputePool", args ?? new GetFlinkComputePoolInvokeArgs(), options.WithDefaults()); + } + + + public sealed class GetFlinkComputePoolArgs : global::Pulumi.InvokeArgs + { + /// + /// A human-readable name for the Flink Compute Pool. + /// + [Input("displayName")] + public string? DisplayName { get; set; } + + /// + /// (Required Configuration Block) supports the following: + /// + [Input("environment", required: true)] + public Inputs.GetFlinkComputePoolEnvironmentArgs Environment { get; set; } = null!; + + /// + /// The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. 
+ /// + /// > **Note:** Exactly one from the `id` and `display_name` attributes must be specified. + /// + [Input("id")] + public string? Id { get; set; } + + public GetFlinkComputePoolArgs() + { + } + public static new GetFlinkComputePoolArgs Empty => new GetFlinkComputePoolArgs(); + } + + public sealed class GetFlinkComputePoolInvokeArgs : global::Pulumi.InvokeArgs + { + /// + /// A human-readable name for the Flink Compute Pool. + /// + [Input("displayName")] + public Input? DisplayName { get; set; } + + /// + /// (Required Configuration Block) supports the following: + /// + [Input("environment", required: true)] + public Input Environment { get; set; } = null!; + + /// + /// The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. + /// + /// > **Note:** Exactly one from the `id` and `display_name` attributes must be specified. + /// + [Input("id")] + public Input? Id { get; set; } + + public GetFlinkComputePoolInvokeArgs() + { + } + public static new GetFlinkComputePoolInvokeArgs Empty => new GetFlinkComputePoolInvokeArgs(); + } + + + [OutputType] + public sealed class GetFlinkComputePoolResult + { + /// + /// (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`. + /// + public readonly string ApiVersion; + /// + /// (Required String) The cloud service provider that runs the Flink Compute Pool. + /// + public readonly string Cloud; + /// + /// (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool. + /// + public readonly int CurrentCfu; + /// + /// (Required String) The name of the Flink Compute Pool. + /// + public readonly string DisplayName; + /// + /// (Required Configuration Block) supports the following: + /// + public readonly Outputs.GetFlinkComputePoolEnvironmentResult Environment; + /// + /// (Required String) The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`. 
+ /// + public readonly string Id; + /// + /// (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`. + /// + public readonly string Kind; + /// + /// (Required Integer) Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. + /// + public readonly int MaxCfu; + /// + /// (Required String) The cloud service provider region that hosts the Flink Compute Pool. + /// + public readonly string Region; + /// + /// (Required String) The Confluent Resource Name of the Flink Compute Pool. + /// + public readonly string ResourceName; + /// + /// (Required String) The API endpoint of the Flink Compute Pool. + /// + public readonly string RestEndpoint; + + [OutputConstructor] + private GetFlinkComputePoolResult( + string apiVersion, + + string cloud, + + int currentCfu, + + string displayName, + + Outputs.GetFlinkComputePoolEnvironmentResult environment, + + string id, + + string kind, + + int maxCfu, + + string region, + + string resourceName, + + string restEndpoint) + { + ApiVersion = apiVersion; + Cloud = cloud; + CurrentCfu = currentCfu; + DisplayName = displayName; + Environment = environment; + Id = id; + Kind = kind; + MaxCfu = maxCfu; + Region = region; + ResourceName = resourceName; + RestEndpoint = restEndpoint; + } + } +} diff --git a/sdk/dotnet/GetRoleBinding.cs b/sdk/dotnet/GetRoleBinding.cs index 13558446..31e7abae 100644 --- a/sdk/dotnet/GetRoleBinding.cs +++ b/sdk/dotnet/GetRoleBinding.cs @@ -16,6 +16,8 @@ public static class GetRoleBinding /// /// `confluentcloud.RoleBinding` describes a Role Binding. /// + /// > **Note:** For more information on the Role Bindings, see [Predefined RBAC roles in Confluent Cloud](https://docs.confluent.io/cloud/current/access-management/access-control/rbac/predefined-rbac-roles.html). 
+ /// /// {{% examples %}} /// ## Example Usage /// {{% example %}} @@ -50,6 +52,8 @@ public static Task InvokeAsync(GetRoleBindingArgs args, In /// /// `confluentcloud.RoleBinding` describes a Role Binding. /// + /// > **Note:** For more information on the Role Bindings, see [Predefined RBAC roles in Confluent Cloud](https://docs.confluent.io/cloud/current/access-management/access-control/rbac/predefined-rbac-roles.html). + /// /// {{% examples %}} /// ## Example Usage /// {{% example %}} diff --git a/sdk/dotnet/Inputs/ClusterLinkDestinationKafkaClusterCredentialsArgs.cs b/sdk/dotnet/Inputs/ClusterLinkDestinationKafkaClusterCredentialsArgs.cs index 54e7021f..b805cebd 100644 --- a/sdk/dotnet/Inputs/ClusterLinkDestinationKafkaClusterCredentialsArgs.cs +++ b/sdk/dotnet/Inputs/ClusterLinkDestinationKafkaClusterCredentialsArgs.cs @@ -33,8 +33,6 @@ public Input? Key /// /// The Kafka API Secret. - /// - /// > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). /// public Input? Secret { diff --git a/sdk/dotnet/Inputs/ClusterLinkDestinationKafkaClusterCredentialsGetArgs.cs b/sdk/dotnet/Inputs/ClusterLinkDestinationKafkaClusterCredentialsGetArgs.cs index 2df3f05f..5bf06f7a 100644 --- a/sdk/dotnet/Inputs/ClusterLinkDestinationKafkaClusterCredentialsGetArgs.cs +++ b/sdk/dotnet/Inputs/ClusterLinkDestinationKafkaClusterCredentialsGetArgs.cs @@ -33,8 +33,6 @@ public Input? Key /// /// The Kafka API Secret. - /// - /// > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). /// public Input? 
Secret { diff --git a/sdk/dotnet/Inputs/ClusterLinkLocalKafkaClusterCredentialsArgs.cs b/sdk/dotnet/Inputs/ClusterLinkLocalKafkaClusterCredentialsArgs.cs index b5530d24..da98936e 100644 --- a/sdk/dotnet/Inputs/ClusterLinkLocalKafkaClusterCredentialsArgs.cs +++ b/sdk/dotnet/Inputs/ClusterLinkLocalKafkaClusterCredentialsArgs.cs @@ -33,8 +33,6 @@ public Input? Key /// /// The Kafka API Secret. - /// - /// > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). /// public Input? Secret { diff --git a/sdk/dotnet/Inputs/ClusterLinkLocalKafkaClusterCredentialsGetArgs.cs b/sdk/dotnet/Inputs/ClusterLinkLocalKafkaClusterCredentialsGetArgs.cs index c1240aa3..f801e43e 100644 --- a/sdk/dotnet/Inputs/ClusterLinkLocalKafkaClusterCredentialsGetArgs.cs +++ b/sdk/dotnet/Inputs/ClusterLinkLocalKafkaClusterCredentialsGetArgs.cs @@ -33,8 +33,6 @@ public Input? Key /// /// The Kafka API Secret. - /// - /// > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). /// public Input? Secret { diff --git a/sdk/dotnet/Inputs/ClusterLinkRemoteKafkaClusterCredentialsArgs.cs b/sdk/dotnet/Inputs/ClusterLinkRemoteKafkaClusterCredentialsArgs.cs index c4ff345b..27fbc309 100644 --- a/sdk/dotnet/Inputs/ClusterLinkRemoteKafkaClusterCredentialsArgs.cs +++ b/sdk/dotnet/Inputs/ClusterLinkRemoteKafkaClusterCredentialsArgs.cs @@ -33,8 +33,6 @@ public Input? Key /// /// The Kafka API Secret. - /// - /// > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). 
/// public Input? Secret { diff --git a/sdk/dotnet/Inputs/ClusterLinkRemoteKafkaClusterCredentialsGetArgs.cs b/sdk/dotnet/Inputs/ClusterLinkRemoteKafkaClusterCredentialsGetArgs.cs index fa2f85cb..1abbabdd 100644 --- a/sdk/dotnet/Inputs/ClusterLinkRemoteKafkaClusterCredentialsGetArgs.cs +++ b/sdk/dotnet/Inputs/ClusterLinkRemoteKafkaClusterCredentialsGetArgs.cs @@ -33,8 +33,6 @@ public Input? Key /// /// The Kafka API Secret. - /// - /// > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). /// public Input? Secret { diff --git a/sdk/dotnet/Inputs/ClusterLinkSourceKafkaClusterCredentialsArgs.cs b/sdk/dotnet/Inputs/ClusterLinkSourceKafkaClusterCredentialsArgs.cs index 91c07405..d124d70a 100644 --- a/sdk/dotnet/Inputs/ClusterLinkSourceKafkaClusterCredentialsArgs.cs +++ b/sdk/dotnet/Inputs/ClusterLinkSourceKafkaClusterCredentialsArgs.cs @@ -33,8 +33,6 @@ public Input? Key /// /// The Kafka API Secret. - /// - /// > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). /// public Input? Secret { diff --git a/sdk/dotnet/Inputs/ClusterLinkSourceKafkaClusterCredentialsGetArgs.cs b/sdk/dotnet/Inputs/ClusterLinkSourceKafkaClusterCredentialsGetArgs.cs index df949ae0..5a70c8a1 100644 --- a/sdk/dotnet/Inputs/ClusterLinkSourceKafkaClusterCredentialsGetArgs.cs +++ b/sdk/dotnet/Inputs/ClusterLinkSourceKafkaClusterCredentialsGetArgs.cs @@ -33,8 +33,6 @@ public Input? Key /// /// The Kafka API Secret. 
- /// - /// > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). /// public Input? Secret { diff --git a/sdk/dotnet/Inputs/FlinkComputePoolEnvironmentArgs.cs b/sdk/dotnet/Inputs/FlinkComputePoolEnvironmentArgs.cs new file mode 100644 index 00000000..8204fb28 --- /dev/null +++ b/sdk/dotnet/Inputs/FlinkComputePoolEnvironmentArgs.cs @@ -0,0 +1,26 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.ConfluentCloud.Inputs +{ + + public sealed class FlinkComputePoolEnvironmentArgs : global::Pulumi.ResourceArgs + { + /// + /// The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`. + /// + [Input("id", required: true)] + public Input Id { get; set; } = null!; + + public FlinkComputePoolEnvironmentArgs() + { + } + public static new FlinkComputePoolEnvironmentArgs Empty => new FlinkComputePoolEnvironmentArgs(); + } +} diff --git a/sdk/dotnet/Inputs/FlinkComputePoolEnvironmentGetArgs.cs b/sdk/dotnet/Inputs/FlinkComputePoolEnvironmentGetArgs.cs new file mode 100644 index 00000000..c7681602 --- /dev/null +++ b/sdk/dotnet/Inputs/FlinkComputePoolEnvironmentGetArgs.cs @@ -0,0 +1,26 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.ConfluentCloud.Inputs +{ + + public sealed class FlinkComputePoolEnvironmentGetArgs : global::Pulumi.ResourceArgs + { + /// + /// The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`. + /// + [Input("id", required: true)] + public Input Id { get; set; } = null!; + + public FlinkComputePoolEnvironmentGetArgs() + { + } + public static new FlinkComputePoolEnvironmentGetArgs Empty => new FlinkComputePoolEnvironmentGetArgs(); + } +} diff --git a/sdk/dotnet/Inputs/GetFlinkComputePoolEnvironment.cs b/sdk/dotnet/Inputs/GetFlinkComputePoolEnvironment.cs new file mode 100644 index 00000000..57854b10 --- /dev/null +++ b/sdk/dotnet/Inputs/GetFlinkComputePoolEnvironment.cs @@ -0,0 +1,28 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.ConfluentCloud.Inputs +{ + + public sealed class GetFlinkComputePoolEnvironmentArgs : global::Pulumi.InvokeArgs + { + /// + /// The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. + /// + /// > **Note:** Exactly one from the `id` and `display_name` attributes must be specified. 
+ /// + [Input("id", required: true)] + public string Id { get; set; } = null!; + + public GetFlinkComputePoolEnvironmentArgs() + { + } + public static new GetFlinkComputePoolEnvironmentArgs Empty => new GetFlinkComputePoolEnvironmentArgs(); + } +} diff --git a/sdk/dotnet/Inputs/GetFlinkComputePoolEnvironmentArgs.cs b/sdk/dotnet/Inputs/GetFlinkComputePoolEnvironmentArgs.cs new file mode 100644 index 00000000..06f3ed54 --- /dev/null +++ b/sdk/dotnet/Inputs/GetFlinkComputePoolEnvironmentArgs.cs @@ -0,0 +1,28 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.ConfluentCloud.Inputs +{ + + public sealed class GetFlinkComputePoolEnvironmentInputArgs : global::Pulumi.ResourceArgs + { + /// + /// The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. + /// + /// > **Note:** Exactly one from the `id` and `display_name` attributes must be specified. + /// + [Input("id", required: true)] + public Input Id { get; set; } = null!; + + public GetFlinkComputePoolEnvironmentInputArgs() + { + } + public static new GetFlinkComputePoolEnvironmentInputArgs Empty => new GetFlinkComputePoolEnvironmentInputArgs(); + } +} diff --git a/sdk/dotnet/Inputs/SchemaExporterCredentialsArgs.cs b/sdk/dotnet/Inputs/SchemaExporterCredentialsArgs.cs new file mode 100644 index 00000000..78d7d413 --- /dev/null +++ b/sdk/dotnet/Inputs/SchemaExporterCredentialsArgs.cs @@ -0,0 +1,52 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.ConfluentCloud.Inputs +{ + + public sealed class SchemaExporterCredentialsArgs : global::Pulumi.ResourceArgs + { + [Input("key", required: true)] + private Input? _key; + + /// + /// The Schema Registry API Key. + /// + public Input? Key + { + get => _key; + set + { + var emptySecret = Output.CreateSecret(0); + _key = Output.Tuple?, int>(value, emptySecret).Apply(t => t.Item1); + } + } + + [Input("secret", required: true)] + private Input? _secret; + + /// + /// The Schema Registry API Secret. + /// + public Input? Secret + { + get => _secret; + set + { + var emptySecret = Output.CreateSecret(0); + _secret = Output.Tuple?, int>(value, emptySecret).Apply(t => t.Item1); + } + } + + public SchemaExporterCredentialsArgs() + { + } + public static new SchemaExporterCredentialsArgs Empty => new SchemaExporterCredentialsArgs(); + } +} diff --git a/sdk/dotnet/Inputs/SchemaExporterCredentialsGetArgs.cs b/sdk/dotnet/Inputs/SchemaExporterCredentialsGetArgs.cs new file mode 100644 index 00000000..72960a65 --- /dev/null +++ b/sdk/dotnet/Inputs/SchemaExporterCredentialsGetArgs.cs @@ -0,0 +1,52 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.ConfluentCloud.Inputs +{ + + public sealed class SchemaExporterCredentialsGetArgs : global::Pulumi.ResourceArgs + { + [Input("key", required: true)] + private Input? _key; + + /// + /// The Schema Registry API Key. + /// + public Input? 
Key + { + get => _key; + set + { + var emptySecret = Output.CreateSecret(0); + _key = Output.Tuple?, int>(value, emptySecret).Apply(t => t.Item1); + } + } + + [Input("secret", required: true)] + private Input? _secret; + + /// + /// The Schema Registry API Secret. + /// + public Input? Secret + { + get => _secret; + set + { + var emptySecret = Output.CreateSecret(0); + _secret = Output.Tuple?, int>(value, emptySecret).Apply(t => t.Item1); + } + } + + public SchemaExporterCredentialsGetArgs() + { + } + public static new SchemaExporterCredentialsGetArgs Empty => new SchemaExporterCredentialsGetArgs(); + } +} diff --git a/sdk/dotnet/Inputs/SchemaExporterDestinationSchemaRegistryClusterArgs.cs b/sdk/dotnet/Inputs/SchemaExporterDestinationSchemaRegistryClusterArgs.cs new file mode 100644 index 00000000..aa938216 --- /dev/null +++ b/sdk/dotnet/Inputs/SchemaExporterDestinationSchemaRegistryClusterArgs.cs @@ -0,0 +1,38 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.ConfluentCloud.Inputs +{ + + public sealed class SchemaExporterDestinationSchemaRegistryClusterArgs : global::Pulumi.ResourceArgs + { + [Input("credentials", required: true)] + private Input? _credentials; + public Input? Credentials + { + get => _credentials; + set + { + var emptySecret = Output.CreateSecret(0); + _credentials = Output.Tuple?, int>(value, emptySecret).Apply(t => t.Item1); + } + } + + /// + /// The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). 
+ /// + [Input("restEndpoint", required: true)] + public Input RestEndpoint { get; set; } = null!; + + public SchemaExporterDestinationSchemaRegistryClusterArgs() + { + } + public static new SchemaExporterDestinationSchemaRegistryClusterArgs Empty => new SchemaExporterDestinationSchemaRegistryClusterArgs(); + } +} diff --git a/sdk/dotnet/Inputs/SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs.cs b/sdk/dotnet/Inputs/SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs.cs new file mode 100644 index 00000000..b880b2c4 --- /dev/null +++ b/sdk/dotnet/Inputs/SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs.cs @@ -0,0 +1,52 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.ConfluentCloud.Inputs +{ + + public sealed class SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs : global::Pulumi.ResourceArgs + { + [Input("key", required: true)] + private Input? _key; + + /// + /// The Schema Registry API Key. + /// + public Input? Key + { + get => _key; + set + { + var emptySecret = Output.CreateSecret(0); + _key = Output.Tuple?, int>(value, emptySecret).Apply(t => t.Item1); + } + } + + [Input("secret", required: true)] + private Input? _secret; + + /// + /// The Schema Registry API Secret. + /// + public Input? 
Secret + { + get => _secret; + set + { + var emptySecret = Output.CreateSecret(0); + _secret = Output.Tuple?, int>(value, emptySecret).Apply(t => t.Item1); + } + } + + public SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs() + { + } + public static new SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs Empty => new SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs(); + } +} diff --git a/sdk/dotnet/Inputs/SchemaExporterDestinationSchemaRegistryClusterCredentialsGetArgs.cs b/sdk/dotnet/Inputs/SchemaExporterDestinationSchemaRegistryClusterCredentialsGetArgs.cs new file mode 100644 index 00000000..25b0a6b5 --- /dev/null +++ b/sdk/dotnet/Inputs/SchemaExporterDestinationSchemaRegistryClusterCredentialsGetArgs.cs @@ -0,0 +1,52 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.ConfluentCloud.Inputs +{ + + public sealed class SchemaExporterDestinationSchemaRegistryClusterCredentialsGetArgs : global::Pulumi.ResourceArgs + { + [Input("key", required: true)] + private Input? _key; + + /// + /// The Schema Registry API Key. + /// + public Input? Key + { + get => _key; + set + { + var emptySecret = Output.CreateSecret(0); + _key = Output.Tuple?, int>(value, emptySecret).Apply(t => t.Item1); + } + } + + [Input("secret", required: true)] + private Input? _secret; + + /// + /// The Schema Registry API Secret. + /// + public Input? 
Secret + { + get => _secret; + set + { + var emptySecret = Output.CreateSecret(0); + _secret = Output.Tuple?, int>(value, emptySecret).Apply(t => t.Item1); + } + } + + public SchemaExporterDestinationSchemaRegistryClusterCredentialsGetArgs() + { + } + public static new SchemaExporterDestinationSchemaRegistryClusterCredentialsGetArgs Empty => new SchemaExporterDestinationSchemaRegistryClusterCredentialsGetArgs(); + } +} diff --git a/sdk/dotnet/Inputs/SchemaExporterDestinationSchemaRegistryClusterGetArgs.cs b/sdk/dotnet/Inputs/SchemaExporterDestinationSchemaRegistryClusterGetArgs.cs new file mode 100644 index 00000000..202d5fae --- /dev/null +++ b/sdk/dotnet/Inputs/SchemaExporterDestinationSchemaRegistryClusterGetArgs.cs @@ -0,0 +1,38 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.ConfluentCloud.Inputs +{ + + public sealed class SchemaExporterDestinationSchemaRegistryClusterGetArgs : global::Pulumi.ResourceArgs + { + [Input("credentials", required: true)] + private Input? _credentials; + public Input? Credentials + { + get => _credentials; + set + { + var emptySecret = Output.CreateSecret(0); + _credentials = Output.Tuple?, int>(value, emptySecret).Apply(t => t.Item1); + } + } + + /// + /// The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). 
+ /// + [Input("restEndpoint", required: true)] + public Input RestEndpoint { get; set; } = null!; + + public SchemaExporterDestinationSchemaRegistryClusterGetArgs() + { + } + public static new SchemaExporterDestinationSchemaRegistryClusterGetArgs Empty => new SchemaExporterDestinationSchemaRegistryClusterGetArgs(); + } +} diff --git a/sdk/dotnet/Inputs/SchemaExporterSchemaRegistryClusterArgs.cs b/sdk/dotnet/Inputs/SchemaExporterSchemaRegistryClusterArgs.cs new file mode 100644 index 00000000..c294ccbe --- /dev/null +++ b/sdk/dotnet/Inputs/SchemaExporterSchemaRegistryClusterArgs.cs @@ -0,0 +1,26 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.ConfluentCloud.Inputs +{ + + public sealed class SchemaExporterSchemaRegistryClusterArgs : global::Pulumi.ResourceArgs + { + /// + /// The ID of the Schema Registry cluster, for example, `lsrc-abc123`. + /// + [Input("id", required: true)] + public Input Id { get; set; } = null!; + + public SchemaExporterSchemaRegistryClusterArgs() + { + } + public static new SchemaExporterSchemaRegistryClusterArgs Empty => new SchemaExporterSchemaRegistryClusterArgs(); + } +} diff --git a/sdk/dotnet/Inputs/SchemaExporterSchemaRegistryClusterGetArgs.cs b/sdk/dotnet/Inputs/SchemaExporterSchemaRegistryClusterGetArgs.cs new file mode 100644 index 00000000..7a5a335c --- /dev/null +++ b/sdk/dotnet/Inputs/SchemaExporterSchemaRegistryClusterGetArgs.cs @@ -0,0 +1,26 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.ConfluentCloud.Inputs +{ + + public sealed class SchemaExporterSchemaRegistryClusterGetArgs : global::Pulumi.ResourceArgs + { + /// + /// The ID of the Schema Registry cluster, for example, `lsrc-abc123`. + /// + [Input("id", required: true)] + public Input Id { get; set; } = null!; + + public SchemaExporterSchemaRegistryClusterGetArgs() + { + } + public static new SchemaExporterSchemaRegistryClusterGetArgs Empty => new SchemaExporterSchemaRegistryClusterGetArgs(); + } +} diff --git a/sdk/dotnet/KafkaAcl.cs b/sdk/dotnet/KafkaAcl.cs index d3053c34..f3eb7a0a 100644 --- a/sdk/dotnet/KafkaAcl.cs +++ b/sdk/dotnet/KafkaAcl.cs @@ -12,7 +12,7 @@ namespace Pulumi.ConfluentCloud /// /// ## Import /// - /// You can import Kafka ACLs by using the Kafka cluster ID and attributes of `confluent_kafka_acl` resource in the format `<Kafka cluster ID>/<Kafka ACL resource type>#<Kafka ACL resource name>#<Kafka ACL pattern type>#<Kafka ACL principal>#<Kafka ACL host>#<Kafka ACL operation>#<Kafka ACL permission>`, for exampleOption #1Manage multiple Kafka clusters in the same Terraform workspace $ export CONFLUENT_CLOUD_API_KEY="<cloud_api_key>" $ export CONFLUENT_CLOUD_API_SECRET="<cloud_api_secret>" $ export IMPORT_KAFKA_API_KEY="<kafka_api_key>" $ export IMPORT_KAFKA_API_SECRET="<kafka_api_secret>" $ export IMPORT_KAFKA_REST_ENDPOINT="<kafka_rest_endpoint>" + /// You can import Kafka ACLs by using the Kafka cluster ID and attributes of `confluent_kafka_acl` resource in the format `<Kafka cluster ID>/<Kafka ACL resource type>#<Kafka ACL resource name>#<Kafka ACL pattern type>#<Kafka ACL principal>#<Kafka ACL host>#<Kafka ACL operation>#<Kafka ACL permission>`, for exampleOption #1Manage multiple Kafka clusters in the same Terraform workspace $ export IMPORT_KAFKA_API_KEY="<kafka_api_key>" $ export 
IMPORT_KAFKA_API_SECRET="<kafka_api_secret>" $ export IMPORT_KAFKA_REST_ENDPOINT="<kafka_rest_endpoint>" /// /// ```sh /// $ pulumi import confluentcloud:index/kafkaAcl:KafkaAcl describe-cluster "lkc-12345/CLUSTER#kafka-cluster#LITERAL#User:sa-xyz123#*#DESCRIBE#ALLOW" diff --git a/sdk/dotnet/Outputs/ClusterLinkDestinationKafkaClusterCredentials.cs b/sdk/dotnet/Outputs/ClusterLinkDestinationKafkaClusterCredentials.cs index a872e338..d2a935ef 100644 --- a/sdk/dotnet/Outputs/ClusterLinkDestinationKafkaClusterCredentials.cs +++ b/sdk/dotnet/Outputs/ClusterLinkDestinationKafkaClusterCredentials.cs @@ -19,8 +19,6 @@ public sealed class ClusterLinkDestinationKafkaClusterCredentials public readonly string Key; /// /// The Kafka API Secret. - /// - /// > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). /// public readonly string Secret; diff --git a/sdk/dotnet/Outputs/ClusterLinkLocalKafkaClusterCredentials.cs b/sdk/dotnet/Outputs/ClusterLinkLocalKafkaClusterCredentials.cs index 50b7f0ee..4e3004eb 100644 --- a/sdk/dotnet/Outputs/ClusterLinkLocalKafkaClusterCredentials.cs +++ b/sdk/dotnet/Outputs/ClusterLinkLocalKafkaClusterCredentials.cs @@ -19,8 +19,6 @@ public sealed class ClusterLinkLocalKafkaClusterCredentials public readonly string Key; /// /// The Kafka API Secret. - /// - /// > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). 
/// public readonly string Secret; diff --git a/sdk/dotnet/Outputs/ClusterLinkRemoteKafkaClusterCredentials.cs b/sdk/dotnet/Outputs/ClusterLinkRemoteKafkaClusterCredentials.cs index d70a4872..78fa7fde 100644 --- a/sdk/dotnet/Outputs/ClusterLinkRemoteKafkaClusterCredentials.cs +++ b/sdk/dotnet/Outputs/ClusterLinkRemoteKafkaClusterCredentials.cs @@ -19,8 +19,6 @@ public sealed class ClusterLinkRemoteKafkaClusterCredentials public readonly string Key; /// /// The Kafka API Secret. - /// - /// > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). /// public readonly string Secret; diff --git a/sdk/dotnet/Outputs/ClusterLinkSourceKafkaClusterCredentials.cs b/sdk/dotnet/Outputs/ClusterLinkSourceKafkaClusterCredentials.cs index 01db8645..75bf982f 100644 --- a/sdk/dotnet/Outputs/ClusterLinkSourceKafkaClusterCredentials.cs +++ b/sdk/dotnet/Outputs/ClusterLinkSourceKafkaClusterCredentials.cs @@ -19,8 +19,6 @@ public sealed class ClusterLinkSourceKafkaClusterCredentials public readonly string Key; /// /// The Kafka API Secret. - /// - /// > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). /// public readonly string Secret; diff --git a/sdk/dotnet/Outputs/FlinkComputePoolEnvironment.cs b/sdk/dotnet/Outputs/FlinkComputePoolEnvironment.cs new file mode 100644 index 00000000..a6bee702 --- /dev/null +++ b/sdk/dotnet/Outputs/FlinkComputePoolEnvironment.cs @@ -0,0 +1,27 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.ConfluentCloud.Outputs +{ + + [OutputType] + public sealed class FlinkComputePoolEnvironment + { + /// + /// The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`. + /// + public readonly string Id; + + [OutputConstructor] + private FlinkComputePoolEnvironment(string id) + { + Id = id; + } + } +} diff --git a/sdk/dotnet/Outputs/GetFlinkComputePoolEnvironmentResult.cs b/sdk/dotnet/Outputs/GetFlinkComputePoolEnvironmentResult.cs new file mode 100644 index 00000000..e73804d7 --- /dev/null +++ b/sdk/dotnet/Outputs/GetFlinkComputePoolEnvironmentResult.cs @@ -0,0 +1,29 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.ConfluentCloud.Outputs +{ + + [OutputType] + public sealed class GetFlinkComputePoolEnvironmentResult + { + /// + /// The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. + /// + /// > **Note:** Exactly one from the `id` and `display_name` attributes must be specified. + /// + public readonly string Id; + + [OutputConstructor] + private GetFlinkComputePoolEnvironmentResult(string id) + { + Id = id; + } + } +} diff --git a/sdk/dotnet/Outputs/SchemaExporterCredentials.cs b/sdk/dotnet/Outputs/SchemaExporterCredentials.cs new file mode 100644 index 00000000..942c8f8f --- /dev/null +++ b/sdk/dotnet/Outputs/SchemaExporterCredentials.cs @@ -0,0 +1,35 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.ConfluentCloud.Outputs +{ + + [OutputType] + public sealed class SchemaExporterCredentials + { + /// + /// The Schema Registry API Key. + /// + public readonly string Key; + /// + /// The Schema Registry API Secret. + /// + public readonly string Secret; + + [OutputConstructor] + private SchemaExporterCredentials( + string key, + + string secret) + { + Key = key; + Secret = secret; + } + } +} diff --git a/sdk/dotnet/Outputs/SchemaExporterDestinationSchemaRegistryCluster.cs b/sdk/dotnet/Outputs/SchemaExporterDestinationSchemaRegistryCluster.cs new file mode 100644 index 00000000..d28f2a8b --- /dev/null +++ b/sdk/dotnet/Outputs/SchemaExporterDestinationSchemaRegistryCluster.cs @@ -0,0 +1,32 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.ConfluentCloud.Outputs +{ + + [OutputType] + public sealed class SchemaExporterDestinationSchemaRegistryCluster + { + public readonly Outputs.SchemaExporterDestinationSchemaRegistryClusterCredentials Credentials; + /// + /// The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). 
+ /// + public readonly string RestEndpoint; + + [OutputConstructor] + private SchemaExporterDestinationSchemaRegistryCluster( + Outputs.SchemaExporterDestinationSchemaRegistryClusterCredentials credentials, + + string restEndpoint) + { + Credentials = credentials; + RestEndpoint = restEndpoint; + } + } +} diff --git a/sdk/dotnet/Outputs/SchemaExporterDestinationSchemaRegistryClusterCredentials.cs b/sdk/dotnet/Outputs/SchemaExporterDestinationSchemaRegistryClusterCredentials.cs new file mode 100644 index 00000000..6059a9c0 --- /dev/null +++ b/sdk/dotnet/Outputs/SchemaExporterDestinationSchemaRegistryClusterCredentials.cs @@ -0,0 +1,35 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.ConfluentCloud.Outputs +{ + + [OutputType] + public sealed class SchemaExporterDestinationSchemaRegistryClusterCredentials + { + /// + /// The Schema Registry API Key. + /// + public readonly string Key; + /// + /// The Schema Registry API Secret. + /// + public readonly string Secret; + + [OutputConstructor] + private SchemaExporterDestinationSchemaRegistryClusterCredentials( + string key, + + string secret) + { + Key = key; + Secret = secret; + } + } +} diff --git a/sdk/dotnet/Outputs/SchemaExporterSchemaRegistryCluster.cs b/sdk/dotnet/Outputs/SchemaExporterSchemaRegistryCluster.cs new file mode 100644 index 00000000..a0a37543 --- /dev/null +++ b/sdk/dotnet/Outputs/SchemaExporterSchemaRegistryCluster.cs @@ -0,0 +1,27 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.ConfluentCloud.Outputs +{ + + [OutputType] + public sealed class SchemaExporterSchemaRegistryCluster + { + /// + /// The ID of the Schema Registry cluster, for example, `lsrc-abc123`. + /// + public readonly string Id; + + [OutputConstructor] + private SchemaExporterSchemaRegistryCluster(string id) + { + Id = id; + } + } +} diff --git a/sdk/dotnet/SchemaExporter.cs b/sdk/dotnet/SchemaExporter.cs new file mode 100644 index 00000000..6ee14744 --- /dev/null +++ b/sdk/dotnet/SchemaExporter.cs @@ -0,0 +1,331 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.ConfluentCloud +{ + /// + /// ## Import + /// + /// You can import a Schema Exporter by using the Schema Registry cluster ID, Schema Exporter name in the format `<Schema Registry cluster ID>/<Schema Exporter name>`, for example$ export IMPORT_SCHEMA_REGISTRY_API_KEY="<schema_registry_api_key>" $ export IMPORT_SCHEMA_REGISTRY_API_SECRET="<schema_registry_api_secret>" $ export IMPORT_SCHEMA_REGISTRY_REST_ENDPOINT="<schema_registry_rest_endpoint>" + /// + /// ```sh + /// $ pulumi import confluentcloud:index/schemaExporter:SchemaExporter main lsrc-8wrx70/test-exporter + /// ``` + /// + /// !> **Warning:** Do not forget to delete terminal command history afterwards for security purposes. 
+ /// + [ConfluentCloudResourceType("confluentcloud:index/schemaExporter:SchemaExporter")] + public partial class SchemaExporter : global::Pulumi.CustomResource + { + /// + /// Block for custom *nonsensitive* configuration properties: + /// + [Output("config")] + public Output> Config { get; private set; } = null!; + + /// + /// Customized context of the exporter if `context_type` is set to `CUSTOM`. + /// + [Output("context")] + public Output Context { get; private set; } = null!; + + /// + /// Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`. + /// + [Output("contextType")] + public Output ContextType { get; private set; } = null!; + + /// + /// The Cluster API Credentials. + /// + [Output("credentials")] + public Output Credentials { get; private set; } = null!; + + [Output("destinationSchemaRegistryCluster")] + public Output DestinationSchemaRegistryCluster { get; private set; } = null!; + + /// + /// The configuration setting name. + /// + [Output("name")] + public Output Name { get; private set; } = null!; + + /// + /// The flag to control whether to reset the exporter when updating configs. Defaults to `false`. + /// + [Output("resetOnUpdate")] + public Output ResetOnUpdate { get; private set; } = null!; + + /// + /// The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + /// + [Output("restEndpoint")] + public Output RestEndpoint { get; private set; } = null!; + + [Output("schemaRegistryCluster")] + public Output SchemaRegistryCluster { get; private set; } = null!; + + /// + /// The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`. + /// + [Output("status")] + public Output Status { get; private set; } = null!; + + /// + /// Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. 
For example, `dc_${subject}` for the subject orders will map to the destination subject name `dc_orders`. + /// + [Output("subjectRenameFormat")] + public Output SubjectRenameFormat { get; private set; } = null!; + + /// + /// Name of each exporter subject. + /// + [Output("subjects")] + public Output> Subjects { get; private set; } = null!; + + + /// + /// Create a SchemaExporter resource with the given unique name, arguments, and options. + /// + /// + /// The unique name of the resource + /// The arguments used to populate this resource's properties + /// A bag of options that control this resource's behavior + public SchemaExporter(string name, SchemaExporterArgs args, CustomResourceOptions? options = null) + : base("confluentcloud:index/schemaExporter:SchemaExporter", name, args ?? new SchemaExporterArgs(), MakeResourceOptions(options, "")) + { + } + + private SchemaExporter(string name, Input id, SchemaExporterState? state = null, CustomResourceOptions? options = null) + : base("confluentcloud:index/schemaExporter:SchemaExporter", name, state, MakeResourceOptions(options, id)) + { + } + + private static CustomResourceOptions MakeResourceOptions(CustomResourceOptions? options, Input? id) + { + var defaultOptions = new CustomResourceOptions + { + Version = Utilities.Version, + AdditionalSecretOutputs = + { + "credentials", + }, + }; + var merged = CustomResourceOptions.Merge(defaultOptions, options); + // Override the ID if one was specified for consistency with other language SDKs. + merged.Id = id ?? merged.Id; + return merged; + } + /// + /// Get an existing SchemaExporter resource's state with the given name, ID, and optional extra + /// properties used to qualify the lookup. + /// + /// + /// The unique name of the resulting resource. + /// The unique provider ID of the resource to lookup. + /// Any extra arguments used during the lookup. 
+ /// A bag of options that control this resource's behavior + public static SchemaExporter Get(string name, Input id, SchemaExporterState? state = null, CustomResourceOptions? options = null) + { + return new SchemaExporter(name, id, state, options); + } + } + + public sealed class SchemaExporterArgs : global::Pulumi.ResourceArgs + { + [Input("config")] + private InputMap? _config; + + /// + /// Block for custom *nonsensitive* configuration properties: + /// + public InputMap Config + { + get => _config ?? (_config = new InputMap()); + set => _config = value; + } + + /// + /// Customized context of the exporter if `context_type` is set to `CUSTOM`. + /// + [Input("context")] + public Input? Context { get; set; } + + /// + /// Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`. + /// + [Input("contextType")] + public Input? ContextType { get; set; } + + [Input("credentials")] + private Input? _credentials; + + /// + /// The Cluster API Credentials. + /// + public Input? Credentials + { + get => _credentials; + set + { + var emptySecret = Output.CreateSecret(0); + _credentials = Output.Tuple?, int>(value, emptySecret).Apply(t => t.Item1); + } + } + + [Input("destinationSchemaRegistryCluster", required: true)] + public Input DestinationSchemaRegistryCluster { get; set; } = null!; + + /// + /// The configuration setting name. + /// + [Input("name")] + public Input? Name { get; set; } + + /// + /// The flag to control whether to reset the exporter when updating configs. Defaults to `false`. + /// + [Input("resetOnUpdate")] + public Input? ResetOnUpdate { get; set; } + + /// + /// The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + /// + [Input("restEndpoint")] + public Input? RestEndpoint { get; set; } + + [Input("schemaRegistryCluster")] + public Input? SchemaRegistryCluster { get; set; } + + /// + /// The status of the schema exporter. 
Accepted values are: `RUNNING` and `PAUSED`. + /// + [Input("status")] + public Input? Status { get; set; } + + /// + /// Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dc_orders`. + /// + [Input("subjectRenameFormat")] + public Input? SubjectRenameFormat { get; set; } + + [Input("subjects")] + private InputList? _subjects; + + /// + /// Name of each exporter subject. + /// + public InputList Subjects + { + get => _subjects ?? (_subjects = new InputList()); + set => _subjects = value; + } + + public SchemaExporterArgs() + { + } + public static new SchemaExporterArgs Empty => new SchemaExporterArgs(); + } + + public sealed class SchemaExporterState : global::Pulumi.ResourceArgs + { + [Input("config")] + private InputMap? _config; + + /// + /// Block for custom *nonsensitive* configuration properties: + /// + public InputMap Config + { + get => _config ?? (_config = new InputMap()); + set => _config = value; + } + + /// + /// Customized context of the exporter if `context_type` is set to `CUSTOM`. + /// + [Input("context")] + public Input? Context { get; set; } + + /// + /// Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`. + /// + [Input("contextType")] + public Input? ContextType { get; set; } + + [Input("credentials")] + private Input? _credentials; + + /// + /// The Cluster API Credentials. + /// + public Input? Credentials + { + get => _credentials; + set + { + var emptySecret = Output.CreateSecret(0); + _credentials = Output.Tuple?, int>(value, emptySecret).Apply(t => t.Item1); + } + } + + [Input("destinationSchemaRegistryCluster")] + public Input? DestinationSchemaRegistryCluster { get; set; } + + /// + /// The configuration setting name. + /// + [Input("name")] + public Input? 
Name { get; set; } + + /// + /// The flag to control whether to reset the exporter when updating configs. Defaults to `false`. + /// + [Input("resetOnUpdate")] + public Input? ResetOnUpdate { get; set; } + + /// + /// The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + /// + [Input("restEndpoint")] + public Input? RestEndpoint { get; set; } + + [Input("schemaRegistryCluster")] + public Input? SchemaRegistryCluster { get; set; } + + /// + /// The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`. + /// + [Input("status")] + public Input? Status { get; set; } + + /// + /// Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dc_orders`. + /// + [Input("subjectRenameFormat")] + public Input? SubjectRenameFormat { get; set; } + + [Input("subjects")] + private InputList? _subjects; + + /// + /// Name of each exporter subject. + /// + public InputList Subjects + { + get => _subjects ?? (_subjects = new InputList()); + set => _subjects = value; + } + + public SchemaExporterState() + { + } + public static new SchemaExporterState Empty => new SchemaExporterState(); + } +} diff --git a/sdk/go/confluentcloud/flinkComputePool.go b/sdk/go/confluentcloud/flinkComputePool.go new file mode 100644 index 00000000..3f7bad18 --- /dev/null +++ b/sdk/go/confluentcloud/flinkComputePool.go @@ -0,0 +1,426 @@ +// Code generated by the Pulumi Terraform Bridge (tfgen) Tool DO NOT EDIT. +// *** WARNING: Do not edit by hand unless you're certain you know what you are doing! 
*** + +package confluentcloud + +import ( + "context" + "reflect" + + "errors" + "github.com/pulumi/pulumi-confluentcloud/sdk/go/confluentcloud/internal" + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" + "github.com/pulumi/pulumi/sdk/v3/go/pulumix" +) + +// ## Example Usage +// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-confluentcloud/sdk/go/confluentcloud" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// development, err := confluentcloud.NewEnvironment(ctx, "development", nil) +// if err != nil { +// return err +// } +// _, err = confluentcloud.NewFlinkComputePool(ctx, "main", &confluentcloud.FlinkComputePoolArgs{ +// DisplayName: pulumi.String("standard_compute_pool"), +// Cloud: pulumi.String("AWS"), +// Region: pulumi.String("us-east-1"), +// MaxCfu: pulumi.Int(5), +// Environment: &confluentcloud.FlinkComputePoolEnvironmentArgs{ +// Id: development.ID(), +// }, +// }) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` +// +// ## Import +// +// You can import a Flink Compute Pool by using Environment ID and Flink Compute Pool ID, in the format `/`. The following example shows how to import a Flink Compute Pool$ export CONFLUENT_CLOUD_API_KEY="" $ export CONFLUENT_CLOUD_API_SECRET="" +// +// ```sh +// +// $ pulumi import confluentcloud:index/flinkComputePool:FlinkComputePool main env-abc123/lfcp-abc123 +// +// ``` +// +// !> **Warning:** Do not forget to delete terminal command history afterwards for security purposes. +type FlinkComputePool struct { + pulumi.CustomResourceState + + // (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`. + ApiVersion pulumi.StringOutput `pulumi:"apiVersion"` + // The cloud service provider that runs the Flink Compute Pool. 
+ Cloud pulumi.StringOutput `pulumi:"cloud"` + // (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool. + CurrentCfu pulumi.IntOutput `pulumi:"currentCfu"` + // The name of the Flink Compute Pool. + DisplayName pulumi.StringOutput `pulumi:"displayName"` + // Environment objects represent an isolated namespace for your Confluent resources for organizational purposes. + Environment FlinkComputePoolEnvironmentOutput `pulumi:"environment"` + // (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`. + Kind pulumi.StringOutput `pulumi:"kind"` + // Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`. + MaxCfu pulumi.IntOutput `pulumi:"maxCfu"` + // The cloud service provider region that hosts the Flink Compute Pool. + Region pulumi.StringOutput `pulumi:"region"` + // (Required String) The Confluent Resource Name of the Flink Compute Pool. + ResourceName pulumi.StringOutput `pulumi:"resourceName"` + // (Required String) The API endpoint of the Flink Compute Pool. + RestEndpoint pulumi.StringOutput `pulumi:"restEndpoint"` +} + +// NewFlinkComputePool registers a new resource with the given unique name, arguments, and options. 
+func NewFlinkComputePool(ctx *pulumi.Context, + name string, args *FlinkComputePoolArgs, opts ...pulumi.ResourceOption) (*FlinkComputePool, error) { + if args == nil { + return nil, errors.New("missing one or more required arguments") + } + + if args.Cloud == nil { + return nil, errors.New("invalid value for required argument 'Cloud'") + } + if args.DisplayName == nil { + return nil, errors.New("invalid value for required argument 'DisplayName'") + } + if args.Environment == nil { + return nil, errors.New("invalid value for required argument 'Environment'") + } + if args.Region == nil { + return nil, errors.New("invalid value for required argument 'Region'") + } + opts = internal.PkgResourceDefaultOpts(opts) + var resource FlinkComputePool + err := ctx.RegisterResource("confluentcloud:index/flinkComputePool:FlinkComputePool", name, args, &resource, opts...) + if err != nil { + return nil, err + } + return &resource, nil +} + +// GetFlinkComputePool gets an existing FlinkComputePool resource's state with the given name, ID, and optional +// state properties that are used to uniquely qualify the lookup (nil if not required). +func GetFlinkComputePool(ctx *pulumi.Context, + name string, id pulumi.IDInput, state *FlinkComputePoolState, opts ...pulumi.ResourceOption) (*FlinkComputePool, error) { + var resource FlinkComputePool + err := ctx.ReadResource("confluentcloud:index/flinkComputePool:FlinkComputePool", name, id, state, &resource, opts...) + if err != nil { + return nil, err + } + return &resource, nil +} + +// Input properties used for looking up and filtering FlinkComputePool resources. +type flinkComputePoolState struct { + // (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`. + ApiVersion *string `pulumi:"apiVersion"` + // The cloud service provider that runs the Flink Compute Pool. 
+ Cloud *string `pulumi:"cloud"` + // (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool. + CurrentCfu *int `pulumi:"currentCfu"` + // The name of the Flink Compute Pool. + DisplayName *string `pulumi:"displayName"` + // Environment objects represent an isolated namespace for your Confluent resources for organizational purposes. + Environment *FlinkComputePoolEnvironment `pulumi:"environment"` + // (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`. + Kind *string `pulumi:"kind"` + // Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`. + MaxCfu *int `pulumi:"maxCfu"` + // The cloud service provider region that hosts the Flink Compute Pool. + Region *string `pulumi:"region"` + // (Required String) The Confluent Resource Name of the Flink Compute Pool. + ResourceName *string `pulumi:"resourceName"` + // (Required String) The API endpoint of the Flink Compute Pool. + RestEndpoint *string `pulumi:"restEndpoint"` +} + +type FlinkComputePoolState struct { + // (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`. + ApiVersion pulumi.StringPtrInput + // The cloud service provider that runs the Flink Compute Pool. + Cloud pulumi.StringPtrInput + // (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool. + CurrentCfu pulumi.IntPtrInput + // The name of the Flink Compute Pool. + DisplayName pulumi.StringPtrInput + // Environment objects represent an isolated namespace for your Confluent resources for organizational purposes. + Environment FlinkComputePoolEnvironmentPtrInput + // (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`. + Kind pulumi.StringPtrInput + // Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. 
The accepted values are: `5` and `10`. + MaxCfu pulumi.IntPtrInput + // The cloud service provider region that hosts the Flink Compute Pool. + Region pulumi.StringPtrInput + // (Required String) The Confluent Resource Name of the Flink Compute Pool. + ResourceName pulumi.StringPtrInput + // (Required String) The API endpoint of the Flink Compute Pool. + RestEndpoint pulumi.StringPtrInput +} + +func (FlinkComputePoolState) ElementType() reflect.Type { + return reflect.TypeOf((*flinkComputePoolState)(nil)).Elem() +} + +type flinkComputePoolArgs struct { + // The cloud service provider that runs the Flink Compute Pool. + Cloud string `pulumi:"cloud"` + // The name of the Flink Compute Pool. + DisplayName string `pulumi:"displayName"` + // Environment objects represent an isolated namespace for your Confluent resources for organizational purposes. + Environment FlinkComputePoolEnvironment `pulumi:"environment"` + // Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`. + MaxCfu *int `pulumi:"maxCfu"` + // The cloud service provider region that hosts the Flink Compute Pool. + Region string `pulumi:"region"` +} + +// The set of arguments for constructing a FlinkComputePool resource. +type FlinkComputePoolArgs struct { + // The cloud service provider that runs the Flink Compute Pool. + Cloud pulumi.StringInput + // The name of the Flink Compute Pool. + DisplayName pulumi.StringInput + // Environment objects represent an isolated namespace for your Confluent resources for organizational purposes. + Environment FlinkComputePoolEnvironmentInput + // Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`. + MaxCfu pulumi.IntPtrInput + // The cloud service provider region that hosts the Flink Compute Pool. 
+ Region pulumi.StringInput +} + +func (FlinkComputePoolArgs) ElementType() reflect.Type { + return reflect.TypeOf((*flinkComputePoolArgs)(nil)).Elem() +} + +type FlinkComputePoolInput interface { + pulumi.Input + + ToFlinkComputePoolOutput() FlinkComputePoolOutput + ToFlinkComputePoolOutputWithContext(ctx context.Context) FlinkComputePoolOutput +} + +func (*FlinkComputePool) ElementType() reflect.Type { + return reflect.TypeOf((**FlinkComputePool)(nil)).Elem() +} + +func (i *FlinkComputePool) ToFlinkComputePoolOutput() FlinkComputePoolOutput { + return i.ToFlinkComputePoolOutputWithContext(context.Background()) +} + +func (i *FlinkComputePool) ToFlinkComputePoolOutputWithContext(ctx context.Context) FlinkComputePoolOutput { + return pulumi.ToOutputWithContext(ctx, i).(FlinkComputePoolOutput) +} + +func (i *FlinkComputePool) ToOutput(ctx context.Context) pulumix.Output[*FlinkComputePool] { + return pulumix.Output[*FlinkComputePool]{ + OutputState: i.ToFlinkComputePoolOutputWithContext(ctx).OutputState, + } +} + +// FlinkComputePoolArrayInput is an input type that accepts FlinkComputePoolArray and FlinkComputePoolArrayOutput values. 
+// You can construct a concrete instance of `FlinkComputePoolArrayInput` via: +// +// FlinkComputePoolArray{ FlinkComputePoolArgs{...} } +type FlinkComputePoolArrayInput interface { + pulumi.Input + + ToFlinkComputePoolArrayOutput() FlinkComputePoolArrayOutput + ToFlinkComputePoolArrayOutputWithContext(context.Context) FlinkComputePoolArrayOutput +} + +type FlinkComputePoolArray []FlinkComputePoolInput + +func (FlinkComputePoolArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]*FlinkComputePool)(nil)).Elem() +} + +func (i FlinkComputePoolArray) ToFlinkComputePoolArrayOutput() FlinkComputePoolArrayOutput { + return i.ToFlinkComputePoolArrayOutputWithContext(context.Background()) +} + +func (i FlinkComputePoolArray) ToFlinkComputePoolArrayOutputWithContext(ctx context.Context) FlinkComputePoolArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(FlinkComputePoolArrayOutput) +} + +func (i FlinkComputePoolArray) ToOutput(ctx context.Context) pulumix.Output[[]*FlinkComputePool] { + return pulumix.Output[[]*FlinkComputePool]{ + OutputState: i.ToFlinkComputePoolArrayOutputWithContext(ctx).OutputState, + } +} + +// FlinkComputePoolMapInput is an input type that accepts FlinkComputePoolMap and FlinkComputePoolMapOutput values. 
+// You can construct a concrete instance of `FlinkComputePoolMapInput` via: +// +// FlinkComputePoolMap{ "key": FlinkComputePoolArgs{...} } +type FlinkComputePoolMapInput interface { + pulumi.Input + + ToFlinkComputePoolMapOutput() FlinkComputePoolMapOutput + ToFlinkComputePoolMapOutputWithContext(context.Context) FlinkComputePoolMapOutput +} + +type FlinkComputePoolMap map[string]FlinkComputePoolInput + +func (FlinkComputePoolMap) ElementType() reflect.Type { + return reflect.TypeOf((*map[string]*FlinkComputePool)(nil)).Elem() +} + +func (i FlinkComputePoolMap) ToFlinkComputePoolMapOutput() FlinkComputePoolMapOutput { + return i.ToFlinkComputePoolMapOutputWithContext(context.Background()) +} + +func (i FlinkComputePoolMap) ToFlinkComputePoolMapOutputWithContext(ctx context.Context) FlinkComputePoolMapOutput { + return pulumi.ToOutputWithContext(ctx, i).(FlinkComputePoolMapOutput) +} + +func (i FlinkComputePoolMap) ToOutput(ctx context.Context) pulumix.Output[map[string]*FlinkComputePool] { + return pulumix.Output[map[string]*FlinkComputePool]{ + OutputState: i.ToFlinkComputePoolMapOutputWithContext(ctx).OutputState, + } +} + +type FlinkComputePoolOutput struct{ *pulumi.OutputState } + +func (FlinkComputePoolOutput) ElementType() reflect.Type { + return reflect.TypeOf((**FlinkComputePool)(nil)).Elem() +} + +func (o FlinkComputePoolOutput) ToFlinkComputePoolOutput() FlinkComputePoolOutput { + return o +} + +func (o FlinkComputePoolOutput) ToFlinkComputePoolOutputWithContext(ctx context.Context) FlinkComputePoolOutput { + return o +} + +func (o FlinkComputePoolOutput) ToOutput(ctx context.Context) pulumix.Output[*FlinkComputePool] { + return pulumix.Output[*FlinkComputePool]{ + OutputState: o.OutputState, + } +} + +// (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`. 
+func (o FlinkComputePoolOutput) ApiVersion() pulumi.StringOutput { + return o.ApplyT(func(v *FlinkComputePool) pulumi.StringOutput { return v.ApiVersion }).(pulumi.StringOutput) +} + +// The cloud service provider that runs the Flink Compute Pool. +func (o FlinkComputePoolOutput) Cloud() pulumi.StringOutput { + return o.ApplyT(func(v *FlinkComputePool) pulumi.StringOutput { return v.Cloud }).(pulumi.StringOutput) +} + +// (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool. +func (o FlinkComputePoolOutput) CurrentCfu() pulumi.IntOutput { + return o.ApplyT(func(v *FlinkComputePool) pulumi.IntOutput { return v.CurrentCfu }).(pulumi.IntOutput) +} + +// The name of the Flink Compute Pool. +func (o FlinkComputePoolOutput) DisplayName() pulumi.StringOutput { + return o.ApplyT(func(v *FlinkComputePool) pulumi.StringOutput { return v.DisplayName }).(pulumi.StringOutput) +} + +// Environment objects represent an isolated namespace for your Confluent resources for organizational purposes. +func (o FlinkComputePoolOutput) Environment() FlinkComputePoolEnvironmentOutput { + return o.ApplyT(func(v *FlinkComputePool) FlinkComputePoolEnvironmentOutput { return v.Environment }).(FlinkComputePoolEnvironmentOutput) +} + +// (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`. +func (o FlinkComputePoolOutput) Kind() pulumi.StringOutput { + return o.ApplyT(func(v *FlinkComputePool) pulumi.StringOutput { return v.Kind }).(pulumi.StringOutput) +} + +// Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`. +func (o FlinkComputePoolOutput) MaxCfu() pulumi.IntOutput { + return o.ApplyT(func(v *FlinkComputePool) pulumi.IntOutput { return v.MaxCfu }).(pulumi.IntOutput) +} + +// The cloud service provider region that hosts the Flink Compute Pool. 
+func (o FlinkComputePoolOutput) Region() pulumi.StringOutput { + return o.ApplyT(func(v *FlinkComputePool) pulumi.StringOutput { return v.Region }).(pulumi.StringOutput) +} + +// (Required String) The Confluent Resource Name of the Flink Compute Pool. +func (o FlinkComputePoolOutput) ResourceName() pulumi.StringOutput { + return o.ApplyT(func(v *FlinkComputePool) pulumi.StringOutput { return v.ResourceName }).(pulumi.StringOutput) +} + +// (Required String) The API endpoint of the Flink Compute Pool. +func (o FlinkComputePoolOutput) RestEndpoint() pulumi.StringOutput { + return o.ApplyT(func(v *FlinkComputePool) pulumi.StringOutput { return v.RestEndpoint }).(pulumi.StringOutput) +} + +type FlinkComputePoolArrayOutput struct{ *pulumi.OutputState } + +func (FlinkComputePoolArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]*FlinkComputePool)(nil)).Elem() +} + +func (o FlinkComputePoolArrayOutput) ToFlinkComputePoolArrayOutput() FlinkComputePoolArrayOutput { + return o +} + +func (o FlinkComputePoolArrayOutput) ToFlinkComputePoolArrayOutputWithContext(ctx context.Context) FlinkComputePoolArrayOutput { + return o +} + +func (o FlinkComputePoolArrayOutput) ToOutput(ctx context.Context) pulumix.Output[[]*FlinkComputePool] { + return pulumix.Output[[]*FlinkComputePool]{ + OutputState: o.OutputState, + } +} + +func (o FlinkComputePoolArrayOutput) Index(i pulumi.IntInput) FlinkComputePoolOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) *FlinkComputePool { + return vs[0].([]*FlinkComputePool)[vs[1].(int)] + }).(FlinkComputePoolOutput) +} + +type FlinkComputePoolMapOutput struct{ *pulumi.OutputState } + +func (FlinkComputePoolMapOutput) ElementType() reflect.Type { + return reflect.TypeOf((*map[string]*FlinkComputePool)(nil)).Elem() +} + +func (o FlinkComputePoolMapOutput) ToFlinkComputePoolMapOutput() FlinkComputePoolMapOutput { + return o +} + +func (o FlinkComputePoolMapOutput) ToFlinkComputePoolMapOutputWithContext(ctx context.Context) 
FlinkComputePoolMapOutput { + return o +} + +func (o FlinkComputePoolMapOutput) ToOutput(ctx context.Context) pulumix.Output[map[string]*FlinkComputePool] { + return pulumix.Output[map[string]*FlinkComputePool]{ + OutputState: o.OutputState, + } +} + +func (o FlinkComputePoolMapOutput) MapIndex(k pulumi.StringInput) FlinkComputePoolOutput { + return pulumi.All(o, k).ApplyT(func(vs []interface{}) *FlinkComputePool { + return vs[0].(map[string]*FlinkComputePool)[vs[1].(string)] + }).(FlinkComputePoolOutput) +} + +func init() { + pulumi.RegisterInputType(reflect.TypeOf((*FlinkComputePoolInput)(nil)).Elem(), &FlinkComputePool{}) + pulumi.RegisterInputType(reflect.TypeOf((*FlinkComputePoolArrayInput)(nil)).Elem(), FlinkComputePoolArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*FlinkComputePoolMapInput)(nil)).Elem(), FlinkComputePoolMap{}) + pulumi.RegisterOutputType(FlinkComputePoolOutput{}) + pulumi.RegisterOutputType(FlinkComputePoolArrayOutput{}) + pulumi.RegisterOutputType(FlinkComputePoolMapOutput{}) +} diff --git a/sdk/go/confluentcloud/getFlinkComputePool.go b/sdk/go/confluentcloud/getFlinkComputePool.go new file mode 100644 index 00000000..40f8ed2f --- /dev/null +++ b/sdk/go/confluentcloud/getFlinkComputePool.go @@ -0,0 +1,216 @@ +// Code generated by the Pulumi Terraform Bridge (tfgen) Tool DO NOT EDIT. +// *** WARNING: Do not edit by hand unless you're certain you know what you are doing! *** + +package confluentcloud + +import ( + "context" + "reflect" + + "github.com/pulumi/pulumi-confluentcloud/sdk/go/confluentcloud/internal" + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" + "github.com/pulumi/pulumi/sdk/v3/go/pulumix" +) + +// [![Early Access](https://img.shields.io/badge/Lifecycle%20Stage-Early%20Access-%2300afba)](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy) +// +// > **Note:** `FlinkComputePool` data source is available in **Early Access** for early adopters. 
Early Access features are introduced to gather customer feedback. This feature should be used only for evaluation and non-production testing purposes or to provide feedback to Confluent, particularly as it becomes more widely available in follow-on editions.\ +// **Early Access** features are intended for evaluation use in development and testing environments only, and not for production use. The warranty, SLA, and Support Services provisions of your agreement with Confluent do not apply to Early Access features. Early Access features are considered to be a Proof of Concept as defined in the Confluent Cloud Terms of Service. Confluent may discontinue providing early access releases of the Early Access features at any time in Confluent’s sole discretion. +// +// `FlinkComputePool` describes a Flink Compute Pool data source. +// +// ## Example Usage +// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-confluentcloud/sdk/go/confluentcloud" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// exampleUsingIdFlinkComputePool, err := confluentcloud.LookupFlinkComputePool(ctx, &confluentcloud.LookupFlinkComputePoolArgs{ +// Id: pulumi.StringRef("lfcp-abc123"), +// Environment: confluentcloud.GetFlinkComputePoolEnvironment{ +// Id: "env-xyz456", +// }, +// }, nil) +// if err != nil { +// return err +// } +// ctx.Export("exampleUsingId", exampleUsingIdFlinkComputePool) +// exampleUsingNameFlinkComputePool, err := confluentcloud.LookupFlinkComputePool(ctx, &confluentcloud.LookupFlinkComputePoolArgs{ +// DisplayName: pulumi.StringRef("my_compute_pool"), +// Environment: confluentcloud.GetFlinkComputePoolEnvironment{ +// Id: "env-xyz456", +// }, +// }, nil) +// if err != nil { +// return err +// } +// ctx.Export("exampleUsingName", exampleUsingNameFlinkComputePool) +// return nil +// }) +// } +// +// ``` +func LookupFlinkComputePool(ctx *pulumi.Context, args 
*LookupFlinkComputePoolArgs, opts ...pulumi.InvokeOption) (*LookupFlinkComputePoolResult, error) { + opts = internal.PkgInvokeDefaultOpts(opts) + var rv LookupFlinkComputePoolResult + err := ctx.Invoke("confluentcloud:index/getFlinkComputePool:getFlinkComputePool", args, &rv, opts...) + if err != nil { + return nil, err + } + return &rv, nil +} + +// A collection of arguments for invoking getFlinkComputePool. +type LookupFlinkComputePoolArgs struct { + // A human-readable name for the Flink Compute Pool. + DisplayName *string `pulumi:"displayName"` + // (Required Configuration Block) supports the following: + Environment GetFlinkComputePoolEnvironment `pulumi:"environment"` + // The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. + // + // > **Note:** Exactly one from the `id` and `displayName` attributes must be specified. + Id *string `pulumi:"id"` +} + +// A collection of values returned by getFlinkComputePool. +type LookupFlinkComputePoolResult struct { + // (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`. + ApiVersion string `pulumi:"apiVersion"` + // (Required String) The cloud service provider that runs the Flink Compute Pool. + Cloud string `pulumi:"cloud"` + // (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool. + CurrentCfu int `pulumi:"currentCfu"` + // (Required String) The name of the Flink Compute Pool. + DisplayName string `pulumi:"displayName"` + // (Required Configuration Block) supports the following: + Environment GetFlinkComputePoolEnvironment `pulumi:"environment"` + // (Required String) The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`. + Id string `pulumi:"id"` + // (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`. 
+ Kind string `pulumi:"kind"` + // (Required Integer) Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. + MaxCfu int `pulumi:"maxCfu"` + // (Required String) The cloud service provider region that hosts the Flink Compute Pool. + Region string `pulumi:"region"` + // (Required String) The Confluent Resource Name of the Flink Compute Pool. + ResourceName string `pulumi:"resourceName"` + // (Required String) The API endpoint of the Flink Compute Pool. + RestEndpoint string `pulumi:"restEndpoint"` +} + +func LookupFlinkComputePoolOutput(ctx *pulumi.Context, args LookupFlinkComputePoolOutputArgs, opts ...pulumi.InvokeOption) LookupFlinkComputePoolResultOutput { + return pulumi.ToOutputWithContext(context.Background(), args). + ApplyT(func(v interface{}) (LookupFlinkComputePoolResult, error) { + args := v.(LookupFlinkComputePoolArgs) + r, err := LookupFlinkComputePool(ctx, &args, opts...) + var s LookupFlinkComputePoolResult + if r != nil { + s = *r + } + return s, err + }).(LookupFlinkComputePoolResultOutput) +} + +// A collection of arguments for invoking getFlinkComputePool. +type LookupFlinkComputePoolOutputArgs struct { + // A human-readable name for the Flink Compute Pool. + DisplayName pulumi.StringPtrInput `pulumi:"displayName"` + // (Required Configuration Block) supports the following: + Environment GetFlinkComputePoolEnvironmentInput `pulumi:"environment"` + // The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. + // + // > **Note:** Exactly one from the `id` and `displayName` attributes must be specified. + Id pulumi.StringPtrInput `pulumi:"id"` +} + +func (LookupFlinkComputePoolOutputArgs) ElementType() reflect.Type { + return reflect.TypeOf((*LookupFlinkComputePoolArgs)(nil)).Elem() +} + +// A collection of values returned by getFlinkComputePool. 
+type LookupFlinkComputePoolResultOutput struct{ *pulumi.OutputState } + +func (LookupFlinkComputePoolResultOutput) ElementType() reflect.Type { + return reflect.TypeOf((*LookupFlinkComputePoolResult)(nil)).Elem() +} + +func (o LookupFlinkComputePoolResultOutput) ToLookupFlinkComputePoolResultOutput() LookupFlinkComputePoolResultOutput { + return o +} + +func (o LookupFlinkComputePoolResultOutput) ToLookupFlinkComputePoolResultOutputWithContext(ctx context.Context) LookupFlinkComputePoolResultOutput { + return o +} + +func (o LookupFlinkComputePoolResultOutput) ToOutput(ctx context.Context) pulumix.Output[LookupFlinkComputePoolResult] { + return pulumix.Output[LookupFlinkComputePoolResult]{ + OutputState: o.OutputState, + } +} + +// (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`. +func (o LookupFlinkComputePoolResultOutput) ApiVersion() pulumi.StringOutput { + return o.ApplyT(func(v LookupFlinkComputePoolResult) string { return v.ApiVersion }).(pulumi.StringOutput) +} + +// (Required String) The cloud service provider that runs the Flink Compute Pool. +func (o LookupFlinkComputePoolResultOutput) Cloud() pulumi.StringOutput { + return o.ApplyT(func(v LookupFlinkComputePoolResult) string { return v.Cloud }).(pulumi.StringOutput) +} + +// (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool. +func (o LookupFlinkComputePoolResultOutput) CurrentCfu() pulumi.IntOutput { + return o.ApplyT(func(v LookupFlinkComputePoolResult) int { return v.CurrentCfu }).(pulumi.IntOutput) +} + +// (Required String) The name of the Flink Compute Pool. 
+func (o LookupFlinkComputePoolResultOutput) DisplayName() pulumi.StringOutput { + return o.ApplyT(func(v LookupFlinkComputePoolResult) string { return v.DisplayName }).(pulumi.StringOutput) +} + +// (Required Configuration Block) supports the following: +func (o LookupFlinkComputePoolResultOutput) Environment() GetFlinkComputePoolEnvironmentOutput { + return o.ApplyT(func(v LookupFlinkComputePoolResult) GetFlinkComputePoolEnvironment { return v.Environment }).(GetFlinkComputePoolEnvironmentOutput) +} + +// (Required String) The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`. +func (o LookupFlinkComputePoolResultOutput) Id() pulumi.StringOutput { + return o.ApplyT(func(v LookupFlinkComputePoolResult) string { return v.Id }).(pulumi.StringOutput) +} + +// (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`. +func (o LookupFlinkComputePoolResultOutput) Kind() pulumi.StringOutput { + return o.ApplyT(func(v LookupFlinkComputePoolResult) string { return v.Kind }).(pulumi.StringOutput) +} + +// (Required Integer) Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. +func (o LookupFlinkComputePoolResultOutput) MaxCfu() pulumi.IntOutput { + return o.ApplyT(func(v LookupFlinkComputePoolResult) int { return v.MaxCfu }).(pulumi.IntOutput) +} + +// (Required String) The cloud service provider region that hosts the Flink Compute Pool. +func (o LookupFlinkComputePoolResultOutput) Region() pulumi.StringOutput { + return o.ApplyT(func(v LookupFlinkComputePoolResult) string { return v.Region }).(pulumi.StringOutput) +} + +// (Required String) The Confluent Resource Name of the Flink Compute Pool. +func (o LookupFlinkComputePoolResultOutput) ResourceName() pulumi.StringOutput { + return o.ApplyT(func(v LookupFlinkComputePoolResult) string { return v.ResourceName }).(pulumi.StringOutput) +} + +// (Required String) The API endpoint of the Flink Compute Pool. 
+func (o LookupFlinkComputePoolResultOutput) RestEndpoint() pulumi.StringOutput { + return o.ApplyT(func(v LookupFlinkComputePoolResult) string { return v.RestEndpoint }).(pulumi.StringOutput) +} + +func init() { + pulumi.RegisterOutputType(LookupFlinkComputePoolResultOutput{}) +} diff --git a/sdk/go/confluentcloud/getRoleBinding.go b/sdk/go/confluentcloud/getRoleBinding.go index fec23d2b..537355b6 100644 --- a/sdk/go/confluentcloud/getRoleBinding.go +++ b/sdk/go/confluentcloud/getRoleBinding.go @@ -16,6 +16,8 @@ import ( // // `RoleBinding` describes a Role Binding. // +// > **Note:** For more information on the Role Bindings, see [Predefined RBAC roles in Confluent Cloud](https://docs.confluent.io/cloud/current/access-management/access-control/rbac/predefined-rbac-roles.html). +// // ## Example Usage // // ```go diff --git a/sdk/go/confluentcloud/init.go b/sdk/go/confluentcloud/init.go index f5401572..c766e361 100644 --- a/sdk/go/confluentcloud/init.go +++ b/sdk/go/confluentcloud/init.go @@ -35,6 +35,8 @@ func (m *module) Construct(ctx *pulumi.Context, name, typ, urn string) (r pulumi r = &Connector{} case "confluentcloud:index/environment:Environment": r = &Environment{} + case "confluentcloud:index/flinkComputePool:FlinkComputePool": + r = &FlinkComputePool{} case "confluentcloud:index/identityPool:IdentityPool": r = &IdentityPool{} case "confluentcloud:index/identityProvider:IdentityProvider": @@ -73,6 +75,8 @@ func (m *module) Construct(ctx *pulumi.Context, name, typ, urn string) (r pulumi r = &RoleBinding{} case "confluentcloud:index/schema:Schema": r = &Schema{} + case "confluentcloud:index/schemaExporter:SchemaExporter": + r = &SchemaExporter{} case "confluentcloud:index/schemaRegistryCluster:SchemaRegistryCluster": r = &SchemaRegistryCluster{} case "confluentcloud:index/schemaRegistryClusterConfig:SchemaRegistryClusterConfig": @@ -159,6 +163,11 @@ func init() { "index/environment", &module{version}, ) + pulumi.RegisterResourceModule( + "confluentcloud", + 
"index/flinkComputePool", + &module{version}, + ) pulumi.RegisterResourceModule( "confluentcloud", "index/identityPool", @@ -254,6 +263,11 @@ func init() { "index/schema", &module{version}, ) + pulumi.RegisterResourceModule( + "confluentcloud", + "index/schemaExporter", + &module{version}, + ) pulumi.RegisterResourceModule( "confluentcloud", "index/schemaRegistryCluster", diff --git a/sdk/go/confluentcloud/kafkaAcl.go b/sdk/go/confluentcloud/kafkaAcl.go index 96216927..3db16ab8 100644 --- a/sdk/go/confluentcloud/kafkaAcl.go +++ b/sdk/go/confluentcloud/kafkaAcl.go @@ -15,7 +15,7 @@ import ( // ## Import // -// You can import Kafka ACLs by using the Kafka cluster ID and attributes of `confluent_kafka_acl` resource in the format `/######`, for exampleOption #1Manage multiple Kafka clusters in the same Terraform workspace $ export CONFLUENT_CLOUD_API_KEY="" $ export CONFLUENT_CLOUD_API_SECRET="" $ export IMPORT_KAFKA_API_KEY="" $ export IMPORT_KAFKA_API_SECRET="" $ export IMPORT_KAFKA_REST_ENDPOINT="" +// You can import Kafka ACLs by using the Kafka cluster ID and attributes of `confluent_kafka_acl` resource in the format `/######`, for exampleOption #1Manage multiple Kafka clusters in the same Terraform workspace $ export IMPORT_KAFKA_API_KEY="" $ export IMPORT_KAFKA_API_SECRET="" $ export IMPORT_KAFKA_REST_ENDPOINT="" // // ```sh // diff --git a/sdk/go/confluentcloud/pulumiTypes.go b/sdk/go/confluentcloud/pulumiTypes.go index 82ed34b3..c567d414 100644 --- a/sdk/go/confluentcloud/pulumiTypes.go +++ b/sdk/go/confluentcloud/pulumiTypes.go @@ -2050,8 +2050,6 @@ type ClusterLinkDestinationKafkaClusterCredentials struct { // The Kafka API Key. Key string `pulumi:"key"` // The Kafka API Secret. - // - // > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). 
Secret string `pulumi:"secret"` } @@ -2070,8 +2068,6 @@ type ClusterLinkDestinationKafkaClusterCredentialsArgs struct { // The Kafka API Key. Key pulumi.StringInput `pulumi:"key"` // The Kafka API Secret. - // - // > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). Secret pulumi.StringInput `pulumi:"secret"` } @@ -2176,8 +2172,6 @@ func (o ClusterLinkDestinationKafkaClusterCredentialsOutput) Key() pulumi.String } // The Kafka API Secret. -// -// > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). func (o ClusterLinkDestinationKafkaClusterCredentialsOutput) Secret() pulumi.StringOutput { return o.ApplyT(func(v ClusterLinkDestinationKafkaClusterCredentials) string { return v.Secret }).(pulumi.StringOutput) } @@ -2223,8 +2217,6 @@ func (o ClusterLinkDestinationKafkaClusterCredentialsPtrOutput) Key() pulumi.Str } // The Kafka API Secret. -// -// > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). func (o ClusterLinkDestinationKafkaClusterCredentialsPtrOutput) Secret() pulumi.StringPtrOutput { return o.ApplyT(func(v *ClusterLinkDestinationKafkaClusterCredentials) *string { if v == nil { @@ -2452,8 +2444,6 @@ type ClusterLinkLocalKafkaClusterCredentials struct { // The Kafka API Key. Key string `pulumi:"key"` // The Kafka API Secret. 
- // - // > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). Secret string `pulumi:"secret"` } @@ -2472,8 +2462,6 @@ type ClusterLinkLocalKafkaClusterCredentialsArgs struct { // The Kafka API Key. Key pulumi.StringInput `pulumi:"key"` // The Kafka API Secret. - // - // > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). Secret pulumi.StringInput `pulumi:"secret"` } @@ -2578,8 +2566,6 @@ func (o ClusterLinkLocalKafkaClusterCredentialsOutput) Key() pulumi.StringOutput } // The Kafka API Secret. -// -// > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). func (o ClusterLinkLocalKafkaClusterCredentialsOutput) Secret() pulumi.StringOutput { return o.ApplyT(func(v ClusterLinkLocalKafkaClusterCredentials) string { return v.Secret }).(pulumi.StringOutput) } @@ -2625,8 +2611,6 @@ func (o ClusterLinkLocalKafkaClusterCredentialsPtrOutput) Key() pulumi.StringPtr } // The Kafka API Secret. -// -// > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). func (o ClusterLinkLocalKafkaClusterCredentialsPtrOutput) Secret() pulumi.StringPtrOutput { return o.ApplyT(func(v *ClusterLinkLocalKafkaClusterCredentials) *string { if v == nil { @@ -2854,8 +2838,6 @@ type ClusterLinkRemoteKafkaClusterCredentials struct { // The Kafka API Key. 
Key string `pulumi:"key"` // The Kafka API Secret. - // - // > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). Secret string `pulumi:"secret"` } @@ -2874,8 +2856,6 @@ type ClusterLinkRemoteKafkaClusterCredentialsArgs struct { // The Kafka API Key. Key pulumi.StringInput `pulumi:"key"` // The Kafka API Secret. - // - // > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). Secret pulumi.StringInput `pulumi:"secret"` } @@ -2980,8 +2960,6 @@ func (o ClusterLinkRemoteKafkaClusterCredentialsOutput) Key() pulumi.StringOutpu } // The Kafka API Secret. -// -// > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). func (o ClusterLinkRemoteKafkaClusterCredentialsOutput) Secret() pulumi.StringOutput { return o.ApplyT(func(v ClusterLinkRemoteKafkaClusterCredentials) string { return v.Secret }).(pulumi.StringOutput) } @@ -3027,8 +3005,6 @@ func (o ClusterLinkRemoteKafkaClusterCredentialsPtrOutput) Key() pulumi.StringPt } // The Kafka API Secret. -// -// > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). 
func (o ClusterLinkRemoteKafkaClusterCredentialsPtrOutput) Secret() pulumi.StringPtrOutput { return o.ApplyT(func(v *ClusterLinkRemoteKafkaClusterCredentials) *string { if v == nil { @@ -3256,8 +3232,6 @@ type ClusterLinkSourceKafkaClusterCredentials struct { // The Kafka API Key. Key string `pulumi:"key"` // The Kafka API Secret. - // - // > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). Secret string `pulumi:"secret"` } @@ -3276,8 +3250,6 @@ type ClusterLinkSourceKafkaClusterCredentialsArgs struct { // The Kafka API Key. Key pulumi.StringInput `pulumi:"key"` // The Kafka API Secret. - // - // > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). Secret pulumi.StringInput `pulumi:"secret"` } @@ -3382,8 +3354,6 @@ func (o ClusterLinkSourceKafkaClusterCredentialsOutput) Key() pulumi.StringOutpu } // The Kafka API Secret. -// -// > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). func (o ClusterLinkSourceKafkaClusterCredentialsOutput) Secret() pulumi.StringOutput { return o.ApplyT(func(v ClusterLinkSourceKafkaClusterCredentials) string { return v.Secret }).(pulumi.StringOutput) } @@ -3429,8 +3399,6 @@ func (o ClusterLinkSourceKafkaClusterCredentialsPtrOutput) Key() pulumi.StringPt } // The Kafka API Secret. 
-// -// > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). func (o ClusterLinkSourceKafkaClusterCredentialsPtrOutput) Secret() pulumi.StringPtrOutput { return o.ApplyT(func(v *ClusterLinkSourceKafkaClusterCredentials) *string { if v == nil { @@ -3762,6 +3730,167 @@ func (o ConnectorKafkaClusterPtrOutput) Id() pulumi.StringPtrOutput { }).(pulumi.StringPtrOutput) } +type FlinkComputePoolEnvironment struct { + // The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`. + Id string `pulumi:"id"` +} + +// FlinkComputePoolEnvironmentInput is an input type that accepts FlinkComputePoolEnvironmentArgs and FlinkComputePoolEnvironmentOutput values. +// You can construct a concrete instance of `FlinkComputePoolEnvironmentInput` via: +// +// FlinkComputePoolEnvironmentArgs{...} +type FlinkComputePoolEnvironmentInput interface { + pulumi.Input + + ToFlinkComputePoolEnvironmentOutput() FlinkComputePoolEnvironmentOutput + ToFlinkComputePoolEnvironmentOutputWithContext(context.Context) FlinkComputePoolEnvironmentOutput +} + +type FlinkComputePoolEnvironmentArgs struct { + // The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`. 
+ Id pulumi.StringInput `pulumi:"id"` +} + +func (FlinkComputePoolEnvironmentArgs) ElementType() reflect.Type { + return reflect.TypeOf((*FlinkComputePoolEnvironment)(nil)).Elem() +} + +func (i FlinkComputePoolEnvironmentArgs) ToFlinkComputePoolEnvironmentOutput() FlinkComputePoolEnvironmentOutput { + return i.ToFlinkComputePoolEnvironmentOutputWithContext(context.Background()) +} + +func (i FlinkComputePoolEnvironmentArgs) ToFlinkComputePoolEnvironmentOutputWithContext(ctx context.Context) FlinkComputePoolEnvironmentOutput { + return pulumi.ToOutputWithContext(ctx, i).(FlinkComputePoolEnvironmentOutput) +} + +func (i FlinkComputePoolEnvironmentArgs) ToOutput(ctx context.Context) pulumix.Output[FlinkComputePoolEnvironment] { + return pulumix.Output[FlinkComputePoolEnvironment]{ + OutputState: i.ToFlinkComputePoolEnvironmentOutputWithContext(ctx).OutputState, + } +} + +func (i FlinkComputePoolEnvironmentArgs) ToFlinkComputePoolEnvironmentPtrOutput() FlinkComputePoolEnvironmentPtrOutput { + return i.ToFlinkComputePoolEnvironmentPtrOutputWithContext(context.Background()) +} + +func (i FlinkComputePoolEnvironmentArgs) ToFlinkComputePoolEnvironmentPtrOutputWithContext(ctx context.Context) FlinkComputePoolEnvironmentPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(FlinkComputePoolEnvironmentOutput).ToFlinkComputePoolEnvironmentPtrOutputWithContext(ctx) +} + +// FlinkComputePoolEnvironmentPtrInput is an input type that accepts FlinkComputePoolEnvironmentArgs, FlinkComputePoolEnvironmentPtr and FlinkComputePoolEnvironmentPtrOutput values. 
+// You can construct a concrete instance of `FlinkComputePoolEnvironmentPtrInput` via: +// +// FlinkComputePoolEnvironmentArgs{...} +// +// or: +// +// nil +type FlinkComputePoolEnvironmentPtrInput interface { + pulumi.Input + + ToFlinkComputePoolEnvironmentPtrOutput() FlinkComputePoolEnvironmentPtrOutput + ToFlinkComputePoolEnvironmentPtrOutputWithContext(context.Context) FlinkComputePoolEnvironmentPtrOutput +} + +type flinkComputePoolEnvironmentPtrType FlinkComputePoolEnvironmentArgs + +func FlinkComputePoolEnvironmentPtr(v *FlinkComputePoolEnvironmentArgs) FlinkComputePoolEnvironmentPtrInput { + return (*flinkComputePoolEnvironmentPtrType)(v) +} + +func (*flinkComputePoolEnvironmentPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**FlinkComputePoolEnvironment)(nil)).Elem() +} + +func (i *flinkComputePoolEnvironmentPtrType) ToFlinkComputePoolEnvironmentPtrOutput() FlinkComputePoolEnvironmentPtrOutput { + return i.ToFlinkComputePoolEnvironmentPtrOutputWithContext(context.Background()) +} + +func (i *flinkComputePoolEnvironmentPtrType) ToFlinkComputePoolEnvironmentPtrOutputWithContext(ctx context.Context) FlinkComputePoolEnvironmentPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(FlinkComputePoolEnvironmentPtrOutput) +} + +func (i *flinkComputePoolEnvironmentPtrType) ToOutput(ctx context.Context) pulumix.Output[*FlinkComputePoolEnvironment] { + return pulumix.Output[*FlinkComputePoolEnvironment]{ + OutputState: i.ToFlinkComputePoolEnvironmentPtrOutputWithContext(ctx).OutputState, + } +} + +type FlinkComputePoolEnvironmentOutput struct{ *pulumi.OutputState } + +func (FlinkComputePoolEnvironmentOutput) ElementType() reflect.Type { + return reflect.TypeOf((*FlinkComputePoolEnvironment)(nil)).Elem() +} + +func (o FlinkComputePoolEnvironmentOutput) ToFlinkComputePoolEnvironmentOutput() FlinkComputePoolEnvironmentOutput { + return o +} + +func (o FlinkComputePoolEnvironmentOutput) ToFlinkComputePoolEnvironmentOutputWithContext(ctx context.Context) 
FlinkComputePoolEnvironmentOutput { + return o +} + +func (o FlinkComputePoolEnvironmentOutput) ToFlinkComputePoolEnvironmentPtrOutput() FlinkComputePoolEnvironmentPtrOutput { + return o.ToFlinkComputePoolEnvironmentPtrOutputWithContext(context.Background()) +} + +func (o FlinkComputePoolEnvironmentOutput) ToFlinkComputePoolEnvironmentPtrOutputWithContext(ctx context.Context) FlinkComputePoolEnvironmentPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v FlinkComputePoolEnvironment) *FlinkComputePoolEnvironment { + return &v + }).(FlinkComputePoolEnvironmentPtrOutput) +} + +func (o FlinkComputePoolEnvironmentOutput) ToOutput(ctx context.Context) pulumix.Output[FlinkComputePoolEnvironment] { + return pulumix.Output[FlinkComputePoolEnvironment]{ + OutputState: o.OutputState, + } +} + +// The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`. +func (o FlinkComputePoolEnvironmentOutput) Id() pulumi.StringOutput { + return o.ApplyT(func(v FlinkComputePoolEnvironment) string { return v.Id }).(pulumi.StringOutput) +} + +type FlinkComputePoolEnvironmentPtrOutput struct{ *pulumi.OutputState } + +func (FlinkComputePoolEnvironmentPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**FlinkComputePoolEnvironment)(nil)).Elem() +} + +func (o FlinkComputePoolEnvironmentPtrOutput) ToFlinkComputePoolEnvironmentPtrOutput() FlinkComputePoolEnvironmentPtrOutput { + return o +} + +func (o FlinkComputePoolEnvironmentPtrOutput) ToFlinkComputePoolEnvironmentPtrOutputWithContext(ctx context.Context) FlinkComputePoolEnvironmentPtrOutput { + return o +} + +func (o FlinkComputePoolEnvironmentPtrOutput) ToOutput(ctx context.Context) pulumix.Output[*FlinkComputePoolEnvironment] { + return pulumix.Output[*FlinkComputePoolEnvironment]{ + OutputState: o.OutputState, + } +} + +func (o FlinkComputePoolEnvironmentPtrOutput) Elem() FlinkComputePoolEnvironmentOutput { + return o.ApplyT(func(v *FlinkComputePoolEnvironment) 
FlinkComputePoolEnvironment { + if v != nil { + return *v + } + var ret FlinkComputePoolEnvironment + return ret + }).(FlinkComputePoolEnvironmentOutput) +} + +// The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`. +func (o FlinkComputePoolEnvironmentPtrOutput) Id() pulumi.StringPtrOutput { + return o.ApplyT(func(v *FlinkComputePoolEnvironment) *string { + if v == nil { + return nil + } + return &v.Id + }).(pulumi.StringPtrOutput) +} + type IdentityPoolIdentityProvider struct { // The ID of the Identity Provider associated with the Identity Pool, for example, `op-abc123`. Id string `pulumi:"id"` @@ -13174,166 +13303,169 @@ func (o SchemaCredentialsPtrOutput) Secret() pulumi.StringPtrOutput { }).(pulumi.StringPtrOutput) } -type SchemaRegistryClusterConfigCredentials struct { +type SchemaExporterCredentials struct { // The Schema Registry API Key. - Key string `pulumi:"key"` + Key string `pulumi:"key"` + // The Schema Registry API Secret. Secret string `pulumi:"secret"` } -// SchemaRegistryClusterConfigCredentialsInput is an input type that accepts SchemaRegistryClusterConfigCredentialsArgs and SchemaRegistryClusterConfigCredentialsOutput values. -// You can construct a concrete instance of `SchemaRegistryClusterConfigCredentialsInput` via: +// SchemaExporterCredentialsInput is an input type that accepts SchemaExporterCredentialsArgs and SchemaExporterCredentialsOutput values. 
+// You can construct a concrete instance of `SchemaExporterCredentialsInput` via: // -// SchemaRegistryClusterConfigCredentialsArgs{...} -type SchemaRegistryClusterConfigCredentialsInput interface { +// SchemaExporterCredentialsArgs{...} +type SchemaExporterCredentialsInput interface { pulumi.Input - ToSchemaRegistryClusterConfigCredentialsOutput() SchemaRegistryClusterConfigCredentialsOutput - ToSchemaRegistryClusterConfigCredentialsOutputWithContext(context.Context) SchemaRegistryClusterConfigCredentialsOutput + ToSchemaExporterCredentialsOutput() SchemaExporterCredentialsOutput + ToSchemaExporterCredentialsOutputWithContext(context.Context) SchemaExporterCredentialsOutput } -type SchemaRegistryClusterConfigCredentialsArgs struct { +type SchemaExporterCredentialsArgs struct { // The Schema Registry API Key. - Key pulumi.StringInput `pulumi:"key"` + Key pulumi.StringInput `pulumi:"key"` + // The Schema Registry API Secret. Secret pulumi.StringInput `pulumi:"secret"` } -func (SchemaRegistryClusterConfigCredentialsArgs) ElementType() reflect.Type { - return reflect.TypeOf((*SchemaRegistryClusterConfigCredentials)(nil)).Elem() +func (SchemaExporterCredentialsArgs) ElementType() reflect.Type { + return reflect.TypeOf((*SchemaExporterCredentials)(nil)).Elem() } -func (i SchemaRegistryClusterConfigCredentialsArgs) ToSchemaRegistryClusterConfigCredentialsOutput() SchemaRegistryClusterConfigCredentialsOutput { - return i.ToSchemaRegistryClusterConfigCredentialsOutputWithContext(context.Background()) +func (i SchemaExporterCredentialsArgs) ToSchemaExporterCredentialsOutput() SchemaExporterCredentialsOutput { + return i.ToSchemaExporterCredentialsOutputWithContext(context.Background()) } -func (i SchemaRegistryClusterConfigCredentialsArgs) ToSchemaRegistryClusterConfigCredentialsOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigCredentialsOutput { - return pulumi.ToOutputWithContext(ctx, i).(SchemaRegistryClusterConfigCredentialsOutput) +func (i 
SchemaExporterCredentialsArgs) ToSchemaExporterCredentialsOutputWithContext(ctx context.Context) SchemaExporterCredentialsOutput { + return pulumi.ToOutputWithContext(ctx, i).(SchemaExporterCredentialsOutput) } -func (i SchemaRegistryClusterConfigCredentialsArgs) ToOutput(ctx context.Context) pulumix.Output[SchemaRegistryClusterConfigCredentials] { - return pulumix.Output[SchemaRegistryClusterConfigCredentials]{ - OutputState: i.ToSchemaRegistryClusterConfigCredentialsOutputWithContext(ctx).OutputState, +func (i SchemaExporterCredentialsArgs) ToOutput(ctx context.Context) pulumix.Output[SchemaExporterCredentials] { + return pulumix.Output[SchemaExporterCredentials]{ + OutputState: i.ToSchemaExporterCredentialsOutputWithContext(ctx).OutputState, } } -func (i SchemaRegistryClusterConfigCredentialsArgs) ToSchemaRegistryClusterConfigCredentialsPtrOutput() SchemaRegistryClusterConfigCredentialsPtrOutput { - return i.ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(context.Background()) +func (i SchemaExporterCredentialsArgs) ToSchemaExporterCredentialsPtrOutput() SchemaExporterCredentialsPtrOutput { + return i.ToSchemaExporterCredentialsPtrOutputWithContext(context.Background()) } -func (i SchemaRegistryClusterConfigCredentialsArgs) ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigCredentialsPtrOutput { - return pulumi.ToOutputWithContext(ctx, i).(SchemaRegistryClusterConfigCredentialsOutput).ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(ctx) +func (i SchemaExporterCredentialsArgs) ToSchemaExporterCredentialsPtrOutputWithContext(ctx context.Context) SchemaExporterCredentialsPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(SchemaExporterCredentialsOutput).ToSchemaExporterCredentialsPtrOutputWithContext(ctx) } -// SchemaRegistryClusterConfigCredentialsPtrInput is an input type that accepts SchemaRegistryClusterConfigCredentialsArgs, SchemaRegistryClusterConfigCredentialsPtr and 
SchemaRegistryClusterConfigCredentialsPtrOutput values. -// You can construct a concrete instance of `SchemaRegistryClusterConfigCredentialsPtrInput` via: +// SchemaExporterCredentialsPtrInput is an input type that accepts SchemaExporterCredentialsArgs, SchemaExporterCredentialsPtr and SchemaExporterCredentialsPtrOutput values. +// You can construct a concrete instance of `SchemaExporterCredentialsPtrInput` via: // -// SchemaRegistryClusterConfigCredentialsArgs{...} +// SchemaExporterCredentialsArgs{...} // // or: // // nil -type SchemaRegistryClusterConfigCredentialsPtrInput interface { +type SchemaExporterCredentialsPtrInput interface { pulumi.Input - ToSchemaRegistryClusterConfigCredentialsPtrOutput() SchemaRegistryClusterConfigCredentialsPtrOutput - ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(context.Context) SchemaRegistryClusterConfigCredentialsPtrOutput + ToSchemaExporterCredentialsPtrOutput() SchemaExporterCredentialsPtrOutput + ToSchemaExporterCredentialsPtrOutputWithContext(context.Context) SchemaExporterCredentialsPtrOutput } -type schemaRegistryClusterConfigCredentialsPtrType SchemaRegistryClusterConfigCredentialsArgs +type schemaExporterCredentialsPtrType SchemaExporterCredentialsArgs -func SchemaRegistryClusterConfigCredentialsPtr(v *SchemaRegistryClusterConfigCredentialsArgs) SchemaRegistryClusterConfigCredentialsPtrInput { - return (*schemaRegistryClusterConfigCredentialsPtrType)(v) +func SchemaExporterCredentialsPtr(v *SchemaExporterCredentialsArgs) SchemaExporterCredentialsPtrInput { + return (*schemaExporterCredentialsPtrType)(v) } -func (*schemaRegistryClusterConfigCredentialsPtrType) ElementType() reflect.Type { - return reflect.TypeOf((**SchemaRegistryClusterConfigCredentials)(nil)).Elem() +func (*schemaExporterCredentialsPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**SchemaExporterCredentials)(nil)).Elem() } -func (i *schemaRegistryClusterConfigCredentialsPtrType) 
ToSchemaRegistryClusterConfigCredentialsPtrOutput() SchemaRegistryClusterConfigCredentialsPtrOutput { - return i.ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(context.Background()) +func (i *schemaExporterCredentialsPtrType) ToSchemaExporterCredentialsPtrOutput() SchemaExporterCredentialsPtrOutput { + return i.ToSchemaExporterCredentialsPtrOutputWithContext(context.Background()) } -func (i *schemaRegistryClusterConfigCredentialsPtrType) ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigCredentialsPtrOutput { - return pulumi.ToOutputWithContext(ctx, i).(SchemaRegistryClusterConfigCredentialsPtrOutput) +func (i *schemaExporterCredentialsPtrType) ToSchemaExporterCredentialsPtrOutputWithContext(ctx context.Context) SchemaExporterCredentialsPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(SchemaExporterCredentialsPtrOutput) } -func (i *schemaRegistryClusterConfigCredentialsPtrType) ToOutput(ctx context.Context) pulumix.Output[*SchemaRegistryClusterConfigCredentials] { - return pulumix.Output[*SchemaRegistryClusterConfigCredentials]{ - OutputState: i.ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(ctx).OutputState, +func (i *schemaExporterCredentialsPtrType) ToOutput(ctx context.Context) pulumix.Output[*SchemaExporterCredentials] { + return pulumix.Output[*SchemaExporterCredentials]{ + OutputState: i.ToSchemaExporterCredentialsPtrOutputWithContext(ctx).OutputState, } } -type SchemaRegistryClusterConfigCredentialsOutput struct{ *pulumi.OutputState } +type SchemaExporterCredentialsOutput struct{ *pulumi.OutputState } -func (SchemaRegistryClusterConfigCredentialsOutput) ElementType() reflect.Type { - return reflect.TypeOf((*SchemaRegistryClusterConfigCredentials)(nil)).Elem() +func (SchemaExporterCredentialsOutput) ElementType() reflect.Type { + return reflect.TypeOf((*SchemaExporterCredentials)(nil)).Elem() } -func (o SchemaRegistryClusterConfigCredentialsOutput) 
ToSchemaRegistryClusterConfigCredentialsOutput() SchemaRegistryClusterConfigCredentialsOutput { +func (o SchemaExporterCredentialsOutput) ToSchemaExporterCredentialsOutput() SchemaExporterCredentialsOutput { return o } -func (o SchemaRegistryClusterConfigCredentialsOutput) ToSchemaRegistryClusterConfigCredentialsOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigCredentialsOutput { +func (o SchemaExporterCredentialsOutput) ToSchemaExporterCredentialsOutputWithContext(ctx context.Context) SchemaExporterCredentialsOutput { return o } -func (o SchemaRegistryClusterConfigCredentialsOutput) ToSchemaRegistryClusterConfigCredentialsPtrOutput() SchemaRegistryClusterConfigCredentialsPtrOutput { - return o.ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(context.Background()) +func (o SchemaExporterCredentialsOutput) ToSchemaExporterCredentialsPtrOutput() SchemaExporterCredentialsPtrOutput { + return o.ToSchemaExporterCredentialsPtrOutputWithContext(context.Background()) } -func (o SchemaRegistryClusterConfigCredentialsOutput) ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigCredentialsPtrOutput { - return o.ApplyTWithContext(ctx, func(_ context.Context, v SchemaRegistryClusterConfigCredentials) *SchemaRegistryClusterConfigCredentials { +func (o SchemaExporterCredentialsOutput) ToSchemaExporterCredentialsPtrOutputWithContext(ctx context.Context) SchemaExporterCredentialsPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v SchemaExporterCredentials) *SchemaExporterCredentials { return &v - }).(SchemaRegistryClusterConfigCredentialsPtrOutput) + }).(SchemaExporterCredentialsPtrOutput) } -func (o SchemaRegistryClusterConfigCredentialsOutput) ToOutput(ctx context.Context) pulumix.Output[SchemaRegistryClusterConfigCredentials] { - return pulumix.Output[SchemaRegistryClusterConfigCredentials]{ +func (o SchemaExporterCredentialsOutput) ToOutput(ctx context.Context) 
pulumix.Output[SchemaExporterCredentials] { + return pulumix.Output[SchemaExporterCredentials]{ OutputState: o.OutputState, } } // The Schema Registry API Key. -func (o SchemaRegistryClusterConfigCredentialsOutput) Key() pulumi.StringOutput { - return o.ApplyT(func(v SchemaRegistryClusterConfigCredentials) string { return v.Key }).(pulumi.StringOutput) +func (o SchemaExporterCredentialsOutput) Key() pulumi.StringOutput { + return o.ApplyT(func(v SchemaExporterCredentials) string { return v.Key }).(pulumi.StringOutput) } -func (o SchemaRegistryClusterConfigCredentialsOutput) Secret() pulumi.StringOutput { - return o.ApplyT(func(v SchemaRegistryClusterConfigCredentials) string { return v.Secret }).(pulumi.StringOutput) +// The Schema Registry API Secret. +func (o SchemaExporterCredentialsOutput) Secret() pulumi.StringOutput { + return o.ApplyT(func(v SchemaExporterCredentials) string { return v.Secret }).(pulumi.StringOutput) } -type SchemaRegistryClusterConfigCredentialsPtrOutput struct{ *pulumi.OutputState } +type SchemaExporterCredentialsPtrOutput struct{ *pulumi.OutputState } -func (SchemaRegistryClusterConfigCredentialsPtrOutput) ElementType() reflect.Type { - return reflect.TypeOf((**SchemaRegistryClusterConfigCredentials)(nil)).Elem() +func (SchemaExporterCredentialsPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**SchemaExporterCredentials)(nil)).Elem() } -func (o SchemaRegistryClusterConfigCredentialsPtrOutput) ToSchemaRegistryClusterConfigCredentialsPtrOutput() SchemaRegistryClusterConfigCredentialsPtrOutput { +func (o SchemaExporterCredentialsPtrOutput) ToSchemaExporterCredentialsPtrOutput() SchemaExporterCredentialsPtrOutput { return o } -func (o SchemaRegistryClusterConfigCredentialsPtrOutput) ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigCredentialsPtrOutput { +func (o SchemaExporterCredentialsPtrOutput) ToSchemaExporterCredentialsPtrOutputWithContext(ctx context.Context) 
SchemaExporterCredentialsPtrOutput { return o } -func (o SchemaRegistryClusterConfigCredentialsPtrOutput) ToOutput(ctx context.Context) pulumix.Output[*SchemaRegistryClusterConfigCredentials] { - return pulumix.Output[*SchemaRegistryClusterConfigCredentials]{ +func (o SchemaExporterCredentialsPtrOutput) ToOutput(ctx context.Context) pulumix.Output[*SchemaExporterCredentials] { + return pulumix.Output[*SchemaExporterCredentials]{ OutputState: o.OutputState, } } -func (o SchemaRegistryClusterConfigCredentialsPtrOutput) Elem() SchemaRegistryClusterConfigCredentialsOutput { - return o.ApplyT(func(v *SchemaRegistryClusterConfigCredentials) SchemaRegistryClusterConfigCredentials { +func (o SchemaExporterCredentialsPtrOutput) Elem() SchemaExporterCredentialsOutput { + return o.ApplyT(func(v *SchemaExporterCredentials) SchemaExporterCredentials { if v != nil { return *v } - var ret SchemaRegistryClusterConfigCredentials + var ret SchemaExporterCredentials return ret - }).(SchemaRegistryClusterConfigCredentialsOutput) + }).(SchemaExporterCredentialsOutput) } // The Schema Registry API Key. -func (o SchemaRegistryClusterConfigCredentialsPtrOutput) Key() pulumi.StringPtrOutput { - return o.ApplyT(func(v *SchemaRegistryClusterConfigCredentials) *string { +func (o SchemaExporterCredentialsPtrOutput) Key() pulumi.StringPtrOutput { + return o.ApplyT(func(v *SchemaExporterCredentials) *string { if v == nil { return nil } @@ -13341,8 +13473,9 @@ func (o SchemaRegistryClusterConfigCredentialsPtrOutput) Key() pulumi.StringPtrO }).(pulumi.StringPtrOutput) } -func (o SchemaRegistryClusterConfigCredentialsPtrOutput) Secret() pulumi.StringPtrOutput { - return o.ApplyT(func(v *SchemaRegistryClusterConfigCredentials) *string { +// The Schema Registry API Secret. 
+func (o SchemaExporterCredentialsPtrOutput) Secret() pulumi.StringPtrOutput { + return o.ApplyT(func(v *SchemaExporterCredentials) *string { if v == nil { return nil } @@ -13350,148 +13483,843 @@ func (o SchemaRegistryClusterConfigCredentialsPtrOutput) Secret() pulumi.StringP }).(pulumi.StringPtrOutput) } -type SchemaRegistryClusterConfigSchemaRegistryCluster struct { - // The ID of the Schema Registry cluster, for example, `lsrc-abc123`. - Id string `pulumi:"id"` +type SchemaExporterDestinationSchemaRegistryCluster struct { + Credentials SchemaExporterDestinationSchemaRegistryClusterCredentials `pulumi:"credentials"` + // The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + RestEndpoint string `pulumi:"restEndpoint"` } -// SchemaRegistryClusterConfigSchemaRegistryClusterInput is an input type that accepts SchemaRegistryClusterConfigSchemaRegistryClusterArgs and SchemaRegistryClusterConfigSchemaRegistryClusterOutput values. -// You can construct a concrete instance of `SchemaRegistryClusterConfigSchemaRegistryClusterInput` via: +// SchemaExporterDestinationSchemaRegistryClusterInput is an input type that accepts SchemaExporterDestinationSchemaRegistryClusterArgs and SchemaExporterDestinationSchemaRegistryClusterOutput values. 
+// You can construct a concrete instance of `SchemaExporterDestinationSchemaRegistryClusterInput` via: // -// SchemaRegistryClusterConfigSchemaRegistryClusterArgs{...} -type SchemaRegistryClusterConfigSchemaRegistryClusterInput interface { +// SchemaExporterDestinationSchemaRegistryClusterArgs{...} +type SchemaExporterDestinationSchemaRegistryClusterInput interface { pulumi.Input - ToSchemaRegistryClusterConfigSchemaRegistryClusterOutput() SchemaRegistryClusterConfigSchemaRegistryClusterOutput - ToSchemaRegistryClusterConfigSchemaRegistryClusterOutputWithContext(context.Context) SchemaRegistryClusterConfigSchemaRegistryClusterOutput + ToSchemaExporterDestinationSchemaRegistryClusterOutput() SchemaExporterDestinationSchemaRegistryClusterOutput + ToSchemaExporterDestinationSchemaRegistryClusterOutputWithContext(context.Context) SchemaExporterDestinationSchemaRegistryClusterOutput } -type SchemaRegistryClusterConfigSchemaRegistryClusterArgs struct { - // The ID of the Schema Registry cluster, for example, `lsrc-abc123`. - Id pulumi.StringInput `pulumi:"id"` +type SchemaExporterDestinationSchemaRegistryClusterArgs struct { + Credentials SchemaExporterDestinationSchemaRegistryClusterCredentialsInput `pulumi:"credentials"` + // The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). 
+ RestEndpoint pulumi.StringInput `pulumi:"restEndpoint"` } -func (SchemaRegistryClusterConfigSchemaRegistryClusterArgs) ElementType() reflect.Type { - return reflect.TypeOf((*SchemaRegistryClusterConfigSchemaRegistryCluster)(nil)).Elem() +func (SchemaExporterDestinationSchemaRegistryClusterArgs) ElementType() reflect.Type { + return reflect.TypeOf((*SchemaExporterDestinationSchemaRegistryCluster)(nil)).Elem() } -func (i SchemaRegistryClusterConfigSchemaRegistryClusterArgs) ToSchemaRegistryClusterConfigSchemaRegistryClusterOutput() SchemaRegistryClusterConfigSchemaRegistryClusterOutput { - return i.ToSchemaRegistryClusterConfigSchemaRegistryClusterOutputWithContext(context.Background()) +func (i SchemaExporterDestinationSchemaRegistryClusterArgs) ToSchemaExporterDestinationSchemaRegistryClusterOutput() SchemaExporterDestinationSchemaRegistryClusterOutput { + return i.ToSchemaExporterDestinationSchemaRegistryClusterOutputWithContext(context.Background()) } -func (i SchemaRegistryClusterConfigSchemaRegistryClusterArgs) ToSchemaRegistryClusterConfigSchemaRegistryClusterOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigSchemaRegistryClusterOutput { - return pulumi.ToOutputWithContext(ctx, i).(SchemaRegistryClusterConfigSchemaRegistryClusterOutput) +func (i SchemaExporterDestinationSchemaRegistryClusterArgs) ToSchemaExporterDestinationSchemaRegistryClusterOutputWithContext(ctx context.Context) SchemaExporterDestinationSchemaRegistryClusterOutput { + return pulumi.ToOutputWithContext(ctx, i).(SchemaExporterDestinationSchemaRegistryClusterOutput) } -func (i SchemaRegistryClusterConfigSchemaRegistryClusterArgs) ToOutput(ctx context.Context) pulumix.Output[SchemaRegistryClusterConfigSchemaRegistryCluster] { - return pulumix.Output[SchemaRegistryClusterConfigSchemaRegistryCluster]{ - OutputState: i.ToSchemaRegistryClusterConfigSchemaRegistryClusterOutputWithContext(ctx).OutputState, +func (i SchemaExporterDestinationSchemaRegistryClusterArgs) ToOutput(ctx 
context.Context) pulumix.Output[SchemaExporterDestinationSchemaRegistryCluster] { + return pulumix.Output[SchemaExporterDestinationSchemaRegistryCluster]{ + OutputState: i.ToSchemaExporterDestinationSchemaRegistryClusterOutputWithContext(ctx).OutputState, } } -func (i SchemaRegistryClusterConfigSchemaRegistryClusterArgs) ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput() SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput { - return i.ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(context.Background()) +func (i SchemaExporterDestinationSchemaRegistryClusterArgs) ToSchemaExporterDestinationSchemaRegistryClusterPtrOutput() SchemaExporterDestinationSchemaRegistryClusterPtrOutput { + return i.ToSchemaExporterDestinationSchemaRegistryClusterPtrOutputWithContext(context.Background()) } -func (i SchemaRegistryClusterConfigSchemaRegistryClusterArgs) ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput { - return pulumi.ToOutputWithContext(ctx, i).(SchemaRegistryClusterConfigSchemaRegistryClusterOutput).ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(ctx) +func (i SchemaExporterDestinationSchemaRegistryClusterArgs) ToSchemaExporterDestinationSchemaRegistryClusterPtrOutputWithContext(ctx context.Context) SchemaExporterDestinationSchemaRegistryClusterPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(SchemaExporterDestinationSchemaRegistryClusterOutput).ToSchemaExporterDestinationSchemaRegistryClusterPtrOutputWithContext(ctx) } -// SchemaRegistryClusterConfigSchemaRegistryClusterPtrInput is an input type that accepts SchemaRegistryClusterConfigSchemaRegistryClusterArgs, SchemaRegistryClusterConfigSchemaRegistryClusterPtr and SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput values. 
-// You can construct a concrete instance of `SchemaRegistryClusterConfigSchemaRegistryClusterPtrInput` via: +// SchemaExporterDestinationSchemaRegistryClusterPtrInput is an input type that accepts SchemaExporterDestinationSchemaRegistryClusterArgs, SchemaExporterDestinationSchemaRegistryClusterPtr and SchemaExporterDestinationSchemaRegistryClusterPtrOutput values. +// You can construct a concrete instance of `SchemaExporterDestinationSchemaRegistryClusterPtrInput` via: // -// SchemaRegistryClusterConfigSchemaRegistryClusterArgs{...} +// SchemaExporterDestinationSchemaRegistryClusterArgs{...} // // or: // // nil -type SchemaRegistryClusterConfigSchemaRegistryClusterPtrInput interface { +type SchemaExporterDestinationSchemaRegistryClusterPtrInput interface { pulumi.Input - ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput() SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput - ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(context.Context) SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput + ToSchemaExporterDestinationSchemaRegistryClusterPtrOutput() SchemaExporterDestinationSchemaRegistryClusterPtrOutput + ToSchemaExporterDestinationSchemaRegistryClusterPtrOutputWithContext(context.Context) SchemaExporterDestinationSchemaRegistryClusterPtrOutput } -type schemaRegistryClusterConfigSchemaRegistryClusterPtrType SchemaRegistryClusterConfigSchemaRegistryClusterArgs +type schemaExporterDestinationSchemaRegistryClusterPtrType SchemaExporterDestinationSchemaRegistryClusterArgs -func SchemaRegistryClusterConfigSchemaRegistryClusterPtr(v *SchemaRegistryClusterConfigSchemaRegistryClusterArgs) SchemaRegistryClusterConfigSchemaRegistryClusterPtrInput { - return (*schemaRegistryClusterConfigSchemaRegistryClusterPtrType)(v) +func SchemaExporterDestinationSchemaRegistryClusterPtr(v *SchemaExporterDestinationSchemaRegistryClusterArgs) SchemaExporterDestinationSchemaRegistryClusterPtrInput { + return 
(*schemaExporterDestinationSchemaRegistryClusterPtrType)(v) } -func (*schemaRegistryClusterConfigSchemaRegistryClusterPtrType) ElementType() reflect.Type { - return reflect.TypeOf((**SchemaRegistryClusterConfigSchemaRegistryCluster)(nil)).Elem() +func (*schemaExporterDestinationSchemaRegistryClusterPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**SchemaExporterDestinationSchemaRegistryCluster)(nil)).Elem() } -func (i *schemaRegistryClusterConfigSchemaRegistryClusterPtrType) ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput() SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput { - return i.ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(context.Background()) +func (i *schemaExporterDestinationSchemaRegistryClusterPtrType) ToSchemaExporterDestinationSchemaRegistryClusterPtrOutput() SchemaExporterDestinationSchemaRegistryClusterPtrOutput { + return i.ToSchemaExporterDestinationSchemaRegistryClusterPtrOutputWithContext(context.Background()) } -func (i *schemaRegistryClusterConfigSchemaRegistryClusterPtrType) ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput { - return pulumi.ToOutputWithContext(ctx, i).(SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput) +func (i *schemaExporterDestinationSchemaRegistryClusterPtrType) ToSchemaExporterDestinationSchemaRegistryClusterPtrOutputWithContext(ctx context.Context) SchemaExporterDestinationSchemaRegistryClusterPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(SchemaExporterDestinationSchemaRegistryClusterPtrOutput) } -func (i *schemaRegistryClusterConfigSchemaRegistryClusterPtrType) ToOutput(ctx context.Context) pulumix.Output[*SchemaRegistryClusterConfigSchemaRegistryCluster] { - return pulumix.Output[*SchemaRegistryClusterConfigSchemaRegistryCluster]{ - OutputState: i.ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(ctx).OutputState, 
+func (i *schemaExporterDestinationSchemaRegistryClusterPtrType) ToOutput(ctx context.Context) pulumix.Output[*SchemaExporterDestinationSchemaRegistryCluster] { + return pulumix.Output[*SchemaExporterDestinationSchemaRegistryCluster]{ + OutputState: i.ToSchemaExporterDestinationSchemaRegistryClusterPtrOutputWithContext(ctx).OutputState, } } -type SchemaRegistryClusterConfigSchemaRegistryClusterOutput struct{ *pulumi.OutputState } +type SchemaExporterDestinationSchemaRegistryClusterOutput struct{ *pulumi.OutputState } -func (SchemaRegistryClusterConfigSchemaRegistryClusterOutput) ElementType() reflect.Type { - return reflect.TypeOf((*SchemaRegistryClusterConfigSchemaRegistryCluster)(nil)).Elem() +func (SchemaExporterDestinationSchemaRegistryClusterOutput) ElementType() reflect.Type { + return reflect.TypeOf((*SchemaExporterDestinationSchemaRegistryCluster)(nil)).Elem() } -func (o SchemaRegistryClusterConfigSchemaRegistryClusterOutput) ToSchemaRegistryClusterConfigSchemaRegistryClusterOutput() SchemaRegistryClusterConfigSchemaRegistryClusterOutput { +func (o SchemaExporterDestinationSchemaRegistryClusterOutput) ToSchemaExporterDestinationSchemaRegistryClusterOutput() SchemaExporterDestinationSchemaRegistryClusterOutput { return o } -func (o SchemaRegistryClusterConfigSchemaRegistryClusterOutput) ToSchemaRegistryClusterConfigSchemaRegistryClusterOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigSchemaRegistryClusterOutput { +func (o SchemaExporterDestinationSchemaRegistryClusterOutput) ToSchemaExporterDestinationSchemaRegistryClusterOutputWithContext(ctx context.Context) SchemaExporterDestinationSchemaRegistryClusterOutput { return o } -func (o SchemaRegistryClusterConfigSchemaRegistryClusterOutput) ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput() SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput { - return o.ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(context.Background()) +func (o 
SchemaExporterDestinationSchemaRegistryClusterOutput) ToSchemaExporterDestinationSchemaRegistryClusterPtrOutput() SchemaExporterDestinationSchemaRegistryClusterPtrOutput { + return o.ToSchemaExporterDestinationSchemaRegistryClusterPtrOutputWithContext(context.Background()) } -func (o SchemaRegistryClusterConfigSchemaRegistryClusterOutput) ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput { - return o.ApplyTWithContext(ctx, func(_ context.Context, v SchemaRegistryClusterConfigSchemaRegistryCluster) *SchemaRegistryClusterConfigSchemaRegistryCluster { +func (o SchemaExporterDestinationSchemaRegistryClusterOutput) ToSchemaExporterDestinationSchemaRegistryClusterPtrOutputWithContext(ctx context.Context) SchemaExporterDestinationSchemaRegistryClusterPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v SchemaExporterDestinationSchemaRegistryCluster) *SchemaExporterDestinationSchemaRegistryCluster { return &v - }).(SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput) + }).(SchemaExporterDestinationSchemaRegistryClusterPtrOutput) } -func (o SchemaRegistryClusterConfigSchemaRegistryClusterOutput) ToOutput(ctx context.Context) pulumix.Output[SchemaRegistryClusterConfigSchemaRegistryCluster] { - return pulumix.Output[SchemaRegistryClusterConfigSchemaRegistryCluster]{ +func (o SchemaExporterDestinationSchemaRegistryClusterOutput) ToOutput(ctx context.Context) pulumix.Output[SchemaExporterDestinationSchemaRegistryCluster] { + return pulumix.Output[SchemaExporterDestinationSchemaRegistryCluster]{ OutputState: o.OutputState, } } -// The ID of the Schema Registry cluster, for example, `lsrc-abc123`. 
-func (o SchemaRegistryClusterConfigSchemaRegistryClusterOutput) Id() pulumi.StringOutput { - return o.ApplyT(func(v SchemaRegistryClusterConfigSchemaRegistryCluster) string { return v.Id }).(pulumi.StringOutput) +func (o SchemaExporterDestinationSchemaRegistryClusterOutput) Credentials() SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput { + return o.ApplyT(func(v SchemaExporterDestinationSchemaRegistryCluster) SchemaExporterDestinationSchemaRegistryClusterCredentials { + return v.Credentials + }).(SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput) } -type SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput struct{ *pulumi.OutputState } +// The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). +func (o SchemaExporterDestinationSchemaRegistryClusterOutput) RestEndpoint() pulumi.StringOutput { + return o.ApplyT(func(v SchemaExporterDestinationSchemaRegistryCluster) string { return v.RestEndpoint }).(pulumi.StringOutput) +} -func (SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput) ElementType() reflect.Type { - return reflect.TypeOf((**SchemaRegistryClusterConfigSchemaRegistryCluster)(nil)).Elem() +type SchemaExporterDestinationSchemaRegistryClusterPtrOutput struct{ *pulumi.OutputState } + +func (SchemaExporterDestinationSchemaRegistryClusterPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**SchemaExporterDestinationSchemaRegistryCluster)(nil)).Elem() } -func (o SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput) ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput() SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput { +func (o SchemaExporterDestinationSchemaRegistryClusterPtrOutput) ToSchemaExporterDestinationSchemaRegistryClusterPtrOutput() SchemaExporterDestinationSchemaRegistryClusterPtrOutput { return o } -func (o SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput) 
ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput { +func (o SchemaExporterDestinationSchemaRegistryClusterPtrOutput) ToSchemaExporterDestinationSchemaRegistryClusterPtrOutputWithContext(ctx context.Context) SchemaExporterDestinationSchemaRegistryClusterPtrOutput { return o } -func (o SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput) ToOutput(ctx context.Context) pulumix.Output[*SchemaRegistryClusterConfigSchemaRegistryCluster] { - return pulumix.Output[*SchemaRegistryClusterConfigSchemaRegistryCluster]{ +func (o SchemaExporterDestinationSchemaRegistryClusterPtrOutput) ToOutput(ctx context.Context) pulumix.Output[*SchemaExporterDestinationSchemaRegistryCluster] { + return pulumix.Output[*SchemaExporterDestinationSchemaRegistryCluster]{ OutputState: o.OutputState, } } -func (o SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput) Elem() SchemaRegistryClusterConfigSchemaRegistryClusterOutput { +func (o SchemaExporterDestinationSchemaRegistryClusterPtrOutput) Elem() SchemaExporterDestinationSchemaRegistryClusterOutput { + return o.ApplyT(func(v *SchemaExporterDestinationSchemaRegistryCluster) SchemaExporterDestinationSchemaRegistryCluster { + if v != nil { + return *v + } + var ret SchemaExporterDestinationSchemaRegistryCluster + return ret + }).(SchemaExporterDestinationSchemaRegistryClusterOutput) +} + +func (o SchemaExporterDestinationSchemaRegistryClusterPtrOutput) Credentials() SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput { + return o.ApplyT(func(v *SchemaExporterDestinationSchemaRegistryCluster) *SchemaExporterDestinationSchemaRegistryClusterCredentials { + if v == nil { + return nil + } + return &v.Credentials + }).(SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput) +} + +// The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). 
+func (o SchemaExporterDestinationSchemaRegistryClusterPtrOutput) RestEndpoint() pulumi.StringPtrOutput { + return o.ApplyT(func(v *SchemaExporterDestinationSchemaRegistryCluster) *string { + if v == nil { + return nil + } + return &v.RestEndpoint + }).(pulumi.StringPtrOutput) +} + +type SchemaExporterDestinationSchemaRegistryClusterCredentials struct { + // The Schema Registry API Key. + Key string `pulumi:"key"` + // The Schema Registry API Secret. + Secret string `pulumi:"secret"` +} + +// SchemaExporterDestinationSchemaRegistryClusterCredentialsInput is an input type that accepts SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs and SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput values. +// You can construct a concrete instance of `SchemaExporterDestinationSchemaRegistryClusterCredentialsInput` via: +// +// SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs{...} +type SchemaExporterDestinationSchemaRegistryClusterCredentialsInput interface { + pulumi.Input + + ToSchemaExporterDestinationSchemaRegistryClusterCredentialsOutput() SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput + ToSchemaExporterDestinationSchemaRegistryClusterCredentialsOutputWithContext(context.Context) SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput +} + +type SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs struct { + // The Schema Registry API Key. + Key pulumi.StringInput `pulumi:"key"` + // The Schema Registry API Secret. 
+ Secret pulumi.StringInput `pulumi:"secret"` +} + +func (SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs) ElementType() reflect.Type { + return reflect.TypeOf((*SchemaExporterDestinationSchemaRegistryClusterCredentials)(nil)).Elem() +} + +func (i SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs) ToSchemaExporterDestinationSchemaRegistryClusterCredentialsOutput() SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput { + return i.ToSchemaExporterDestinationSchemaRegistryClusterCredentialsOutputWithContext(context.Background()) +} + +func (i SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs) ToSchemaExporterDestinationSchemaRegistryClusterCredentialsOutputWithContext(ctx context.Context) SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput { + return pulumi.ToOutputWithContext(ctx, i).(SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput) +} + +func (i SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs) ToOutput(ctx context.Context) pulumix.Output[SchemaExporterDestinationSchemaRegistryClusterCredentials] { + return pulumix.Output[SchemaExporterDestinationSchemaRegistryClusterCredentials]{ + OutputState: i.ToSchemaExporterDestinationSchemaRegistryClusterCredentialsOutputWithContext(ctx).OutputState, + } +} + +func (i SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs) ToSchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput() SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput { + return i.ToSchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutputWithContext(context.Background()) +} + +func (i SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs) ToSchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutputWithContext(ctx context.Context) SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput { + return pulumi.ToOutputWithContext(ctx, 
i).(SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput).ToSchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutputWithContext(ctx) +} + +// SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrInput is an input type that accepts SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs, SchemaExporterDestinationSchemaRegistryClusterCredentialsPtr and SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput values. +// You can construct a concrete instance of `SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrInput` via: +// +// SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs{...} +// +// or: +// +// nil +type SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrInput interface { + pulumi.Input + + ToSchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput() SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput + ToSchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutputWithContext(context.Context) SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput +} + +type schemaExporterDestinationSchemaRegistryClusterCredentialsPtrType SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs + +func SchemaExporterDestinationSchemaRegistryClusterCredentialsPtr(v *SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs) SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrInput { + return (*schemaExporterDestinationSchemaRegistryClusterCredentialsPtrType)(v) +} + +func (*schemaExporterDestinationSchemaRegistryClusterCredentialsPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**SchemaExporterDestinationSchemaRegistryClusterCredentials)(nil)).Elem() +} + +func (i *schemaExporterDestinationSchemaRegistryClusterCredentialsPtrType) ToSchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput() SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput { + return 
i.ToSchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutputWithContext(context.Background()) +} + +func (i *schemaExporterDestinationSchemaRegistryClusterCredentialsPtrType) ToSchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutputWithContext(ctx context.Context) SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput) +} + +func (i *schemaExporterDestinationSchemaRegistryClusterCredentialsPtrType) ToOutput(ctx context.Context) pulumix.Output[*SchemaExporterDestinationSchemaRegistryClusterCredentials] { + return pulumix.Output[*SchemaExporterDestinationSchemaRegistryClusterCredentials]{ + OutputState: i.ToSchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutputWithContext(ctx).OutputState, + } +} + +type SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput struct{ *pulumi.OutputState } + +func (SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput) ElementType() reflect.Type { + return reflect.TypeOf((*SchemaExporterDestinationSchemaRegistryClusterCredentials)(nil)).Elem() +} + +func (o SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput) ToSchemaExporterDestinationSchemaRegistryClusterCredentialsOutput() SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput { + return o +} + +func (o SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput) ToSchemaExporterDestinationSchemaRegistryClusterCredentialsOutputWithContext(ctx context.Context) SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput { + return o +} + +func (o SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput) ToSchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput() SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput { + return o.ToSchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutputWithContext(context.Background()) +} + +func (o 
SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput) ToSchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutputWithContext(ctx context.Context) SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v SchemaExporterDestinationSchemaRegistryClusterCredentials) *SchemaExporterDestinationSchemaRegistryClusterCredentials { + return &v + }).(SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput) +} + +func (o SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput) ToOutput(ctx context.Context) pulumix.Output[SchemaExporterDestinationSchemaRegistryClusterCredentials] { + return pulumix.Output[SchemaExporterDestinationSchemaRegistryClusterCredentials]{ + OutputState: o.OutputState, + } +} + +// The Schema Registry API Key. +func (o SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput) Key() pulumi.StringOutput { + return o.ApplyT(func(v SchemaExporterDestinationSchemaRegistryClusterCredentials) string { return v.Key }).(pulumi.StringOutput) +} + +// The Schema Registry API Secret. 
+func (o SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput) Secret() pulumi.StringOutput { + return o.ApplyT(func(v SchemaExporterDestinationSchemaRegistryClusterCredentials) string { return v.Secret }).(pulumi.StringOutput) +} + +type SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput struct{ *pulumi.OutputState } + +func (SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**SchemaExporterDestinationSchemaRegistryClusterCredentials)(nil)).Elem() +} + +func (o SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput) ToSchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput() SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput { + return o +} + +func (o SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput) ToSchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutputWithContext(ctx context.Context) SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput { + return o +} + +func (o SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput) ToOutput(ctx context.Context) pulumix.Output[*SchemaExporterDestinationSchemaRegistryClusterCredentials] { + return pulumix.Output[*SchemaExporterDestinationSchemaRegistryClusterCredentials]{ + OutputState: o.OutputState, + } +} + +func (o SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput) Elem() SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput { + return o.ApplyT(func(v *SchemaExporterDestinationSchemaRegistryClusterCredentials) SchemaExporterDestinationSchemaRegistryClusterCredentials { + if v != nil { + return *v + } + var ret SchemaExporterDestinationSchemaRegistryClusterCredentials + return ret + }).(SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput) +} + +// The Schema Registry API Key. 
+func (o SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput) Key() pulumi.StringPtrOutput { + return o.ApplyT(func(v *SchemaExporterDestinationSchemaRegistryClusterCredentials) *string { + if v == nil { + return nil + } + return &v.Key + }).(pulumi.StringPtrOutput) +} + +// The Schema Registry API Secret. +func (o SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput) Secret() pulumi.StringPtrOutput { + return o.ApplyT(func(v *SchemaExporterDestinationSchemaRegistryClusterCredentials) *string { + if v == nil { + return nil + } + return &v.Secret + }).(pulumi.StringPtrOutput) +} + +type SchemaExporterSchemaRegistryCluster struct { + // The ID of the Schema Registry cluster, for example, `lsrc-abc123`. + Id string `pulumi:"id"` +} + +// SchemaExporterSchemaRegistryClusterInput is an input type that accepts SchemaExporterSchemaRegistryClusterArgs and SchemaExporterSchemaRegistryClusterOutput values. +// You can construct a concrete instance of `SchemaExporterSchemaRegistryClusterInput` via: +// +// SchemaExporterSchemaRegistryClusterArgs{...} +type SchemaExporterSchemaRegistryClusterInput interface { + pulumi.Input + + ToSchemaExporterSchemaRegistryClusterOutput() SchemaExporterSchemaRegistryClusterOutput + ToSchemaExporterSchemaRegistryClusterOutputWithContext(context.Context) SchemaExporterSchemaRegistryClusterOutput +} + +type SchemaExporterSchemaRegistryClusterArgs struct { + // The ID of the Schema Registry cluster, for example, `lsrc-abc123`. 
+ Id pulumi.StringInput `pulumi:"id"` +} + +func (SchemaExporterSchemaRegistryClusterArgs) ElementType() reflect.Type { + return reflect.TypeOf((*SchemaExporterSchemaRegistryCluster)(nil)).Elem() +} + +func (i SchemaExporterSchemaRegistryClusterArgs) ToSchemaExporterSchemaRegistryClusterOutput() SchemaExporterSchemaRegistryClusterOutput { + return i.ToSchemaExporterSchemaRegistryClusterOutputWithContext(context.Background()) +} + +func (i SchemaExporterSchemaRegistryClusterArgs) ToSchemaExporterSchemaRegistryClusterOutputWithContext(ctx context.Context) SchemaExporterSchemaRegistryClusterOutput { + return pulumi.ToOutputWithContext(ctx, i).(SchemaExporterSchemaRegistryClusterOutput) +} + +func (i SchemaExporterSchemaRegistryClusterArgs) ToOutput(ctx context.Context) pulumix.Output[SchemaExporterSchemaRegistryCluster] { + return pulumix.Output[SchemaExporterSchemaRegistryCluster]{ + OutputState: i.ToSchemaExporterSchemaRegistryClusterOutputWithContext(ctx).OutputState, + } +} + +func (i SchemaExporterSchemaRegistryClusterArgs) ToSchemaExporterSchemaRegistryClusterPtrOutput() SchemaExporterSchemaRegistryClusterPtrOutput { + return i.ToSchemaExporterSchemaRegistryClusterPtrOutputWithContext(context.Background()) +} + +func (i SchemaExporterSchemaRegistryClusterArgs) ToSchemaExporterSchemaRegistryClusterPtrOutputWithContext(ctx context.Context) SchemaExporterSchemaRegistryClusterPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(SchemaExporterSchemaRegistryClusterOutput).ToSchemaExporterSchemaRegistryClusterPtrOutputWithContext(ctx) +} + +// SchemaExporterSchemaRegistryClusterPtrInput is an input type that accepts SchemaExporterSchemaRegistryClusterArgs, SchemaExporterSchemaRegistryClusterPtr and SchemaExporterSchemaRegistryClusterPtrOutput values. 
+// You can construct a concrete instance of `SchemaExporterSchemaRegistryClusterPtrInput` via: +// +// SchemaExporterSchemaRegistryClusterArgs{...} +// +// or: +// +// nil +type SchemaExporterSchemaRegistryClusterPtrInput interface { + pulumi.Input + + ToSchemaExporterSchemaRegistryClusterPtrOutput() SchemaExporterSchemaRegistryClusterPtrOutput + ToSchemaExporterSchemaRegistryClusterPtrOutputWithContext(context.Context) SchemaExporterSchemaRegistryClusterPtrOutput +} + +type schemaExporterSchemaRegistryClusterPtrType SchemaExporterSchemaRegistryClusterArgs + +func SchemaExporterSchemaRegistryClusterPtr(v *SchemaExporterSchemaRegistryClusterArgs) SchemaExporterSchemaRegistryClusterPtrInput { + return (*schemaExporterSchemaRegistryClusterPtrType)(v) +} + +func (*schemaExporterSchemaRegistryClusterPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**SchemaExporterSchemaRegistryCluster)(nil)).Elem() +} + +func (i *schemaExporterSchemaRegistryClusterPtrType) ToSchemaExporterSchemaRegistryClusterPtrOutput() SchemaExporterSchemaRegistryClusterPtrOutput { + return i.ToSchemaExporterSchemaRegistryClusterPtrOutputWithContext(context.Background()) +} + +func (i *schemaExporterSchemaRegistryClusterPtrType) ToSchemaExporterSchemaRegistryClusterPtrOutputWithContext(ctx context.Context) SchemaExporterSchemaRegistryClusterPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(SchemaExporterSchemaRegistryClusterPtrOutput) +} + +func (i *schemaExporterSchemaRegistryClusterPtrType) ToOutput(ctx context.Context) pulumix.Output[*SchemaExporterSchemaRegistryCluster] { + return pulumix.Output[*SchemaExporterSchemaRegistryCluster]{ + OutputState: i.ToSchemaExporterSchemaRegistryClusterPtrOutputWithContext(ctx).OutputState, + } +} + +type SchemaExporterSchemaRegistryClusterOutput struct{ *pulumi.OutputState } + +func (SchemaExporterSchemaRegistryClusterOutput) ElementType() reflect.Type { + return reflect.TypeOf((*SchemaExporterSchemaRegistryCluster)(nil)).Elem() +} + +func 
(o SchemaExporterSchemaRegistryClusterOutput) ToSchemaExporterSchemaRegistryClusterOutput() SchemaExporterSchemaRegistryClusterOutput { + return o +} + +func (o SchemaExporterSchemaRegistryClusterOutput) ToSchemaExporterSchemaRegistryClusterOutputWithContext(ctx context.Context) SchemaExporterSchemaRegistryClusterOutput { + return o +} + +func (o SchemaExporterSchemaRegistryClusterOutput) ToSchemaExporterSchemaRegistryClusterPtrOutput() SchemaExporterSchemaRegistryClusterPtrOutput { + return o.ToSchemaExporterSchemaRegistryClusterPtrOutputWithContext(context.Background()) +} + +func (o SchemaExporterSchemaRegistryClusterOutput) ToSchemaExporterSchemaRegistryClusterPtrOutputWithContext(ctx context.Context) SchemaExporterSchemaRegistryClusterPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v SchemaExporterSchemaRegistryCluster) *SchemaExporterSchemaRegistryCluster { + return &v + }).(SchemaExporterSchemaRegistryClusterPtrOutput) +} + +func (o SchemaExporterSchemaRegistryClusterOutput) ToOutput(ctx context.Context) pulumix.Output[SchemaExporterSchemaRegistryCluster] { + return pulumix.Output[SchemaExporterSchemaRegistryCluster]{ + OutputState: o.OutputState, + } +} + +// The ID of the Schema Registry cluster, for example, `lsrc-abc123`. 
+func (o SchemaExporterSchemaRegistryClusterOutput) Id() pulumi.StringOutput { + return o.ApplyT(func(v SchemaExporterSchemaRegistryCluster) string { return v.Id }).(pulumi.StringOutput) +} + +type SchemaExporterSchemaRegistryClusterPtrOutput struct{ *pulumi.OutputState } + +func (SchemaExporterSchemaRegistryClusterPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**SchemaExporterSchemaRegistryCluster)(nil)).Elem() +} + +func (o SchemaExporterSchemaRegistryClusterPtrOutput) ToSchemaExporterSchemaRegistryClusterPtrOutput() SchemaExporterSchemaRegistryClusterPtrOutput { + return o +} + +func (o SchemaExporterSchemaRegistryClusterPtrOutput) ToSchemaExporterSchemaRegistryClusterPtrOutputWithContext(ctx context.Context) SchemaExporterSchemaRegistryClusterPtrOutput { + return o +} + +func (o SchemaExporterSchemaRegistryClusterPtrOutput) ToOutput(ctx context.Context) pulumix.Output[*SchemaExporterSchemaRegistryCluster] { + return pulumix.Output[*SchemaExporterSchemaRegistryCluster]{ + OutputState: o.OutputState, + } +} + +func (o SchemaExporterSchemaRegistryClusterPtrOutput) Elem() SchemaExporterSchemaRegistryClusterOutput { + return o.ApplyT(func(v *SchemaExporterSchemaRegistryCluster) SchemaExporterSchemaRegistryCluster { + if v != nil { + return *v + } + var ret SchemaExporterSchemaRegistryCluster + return ret + }).(SchemaExporterSchemaRegistryClusterOutput) +} + +// The ID of the Schema Registry cluster, for example, `lsrc-abc123`. +func (o SchemaExporterSchemaRegistryClusterPtrOutput) Id() pulumi.StringPtrOutput { + return o.ApplyT(func(v *SchemaExporterSchemaRegistryCluster) *string { + if v == nil { + return nil + } + return &v.Id + }).(pulumi.StringPtrOutput) +} + +type SchemaRegistryClusterConfigCredentials struct { + // The Schema Registry API Key. 
+ Key string `pulumi:"key"` + Secret string `pulumi:"secret"` +} + +// SchemaRegistryClusterConfigCredentialsInput is an input type that accepts SchemaRegistryClusterConfigCredentialsArgs and SchemaRegistryClusterConfigCredentialsOutput values. +// You can construct a concrete instance of `SchemaRegistryClusterConfigCredentialsInput` via: +// +// SchemaRegistryClusterConfigCredentialsArgs{...} +type SchemaRegistryClusterConfigCredentialsInput interface { + pulumi.Input + + ToSchemaRegistryClusterConfigCredentialsOutput() SchemaRegistryClusterConfigCredentialsOutput + ToSchemaRegistryClusterConfigCredentialsOutputWithContext(context.Context) SchemaRegistryClusterConfigCredentialsOutput +} + +type SchemaRegistryClusterConfigCredentialsArgs struct { + // The Schema Registry API Key. + Key pulumi.StringInput `pulumi:"key"` + Secret pulumi.StringInput `pulumi:"secret"` +} + +func (SchemaRegistryClusterConfigCredentialsArgs) ElementType() reflect.Type { + return reflect.TypeOf((*SchemaRegistryClusterConfigCredentials)(nil)).Elem() +} + +func (i SchemaRegistryClusterConfigCredentialsArgs) ToSchemaRegistryClusterConfigCredentialsOutput() SchemaRegistryClusterConfigCredentialsOutput { + return i.ToSchemaRegistryClusterConfigCredentialsOutputWithContext(context.Background()) +} + +func (i SchemaRegistryClusterConfigCredentialsArgs) ToSchemaRegistryClusterConfigCredentialsOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigCredentialsOutput { + return pulumi.ToOutputWithContext(ctx, i).(SchemaRegistryClusterConfigCredentialsOutput) +} + +func (i SchemaRegistryClusterConfigCredentialsArgs) ToOutput(ctx context.Context) pulumix.Output[SchemaRegistryClusterConfigCredentials] { + return pulumix.Output[SchemaRegistryClusterConfigCredentials]{ + OutputState: i.ToSchemaRegistryClusterConfigCredentialsOutputWithContext(ctx).OutputState, + } +} + +func (i SchemaRegistryClusterConfigCredentialsArgs) ToSchemaRegistryClusterConfigCredentialsPtrOutput() 
SchemaRegistryClusterConfigCredentialsPtrOutput { + return i.ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(context.Background()) +} + +func (i SchemaRegistryClusterConfigCredentialsArgs) ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigCredentialsPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(SchemaRegistryClusterConfigCredentialsOutput).ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(ctx) +} + +// SchemaRegistryClusterConfigCredentialsPtrInput is an input type that accepts SchemaRegistryClusterConfigCredentialsArgs, SchemaRegistryClusterConfigCredentialsPtr and SchemaRegistryClusterConfigCredentialsPtrOutput values. +// You can construct a concrete instance of `SchemaRegistryClusterConfigCredentialsPtrInput` via: +// +// SchemaRegistryClusterConfigCredentialsArgs{...} +// +// or: +// +// nil +type SchemaRegistryClusterConfigCredentialsPtrInput interface { + pulumi.Input + + ToSchemaRegistryClusterConfigCredentialsPtrOutput() SchemaRegistryClusterConfigCredentialsPtrOutput + ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(context.Context) SchemaRegistryClusterConfigCredentialsPtrOutput +} + +type schemaRegistryClusterConfigCredentialsPtrType SchemaRegistryClusterConfigCredentialsArgs + +func SchemaRegistryClusterConfigCredentialsPtr(v *SchemaRegistryClusterConfigCredentialsArgs) SchemaRegistryClusterConfigCredentialsPtrInput { + return (*schemaRegistryClusterConfigCredentialsPtrType)(v) +} + +func (*schemaRegistryClusterConfigCredentialsPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**SchemaRegistryClusterConfigCredentials)(nil)).Elem() +} + +func (i *schemaRegistryClusterConfigCredentialsPtrType) ToSchemaRegistryClusterConfigCredentialsPtrOutput() SchemaRegistryClusterConfigCredentialsPtrOutput { + return i.ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(context.Background()) +} + +func (i 
*schemaRegistryClusterConfigCredentialsPtrType) ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigCredentialsPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(SchemaRegistryClusterConfigCredentialsPtrOutput) +} + +func (i *schemaRegistryClusterConfigCredentialsPtrType) ToOutput(ctx context.Context) pulumix.Output[*SchemaRegistryClusterConfigCredentials] { + return pulumix.Output[*SchemaRegistryClusterConfigCredentials]{ + OutputState: i.ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(ctx).OutputState, + } +} + +type SchemaRegistryClusterConfigCredentialsOutput struct{ *pulumi.OutputState } + +func (SchemaRegistryClusterConfigCredentialsOutput) ElementType() reflect.Type { + return reflect.TypeOf((*SchemaRegistryClusterConfigCredentials)(nil)).Elem() +} + +func (o SchemaRegistryClusterConfigCredentialsOutput) ToSchemaRegistryClusterConfigCredentialsOutput() SchemaRegistryClusterConfigCredentialsOutput { + return o +} + +func (o SchemaRegistryClusterConfigCredentialsOutput) ToSchemaRegistryClusterConfigCredentialsOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigCredentialsOutput { + return o +} + +func (o SchemaRegistryClusterConfigCredentialsOutput) ToSchemaRegistryClusterConfigCredentialsPtrOutput() SchemaRegistryClusterConfigCredentialsPtrOutput { + return o.ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(context.Background()) +} + +func (o SchemaRegistryClusterConfigCredentialsOutput) ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigCredentialsPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v SchemaRegistryClusterConfigCredentials) *SchemaRegistryClusterConfigCredentials { + return &v + }).(SchemaRegistryClusterConfigCredentialsPtrOutput) +} + +func (o SchemaRegistryClusterConfigCredentialsOutput) ToOutput(ctx context.Context) 
pulumix.Output[SchemaRegistryClusterConfigCredentials] { + return pulumix.Output[SchemaRegistryClusterConfigCredentials]{ + OutputState: o.OutputState, + } +} + +// The Schema Registry API Key. +func (o SchemaRegistryClusterConfigCredentialsOutput) Key() pulumi.StringOutput { + return o.ApplyT(func(v SchemaRegistryClusterConfigCredentials) string { return v.Key }).(pulumi.StringOutput) +} + +func (o SchemaRegistryClusterConfigCredentialsOutput) Secret() pulumi.StringOutput { + return o.ApplyT(func(v SchemaRegistryClusterConfigCredentials) string { return v.Secret }).(pulumi.StringOutput) +} + +type SchemaRegistryClusterConfigCredentialsPtrOutput struct{ *pulumi.OutputState } + +func (SchemaRegistryClusterConfigCredentialsPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**SchemaRegistryClusterConfigCredentials)(nil)).Elem() +} + +func (o SchemaRegistryClusterConfigCredentialsPtrOutput) ToSchemaRegistryClusterConfigCredentialsPtrOutput() SchemaRegistryClusterConfigCredentialsPtrOutput { + return o +} + +func (o SchemaRegistryClusterConfigCredentialsPtrOutput) ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigCredentialsPtrOutput { + return o +} + +func (o SchemaRegistryClusterConfigCredentialsPtrOutput) ToOutput(ctx context.Context) pulumix.Output[*SchemaRegistryClusterConfigCredentials] { + return pulumix.Output[*SchemaRegistryClusterConfigCredentials]{ + OutputState: o.OutputState, + } +} + +func (o SchemaRegistryClusterConfigCredentialsPtrOutput) Elem() SchemaRegistryClusterConfigCredentialsOutput { + return o.ApplyT(func(v *SchemaRegistryClusterConfigCredentials) SchemaRegistryClusterConfigCredentials { + if v != nil { + return *v + } + var ret SchemaRegistryClusterConfigCredentials + return ret + }).(SchemaRegistryClusterConfigCredentialsOutput) +} + +// The Schema Registry API Key. 
+func (o SchemaRegistryClusterConfigCredentialsPtrOutput) Key() pulumi.StringPtrOutput { + return o.ApplyT(func(v *SchemaRegistryClusterConfigCredentials) *string { + if v == nil { + return nil + } + return &v.Key + }).(pulumi.StringPtrOutput) +} + +func (o SchemaRegistryClusterConfigCredentialsPtrOutput) Secret() pulumi.StringPtrOutput { + return o.ApplyT(func(v *SchemaRegistryClusterConfigCredentials) *string { + if v == nil { + return nil + } + return &v.Secret + }).(pulumi.StringPtrOutput) +} + +type SchemaRegistryClusterConfigSchemaRegistryCluster struct { + // The ID of the Schema Registry cluster, for example, `lsrc-abc123`. + Id string `pulumi:"id"` +} + +// SchemaRegistryClusterConfigSchemaRegistryClusterInput is an input type that accepts SchemaRegistryClusterConfigSchemaRegistryClusterArgs and SchemaRegistryClusterConfigSchemaRegistryClusterOutput values. +// You can construct a concrete instance of `SchemaRegistryClusterConfigSchemaRegistryClusterInput` via: +// +// SchemaRegistryClusterConfigSchemaRegistryClusterArgs{...} +type SchemaRegistryClusterConfigSchemaRegistryClusterInput interface { + pulumi.Input + + ToSchemaRegistryClusterConfigSchemaRegistryClusterOutput() SchemaRegistryClusterConfigSchemaRegistryClusterOutput + ToSchemaRegistryClusterConfigSchemaRegistryClusterOutputWithContext(context.Context) SchemaRegistryClusterConfigSchemaRegistryClusterOutput +} + +type SchemaRegistryClusterConfigSchemaRegistryClusterArgs struct { + // The ID of the Schema Registry cluster, for example, `lsrc-abc123`. 
+ Id pulumi.StringInput `pulumi:"id"` +} + +func (SchemaRegistryClusterConfigSchemaRegistryClusterArgs) ElementType() reflect.Type { + return reflect.TypeOf((*SchemaRegistryClusterConfigSchemaRegistryCluster)(nil)).Elem() +} + +func (i SchemaRegistryClusterConfigSchemaRegistryClusterArgs) ToSchemaRegistryClusterConfigSchemaRegistryClusterOutput() SchemaRegistryClusterConfigSchemaRegistryClusterOutput { + return i.ToSchemaRegistryClusterConfigSchemaRegistryClusterOutputWithContext(context.Background()) +} + +func (i SchemaRegistryClusterConfigSchemaRegistryClusterArgs) ToSchemaRegistryClusterConfigSchemaRegistryClusterOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigSchemaRegistryClusterOutput { + return pulumi.ToOutputWithContext(ctx, i).(SchemaRegistryClusterConfigSchemaRegistryClusterOutput) +} + +func (i SchemaRegistryClusterConfigSchemaRegistryClusterArgs) ToOutput(ctx context.Context) pulumix.Output[SchemaRegistryClusterConfigSchemaRegistryCluster] { + return pulumix.Output[SchemaRegistryClusterConfigSchemaRegistryCluster]{ + OutputState: i.ToSchemaRegistryClusterConfigSchemaRegistryClusterOutputWithContext(ctx).OutputState, + } +} + +func (i SchemaRegistryClusterConfigSchemaRegistryClusterArgs) ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput() SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput { + return i.ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(context.Background()) +} + +func (i SchemaRegistryClusterConfigSchemaRegistryClusterArgs) ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(SchemaRegistryClusterConfigSchemaRegistryClusterOutput).ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(ctx) +} + +// SchemaRegistryClusterConfigSchemaRegistryClusterPtrInput is an input type that accepts 
SchemaRegistryClusterConfigSchemaRegistryClusterArgs, SchemaRegistryClusterConfigSchemaRegistryClusterPtr and SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput values. +// You can construct a concrete instance of `SchemaRegistryClusterConfigSchemaRegistryClusterPtrInput` via: +// +// SchemaRegistryClusterConfigSchemaRegistryClusterArgs{...} +// +// or: +// +// nil +type SchemaRegistryClusterConfigSchemaRegistryClusterPtrInput interface { + pulumi.Input + + ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput() SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput + ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(context.Context) SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput +} + +type schemaRegistryClusterConfigSchemaRegistryClusterPtrType SchemaRegistryClusterConfigSchemaRegistryClusterArgs + +func SchemaRegistryClusterConfigSchemaRegistryClusterPtr(v *SchemaRegistryClusterConfigSchemaRegistryClusterArgs) SchemaRegistryClusterConfigSchemaRegistryClusterPtrInput { + return (*schemaRegistryClusterConfigSchemaRegistryClusterPtrType)(v) +} + +func (*schemaRegistryClusterConfigSchemaRegistryClusterPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**SchemaRegistryClusterConfigSchemaRegistryCluster)(nil)).Elem() +} + +func (i *schemaRegistryClusterConfigSchemaRegistryClusterPtrType) ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput() SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput { + return i.ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(context.Background()) +} + +func (i *schemaRegistryClusterConfigSchemaRegistryClusterPtrType) ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput) +} + +func (i *schemaRegistryClusterConfigSchemaRegistryClusterPtrType) 
ToOutput(ctx context.Context) pulumix.Output[*SchemaRegistryClusterConfigSchemaRegistryCluster] { + return pulumix.Output[*SchemaRegistryClusterConfigSchemaRegistryCluster]{ + OutputState: i.ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(ctx).OutputState, + } +} + +type SchemaRegistryClusterConfigSchemaRegistryClusterOutput struct{ *pulumi.OutputState } + +func (SchemaRegistryClusterConfigSchemaRegistryClusterOutput) ElementType() reflect.Type { + return reflect.TypeOf((*SchemaRegistryClusterConfigSchemaRegistryCluster)(nil)).Elem() +} + +func (o SchemaRegistryClusterConfigSchemaRegistryClusterOutput) ToSchemaRegistryClusterConfigSchemaRegistryClusterOutput() SchemaRegistryClusterConfigSchemaRegistryClusterOutput { + return o +} + +func (o SchemaRegistryClusterConfigSchemaRegistryClusterOutput) ToSchemaRegistryClusterConfigSchemaRegistryClusterOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigSchemaRegistryClusterOutput { + return o +} + +func (o SchemaRegistryClusterConfigSchemaRegistryClusterOutput) ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput() SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput { + return o.ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(context.Background()) +} + +func (o SchemaRegistryClusterConfigSchemaRegistryClusterOutput) ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v SchemaRegistryClusterConfigSchemaRegistryCluster) *SchemaRegistryClusterConfigSchemaRegistryCluster { + return &v + }).(SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput) +} + +func (o SchemaRegistryClusterConfigSchemaRegistryClusterOutput) ToOutput(ctx context.Context) pulumix.Output[SchemaRegistryClusterConfigSchemaRegistryCluster] { + return pulumix.Output[SchemaRegistryClusterConfigSchemaRegistryCluster]{ + 
OutputState: o.OutputState, + } +} + +// The ID of the Schema Registry cluster, for example, `lsrc-abc123`. +func (o SchemaRegistryClusterConfigSchemaRegistryClusterOutput) Id() pulumi.StringOutput { + return o.ApplyT(func(v SchemaRegistryClusterConfigSchemaRegistryCluster) string { return v.Id }).(pulumi.StringOutput) +} + +type SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput struct{ *pulumi.OutputState } + +func (SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**SchemaRegistryClusterConfigSchemaRegistryCluster)(nil)).Elem() +} + +func (o SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput) ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput() SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput { + return o +} + +func (o SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput) ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput { + return o +} + +func (o SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput) ToOutput(ctx context.Context) pulumix.Output[*SchemaRegistryClusterConfigSchemaRegistryCluster] { + return pulumix.Output[*SchemaRegistryClusterConfigSchemaRegistryCluster]{ + OutputState: o.OutputState, + } +} + +func (o SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput) Elem() SchemaRegistryClusterConfigSchemaRegistryClusterOutput { return o.ApplyT(func(v *SchemaRegistryClusterConfigSchemaRegistryCluster) SchemaRegistryClusterConfigSchemaRegistryCluster { if v != nil { return *v @@ -17514,6 +18342,76 @@ func (o GetByokKeyAzureArrayOutput) Index(i pulumi.IntInput) GetByokKeyAzureOutp }).(GetByokKeyAzureOutput) } +type GetFlinkComputePoolEnvironment struct { + // The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. + // + // > **Note:** Exactly one from the `id` and `displayName` attributes must be specified. 
+ Id string `pulumi:"id"` +} + +// GetFlinkComputePoolEnvironmentInput is an input type that accepts GetFlinkComputePoolEnvironmentArgs and GetFlinkComputePoolEnvironmentOutput values. +// You can construct a concrete instance of `GetFlinkComputePoolEnvironmentInput` via: +// +// GetFlinkComputePoolEnvironmentArgs{...} +type GetFlinkComputePoolEnvironmentInput interface { + pulumi.Input + + ToGetFlinkComputePoolEnvironmentOutput() GetFlinkComputePoolEnvironmentOutput + ToGetFlinkComputePoolEnvironmentOutputWithContext(context.Context) GetFlinkComputePoolEnvironmentOutput +} + +type GetFlinkComputePoolEnvironmentArgs struct { + // The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. + // + // > **Note:** Exactly one from the `id` and `displayName` attributes must be specified. + Id pulumi.StringInput `pulumi:"id"` +} + +func (GetFlinkComputePoolEnvironmentArgs) ElementType() reflect.Type { + return reflect.TypeOf((*GetFlinkComputePoolEnvironment)(nil)).Elem() +} + +func (i GetFlinkComputePoolEnvironmentArgs) ToGetFlinkComputePoolEnvironmentOutput() GetFlinkComputePoolEnvironmentOutput { + return i.ToGetFlinkComputePoolEnvironmentOutputWithContext(context.Background()) +} + +func (i GetFlinkComputePoolEnvironmentArgs) ToGetFlinkComputePoolEnvironmentOutputWithContext(ctx context.Context) GetFlinkComputePoolEnvironmentOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetFlinkComputePoolEnvironmentOutput) +} + +func (i GetFlinkComputePoolEnvironmentArgs) ToOutput(ctx context.Context) pulumix.Output[GetFlinkComputePoolEnvironment] { + return pulumix.Output[GetFlinkComputePoolEnvironment]{ + OutputState: i.ToGetFlinkComputePoolEnvironmentOutputWithContext(ctx).OutputState, + } +} + +type GetFlinkComputePoolEnvironmentOutput struct{ *pulumi.OutputState } + +func (GetFlinkComputePoolEnvironmentOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GetFlinkComputePoolEnvironment)(nil)).Elem() +} + +func (o 
GetFlinkComputePoolEnvironmentOutput) ToGetFlinkComputePoolEnvironmentOutput() GetFlinkComputePoolEnvironmentOutput { + return o +} + +func (o GetFlinkComputePoolEnvironmentOutput) ToGetFlinkComputePoolEnvironmentOutputWithContext(ctx context.Context) GetFlinkComputePoolEnvironmentOutput { + return o +} + +func (o GetFlinkComputePoolEnvironmentOutput) ToOutput(ctx context.Context) pulumix.Output[GetFlinkComputePoolEnvironment] { + return pulumix.Output[GetFlinkComputePoolEnvironment]{ + OutputState: o.OutputState, + } +} + +// The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. +// +// > **Note:** Exactly one from the `id` and `displayName` attributes must be specified. +func (o GetFlinkComputePoolEnvironmentOutput) Id() pulumi.StringOutput { + return o.ApplyT(func(v GetFlinkComputePoolEnvironment) string { return v.Id }).(pulumi.StringOutput) +} + type GetIdentityPoolIdentityProvider struct { // The ID of the Identity Provider associated with the Identity Pool, for example, `op-abc123`. 
// @@ -27629,6 +28527,8 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*ConnectorEnvironmentPtrInput)(nil)).Elem(), ConnectorEnvironmentArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*ConnectorKafkaClusterInput)(nil)).Elem(), ConnectorKafkaClusterArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*ConnectorKafkaClusterPtrInput)(nil)).Elem(), ConnectorKafkaClusterArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*FlinkComputePoolEnvironmentInput)(nil)).Elem(), FlinkComputePoolEnvironmentArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*FlinkComputePoolEnvironmentPtrInput)(nil)).Elem(), FlinkComputePoolEnvironmentArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*IdentityPoolIdentityProviderInput)(nil)).Elem(), IdentityPoolIdentityProviderArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*IdentityPoolIdentityProviderPtrInput)(nil)).Elem(), IdentityPoolIdentityProviderArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*InvitationCreatorInput)(nil)).Elem(), InvitationCreatorArgs{}) @@ -27745,6 +28645,14 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*PrivateLinkAttachmentGcpArrayInput)(nil)).Elem(), PrivateLinkAttachmentGcpArray{}) pulumi.RegisterInputType(reflect.TypeOf((*SchemaCredentialsInput)(nil)).Elem(), SchemaCredentialsArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*SchemaCredentialsPtrInput)(nil)).Elem(), SchemaCredentialsArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*SchemaExporterCredentialsInput)(nil)).Elem(), SchemaExporterCredentialsArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*SchemaExporterCredentialsPtrInput)(nil)).Elem(), SchemaExporterCredentialsArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*SchemaExporterDestinationSchemaRegistryClusterInput)(nil)).Elem(), SchemaExporterDestinationSchemaRegistryClusterArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*SchemaExporterDestinationSchemaRegistryClusterPtrInput)(nil)).Elem(), SchemaExporterDestinationSchemaRegistryClusterArgs{}) + 
pulumi.RegisterInputType(reflect.TypeOf((*SchemaExporterDestinationSchemaRegistryClusterCredentialsInput)(nil)).Elem(), SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrInput)(nil)).Elem(), SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*SchemaExporterSchemaRegistryClusterInput)(nil)).Elem(), SchemaExporterSchemaRegistryClusterArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*SchemaExporterSchemaRegistryClusterPtrInput)(nil)).Elem(), SchemaExporterSchemaRegistryClusterArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*SchemaRegistryClusterConfigCredentialsInput)(nil)).Elem(), SchemaRegistryClusterConfigCredentialsArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*SchemaRegistryClusterConfigCredentialsPtrInput)(nil)).Elem(), SchemaRegistryClusterConfigCredentialsArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*SchemaRegistryClusterConfigSchemaRegistryClusterInput)(nil)).Elem(), SchemaRegistryClusterConfigSchemaRegistryClusterArgs{}) @@ -27797,6 +28705,7 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*GetByokKeyAwArrayInput)(nil)).Elem(), GetByokKeyAwArray{}) pulumi.RegisterInputType(reflect.TypeOf((*GetByokKeyAzureInput)(nil)).Elem(), GetByokKeyAzureArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetByokKeyAzureArrayInput)(nil)).Elem(), GetByokKeyAzureArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetFlinkComputePoolEnvironmentInput)(nil)).Elem(), GetFlinkComputePoolEnvironmentArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetIdentityPoolIdentityProviderInput)(nil)).Elem(), GetIdentityPoolIdentityProviderArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetInvitationCreatorInput)(nil)).Elem(), GetInvitationCreatorArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetInvitationCreatorArrayInput)(nil)).Elem(), GetInvitationCreatorArray{}) @@ -27978,6 +28887,8 @@ func 
init() { pulumi.RegisterOutputType(ConnectorEnvironmentPtrOutput{}) pulumi.RegisterOutputType(ConnectorKafkaClusterOutput{}) pulumi.RegisterOutputType(ConnectorKafkaClusterPtrOutput{}) + pulumi.RegisterOutputType(FlinkComputePoolEnvironmentOutput{}) + pulumi.RegisterOutputType(FlinkComputePoolEnvironmentPtrOutput{}) pulumi.RegisterOutputType(IdentityPoolIdentityProviderOutput{}) pulumi.RegisterOutputType(IdentityPoolIdentityProviderPtrOutput{}) pulumi.RegisterOutputType(InvitationCreatorOutput{}) @@ -28094,6 +29005,14 @@ func init() { pulumi.RegisterOutputType(PrivateLinkAttachmentGcpArrayOutput{}) pulumi.RegisterOutputType(SchemaCredentialsOutput{}) pulumi.RegisterOutputType(SchemaCredentialsPtrOutput{}) + pulumi.RegisterOutputType(SchemaExporterCredentialsOutput{}) + pulumi.RegisterOutputType(SchemaExporterCredentialsPtrOutput{}) + pulumi.RegisterOutputType(SchemaExporterDestinationSchemaRegistryClusterOutput{}) + pulumi.RegisterOutputType(SchemaExporterDestinationSchemaRegistryClusterPtrOutput{}) + pulumi.RegisterOutputType(SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput{}) + pulumi.RegisterOutputType(SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput{}) + pulumi.RegisterOutputType(SchemaExporterSchemaRegistryClusterOutput{}) + pulumi.RegisterOutputType(SchemaExporterSchemaRegistryClusterPtrOutput{}) pulumi.RegisterOutputType(SchemaRegistryClusterConfigCredentialsOutput{}) pulumi.RegisterOutputType(SchemaRegistryClusterConfigCredentialsPtrOutput{}) pulumi.RegisterOutputType(SchemaRegistryClusterConfigSchemaRegistryClusterOutput{}) @@ -28146,6 +29065,7 @@ func init() { pulumi.RegisterOutputType(GetByokKeyAwArrayOutput{}) pulumi.RegisterOutputType(GetByokKeyAzureOutput{}) pulumi.RegisterOutputType(GetByokKeyAzureArrayOutput{}) + pulumi.RegisterOutputType(GetFlinkComputePoolEnvironmentOutput{}) pulumi.RegisterOutputType(GetIdentityPoolIdentityProviderOutput{}) pulumi.RegisterOutputType(GetInvitationCreatorOutput{}) 
pulumi.RegisterOutputType(GetInvitationCreatorArrayOutput{}) diff --git a/sdk/go/confluentcloud/schemaExporter.go b/sdk/go/confluentcloud/schemaExporter.go new file mode 100644 index 00000000..2fd55252 --- /dev/null +++ b/sdk/go/confluentcloud/schemaExporter.go @@ -0,0 +1,428 @@ +// Code generated by the Pulumi Terraform Bridge (tfgen) Tool DO NOT EDIT. +// *** WARNING: Do not edit by hand unless you're certain you know what you are doing! *** + +package confluentcloud + +import ( + "context" + "reflect" + + "errors" + "github.com/pulumi/pulumi-confluentcloud/sdk/go/confluentcloud/internal" + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" + "github.com/pulumi/pulumi/sdk/v3/go/pulumix" +) + +// ## Import +// +// You can import a Schema Exporter by using the Schema Registry cluster ID, Schema Exporter name in the format `/`, for example$ export IMPORT_SCHEMA_REGISTRY_API_KEY="" $ export IMPORT_SCHEMA_REGISTRY_API_SECRET="" $ export IMPORT_SCHEMA_REGISTRY_REST_ENDPOINT="" +// +// ```sh +// +// $ pulumi import confluentcloud:index/schemaExporter:SchemaExporter main lsrc-8wrx70/test-exporter +// +// ``` +// +// !> **Warning:** Do not forget to delete terminal command history afterwards for security purposes. +type SchemaExporter struct { + pulumi.CustomResourceState + + // Block for custom *nonsensitive* configuration properties: + Config pulumi.StringMapOutput `pulumi:"config"` + // Customized context of the exporter if `contextType` is set to `CUSTOM`. + Context pulumi.StringOutput `pulumi:"context"` + // Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`. + ContextType pulumi.StringOutput `pulumi:"contextType"` + // The Cluster API Credentials. + Credentials SchemaExporterCredentialsPtrOutput `pulumi:"credentials"` + DestinationSchemaRegistryCluster SchemaExporterDestinationSchemaRegistryClusterOutput `pulumi:"destinationSchemaRegistryCluster"` + // The configuration setting name. 
+ Name pulumi.StringOutput `pulumi:"name"` + // The flag to control whether to reset the exporter when updating configs. Defaults to `false`. + ResetOnUpdate pulumi.BoolPtrOutput `pulumi:"resetOnUpdate"` + // The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + RestEndpoint pulumi.StringPtrOutput `pulumi:"restEndpoint"` + SchemaRegistryCluster SchemaExporterSchemaRegistryClusterPtrOutput `pulumi:"schemaRegistryCluster"` + // The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`. + Status pulumi.StringOutput `pulumi:"status"` + // Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dcOrders`. + SubjectRenameFormat pulumi.StringOutput `pulumi:"subjectRenameFormat"` + // Name of each exporter subject. + Subjects pulumi.StringArrayOutput `pulumi:"subjects"` +} + +// NewSchemaExporter registers a new resource with the given unique name, arguments, and options. +func NewSchemaExporter(ctx *pulumi.Context, + name string, args *SchemaExporterArgs, opts ...pulumi.ResourceOption) (*SchemaExporter, error) { + if args == nil { + return nil, errors.New("missing one or more required arguments") + } + + if args.DestinationSchemaRegistryCluster == nil { + return nil, errors.New("invalid value for required argument 'DestinationSchemaRegistryCluster'") + } + if args.Credentials != nil { + args.Credentials = pulumi.ToSecret(args.Credentials).(SchemaExporterCredentialsPtrInput) + } + secrets := pulumi.AdditionalSecretOutputs([]string{ + "credentials", + }) + opts = append(opts, secrets) + opts = internal.PkgResourceDefaultOpts(opts) + var resource SchemaExporter + err := ctx.RegisterResource("confluentcloud:index/schemaExporter:SchemaExporter", name, args, &resource, opts...) 
+ if err != nil { + return nil, err + } + return &resource, nil +} + +// GetSchemaExporter gets an existing SchemaExporter resource's state with the given name, ID, and optional +// state properties that are used to uniquely qualify the lookup (nil if not required). +func GetSchemaExporter(ctx *pulumi.Context, + name string, id pulumi.IDInput, state *SchemaExporterState, opts ...pulumi.ResourceOption) (*SchemaExporter, error) { + var resource SchemaExporter + err := ctx.ReadResource("confluentcloud:index/schemaExporter:SchemaExporter", name, id, state, &resource, opts...) + if err != nil { + return nil, err + } + return &resource, nil +} + +// Input properties used for looking up and filtering SchemaExporter resources. +type schemaExporterState struct { + // Block for custom *nonsensitive* configuration properties: + Config map[string]string `pulumi:"config"` + // Customized context of the exporter if `contextType` is set to `CUSTOM`. + Context *string `pulumi:"context"` + // Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`. + ContextType *string `pulumi:"contextType"` + // The Cluster API Credentials. + Credentials *SchemaExporterCredentials `pulumi:"credentials"` + DestinationSchemaRegistryCluster *SchemaExporterDestinationSchemaRegistryCluster `pulumi:"destinationSchemaRegistryCluster"` + // The configuration setting name. + Name *string `pulumi:"name"` + // The flag to control whether to reset the exporter when updating configs. Defaults to `false`. + ResetOnUpdate *bool `pulumi:"resetOnUpdate"` + // The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + RestEndpoint *string `pulumi:"restEndpoint"` + SchemaRegistryCluster *SchemaExporterSchemaRegistryCluster `pulumi:"schemaRegistryCluster"` + // The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`. 
+ Status *string `pulumi:"status"` + // Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dcOrders`. + SubjectRenameFormat *string `pulumi:"subjectRenameFormat"` + // Name of each exporter subject. + Subjects []string `pulumi:"subjects"` +} + +type SchemaExporterState struct { + // Block for custom *nonsensitive* configuration properties: + Config pulumi.StringMapInput + // Customized context of the exporter if `contextType` is set to `CUSTOM`. + Context pulumi.StringPtrInput + // Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`. + ContextType pulumi.StringPtrInput + // The Cluster API Credentials. + Credentials SchemaExporterCredentialsPtrInput + DestinationSchemaRegistryCluster SchemaExporterDestinationSchemaRegistryClusterPtrInput + // The configuration setting name. + Name pulumi.StringPtrInput + // The flag to control whether to reset the exporter when updating configs. Defaults to `false`. + ResetOnUpdate pulumi.BoolPtrInput + // The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + RestEndpoint pulumi.StringPtrInput + SchemaRegistryCluster SchemaExporterSchemaRegistryClusterPtrInput + // The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`. + Status pulumi.StringPtrInput + // Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dcOrders`. + SubjectRenameFormat pulumi.StringPtrInput + // Name of each exporter subject. 
+ Subjects pulumi.StringArrayInput +} + +func (SchemaExporterState) ElementType() reflect.Type { + return reflect.TypeOf((*schemaExporterState)(nil)).Elem() +} + +type schemaExporterArgs struct { + // Block for custom *nonsensitive* configuration properties: + Config map[string]string `pulumi:"config"` + // Customized context of the exporter if `contextType` is set to `CUSTOM`. + Context *string `pulumi:"context"` + // Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`. + ContextType *string `pulumi:"contextType"` + // The Cluster API Credentials. + Credentials *SchemaExporterCredentials `pulumi:"credentials"` + DestinationSchemaRegistryCluster SchemaExporterDestinationSchemaRegistryCluster `pulumi:"destinationSchemaRegistryCluster"` + // The configuration setting name. + Name *string `pulumi:"name"` + // The flag to control whether to reset the exporter when updating configs. Defaults to `false`. + ResetOnUpdate *bool `pulumi:"resetOnUpdate"` + // The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + RestEndpoint *string `pulumi:"restEndpoint"` + SchemaRegistryCluster *SchemaExporterSchemaRegistryCluster `pulumi:"schemaRegistryCluster"` + // The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`. + Status *string `pulumi:"status"` + // Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dcOrders`. + SubjectRenameFormat *string `pulumi:"subjectRenameFormat"` + // Name of each exporter subject. + Subjects []string `pulumi:"subjects"` +} + +// The set of arguments for constructing a SchemaExporter resource. 
+type SchemaExporterArgs struct { + // Block for custom *nonsensitive* configuration properties: + Config pulumi.StringMapInput + // Customized context of the exporter if `contextType` is set to `CUSTOM`. + Context pulumi.StringPtrInput + // Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`. + ContextType pulumi.StringPtrInput + // The Cluster API Credentials. + Credentials SchemaExporterCredentialsPtrInput + DestinationSchemaRegistryCluster SchemaExporterDestinationSchemaRegistryClusterInput + // The configuration setting name. + Name pulumi.StringPtrInput + // The flag to control whether to reset the exporter when updating configs. Defaults to `false`. + ResetOnUpdate pulumi.BoolPtrInput + // The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + RestEndpoint pulumi.StringPtrInput + SchemaRegistryCluster SchemaExporterSchemaRegistryClusterPtrInput + // The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`. + Status pulumi.StringPtrInput + // Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dcOrders`. + SubjectRenameFormat pulumi.StringPtrInput + // Name of each exporter subject. 
+ Subjects pulumi.StringArrayInput +} + +func (SchemaExporterArgs) ElementType() reflect.Type { + return reflect.TypeOf((*schemaExporterArgs)(nil)).Elem() +} + +type SchemaExporterInput interface { + pulumi.Input + + ToSchemaExporterOutput() SchemaExporterOutput + ToSchemaExporterOutputWithContext(ctx context.Context) SchemaExporterOutput +} + +func (*SchemaExporter) ElementType() reflect.Type { + return reflect.TypeOf((**SchemaExporter)(nil)).Elem() +} + +func (i *SchemaExporter) ToSchemaExporterOutput() SchemaExporterOutput { + return i.ToSchemaExporterOutputWithContext(context.Background()) +} + +func (i *SchemaExporter) ToSchemaExporterOutputWithContext(ctx context.Context) SchemaExporterOutput { + return pulumi.ToOutputWithContext(ctx, i).(SchemaExporterOutput) +} + +func (i *SchemaExporter) ToOutput(ctx context.Context) pulumix.Output[*SchemaExporter] { + return pulumix.Output[*SchemaExporter]{ + OutputState: i.ToSchemaExporterOutputWithContext(ctx).OutputState, + } +} + +// SchemaExporterArrayInput is an input type that accepts SchemaExporterArray and SchemaExporterArrayOutput values. 
+// You can construct a concrete instance of `SchemaExporterArrayInput` via: +// +// SchemaExporterArray{ SchemaExporterArgs{...} } +type SchemaExporterArrayInput interface { + pulumi.Input + + ToSchemaExporterArrayOutput() SchemaExporterArrayOutput + ToSchemaExporterArrayOutputWithContext(context.Context) SchemaExporterArrayOutput +} + +type SchemaExporterArray []SchemaExporterInput + +func (SchemaExporterArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]*SchemaExporter)(nil)).Elem() +} + +func (i SchemaExporterArray) ToSchemaExporterArrayOutput() SchemaExporterArrayOutput { + return i.ToSchemaExporterArrayOutputWithContext(context.Background()) +} + +func (i SchemaExporterArray) ToSchemaExporterArrayOutputWithContext(ctx context.Context) SchemaExporterArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(SchemaExporterArrayOutput) +} + +func (i SchemaExporterArray) ToOutput(ctx context.Context) pulumix.Output[[]*SchemaExporter] { + return pulumix.Output[[]*SchemaExporter]{ + OutputState: i.ToSchemaExporterArrayOutputWithContext(ctx).OutputState, + } +} + +// SchemaExporterMapInput is an input type that accepts SchemaExporterMap and SchemaExporterMapOutput values. 
+// You can construct a concrete instance of `SchemaExporterMapInput` via: +// +// SchemaExporterMap{ "key": SchemaExporterArgs{...} } +type SchemaExporterMapInput interface { + pulumi.Input + + ToSchemaExporterMapOutput() SchemaExporterMapOutput + ToSchemaExporterMapOutputWithContext(context.Context) SchemaExporterMapOutput +} + +type SchemaExporterMap map[string]SchemaExporterInput + +func (SchemaExporterMap) ElementType() reflect.Type { + return reflect.TypeOf((*map[string]*SchemaExporter)(nil)).Elem() +} + +func (i SchemaExporterMap) ToSchemaExporterMapOutput() SchemaExporterMapOutput { + return i.ToSchemaExporterMapOutputWithContext(context.Background()) +} + +func (i SchemaExporterMap) ToSchemaExporterMapOutputWithContext(ctx context.Context) SchemaExporterMapOutput { + return pulumi.ToOutputWithContext(ctx, i).(SchemaExporterMapOutput) +} + +func (i SchemaExporterMap) ToOutput(ctx context.Context) pulumix.Output[map[string]*SchemaExporter] { + return pulumix.Output[map[string]*SchemaExporter]{ + OutputState: i.ToSchemaExporterMapOutputWithContext(ctx).OutputState, + } +} + +type SchemaExporterOutput struct{ *pulumi.OutputState } + +func (SchemaExporterOutput) ElementType() reflect.Type { + return reflect.TypeOf((**SchemaExporter)(nil)).Elem() +} + +func (o SchemaExporterOutput) ToSchemaExporterOutput() SchemaExporterOutput { + return o +} + +func (o SchemaExporterOutput) ToSchemaExporterOutputWithContext(ctx context.Context) SchemaExporterOutput { + return o +} + +func (o SchemaExporterOutput) ToOutput(ctx context.Context) pulumix.Output[*SchemaExporter] { + return pulumix.Output[*SchemaExporter]{ + OutputState: o.OutputState, + } +} + +// Block for custom *nonsensitive* configuration properties: +func (o SchemaExporterOutput) Config() pulumi.StringMapOutput { + return o.ApplyT(func(v *SchemaExporter) pulumi.StringMapOutput { return v.Config }).(pulumi.StringMapOutput) +} + +// Customized context of the exporter if `contextType` is set to `CUSTOM`. 
+func (o SchemaExporterOutput) Context() pulumi.StringOutput { + return o.ApplyT(func(v *SchemaExporter) pulumi.StringOutput { return v.Context }).(pulumi.StringOutput) +} + +// Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`. +func (o SchemaExporterOutput) ContextType() pulumi.StringOutput { + return o.ApplyT(func(v *SchemaExporter) pulumi.StringOutput { return v.ContextType }).(pulumi.StringOutput) +} + +// The Cluster API Credentials. +func (o SchemaExporterOutput) Credentials() SchemaExporterCredentialsPtrOutput { + return o.ApplyT(func(v *SchemaExporter) SchemaExporterCredentialsPtrOutput { return v.Credentials }).(SchemaExporterCredentialsPtrOutput) +} + +func (o SchemaExporterOutput) DestinationSchemaRegistryCluster() SchemaExporterDestinationSchemaRegistryClusterOutput { + return o.ApplyT(func(v *SchemaExporter) SchemaExporterDestinationSchemaRegistryClusterOutput { + return v.DestinationSchemaRegistryCluster + }).(SchemaExporterDestinationSchemaRegistryClusterOutput) +} + +// The configuration setting name. +func (o SchemaExporterOutput) Name() pulumi.StringOutput { + return o.ApplyT(func(v *SchemaExporter) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput) +} + +// The flag to control whether to reset the exporter when updating configs. Defaults to `false`. +func (o SchemaExporterOutput) ResetOnUpdate() pulumi.BoolPtrOutput { + return o.ApplyT(func(v *SchemaExporter) pulumi.BoolPtrOutput { return v.ResetOnUpdate }).(pulumi.BoolPtrOutput) +} + +// The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). 
+func (o SchemaExporterOutput) RestEndpoint() pulumi.StringPtrOutput { + return o.ApplyT(func(v *SchemaExporter) pulumi.StringPtrOutput { return v.RestEndpoint }).(pulumi.StringPtrOutput) +} + +func (o SchemaExporterOutput) SchemaRegistryCluster() SchemaExporterSchemaRegistryClusterPtrOutput { + return o.ApplyT(func(v *SchemaExporter) SchemaExporterSchemaRegistryClusterPtrOutput { return v.SchemaRegistryCluster }).(SchemaExporterSchemaRegistryClusterPtrOutput) +} + +// The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`. +func (o SchemaExporterOutput) Status() pulumi.StringOutput { + return o.ApplyT(func(v *SchemaExporter) pulumi.StringOutput { return v.Status }).(pulumi.StringOutput) +} + +// Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dcOrders`. +func (o SchemaExporterOutput) SubjectRenameFormat() pulumi.StringOutput { + return o.ApplyT(func(v *SchemaExporter) pulumi.StringOutput { return v.SubjectRenameFormat }).(pulumi.StringOutput) +} + +// Name of each exporter subject. 
+func (o SchemaExporterOutput) Subjects() pulumi.StringArrayOutput { + return o.ApplyT(func(v *SchemaExporter) pulumi.StringArrayOutput { return v.Subjects }).(pulumi.StringArrayOutput) +} + +type SchemaExporterArrayOutput struct{ *pulumi.OutputState } + +func (SchemaExporterArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]*SchemaExporter)(nil)).Elem() +} + +func (o SchemaExporterArrayOutput) ToSchemaExporterArrayOutput() SchemaExporterArrayOutput { + return o +} + +func (o SchemaExporterArrayOutput) ToSchemaExporterArrayOutputWithContext(ctx context.Context) SchemaExporterArrayOutput { + return o +} + +func (o SchemaExporterArrayOutput) ToOutput(ctx context.Context) pulumix.Output[[]*SchemaExporter] { + return pulumix.Output[[]*SchemaExporter]{ + OutputState: o.OutputState, + } +} + +func (o SchemaExporterArrayOutput) Index(i pulumi.IntInput) SchemaExporterOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) *SchemaExporter { + return vs[0].([]*SchemaExporter)[vs[1].(int)] + }).(SchemaExporterOutput) +} + +type SchemaExporterMapOutput struct{ *pulumi.OutputState } + +func (SchemaExporterMapOutput) ElementType() reflect.Type { + return reflect.TypeOf((*map[string]*SchemaExporter)(nil)).Elem() +} + +func (o SchemaExporterMapOutput) ToSchemaExporterMapOutput() SchemaExporterMapOutput { + return o +} + +func (o SchemaExporterMapOutput) ToSchemaExporterMapOutputWithContext(ctx context.Context) SchemaExporterMapOutput { + return o +} + +func (o SchemaExporterMapOutput) ToOutput(ctx context.Context) pulumix.Output[map[string]*SchemaExporter] { + return pulumix.Output[map[string]*SchemaExporter]{ + OutputState: o.OutputState, + } +} + +func (o SchemaExporterMapOutput) MapIndex(k pulumi.StringInput) SchemaExporterOutput { + return pulumi.All(o, k).ApplyT(func(vs []interface{}) *SchemaExporter { + return vs[0].(map[string]*SchemaExporter)[vs[1].(string)] + }).(SchemaExporterOutput) +} + +func init() { + 
pulumi.RegisterInputType(reflect.TypeOf((*SchemaExporterInput)(nil)).Elem(), &SchemaExporter{}) + pulumi.RegisterInputType(reflect.TypeOf((*SchemaExporterArrayInput)(nil)).Elem(), SchemaExporterArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*SchemaExporterMapInput)(nil)).Elem(), SchemaExporterMap{}) + pulumi.RegisterOutputType(SchemaExporterOutput{}) + pulumi.RegisterOutputType(SchemaExporterArrayOutput{}) + pulumi.RegisterOutputType(SchemaExporterMapOutput{}) +} diff --git a/sdk/java/src/main/java/com/pulumi/confluentcloud/ConfluentcloudFunctions.java b/sdk/java/src/main/java/com/pulumi/confluentcloud/ConfluentcloudFunctions.java index 52df66b8..e2ff5a52 100644 --- a/sdk/java/src/main/java/com/pulumi/confluentcloud/ConfluentcloudFunctions.java +++ b/sdk/java/src/main/java/com/pulumi/confluentcloud/ConfluentcloudFunctions.java @@ -12,6 +12,8 @@ import com.pulumi.confluentcloud.inputs.GetByokKeyPlainArgs; import com.pulumi.confluentcloud.inputs.GetEnvironmentArgs; import com.pulumi.confluentcloud.inputs.GetEnvironmentPlainArgs; +import com.pulumi.confluentcloud.inputs.GetFlinkComputePoolArgs; +import com.pulumi.confluentcloud.inputs.GetFlinkComputePoolPlainArgs; import com.pulumi.confluentcloud.inputs.GetIdentityPoolArgs; import com.pulumi.confluentcloud.inputs.GetIdentityPoolPlainArgs; import com.pulumi.confluentcloud.inputs.GetIdentityProviderArgs; @@ -73,6 +75,7 @@ import com.pulumi.confluentcloud.outputs.GetByokKeyResult; import com.pulumi.confluentcloud.outputs.GetEnvironmentResult; import com.pulumi.confluentcloud.outputs.GetEnvironmentsResult; +import com.pulumi.confluentcloud.outputs.GetFlinkComputePoolResult; import com.pulumi.confluentcloud.outputs.GetIdentityPoolResult; import com.pulumi.confluentcloud.outputs.GetIdentityProviderResult; import com.pulumi.confluentcloud.outputs.GetInvitationResult; @@ -878,6 +881,226 @@ public static Output getEnvironments(InvokeArgs args, Inv public static CompletableFuture getEnvironmentsPlain(InvokeArgs args, 
InvokeOptions options) { return Deployment.getInstance().invokeAsync("confluentcloud:index/getEnvironments:getEnvironments", TypeShape.of(GetEnvironmentsResult.class), args, Utilities.withVersion(options)); } + /** + * [![Early Access](https://img.shields.io/badge/Lifecycle%20Stage-Early%20Access-%2300afba)](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy) + * + * > **Note:** `confluentcloud.FlinkComputePool` data source is available in **Early Access** for early adopters. Early Access features are introduced to gather customer feedback. This feature should be used only for evaluation and non-production testing purposes or to provide feedback to Confluent, particularly as it becomes more widely available in follow-on editions.\ + * **Early Access** features are intended for evaluation use in development and testing environments only, and not for production use. The warranty, SLA, and Support Services provisions of your agreement with Confluent do not apply to Early Access features. Early Access features are considered to be a Proof of Concept as defined in the Confluent Cloud Terms of Service. Confluent may discontinue providing early access releases of the Early Access features at any time in Confluent’s sole discretion. + * + * `confluentcloud.FlinkComputePool` describes a Flink Compute Pool data source. 
+ * + * ## Example Usage + * ```java + * package generated_program; + * + * import com.pulumi.Context; + * import com.pulumi.Pulumi; + * import com.pulumi.core.Output; + * import com.pulumi.confluentcloud.ConfluentcloudFunctions; + * import com.pulumi.confluentcloud.inputs.GetFlinkComputePoolArgs; + * import com.pulumi.confluentcloud.inputs.GetFlinkComputePoolEnvironmentArgs; + * import java.util.List; + * import java.util.ArrayList; + * import java.util.Map; + * import java.io.File; + * import java.nio.file.Files; + * import java.nio.file.Paths; + * + * public class App { + * public static void main(String[] args) { + * Pulumi.run(App::stack); + * } + * + * public static void stack(Context ctx) { + * final var exampleUsingIdFlinkComputePool = ConfluentcloudFunctions.getFlinkComputePool(GetFlinkComputePoolArgs.builder() + * .id("lfcp-abc123") + * .environment(GetFlinkComputePoolEnvironmentArgs.builder() + * .id("env-xyz456") + * .build()) + * .build()); + * + * ctx.export("exampleUsingId", exampleUsingIdFlinkComputePool.applyValue(getFlinkComputePoolResult -> getFlinkComputePoolResult)); + * final var exampleUsingNameFlinkComputePool = ConfluentcloudFunctions.getFlinkComputePool(GetFlinkComputePoolArgs.builder() + * .displayName("my_compute_pool") + * .environment(GetFlinkComputePoolEnvironmentArgs.builder() + * .id("env-xyz456") + * .build()) + * .build()); + * + * ctx.export("exampleUsingName", exampleUsingNameFlinkComputePool.applyValue(getFlinkComputePoolResult -> getFlinkComputePoolResult)); + * } + * } + * ``` + * + */ + public static Output getFlinkComputePool(GetFlinkComputePoolArgs args) { + return getFlinkComputePool(args, InvokeOptions.Empty); + } + /** + * [![Early Access](https://img.shields.io/badge/Lifecycle%20Stage-Early%20Access-%2300afba)](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy) + * + * > **Note:** `confluentcloud.FlinkComputePool` data source is available in **Early Access** for early adopters. 
Early Access features are introduced to gather customer feedback. This feature should be used only for evaluation and non-production testing purposes or to provide feedback to Confluent, particularly as it becomes more widely available in follow-on editions.\ + * **Early Access** features are intended for evaluation use in development and testing environments only, and not for production use. The warranty, SLA, and Support Services provisions of your agreement with Confluent do not apply to Early Access features. Early Access features are considered to be a Proof of Concept as defined in the Confluent Cloud Terms of Service. Confluent may discontinue providing early access releases of the Early Access features at any time in Confluent’s sole discretion. + * + * `confluentcloud.FlinkComputePool` describes a Flink Compute Pool data source. + * + * ## Example Usage + * ```java + * package generated_program; + * + * import com.pulumi.Context; + * import com.pulumi.Pulumi; + * import com.pulumi.core.Output; + * import com.pulumi.confluentcloud.ConfluentcloudFunctions; + * import com.pulumi.confluentcloud.inputs.GetFlinkComputePoolArgs; + * import com.pulumi.confluentcloud.inputs.GetFlinkComputePoolEnvironmentArgs; + * import java.util.List; + * import java.util.ArrayList; + * import java.util.Map; + * import java.io.File; + * import java.nio.file.Files; + * import java.nio.file.Paths; + * + * public class App { + * public static void main(String[] args) { + * Pulumi.run(App::stack); + * } + * + * public static void stack(Context ctx) { + * final var exampleUsingIdFlinkComputePool = ConfluentcloudFunctions.getFlinkComputePool(GetFlinkComputePoolArgs.builder() + * .id("lfcp-abc123") + * .environment(GetFlinkComputePoolEnvironmentArgs.builder() + * .id("env-xyz456") + * .build()) + * .build()); + * + * ctx.export("exampleUsingId", exampleUsingIdFlinkComputePool.applyValue(getFlinkComputePoolResult -> getFlinkComputePoolResult)); + * final var 
exampleUsingNameFlinkComputePool = ConfluentcloudFunctions.getFlinkComputePool(GetFlinkComputePoolArgs.builder() + * .displayName("my_compute_pool") + * .environment(GetFlinkComputePoolEnvironmentArgs.builder() + * .id("env-xyz456") + * .build()) + * .build()); + * + * ctx.export("exampleUsingName", exampleUsingNameFlinkComputePool.applyValue(getFlinkComputePoolResult -> getFlinkComputePoolResult)); + * } + * } + * ``` + * + */ + public static CompletableFuture getFlinkComputePoolPlain(GetFlinkComputePoolPlainArgs args) { + return getFlinkComputePoolPlain(args, InvokeOptions.Empty); + } + /** + * [![Early Access](https://img.shields.io/badge/Lifecycle%20Stage-Early%20Access-%2300afba)](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy) + * + * > **Note:** `confluentcloud.FlinkComputePool` data source is available in **Early Access** for early adopters. Early Access features are introduced to gather customer feedback. This feature should be used only for evaluation and non-production testing purposes or to provide feedback to Confluent, particularly as it becomes more widely available in follow-on editions.\ + * **Early Access** features are intended for evaluation use in development and testing environments only, and not for production use. The warranty, SLA, and Support Services provisions of your agreement with Confluent do not apply to Early Access features. Early Access features are considered to be a Proof of Concept as defined in the Confluent Cloud Terms of Service. Confluent may discontinue providing early access releases of the Early Access features at any time in Confluent’s sole discretion. + * + * `confluentcloud.FlinkComputePool` describes a Flink Compute Pool data source. 
+ * + * ## Example Usage + * ```java + * package generated_program; + * + * import com.pulumi.Context; + * import com.pulumi.Pulumi; + * import com.pulumi.core.Output; + * import com.pulumi.confluentcloud.ConfluentcloudFunctions; + * import com.pulumi.confluentcloud.inputs.GetFlinkComputePoolArgs; + * import com.pulumi.confluentcloud.inputs.GetFlinkComputePoolEnvironmentArgs; + * import java.util.List; + * import java.util.ArrayList; + * import java.util.Map; + * import java.io.File; + * import java.nio.file.Files; + * import java.nio.file.Paths; + * + * public class App { + * public static void main(String[] args) { + * Pulumi.run(App::stack); + * } + * + * public static void stack(Context ctx) { + * final var exampleUsingIdFlinkComputePool = ConfluentcloudFunctions.getFlinkComputePool(GetFlinkComputePoolArgs.builder() + * .id("lfcp-abc123") + * .environment(GetFlinkComputePoolEnvironmentArgs.builder() + * .id("env-xyz456") + * .build()) + * .build()); + * + * ctx.export("exampleUsingId", exampleUsingIdFlinkComputePool.applyValue(getFlinkComputePoolResult -> getFlinkComputePoolResult)); + * final var exampleUsingNameFlinkComputePool = ConfluentcloudFunctions.getFlinkComputePool(GetFlinkComputePoolArgs.builder() + * .displayName("my_compute_pool") + * .environment(GetFlinkComputePoolEnvironmentArgs.builder() + * .id("env-xyz456") + * .build()) + * .build()); + * + * ctx.export("exampleUsingName", exampleUsingNameFlinkComputePool.applyValue(getFlinkComputePoolResult -> getFlinkComputePoolResult)); + * } + * } + * ``` + * + */ + public static Output getFlinkComputePool(GetFlinkComputePoolArgs args, InvokeOptions options) { + return Deployment.getInstance().invoke("confluentcloud:index/getFlinkComputePool:getFlinkComputePool", TypeShape.of(GetFlinkComputePoolResult.class), args, Utilities.withVersion(options)); + } + /** + * [![Early 
Access](https://img.shields.io/badge/Lifecycle%20Stage-Early%20Access-%2300afba)](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy) + * + * > **Note:** `confluentcloud.FlinkComputePool` data source is available in **Early Access** for early adopters. Early Access features are introduced to gather customer feedback. This feature should be used only for evaluation and non-production testing purposes or to provide feedback to Confluent, particularly as it becomes more widely available in follow-on editions.\ + * **Early Access** features are intended for evaluation use in development and testing environments only, and not for production use. The warranty, SLA, and Support Services provisions of your agreement with Confluent do not apply to Early Access features. Early Access features are considered to be a Proof of Concept as defined in the Confluent Cloud Terms of Service. Confluent may discontinue providing early access releases of the Early Access features at any time in Confluent’s sole discretion. + * + * `confluentcloud.FlinkComputePool` describes a Flink Compute Pool data source. 
+ * + * ## Example Usage + * ```java + * package generated_program; + * + * import com.pulumi.Context; + * import com.pulumi.Pulumi; + * import com.pulumi.core.Output; + * import com.pulumi.confluentcloud.ConfluentcloudFunctions; + * import com.pulumi.confluentcloud.inputs.GetFlinkComputePoolArgs; + * import com.pulumi.confluentcloud.inputs.GetFlinkComputePoolEnvironmentArgs; + * import java.util.List; + * import java.util.ArrayList; + * import java.util.Map; + * import java.io.File; + * import java.nio.file.Files; + * import java.nio.file.Paths; + * + * public class App { + * public static void main(String[] args) { + * Pulumi.run(App::stack); + * } + * + * public static void stack(Context ctx) { + * final var exampleUsingIdFlinkComputePool = ConfluentcloudFunctions.getFlinkComputePool(GetFlinkComputePoolArgs.builder() + * .id("lfcp-abc123") + * .environment(GetFlinkComputePoolEnvironmentArgs.builder() + * .id("env-xyz456") + * .build()) + * .build()); + * + * ctx.export("exampleUsingId", exampleUsingIdFlinkComputePool.applyValue(getFlinkComputePoolResult -> getFlinkComputePoolResult)); + * final var exampleUsingNameFlinkComputePool = ConfluentcloudFunctions.getFlinkComputePool(GetFlinkComputePoolArgs.builder() + * .displayName("my_compute_pool") + * .environment(GetFlinkComputePoolEnvironmentArgs.builder() + * .id("env-xyz456") + * .build()) + * .build()); + * + * ctx.export("exampleUsingName", exampleUsingNameFlinkComputePool.applyValue(getFlinkComputePoolResult -> getFlinkComputePoolResult)); + * } + * } + * ``` + * + */ + public static CompletableFuture getFlinkComputePoolPlain(GetFlinkComputePoolPlainArgs args, InvokeOptions options) { + return Deployment.getInstance().invokeAsync("confluentcloud:index/getFlinkComputePool:getFlinkComputePool", TypeShape.of(GetFlinkComputePoolResult.class), args, Utilities.withVersion(options)); + } /** * [![General 
Availability](https://img.shields.io/badge/Lifecycle%20Stage-General%20Availability-%2345c6e8)](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy) * @@ -3731,6 +3954,8 @@ public static CompletableFuture getPri * * `confluentcloud.RoleBinding` describes a Role Binding. * + * > **Note:** For more information on the Role Bindings, see [Predefined RBAC roles in Confluent Cloud](https://docs.confluent.io/cloud/current/access-management/access-control/rbac/predefined-rbac-roles.html). + * * ## Example Usage * ```java * package generated_program; @@ -3771,6 +3996,8 @@ public static Output getRoleBinding(GetRoleBindingArgs arg * * `confluentcloud.RoleBinding` describes a Role Binding. * + * > **Note:** For more information on the Role Bindings, see [Predefined RBAC roles in Confluent Cloud](https://docs.confluent.io/cloud/current/access-management/access-control/rbac/predefined-rbac-roles.html). + * * ## Example Usage * ```java * package generated_program; @@ -3811,6 +4038,8 @@ public static CompletableFuture getRoleBindingPlain(GetRol * * `confluentcloud.RoleBinding` describes a Role Binding. * + * > **Note:** For more information on the Role Bindings, see [Predefined RBAC roles in Confluent Cloud](https://docs.confluent.io/cloud/current/access-management/access-control/rbac/predefined-rbac-roles.html). + * * ## Example Usage * ```java * package generated_program; @@ -3851,6 +4080,8 @@ public static Output getRoleBinding(GetRoleBindingArgs arg * * `confluentcloud.RoleBinding` describes a Role Binding. * + * > **Note:** For more information on the Role Bindings, see [Predefined RBAC roles in Confluent Cloud](https://docs.confluent.io/cloud/current/access-management/access-control/rbac/predefined-rbac-roles.html). 
+ * * ## Example Usage * ```java * package generated_program; diff --git a/sdk/java/src/main/java/com/pulumi/confluentcloud/FlinkComputePool.java b/sdk/java/src/main/java/com/pulumi/confluentcloud/FlinkComputePool.java new file mode 100644 index 00000000..df0f7f09 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/confluentcloud/FlinkComputePool.java @@ -0,0 +1,261 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.confluentcloud; + +import com.pulumi.confluentcloud.FlinkComputePoolArgs; +import com.pulumi.confluentcloud.Utilities; +import com.pulumi.confluentcloud.inputs.FlinkComputePoolState; +import com.pulumi.confluentcloud.outputs.FlinkComputePoolEnvironment; +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Export; +import com.pulumi.core.annotations.ResourceType; +import com.pulumi.core.internal.Codegen; +import java.lang.Integer; +import java.lang.String; +import javax.annotation.Nullable; + +/** + * ## Example Usage + * ```java + * package generated_program; + * + * import com.pulumi.Context; + * import com.pulumi.Pulumi; + * import com.pulumi.core.Output; + * import com.pulumi.confluentcloud.Environment; + * import com.pulumi.confluentcloud.FlinkComputePool; + * import com.pulumi.confluentcloud.FlinkComputePoolArgs; + * import com.pulumi.confluentcloud.inputs.FlinkComputePoolEnvironmentArgs; + * import java.util.List; + * import java.util.ArrayList; + * import java.util.Map; + * import java.io.File; + * import java.nio.file.Files; + * import java.nio.file.Paths; + * + * public class App { + * public static void main(String[] args) { + * Pulumi.run(App::stack); + * } + * + * public static void stack(Context ctx) { + * var development = new Environment("development"); + * + * var main = new FlinkComputePool("main", FlinkComputePoolArgs.builder() + * .displayName("standard_compute_pool") + * .cloud("AWS") + * 
.region("us-east-1") + * .maxCfu(5) + * .environment(FlinkComputePoolEnvironmentArgs.builder() + * .id(development.id()) + * .build()) + * .build()); + * + * } + * } + * ``` + * + * ## Import + * + * You can import a Flink Compute Pool by using Environment ID and Flink Compute Pool ID, in the format `<Environment ID>/<Flink Compute Pool ID>`. The following example shows how to import a Flink Compute Pool$ export CONFLUENT_CLOUD_API_KEY="<cloud_api_key>" $ export CONFLUENT_CLOUD_API_SECRET="<cloud_api_secret>" + * + * ```sh + * $ pulumi import confluentcloud:index/flinkComputePool:FlinkComputePool main env-abc123/lfcp-abc123 + * ``` + * + * !> **Warning:** Do not forget to delete terminal command history afterwards for security purposes. + * + */ +@ResourceType(type="confluentcloud:index/flinkComputePool:FlinkComputePool") +public class FlinkComputePool extends com.pulumi.resources.CustomResource { + /** + * (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`. + * + */ + @Export(name="apiVersion", refs={String.class}, tree="[0]") + private Output apiVersion; + + /** + * @return (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`. + * + */ + public Output apiVersion() { + return this.apiVersion; + } + /** + * The cloud service provider that runs the Flink Compute Pool. + * + */ + @Export(name="cloud", refs={String.class}, tree="[0]") + private Output cloud; + + /** + * @return The cloud service provider that runs the Flink Compute Pool. + * + */ + public Output cloud() { + return this.cloud; + } + /** + * (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool. + * + */ + @Export(name="currentCfu", refs={Integer.class}, tree="[0]") + private Output currentCfu; + + /** + * @return (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool. 
+ * + */ + public Output currentCfu() { + return this.currentCfu; + } + /** + * The name of the Flink Compute Pool. + * + */ + @Export(name="displayName", refs={String.class}, tree="[0]") + private Output displayName; + + /** + * @return The name of the Flink Compute Pool. + * + */ + public Output displayName() { + return this.displayName; + } + /** + * Environment objects represent an isolated namespace for your Confluent resources for organizational purposes. + * + */ + @Export(name="environment", refs={FlinkComputePoolEnvironment.class}, tree="[0]") + private Output environment; + + /** + * @return Environment objects represent an isolated namespace for your Confluent resources for organizational purposes. + * + */ + public Output environment() { + return this.environment; + } + /** + * (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`. + * + */ + @Export(name="kind", refs={String.class}, tree="[0]") + private Output kind; + + /** + * @return (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`. + * + */ + public Output kind() { + return this.kind; + } + /** + * Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`. + * + */ + @Export(name="maxCfu", refs={Integer.class}, tree="[0]") + private Output maxCfu; + + /** + * @return Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`. + * + */ + public Output maxCfu() { + return this.maxCfu; + } + /** + * The cloud service provider region that hosts the Flink Compute Pool. + * + */ + @Export(name="region", refs={String.class}, tree="[0]") + private Output region; + + /** + * @return The cloud service provider region that hosts the Flink Compute Pool. + * + */ + public Output region() { + return this.region; + } + /** + * (Required String) The Confluent Resource Name of the Flink Compute Pool. 
+ * + */ + @Export(name="resourceName", refs={String.class}, tree="[0]") + private Output resourceName; + + /** + * @return (Required String) The Confluent Resource Name of the Flink Compute Pool. + * + */ + public Output resourceName() { + return this.resourceName; + } + /** + * (Required String) The API endpoint of the Flink Compute Pool. + * + */ + @Export(name="restEndpoint", refs={String.class}, tree="[0]") + private Output restEndpoint; + + /** + * @return (Required String) The API endpoint of the Flink Compute Pool. + * + */ + public Output restEndpoint() { + return this.restEndpoint; + } + + /** + * + * @param name The _unique_ name of the resulting resource. + */ + public FlinkComputePool(String name) { + this(name, FlinkComputePoolArgs.Empty); + } + /** + * + * @param name The _unique_ name of the resulting resource. + * @param args The arguments to use to populate this resource's properties. + */ + public FlinkComputePool(String name, FlinkComputePoolArgs args) { + this(name, args, null); + } + /** + * + * @param name The _unique_ name of the resulting resource. + * @param args The arguments to use to populate this resource's properties. + * @param options A bag of options that control this resource's behavior. + */ + public FlinkComputePool(String name, FlinkComputePoolArgs args, @Nullable com.pulumi.resources.CustomResourceOptions options) { + super("confluentcloud:index/flinkComputePool:FlinkComputePool", name, args == null ? 
FlinkComputePoolArgs.Empty : args, makeResourceOptions(options, Codegen.empty())); + } + + private FlinkComputePool(String name, Output id, @Nullable FlinkComputePoolState state, @Nullable com.pulumi.resources.CustomResourceOptions options) { + super("confluentcloud:index/flinkComputePool:FlinkComputePool", name, state, makeResourceOptions(options, id)); + } + + private static com.pulumi.resources.CustomResourceOptions makeResourceOptions(@Nullable com.pulumi.resources.CustomResourceOptions options, @Nullable Output id) { + var defaultOptions = com.pulumi.resources.CustomResourceOptions.builder() + .version(Utilities.getVersion()) + .build(); + return com.pulumi.resources.CustomResourceOptions.merge(defaultOptions, options, id); + } + + /** + * Get an existing Host resource's state with the given name, ID, and optional extra + * properties used to qualify the lookup. + * + * @param name The _unique_ name of the resulting resource. + * @param id The _unique_ provider ID of the resource to lookup. + * @param state + * @param options Optional settings to control the behavior of the CustomResource. + */ + public static FlinkComputePool get(String name, Output id, @Nullable FlinkComputePoolState state, @Nullable com.pulumi.resources.CustomResourceOptions options) { + return new FlinkComputePool(name, id, state, options); + } +} diff --git a/sdk/java/src/main/java/com/pulumi/confluentcloud/FlinkComputePoolArgs.java b/sdk/java/src/main/java/com/pulumi/confluentcloud/FlinkComputePoolArgs.java new file mode 100644 index 00000000..cfb3e5c4 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/confluentcloud/FlinkComputePoolArgs.java @@ -0,0 +1,237 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.confluentcloud; + +import com.pulumi.confluentcloud.inputs.FlinkComputePoolEnvironmentArgs; +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.Integer; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class FlinkComputePoolArgs extends com.pulumi.resources.ResourceArgs { + + public static final FlinkComputePoolArgs Empty = new FlinkComputePoolArgs(); + + /** + * The cloud service provider that runs the Flink Compute Pool. + * + */ + @Import(name="cloud", required=true) + private Output cloud; + + /** + * @return The cloud service provider that runs the Flink Compute Pool. + * + */ + public Output cloud() { + return this.cloud; + } + + /** + * The name of the Flink Compute Pool. + * + */ + @Import(name="displayName", required=true) + private Output displayName; + + /** + * @return The name of the Flink Compute Pool. + * + */ + public Output displayName() { + return this.displayName; + } + + /** + * Environment objects represent an isolated namespace for your Confluent resources for organizational purposes. + * + */ + @Import(name="environment", required=true) + private Output environment; + + /** + * @return Environment objects represent an isolated namespace for your Confluent resources for organizational purposes. + * + */ + public Output environment() { + return this.environment; + } + + /** + * Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`. + * + */ + @Import(name="maxCfu") + private @Nullable Output maxCfu; + + /** + * @return Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`. 
+ * + */ + public Optional> maxCfu() { + return Optional.ofNullable(this.maxCfu); + } + + /** + * The cloud service provider region that hosts the Flink Compute Pool. + * + */ + @Import(name="region", required=true) + private Output region; + + /** + * @return The cloud service provider region that hosts the Flink Compute Pool. + * + */ + public Output region() { + return this.region; + } + + private FlinkComputePoolArgs() {} + + private FlinkComputePoolArgs(FlinkComputePoolArgs $) { + this.cloud = $.cloud; + this.displayName = $.displayName; + this.environment = $.environment; + this.maxCfu = $.maxCfu; + this.region = $.region; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(FlinkComputePoolArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private FlinkComputePoolArgs $; + + public Builder() { + $ = new FlinkComputePoolArgs(); + } + + public Builder(FlinkComputePoolArgs defaults) { + $ = new FlinkComputePoolArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param cloud The cloud service provider that runs the Flink Compute Pool. + * + * @return builder + * + */ + public Builder cloud(Output cloud) { + $.cloud = cloud; + return this; + } + + /** + * @param cloud The cloud service provider that runs the Flink Compute Pool. + * + * @return builder + * + */ + public Builder cloud(String cloud) { + return cloud(Output.of(cloud)); + } + + /** + * @param displayName The name of the Flink Compute Pool. + * + * @return builder + * + */ + public Builder displayName(Output displayName) { + $.displayName = displayName; + return this; + } + + /** + * @param displayName The name of the Flink Compute Pool. + * + * @return builder + * + */ + public Builder displayName(String displayName) { + return displayName(Output.of(displayName)); + } + + /** + * @param environment Environment objects represent an isolated namespace for your Confluent resources for organizational purposes. 
+ * + * @return builder + * + */ + public Builder environment(Output environment) { + $.environment = environment; + return this; + } + + /** + * @param environment Environment objects represent an isolated namespace for your Confluent resources for organizational purposes. + * + * @return builder + * + */ + public Builder environment(FlinkComputePoolEnvironmentArgs environment) { + return environment(Output.of(environment)); + } + + /** + * @param maxCfu Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`. + * + * @return builder + * + */ + public Builder maxCfu(@Nullable Output maxCfu) { + $.maxCfu = maxCfu; + return this; + } + + /** + * @param maxCfu Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`. + * + * @return builder + * + */ + public Builder maxCfu(Integer maxCfu) { + return maxCfu(Output.of(maxCfu)); + } + + /** + * @param region The cloud service provider region that hosts the Flink Compute Pool. + * + * @return builder + * + */ + public Builder region(Output region) { + $.region = region; + return this; + } + + /** + * @param region The cloud service provider region that hosts the Flink Compute Pool. 
+ * + * @return builder + * + */ + public Builder region(String region) { + return region(Output.of(region)); + } + + public FlinkComputePoolArgs build() { + $.cloud = Objects.requireNonNull($.cloud, "expected parameter 'cloud' to be non-null"); + $.displayName = Objects.requireNonNull($.displayName, "expected parameter 'displayName' to be non-null"); + $.environment = Objects.requireNonNull($.environment, "expected parameter 'environment' to be non-null"); + $.region = Objects.requireNonNull($.region, "expected parameter 'region' to be non-null"); + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/confluentcloud/KafkaAcl.java b/sdk/java/src/main/java/com/pulumi/confluentcloud/KafkaAcl.java index 944d6e21..fcf32f02 100644 --- a/sdk/java/src/main/java/com/pulumi/confluentcloud/KafkaAcl.java +++ b/sdk/java/src/main/java/com/pulumi/confluentcloud/KafkaAcl.java @@ -20,7 +20,7 @@ /** * ## Import * - * You can import Kafka ACLs by using the Kafka cluster ID and attributes of `confluent_kafka_acl` resource in the format `<Kafka cluster ID>/<Kafka ACL resource type>#<Kafka ACL resource name>#<Kafka ACL pattern type>#<Kafka ACL principal>#<Kafka ACL host>#<Kafka ACL operation>#<Kafka ACL permission>`, for exampleOption #1Manage multiple Kafka clusters in the same Terraform workspace $ export CONFLUENT_CLOUD_API_KEY="<cloud_api_key>" $ export CONFLUENT_CLOUD_API_SECRET="<cloud_api_secret>" $ export IMPORT_KAFKA_API_KEY="<kafka_api_key>" $ export IMPORT_KAFKA_API_SECRET="<kafka_api_secret>" $ export IMPORT_KAFKA_REST_ENDPOINT="<kafka_rest_endpoint>" + * You can import Kafka ACLs by using the Kafka cluster ID and attributes of `confluent_kafka_acl` resource in the format `<Kafka cluster ID>/<Kafka ACL resource type>#<Kafka ACL resource name>#<Kafka ACL pattern type>#<Kafka ACL principal>#<Kafka ACL host>#<Kafka ACL operation>#<Kafka ACL permission>`, for exampleOption #1Manage multiple Kafka clusters in the same Terraform workspace $ export 
IMPORT_KAFKA_API_KEY="<kafka_api_key>" $ export IMPORT_KAFKA_API_SECRET="<kafka_api_secret>" $ export IMPORT_KAFKA_REST_ENDPOINT="<kafka_rest_endpoint>" * * ```sh * $ pulumi import confluentcloud:index/kafkaAcl:KafkaAcl describe-cluster "lkc-12345/CLUSTER#kafka-cluster#LITERAL#User:sa-xyz123#*#DESCRIBE#ALLOW" diff --git a/sdk/java/src/main/java/com/pulumi/confluentcloud/SchemaExporter.java b/sdk/java/src/main/java/com/pulumi/confluentcloud/SchemaExporter.java new file mode 100644 index 00000000..709ee066 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/confluentcloud/SchemaExporter.java @@ -0,0 +1,241 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.confluentcloud; + +import com.pulumi.confluentcloud.SchemaExporterArgs; +import com.pulumi.confluentcloud.Utilities; +import com.pulumi.confluentcloud.inputs.SchemaExporterState; +import com.pulumi.confluentcloud.outputs.SchemaExporterCredentials; +import com.pulumi.confluentcloud.outputs.SchemaExporterDestinationSchemaRegistryCluster; +import com.pulumi.confluentcloud.outputs.SchemaExporterSchemaRegistryCluster; +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Export; +import com.pulumi.core.annotations.ResourceType; +import com.pulumi.core.internal.Codegen; +import java.lang.Boolean; +import java.lang.String; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import javax.annotation.Nullable; + +/** + * ## Import + * + * You can import a Schema Exporter by using the Schema Registry cluster ID, Schema Exporter name in the format `<Schema Registry cluster ID>/<Schema Exporter name>`, for example$ export IMPORT_SCHEMA_REGISTRY_API_KEY="<schema_registry_api_key>" $ export IMPORT_SCHEMA_REGISTRY_API_SECRET="<schema_registry_api_secret>" $ export IMPORT_SCHEMA_REGISTRY_REST_ENDPOINT="<schema_registry_rest_endpoint>" + * + * ```sh + * $ pulumi import 
confluentcloud:index/schemaExporter:SchemaExporter main lsrc-8wrx70/test-exporter + * ``` + * + * !> **Warning:** Do not forget to delete terminal command history afterwards for security purposes. + * + */ +@ResourceType(type="confluentcloud:index/schemaExporter:SchemaExporter") +public class SchemaExporter extends com.pulumi.resources.CustomResource { + /** + * Block for custom *nonsensitive* configuration properties: + * + */ + @Export(name="config", refs={Map.class,String.class}, tree="[0,1,1]") + private Output> config; + + /** + * @return Block for custom *nonsensitive* configuration properties: + * + */ + public Output> config() { + return this.config; + } + /** + * Customized context of the exporter if `context_type` is set to `CUSTOM`. + * + */ + @Export(name="context", refs={String.class}, tree="[0]") + private Output context; + + /** + * @return Customized context of the exporter if `context_type` is set to `CUSTOM`. + * + */ + public Output context() { + return this.context; + } + /** + * Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`. + * + */ + @Export(name="contextType", refs={String.class}, tree="[0]") + private Output contextType; + + /** + * @return Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`. + * + */ + public Output contextType() { + return this.contextType; + } + /** + * The Cluster API Credentials. + * + */ + @Export(name="credentials", refs={SchemaExporterCredentials.class}, tree="[0]") + private Output credentials; + + /** + * @return The Cluster API Credentials. 
+ * + */ + public Output> credentials() { + return Codegen.optional(this.credentials); + } + @Export(name="destinationSchemaRegistryCluster", refs={SchemaExporterDestinationSchemaRegistryCluster.class}, tree="[0]") + private Output destinationSchemaRegistryCluster; + + public Output destinationSchemaRegistryCluster() { + return this.destinationSchemaRegistryCluster; + } + /** + * The configuration setting name. + * + */ + @Export(name="name", refs={String.class}, tree="[0]") + private Output name; + + /** + * @return The configuration setting name. + * + */ + public Output name() { + return this.name; + } + /** + * The flag to control whether to reset the exporter when updating configs. Defaults to `false`. + * + */ + @Export(name="resetOnUpdate", refs={Boolean.class}, tree="[0]") + private Output resetOnUpdate; + + /** + * @return The flag to control whether to reset the exporter when updating configs. Defaults to `false`. + * + */ + public Output> resetOnUpdate() { + return Codegen.optional(this.resetOnUpdate); + } + /** + * The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + * + */ + @Export(name="restEndpoint", refs={String.class}, tree="[0]") + private Output restEndpoint; + + /** + * @return The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + * + */ + public Output> restEndpoint() { + return Codegen.optional(this.restEndpoint); + } + @Export(name="schemaRegistryCluster", refs={SchemaExporterSchemaRegistryCluster.class}, tree="[0]") + private Output schemaRegistryCluster; + + public Output> schemaRegistryCluster() { + return Codegen.optional(this.schemaRegistryCluster); + } + /** + * The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`. 
+ * + */ + @Export(name="status", refs={String.class}, tree="[0]") + private Output status; + + /** + * @return The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`. + * + */ + public Output status() { + return this.status; + } + /** + * Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dc_orders`. + * + */ + @Export(name="subjectRenameFormat", refs={String.class}, tree="[0]") + private Output subjectRenameFormat; + + /** + * @return Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dc_orders`. + * + */ + public Output subjectRenameFormat() { + return this.subjectRenameFormat; + } + /** + * Name of each exporter subject. + * + */ + @Export(name="subjects", refs={List.class,String.class}, tree="[0,1]") + private Output> subjects; + + /** + * @return Name of each exporter subject. + * + */ + public Output> subjects() { + return this.subjects; + } + + /** + * + * @param name The _unique_ name of the resulting resource. + */ + public SchemaExporter(String name) { + this(name, SchemaExporterArgs.Empty); + } + /** + * + * @param name The _unique_ name of the resulting resource. + * @param args The arguments to use to populate this resource's properties. + */ + public SchemaExporter(String name, SchemaExporterArgs args) { + this(name, args, null); + } + /** + * + * @param name The _unique_ name of the resulting resource. + * @param args The arguments to use to populate this resource's properties. + * @param options A bag of options that control this resource's behavior. 
+ */ + public SchemaExporter(String name, SchemaExporterArgs args, @Nullable com.pulumi.resources.CustomResourceOptions options) { + super("confluentcloud:index/schemaExporter:SchemaExporter", name, args == null ? SchemaExporterArgs.Empty : args, makeResourceOptions(options, Codegen.empty())); + } + + private SchemaExporter(String name, Output id, @Nullable SchemaExporterState state, @Nullable com.pulumi.resources.CustomResourceOptions options) { + super("confluentcloud:index/schemaExporter:SchemaExporter", name, state, makeResourceOptions(options, id)); + } + + private static com.pulumi.resources.CustomResourceOptions makeResourceOptions(@Nullable com.pulumi.resources.CustomResourceOptions options, @Nullable Output id) { + var defaultOptions = com.pulumi.resources.CustomResourceOptions.builder() + .version(Utilities.getVersion()) + .additionalSecretOutputs(List.of( + "credentials" + )) + .build(); + return com.pulumi.resources.CustomResourceOptions.merge(defaultOptions, options, id); + } + + /** + * Get an existing Host resource's state with the given name, ID, and optional extra + * properties used to qualify the lookup. + * + * @param name The _unique_ name of the resulting resource. + * @param id The _unique_ provider ID of the resource to lookup. + * @param state + * @param options Optional settings to control the behavior of the CustomResource. + */ + public static SchemaExporter get(String name, Output id, @Nullable SchemaExporterState state, @Nullable com.pulumi.resources.CustomResourceOptions options) { + return new SchemaExporter(name, id, state, options); + } +} diff --git a/sdk/java/src/main/java/com/pulumi/confluentcloud/SchemaExporterArgs.java b/sdk/java/src/main/java/com/pulumi/confluentcloud/SchemaExporterArgs.java new file mode 100644 index 00000000..25bc16ea --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/confluentcloud/SchemaExporterArgs.java @@ -0,0 +1,467 @@ +// *** WARNING: this file was generated by pulumi-java-gen. 
*** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.confluentcloud; + +import com.pulumi.confluentcloud.inputs.SchemaExporterCredentialsArgs; +import com.pulumi.confluentcloud.inputs.SchemaExporterDestinationSchemaRegistryClusterArgs; +import com.pulumi.confluentcloud.inputs.SchemaExporterSchemaRegistryClusterArgs; +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.Boolean; +import java.lang.String; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class SchemaExporterArgs extends com.pulumi.resources.ResourceArgs { + + public static final SchemaExporterArgs Empty = new SchemaExporterArgs(); + + /** + * Block for custom *nonsensitive* configuration properties: + * + */ + @Import(name="config") + private @Nullable Output> config; + + /** + * @return Block for custom *nonsensitive* configuration properties: + * + */ + public Optional>> config() { + return Optional.ofNullable(this.config); + } + + /** + * Customized context of the exporter if `context_type` is set to `CUSTOM`. + * + */ + @Import(name="context") + private @Nullable Output context; + + /** + * @return Customized context of the exporter if `context_type` is set to `CUSTOM`. + * + */ + public Optional> context() { + return Optional.ofNullable(this.context); + } + + /** + * Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`. + * + */ + @Import(name="contextType") + private @Nullable Output contextType; + + /** + * @return Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`. + * + */ + public Optional> contextType() { + return Optional.ofNullable(this.contextType); + } + + /** + * The Cluster API Credentials. 
+ * + */ + @Import(name="credentials") + private @Nullable Output credentials; + + /** + * @return The Cluster API Credentials. + * + */ + public Optional> credentials() { + return Optional.ofNullable(this.credentials); + } + + @Import(name="destinationSchemaRegistryCluster", required=true) + private Output destinationSchemaRegistryCluster; + + public Output destinationSchemaRegistryCluster() { + return this.destinationSchemaRegistryCluster; + } + + /** + * The configuration setting name. + * + */ + @Import(name="name") + private @Nullable Output name; + + /** + * @return The configuration setting name. + * + */ + public Optional> name() { + return Optional.ofNullable(this.name); + } + + /** + * The flag to control whether to reset the exporter when updating configs. Defaults to `false`. + * + */ + @Import(name="resetOnUpdate") + private @Nullable Output resetOnUpdate; + + /** + * @return The flag to control whether to reset the exporter when updating configs. Defaults to `false`. + * + */ + public Optional> resetOnUpdate() { + return Optional.ofNullable(this.resetOnUpdate); + } + + /** + * The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + * + */ + @Import(name="restEndpoint") + private @Nullable Output restEndpoint; + + /** + * @return The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + * + */ + public Optional> restEndpoint() { + return Optional.ofNullable(this.restEndpoint); + } + + @Import(name="schemaRegistryCluster") + private @Nullable Output schemaRegistryCluster; + + public Optional> schemaRegistryCluster() { + return Optional.ofNullable(this.schemaRegistryCluster); + } + + /** + * The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`. + * + */ + @Import(name="status") + private @Nullable Output status; + + /** + * @return The status of the schema exporter. 
Accepted values are: `RUNNING` and `PAUSED`. + * + */ + public Optional> status() { + return Optional.ofNullable(this.status); + } + + /** + * Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dc_orders`. + * + */ + @Import(name="subjectRenameFormat") + private @Nullable Output subjectRenameFormat; + + /** + * @return Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dc_orders`. + * + */ + public Optional> subjectRenameFormat() { + return Optional.ofNullable(this.subjectRenameFormat); + } + + /** + * Name of each exporter subject. + * + */ + @Import(name="subjects") + private @Nullable Output> subjects; + + /** + * @return Name of each exporter subject. 
+ * + */ + public Optional>> subjects() { + return Optional.ofNullable(this.subjects); + } + + private SchemaExporterArgs() {} + + private SchemaExporterArgs(SchemaExporterArgs $) { + this.config = $.config; + this.context = $.context; + this.contextType = $.contextType; + this.credentials = $.credentials; + this.destinationSchemaRegistryCluster = $.destinationSchemaRegistryCluster; + this.name = $.name; + this.resetOnUpdate = $.resetOnUpdate; + this.restEndpoint = $.restEndpoint; + this.schemaRegistryCluster = $.schemaRegistryCluster; + this.status = $.status; + this.subjectRenameFormat = $.subjectRenameFormat; + this.subjects = $.subjects; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(SchemaExporterArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private SchemaExporterArgs $; + + public Builder() { + $ = new SchemaExporterArgs(); + } + + public Builder(SchemaExporterArgs defaults) { + $ = new SchemaExporterArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param config Block for custom *nonsensitive* configuration properties: + * + * @return builder + * + */ + public Builder config(@Nullable Output> config) { + $.config = config; + return this; + } + + /** + * @param config Block for custom *nonsensitive* configuration properties: + * + * @return builder + * + */ + public Builder config(Map config) { + return config(Output.of(config)); + } + + /** + * @param context Customized context of the exporter if `context_type` is set to `CUSTOM`. + * + * @return builder + * + */ + public Builder context(@Nullable Output context) { + $.context = context; + return this; + } + + /** + * @param context Customized context of the exporter if `context_type` is set to `CUSTOM`. + * + * @return builder + * + */ + public Builder context(String context) { + return context(Output.of(context)); + } + + /** + * @param contextType Context type of the exporter. 
Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`. + * + * @return builder + * + */ + public Builder contextType(@Nullable Output contextType) { + $.contextType = contextType; + return this; + } + + /** + * @param contextType Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`. + * + * @return builder + * + */ + public Builder contextType(String contextType) { + return contextType(Output.of(contextType)); + } + + /** + * @param credentials The Cluster API Credentials. + * + * @return builder + * + */ + public Builder credentials(@Nullable Output credentials) { + $.credentials = credentials; + return this; + } + + /** + * @param credentials The Cluster API Credentials. + * + * @return builder + * + */ + public Builder credentials(SchemaExporterCredentialsArgs credentials) { + return credentials(Output.of(credentials)); + } + + public Builder destinationSchemaRegistryCluster(Output destinationSchemaRegistryCluster) { + $.destinationSchemaRegistryCluster = destinationSchemaRegistryCluster; + return this; + } + + public Builder destinationSchemaRegistryCluster(SchemaExporterDestinationSchemaRegistryClusterArgs destinationSchemaRegistryCluster) { + return destinationSchemaRegistryCluster(Output.of(destinationSchemaRegistryCluster)); + } + + /** + * @param name The configuration setting name. + * + * @return builder + * + */ + public Builder name(@Nullable Output name) { + $.name = name; + return this; + } + + /** + * @param name The configuration setting name. + * + * @return builder + * + */ + public Builder name(String name) { + return name(Output.of(name)); + } + + /** + * @param resetOnUpdate The flag to control whether to reset the exporter when updating configs. Defaults to `false`. 
+ * + * @return builder + * + */ + public Builder resetOnUpdate(@Nullable Output resetOnUpdate) { + $.resetOnUpdate = resetOnUpdate; + return this; + } + + /** + * @param resetOnUpdate The flag to control whether to reset the exporter when updating configs. Defaults to `false`. + * + * @return builder + * + */ + public Builder resetOnUpdate(Boolean resetOnUpdate) { + return resetOnUpdate(Output.of(resetOnUpdate)); + } + + /** + * @param restEndpoint The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + * + * @return builder + * + */ + public Builder restEndpoint(@Nullable Output restEndpoint) { + $.restEndpoint = restEndpoint; + return this; + } + + /** + * @param restEndpoint The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + * + * @return builder + * + */ + public Builder restEndpoint(String restEndpoint) { + return restEndpoint(Output.of(restEndpoint)); + } + + public Builder schemaRegistryCluster(@Nullable Output schemaRegistryCluster) { + $.schemaRegistryCluster = schemaRegistryCluster; + return this; + } + + public Builder schemaRegistryCluster(SchemaExporterSchemaRegistryClusterArgs schemaRegistryCluster) { + return schemaRegistryCluster(Output.of(schemaRegistryCluster)); + } + + /** + * @param status The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`. + * + * @return builder + * + */ + public Builder status(@Nullable Output status) { + $.status = status; + return this; + } + + /** + * @param status The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`. 
+ * + * @return builder + * + */ + public Builder status(String status) { + return status(Output.of(status)); + } + + /** + * @param subjectRenameFormat Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dc_orders`. + * + * @return builder + * + */ + public Builder subjectRenameFormat(@Nullable Output subjectRenameFormat) { + $.subjectRenameFormat = subjectRenameFormat; + return this; + } + + /** + * @param subjectRenameFormat Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dc_orders`. + * + * @return builder + * + */ + public Builder subjectRenameFormat(String subjectRenameFormat) { + return subjectRenameFormat(Output.of(subjectRenameFormat)); + } + + /** + * @param subjects Name of each exporter subject. + * + * @return builder + * + */ + public Builder subjects(@Nullable Output> subjects) { + $.subjects = subjects; + return this; + } + + /** + * @param subjects Name of each exporter subject. + * + * @return builder + * + */ + public Builder subjects(List subjects) { + return subjects(Output.of(subjects)); + } + + /** + * @param subjects Name of each exporter subject. + * + * @return builder + * + */ + public Builder subjects(String... 
subjects) { + return subjects(List.of(subjects)); + } + + public SchemaExporterArgs build() { + $.destinationSchemaRegistryCluster = Objects.requireNonNull($.destinationSchemaRegistryCluster, "expected parameter 'destinationSchemaRegistryCluster' to be non-null"); + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/ClusterLinkDestinationKafkaClusterCredentialsArgs.java b/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/ClusterLinkDestinationKafkaClusterCredentialsArgs.java index b29103f1..2948d405 100644 --- a/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/ClusterLinkDestinationKafkaClusterCredentialsArgs.java +++ b/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/ClusterLinkDestinationKafkaClusterCredentialsArgs.java @@ -31,8 +31,6 @@ public Output key() { /** * The Kafka API Secret. * - * > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). - * */ @Import(name="secret", required=true) private Output secret; @@ -40,8 +38,6 @@ public Output key() { /** * @return The Kafka API Secret. * - * > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). - * */ public Output secret() { return this.secret; @@ -96,8 +92,6 @@ public Builder key(String key) { /** * @param secret The Kafka API Secret. * - * > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). 
- * * @return builder * */ @@ -109,8 +103,6 @@ public Builder secret(Output secret) { /** * @param secret The Kafka API Secret. * - * > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). - * * @return builder * */ diff --git a/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/ClusterLinkLocalKafkaClusterCredentialsArgs.java b/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/ClusterLinkLocalKafkaClusterCredentialsArgs.java index c7f32385..63a7c0d2 100644 --- a/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/ClusterLinkLocalKafkaClusterCredentialsArgs.java +++ b/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/ClusterLinkLocalKafkaClusterCredentialsArgs.java @@ -31,8 +31,6 @@ public Output key() { /** * The Kafka API Secret. * - * > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). - * */ @Import(name="secret", required=true) private Output secret; @@ -40,8 +38,6 @@ public Output key() { /** * @return The Kafka API Secret. * - * > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). - * */ public Output secret() { return this.secret; @@ -96,8 +92,6 @@ public Builder key(String key) { /** * @param secret The Kafka API Secret. * - * > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). 
- * * @return builder * */ @@ -109,8 +103,6 @@ public Builder secret(Output secret) { /** * @param secret The Kafka API Secret. * - * > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). - * * @return builder * */ diff --git a/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/ClusterLinkRemoteKafkaClusterCredentialsArgs.java b/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/ClusterLinkRemoteKafkaClusterCredentialsArgs.java index b803592c..24ebb83c 100644 --- a/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/ClusterLinkRemoteKafkaClusterCredentialsArgs.java +++ b/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/ClusterLinkRemoteKafkaClusterCredentialsArgs.java @@ -31,8 +31,6 @@ public Output key() { /** * The Kafka API Secret. * - * > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). - * */ @Import(name="secret", required=true) private Output secret; @@ -40,8 +38,6 @@ public Output key() { /** * @return The Kafka API Secret. * - * > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). - * */ public Output secret() { return this.secret; @@ -96,8 +92,6 @@ public Builder key(String key) { /** * @param secret The Kafka API Secret. * - * > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). 
- * * @return builder * */ @@ -109,8 +103,6 @@ public Builder secret(Output secret) { /** * @param secret The Kafka API Secret. * - * > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). - * * @return builder * */ diff --git a/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/ClusterLinkSourceKafkaClusterCredentialsArgs.java b/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/ClusterLinkSourceKafkaClusterCredentialsArgs.java index 12a28d8d..b155795e 100644 --- a/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/ClusterLinkSourceKafkaClusterCredentialsArgs.java +++ b/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/ClusterLinkSourceKafkaClusterCredentialsArgs.java @@ -31,8 +31,6 @@ public Output key() { /** * The Kafka API Secret. * - * > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). - * */ @Import(name="secret", required=true) private Output secret; @@ -40,8 +38,6 @@ public Output key() { /** * @return The Kafka API Secret. * - * > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). - * */ public Output secret() { return this.secret; @@ -96,8 +92,6 @@ public Builder key(String key) { /** * @param secret The Kafka API Secret. * - * > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). 
- * * @return builder * */ @@ -109,8 +103,6 @@ public Builder secret(Output secret) { /** * @param secret The Kafka API Secret. * - * > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). - * * @return builder * */ diff --git a/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/FlinkComputePoolEnvironmentArgs.java b/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/FlinkComputePoolEnvironmentArgs.java new file mode 100644 index 00000000..a760224a --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/FlinkComputePoolEnvironmentArgs.java @@ -0,0 +1,82 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.confluentcloud.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.String; +import java.util.Objects; + + +public final class FlinkComputePoolEnvironmentArgs extends com.pulumi.resources.ResourceArgs { + + public static final FlinkComputePoolEnvironmentArgs Empty = new FlinkComputePoolEnvironmentArgs(); + + /** + * The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`. + * + */ + @Import(name="id", required=true) + private Output id; + + /** + * @return The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`. 
+ * + */ + public Output id() { + return this.id; + } + + private FlinkComputePoolEnvironmentArgs() {} + + private FlinkComputePoolEnvironmentArgs(FlinkComputePoolEnvironmentArgs $) { + this.id = $.id; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(FlinkComputePoolEnvironmentArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private FlinkComputePoolEnvironmentArgs $; + + public Builder() { + $ = new FlinkComputePoolEnvironmentArgs(); + } + + public Builder(FlinkComputePoolEnvironmentArgs defaults) { + $ = new FlinkComputePoolEnvironmentArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param id The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`. + * + * @return builder + * + */ + public Builder id(Output id) { + $.id = id; + return this; + } + + /** + * @param id The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`. + * + * @return builder + * + */ + public Builder id(String id) { + return id(Output.of(id)); + } + + public FlinkComputePoolEnvironmentArgs build() { + $.id = Objects.requireNonNull($.id, "expected parameter 'id' to be non-null"); + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/FlinkComputePoolState.java b/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/FlinkComputePoolState.java new file mode 100644 index 00000000..40f46a58 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/FlinkComputePoolState.java @@ -0,0 +1,418 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.confluentcloud.inputs; + +import com.pulumi.confluentcloud.inputs.FlinkComputePoolEnvironmentArgs; +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.Integer; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class FlinkComputePoolState extends com.pulumi.resources.ResourceArgs { + + public static final FlinkComputePoolState Empty = new FlinkComputePoolState(); + + /** + * (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`. + * + */ + @Import(name="apiVersion") + private @Nullable Output apiVersion; + + /** + * @return (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`. + * + */ + public Optional> apiVersion() { + return Optional.ofNullable(this.apiVersion); + } + + /** + * The cloud service provider that runs the Flink Compute Pool. + * + */ + @Import(name="cloud") + private @Nullable Output cloud; + + /** + * @return The cloud service provider that runs the Flink Compute Pool. + * + */ + public Optional> cloud() { + return Optional.ofNullable(this.cloud); + } + + /** + * (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool. + * + */ + @Import(name="currentCfu") + private @Nullable Output currentCfu; + + /** + * @return (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool. + * + */ + public Optional> currentCfu() { + return Optional.ofNullable(this.currentCfu); + } + + /** + * The name of the Flink Compute Pool. + * + */ + @Import(name="displayName") + private @Nullable Output displayName; + + /** + * @return The name of the Flink Compute Pool. 
+ * + */ + public Optional> displayName() { + return Optional.ofNullable(this.displayName); + } + + /** + * Environment objects represent an isolated namespace for your Confluent resources for organizational purposes. + * + */ + @Import(name="environment") + private @Nullable Output environment; + + /** + * @return Environment objects represent an isolated namespace for your Confluent resources for organizational purposes. + * + */ + public Optional> environment() { + return Optional.ofNullable(this.environment); + } + + /** + * (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`. + * + */ + @Import(name="kind") + private @Nullable Output kind; + + /** + * @return (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`. + * + */ + public Optional> kind() { + return Optional.ofNullable(this.kind); + } + + /** + * Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`. + * + */ + @Import(name="maxCfu") + private @Nullable Output maxCfu; + + /** + * @return Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`. + * + */ + public Optional> maxCfu() { + return Optional.ofNullable(this.maxCfu); + } + + /** + * The cloud service provider region that hosts the Flink Compute Pool. + * + */ + @Import(name="region") + private @Nullable Output region; + + /** + * @return The cloud service provider region that hosts the Flink Compute Pool. + * + */ + public Optional> region() { + return Optional.ofNullable(this.region); + } + + /** + * (Required String) The Confluent Resource Name of the Flink Compute Pool. + * + */ + @Import(name="resourceName") + private @Nullable Output resourceName; + + /** + * @return (Required String) The Confluent Resource Name of the Flink Compute Pool. 
+ * + */ + public Optional> resourceName() { + return Optional.ofNullable(this.resourceName); + } + + /** + * (Required String) The API endpoint of the Flink Compute Pool. + * + */ + @Import(name="restEndpoint") + private @Nullable Output restEndpoint; + + /** + * @return (Required String) The API endpoint of the Flink Compute Pool. + * + */ + public Optional> restEndpoint() { + return Optional.ofNullable(this.restEndpoint); + } + + private FlinkComputePoolState() {} + + private FlinkComputePoolState(FlinkComputePoolState $) { + this.apiVersion = $.apiVersion; + this.cloud = $.cloud; + this.currentCfu = $.currentCfu; + this.displayName = $.displayName; + this.environment = $.environment; + this.kind = $.kind; + this.maxCfu = $.maxCfu; + this.region = $.region; + this.resourceName = $.resourceName; + this.restEndpoint = $.restEndpoint; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(FlinkComputePoolState defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private FlinkComputePoolState $; + + public Builder() { + $ = new FlinkComputePoolState(); + } + + public Builder(FlinkComputePoolState defaults) { + $ = new FlinkComputePoolState(Objects.requireNonNull(defaults)); + } + + /** + * @param apiVersion (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`. + * + * @return builder + * + */ + public Builder apiVersion(@Nullable Output apiVersion) { + $.apiVersion = apiVersion; + return this; + } + + /** + * @param apiVersion (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`. + * + * @return builder + * + */ + public Builder apiVersion(String apiVersion) { + return apiVersion(Output.of(apiVersion)); + } + + /** + * @param cloud The cloud service provider that runs the Flink Compute Pool. 
+ * + * @return builder + * + */ + public Builder cloud(@Nullable Output cloud) { + $.cloud = cloud; + return this; + } + + /** + * @param cloud The cloud service provider that runs the Flink Compute Pool. + * + * @return builder + * + */ + public Builder cloud(String cloud) { + return cloud(Output.of(cloud)); + } + + /** + * @param currentCfu (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool. + * + * @return builder + * + */ + public Builder currentCfu(@Nullable Output currentCfu) { + $.currentCfu = currentCfu; + return this; + } + + /** + * @param currentCfu (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool. + * + * @return builder + * + */ + public Builder currentCfu(Integer currentCfu) { + return currentCfu(Output.of(currentCfu)); + } + + /** + * @param displayName The name of the Flink Compute Pool. + * + * @return builder + * + */ + public Builder displayName(@Nullable Output displayName) { + $.displayName = displayName; + return this; + } + + /** + * @param displayName The name of the Flink Compute Pool. + * + * @return builder + * + */ + public Builder displayName(String displayName) { + return displayName(Output.of(displayName)); + } + + /** + * @param environment Environment objects represent an isolated namespace for your Confluent resources for organizational purposes. + * + * @return builder + * + */ + public Builder environment(@Nullable Output environment) { + $.environment = environment; + return this; + } + + /** + * @param environment Environment objects represent an isolated namespace for your Confluent resources for organizational purposes. + * + * @return builder + * + */ + public Builder environment(FlinkComputePoolEnvironmentArgs environment) { + return environment(Output.of(environment)); + } + + /** + * @param kind (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`. 
+ * + * @return builder + * + */ + public Builder kind(@Nullable Output kind) { + $.kind = kind; + return this; + } + + /** + * @param kind (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`. + * + * @return builder + * + */ + public Builder kind(String kind) { + return kind(Output.of(kind)); + } + + /** + * @param maxCfu Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`. + * + * @return builder + * + */ + public Builder maxCfu(@Nullable Output maxCfu) { + $.maxCfu = maxCfu; + return this; + } + + /** + * @param maxCfu Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`. + * + * @return builder + * + */ + public Builder maxCfu(Integer maxCfu) { + return maxCfu(Output.of(maxCfu)); + } + + /** + * @param region The cloud service provider region that hosts the Flink Compute Pool. + * + * @return builder + * + */ + public Builder region(@Nullable Output region) { + $.region = region; + return this; + } + + /** + * @param region The cloud service provider region that hosts the Flink Compute Pool. + * + * @return builder + * + */ + public Builder region(String region) { + return region(Output.of(region)); + } + + /** + * @param resourceName (Required String) The Confluent Resource Name of the Flink Compute Pool. + * + * @return builder + * + */ + public Builder resourceName(@Nullable Output resourceName) { + $.resourceName = resourceName; + return this; + } + + /** + * @param resourceName (Required String) The Confluent Resource Name of the Flink Compute Pool. + * + * @return builder + * + */ + public Builder resourceName(String resourceName) { + return resourceName(Output.of(resourceName)); + } + + /** + * @param restEndpoint (Required String) The API endpoint of the Flink Compute Pool. 
+ * + * @return builder + * + */ + public Builder restEndpoint(@Nullable Output restEndpoint) { + $.restEndpoint = restEndpoint; + return this; + } + + /** + * @param restEndpoint (Required String) The API endpoint of the Flink Compute Pool. + * + * @return builder + * + */ + public Builder restEndpoint(String restEndpoint) { + return restEndpoint(Output.of(restEndpoint)); + } + + public FlinkComputePoolState build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/GetFlinkComputePoolArgs.java b/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/GetFlinkComputePoolArgs.java new file mode 100644 index 00000000..bd76bf8e --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/GetFlinkComputePoolArgs.java @@ -0,0 +1,167 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.confluentcloud.inputs; + +import com.pulumi.confluentcloud.inputs.GetFlinkComputePoolEnvironmentArgs; +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class GetFlinkComputePoolArgs extends com.pulumi.resources.InvokeArgs { + + public static final GetFlinkComputePoolArgs Empty = new GetFlinkComputePoolArgs(); + + /** + * A human-readable name for the Flink Compute Pool. + * + */ + @Import(name="displayName") + private @Nullable Output displayName; + + /** + * @return A human-readable name for the Flink Compute Pool. 
+ * + */ + public Optional> displayName() { + return Optional.ofNullable(this.displayName); + } + + /** + * (Required Configuration Block) supports the following: + * + */ + @Import(name="environment", required=true) + private Output environment; + + /** + * @return (Required Configuration Block) supports the following: + * + */ + public Output environment() { + return this.environment; + } + + /** + * The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. + * + * > **Note:** Exactly one from the `id` and `display_name` attributes must be specified. + * + */ + @Import(name="id") + private @Nullable Output id; + + /** + * @return The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. + * + * > **Note:** Exactly one from the `id` and `display_name` attributes must be specified. + * + */ + public Optional> id() { + return Optional.ofNullable(this.id); + } + + private GetFlinkComputePoolArgs() {} + + private GetFlinkComputePoolArgs(GetFlinkComputePoolArgs $) { + this.displayName = $.displayName; + this.environment = $.environment; + this.id = $.id; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(GetFlinkComputePoolArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private GetFlinkComputePoolArgs $; + + public Builder() { + $ = new GetFlinkComputePoolArgs(); + } + + public Builder(GetFlinkComputePoolArgs defaults) { + $ = new GetFlinkComputePoolArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param displayName A human-readable name for the Flink Compute Pool. + * + * @return builder + * + */ + public Builder displayName(@Nullable Output displayName) { + $.displayName = displayName; + return this; + } + + /** + * @param displayName A human-readable name for the Flink Compute Pool. 
+ * + * @return builder + * + */ + public Builder displayName(String displayName) { + return displayName(Output.of(displayName)); + } + + /** + * @param environment (Required Configuration Block) supports the following: + * + * @return builder + * + */ + public Builder environment(Output environment) { + $.environment = environment; + return this; + } + + /** + * @param environment (Required Configuration Block) supports the following: + * + * @return builder + * + */ + public Builder environment(GetFlinkComputePoolEnvironmentArgs environment) { + return environment(Output.of(environment)); + } + + /** + * @param id The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. + * + * > **Note:** Exactly one from the `id` and `display_name` attributes must be specified. + * + * @return builder + * + */ + public Builder id(@Nullable Output id) { + $.id = id; + return this; + } + + /** + * @param id The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. + * + * > **Note:** Exactly one from the `id` and `display_name` attributes must be specified. + * + * @return builder + * + */ + public Builder id(String id) { + return id(Output.of(id)); + } + + public GetFlinkComputePoolArgs build() { + $.environment = Objects.requireNonNull($.environment, "expected parameter 'environment' to be non-null"); + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/GetFlinkComputePoolEnvironment.java b/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/GetFlinkComputePoolEnvironment.java new file mode 100644 index 00000000..c3333ca8 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/GetFlinkComputePoolEnvironment.java @@ -0,0 +1,77 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.confluentcloud.inputs; + +import com.pulumi.core.annotations.Import; +import java.lang.String; +import java.util.Objects; + + +public final class GetFlinkComputePoolEnvironment extends com.pulumi.resources.InvokeArgs { + + public static final GetFlinkComputePoolEnvironment Empty = new GetFlinkComputePoolEnvironment(); + + /** + * The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. + * + * > **Note:** Exactly one from the `id` and `display_name` attributes must be specified. + * + */ + @Import(name="id", required=true) + private String id; + + /** + * @return The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. + * + * > **Note:** Exactly one from the `id` and `display_name` attributes must be specified. + * + */ + public String id() { + return this.id; + } + + private GetFlinkComputePoolEnvironment() {} + + private GetFlinkComputePoolEnvironment(GetFlinkComputePoolEnvironment $) { + this.id = $.id; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(GetFlinkComputePoolEnvironment defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private GetFlinkComputePoolEnvironment $; + + public Builder() { + $ = new GetFlinkComputePoolEnvironment(); + } + + public Builder(GetFlinkComputePoolEnvironment defaults) { + $ = new GetFlinkComputePoolEnvironment(Objects.requireNonNull(defaults)); + } + + /** + * @param id The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. + * + * > **Note:** Exactly one from the `id` and `display_name` attributes must be specified. 
+ * + * @return builder + * + */ + public Builder id(String id) { + $.id = id; + return this; + } + + public GetFlinkComputePoolEnvironment build() { + $.id = Objects.requireNonNull($.id, "expected parameter 'id' to be non-null"); + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/GetFlinkComputePoolEnvironmentArgs.java b/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/GetFlinkComputePoolEnvironmentArgs.java new file mode 100644 index 00000000..c9071d48 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/GetFlinkComputePoolEnvironmentArgs.java @@ -0,0 +1,90 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.confluentcloud.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.String; +import java.util.Objects; + + +public final class GetFlinkComputePoolEnvironmentArgs extends com.pulumi.resources.ResourceArgs { + + public static final GetFlinkComputePoolEnvironmentArgs Empty = new GetFlinkComputePoolEnvironmentArgs(); + + /** + * The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. + * + * > **Note:** Exactly one from the `id` and `display_name` attributes must be specified. + * + */ + @Import(name="id", required=true) + private Output id; + + /** + * @return The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. + * + * > **Note:** Exactly one from the `id` and `display_name` attributes must be specified. 
+ * + */ + public Output id() { + return this.id; + } + + private GetFlinkComputePoolEnvironmentArgs() {} + + private GetFlinkComputePoolEnvironmentArgs(GetFlinkComputePoolEnvironmentArgs $) { + this.id = $.id; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(GetFlinkComputePoolEnvironmentArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private GetFlinkComputePoolEnvironmentArgs $; + + public Builder() { + $ = new GetFlinkComputePoolEnvironmentArgs(); + } + + public Builder(GetFlinkComputePoolEnvironmentArgs defaults) { + $ = new GetFlinkComputePoolEnvironmentArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param id The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. + * + * > **Note:** Exactly one from the `id` and `display_name` attributes must be specified. + * + * @return builder + * + */ + public Builder id(Output id) { + $.id = id; + return this; + } + + /** + * @param id The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. + * + * > **Note:** Exactly one from the `id` and `display_name` attributes must be specified. + * + * @return builder + * + */ + public Builder id(String id) { + return id(Output.of(id)); + } + + public GetFlinkComputePoolEnvironmentArgs build() { + $.id = Objects.requireNonNull($.id, "expected parameter 'id' to be non-null"); + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/GetFlinkComputePoolPlainArgs.java b/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/GetFlinkComputePoolPlainArgs.java new file mode 100644 index 00000000..798754a5 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/GetFlinkComputePoolPlainArgs.java @@ -0,0 +1,134 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.confluentcloud.inputs; + +import com.pulumi.confluentcloud.inputs.GetFlinkComputePoolEnvironment; +import com.pulumi.core.annotations.Import; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class GetFlinkComputePoolPlainArgs extends com.pulumi.resources.InvokeArgs { + + public static final GetFlinkComputePoolPlainArgs Empty = new GetFlinkComputePoolPlainArgs(); + + /** + * A human-readable name for the Flink Compute Pool. + * + */ + @Import(name="displayName") + private @Nullable String displayName; + + /** + * @return A human-readable name for the Flink Compute Pool. + * + */ + public Optional displayName() { + return Optional.ofNullable(this.displayName); + } + + /** + * (Required Configuration Block) supports the following: + * + */ + @Import(name="environment", required=true) + private GetFlinkComputePoolEnvironment environment; + + /** + * @return (Required Configuration Block) supports the following: + * + */ + public GetFlinkComputePoolEnvironment environment() { + return this.environment; + } + + /** + * The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. + * + * > **Note:** Exactly one from the `id` and `display_name` attributes must be specified. + * + */ + @Import(name="id") + private @Nullable String id; + + /** + * @return The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. + * + * > **Note:** Exactly one from the `id` and `display_name` attributes must be specified. 
+ * + */ + public Optional id() { + return Optional.ofNullable(this.id); + } + + private GetFlinkComputePoolPlainArgs() {} + + private GetFlinkComputePoolPlainArgs(GetFlinkComputePoolPlainArgs $) { + this.displayName = $.displayName; + this.environment = $.environment; + this.id = $.id; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(GetFlinkComputePoolPlainArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private GetFlinkComputePoolPlainArgs $; + + public Builder() { + $ = new GetFlinkComputePoolPlainArgs(); + } + + public Builder(GetFlinkComputePoolPlainArgs defaults) { + $ = new GetFlinkComputePoolPlainArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param displayName A human-readable name for the Flink Compute Pool. + * + * @return builder + * + */ + public Builder displayName(@Nullable String displayName) { + $.displayName = displayName; + return this; + } + + /** + * @param environment (Required Configuration Block) supports the following: + * + * @return builder + * + */ + public Builder environment(GetFlinkComputePoolEnvironment environment) { + $.environment = environment; + return this; + } + + /** + * @param id The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. + * + * > **Note:** Exactly one from the `id` and `display_name` attributes must be specified. 
+ * + * @return builder + * + */ + public Builder id(@Nullable String id) { + $.id = id; + return this; + } + + public GetFlinkComputePoolPlainArgs build() { + $.environment = Objects.requireNonNull($.environment, "expected parameter 'environment' to be non-null"); + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/SchemaExporterCredentialsArgs.java b/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/SchemaExporterCredentialsArgs.java new file mode 100644 index 00000000..61994459 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/SchemaExporterCredentialsArgs.java @@ -0,0 +1,120 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.confluentcloud.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.String; +import java.util.Objects; + + +public final class SchemaExporterCredentialsArgs extends com.pulumi.resources.ResourceArgs { + + public static final SchemaExporterCredentialsArgs Empty = new SchemaExporterCredentialsArgs(); + + /** + * The Schema Registry API Key. + * + */ + @Import(name="key", required=true) + private Output key; + + /** + * @return The Schema Registry API Key. + * + */ + public Output key() { + return this.key; + } + + /** + * The Schema Registry API Secret. + * + */ + @Import(name="secret", required=true) + private Output secret; + + /** + * @return The Schema Registry API Secret. 
+ * + */ + public Output secret() { + return this.secret; + } + + private SchemaExporterCredentialsArgs() {} + + private SchemaExporterCredentialsArgs(SchemaExporterCredentialsArgs $) { + this.key = $.key; + this.secret = $.secret; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(SchemaExporterCredentialsArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private SchemaExporterCredentialsArgs $; + + public Builder() { + $ = new SchemaExporterCredentialsArgs(); + } + + public Builder(SchemaExporterCredentialsArgs defaults) { + $ = new SchemaExporterCredentialsArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param key The Schema Registry API Key. + * + * @return builder + * + */ + public Builder key(Output key) { + $.key = key; + return this; + } + + /** + * @param key The Schema Registry API Key. + * + * @return builder + * + */ + public Builder key(String key) { + return key(Output.of(key)); + } + + /** + * @param secret The Schema Registry API Secret. + * + * @return builder + * + */ + public Builder secret(Output secret) { + $.secret = secret; + return this; + } + + /** + * @param secret The Schema Registry API Secret. 
+ * + * @return builder + * + */ + public Builder secret(String secret) { + return secret(Output.of(secret)); + } + + public SchemaExporterCredentialsArgs build() { + $.key = Objects.requireNonNull($.key, "expected parameter 'key' to be non-null"); + $.secret = Objects.requireNonNull($.secret, "expected parameter 'secret' to be non-null"); + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/SchemaExporterDestinationSchemaRegistryClusterArgs.java b/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/SchemaExporterDestinationSchemaRegistryClusterArgs.java new file mode 100644 index 00000000..31c12813 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/SchemaExporterDestinationSchemaRegistryClusterArgs.java @@ -0,0 +1,101 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.confluentcloud.inputs; + +import com.pulumi.confluentcloud.inputs.SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs; +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.String; +import java.util.Objects; + + +public final class SchemaExporterDestinationSchemaRegistryClusterArgs extends com.pulumi.resources.ResourceArgs { + + public static final SchemaExporterDestinationSchemaRegistryClusterArgs Empty = new SchemaExporterDestinationSchemaRegistryClusterArgs(); + + @Import(name="credentials", required=true) + private Output credentials; + + public Output credentials() { + return this.credentials; + } + + /** + * The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). 
+ * + */ + @Import(name="restEndpoint", required=true) + private Output restEndpoint; + + /** + * @return The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + * + */ + public Output restEndpoint() { + return this.restEndpoint; + } + + private SchemaExporterDestinationSchemaRegistryClusterArgs() {} + + private SchemaExporterDestinationSchemaRegistryClusterArgs(SchemaExporterDestinationSchemaRegistryClusterArgs $) { + this.credentials = $.credentials; + this.restEndpoint = $.restEndpoint; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(SchemaExporterDestinationSchemaRegistryClusterArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private SchemaExporterDestinationSchemaRegistryClusterArgs $; + + public Builder() { + $ = new SchemaExporterDestinationSchemaRegistryClusterArgs(); + } + + public Builder(SchemaExporterDestinationSchemaRegistryClusterArgs defaults) { + $ = new SchemaExporterDestinationSchemaRegistryClusterArgs(Objects.requireNonNull(defaults)); + } + + public Builder credentials(Output credentials) { + $.credentials = credentials; + return this; + } + + public Builder credentials(SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs credentials) { + return credentials(Output.of(credentials)); + } + + /** + * @param restEndpoint The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + * + * @return builder + * + */ + public Builder restEndpoint(Output restEndpoint) { + $.restEndpoint = restEndpoint; + return this; + } + + /** + * @param restEndpoint The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). 
+ * + * @return builder + * + */ + public Builder restEndpoint(String restEndpoint) { + return restEndpoint(Output.of(restEndpoint)); + } + + public SchemaExporterDestinationSchemaRegistryClusterArgs build() { + $.credentials = Objects.requireNonNull($.credentials, "expected parameter 'credentials' to be non-null"); + $.restEndpoint = Objects.requireNonNull($.restEndpoint, "expected parameter 'restEndpoint' to be non-null"); + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs.java b/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs.java new file mode 100644 index 00000000..02aa70cf --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs.java @@ -0,0 +1,120 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.confluentcloud.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.String; +import java.util.Objects; + + +public final class SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs extends com.pulumi.resources.ResourceArgs { + + public static final SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs Empty = new SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs(); + + /** + * The Schema Registry API Key. + * + */ + @Import(name="key", required=true) + private Output key; + + /** + * @return The Schema Registry API Key. + * + */ + public Output key() { + return this.key; + } + + /** + * The Schema Registry API Secret. + * + */ + @Import(name="secret", required=true) + private Output secret; + + /** + * @return The Schema Registry API Secret. 
+ * + */ + public Output secret() { + return this.secret; + } + + private SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs() {} + + private SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs(SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs $) { + this.key = $.key; + this.secret = $.secret; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs $; + + public Builder() { + $ = new SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs(); + } + + public Builder(SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs defaults) { + $ = new SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param key The Schema Registry API Key. + * + * @return builder + * + */ + public Builder key(Output key) { + $.key = key; + return this; + } + + /** + * @param key The Schema Registry API Key. + * + * @return builder + * + */ + public Builder key(String key) { + return key(Output.of(key)); + } + + /** + * @param secret The Schema Registry API Secret. + * + * @return builder + * + */ + public Builder secret(Output secret) { + $.secret = secret; + return this; + } + + /** + * @param secret The Schema Registry API Secret. 
+ * + * @return builder + * + */ + public Builder secret(String secret) { + return secret(Output.of(secret)); + } + + public SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs build() { + $.key = Objects.requireNonNull($.key, "expected parameter 'key' to be non-null"); + $.secret = Objects.requireNonNull($.secret, "expected parameter 'secret' to be non-null"); + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/SchemaExporterSchemaRegistryClusterArgs.java b/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/SchemaExporterSchemaRegistryClusterArgs.java new file mode 100644 index 00000000..2bbdceaf --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/SchemaExporterSchemaRegistryClusterArgs.java @@ -0,0 +1,82 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.confluentcloud.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.String; +import java.util.Objects; + + +public final class SchemaExporterSchemaRegistryClusterArgs extends com.pulumi.resources.ResourceArgs { + + public static final SchemaExporterSchemaRegistryClusterArgs Empty = new SchemaExporterSchemaRegistryClusterArgs(); + + /** + * The ID of the Schema Registry cluster, for example, `lsrc-abc123`. + * + */ + @Import(name="id", required=true) + private Output id; + + /** + * @return The ID of the Schema Registry cluster, for example, `lsrc-abc123`. 
+ * + */ + public Output id() { + return this.id; + } + + private SchemaExporterSchemaRegistryClusterArgs() {} + + private SchemaExporterSchemaRegistryClusterArgs(SchemaExporterSchemaRegistryClusterArgs $) { + this.id = $.id; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(SchemaExporterSchemaRegistryClusterArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private SchemaExporterSchemaRegistryClusterArgs $; + + public Builder() { + $ = new SchemaExporterSchemaRegistryClusterArgs(); + } + + public Builder(SchemaExporterSchemaRegistryClusterArgs defaults) { + $ = new SchemaExporterSchemaRegistryClusterArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param id The ID of the Schema Registry cluster, for example, `lsrc-abc123`. + * + * @return builder + * + */ + public Builder id(Output id) { + $.id = id; + return this; + } + + /** + * @param id The ID of the Schema Registry cluster, for example, `lsrc-abc123`. + * + * @return builder + * + */ + public Builder id(String id) { + return id(Output.of(id)); + } + + public SchemaExporterSchemaRegistryClusterArgs build() { + $.id = Objects.requireNonNull($.id, "expected parameter 'id' to be non-null"); + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/SchemaExporterState.java b/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/SchemaExporterState.java new file mode 100644 index 00000000..1b7a31cc --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/confluentcloud/inputs/SchemaExporterState.java @@ -0,0 +1,466 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.confluentcloud.inputs; + +import com.pulumi.confluentcloud.inputs.SchemaExporterCredentialsArgs; +import com.pulumi.confluentcloud.inputs.SchemaExporterDestinationSchemaRegistryClusterArgs; +import com.pulumi.confluentcloud.inputs.SchemaExporterSchemaRegistryClusterArgs; +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.Boolean; +import java.lang.String; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class SchemaExporterState extends com.pulumi.resources.ResourceArgs { + + public static final SchemaExporterState Empty = new SchemaExporterState(); + + /** + * Block for custom *nonsensitive* configuration properties: + * + */ + @Import(name="config") + private @Nullable Output> config; + + /** + * @return Block for custom *nonsensitive* configuration properties: + * + */ + public Optional>> config() { + return Optional.ofNullable(this.config); + } + + /** + * Customized context of the exporter if `context_type` is set to `CUSTOM`. + * + */ + @Import(name="context") + private @Nullable Output context; + + /** + * @return Customized context of the exporter if `context_type` is set to `CUSTOM`. + * + */ + public Optional> context() { + return Optional.ofNullable(this.context); + } + + /** + * Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`. + * + */ + @Import(name="contextType") + private @Nullable Output contextType; + + /** + * @return Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`. + * + */ + public Optional> contextType() { + return Optional.ofNullable(this.contextType); + } + + /** + * The Cluster API Credentials. + * + */ + @Import(name="credentials") + private @Nullable Output credentials; + + /** + * @return The Cluster API Credentials. 
+ * + */ + public Optional> credentials() { + return Optional.ofNullable(this.credentials); + } + + @Import(name="destinationSchemaRegistryCluster") + private @Nullable Output destinationSchemaRegistryCluster; + + public Optional> destinationSchemaRegistryCluster() { + return Optional.ofNullable(this.destinationSchemaRegistryCluster); + } + + /** + * The configuration setting name. + * + */ + @Import(name="name") + private @Nullable Output name; + + /** + * @return The configuration setting name. + * + */ + public Optional> name() { + return Optional.ofNullable(this.name); + } + + /** + * The flag to control whether to reset the exporter when updating configs. Defaults to `false`. + * + */ + @Import(name="resetOnUpdate") + private @Nullable Output resetOnUpdate; + + /** + * @return The flag to control whether to reset the exporter when updating configs. Defaults to `false`. + * + */ + public Optional> resetOnUpdate() { + return Optional.ofNullable(this.resetOnUpdate); + } + + /** + * The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + * + */ + @Import(name="restEndpoint") + private @Nullable Output restEndpoint; + + /** + * @return The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + * + */ + public Optional> restEndpoint() { + return Optional.ofNullable(this.restEndpoint); + } + + @Import(name="schemaRegistryCluster") + private @Nullable Output schemaRegistryCluster; + + public Optional> schemaRegistryCluster() { + return Optional.ofNullable(this.schemaRegistryCluster); + } + + /** + * The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`. + * + */ + @Import(name="status") + private @Nullable Output status; + + /** + * @return The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`. 
+ * + */ + public Optional> status() { + return Optional.ofNullable(this.status); + } + + /** + * Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dc_orders`. + * + */ + @Import(name="subjectRenameFormat") + private @Nullable Output subjectRenameFormat; + + /** + * @return Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dc_orders`. + * + */ + public Optional> subjectRenameFormat() { + return Optional.ofNullable(this.subjectRenameFormat); + } + + /** + * Name of each exporter subject. + * + */ + @Import(name="subjects") + private @Nullable Output> subjects; + + /** + * @return Name of each exporter subject. + * + */ + public Optional>> subjects() { + return Optional.ofNullable(this.subjects); + } + + private SchemaExporterState() {} + + private SchemaExporterState(SchemaExporterState $) { + this.config = $.config; + this.context = $.context; + this.contextType = $.contextType; + this.credentials = $.credentials; + this.destinationSchemaRegistryCluster = $.destinationSchemaRegistryCluster; + this.name = $.name; + this.resetOnUpdate = $.resetOnUpdate; + this.restEndpoint = $.restEndpoint; + this.schemaRegistryCluster = $.schemaRegistryCluster; + this.status = $.status; + this.subjectRenameFormat = $.subjectRenameFormat; + this.subjects = $.subjects; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(SchemaExporterState defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private SchemaExporterState $; + + public Builder() { + $ = new SchemaExporterState(); + } + + public Builder(SchemaExporterState defaults) 
{ + $ = new SchemaExporterState(Objects.requireNonNull(defaults)); + } + + /** + * @param config Block for custom *nonsensitive* configuration properties: + * + * @return builder + * + */ + public Builder config(@Nullable Output> config) { + $.config = config; + return this; + } + + /** + * @param config Block for custom *nonsensitive* configuration properties: + * + * @return builder + * + */ + public Builder config(Map config) { + return config(Output.of(config)); + } + + /** + * @param context Customized context of the exporter if `context_type` is set to `CUSTOM`. + * + * @return builder + * + */ + public Builder context(@Nullable Output context) { + $.context = context; + return this; + } + + /** + * @param context Customized context of the exporter if `context_type` is set to `CUSTOM`. + * + * @return builder + * + */ + public Builder context(String context) { + return context(Output.of(context)); + } + + /** + * @param contextType Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`. + * + * @return builder + * + */ + public Builder contextType(@Nullable Output contextType) { + $.contextType = contextType; + return this; + } + + /** + * @param contextType Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`. + * + * @return builder + * + */ + public Builder contextType(String contextType) { + return contextType(Output.of(contextType)); + } + + /** + * @param credentials The Cluster API Credentials. + * + * @return builder + * + */ + public Builder credentials(@Nullable Output credentials) { + $.credentials = credentials; + return this; + } + + /** + * @param credentials The Cluster API Credentials. 
+ * + * @return builder + * + */ + public Builder credentials(SchemaExporterCredentialsArgs credentials) { + return credentials(Output.of(credentials)); + } + + public Builder destinationSchemaRegistryCluster(@Nullable Output destinationSchemaRegistryCluster) { + $.destinationSchemaRegistryCluster = destinationSchemaRegistryCluster; + return this; + } + + public Builder destinationSchemaRegistryCluster(SchemaExporterDestinationSchemaRegistryClusterArgs destinationSchemaRegistryCluster) { + return destinationSchemaRegistryCluster(Output.of(destinationSchemaRegistryCluster)); + } + + /** + * @param name The configuration setting name. + * + * @return builder + * + */ + public Builder name(@Nullable Output name) { + $.name = name; + return this; + } + + /** + * @param name The configuration setting name. + * + * @return builder + * + */ + public Builder name(String name) { + return name(Output.of(name)); + } + + /** + * @param resetOnUpdate The flag to control whether to reset the exporter when updating configs. Defaults to `false`. + * + * @return builder + * + */ + public Builder resetOnUpdate(@Nullable Output resetOnUpdate) { + $.resetOnUpdate = resetOnUpdate; + return this; + } + + /** + * @param resetOnUpdate The flag to control whether to reset the exporter when updating configs. Defaults to `false`. + * + * @return builder + * + */ + public Builder resetOnUpdate(Boolean resetOnUpdate) { + return resetOnUpdate(Output.of(resetOnUpdate)); + } + + /** + * @param restEndpoint The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + * + * @return builder + * + */ + public Builder restEndpoint(@Nullable Output restEndpoint) { + $.restEndpoint = restEndpoint; + return this; + } + + /** + * @param restEndpoint The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). 
+ * + * @return builder + * + */ + public Builder restEndpoint(String restEndpoint) { + return restEndpoint(Output.of(restEndpoint)); + } + + public Builder schemaRegistryCluster(@Nullable Output schemaRegistryCluster) { + $.schemaRegistryCluster = schemaRegistryCluster; + return this; + } + + public Builder schemaRegistryCluster(SchemaExporterSchemaRegistryClusterArgs schemaRegistryCluster) { + return schemaRegistryCluster(Output.of(schemaRegistryCluster)); + } + + /** + * @param status The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`. + * + * @return builder + * + */ + public Builder status(@Nullable Output status) { + $.status = status; + return this; + } + + /** + * @param status The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`. + * + * @return builder + * + */ + public Builder status(String status) { + return status(Output.of(status)); + } + + /** + * @param subjectRenameFormat Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dc_orders`. + * + * @return builder + * + */ + public Builder subjectRenameFormat(@Nullable Output subjectRenameFormat) { + $.subjectRenameFormat = subjectRenameFormat; + return this; + } + + /** + * @param subjectRenameFormat Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dc_orders`. + * + * @return builder + * + */ + public Builder subjectRenameFormat(String subjectRenameFormat) { + return subjectRenameFormat(Output.of(subjectRenameFormat)); + } + + /** + * @param subjects Name of each exporter subject. 
+ * + * @return builder + * + */ + public Builder subjects(@Nullable Output> subjects) { + $.subjects = subjects; + return this; + } + + /** + * @param subjects Name of each exporter subject. + * + * @return builder + * + */ + public Builder subjects(List subjects) { + return subjects(Output.of(subjects)); + } + + /** + * @param subjects Name of each exporter subject. + * + * @return builder + * + */ + public Builder subjects(String... subjects) { + return subjects(List.of(subjects)); + } + + public SchemaExporterState build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/ClusterLinkDestinationKafkaClusterCredentials.java b/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/ClusterLinkDestinationKafkaClusterCredentials.java index a5a22fca..a56f23b9 100644 --- a/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/ClusterLinkDestinationKafkaClusterCredentials.java +++ b/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/ClusterLinkDestinationKafkaClusterCredentials.java @@ -17,8 +17,6 @@ public final class ClusterLinkDestinationKafkaClusterCredentials { /** * @return The Kafka API Secret. * - * > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). - * */ private String secret; @@ -33,8 +31,6 @@ public String key() { /** * @return The Kafka API Secret. * - * > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). 
- * */ public String secret() { return this.secret; diff --git a/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/ClusterLinkLocalKafkaClusterCredentials.java b/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/ClusterLinkLocalKafkaClusterCredentials.java index 8d38203f..909f04e3 100644 --- a/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/ClusterLinkLocalKafkaClusterCredentials.java +++ b/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/ClusterLinkLocalKafkaClusterCredentials.java @@ -17,8 +17,6 @@ public final class ClusterLinkLocalKafkaClusterCredentials { /** * @return The Kafka API Secret. * - * > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). - * */ private String secret; @@ -33,8 +31,6 @@ public String key() { /** * @return The Kafka API Secret. * - * > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). - * */ public String secret() { return this.secret; diff --git a/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/ClusterLinkRemoteKafkaClusterCredentials.java b/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/ClusterLinkRemoteKafkaClusterCredentials.java index bce7f243..63645299 100644 --- a/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/ClusterLinkRemoteKafkaClusterCredentials.java +++ b/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/ClusterLinkRemoteKafkaClusterCredentials.java @@ -17,8 +17,6 @@ public final class ClusterLinkRemoteKafkaClusterCredentials { /** * @return The Kafka API Secret. 
* - * > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). - * */ private String secret; @@ -33,8 +31,6 @@ public String key() { /** * @return The Kafka API Secret. * - * > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). - * */ public String secret() { return this.secret; diff --git a/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/ClusterLinkSourceKafkaClusterCredentials.java b/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/ClusterLinkSourceKafkaClusterCredentials.java index ee985a20..7398c7db 100644 --- a/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/ClusterLinkSourceKafkaClusterCredentials.java +++ b/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/ClusterLinkSourceKafkaClusterCredentials.java @@ -17,8 +17,6 @@ public final class ClusterLinkSourceKafkaClusterCredentials { /** * @return The Kafka API Secret. * - * > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). - * */ private String secret; @@ -33,8 +31,6 @@ public String key() { /** * @return The Kafka API Secret. * - * > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). 
- * */ public String secret() { return this.secret; diff --git a/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/FlinkComputePoolEnvironment.java b/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/FlinkComputePoolEnvironment.java new file mode 100644 index 00000000..fb8d15c1 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/FlinkComputePoolEnvironment.java @@ -0,0 +1,54 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.confluentcloud.outputs; + +import com.pulumi.core.annotations.CustomType; +import java.lang.String; +import java.util.Objects; + +@CustomType +public final class FlinkComputePoolEnvironment { + /** + * @return The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`. + * + */ + private String id; + + private FlinkComputePoolEnvironment() {} + /** + * @return The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`. 
+ * + */ + public String id() { + return this.id; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(FlinkComputePoolEnvironment defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String id; + public Builder() {} + public Builder(FlinkComputePoolEnvironment defaults) { + Objects.requireNonNull(defaults); + this.id = defaults.id; + } + + @CustomType.Setter + public Builder id(String id) { + this.id = Objects.requireNonNull(id); + return this; + } + public FlinkComputePoolEnvironment build() { + final var o = new FlinkComputePoolEnvironment(); + o.id = id; + return o; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/GetFlinkComputePoolEnvironment.java b/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/GetFlinkComputePoolEnvironment.java new file mode 100644 index 00000000..9c35dd29 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/GetFlinkComputePoolEnvironment.java @@ -0,0 +1,58 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.confluentcloud.outputs; + +import com.pulumi.core.annotations.CustomType; +import java.lang.String; +import java.util.Objects; + +@CustomType +public final class GetFlinkComputePoolEnvironment { + /** + * @return The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. + * + * > **Note:** Exactly one from the `id` and `display_name` attributes must be specified. + * + */ + private String id; + + private GetFlinkComputePoolEnvironment() {} + /** + * @return The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. + * + * > **Note:** Exactly one from the `id` and `display_name` attributes must be specified. 
+ * + */ + public String id() { + return this.id; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(GetFlinkComputePoolEnvironment defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String id; + public Builder() {} + public Builder(GetFlinkComputePoolEnvironment defaults) { + Objects.requireNonNull(defaults); + this.id = defaults.id; + } + + @CustomType.Setter + public Builder id(String id) { + this.id = Objects.requireNonNull(id); + return this; + } + public GetFlinkComputePoolEnvironment build() { + final var o = new GetFlinkComputePoolEnvironment(); + o.id = id; + return o; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/GetFlinkComputePoolResult.java b/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/GetFlinkComputePoolResult.java new file mode 100644 index 00000000..85c10688 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/GetFlinkComputePoolResult.java @@ -0,0 +1,256 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.confluentcloud.outputs; + +import com.pulumi.confluentcloud.outputs.GetFlinkComputePoolEnvironment; +import com.pulumi.core.annotations.CustomType; +import java.lang.Integer; +import java.lang.String; +import java.util.Objects; + +@CustomType +public final class GetFlinkComputePoolResult { + /** + * @return (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`. + * + */ + private String apiVersion; + /** + * @return (Required String) The cloud service provider that runs the Flink Compute Pool. + * + */ + private String cloud; + /** + * @return (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool. 
+ * + */ + private Integer currentCfu; + /** + * @return (Required String) The name of the Flink Compute Pool. + * + */ + private String displayName; + /** + * @return (Required Configuration Block) supports the following: + * + */ + private GetFlinkComputePoolEnvironment environment; + /** + * @return (Required String) The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`. + * + */ + private String id; + /** + * @return (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`. + * + */ + private String kind; + /** + * @return (Required Integer) Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. + * + */ + private Integer maxCfu; + /** + * @return (Required String) The cloud service provider region that hosts the Flink Compute Pool. + * + */ + private String region; + /** + * @return (Required String) The Confluent Resource Name of the Flink Compute Pool. + * + */ + private String resourceName; + /** + * @return (Required String) The API endpoint of the Flink Compute Pool. + * + */ + private String restEndpoint; + + private GetFlinkComputePoolResult() {} + /** + * @return (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`. + * + */ + public String apiVersion() { + return this.apiVersion; + } + /** + * @return (Required String) The cloud service provider that runs the Flink Compute Pool. + * + */ + public String cloud() { + return this.cloud; + } + /** + * @return (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool. + * + */ + public Integer currentCfu() { + return this.currentCfu; + } + /** + * @return (Required String) The name of the Flink Compute Pool. 
+ * + */ + public String displayName() { + return this.displayName; + } + /** + * @return (Required Configuration Block) supports the following: + * + */ + public GetFlinkComputePoolEnvironment environment() { + return this.environment; + } + /** + * @return (Required String) The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`. + * + */ + public String id() { + return this.id; + } + /** + * @return (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`. + * + */ + public String kind() { + return this.kind; + } + /** + * @return (Required Integer) Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. + * + */ + public Integer maxCfu() { + return this.maxCfu; + } + /** + * @return (Required String) The cloud service provider region that hosts the Flink Compute Pool. + * + */ + public String region() { + return this.region; + } + /** + * @return (Required String) The Confluent Resource Name of the Flink Compute Pool. + * + */ + public String resourceName() { + return this.resourceName; + } + /** + * @return (Required String) The API endpoint of the Flink Compute Pool. 
+ * + */ + public String restEndpoint() { + return this.restEndpoint; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(GetFlinkComputePoolResult defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String apiVersion; + private String cloud; + private Integer currentCfu; + private String displayName; + private GetFlinkComputePoolEnvironment environment; + private String id; + private String kind; + private Integer maxCfu; + private String region; + private String resourceName; + private String restEndpoint; + public Builder() {} + public Builder(GetFlinkComputePoolResult defaults) { + Objects.requireNonNull(defaults); + this.apiVersion = defaults.apiVersion; + this.cloud = defaults.cloud; + this.currentCfu = defaults.currentCfu; + this.displayName = defaults.displayName; + this.environment = defaults.environment; + this.id = defaults.id; + this.kind = defaults.kind; + this.maxCfu = defaults.maxCfu; + this.region = defaults.region; + this.resourceName = defaults.resourceName; + this.restEndpoint = defaults.restEndpoint; + } + + @CustomType.Setter + public Builder apiVersion(String apiVersion) { + this.apiVersion = Objects.requireNonNull(apiVersion); + return this; + } + @CustomType.Setter + public Builder cloud(String cloud) { + this.cloud = Objects.requireNonNull(cloud); + return this; + } + @CustomType.Setter + public Builder currentCfu(Integer currentCfu) { + this.currentCfu = Objects.requireNonNull(currentCfu); + return this; + } + @CustomType.Setter + public Builder displayName(String displayName) { + this.displayName = Objects.requireNonNull(displayName); + return this; + } + @CustomType.Setter + public Builder environment(GetFlinkComputePoolEnvironment environment) { + this.environment = Objects.requireNonNull(environment); + return this; + } + @CustomType.Setter + public Builder id(String id) { + this.id = Objects.requireNonNull(id); + 
return this; + } + @CustomType.Setter + public Builder kind(String kind) { + this.kind = Objects.requireNonNull(kind); + return this; + } + @CustomType.Setter + public Builder maxCfu(Integer maxCfu) { + this.maxCfu = Objects.requireNonNull(maxCfu); + return this; + } + @CustomType.Setter + public Builder region(String region) { + this.region = Objects.requireNonNull(region); + return this; + } + @CustomType.Setter + public Builder resourceName(String resourceName) { + this.resourceName = Objects.requireNonNull(resourceName); + return this; + } + @CustomType.Setter + public Builder restEndpoint(String restEndpoint) { + this.restEndpoint = Objects.requireNonNull(restEndpoint); + return this; + } + public GetFlinkComputePoolResult build() { + final var o = new GetFlinkComputePoolResult(); + o.apiVersion = apiVersion; + o.cloud = cloud; + o.currentCfu = currentCfu; + o.displayName = displayName; + o.environment = environment; + o.id = id; + o.kind = kind; + o.maxCfu = maxCfu; + o.region = region; + o.resourceName = resourceName; + o.restEndpoint = restEndpoint; + return o; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/SchemaExporterCredentials.java b/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/SchemaExporterCredentials.java new file mode 100644 index 00000000..93047880 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/SchemaExporterCredentials.java @@ -0,0 +1,74 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.confluentcloud.outputs; + +import com.pulumi.core.annotations.CustomType; +import java.lang.String; +import java.util.Objects; + +@CustomType +public final class SchemaExporterCredentials { + /** + * @return The Schema Registry API Key. + * + */ + private String key; + /** + * @return The Schema Registry API Secret. 
+ * + */ + private String secret; + + private SchemaExporterCredentials() {} + /** + * @return The Schema Registry API Key. + * + */ + public String key() { + return this.key; + } + /** + * @return The Schema Registry API Secret. + * + */ + public String secret() { + return this.secret; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(SchemaExporterCredentials defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String key; + private String secret; + public Builder() {} + public Builder(SchemaExporterCredentials defaults) { + Objects.requireNonNull(defaults); + this.key = defaults.key; + this.secret = defaults.secret; + } + + @CustomType.Setter + public Builder key(String key) { + this.key = Objects.requireNonNull(key); + return this; + } + @CustomType.Setter + public Builder secret(String secret) { + this.secret = Objects.requireNonNull(secret); + return this; + } + public SchemaExporterCredentials build() { + final var o = new SchemaExporterCredentials(); + o.key = key; + o.secret = secret; + return o; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/SchemaExporterDestinationSchemaRegistryCluster.java b/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/SchemaExporterDestinationSchemaRegistryCluster.java new file mode 100644 index 00000000..28e57c38 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/SchemaExporterDestinationSchemaRegistryCluster.java @@ -0,0 +1,67 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.confluentcloud.outputs; + +import com.pulumi.confluentcloud.outputs.SchemaExporterDestinationSchemaRegistryClusterCredentials; +import com.pulumi.core.annotations.CustomType; +import java.lang.String; +import java.util.Objects; + +@CustomType +public final class SchemaExporterDestinationSchemaRegistryCluster { + private SchemaExporterDestinationSchemaRegistryClusterCredentials credentials; + /** + * @return The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + * + */ + private String restEndpoint; + + private SchemaExporterDestinationSchemaRegistryCluster() {} + public SchemaExporterDestinationSchemaRegistryClusterCredentials credentials() { + return this.credentials; + } + /** + * @return The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + * + */ + public String restEndpoint() { + return this.restEndpoint; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(SchemaExporterDestinationSchemaRegistryCluster defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private SchemaExporterDestinationSchemaRegistryClusterCredentials credentials; + private String restEndpoint; + public Builder() {} + public Builder(SchemaExporterDestinationSchemaRegistryCluster defaults) { + Objects.requireNonNull(defaults); + this.credentials = defaults.credentials; + this.restEndpoint = defaults.restEndpoint; + } + + @CustomType.Setter + public Builder credentials(SchemaExporterDestinationSchemaRegistryClusterCredentials credentials) { + this.credentials = Objects.requireNonNull(credentials); + return this; + } + @CustomType.Setter + public Builder restEndpoint(String restEndpoint) { + this.restEndpoint = Objects.requireNonNull(restEndpoint); + return this; + } + public 
SchemaExporterDestinationSchemaRegistryCluster build() { + final var o = new SchemaExporterDestinationSchemaRegistryCluster(); + o.credentials = credentials; + o.restEndpoint = restEndpoint; + return o; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/SchemaExporterDestinationSchemaRegistryClusterCredentials.java b/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/SchemaExporterDestinationSchemaRegistryClusterCredentials.java new file mode 100644 index 00000000..89fabd1e --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/SchemaExporterDestinationSchemaRegistryClusterCredentials.java @@ -0,0 +1,74 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.confluentcloud.outputs; + +import com.pulumi.core.annotations.CustomType; +import java.lang.String; +import java.util.Objects; + +@CustomType +public final class SchemaExporterDestinationSchemaRegistryClusterCredentials { + /** + * @return The Schema Registry API Key. + * + */ + private String key; + /** + * @return The Schema Registry API Secret. + * + */ + private String secret; + + private SchemaExporterDestinationSchemaRegistryClusterCredentials() {} + /** + * @return The Schema Registry API Key. + * + */ + public String key() { + return this.key; + } + /** + * @return The Schema Registry API Secret. 
+ * + */ + public String secret() { + return this.secret; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(SchemaExporterDestinationSchemaRegistryClusterCredentials defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String key; + private String secret; + public Builder() {} + public Builder(SchemaExporterDestinationSchemaRegistryClusterCredentials defaults) { + Objects.requireNonNull(defaults); + this.key = defaults.key; + this.secret = defaults.secret; + } + + @CustomType.Setter + public Builder key(String key) { + this.key = Objects.requireNonNull(key); + return this; + } + @CustomType.Setter + public Builder secret(String secret) { + this.secret = Objects.requireNonNull(secret); + return this; + } + public SchemaExporterDestinationSchemaRegistryClusterCredentials build() { + final var o = new SchemaExporterDestinationSchemaRegistryClusterCredentials(); + o.key = key; + o.secret = secret; + return o; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/SchemaExporterSchemaRegistryCluster.java b/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/SchemaExporterSchemaRegistryCluster.java new file mode 100644 index 00000000..0029775b --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/confluentcloud/outputs/SchemaExporterSchemaRegistryCluster.java @@ -0,0 +1,54 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.confluentcloud.outputs; + +import com.pulumi.core.annotations.CustomType; +import java.lang.String; +import java.util.Objects; + +@CustomType +public final class SchemaExporterSchemaRegistryCluster { + /** + * @return The ID of the Schema Registry cluster, for example, `lsrc-abc123`. 
+ * + */ + private String id; + + private SchemaExporterSchemaRegistryCluster() {} + /** + * @return The ID of the Schema Registry cluster, for example, `lsrc-abc123`. + * + */ + public String id() { + return this.id; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(SchemaExporterSchemaRegistryCluster defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String id; + public Builder() {} + public Builder(SchemaExporterSchemaRegistryCluster defaults) { + Objects.requireNonNull(defaults); + this.id = defaults.id; + } + + @CustomType.Setter + public Builder id(String id) { + this.id = Objects.requireNonNull(id); + return this; + } + public SchemaExporterSchemaRegistryCluster build() { + final var o = new SchemaExporterSchemaRegistryCluster(); + o.id = id; + return o; + } + } +} diff --git a/sdk/nodejs/flinkComputePool.ts b/sdk/nodejs/flinkComputePool.ts new file mode 100644 index 00000000..5f537673 --- /dev/null +++ b/sdk/nodejs/flinkComputePool.ts @@ -0,0 +1,230 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +import * as pulumi from "@pulumi/pulumi"; +import * as inputs from "./types/input"; +import * as outputs from "./types/output"; +import * as utilities from "./utilities"; + +/** + * ## Example Usage + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as confluentcloud from "@pulumi/confluentcloud"; + * + * const development = new confluentcloud.Environment("development", {}); + * const main = new confluentcloud.FlinkComputePool("main", { + * displayName: "standard_compute_pool", + * cloud: "AWS", + * region: "us-east-1", + * maxCfu: 5, + * environment: { + * id: development.id, + * }, + * }); + * ``` + * + * ## Import + * + * You can import a Flink Compute Pool by using Environment ID and Flink Compute Pool ID, in the format `/`. The following example shows how to import a Flink Compute Pool$ export CONFLUENT_CLOUD_API_KEY="" $ export CONFLUENT_CLOUD_API_SECRET="" + * + * ```sh + * $ pulumi import confluentcloud:index/flinkComputePool:FlinkComputePool main env-abc123/lfcp-abc123 + * ``` + * + * !> **Warning:** Do not forget to delete terminal command history afterwards for security purposes. + */ +export class FlinkComputePool extends pulumi.CustomResource { + /** + * Get an existing FlinkComputePool resource's state with the given name, ID, and optional extra + * properties used to qualify the lookup. + * + * @param name The _unique_ name of the resulting resource. + * @param id The _unique_ provider ID of the resource to lookup. + * @param state Any extra arguments used during the lookup. + * @param opts Optional settings to control the behavior of the CustomResource. 
+ */ + public static get(name: string, id: pulumi.Input, state?: FlinkComputePoolState, opts?: pulumi.CustomResourceOptions): FlinkComputePool { + return new FlinkComputePool(name, state, { ...opts, id: id }); + } + + /** @internal */ + public static readonly __pulumiType = 'confluentcloud:index/flinkComputePool:FlinkComputePool'; + + /** + * Returns true if the given object is an instance of FlinkComputePool. This is designed to work even + * when multiple copies of the Pulumi SDK have been loaded into the same process. + */ + public static isInstance(obj: any): obj is FlinkComputePool { + if (obj === undefined || obj === null) { + return false; + } + return obj['__pulumiType'] === FlinkComputePool.__pulumiType; + } + + /** + * (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`. + */ + public /*out*/ readonly apiVersion!: pulumi.Output; + /** + * The cloud service provider that runs the Flink Compute Pool. + */ + public readonly cloud!: pulumi.Output; + /** + * (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool. + */ + public /*out*/ readonly currentCfu!: pulumi.Output; + /** + * The name of the Flink Compute Pool. + */ + public readonly displayName!: pulumi.Output; + /** + * Environment objects represent an isolated namespace for your Confluent resources for organizational purposes. + */ + public readonly environment!: pulumi.Output; + /** + * (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`. + */ + public /*out*/ readonly kind!: pulumi.Output; + /** + * Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`. + */ + public readonly maxCfu!: pulumi.Output; + /** + * The cloud service provider region that hosts the Flink Compute Pool. 
+ */ + public readonly region!: pulumi.Output; + /** + * (Required String) The Confluent Resource Name of the Flink Compute Pool. + */ + public /*out*/ readonly resourceName!: pulumi.Output; + /** + * (Required String) The API endpoint of the Flink Compute Pool. + */ + public /*out*/ readonly restEndpoint!: pulumi.Output; + + /** + * Create a FlinkComputePool resource with the given unique name, arguments, and options. + * + * @param name The _unique_ name of the resource. + * @param args The arguments to use to populate this resource's properties. + * @param opts A bag of options that control this resource's behavior. + */ + constructor(name: string, args: FlinkComputePoolArgs, opts?: pulumi.CustomResourceOptions) + constructor(name: string, argsOrState?: FlinkComputePoolArgs | FlinkComputePoolState, opts?: pulumi.CustomResourceOptions) { + let resourceInputs: pulumi.Inputs = {}; + opts = opts || {}; + if (opts.id) { + const state = argsOrState as FlinkComputePoolState | undefined; + resourceInputs["apiVersion"] = state ? state.apiVersion : undefined; + resourceInputs["cloud"] = state ? state.cloud : undefined; + resourceInputs["currentCfu"] = state ? state.currentCfu : undefined; + resourceInputs["displayName"] = state ? state.displayName : undefined; + resourceInputs["environment"] = state ? state.environment : undefined; + resourceInputs["kind"] = state ? state.kind : undefined; + resourceInputs["maxCfu"] = state ? state.maxCfu : undefined; + resourceInputs["region"] = state ? state.region : undefined; + resourceInputs["resourceName"] = state ? state.resourceName : undefined; + resourceInputs["restEndpoint"] = state ? 
state.restEndpoint : undefined; + } else { + const args = argsOrState as FlinkComputePoolArgs | undefined; + if ((!args || args.cloud === undefined) && !opts.urn) { + throw new Error("Missing required property 'cloud'"); + } + if ((!args || args.displayName === undefined) && !opts.urn) { + throw new Error("Missing required property 'displayName'"); + } + if ((!args || args.environment === undefined) && !opts.urn) { + throw new Error("Missing required property 'environment'"); + } + if ((!args || args.region === undefined) && !opts.urn) { + throw new Error("Missing required property 'region'"); + } + resourceInputs["cloud"] = args ? args.cloud : undefined; + resourceInputs["displayName"] = args ? args.displayName : undefined; + resourceInputs["environment"] = args ? args.environment : undefined; + resourceInputs["maxCfu"] = args ? args.maxCfu : undefined; + resourceInputs["region"] = args ? args.region : undefined; + resourceInputs["apiVersion"] = undefined /*out*/; + resourceInputs["currentCfu"] = undefined /*out*/; + resourceInputs["kind"] = undefined /*out*/; + resourceInputs["resourceName"] = undefined /*out*/; + resourceInputs["restEndpoint"] = undefined /*out*/; + } + opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts); + super(FlinkComputePool.__pulumiType, name, resourceInputs, opts); + } +} + +/** + * Input properties used for looking up and filtering FlinkComputePool resources. + */ +export interface FlinkComputePoolState { + /** + * (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`. + */ + apiVersion?: pulumi.Input; + /** + * The cloud service provider that runs the Flink Compute Pool. + */ + cloud?: pulumi.Input; + /** + * (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool. + */ + currentCfu?: pulumi.Input; + /** + * The name of the Flink Compute Pool. 
+ */ + displayName?: pulumi.Input; + /** + * Environment objects represent an isolated namespace for your Confluent resources for organizational purposes. + */ + environment?: pulumi.Input; + /** + * (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`. + */ + kind?: pulumi.Input; + /** + * Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`. + */ + maxCfu?: pulumi.Input; + /** + * The cloud service provider region that hosts the Flink Compute Pool. + */ + region?: pulumi.Input; + /** + * (Required String) The Confluent Resource Name of the Flink Compute Pool. + */ + resourceName?: pulumi.Input; + /** + * (Required String) The API endpoint of the Flink Compute Pool. + */ + restEndpoint?: pulumi.Input; +} + +/** + * The set of arguments for constructing a FlinkComputePool resource. + */ +export interface FlinkComputePoolArgs { + /** + * The cloud service provider that runs the Flink Compute Pool. + */ + cloud: pulumi.Input; + /** + * The name of the Flink Compute Pool. + */ + displayName: pulumi.Input; + /** + * Environment objects represent an isolated namespace for your Confluent resources for organizational purposes. + */ + environment: pulumi.Input; + /** + * Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`. + */ + maxCfu?: pulumi.Input; + /** + * The cloud service provider region that hosts the Flink Compute Pool. + */ + region: pulumi.Input; +} diff --git a/sdk/nodejs/getFlinkComputePool.ts b/sdk/nodejs/getFlinkComputePool.ts new file mode 100644 index 00000000..184a7fce --- /dev/null +++ b/sdk/nodejs/getFlinkComputePool.ts @@ -0,0 +1,170 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +import * as pulumi from "@pulumi/pulumi"; +import * as inputs from "./types/input"; +import * as outputs from "./types/output"; +import * as utilities from "./utilities"; + +/** + * [![Early Access](https://img.shields.io/badge/Lifecycle%20Stage-Early%20Access-%2300afba)](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy) + * + * > **Note:** `confluentcloud.FlinkComputePool` data source is available in **Early Access** for early adopters. Early Access features are introduced to gather customer feedback. This feature should be used only for evaluation and non-production testing purposes or to provide feedback to Confluent, particularly as it becomes more widely available in follow-on editions.\ + * **Early Access** features are intended for evaluation use in development and testing environments only, and not for production use. The warranty, SLA, and Support Services provisions of your agreement with Confluent do not apply to Early Access features. Early Access features are considered to be a Proof of Concept as defined in the Confluent Cloud Terms of Service. Confluent may discontinue providing early access releases of the Early Access features at any time in Confluent’s sole discretion. + * + * `confluentcloud.FlinkComputePool` describes a Flink Compute Pool data source. 
+ * + * ## Example Usage + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as confluentcloud from "@pulumi/confluentcloud"; + * + * const exampleUsingIdFlinkComputePool = confluentcloud.getFlinkComputePool({ + * id: "lfcp-abc123", + * environment: { + * id: "env-xyz456", + * }, + * }); + * export const exampleUsingId = exampleUsingIdFlinkComputePool; + * const exampleUsingNameFlinkComputePool = confluentcloud.getFlinkComputePool({ + * displayName: "my_compute_pool", + * environment: { + * id: "env-xyz456", + * }, + * }); + * export const exampleUsingName = exampleUsingNameFlinkComputePool; + * ``` + */ +export function getFlinkComputePool(args: GetFlinkComputePoolArgs, opts?: pulumi.InvokeOptions): Promise { + + opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts || {}); + return pulumi.runtime.invoke("confluentcloud:index/getFlinkComputePool:getFlinkComputePool", { + "displayName": args.displayName, + "environment": args.environment, + "id": args.id, + }, opts); +} + +/** + * A collection of arguments for invoking getFlinkComputePool. + */ +export interface GetFlinkComputePoolArgs { + /** + * A human-readable name for the Flink Compute Pool. + */ + displayName?: string; + /** + * (Required Configuration Block) supports the following: + */ + environment: inputs.GetFlinkComputePoolEnvironment; + /** + * The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. + * + * > **Note:** Exactly one from the `id` and `displayName` attributes must be specified. + */ + id?: string; +} + +/** + * A collection of values returned by getFlinkComputePool. + */ +export interface GetFlinkComputePoolResult { + /** + * (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`. + */ + readonly apiVersion: string; + /** + * (Required String) The cloud service provider that runs the Flink Compute Pool. 
+ */ + readonly cloud: string; + /** + * (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool. + */ + readonly currentCfu: number; + /** + * (Required String) The name of the Flink Compute Pool. + */ + readonly displayName: string; + /** + * (Required Configuration Block) supports the following: + */ + readonly environment: outputs.GetFlinkComputePoolEnvironment; + /** + * (Required String) The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`. + */ + readonly id: string; + /** + * (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`. + */ + readonly kind: string; + /** + * (Required Integer) Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. + */ + readonly maxCfu: number; + /** + * (Required String) The cloud service provider region that hosts the Flink Compute Pool. + */ + readonly region: string; + /** + * (Required String) The Confluent Resource Name of the Flink Compute Pool. + */ + readonly resourceName: string; + /** + * (Required String) The API endpoint of the Flink Compute Pool. + */ + readonly restEndpoint: string; +} +/** + * [![Early Access](https://img.shields.io/badge/Lifecycle%20Stage-Early%20Access-%2300afba)](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy) + * + * > **Note:** `confluentcloud.FlinkComputePool` data source is available in **Early Access** for early adopters. Early Access features are introduced to gather customer feedback. This feature should be used only for evaluation and non-production testing purposes or to provide feedback to Confluent, particularly as it becomes more widely available in follow-on editions.\ + * **Early Access** features are intended for evaluation use in development and testing environments only, and not for production use. 
The warranty, SLA, and Support Services provisions of your agreement with Confluent do not apply to Early Access features. Early Access features are considered to be a Proof of Concept as defined in the Confluent Cloud Terms of Service. Confluent may discontinue providing early access releases of the Early Access features at any time in Confluent’s sole discretion. + * + * `confluentcloud.FlinkComputePool` describes a Flink Compute Pool data source. + * + * ## Example Usage + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as confluentcloud from "@pulumi/confluentcloud"; + * + * const exampleUsingIdFlinkComputePool = confluentcloud.getFlinkComputePool({ + * id: "lfcp-abc123", + * environment: { + * id: "env-xyz456", + * }, + * }); + * export const exampleUsingId = exampleUsingIdFlinkComputePool; + * const exampleUsingNameFlinkComputePool = confluentcloud.getFlinkComputePool({ + * displayName: "my_compute_pool", + * environment: { + * id: "env-xyz456", + * }, + * }); + * export const exampleUsingName = exampleUsingNameFlinkComputePool; + * ``` + */ +export function getFlinkComputePoolOutput(args: GetFlinkComputePoolOutputArgs, opts?: pulumi.InvokeOptions): pulumi.Output { + return pulumi.output(args).apply((a: any) => getFlinkComputePool(a, opts)) +} + +/** + * A collection of arguments for invoking getFlinkComputePool. + */ +export interface GetFlinkComputePoolOutputArgs { + /** + * A human-readable name for the Flink Compute Pool. + */ + displayName?: pulumi.Input; + /** + * (Required Configuration Block) supports the following: + */ + environment: pulumi.Input; + /** + * The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. + * + * > **Note:** Exactly one from the `id` and `displayName` attributes must be specified. 
+ */ + id?: pulumi.Input; +} diff --git a/sdk/nodejs/getRoleBinding.ts b/sdk/nodejs/getRoleBinding.ts index d3b5ffe3..c03d9e7a 100644 --- a/sdk/nodejs/getRoleBinding.ts +++ b/sdk/nodejs/getRoleBinding.ts @@ -9,6 +9,8 @@ import * as utilities from "./utilities"; * * `confluentcloud.RoleBinding` describes a Role Binding. * + * > **Note:** For more information on the Role Bindings, see [Predefined RBAC roles in Confluent Cloud](https://docs.confluent.io/cloud/current/access-management/access-control/rbac/predefined-rbac-roles.html). + * * ## Example Usage * * ```typescript @@ -62,6 +64,8 @@ export interface GetRoleBindingResult { * * `confluentcloud.RoleBinding` describes a Role Binding. * + * > **Note:** For more information on the Role Bindings, see [Predefined RBAC roles in Confluent Cloud](https://docs.confluent.io/cloud/current/access-management/access-control/rbac/predefined-rbac-roles.html). + * * ## Example Usage * * ```typescript diff --git a/sdk/nodejs/index.ts b/sdk/nodejs/index.ts index 2df44429..918612aa 100644 --- a/sdk/nodejs/index.ts +++ b/sdk/nodejs/index.ts @@ -40,6 +40,11 @@ export type Environment = import("./environment").Environment; export const Environment: typeof import("./environment").Environment = null as any; utilities.lazyLoad(exports, ["Environment"], () => require("./environment")); +export { FlinkComputePoolArgs, FlinkComputePoolState } from "./flinkComputePool"; +export type FlinkComputePool = import("./flinkComputePool").FlinkComputePool; +export const FlinkComputePool: typeof import("./flinkComputePool").FlinkComputePool = null as any; +utilities.lazyLoad(exports, ["FlinkComputePool"], () => require("./flinkComputePool")); + export { GetBusinessMetadataArgs, GetBusinessMetadataResult, GetBusinessMetadataOutputArgs } from "./getBusinessMetadata"; export const getBusinessMetadata: typeof import("./getBusinessMetadata").getBusinessMetadata = null as any; export const getBusinessMetadataOutput: typeof 
import("./getBusinessMetadata").getBusinessMetadataOutput = null as any; @@ -65,6 +70,11 @@ export const getEnvironments: typeof import("./getEnvironments").getEnvironments export const getEnvironmentsOutput: typeof import("./getEnvironments").getEnvironmentsOutput = null as any; utilities.lazyLoad(exports, ["getEnvironments","getEnvironmentsOutput"], () => require("./getEnvironments")); +export { GetFlinkComputePoolArgs, GetFlinkComputePoolResult, GetFlinkComputePoolOutputArgs } from "./getFlinkComputePool"; +export const getFlinkComputePool: typeof import("./getFlinkComputePool").getFlinkComputePool = null as any; +export const getFlinkComputePoolOutput: typeof import("./getFlinkComputePool").getFlinkComputePoolOutput = null as any; +utilities.lazyLoad(exports, ["getFlinkComputePool","getFlinkComputePoolOutput"], () => require("./getFlinkComputePool")); + export { GetIdentityPoolArgs, GetIdentityPoolResult, GetIdentityPoolOutputArgs } from "./getIdentityPool"; export const getIdentityPool: typeof import("./getIdentityPool").getIdentityPool = null as any; export const getIdentityPoolOutput: typeof import("./getIdentityPool").getIdentityPoolOutput = null as any; @@ -320,6 +330,11 @@ export type Schema = import("./schema").Schema; export const Schema: typeof import("./schema").Schema = null as any; utilities.lazyLoad(exports, ["Schema"], () => require("./schema")); +export { SchemaExporterArgs, SchemaExporterState } from "./schemaExporter"; +export type SchemaExporter = import("./schemaExporter").SchemaExporter; +export const SchemaExporter: typeof import("./schemaExporter").SchemaExporter = null as any; +utilities.lazyLoad(exports, ["SchemaExporter"], () => require("./schemaExporter")); + export { SchemaRegistryClusterArgs, SchemaRegistryClusterState } from "./schemaRegistryCluster"; export type SchemaRegistryCluster = import("./schemaRegistryCluster").SchemaRegistryCluster; export const SchemaRegistryCluster: typeof 
import("./schemaRegistryCluster").SchemaRegistryCluster = null as any; @@ -398,6 +413,8 @@ const _module = { return new Connector(name, undefined, { urn }) case "confluentcloud:index/environment:Environment": return new Environment(name, undefined, { urn }) + case "confluentcloud:index/flinkComputePool:FlinkComputePool": + return new FlinkComputePool(name, undefined, { urn }) case "confluentcloud:index/identityPool:IdentityPool": return new IdentityPool(name, undefined, { urn }) case "confluentcloud:index/identityProvider:IdentityProvider": @@ -436,6 +453,8 @@ const _module = { return new RoleBinding(name, undefined, { urn }) case "confluentcloud:index/schema:Schema": return new Schema(name, undefined, { urn }) + case "confluentcloud:index/schemaExporter:SchemaExporter": + return new SchemaExporter(name, undefined, { urn }) case "confluentcloud:index/schemaRegistryCluster:SchemaRegistryCluster": return new SchemaRegistryCluster(name, undefined, { urn }) case "confluentcloud:index/schemaRegistryClusterConfig:SchemaRegistryClusterConfig": @@ -468,6 +487,7 @@ pulumi.runtime.registerResourceModule("confluentcloud", "index/byokKey", _module pulumi.runtime.registerResourceModule("confluentcloud", "index/clusterLink", _module) pulumi.runtime.registerResourceModule("confluentcloud", "index/connector", _module) pulumi.runtime.registerResourceModule("confluentcloud", "index/environment", _module) +pulumi.runtime.registerResourceModule("confluentcloud", "index/flinkComputePool", _module) pulumi.runtime.registerResourceModule("confluentcloud", "index/identityPool", _module) pulumi.runtime.registerResourceModule("confluentcloud", "index/identityProvider", _module) pulumi.runtime.registerResourceModule("confluentcloud", "index/invitation", _module) @@ -487,6 +507,7 @@ pulumi.runtime.registerResourceModule("confluentcloud", "index/privateLinkAttach pulumi.runtime.registerResourceModule("confluentcloud", "index/privateLinkAttachmentConnection", _module) 
pulumi.runtime.registerResourceModule("confluentcloud", "index/roleBinding", _module) pulumi.runtime.registerResourceModule("confluentcloud", "index/schema", _module) +pulumi.runtime.registerResourceModule("confluentcloud", "index/schemaExporter", _module) pulumi.runtime.registerResourceModule("confluentcloud", "index/schemaRegistryCluster", _module) pulumi.runtime.registerResourceModule("confluentcloud", "index/schemaRegistryClusterConfig", _module) pulumi.runtime.registerResourceModule("confluentcloud", "index/schemaRegistryClusterMode", _module) diff --git a/sdk/nodejs/kafkaAcl.ts b/sdk/nodejs/kafkaAcl.ts index 0245b964..79544cd1 100644 --- a/sdk/nodejs/kafkaAcl.ts +++ b/sdk/nodejs/kafkaAcl.ts @@ -9,7 +9,7 @@ import * as utilities from "./utilities"; /** * ## Import * - * You can import Kafka ACLs by using the Kafka cluster ID and attributes of `confluent_kafka_acl` resource in the format `/######`, for exampleOption #1Manage multiple Kafka clusters in the same Terraform workspace $ export CONFLUENT_CLOUD_API_KEY="" $ export CONFLUENT_CLOUD_API_SECRET="" $ export IMPORT_KAFKA_API_KEY="" $ export IMPORT_KAFKA_API_SECRET="" $ export IMPORT_KAFKA_REST_ENDPOINT="" + * You can import Kafka ACLs by using the Kafka cluster ID and attributes of `confluent_kafka_acl` resource in the format `/######`, for exampleOption #1Manage multiple Kafka clusters in the same Terraform workspace $ export IMPORT_KAFKA_API_KEY="" $ export IMPORT_KAFKA_API_SECRET="" $ export IMPORT_KAFKA_REST_ENDPOINT="" * * ```sh * $ pulumi import confluentcloud:index/kafkaAcl:KafkaAcl describe-cluster "lkc-12345/CLUSTER#kafka-cluster#LITERAL#User:sa-xyz123#*#DESCRIBE#ALLOW" diff --git a/sdk/nodejs/schemaExporter.ts b/sdk/nodejs/schemaExporter.ts new file mode 100644 index 00000000..e8793f25 --- /dev/null +++ b/sdk/nodejs/schemaExporter.ts @@ -0,0 +1,235 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. 
*** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +import * as pulumi from "@pulumi/pulumi"; +import * as inputs from "./types/input"; +import * as outputs from "./types/output"; +import * as utilities from "./utilities"; + +/** + * ## Import + * + * You can import a Schema Exporter by using the Schema Registry cluster ID, Schema Exporter name in the format `/`, for example$ export IMPORT_SCHEMA_REGISTRY_API_KEY="" $ export IMPORT_SCHEMA_REGISTRY_API_SECRET="" $ export IMPORT_SCHEMA_REGISTRY_REST_ENDPOINT="" + * + * ```sh + * $ pulumi import confluentcloud:index/schemaExporter:SchemaExporter main lsrc-8wrx70/test-exporter + * ``` + * + * !> **Warning:** Do not forget to delete terminal command history afterwards for security purposes. + */ +export class SchemaExporter extends pulumi.CustomResource { + /** + * Get an existing SchemaExporter resource's state with the given name, ID, and optional extra + * properties used to qualify the lookup. + * + * @param name The _unique_ name of the resulting resource. + * @param id The _unique_ provider ID of the resource to lookup. + * @param state Any extra arguments used during the lookup. + * @param opts Optional settings to control the behavior of the CustomResource. + */ + public static get(name: string, id: pulumi.Input, state?: SchemaExporterState, opts?: pulumi.CustomResourceOptions): SchemaExporter { + return new SchemaExporter(name, state, { ...opts, id: id }); + } + + /** @internal */ + public static readonly __pulumiType = 'confluentcloud:index/schemaExporter:SchemaExporter'; + + /** + * Returns true if the given object is an instance of SchemaExporter. This is designed to work even + * when multiple copies of the Pulumi SDK have been loaded into the same process. 
+ */ + public static isInstance(obj: any): obj is SchemaExporter { + if (obj === undefined || obj === null) { + return false; + } + return obj['__pulumiType'] === SchemaExporter.__pulumiType; + } + + /** + * Block for custom *nonsensitive* configuration properties: + */ + public readonly config!: pulumi.Output<{[key: string]: string}>; + /** + * Customized context of the exporter if `contextType` is set to `CUSTOM`. + */ + public readonly context!: pulumi.Output; + /** + * Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`. + */ + public readonly contextType!: pulumi.Output; + /** + * The Cluster API Credentials. + */ + public readonly credentials!: pulumi.Output; + public readonly destinationSchemaRegistryCluster!: pulumi.Output; + /** + * The configuration setting name. + */ + public readonly name!: pulumi.Output; + /** + * The flag to control whether to reset the exporter when updating configs. Defaults to `false`. + */ + public readonly resetOnUpdate!: pulumi.Output; + /** + * The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + */ + public readonly restEndpoint!: pulumi.Output; + public readonly schemaRegistryCluster!: pulumi.Output; + /** + * The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`. + */ + public readonly status!: pulumi.Output; + /** + * Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dcOrders`. + */ + public readonly subjectRenameFormat!: pulumi.Output; + /** + * Name of each exporter subject. + */ + public readonly subjects!: pulumi.Output; + + /** + * Create a SchemaExporter resource with the given unique name, arguments, and options. + * + * @param name The _unique_ name of the resource. 
+ * @param args The arguments to use to populate this resource's properties. + * @param opts A bag of options that control this resource's behavior. + */ + constructor(name: string, args: SchemaExporterArgs, opts?: pulumi.CustomResourceOptions) + constructor(name: string, argsOrState?: SchemaExporterArgs | SchemaExporterState, opts?: pulumi.CustomResourceOptions) { + let resourceInputs: pulumi.Inputs = {}; + opts = opts || {}; + if (opts.id) { + const state = argsOrState as SchemaExporterState | undefined; + resourceInputs["config"] = state ? state.config : undefined; + resourceInputs["context"] = state ? state.context : undefined; + resourceInputs["contextType"] = state ? state.contextType : undefined; + resourceInputs["credentials"] = state ? state.credentials : undefined; + resourceInputs["destinationSchemaRegistryCluster"] = state ? state.destinationSchemaRegistryCluster : undefined; + resourceInputs["name"] = state ? state.name : undefined; + resourceInputs["resetOnUpdate"] = state ? state.resetOnUpdate : undefined; + resourceInputs["restEndpoint"] = state ? state.restEndpoint : undefined; + resourceInputs["schemaRegistryCluster"] = state ? state.schemaRegistryCluster : undefined; + resourceInputs["status"] = state ? state.status : undefined; + resourceInputs["subjectRenameFormat"] = state ? state.subjectRenameFormat : undefined; + resourceInputs["subjects"] = state ? state.subjects : undefined; + } else { + const args = argsOrState as SchemaExporterArgs | undefined; + if ((!args || args.destinationSchemaRegistryCluster === undefined) && !opts.urn) { + throw new Error("Missing required property 'destinationSchemaRegistryCluster'"); + } + resourceInputs["config"] = args ? args.config : undefined; + resourceInputs["context"] = args ? args.context : undefined; + resourceInputs["contextType"] = args ? args.contextType : undefined; + resourceInputs["credentials"] = args?.credentials ? 
pulumi.secret(args.credentials) : undefined; + resourceInputs["destinationSchemaRegistryCluster"] = args ? args.destinationSchemaRegistryCluster : undefined; + resourceInputs["name"] = args ? args.name : undefined; + resourceInputs["resetOnUpdate"] = args ? args.resetOnUpdate : undefined; + resourceInputs["restEndpoint"] = args ? args.restEndpoint : undefined; + resourceInputs["schemaRegistryCluster"] = args ? args.schemaRegistryCluster : undefined; + resourceInputs["status"] = args ? args.status : undefined; + resourceInputs["subjectRenameFormat"] = args ? args.subjectRenameFormat : undefined; + resourceInputs["subjects"] = args ? args.subjects : undefined; + } + opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts); + const secretOpts = { additionalSecretOutputs: ["credentials"] }; + opts = pulumi.mergeOptions(opts, secretOpts); + super(SchemaExporter.__pulumiType, name, resourceInputs, opts); + } +} + +/** + * Input properties used for looking up and filtering SchemaExporter resources. + */ +export interface SchemaExporterState { + /** + * Block for custom *nonsensitive* configuration properties: + */ + config?: pulumi.Input<{[key: string]: pulumi.Input}>; + /** + * Customized context of the exporter if `contextType` is set to `CUSTOM`. + */ + context?: pulumi.Input; + /** + * Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`. + */ + contextType?: pulumi.Input; + /** + * The Cluster API Credentials. + */ + credentials?: pulumi.Input; + destinationSchemaRegistryCluster?: pulumi.Input; + /** + * The configuration setting name. + */ + name?: pulumi.Input; + /** + * The flag to control whether to reset the exporter when updating configs. Defaults to `false`. + */ + resetOnUpdate?: pulumi.Input; + /** + * The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). 
+ */ + restEndpoint?: pulumi.Input; + schemaRegistryCluster?: pulumi.Input; + /** + * The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`. + */ + status?: pulumi.Input; + /** + * Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dcOrders`. + */ + subjectRenameFormat?: pulumi.Input; + /** + * Name of each exporter subject. + */ + subjects?: pulumi.Input[]>; +} + +/** + * The set of arguments for constructing a SchemaExporter resource. + */ +export interface SchemaExporterArgs { + /** + * Block for custom *nonsensitive* configuration properties: + */ + config?: pulumi.Input<{[key: string]: pulumi.Input}>; + /** + * Customized context of the exporter if `contextType` is set to `CUSTOM`. + */ + context?: pulumi.Input; + /** + * Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`. + */ + contextType?: pulumi.Input; + /** + * The Cluster API Credentials. + */ + credentials?: pulumi.Input; + destinationSchemaRegistryCluster: pulumi.Input; + /** + * The configuration setting name. + */ + name?: pulumi.Input; + /** + * The flag to control whether to reset the exporter when updating configs. Defaults to `false`. + */ + resetOnUpdate?: pulumi.Input; + /** + * The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + */ + restEndpoint?: pulumi.Input; + schemaRegistryCluster?: pulumi.Input; + /** + * The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`. + */ + status?: pulumi.Input; + /** + * Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. 
For example, `dc_${subject}` for the subject orders will map to the destination subject name `dcOrders`. + */ + subjectRenameFormat?: pulumi.Input; + /** + * Name of each exporter subject. + */ + subjects?: pulumi.Input[]>; +} diff --git a/sdk/nodejs/tsconfig.json b/sdk/nodejs/tsconfig.json index 00ecdd9f..40a0e21e 100644 --- a/sdk/nodejs/tsconfig.json +++ b/sdk/nodejs/tsconfig.json @@ -22,11 +22,13 @@ "config/vars.ts", "connector.ts", "environment.ts", + "flinkComputePool.ts", "getBusinessMetadata.ts", "getBusinessMetadataBinding.ts", "getByokKey.ts", "getEnvironment.ts", "getEnvironments.ts", + "getFlinkComputePool.ts", "getIdentityPool.ts", "getIdentityProvider.ts", "getInvitation.ts", @@ -79,6 +81,7 @@ "provider.ts", "roleBinding.ts", "schema.ts", + "schemaExporter.ts", "schemaRegistryCluster.ts", "schemaRegistryClusterConfig.ts", "schemaRegistryClusterMode.ts", diff --git a/sdk/nodejs/types/input.ts b/sdk/nodejs/types/input.ts index a8bfab40..1bf84b90 100644 --- a/sdk/nodejs/types/input.ts +++ b/sdk/nodejs/types/input.ts @@ -156,8 +156,6 @@ export interface ClusterLinkDestinationKafkaClusterCredentials { key: pulumi.Input; /** * The Kafka API Secret. - * - * > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). */ secret: pulumi.Input; } @@ -185,8 +183,6 @@ export interface ClusterLinkLocalKafkaClusterCredentials { key: pulumi.Input; /** * The Kafka API Secret. - * - * > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). */ secret: pulumi.Input; } @@ -214,8 +210,6 @@ export interface ClusterLinkRemoteKafkaClusterCredentials { key: pulumi.Input; /** * The Kafka API Secret. 
- * - * > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). */ secret: pulumi.Input; } @@ -243,8 +237,6 @@ export interface ClusterLinkSourceKafkaClusterCredentials { key: pulumi.Input; /** * The Kafka API Secret. - * - * > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). */ secret: pulumi.Input; } @@ -263,6 +255,13 @@ export interface ConnectorKafkaCluster { id: pulumi.Input; } +export interface FlinkComputePoolEnvironment { + /** + * The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`. + */ + id: pulumi.Input; +} + export interface GetBusinessMetadataBindingCredentials { /** * The Schema Registry API Key. @@ -339,6 +338,24 @@ export interface GetBusinessMetadataSchemaRegistryClusterArgs { id: pulumi.Input; } +export interface GetFlinkComputePoolEnvironment { + /** + * The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. + * + * > **Note:** Exactly one from the `id` and `displayName` attributes must be specified. + */ + id: string; +} + +export interface GetFlinkComputePoolEnvironmentArgs { + /** + * The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. + * + * > **Note:** Exactly one from the `id` and `displayName` attributes must be specified. + */ + id: pulumi.Input; +} + export interface GetIdentityPoolIdentityProvider { /** * The ID of the Identity Provider associated with the Identity Pool, for example, `op-abc123`. @@ -1555,6 +1572,43 @@ export interface SchemaCredentials { secret: pulumi.Input; } +export interface SchemaExporterCredentials { + /** + * The Schema Registry API Key. 
+ */ + key: pulumi.Input; + /** + * The Schema Registry API Secret. + */ + secret: pulumi.Input; +} + +export interface SchemaExporterDestinationSchemaRegistryCluster { + credentials: pulumi.Input; + /** + * The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + */ + restEndpoint: pulumi.Input; +} + +export interface SchemaExporterDestinationSchemaRegistryClusterCredentials { + /** + * The Schema Registry API Key. + */ + key: pulumi.Input; + /** + * The Schema Registry API Secret. + */ + secret: pulumi.Input; +} + +export interface SchemaExporterSchemaRegistryCluster { + /** + * The ID of the Schema Registry cluster, for example, `lsrc-abc123`. + */ + id: pulumi.Input; +} + export interface SchemaRegistryClusterConfigCredentials { /** * The Schema Registry API Key. diff --git a/sdk/nodejs/types/output.ts b/sdk/nodejs/types/output.ts index d841a119..72faa871 100644 --- a/sdk/nodejs/types/output.ts +++ b/sdk/nodejs/types/output.ts @@ -156,8 +156,6 @@ export interface ClusterLinkDestinationKafkaClusterCredentials { key: string; /** * The Kafka API Secret. - * - * > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). */ secret: string; } @@ -185,8 +183,6 @@ export interface ClusterLinkLocalKafkaClusterCredentials { key: string; /** * The Kafka API Secret. - * - * > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). */ secret: string; } @@ -214,8 +210,6 @@ export interface ClusterLinkRemoteKafkaClusterCredentials { key: string; /** * The Kafka API Secret. 
- * - * > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). */ secret: string; } @@ -243,8 +237,6 @@ export interface ClusterLinkSourceKafkaClusterCredentials { key: string; /** * The Kafka API Secret. - * - * > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). */ secret: string; } @@ -263,6 +255,13 @@ export interface ConnectorKafkaCluster { id: string; } +export interface FlinkComputePoolEnvironment { + /** + * The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`. + */ + id: string; +} + export interface GetBusinessMetadataAttributeDefinition { /** * (Optional String) The default value of this attribute. @@ -360,6 +359,15 @@ export interface GetByokKeyAzure { tenantId: string; } +export interface GetFlinkComputePoolEnvironment { + /** + * The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. + * + * > **Note:** Exactly one from the `id` and `displayName` attributes must be specified. + */ + id: string; +} + export interface GetIdentityPoolIdentityProvider { /** * The ID of the Identity Provider associated with the Identity Pool, for example, `op-abc123`. @@ -1601,6 +1609,43 @@ export interface SchemaCredentials { secret: string; } +export interface SchemaExporterCredentials { + /** + * The Schema Registry API Key. + */ + key: string; + /** + * The Schema Registry API Secret. 
+ */ + secret: string; +} + +export interface SchemaExporterDestinationSchemaRegistryCluster { + credentials: outputs.SchemaExporterDestinationSchemaRegistryClusterCredentials; + /** + * The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + */ + restEndpoint: string; +} + +export interface SchemaExporterDestinationSchemaRegistryClusterCredentials { + /** + * The Schema Registry API Key. + */ + key: string; + /** + * The Schema Registry API Secret. + */ + secret: string; +} + +export interface SchemaExporterSchemaRegistryCluster { + /** + * The ID of the Schema Registry cluster, for example, `lsrc-abc123`. + */ + id: string; +} + export interface SchemaRegistryClusterConfigCredentials { /** * The Schema Registry API Key. diff --git a/sdk/python/pulumi_confluentcloud/__init__.py b/sdk/python/pulumi_confluentcloud/__init__.py index 7e9fe0be..a8130c47 100644 --- a/sdk/python/pulumi_confluentcloud/__init__.py +++ b/sdk/python/pulumi_confluentcloud/__init__.py @@ -12,11 +12,13 @@ from .cluster_link import * from .connector import * from .environment import * +from .flink_compute_pool import * from .get_business_metadata import * from .get_business_metadata_binding import * from .get_byok_key import * from .get_environment import * from .get_environments import * +from .get_flink_compute_pool import * from .get_identity_pool import * from .get_identity_provider import * from .get_invitation import * @@ -68,6 +70,7 @@ from .provider import * from .role_binding import * from .schema import * +from .schema_exporter import * from .schema_registry_cluster import * from .schema_registry_cluster_config import * from .schema_registry_cluster_mode import * @@ -147,6 +150,14 @@ "confluentcloud:index/environment:Environment": "Environment" } }, + { + "pkg": "confluentcloud", + "mod": "index/flinkComputePool", + "fqn": "pulumi_confluentcloud", + "classes": { + 
"confluentcloud:index/flinkComputePool:FlinkComputePool": "FlinkComputePool" + } + }, { "pkg": "confluentcloud", "mod": "index/identityPool", @@ -299,6 +310,14 @@ "confluentcloud:index/schema:Schema": "Schema" } }, + { + "pkg": "confluentcloud", + "mod": "index/schemaExporter", + "fqn": "pulumi_confluentcloud", + "classes": { + "confluentcloud:index/schemaExporter:SchemaExporter": "SchemaExporter" + } + }, { "pkg": "confluentcloud", "mod": "index/schemaRegistryCluster", diff --git a/sdk/python/pulumi_confluentcloud/_inputs.py b/sdk/python/pulumi_confluentcloud/_inputs.py index 2a960100..027581cf 100644 --- a/sdk/python/pulumi_confluentcloud/_inputs.py +++ b/sdk/python/pulumi_confluentcloud/_inputs.py @@ -30,6 +30,7 @@ 'ClusterLinkSourceKafkaClusterCredentialsArgs', 'ConnectorEnvironmentArgs', 'ConnectorKafkaClusterArgs', + 'FlinkComputePoolEnvironmentArgs', 'IdentityPoolIdentityProviderArgs', 'InvitationCreatorArgs', 'InvitationUserArgs', @@ -88,6 +89,10 @@ 'PrivateLinkAttachmentEnvironmentArgs', 'PrivateLinkAttachmentGcpArgs', 'SchemaCredentialsArgs', + 'SchemaExporterCredentialsArgs', + 'SchemaExporterDestinationSchemaRegistryClusterArgs', + 'SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs', + 'SchemaExporterSchemaRegistryClusterArgs', 'SchemaRegistryClusterConfigCredentialsArgs', 'SchemaRegistryClusterConfigSchemaRegistryClusterArgs', 'SchemaRegistryClusterEnvironmentArgs', @@ -111,6 +116,7 @@ 'GetBusinessMetadataBindingSchemaRegistryClusterArgs', 'GetBusinessMetadataCredentialsArgs', 'GetBusinessMetadataSchemaRegistryClusterArgs', + 'GetFlinkComputePoolEnvironmentArgs', 'GetIdentityPoolIdentityProviderArgs', 'GetKafkaClusterBasicArgs', 'GetKafkaClusterDedicatedArgs', @@ -686,8 +692,6 @@ def __init__(__self__, *, """ :param pulumi.Input[str] key: The Kafka API Key. :param pulumi.Input[str] secret: The Kafka API Secret. 
- - > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). """ pulumi.set(__self__, "key", key) pulumi.set(__self__, "secret", secret) @@ -709,8 +713,6 @@ def key(self, value: pulumi.Input[str]): def secret(self) -> pulumi.Input[str]: """ The Kafka API Secret. - - > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). """ return pulumi.get(self, "secret") @@ -793,8 +795,6 @@ def __init__(__self__, *, """ :param pulumi.Input[str] key: The Kafka API Key. :param pulumi.Input[str] secret: The Kafka API Secret. - - > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). """ pulumi.set(__self__, "key", key) pulumi.set(__self__, "secret", secret) @@ -816,8 +816,6 @@ def key(self, value: pulumi.Input[str]): def secret(self) -> pulumi.Input[str]: """ The Kafka API Secret. - - > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). """ return pulumi.get(self, "secret") @@ -900,8 +898,6 @@ def __init__(__self__, *, """ :param pulumi.Input[str] key: The Kafka API Key. :param pulumi.Input[str] secret: The Kafka API Secret. 
- - > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). """ pulumi.set(__self__, "key", key) pulumi.set(__self__, "secret", secret) @@ -923,8 +919,6 @@ def key(self, value: pulumi.Input[str]): def secret(self) -> pulumi.Input[str]: """ The Kafka API Secret. - - > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). """ return pulumi.get(self, "secret") @@ -1007,8 +1001,6 @@ def __init__(__self__, *, """ :param pulumi.Input[str] key: The Kafka API Key. :param pulumi.Input[str] secret: The Kafka API Secret. - - > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). """ pulumi.set(__self__, "key", key) pulumi.set(__self__, "secret", secret) @@ -1030,8 +1022,6 @@ def key(self, value: pulumi.Input[str]): def secret(self) -> pulumi.Input[str]: """ The Kafka API Secret. - - > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). """ return pulumi.get(self, "secret") @@ -1084,6 +1074,28 @@ def id(self, value: pulumi.Input[str]): pulumi.set(self, "id", value) +@pulumi.input_type +class FlinkComputePoolEnvironmentArgs: + def __init__(__self__, *, + id: pulumi.Input[str]): + """ + :param pulumi.Input[str] id: The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`. 
+ """ + pulumi.set(__self__, "id", id) + + @property + @pulumi.getter + def id(self) -> pulumi.Input[str]: + """ + The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`. + """ + return pulumi.get(self, "id") + + @id.setter + def id(self, value: pulumi.Input[str]): + pulumi.set(self, "id", value) + + @pulumi.input_type class IdentityPoolIdentityProviderArgs: def __init__(__self__, *, @@ -2748,6 +2760,135 @@ def secret(self, value: pulumi.Input[str]): pulumi.set(self, "secret", value) +@pulumi.input_type +class SchemaExporterCredentialsArgs: + def __init__(__self__, *, + key: pulumi.Input[str], + secret: pulumi.Input[str]): + """ + :param pulumi.Input[str] key: The Schema Registry API Key. + :param pulumi.Input[str] secret: The Schema Registry API Secret. + """ + pulumi.set(__self__, "key", key) + pulumi.set(__self__, "secret", secret) + + @property + @pulumi.getter + def key(self) -> pulumi.Input[str]: + """ + The Schema Registry API Key. + """ + return pulumi.get(self, "key") + + @key.setter + def key(self, value: pulumi.Input[str]): + pulumi.set(self, "key", value) + + @property + @pulumi.getter + def secret(self) -> pulumi.Input[str]: + """ + The Schema Registry API Secret. + """ + return pulumi.get(self, "secret") + + @secret.setter + def secret(self, value: pulumi.Input[str]): + pulumi.set(self, "secret", value) + + +@pulumi.input_type +class SchemaExporterDestinationSchemaRegistryClusterArgs: + def __init__(__self__, *, + credentials: pulumi.Input['SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs'], + rest_endpoint: pulumi.Input[str]): + """ + :param pulumi.Input[str] rest_endpoint: The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). 
+ """ + pulumi.set(__self__, "credentials", credentials) + pulumi.set(__self__, "rest_endpoint", rest_endpoint) + + @property + @pulumi.getter + def credentials(self) -> pulumi.Input['SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs']: + return pulumi.get(self, "credentials") + + @credentials.setter + def credentials(self, value: pulumi.Input['SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs']): + pulumi.set(self, "credentials", value) + + @property + @pulumi.getter(name="restEndpoint") + def rest_endpoint(self) -> pulumi.Input[str]: + """ + The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + """ + return pulumi.get(self, "rest_endpoint") + + @rest_endpoint.setter + def rest_endpoint(self, value: pulumi.Input[str]): + pulumi.set(self, "rest_endpoint", value) + + +@pulumi.input_type +class SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs: + def __init__(__self__, *, + key: pulumi.Input[str], + secret: pulumi.Input[str]): + """ + :param pulumi.Input[str] key: The Schema Registry API Key. + :param pulumi.Input[str] secret: The Schema Registry API Secret. + """ + pulumi.set(__self__, "key", key) + pulumi.set(__self__, "secret", secret) + + @property + @pulumi.getter + def key(self) -> pulumi.Input[str]: + """ + The Schema Registry API Key. + """ + return pulumi.get(self, "key") + + @key.setter + def key(self, value: pulumi.Input[str]): + pulumi.set(self, "key", value) + + @property + @pulumi.getter + def secret(self) -> pulumi.Input[str]: + """ + The Schema Registry API Secret. + """ + return pulumi.get(self, "secret") + + @secret.setter + def secret(self, value: pulumi.Input[str]): + pulumi.set(self, "secret", value) + + +@pulumi.input_type +class SchemaExporterSchemaRegistryClusterArgs: + def __init__(__self__, *, + id: pulumi.Input[str]): + """ + :param pulumi.Input[str] id: The ID of the Schema Registry cluster, for example, `lsrc-abc123`. 
+ """ + pulumi.set(__self__, "id", id) + + @property + @pulumi.getter + def id(self) -> pulumi.Input[str]: + """ + The ID of the Schema Registry cluster, for example, `lsrc-abc123`. + """ + return pulumi.get(self, "id") + + @id.setter + def id(self, value: pulumi.Input[str]): + pulumi.set(self, "id", value) + + @pulumi.input_type class SchemaRegistryClusterConfigCredentialsArgs: def __init__(__self__, *, @@ -3442,6 +3583,32 @@ def id(self, value: str): pulumi.set(self, "id", value) +@pulumi.input_type +class GetFlinkComputePoolEnvironmentArgs: + def __init__(__self__, *, + id: str): + """ + :param str id: The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. + + > **Note:** Exactly one from the `id` and `display_name` attributes must be specified. + """ + pulumi.set(__self__, "id", id) + + @property + @pulumi.getter + def id(self) -> str: + """ + The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. + + > **Note:** Exactly one from the `id` and `display_name` attributes must be specified. + """ + return pulumi.get(self, "id") + + @id.setter + def id(self, value: str): + pulumi.set(self, "id", value) + + @pulumi.input_type class GetIdentityPoolIdentityProviderArgs: def __init__(__self__, *, diff --git a/sdk/python/pulumi_confluentcloud/flink_compute_pool.py b/sdk/python/pulumi_confluentcloud/flink_compute_pool.py new file mode 100644 index 00000000..2e0d57f0 --- /dev/null +++ b/sdk/python/pulumi_confluentcloud/flink_compute_pool.py @@ -0,0 +1,529 @@ +# coding=utf-8 +# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +# *** Do not edit by hand unless you're certain you know what you are doing! *** + +import copy +import warnings +import pulumi +import pulumi.runtime +from typing import Any, Mapping, Optional, Sequence, Union, overload +from . import _utilities +from . 
import outputs +from ._inputs import * + +__all__ = ['FlinkComputePoolArgs', 'FlinkComputePool'] + +@pulumi.input_type +class FlinkComputePoolArgs: + def __init__(__self__, *, + cloud: pulumi.Input[str], + display_name: pulumi.Input[str], + environment: pulumi.Input['FlinkComputePoolEnvironmentArgs'], + region: pulumi.Input[str], + max_cfu: Optional[pulumi.Input[int]] = None): + """ + The set of arguments for constructing a FlinkComputePool resource. + :param pulumi.Input[str] cloud: The cloud service provider that runs the Flink Compute Pool. + :param pulumi.Input[str] display_name: The name of the Flink Compute Pool. + :param pulumi.Input['FlinkComputePoolEnvironmentArgs'] environment: Environment objects represent an isolated namespace for your Confluent resources for organizational purposes. + :param pulumi.Input[str] region: The cloud service provider region that hosts the Flink Compute Pool. + :param pulumi.Input[int] max_cfu: Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`. + """ + pulumi.set(__self__, "cloud", cloud) + pulumi.set(__self__, "display_name", display_name) + pulumi.set(__self__, "environment", environment) + pulumi.set(__self__, "region", region) + if max_cfu is not None: + pulumi.set(__self__, "max_cfu", max_cfu) + + @property + @pulumi.getter + def cloud(self) -> pulumi.Input[str]: + """ + The cloud service provider that runs the Flink Compute Pool. + """ + return pulumi.get(self, "cloud") + + @cloud.setter + def cloud(self, value: pulumi.Input[str]): + pulumi.set(self, "cloud", value) + + @property + @pulumi.getter(name="displayName") + def display_name(self) -> pulumi.Input[str]: + """ + The name of the Flink Compute Pool. 
+ """ + return pulumi.get(self, "display_name") + + @display_name.setter + def display_name(self, value: pulumi.Input[str]): + pulumi.set(self, "display_name", value) + + @property + @pulumi.getter + def environment(self) -> pulumi.Input['FlinkComputePoolEnvironmentArgs']: + """ + Environment objects represent an isolated namespace for your Confluent resources for organizational purposes. + """ + return pulumi.get(self, "environment") + + @environment.setter + def environment(self, value: pulumi.Input['FlinkComputePoolEnvironmentArgs']): + pulumi.set(self, "environment", value) + + @property + @pulumi.getter + def region(self) -> pulumi.Input[str]: + """ + The cloud service provider region that hosts the Flink Compute Pool. + """ + return pulumi.get(self, "region") + + @region.setter + def region(self, value: pulumi.Input[str]): + pulumi.set(self, "region", value) + + @property + @pulumi.getter(name="maxCfu") + def max_cfu(self) -> Optional[pulumi.Input[int]]: + """ + Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`. + """ + return pulumi.get(self, "max_cfu") + + @max_cfu.setter + def max_cfu(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "max_cfu", value) + + +@pulumi.input_type +class _FlinkComputePoolState: + def __init__(__self__, *, + api_version: Optional[pulumi.Input[str]] = None, + cloud: Optional[pulumi.Input[str]] = None, + current_cfu: Optional[pulumi.Input[int]] = None, + display_name: Optional[pulumi.Input[str]] = None, + environment: Optional[pulumi.Input['FlinkComputePoolEnvironmentArgs']] = None, + kind: Optional[pulumi.Input[str]] = None, + max_cfu: Optional[pulumi.Input[int]] = None, + region: Optional[pulumi.Input[str]] = None, + resource_name: Optional[pulumi.Input[str]] = None, + rest_endpoint: Optional[pulumi.Input[str]] = None): + """ + Input properties used for looking up and filtering FlinkComputePool resources. 
+ :param pulumi.Input[str] api_version: (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`. + :param pulumi.Input[str] cloud: The cloud service provider that runs the Flink Compute Pool. + :param pulumi.Input[int] current_cfu: (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool. + :param pulumi.Input[str] display_name: The name of the Flink Compute Pool. + :param pulumi.Input['FlinkComputePoolEnvironmentArgs'] environment: Environment objects represent an isolated namespace for your Confluent resources for organizational purposes. + :param pulumi.Input[str] kind: (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`. + :param pulumi.Input[int] max_cfu: Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`. + :param pulumi.Input[str] region: The cloud service provider region that hosts the Flink Compute Pool. + :param pulumi.Input[str] resource_name: (Required String) The Confluent Resource Name of the Flink Compute Pool. + :param pulumi.Input[str] rest_endpoint: (Required String) The API endpoint of the Flink Compute Pool. 
+ """ + if api_version is not None: + pulumi.set(__self__, "api_version", api_version) + if cloud is not None: + pulumi.set(__self__, "cloud", cloud) + if current_cfu is not None: + pulumi.set(__self__, "current_cfu", current_cfu) + if display_name is not None: + pulumi.set(__self__, "display_name", display_name) + if environment is not None: + pulumi.set(__self__, "environment", environment) + if kind is not None: + pulumi.set(__self__, "kind", kind) + if max_cfu is not None: + pulumi.set(__self__, "max_cfu", max_cfu) + if region is not None: + pulumi.set(__self__, "region", region) + if resource_name is not None: + pulumi.set(__self__, "resource_name", resource_name) + if rest_endpoint is not None: + pulumi.set(__self__, "rest_endpoint", rest_endpoint) + + @property + @pulumi.getter(name="apiVersion") + def api_version(self) -> Optional[pulumi.Input[str]]: + """ + (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`. + """ + return pulumi.get(self, "api_version") + + @api_version.setter + def api_version(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "api_version", value) + + @property + @pulumi.getter + def cloud(self) -> Optional[pulumi.Input[str]]: + """ + The cloud service provider that runs the Flink Compute Pool. + """ + return pulumi.get(self, "cloud") + + @cloud.setter + def cloud(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "cloud", value) + + @property + @pulumi.getter(name="currentCfu") + def current_cfu(self) -> Optional[pulumi.Input[int]]: + """ + (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool. 
+ """ + return pulumi.get(self, "current_cfu") + + @current_cfu.setter + def current_cfu(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "current_cfu", value) + + @property + @pulumi.getter(name="displayName") + def display_name(self) -> Optional[pulumi.Input[str]]: + """ + The name of the Flink Compute Pool. + """ + return pulumi.get(self, "display_name") + + @display_name.setter + def display_name(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "display_name", value) + + @property + @pulumi.getter + def environment(self) -> Optional[pulumi.Input['FlinkComputePoolEnvironmentArgs']]: + """ + Environment objects represent an isolated namespace for your Confluent resources for organizational purposes. + """ + return pulumi.get(self, "environment") + + @environment.setter + def environment(self, value: Optional[pulumi.Input['FlinkComputePoolEnvironmentArgs']]): + pulumi.set(self, "environment", value) + + @property + @pulumi.getter + def kind(self) -> Optional[pulumi.Input[str]]: + """ + (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`. + """ + return pulumi.get(self, "kind") + + @kind.setter + def kind(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "kind", value) + + @property + @pulumi.getter(name="maxCfu") + def max_cfu(self) -> Optional[pulumi.Input[int]]: + """ + Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`. + """ + return pulumi.get(self, "max_cfu") + + @max_cfu.setter + def max_cfu(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "max_cfu", value) + + @property + @pulumi.getter + def region(self) -> Optional[pulumi.Input[str]]: + """ + The cloud service provider region that hosts the Flink Compute Pool. 
+ """ + return pulumi.get(self, "region") + + @region.setter + def region(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "region", value) + + @property + @pulumi.getter(name="resourceName") + def resource_name(self) -> Optional[pulumi.Input[str]]: + """ + (Required String) The Confluent Resource Name of the Flink Compute Pool. + """ + return pulumi.get(self, "resource_name") + + @resource_name.setter + def resource_name(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "resource_name", value) + + @property + @pulumi.getter(name="restEndpoint") + def rest_endpoint(self) -> Optional[pulumi.Input[str]]: + """ + (Required String) The API endpoint of the Flink Compute Pool. + """ + return pulumi.get(self, "rest_endpoint") + + @rest_endpoint.setter + def rest_endpoint(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "rest_endpoint", value) + + +class FlinkComputePool(pulumi.CustomResource): + @overload + def __init__(__self__, + resource_name: str, + opts: Optional[pulumi.ResourceOptions] = None, + cloud: Optional[pulumi.Input[str]] = None, + display_name: Optional[pulumi.Input[str]] = None, + environment: Optional[pulumi.Input[pulumi.InputType['FlinkComputePoolEnvironmentArgs']]] = None, + max_cfu: Optional[pulumi.Input[int]] = None, + region: Optional[pulumi.Input[str]] = None, + __props__=None): + """ + ## Example Usage + + ```python + import pulumi + import pulumi_confluentcloud as confluentcloud + + development = confluentcloud.Environment("development") + main = confluentcloud.FlinkComputePool("main", + display_name="standard_compute_pool", + cloud="AWS", + region="us-east-1", + max_cfu=5, + environment=confluentcloud.FlinkComputePoolEnvironmentArgs( + id=development.id, + )) + ``` + + ## Import + + You can import a Flink Compute Pool by using Environment ID and Flink Compute Pool ID, in the format `/`. 
The following example shows how to import a Flink Compute Pool$ export CONFLUENT_CLOUD_API_KEY="" $ export CONFLUENT_CLOUD_API_SECRET="" + + ```sh + $ pulumi import confluentcloud:index/flinkComputePool:FlinkComputePool main env-abc123/lfcp-abc123 + ``` + + !> **Warning:** Do not forget to delete terminal command history afterwards for security purposes. + + :param str resource_name: The name of the resource. + :param pulumi.ResourceOptions opts: Options for the resource. + :param pulumi.Input[str] cloud: The cloud service provider that runs the Flink Compute Pool. + :param pulumi.Input[str] display_name: The name of the Flink Compute Pool. + :param pulumi.Input[pulumi.InputType['FlinkComputePoolEnvironmentArgs']] environment: Environment objects represent an isolated namespace for your Confluent resources for organizational purposes. + :param pulumi.Input[int] max_cfu: Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`. + :param pulumi.Input[str] region: The cloud service provider region that hosts the Flink Compute Pool. + """ + ... + @overload + def __init__(__self__, + resource_name: str, + args: FlinkComputePoolArgs, + opts: Optional[pulumi.ResourceOptions] = None): + """ + ## Example Usage + + ```python + import pulumi + import pulumi_confluentcloud as confluentcloud + + development = confluentcloud.Environment("development") + main = confluentcloud.FlinkComputePool("main", + display_name="standard_compute_pool", + cloud="AWS", + region="us-east-1", + max_cfu=5, + environment=confluentcloud.FlinkComputePoolEnvironmentArgs( + id=development.id, + )) + ``` + + ## Import + + You can import a Flink Compute Pool by using Environment ID and Flink Compute Pool ID, in the format `/`. 
The following example shows how to import a Flink Compute Pool$ export CONFLUENT_CLOUD_API_KEY="" $ export CONFLUENT_CLOUD_API_SECRET="" + + ```sh + $ pulumi import confluentcloud:index/flinkComputePool:FlinkComputePool main env-abc123/lfcp-abc123 + ``` + + !> **Warning:** Do not forget to delete terminal command history afterwards for security purposes. + + :param str resource_name: The name of the resource. + :param FlinkComputePoolArgs args: The arguments to use to populate this resource's properties. + :param pulumi.ResourceOptions opts: Options for the resource. + """ + ... + def __init__(__self__, resource_name: str, *args, **kwargs): + resource_args, opts = _utilities.get_resource_args_opts(FlinkComputePoolArgs, pulumi.ResourceOptions, *args, **kwargs) + if resource_args is not None: + __self__._internal_init(resource_name, opts, **resource_args.__dict__) + else: + __self__._internal_init(resource_name, *args, **kwargs) + + def _internal_init(__self__, + resource_name: str, + opts: Optional[pulumi.ResourceOptions] = None, + cloud: Optional[pulumi.Input[str]] = None, + display_name: Optional[pulumi.Input[str]] = None, + environment: Optional[pulumi.Input[pulumi.InputType['FlinkComputePoolEnvironmentArgs']]] = None, + max_cfu: Optional[pulumi.Input[int]] = None, + region: Optional[pulumi.Input[str]] = None, + __props__=None): + opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts) + if not isinstance(opts, pulumi.ResourceOptions): + raise TypeError('Expected resource options to be a ResourceOptions instance') + if opts.id is None: + if __props__ is not None: + raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') + __props__ = FlinkComputePoolArgs.__new__(FlinkComputePoolArgs) + + if cloud is None and not opts.urn: + raise TypeError("Missing required property 'cloud'") + __props__.__dict__["cloud"] = cloud + if display_name is None and not opts.urn: + raise 
TypeError("Missing required property 'display_name'") + __props__.__dict__["display_name"] = display_name + if environment is None and not opts.urn: + raise TypeError("Missing required property 'environment'") + __props__.__dict__["environment"] = environment + __props__.__dict__["max_cfu"] = max_cfu + if region is None and not opts.urn: + raise TypeError("Missing required property 'region'") + __props__.__dict__["region"] = region + __props__.__dict__["api_version"] = None + __props__.__dict__["current_cfu"] = None + __props__.__dict__["kind"] = None + __props__.__dict__["resource_name"] = None + __props__.__dict__["rest_endpoint"] = None + super(FlinkComputePool, __self__).__init__( + 'confluentcloud:index/flinkComputePool:FlinkComputePool', + resource_name, + __props__, + opts) + + @staticmethod + def get(resource_name: str, + id: pulumi.Input[str], + opts: Optional[pulumi.ResourceOptions] = None, + api_version: Optional[pulumi.Input[str]] = None, + cloud: Optional[pulumi.Input[str]] = None, + current_cfu: Optional[pulumi.Input[int]] = None, + display_name: Optional[pulumi.Input[str]] = None, + environment: Optional[pulumi.Input[pulumi.InputType['FlinkComputePoolEnvironmentArgs']]] = None, + kind: Optional[pulumi.Input[str]] = None, + max_cfu: Optional[pulumi.Input[int]] = None, + region: Optional[pulumi.Input[str]] = None, + resource_name_: Optional[pulumi.Input[str]] = None, + rest_endpoint: Optional[pulumi.Input[str]] = None) -> 'FlinkComputePool': + """ + Get an existing FlinkComputePool resource's state with the given name, id, and optional extra + properties used to qualify the lookup. + + :param str resource_name: The unique name of the resulting resource. + :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. + :param pulumi.ResourceOptions opts: Options for the resource. + :param pulumi.Input[str] api_version: (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`. 
+ :param pulumi.Input[str] cloud: The cloud service provider that runs the Flink Compute Pool. + :param pulumi.Input[int] current_cfu: (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool. + :param pulumi.Input[str] display_name: The name of the Flink Compute Pool. + :param pulumi.Input[pulumi.InputType['FlinkComputePoolEnvironmentArgs']] environment: Environment objects represent an isolated namespace for your Confluent resources for organizational purposes. + :param pulumi.Input[str] kind: (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`. + :param pulumi.Input[int] max_cfu: Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`. + :param pulumi.Input[str] region: The cloud service provider region that hosts the Flink Compute Pool. + :param pulumi.Input[str] resource_name_: (Required String) The Confluent Resource Name of the Flink Compute Pool. + :param pulumi.Input[str] rest_endpoint: (Required String) The API endpoint of the Flink Compute Pool. 
+ """ + opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) + + __props__ = _FlinkComputePoolState.__new__(_FlinkComputePoolState) + + __props__.__dict__["api_version"] = api_version + __props__.__dict__["cloud"] = cloud + __props__.__dict__["current_cfu"] = current_cfu + __props__.__dict__["display_name"] = display_name + __props__.__dict__["environment"] = environment + __props__.__dict__["kind"] = kind + __props__.__dict__["max_cfu"] = max_cfu + __props__.__dict__["region"] = region + __props__.__dict__["resource_name"] = resource_name_ + __props__.__dict__["rest_endpoint"] = rest_endpoint + return FlinkComputePool(resource_name, opts=opts, __props__=__props__) + + @property + @pulumi.getter(name="apiVersion") + def api_version(self) -> pulumi.Output[str]: + """ + (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`. + """ + return pulumi.get(self, "api_version") + + @property + @pulumi.getter + def cloud(self) -> pulumi.Output[str]: + """ + The cloud service provider that runs the Flink Compute Pool. + """ + return pulumi.get(self, "cloud") + + @property + @pulumi.getter(name="currentCfu") + def current_cfu(self) -> pulumi.Output[int]: + """ + (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool. + """ + return pulumi.get(self, "current_cfu") + + @property + @pulumi.getter(name="displayName") + def display_name(self) -> pulumi.Output[str]: + """ + The name of the Flink Compute Pool. + """ + return pulumi.get(self, "display_name") + + @property + @pulumi.getter + def environment(self) -> pulumi.Output['outputs.FlinkComputePoolEnvironment']: + """ + Environment objects represent an isolated namespace for your Confluent resources for organizational purposes. 
+ """ + return pulumi.get(self, "environment") + + @property + @pulumi.getter + def kind(self) -> pulumi.Output[str]: + """ + (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`. + """ + return pulumi.get(self, "kind") + + @property + @pulumi.getter(name="maxCfu") + def max_cfu(self) -> pulumi.Output[int]: + """ + Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`. + """ + return pulumi.get(self, "max_cfu") + + @property + @pulumi.getter + def region(self) -> pulumi.Output[str]: + """ + The cloud service provider region that hosts the Flink Compute Pool. + """ + return pulumi.get(self, "region") + + @property + @pulumi.getter(name="resourceName") + def resource_name(self) -> pulumi.Output[str]: + """ + (Required String) The Confluent Resource Name of the Flink Compute Pool. + """ + return pulumi.get(self, "resource_name") + + @property + @pulumi.getter(name="restEndpoint") + def rest_endpoint(self) -> pulumi.Output[str]: + """ + (Required String) The API endpoint of the Flink Compute Pool. + """ + return pulumi.get(self, "rest_endpoint") + diff --git a/sdk/python/pulumi_confluentcloud/get_flink_compute_pool.py b/sdk/python/pulumi_confluentcloud/get_flink_compute_pool.py new file mode 100644 index 00000000..9a6a1912 --- /dev/null +++ b/sdk/python/pulumi_confluentcloud/get_flink_compute_pool.py @@ -0,0 +1,265 @@ +# coding=utf-8 +# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +# *** Do not edit by hand unless you're certain you know what you are doing! *** + +import copy +import warnings +import pulumi +import pulumi.runtime +from typing import Any, Mapping, Optional, Sequence, Union, overload +from . import _utilities +from . 
import outputs +from ._inputs import * + +__all__ = [ + 'GetFlinkComputePoolResult', + 'AwaitableGetFlinkComputePoolResult', + 'get_flink_compute_pool', + 'get_flink_compute_pool_output', +] + +@pulumi.output_type +class GetFlinkComputePoolResult: + """ + A collection of values returned by getFlinkComputePool. + """ + def __init__(__self__, api_version=None, cloud=None, current_cfu=None, display_name=None, environment=None, id=None, kind=None, max_cfu=None, region=None, resource_name=None, rest_endpoint=None): + if api_version and not isinstance(api_version, str): + raise TypeError("Expected argument 'api_version' to be a str") + pulumi.set(__self__, "api_version", api_version) + if cloud and not isinstance(cloud, str): + raise TypeError("Expected argument 'cloud' to be a str") + pulumi.set(__self__, "cloud", cloud) + if current_cfu and not isinstance(current_cfu, int): + raise TypeError("Expected argument 'current_cfu' to be a int") + pulumi.set(__self__, "current_cfu", current_cfu) + if display_name and not isinstance(display_name, str): + raise TypeError("Expected argument 'display_name' to be a str") + pulumi.set(__self__, "display_name", display_name) + if environment and not isinstance(environment, dict): + raise TypeError("Expected argument 'environment' to be a dict") + pulumi.set(__self__, "environment", environment) + if id and not isinstance(id, str): + raise TypeError("Expected argument 'id' to be a str") + pulumi.set(__self__, "id", id) + if kind and not isinstance(kind, str): + raise TypeError("Expected argument 'kind' to be a str") + pulumi.set(__self__, "kind", kind) + if max_cfu and not isinstance(max_cfu, int): + raise TypeError("Expected argument 'max_cfu' to be a int") + pulumi.set(__self__, "max_cfu", max_cfu) + if region and not isinstance(region, str): + raise TypeError("Expected argument 'region' to be a str") + pulumi.set(__self__, "region", region) + if resource_name and not isinstance(resource_name, str): + raise TypeError("Expected 
argument 'resource_name' to be a str") + pulumi.set(__self__, "resource_name", resource_name) + if rest_endpoint and not isinstance(rest_endpoint, str): + raise TypeError("Expected argument 'rest_endpoint' to be a str") + pulumi.set(__self__, "rest_endpoint", rest_endpoint) + + @property + @pulumi.getter(name="apiVersion") + def api_version(self) -> str: + """ + (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`. + """ + return pulumi.get(self, "api_version") + + @property + @pulumi.getter + def cloud(self) -> str: + """ + (Required String) The cloud service provider that runs the Flink Compute Pool. + """ + return pulumi.get(self, "cloud") + + @property + @pulumi.getter(name="currentCfu") + def current_cfu(self) -> int: + """ + (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool. + """ + return pulumi.get(self, "current_cfu") + + @property + @pulumi.getter(name="displayName") + def display_name(self) -> str: + """ + (Required String) The name of the Flink Compute Pool. + """ + return pulumi.get(self, "display_name") + + @property + @pulumi.getter + def environment(self) -> 'outputs.GetFlinkComputePoolEnvironmentResult': + """ + (Required Configuration Block) supports the following: + """ + return pulumi.get(self, "environment") + + @property + @pulumi.getter + def id(self) -> str: + """ + (Required String) The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`. + """ + return pulumi.get(self, "id") + + @property + @pulumi.getter + def kind(self) -> str: + """ + (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`. + """ + return pulumi.get(self, "kind") + + @property + @pulumi.getter(name="maxCfu") + def max_cfu(self) -> int: + """ + (Required Integer) Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. 
+ """ + return pulumi.get(self, "max_cfu") + + @property + @pulumi.getter + def region(self) -> str: + """ + (Required String) The cloud service provider region that hosts the Flink Compute Pool. + """ + return pulumi.get(self, "region") + + @property + @pulumi.getter(name="resourceName") + def resource_name(self) -> str: + """ + (Required String) The Confluent Resource Name of the Flink Compute Pool. + """ + return pulumi.get(self, "resource_name") + + @property + @pulumi.getter(name="restEndpoint") + def rest_endpoint(self) -> str: + """ + (Required String) The API endpoint of the Flink Compute Pool. + """ + return pulumi.get(self, "rest_endpoint") + + +class AwaitableGetFlinkComputePoolResult(GetFlinkComputePoolResult): + # pylint: disable=using-constant-test + def __await__(self): + if False: + yield self + return GetFlinkComputePoolResult( + api_version=self.api_version, + cloud=self.cloud, + current_cfu=self.current_cfu, + display_name=self.display_name, + environment=self.environment, + id=self.id, + kind=self.kind, + max_cfu=self.max_cfu, + region=self.region, + resource_name=self.resource_name, + rest_endpoint=self.rest_endpoint) + + +def get_flink_compute_pool(display_name: Optional[str] = None, + environment: Optional[pulumi.InputType['GetFlinkComputePoolEnvironmentArgs']] = None, + id: Optional[str] = None, + opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetFlinkComputePoolResult: + """ + [![Early Access](https://img.shields.io/badge/Lifecycle%20Stage-Early%20Access-%2300afba)](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy) + + > **Note:** `FlinkComputePool` data source is available in **Early Access** for early adopters. Early Access features are introduced to gather customer feedback. 
This feature should be used only for evaluation and non-production testing purposes or to provide feedback to Confluent, particularly as it becomes more widely available in follow-on editions.\\ + **Early Access** features are intended for evaluation use in development and testing environments only, and not for production use. The warranty, SLA, and Support Services provisions of your agreement with Confluent do not apply to Early Access features. Early Access features are considered to be a Proof of Concept as defined in the Confluent Cloud Terms of Service. Confluent may discontinue providing early access releases of the Early Access features at any time in Confluent’s sole discretion. + + `FlinkComputePool` describes a Flink Compute Pool data source. + + ## Example Usage + + ```python + import pulumi + import pulumi_confluentcloud as confluentcloud + + example_using_id_flink_compute_pool = confluentcloud.get_flink_compute_pool(id="lfcp-abc123", + environment=confluentcloud.GetFlinkComputePoolEnvironmentArgs( + id="env-xyz456", + )) + pulumi.export("exampleUsingId", example_using_id_flink_compute_pool) + example_using_name_flink_compute_pool = confluentcloud.get_flink_compute_pool(display_name="my_compute_pool", + environment=confluentcloud.GetFlinkComputePoolEnvironmentArgs( + id="env-xyz456", + )) + pulumi.export("exampleUsingName", example_using_name_flink_compute_pool) + ``` + + + :param str display_name: A human-readable name for the Flink Compute Pool. + :param pulumi.InputType['GetFlinkComputePoolEnvironmentArgs'] environment: (Required Configuration Block) supports the following: + :param str id: The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. + + > **Note:** Exactly one from the `id` and `display_name` attributes must be specified. 
+ """ + __args__ = dict() + __args__['displayName'] = display_name + __args__['environment'] = environment + __args__['id'] = id + opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts) + __ret__ = pulumi.runtime.invoke('confluentcloud:index/getFlinkComputePool:getFlinkComputePool', __args__, opts=opts, typ=GetFlinkComputePoolResult).value + + return AwaitableGetFlinkComputePoolResult( + api_version=pulumi.get(__ret__, 'api_version'), + cloud=pulumi.get(__ret__, 'cloud'), + current_cfu=pulumi.get(__ret__, 'current_cfu'), + display_name=pulumi.get(__ret__, 'display_name'), + environment=pulumi.get(__ret__, 'environment'), + id=pulumi.get(__ret__, 'id'), + kind=pulumi.get(__ret__, 'kind'), + max_cfu=pulumi.get(__ret__, 'max_cfu'), + region=pulumi.get(__ret__, 'region'), + resource_name=pulumi.get(__ret__, 'resource_name'), + rest_endpoint=pulumi.get(__ret__, 'rest_endpoint')) + + +@_utilities.lift_output_func(get_flink_compute_pool) +def get_flink_compute_pool_output(display_name: Optional[pulumi.Input[Optional[str]]] = None, + environment: Optional[pulumi.Input[pulumi.InputType['GetFlinkComputePoolEnvironmentArgs']]] = None, + id: Optional[pulumi.Input[Optional[str]]] = None, + opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetFlinkComputePoolResult]: + """ + [![Early Access](https://img.shields.io/badge/Lifecycle%20Stage-Early%20Access-%2300afba)](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy) + + > **Note:** `FlinkComputePool` data source is available in **Early Access** for early adopters. Early Access features are introduced to gather customer feedback. This feature should be used only for evaluation and non-production testing purposes or to provide feedback to Confluent, particularly as it becomes more widely available in follow-on editions.\\ + **Early Access** features are intended for evaluation use in development and testing environments only, and not for production use. 
The warranty, SLA, and Support Services provisions of your agreement with Confluent do not apply to Early Access features. Early Access features are considered to be a Proof of Concept as defined in the Confluent Cloud Terms of Service. Confluent may discontinue providing early access releases of the Early Access features at any time in Confluent’s sole discretion. + + `FlinkComputePool` describes a Flink Compute Pool data source. + + ## Example Usage + + ```python + import pulumi + import pulumi_confluentcloud as confluentcloud + + example_using_id_flink_compute_pool = confluentcloud.get_flink_compute_pool(id="lfcp-abc123", + environment=confluentcloud.GetFlinkComputePoolEnvironmentArgs( + id="env-xyz456", + )) + pulumi.export("exampleUsingId", example_using_id_flink_compute_pool) + example_using_name_flink_compute_pool = confluentcloud.get_flink_compute_pool(display_name="my_compute_pool", + environment=confluentcloud.GetFlinkComputePoolEnvironmentArgs( + id="env-xyz456", + )) + pulumi.export("exampleUsingName", example_using_name_flink_compute_pool) + ``` + + + :param str display_name: A human-readable name for the Flink Compute Pool. + :param pulumi.InputType['GetFlinkComputePoolEnvironmentArgs'] environment: (Required Configuration Block) supports the following: + :param str id: The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. + + > **Note:** Exactly one from the `id` and `display_name` attributes must be specified. + """ + ... diff --git a/sdk/python/pulumi_confluentcloud/get_role_binding.py b/sdk/python/pulumi_confluentcloud/get_role_binding.py index ac9a5d61..5e46d1b0 100644 --- a/sdk/python/pulumi_confluentcloud/get_role_binding.py +++ b/sdk/python/pulumi_confluentcloud/get_role_binding.py @@ -84,6 +84,8 @@ def get_role_binding(id: Optional[str] = None, `RoleBinding` describes a Role Binding. 
+ > **Note:** For more information on the Role Bindings, see [Predefined RBAC roles in Confluent Cloud](https://docs.confluent.io/cloud/current/access-management/access-control/rbac/predefined-rbac-roles.html). + ## Example Usage ```python @@ -117,6 +119,8 @@ def get_role_binding_output(id: Optional[pulumi.Input[str]] = None, `RoleBinding` describes a Role Binding. + > **Note:** For more information on the Role Bindings, see [Predefined RBAC roles in Confluent Cloud](https://docs.confluent.io/cloud/current/access-management/access-control/rbac/predefined-rbac-roles.html). + ## Example Usage ```python diff --git a/sdk/python/pulumi_confluentcloud/kafka_acl.py b/sdk/python/pulumi_confluentcloud/kafka_acl.py index 2dd431df..cd3027cf 100644 --- a/sdk/python/pulumi_confluentcloud/kafka_acl.py +++ b/sdk/python/pulumi_confluentcloud/kafka_acl.py @@ -353,7 +353,7 @@ def __init__(__self__, """ ## Import - You can import Kafka ACLs by using the Kafka cluster ID and attributes of `confluent_kafka_acl` resource in the format `/######`, for exampleOption #1Manage multiple Kafka clusters in the same Terraform workspace $ export CONFLUENT_CLOUD_API_KEY="" $ export CONFLUENT_CLOUD_API_SECRET="" $ export IMPORT_KAFKA_API_KEY="" $ export IMPORT_KAFKA_API_SECRET="" $ export IMPORT_KAFKA_REST_ENDPOINT="" + You can import Kafka ACLs by using the Kafka cluster ID and attributes of `confluent_kafka_acl` resource in the format `/######`, for exampleOption #1Manage multiple Kafka clusters in the same Terraform workspace $ export IMPORT_KAFKA_API_KEY="" $ export IMPORT_KAFKA_API_SECRET="" $ export IMPORT_KAFKA_REST_ENDPOINT="" ```sh $ pulumi import confluentcloud:index/kafkaAcl:KafkaAcl describe-cluster "lkc-12345/CLUSTER#kafka-cluster#LITERAL#User:sa-xyz123#*#DESCRIBE#ALLOW" @@ -388,7 +388,7 @@ def __init__(__self__, """ ## Import - You can import Kafka ACLs by using the Kafka cluster ID and attributes of `confluent_kafka_acl` resource in the format `/######`, for exampleOption #1Manage 
multiple Kafka clusters in the same Terraform workspace $ export CONFLUENT_CLOUD_API_KEY="" $ export CONFLUENT_CLOUD_API_SECRET="" $ export IMPORT_KAFKA_API_KEY="" $ export IMPORT_KAFKA_API_SECRET="" $ export IMPORT_KAFKA_REST_ENDPOINT="" + You can import Kafka ACLs by using the Kafka cluster ID and attributes of `confluent_kafka_acl` resource in the format `/######`, for exampleOption #1Manage multiple Kafka clusters in the same Terraform workspace $ export IMPORT_KAFKA_API_KEY="" $ export IMPORT_KAFKA_API_SECRET="" $ export IMPORT_KAFKA_REST_ENDPOINT="" ```sh $ pulumi import confluentcloud:index/kafkaAcl:KafkaAcl describe-cluster "lkc-12345/CLUSTER#kafka-cluster#LITERAL#User:sa-xyz123#*#DESCRIBE#ALLOW" diff --git a/sdk/python/pulumi_confluentcloud/outputs.py b/sdk/python/pulumi_confluentcloud/outputs.py index 53fb130c..39cf0974 100644 --- a/sdk/python/pulumi_confluentcloud/outputs.py +++ b/sdk/python/pulumi_confluentcloud/outputs.py @@ -31,6 +31,7 @@ 'ClusterLinkSourceKafkaClusterCredentials', 'ConnectorEnvironment', 'ConnectorKafkaCluster', + 'FlinkComputePoolEnvironment', 'IdentityPoolIdentityProvider', 'InvitationCreator', 'InvitationUser', @@ -89,6 +90,10 @@ 'PrivateLinkAttachmentEnvironment', 'PrivateLinkAttachmentGcp', 'SchemaCredentials', + 'SchemaExporterCredentials', + 'SchemaExporterDestinationSchemaRegistryCluster', + 'SchemaExporterDestinationSchemaRegistryClusterCredentials', + 'SchemaExporterSchemaRegistryCluster', 'SchemaRegistryClusterConfigCredentials', 'SchemaRegistryClusterConfigSchemaRegistryCluster', 'SchemaRegistryClusterEnvironment', @@ -115,6 +120,7 @@ 'GetBusinessMetadataSchemaRegistryClusterResult', 'GetByokKeyAwResult', 'GetByokKeyAzureResult', + 'GetFlinkComputePoolEnvironmentResult', 'GetIdentityPoolIdentityProviderResult', 'GetInvitationCreatorResult', 'GetInvitationUserResult', @@ -719,8 +725,6 @@ def __init__(__self__, *, """ :param str key: The Kafka API Key. :param str secret: The Kafka API Secret. 
- - > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). """ pulumi.set(__self__, "key", key) pulumi.set(__self__, "secret", secret) @@ -738,8 +742,6 @@ def key(self) -> str: def secret(self) -> str: """ The Kafka API Secret. - - > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). """ return pulumi.get(self, "secret") @@ -821,8 +823,6 @@ def __init__(__self__, *, """ :param str key: The Kafka API Key. :param str secret: The Kafka API Secret. - - > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). """ pulumi.set(__self__, "key", key) pulumi.set(__self__, "secret", secret) @@ -840,8 +840,6 @@ def key(self) -> str: def secret(self) -> str: """ The Kafka API Secret. - - > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). """ return pulumi.get(self, "secret") @@ -923,8 +921,6 @@ def __init__(__self__, *, """ :param str key: The Kafka API Key. :param str secret: The Kafka API Secret. - - > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). 
""" pulumi.set(__self__, "key", key) pulumi.set(__self__, "secret", secret) @@ -942,8 +938,6 @@ def key(self) -> str: def secret(self) -> str: """ The Kafka API Secret. - - > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). """ return pulumi.get(self, "secret") @@ -1025,8 +1019,6 @@ def __init__(__self__, *, """ :param str key: The Kafka API Key. :param str secret: The Kafka API Secret. - - > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). """ pulumi.set(__self__, "key", key) pulumi.set(__self__, "secret", secret) @@ -1044,8 +1036,6 @@ def key(self) -> str: def secret(self) -> str: """ The Kafka API Secret. - - > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy). """ return pulumi.get(self, "secret") @@ -1086,6 +1076,24 @@ def id(self) -> str: return pulumi.get(self, "id") +@pulumi.output_type +class FlinkComputePoolEnvironment(dict): + def __init__(__self__, *, + id: str): + """ + :param str id: The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`. + """ + pulumi.set(__self__, "id", id) + + @property + @pulumi.getter + def id(self) -> str: + """ + The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`. 
+ """ + return pulumi.get(self, "id") + + @pulumi.output_type class IdentityPoolIdentityProvider(dict): def __init__(__self__, *, @@ -2740,6 +2748,124 @@ def secret(self) -> str: return pulumi.get(self, "secret") +@pulumi.output_type +class SchemaExporterCredentials(dict): + def __init__(__self__, *, + key: str, + secret: str): + """ + :param str key: The Schema Registry API Key. + :param str secret: The Schema Registry API Secret. + """ + pulumi.set(__self__, "key", key) + pulumi.set(__self__, "secret", secret) + + @property + @pulumi.getter + def key(self) -> str: + """ + The Schema Registry API Key. + """ + return pulumi.get(self, "key") + + @property + @pulumi.getter + def secret(self) -> str: + """ + The Schema Registry API Secret. + """ + return pulumi.get(self, "secret") + + +@pulumi.output_type +class SchemaExporterDestinationSchemaRegistryCluster(dict): + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "restEndpoint": + suggest = "rest_endpoint" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in SchemaExporterDestinationSchemaRegistryCluster. Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + SchemaExporterDestinationSchemaRegistryCluster.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + SchemaExporterDestinationSchemaRegistryCluster.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + credentials: 'outputs.SchemaExporterDestinationSchemaRegistryClusterCredentials', + rest_endpoint: str): + """ + :param str rest_endpoint: The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). 
+ """ + pulumi.set(__self__, "credentials", credentials) + pulumi.set(__self__, "rest_endpoint", rest_endpoint) + + @property + @pulumi.getter + def credentials(self) -> 'outputs.SchemaExporterDestinationSchemaRegistryClusterCredentials': + return pulumi.get(self, "credentials") + + @property + @pulumi.getter(name="restEndpoint") + def rest_endpoint(self) -> str: + """ + The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + """ + return pulumi.get(self, "rest_endpoint") + + +@pulumi.output_type +class SchemaExporterDestinationSchemaRegistryClusterCredentials(dict): + def __init__(__self__, *, + key: str, + secret: str): + """ + :param str key: The Schema Registry API Key. + :param str secret: The Schema Registry API Secret. + """ + pulumi.set(__self__, "key", key) + pulumi.set(__self__, "secret", secret) + + @property + @pulumi.getter + def key(self) -> str: + """ + The Schema Registry API Key. + """ + return pulumi.get(self, "key") + + @property + @pulumi.getter + def secret(self) -> str: + """ + The Schema Registry API Secret. + """ + return pulumi.get(self, "secret") + + +@pulumi.output_type +class SchemaExporterSchemaRegistryCluster(dict): + def __init__(__self__, *, + id: str): + """ + :param str id: The ID of the Schema Registry cluster, for example, `lsrc-abc123`. + """ + pulumi.set(__self__, "id", id) + + @property + @pulumi.getter + def id(self) -> str: + """ + The ID of the Schema Registry cluster, for example, `lsrc-abc123`. + """ + return pulumi.get(self, "id") + + @pulumi.output_type class SchemaRegistryClusterConfigCredentials(dict): def __init__(__self__, *, @@ -3485,6 +3611,28 @@ def tenant_id(self) -> str: return pulumi.get(self, "tenant_id") +@pulumi.output_type +class GetFlinkComputePoolEnvironmentResult(dict): + def __init__(__self__, *, + id: str): + """ + :param str id: The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. 
+ + > **Note:** Exactly one from the `id` and `display_name` attributes must be specified. + """ + pulumi.set(__self__, "id", id) + + @property + @pulumi.getter + def id(self) -> str: + """ + The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`. + + > **Note:** Exactly one from the `id` and `display_name` attributes must be specified. + """ + return pulumi.get(self, "id") + + @pulumi.output_type class GetIdentityPoolIdentityProviderResult(dict): def __init__(__self__, *, diff --git a/sdk/python/pulumi_confluentcloud/schema_exporter.py b/sdk/python/pulumi_confluentcloud/schema_exporter.py new file mode 100644 index 00000000..725dc26a --- /dev/null +++ b/sdk/python/pulumi_confluentcloud/schema_exporter.py @@ -0,0 +1,657 @@ +# coding=utf-8 +# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +# *** Do not edit by hand unless you're certain you know what you are doing! *** + +import copy +import warnings +import pulumi +import pulumi.runtime +from typing import Any, Mapping, Optional, Sequence, Union, overload +from . import _utilities +from . 
import outputs +from ._inputs import * + +__all__ = ['SchemaExporterArgs', 'SchemaExporter'] + +@pulumi.input_type +class SchemaExporterArgs: + def __init__(__self__, *, + destination_schema_registry_cluster: pulumi.Input['SchemaExporterDestinationSchemaRegistryClusterArgs'], + config: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, + context: Optional[pulumi.Input[str]] = None, + context_type: Optional[pulumi.Input[str]] = None, + credentials: Optional[pulumi.Input['SchemaExporterCredentialsArgs']] = None, + name: Optional[pulumi.Input[str]] = None, + reset_on_update: Optional[pulumi.Input[bool]] = None, + rest_endpoint: Optional[pulumi.Input[str]] = None, + schema_registry_cluster: Optional[pulumi.Input['SchemaExporterSchemaRegistryClusterArgs']] = None, + status: Optional[pulumi.Input[str]] = None, + subject_rename_format: Optional[pulumi.Input[str]] = None, + subjects: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): + """ + The set of arguments for constructing a SchemaExporter resource. + :param pulumi.Input[Mapping[str, pulumi.Input[str]]] config: Block for custom *nonsensitive* configuration properties: + :param pulumi.Input[str] context: Customized context of the exporter if `context_type` is set to `CUSTOM`. + :param pulumi.Input[str] context_type: Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`. + :param pulumi.Input['SchemaExporterCredentialsArgs'] credentials: The Cluster API Credentials. + :param pulumi.Input[str] name: The configuration setting name. + :param pulumi.Input[bool] reset_on_update: The flag to control whether to reset the exporter when updating configs. Defaults to `false`. + :param pulumi.Input[str] rest_endpoint: The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + :param pulumi.Input[str] status: The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`. 
+ :param pulumi.Input[str] subject_rename_format: Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dc_orders`. + :param pulumi.Input[Sequence[pulumi.Input[str]]] subjects: Name of each exporter subject. + """ + pulumi.set(__self__, "destination_schema_registry_cluster", destination_schema_registry_cluster) + if config is not None: + pulumi.set(__self__, "config", config) + if context is not None: + pulumi.set(__self__, "context", context) + if context_type is not None: + pulumi.set(__self__, "context_type", context_type) + if credentials is not None: + pulumi.set(__self__, "credentials", credentials) + if name is not None: + pulumi.set(__self__, "name", name) + if reset_on_update is not None: + pulumi.set(__self__, "reset_on_update", reset_on_update) + if rest_endpoint is not None: + pulumi.set(__self__, "rest_endpoint", rest_endpoint) + if schema_registry_cluster is not None: + pulumi.set(__self__, "schema_registry_cluster", schema_registry_cluster) + if status is not None: + pulumi.set(__self__, "status", status) + if subject_rename_format is not None: + pulumi.set(__self__, "subject_rename_format", subject_rename_format) + if subjects is not None: + pulumi.set(__self__, "subjects", subjects) + + @property + @pulumi.getter(name="destinationSchemaRegistryCluster") + def destination_schema_registry_cluster(self) -> pulumi.Input['SchemaExporterDestinationSchemaRegistryClusterArgs']: + return pulumi.get(self, "destination_schema_registry_cluster") + + @destination_schema_registry_cluster.setter + def destination_schema_registry_cluster(self, value: pulumi.Input['SchemaExporterDestinationSchemaRegistryClusterArgs']): + pulumi.set(self, "destination_schema_registry_cluster", value) + + @property + @pulumi.getter + def config(self) -> Optional[pulumi.Input[Mapping[str, 
pulumi.Input[str]]]]: + """ + Block for custom *nonsensitive* configuration properties: + """ + return pulumi.get(self, "config") + + @config.setter + def config(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): + pulumi.set(self, "config", value) + + @property + @pulumi.getter + def context(self) -> Optional[pulumi.Input[str]]: + """ + Customized context of the exporter if `context_type` is set to `CUSTOM`. + """ + return pulumi.get(self, "context") + + @context.setter + def context(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "context", value) + + @property + @pulumi.getter(name="contextType") + def context_type(self) -> Optional[pulumi.Input[str]]: + """ + Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`. + """ + return pulumi.get(self, "context_type") + + @context_type.setter + def context_type(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "context_type", value) + + @property + @pulumi.getter + def credentials(self) -> Optional[pulumi.Input['SchemaExporterCredentialsArgs']]: + """ + The Cluster API Credentials. + """ + return pulumi.get(self, "credentials") + + @credentials.setter + def credentials(self, value: Optional[pulumi.Input['SchemaExporterCredentialsArgs']]): + pulumi.set(self, "credentials", value) + + @property + @pulumi.getter + def name(self) -> Optional[pulumi.Input[str]]: + """ + The configuration setting name. + """ + return pulumi.get(self, "name") + + @name.setter + def name(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "name", value) + + @property + @pulumi.getter(name="resetOnUpdate") + def reset_on_update(self) -> Optional[pulumi.Input[bool]]: + """ + The flag to control whether to reset the exporter when updating configs. Defaults to `false`. 
+ """ + return pulumi.get(self, "reset_on_update") + + @reset_on_update.setter + def reset_on_update(self, value: Optional[pulumi.Input[bool]]): + pulumi.set(self, "reset_on_update", value) + + @property + @pulumi.getter(name="restEndpoint") + def rest_endpoint(self) -> Optional[pulumi.Input[str]]: + """ + The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + """ + return pulumi.get(self, "rest_endpoint") + + @rest_endpoint.setter + def rest_endpoint(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "rest_endpoint", value) + + @property + @pulumi.getter(name="schemaRegistryCluster") + def schema_registry_cluster(self) -> Optional[pulumi.Input['SchemaExporterSchemaRegistryClusterArgs']]: + return pulumi.get(self, "schema_registry_cluster") + + @schema_registry_cluster.setter + def schema_registry_cluster(self, value: Optional[pulumi.Input['SchemaExporterSchemaRegistryClusterArgs']]): + pulumi.set(self, "schema_registry_cluster", value) + + @property + @pulumi.getter + def status(self) -> Optional[pulumi.Input[str]]: + """ + The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`. + """ + return pulumi.get(self, "status") + + @status.setter + def status(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "status", value) + + @property + @pulumi.getter(name="subjectRenameFormat") + def subject_rename_format(self) -> Optional[pulumi.Input[str]]: + """ + Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dc_orders`. 
+ """ + return pulumi.get(self, "subject_rename_format") + + @subject_rename_format.setter + def subject_rename_format(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "subject_rename_format", value) + + @property + @pulumi.getter + def subjects(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: + """ + Name of each exporter subject. + """ + return pulumi.get(self, "subjects") + + @subjects.setter + def subjects(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): + pulumi.set(self, "subjects", value) + + +@pulumi.input_type +class _SchemaExporterState: + def __init__(__self__, *, + config: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, + context: Optional[pulumi.Input[str]] = None, + context_type: Optional[pulumi.Input[str]] = None, + credentials: Optional[pulumi.Input['SchemaExporterCredentialsArgs']] = None, + destination_schema_registry_cluster: Optional[pulumi.Input['SchemaExporterDestinationSchemaRegistryClusterArgs']] = None, + name: Optional[pulumi.Input[str]] = None, + reset_on_update: Optional[pulumi.Input[bool]] = None, + rest_endpoint: Optional[pulumi.Input[str]] = None, + schema_registry_cluster: Optional[pulumi.Input['SchemaExporterSchemaRegistryClusterArgs']] = None, + status: Optional[pulumi.Input[str]] = None, + subject_rename_format: Optional[pulumi.Input[str]] = None, + subjects: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): + """ + Input properties used for looking up and filtering SchemaExporter resources. + :param pulumi.Input[Mapping[str, pulumi.Input[str]]] config: Block for custom *nonsensitive* configuration properties: + :param pulumi.Input[str] context: Customized context of the exporter if `context_type` is set to `CUSTOM`. + :param pulumi.Input[str] context_type: Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`. + :param pulumi.Input['SchemaExporterCredentialsArgs'] credentials: The Cluster API Credentials. 
+ :param pulumi.Input[str] name: The configuration setting name. + :param pulumi.Input[bool] reset_on_update: The flag to control whether to reset the exporter when updating configs. Defaults to `false`. + :param pulumi.Input[str] rest_endpoint: The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + :param pulumi.Input[str] status: The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`. + :param pulumi.Input[str] subject_rename_format: Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dc_orders`. + :param pulumi.Input[Sequence[pulumi.Input[str]]] subjects: Name of each exporter subject. + """ + if config is not None: + pulumi.set(__self__, "config", config) + if context is not None: + pulumi.set(__self__, "context", context) + if context_type is not None: + pulumi.set(__self__, "context_type", context_type) + if credentials is not None: + pulumi.set(__self__, "credentials", credentials) + if destination_schema_registry_cluster is not None: + pulumi.set(__self__, "destination_schema_registry_cluster", destination_schema_registry_cluster) + if name is not None: + pulumi.set(__self__, "name", name) + if reset_on_update is not None: + pulumi.set(__self__, "reset_on_update", reset_on_update) + if rest_endpoint is not None: + pulumi.set(__self__, "rest_endpoint", rest_endpoint) + if schema_registry_cluster is not None: + pulumi.set(__self__, "schema_registry_cluster", schema_registry_cluster) + if status is not None: + pulumi.set(__self__, "status", status) + if subject_rename_format is not None: + pulumi.set(__self__, "subject_rename_format", subject_rename_format) + if subjects is not None: + pulumi.set(__self__, "subjects", subjects) + + @property + @pulumi.getter + def 
config(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: + """ + Block for custom *nonsensitive* configuration properties: + """ + return pulumi.get(self, "config") + + @config.setter + def config(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): + pulumi.set(self, "config", value) + + @property + @pulumi.getter + def context(self) -> Optional[pulumi.Input[str]]: + """ + Customized context of the exporter if `context_type` is set to `CUSTOM`. + """ + return pulumi.get(self, "context") + + @context.setter + def context(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "context", value) + + @property + @pulumi.getter(name="contextType") + def context_type(self) -> Optional[pulumi.Input[str]]: + """ + Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`. + """ + return pulumi.get(self, "context_type") + + @context_type.setter + def context_type(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "context_type", value) + + @property + @pulumi.getter + def credentials(self) -> Optional[pulumi.Input['SchemaExporterCredentialsArgs']]: + """ + The Cluster API Credentials. 
+ """ + return pulumi.get(self, "credentials") + + @credentials.setter + def credentials(self, value: Optional[pulumi.Input['SchemaExporterCredentialsArgs']]): + pulumi.set(self, "credentials", value) + + @property + @pulumi.getter(name="destinationSchemaRegistryCluster") + def destination_schema_registry_cluster(self) -> Optional[pulumi.Input['SchemaExporterDestinationSchemaRegistryClusterArgs']]: + return pulumi.get(self, "destination_schema_registry_cluster") + + @destination_schema_registry_cluster.setter + def destination_schema_registry_cluster(self, value: Optional[pulumi.Input['SchemaExporterDestinationSchemaRegistryClusterArgs']]): + pulumi.set(self, "destination_schema_registry_cluster", value) + + @property + @pulumi.getter + def name(self) -> Optional[pulumi.Input[str]]: + """ + The configuration setting name. + """ + return pulumi.get(self, "name") + + @name.setter + def name(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "name", value) + + @property + @pulumi.getter(name="resetOnUpdate") + def reset_on_update(self) -> Optional[pulumi.Input[bool]]: + """ + The flag to control whether to reset the exporter when updating configs. Defaults to `false`. + """ + return pulumi.get(self, "reset_on_update") + + @reset_on_update.setter + def reset_on_update(self, value: Optional[pulumi.Input[bool]]): + pulumi.set(self, "reset_on_update", value) + + @property + @pulumi.getter(name="restEndpoint") + def rest_endpoint(self) -> Optional[pulumi.Input[str]]: + """ + The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). 
+ """ + return pulumi.get(self, "rest_endpoint") + + @rest_endpoint.setter + def rest_endpoint(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "rest_endpoint", value) + + @property + @pulumi.getter(name="schemaRegistryCluster") + def schema_registry_cluster(self) -> Optional[pulumi.Input['SchemaExporterSchemaRegistryClusterArgs']]: + return pulumi.get(self, "schema_registry_cluster") + + @schema_registry_cluster.setter + def schema_registry_cluster(self, value: Optional[pulumi.Input['SchemaExporterSchemaRegistryClusterArgs']]): + pulumi.set(self, "schema_registry_cluster", value) + + @property + @pulumi.getter + def status(self) -> Optional[pulumi.Input[str]]: + """ + The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`. + """ + return pulumi.get(self, "status") + + @status.setter + def status(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "status", value) + + @property + @pulumi.getter(name="subjectRenameFormat") + def subject_rename_format(self) -> Optional[pulumi.Input[str]]: + """ + Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dc_orders`. + """ + return pulumi.get(self, "subject_rename_format") + + @subject_rename_format.setter + def subject_rename_format(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "subject_rename_format", value) + + @property + @pulumi.getter + def subjects(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: + """ + Name of each exporter subject. 
+ """ + return pulumi.get(self, "subjects") + + @subjects.setter + def subjects(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): + pulumi.set(self, "subjects", value) + + +class SchemaExporter(pulumi.CustomResource): + @overload + def __init__(__self__, + resource_name: str, + opts: Optional[pulumi.ResourceOptions] = None, + config: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, + context: Optional[pulumi.Input[str]] = None, + context_type: Optional[pulumi.Input[str]] = None, + credentials: Optional[pulumi.Input[pulumi.InputType['SchemaExporterCredentialsArgs']]] = None, + destination_schema_registry_cluster: Optional[pulumi.Input[pulumi.InputType['SchemaExporterDestinationSchemaRegistryClusterArgs']]] = None, + name: Optional[pulumi.Input[str]] = None, + reset_on_update: Optional[pulumi.Input[bool]] = None, + rest_endpoint: Optional[pulumi.Input[str]] = None, + schema_registry_cluster: Optional[pulumi.Input[pulumi.InputType['SchemaExporterSchemaRegistryClusterArgs']]] = None, + status: Optional[pulumi.Input[str]] = None, + subject_rename_format: Optional[pulumi.Input[str]] = None, + subjects: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, + __props__=None): + """ + ## Import + + You can import a Schema Exporter by using the Schema Registry cluster ID, Schema Exporter name in the format `/`, for example$ export IMPORT_SCHEMA_REGISTRY_API_KEY="" $ export IMPORT_SCHEMA_REGISTRY_API_SECRET="" $ export IMPORT_SCHEMA_REGISTRY_REST_ENDPOINT="" + + ```sh + $ pulumi import confluentcloud:index/schemaExporter:SchemaExporter main lsrc-8wrx70/test-exporter + ``` + + !> **Warning:** Do not forget to delete terminal command history afterwards for security purposes. + + :param str resource_name: The name of the resource. + :param pulumi.ResourceOptions opts: Options for the resource. 
+ :param pulumi.Input[Mapping[str, pulumi.Input[str]]] config: Block for custom *nonsensitive* configuration properties: + :param pulumi.Input[str] context: Customized context of the exporter if `context_type` is set to `CUSTOM`. + :param pulumi.Input[str] context_type: Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`. + :param pulumi.Input[pulumi.InputType['SchemaExporterCredentialsArgs']] credentials: The Cluster API Credentials. + :param pulumi.Input[str] name: The configuration setting name. + :param pulumi.Input[bool] reset_on_update: The flag to control whether to reset the exporter when updating configs. Defaults to `false`. + :param pulumi.Input[str] rest_endpoint: The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + :param pulumi.Input[str] status: The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`. + :param pulumi.Input[str] subject_rename_format: Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dc_orders`. + :param pulumi.Input[Sequence[pulumi.Input[str]]] subjects: Name of each exporter subject. + """ + ... 
+ @overload + def __init__(__self__, + resource_name: str, + args: SchemaExporterArgs, + opts: Optional[pulumi.ResourceOptions] = None): + """ + ## Import + + You can import a Schema Exporter by using the Schema Registry cluster ID, Schema Exporter name in the format `/`, for example$ export IMPORT_SCHEMA_REGISTRY_API_KEY="" $ export IMPORT_SCHEMA_REGISTRY_API_SECRET="" $ export IMPORT_SCHEMA_REGISTRY_REST_ENDPOINT="" + + ```sh + $ pulumi import confluentcloud:index/schemaExporter:SchemaExporter main lsrc-8wrx70/test-exporter + ``` + + !> **Warning:** Do not forget to delete terminal command history afterwards for security purposes. + + :param str resource_name: The name of the resource. + :param SchemaExporterArgs args: The arguments to use to populate this resource's properties. + :param pulumi.ResourceOptions opts: Options for the resource. + """ + ... + def __init__(__self__, resource_name: str, *args, **kwargs): + resource_args, opts = _utilities.get_resource_args_opts(SchemaExporterArgs, pulumi.ResourceOptions, *args, **kwargs) + if resource_args is not None: + __self__._internal_init(resource_name, opts, **resource_args.__dict__) + else: + __self__._internal_init(resource_name, *args, **kwargs) + + def _internal_init(__self__, + resource_name: str, + opts: Optional[pulumi.ResourceOptions] = None, + config: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, + context: Optional[pulumi.Input[str]] = None, + context_type: Optional[pulumi.Input[str]] = None, + credentials: Optional[pulumi.Input[pulumi.InputType['SchemaExporterCredentialsArgs']]] = None, + destination_schema_registry_cluster: Optional[pulumi.Input[pulumi.InputType['SchemaExporterDestinationSchemaRegistryClusterArgs']]] = None, + name: Optional[pulumi.Input[str]] = None, + reset_on_update: Optional[pulumi.Input[bool]] = None, + rest_endpoint: Optional[pulumi.Input[str]] = None, + schema_registry_cluster: 
Optional[pulumi.Input[pulumi.InputType['SchemaExporterSchemaRegistryClusterArgs']]] = None, + status: Optional[pulumi.Input[str]] = None, + subject_rename_format: Optional[pulumi.Input[str]] = None, + subjects: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, + __props__=None): + opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts) + if not isinstance(opts, pulumi.ResourceOptions): + raise TypeError('Expected resource options to be a ResourceOptions instance') + if opts.id is None: + if __props__ is not None: + raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') + __props__ = SchemaExporterArgs.__new__(SchemaExporterArgs) + + __props__.__dict__["config"] = config + __props__.__dict__["context"] = context + __props__.__dict__["context_type"] = context_type + __props__.__dict__["credentials"] = None if credentials is None else pulumi.Output.secret(credentials) + if destination_schema_registry_cluster is None and not opts.urn: + raise TypeError("Missing required property 'destination_schema_registry_cluster'") + __props__.__dict__["destination_schema_registry_cluster"] = destination_schema_registry_cluster + __props__.__dict__["name"] = name + __props__.__dict__["reset_on_update"] = reset_on_update + __props__.__dict__["rest_endpoint"] = rest_endpoint + __props__.__dict__["schema_registry_cluster"] = schema_registry_cluster + __props__.__dict__["status"] = status + __props__.__dict__["subject_rename_format"] = subject_rename_format + __props__.__dict__["subjects"] = subjects + secret_opts = pulumi.ResourceOptions(additional_secret_outputs=["credentials"]) + opts = pulumi.ResourceOptions.merge(opts, secret_opts) + super(SchemaExporter, __self__).__init__( + 'confluentcloud:index/schemaExporter:SchemaExporter', + resource_name, + __props__, + opts) + + @staticmethod + def get(resource_name: str, + id: pulumi.Input[str], + opts: Optional[pulumi.ResourceOptions] = 
None, + config: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, + context: Optional[pulumi.Input[str]] = None, + context_type: Optional[pulumi.Input[str]] = None, + credentials: Optional[pulumi.Input[pulumi.InputType['SchemaExporterCredentialsArgs']]] = None, + destination_schema_registry_cluster: Optional[pulumi.Input[pulumi.InputType['SchemaExporterDestinationSchemaRegistryClusterArgs']]] = None, + name: Optional[pulumi.Input[str]] = None, + reset_on_update: Optional[pulumi.Input[bool]] = None, + rest_endpoint: Optional[pulumi.Input[str]] = None, + schema_registry_cluster: Optional[pulumi.Input[pulumi.InputType['SchemaExporterSchemaRegistryClusterArgs']]] = None, + status: Optional[pulumi.Input[str]] = None, + subject_rename_format: Optional[pulumi.Input[str]] = None, + subjects: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'SchemaExporter': + """ + Get an existing SchemaExporter resource's state with the given name, id, and optional extra + properties used to qualify the lookup. + + :param str resource_name: The unique name of the resulting resource. + :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. + :param pulumi.ResourceOptions opts: Options for the resource. + :param pulumi.Input[Mapping[str, pulumi.Input[str]]] config: Block for custom *nonsensitive* configuration properties: + :param pulumi.Input[str] context: Customized context of the exporter if `context_type` is set to `CUSTOM`. + :param pulumi.Input[str] context_type: Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`. + :param pulumi.Input[pulumi.InputType['SchemaExporterCredentialsArgs']] credentials: The Cluster API Credentials. + :param pulumi.Input[str] name: The configuration setting name. + :param pulumi.Input[bool] reset_on_update: The flag to control whether to reset the exporter when updating configs. Defaults to `false`. 
+ :param pulumi.Input[str] rest_endpoint: The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + :param pulumi.Input[str] status: The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`. + :param pulumi.Input[str] subject_rename_format: Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dc_orders`. + :param pulumi.Input[Sequence[pulumi.Input[str]]] subjects: Name of each exporter subject. + """ + opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) + + __props__ = _SchemaExporterState.__new__(_SchemaExporterState) + + __props__.__dict__["config"] = config + __props__.__dict__["context"] = context + __props__.__dict__["context_type"] = context_type + __props__.__dict__["credentials"] = credentials + __props__.__dict__["destination_schema_registry_cluster"] = destination_schema_registry_cluster + __props__.__dict__["name"] = name + __props__.__dict__["reset_on_update"] = reset_on_update + __props__.__dict__["rest_endpoint"] = rest_endpoint + __props__.__dict__["schema_registry_cluster"] = schema_registry_cluster + __props__.__dict__["status"] = status + __props__.__dict__["subject_rename_format"] = subject_rename_format + __props__.__dict__["subjects"] = subjects + return SchemaExporter(resource_name, opts=opts, __props__=__props__) + + @property + @pulumi.getter + def config(self) -> pulumi.Output[Mapping[str, str]]: + """ + Block for custom *nonsensitive* configuration properties: + """ + return pulumi.get(self, "config") + + @property + @pulumi.getter + def context(self) -> pulumi.Output[str]: + """ + Customized context of the exporter if `context_type` is set to `CUSTOM`. 
+ """ + return pulumi.get(self, "context") + + @property + @pulumi.getter(name="contextType") + def context_type(self) -> pulumi.Output[str]: + """ + Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`. + """ + return pulumi.get(self, "context_type") + + @property + @pulumi.getter + def credentials(self) -> pulumi.Output[Optional['outputs.SchemaExporterCredentials']]: + """ + The Cluster API Credentials. + """ + return pulumi.get(self, "credentials") + + @property + @pulumi.getter(name="destinationSchemaRegistryCluster") + def destination_schema_registry_cluster(self) -> pulumi.Output['outputs.SchemaExporterDestinationSchemaRegistryCluster']: + return pulumi.get(self, "destination_schema_registry_cluster") + + @property + @pulumi.getter + def name(self) -> pulumi.Output[str]: + """ + The configuration setting name. + """ + return pulumi.get(self, "name") + + @property + @pulumi.getter(name="resetOnUpdate") + def reset_on_update(self) -> pulumi.Output[Optional[bool]]: + """ + The flag to control whether to reset the exporter when updating configs. Defaults to `false`. + """ + return pulumi.get(self, "reset_on_update") + + @property + @pulumi.getter(name="restEndpoint") + def rest_endpoint(self) -> pulumi.Output[Optional[str]]: + """ + The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`). + """ + return pulumi.get(self, "rest_endpoint") + + @property + @pulumi.getter(name="schemaRegistryCluster") + def schema_registry_cluster(self) -> pulumi.Output[Optional['outputs.SchemaExporterSchemaRegistryCluster']]: + return pulumi.get(self, "schema_registry_cluster") + + @property + @pulumi.getter + def status(self) -> pulumi.Output[str]: + """ + The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`. 
+ """ + return pulumi.get(self, "status") + + @property + @pulumi.getter(name="subjectRenameFormat") + def subject_rename_format(self) -> pulumi.Output[str]: + """ + Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dc_orders`. + """ + return pulumi.get(self, "subject_rename_format") + + @property + @pulumi.getter + def subjects(self) -> pulumi.Output[Sequence[str]]: + """ + Name of each exporter subject. + """ + return pulumi.get(self, "subjects") + diff --git a/upstream b/upstream index 0fbb2353..0b05ee13 160000 --- a/upstream +++ b/upstream @@ -1 +1 @@ -Subproject commit 0fbb2353c6d75dcbf6a84764d8cce350469bfcc5 +Subproject commit 0b05ee133153e9753abc01dfa392fa8f3116adac