diff --git a/provider/cmd/pulumi-resource-confluentcloud/bridge-metadata.json b/provider/cmd/pulumi-resource-confluentcloud/bridge-metadata.json
index b787ba91..70642ff2 100644
--- a/provider/cmd/pulumi-resource-confluentcloud/bridge-metadata.json
+++ b/provider/cmd/pulumi-resource-confluentcloud/bridge-metadata.json
@@ -195,6 +195,14 @@
"resource_name": {}
}
},
+ "confluent_flink_compute_pool": {
+ "current": "confluentcloud:index/flinkComputePool:FlinkComputePool",
+ "fields": {
+ "environment": {
+ "maxItemsOne": true
+ }
+ }
+ },
"confluent_identity_pool": {
"current": "confluentcloud:index/identityPool:IdentityPool",
"fields": {
@@ -849,6 +857,30 @@
"version": {}
}
},
+ "confluent_schema_exporter": {
+ "current": "confluentcloud:index/schemaExporter:SchemaExporter",
+ "fields": {
+ "credentials": {
+ "maxItemsOne": true
+ },
+ "destination_schema_registry_cluster": {
+ "maxItemsOne": true,
+ "elem": {
+ "fields": {
+ "credentials": {
+ "maxItemsOne": true
+ }
+ }
+ }
+ },
+ "schema_registry_cluster": {
+ "maxItemsOne": true
+ },
+ "subjects": {
+ "maxItemsOne": false
+ }
+ }
+ },
"confluent_schema_registry_cluster": {
"current": "confluentcloud:index/schemaRegistryCluster:SchemaRegistryCluster",
"fields": {
@@ -1109,6 +1141,14 @@
}
}
},
+ "confluent_flink_compute_pool": {
+ "current": "confluentcloud:index/getFlinkComputePool:getFlinkComputePool",
+ "fields": {
+ "environment": {
+ "maxItemsOne": true
+ }
+ }
+ },
"confluent_identity_pool": {
"current": "confluentcloud:index/getIdentityPool:getIdentityPool",
"fields": {
@@ -1548,6 +1588,7 @@
"confluentcloud:index/clusterLink:ClusterLink": "confluent_cluster_link",
"confluentcloud:index/connector:Connector": "confluent_connector",
"confluentcloud:index/environment:Environment": "confluent_environment",
+ "confluentcloud:index/flinkComputePool:FlinkComputePool": "confluent_flink_compute_pool",
"confluentcloud:index/identityPool:IdentityPool": "confluent_identity_pool",
"confluentcloud:index/identityProvider:IdentityProvider": "confluent_identity_provider",
"confluentcloud:index/invitation:Invitation": "confluent_invitation",
@@ -1567,6 +1608,7 @@
"confluentcloud:index/privateLinkAttachmentConnection:PrivateLinkAttachmentConnection": "confluent_private_link_attachment_connection",
"confluentcloud:index/roleBinding:RoleBinding": "confluent_role_binding",
"confluentcloud:index/schema:Schema": "confluent_schema",
+ "confluentcloud:index/schemaExporter:SchemaExporter": "confluent_schema_exporter",
"confluentcloud:index/schemaRegistryCluster:SchemaRegistryCluster": "confluent_schema_registry_cluster",
"confluentcloud:index/schemaRegistryClusterConfig:SchemaRegistryClusterConfig": "confluent_schema_registry_cluster_config",
"confluentcloud:index/schemaRegistryClusterMode:SchemaRegistryClusterMode": "confluent_schema_registry_cluster_mode",
@@ -1584,6 +1626,7 @@
"confluentcloud:index/getByokKey:getByokKey": "confluent_byok_key",
"confluentcloud:index/getEnvironment:getEnvironment": "confluent_environment",
"confluentcloud:index/getEnvironments:getEnvironments": "confluent_environments",
+ "confluentcloud:index/getFlinkComputePool:getFlinkComputePool": "confluent_flink_compute_pool",
"confluentcloud:index/getIdentityPool:getIdentityPool": "confluent_identity_pool",
"confluentcloud:index/getIdentityProvider:getIdentityProvider": "confluent_identity_provider",
"confluentcloud:index/getInvitation:getInvitation": "confluent_invitation",
@@ -1710,6 +1753,9 @@
"confluentcloud:index/PrivateLinkAttachmentGcp:PrivateLinkAttachmentGcp": {
"privateServiceConnectServiceAttachment": "private_service_connect_service_attachment"
},
+ "confluentcloud:index/SchemaExporterDestinationSchemaRegistryCluster:SchemaExporterDestinationSchemaRegistryCluster": {
+ "restEndpoint": "rest_endpoint"
+ },
"confluentcloud:index/SchemaSchemaReference:SchemaSchemaReference": {
"subjectName": "subject_name"
},
@@ -1753,6 +1799,14 @@
"displayName": "display_name",
"resourceName": "resource_name"
},
+ "confluentcloud:index/flinkComputePool:FlinkComputePool": {
+ "apiVersion": "api_version",
+ "currentCfu": "current_cfu",
+ "displayName": "display_name",
+ "maxCfu": "max_cfu",
+ "resourceName": "resource_name",
+ "restEndpoint": "rest_endpoint"
+ },
"confluentcloud:index/getBusinessMetadata:getBusinessMetadata": {
"attributeDefinitions": "attribute_definition",
"restEndpoint": "rest_endpoint",
@@ -1785,6 +1839,14 @@
"displayName": "display_name",
"resourceName": "resource_name"
},
+ "confluentcloud:index/getFlinkComputePool:getFlinkComputePool": {
+ "apiVersion": "api_version",
+ "currentCfu": "current_cfu",
+ "displayName": "display_name",
+ "maxCfu": "max_cfu",
+ "resourceName": "resource_name",
+ "restEndpoint": "rest_endpoint"
+ },
"confluentcloud:index/getIdentityPool:getIdentityPool": {
"displayName": "display_name",
"identityClaim": "identity_claim",
@@ -2146,6 +2208,14 @@
"schemaRegistryCluster": "schema_registry_cluster",
"subjectName": "subject_name"
},
+ "confluentcloud:index/schemaExporter:SchemaExporter": {
+ "contextType": "context_type",
+ "destinationSchemaRegistryCluster": "destination_schema_registry_cluster",
+ "resetOnUpdate": "reset_on_update",
+ "restEndpoint": "rest_endpoint",
+ "schemaRegistryCluster": "schema_registry_cluster",
+ "subjectRenameFormat": "subject_rename_format"
+ },
"confluentcloud:index/schemaRegistryCluster:SchemaRegistryCluster": {
"apiVersion": "api_version",
"displayName": "display_name",
diff --git a/provider/cmd/pulumi-resource-confluentcloud/schema.json b/provider/cmd/pulumi-resource-confluentcloud/schema.json
index 16a4cb10..98341e5c 100644
--- a/provider/cmd/pulumi-resource-confluentcloud/schema.json
+++ b/provider/cmd/pulumi-resource-confluentcloud/schema.json
@@ -392,7 +392,7 @@
},
"secret": {
"type": "string",
- "description": "The Kafka API Secret.\n\n\u003e **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).\n",
+ "description": "The Kafka API Secret.\n",
"secret": true
}
},
@@ -438,7 +438,7 @@
},
"secret": {
"type": "string",
- "description": "The Kafka API Secret.\n\n\u003e **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).\n",
+ "description": "The Kafka API Secret.\n",
"secret": true
}
},
@@ -484,7 +484,7 @@
},
"secret": {
"type": "string",
- "description": "The Kafka API Secret.\n\n\u003e **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).\n",
+ "description": "The Kafka API Secret.\n",
"secret": true
}
},
@@ -530,7 +530,7 @@
},
"secret": {
"type": "string",
- "description": "The Kafka API Secret.\n\n\u003e **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).\n",
+ "description": "The Kafka API Secret.\n",
"secret": true
}
},
@@ -566,6 +566,19 @@
"id"
]
},
+ "confluentcloud:index/FlinkComputePoolEnvironment:FlinkComputePoolEnvironment": {
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`.\n",
+ "willReplaceOnChanges": true
+ }
+ },
+ "type": "object",
+ "required": [
+ "id"
+ ]
+ },
"confluentcloud:index/IdentityPoolIdentityProvider:IdentityPoolIdentityProvider": {
"properties": {
"id": {
@@ -1472,6 +1485,74 @@
"secret"
]
},
+ "confluentcloud:index/SchemaExporterCredentials:SchemaExporterCredentials": {
+ "properties": {
+ "key": {
+ "type": "string",
+ "description": "The Schema Registry API Key.\n",
+ "secret": true
+ },
+ "secret": {
+ "type": "string",
+ "description": "The Schema Registry API Secret.\n",
+ "secret": true
+ }
+ },
+ "type": "object",
+ "required": [
+ "key",
+ "secret"
+ ]
+ },
+ "confluentcloud:index/SchemaExporterDestinationSchemaRegistryCluster:SchemaExporterDestinationSchemaRegistryCluster": {
+ "properties": {
+ "credentials": {
+ "$ref": "#/types/confluentcloud:index/SchemaExporterDestinationSchemaRegistryClusterCredentials:SchemaExporterDestinationSchemaRegistryClusterCredentials",
+ "secret": true
+ },
+ "restEndpoint": {
+ "type": "string",
+          "description": "The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`.\n"
+ }
+ },
+ "type": "object",
+ "required": [
+ "credentials",
+ "restEndpoint"
+ ]
+ },
+ "confluentcloud:index/SchemaExporterDestinationSchemaRegistryClusterCredentials:SchemaExporterDestinationSchemaRegistryClusterCredentials": {
+ "properties": {
+ "key": {
+ "type": "string",
+ "description": "The Schema Registry API Key.\n",
+ "secret": true
+ },
+ "secret": {
+ "type": "string",
+ "description": "The Schema Registry API Secret.\n",
+ "secret": true
+ }
+ },
+ "type": "object",
+ "required": [
+ "key",
+ "secret"
+ ]
+ },
+ "confluentcloud:index/SchemaExporterSchemaRegistryCluster:SchemaExporterSchemaRegistryCluster": {
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "The ID of the Schema Registry cluster, for example, `lsrc-abc123`.\n",
+ "willReplaceOnChanges": true
+ }
+ },
+ "type": "object",
+ "required": [
+ "id"
+ ]
+ },
"confluentcloud:index/SchemaRegistryClusterConfigCredentials:SchemaRegistryClusterConfigCredentials": {
"properties": {
"key": {
@@ -1953,6 +2034,18 @@
}
}
},
+ "confluentcloud:index/getFlinkComputePoolEnvironment:getFlinkComputePoolEnvironment": {
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`.\n\n\u003e **Note:** Exactly one from the `id` and `display_name` attributes must be specified.\n"
+ }
+ },
+ "type": "object",
+ "required": [
+ "id"
+ ]
+ },
"confluentcloud:index/getIdentityPoolIdentityProvider:getIdentityPoolIdentityProvider": {
"properties": {
"id": {
@@ -4102,6 +4195,143 @@
"type": "object"
}
},
+ "confluentcloud:index/flinkComputePool:FlinkComputePool": {
+ "description": "{{% examples %}}\n## Example Usage\n{{% example %}}\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as confluentcloud from \"@pulumi/confluentcloud\";\n\nconst development = new confluentcloud.Environment(\"development\", {});\nconst main = new confluentcloud.FlinkComputePool(\"main\", {\n displayName: \"standard_compute_pool\",\n cloud: \"AWS\",\n region: \"us-east-1\",\n maxCfu: 5,\n environment: {\n id: development.id,\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_confluentcloud as confluentcloud\n\ndevelopment = confluentcloud.Environment(\"development\")\nmain = confluentcloud.FlinkComputePool(\"main\",\n display_name=\"standard_compute_pool\",\n cloud=\"AWS\",\n region=\"us-east-1\",\n max_cfu=5,\n environment=confluentcloud.FlinkComputePoolEnvironmentArgs(\n id=development.id,\n ))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing ConfluentCloud = Pulumi.ConfluentCloud;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var development = new ConfluentCloud.Environment(\"development\");\n\n var main = new ConfluentCloud.FlinkComputePool(\"main\", new()\n {\n DisplayName = \"standard_compute_pool\",\n Cloud = \"AWS\",\n Region = \"us-east-1\",\n MaxCfu = 5,\n Environment = new ConfluentCloud.Inputs.FlinkComputePoolEnvironmentArgs\n {\n Id = development.Id,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-confluentcloud/sdk/go/confluentcloud\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tdevelopment, err := confluentcloud.NewEnvironment(ctx, \"development\", nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = confluentcloud.NewFlinkComputePool(ctx, \"main\", \u0026confluentcloud.FlinkComputePoolArgs{\n\t\t\tDisplayName: pulumi.String(\"standard_compute_pool\"),\n\t\t\tCloud: pulumi.String(\"AWS\"),\n\t\t\tRegion: 
pulumi.String(\"us-east-1\"),\n\t\t\tMaxCfu: pulumi.Int(5),\n\t\t\tEnvironment: \u0026confluentcloud.FlinkComputePoolEnvironmentArgs{\n\t\t\t\tId: development.ID(),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.confluentcloud.Environment;\nimport com.pulumi.confluentcloud.FlinkComputePool;\nimport com.pulumi.confluentcloud.FlinkComputePoolArgs;\nimport com.pulumi.confluentcloud.inputs.FlinkComputePoolEnvironmentArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var development = new Environment(\"development\");\n\n var main = new FlinkComputePool(\"main\", FlinkComputePoolArgs.builder() \n .displayName(\"standard_compute_pool\")\n .cloud(\"AWS\")\n .region(\"us-east-1\")\n .maxCfu(5)\n .environment(FlinkComputePoolEnvironmentArgs.builder()\n .id(development.id())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n development:\n type: confluentcloud:Environment\n main:\n type: confluentcloud:FlinkComputePool\n properties:\n displayName: standard_compute_pool\n cloud: AWS\n region: us-east-1\n maxCfu: 5\n environment:\n id: ${development.id}\n```\n{{% /example %}}\n{{% /examples %}}\n\n## Import\n\nYou can import a Flink Compute Pool by using Environment ID and Flink Compute Pool ID, in the format `\u003cEnvironment ID\u003e/\u003cFlink Compute Pool ID\u003e`. 
The following example shows how to import a Flink Compute Pool$ export CONFLUENT_CLOUD_API_KEY=\"\u003ccloud_api_key\u003e\" $ export CONFLUENT_CLOUD_API_SECRET=\"\u003ccloud_api_secret\u003e\"\n\n```sh\n $ pulumi import confluentcloud:index/flinkComputePool:FlinkComputePool main env-abc123/lfcp-abc123\n```\n\n !\u003e **Warning:** Do not forget to delete terminal command history afterwards for security purposes. ",
+ "properties": {
+ "apiVersion": {
+ "type": "string",
+          "description": "(Required String) The API version of the Flink Compute Pool, for example, `fcpm/v2`.\n"
+ },
+ "cloud": {
+ "type": "string",
+ "description": "The cloud service provider that runs the Flink Compute Pool.\n"
+ },
+ "currentCfu": {
+ "type": "integer",
+ "description": "(Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool.\n"
+ },
+ "displayName": {
+ "type": "string",
+ "description": "The name of the Flink Compute Pool.\n"
+ },
+ "environment": {
+ "$ref": "#/types/confluentcloud:index/FlinkComputePoolEnvironment:FlinkComputePoolEnvironment",
+ "description": "Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.\n"
+ },
+ "kind": {
+ "type": "string",
+ "description": "(Required String) The kind of the Flink Compute Pool, for example, `ComputePool`.\n"
+ },
+ "maxCfu": {
+ "type": "integer",
+ "description": "Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`.\n"
+ },
+ "region": {
+ "type": "string",
+ "description": "The cloud service provider region that hosts the Flink Compute Pool.\n"
+ },
+ "resourceName": {
+ "type": "string",
+ "description": "(Required String) The Confluent Resource Name of the Flink Compute Pool.\n"
+ },
+ "restEndpoint": {
+ "type": "string",
+ "description": "(Required String) The API endpoint of the Flink Compute Pool.\n"
+ }
+ },
+ "required": [
+ "apiVersion",
+ "cloud",
+ "currentCfu",
+ "displayName",
+ "environment",
+ "kind",
+ "maxCfu",
+ "region",
+ "resourceName",
+ "restEndpoint"
+ ],
+ "inputProperties": {
+ "cloud": {
+ "type": "string",
+ "description": "The cloud service provider that runs the Flink Compute Pool.\n",
+ "willReplaceOnChanges": true
+ },
+ "displayName": {
+ "type": "string",
+ "description": "The name of the Flink Compute Pool.\n"
+ },
+ "environment": {
+ "$ref": "#/types/confluentcloud:index/FlinkComputePoolEnvironment:FlinkComputePoolEnvironment",
+ "description": "Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.\n",
+ "willReplaceOnChanges": true
+ },
+ "maxCfu": {
+ "type": "integer",
+ "description": "Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`.\n"
+ },
+ "region": {
+ "type": "string",
+ "description": "The cloud service provider region that hosts the Flink Compute Pool.\n",
+ "willReplaceOnChanges": true
+ }
+ },
+ "requiredInputs": [
+ "cloud",
+ "displayName",
+ "environment",
+ "region"
+ ],
+ "stateInputs": {
+ "description": "Input properties used for looking up and filtering FlinkComputePool resources.\n",
+ "properties": {
+ "apiVersion": {
+ "type": "string",
+            "description": "(Required String) The API version of the Flink Compute Pool, for example, `fcpm/v2`.\n"
+ },
+ "cloud": {
+ "type": "string",
+ "description": "The cloud service provider that runs the Flink Compute Pool.\n",
+ "willReplaceOnChanges": true
+ },
+ "currentCfu": {
+ "type": "integer",
+ "description": "(Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool.\n"
+ },
+ "displayName": {
+ "type": "string",
+ "description": "The name of the Flink Compute Pool.\n"
+ },
+ "environment": {
+ "$ref": "#/types/confluentcloud:index/FlinkComputePoolEnvironment:FlinkComputePoolEnvironment",
+ "description": "Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.\n",
+ "willReplaceOnChanges": true
+ },
+ "kind": {
+ "type": "string",
+ "description": "(Required String) The kind of the Flink Compute Pool, for example, `ComputePool`.\n"
+ },
+ "maxCfu": {
+ "type": "integer",
+ "description": "Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`.\n"
+ },
+ "region": {
+ "type": "string",
+ "description": "The cloud service provider region that hosts the Flink Compute Pool.\n",
+ "willReplaceOnChanges": true
+ },
+ "resourceName": {
+ "type": "string",
+ "description": "(Required String) The Confluent Resource Name of the Flink Compute Pool.\n"
+ },
+ "restEndpoint": {
+ "type": "string",
+ "description": "(Required String) The API endpoint of the Flink Compute Pool.\n"
+ }
+ },
+ "type": "object"
+ }
+ },
"confluentcloud:index/identityPool:IdentityPool": {
"description": "[![General Availability](https://img.shields.io/badge/Lifecycle%20Stage-General%20Availability-%2345c6e8)](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy)\n\n`confluentcloud.IdentityPool` provides an Identity Pool resource that enables creating, editing, and deleting identity pools on Confluent Cloud.\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n### Example Identity Pool to be used with Azure AD\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as confluentcloud from \"@pulumi/confluentcloud\";\n\nconst azure = new confluentcloud.IdentityProvider(\"azure\", {\n displayName: \"My OIDC Provider: Azure AD\",\n description: \"My description\",\n issuer: \"https://login.microsoftonline.com/{tenant_id}/v2.0\",\n jwksUri: \"https://login.microsoftonline.com/common/discovery/v2.0/keys\",\n});\nconst example = new confluentcloud.IdentityPool(\"example\", {\n identityProvider: {\n id: azure.id,\n },\n displayName: \"My Identity Pool\",\n description: \"Prod Access to Kafka clusters to Release Engineering\",\n identityClaim: \"claims.sub\",\n filter: \"claims.aud==\\\"confluent\\\" \u0026\u0026 claims.group!=\\\"invalid_group\\\"\",\n});\n```\n```python\nimport pulumi\nimport pulumi_confluentcloud as confluentcloud\n\nazure = confluentcloud.IdentityProvider(\"azure\",\n display_name=\"My OIDC Provider: Azure AD\",\n description=\"My description\",\n issuer=\"https://login.microsoftonline.com/{tenant_id}/v2.0\",\n jwks_uri=\"https://login.microsoftonline.com/common/discovery/v2.0/keys\")\nexample = confluentcloud.IdentityPool(\"example\",\n identity_provider=confluentcloud.IdentityPoolIdentityProviderArgs(\n id=azure.id,\n ),\n display_name=\"My Identity Pool\",\n description=\"Prod Access to Kafka clusters to Release Engineering\",\n identity_claim=\"claims.sub\",\n filter=\"claims.aud==\\\"confluent\\\" \u0026\u0026 claims.group!=\\\"invalid_group\\\"\")\n```\n```csharp\nusing 
System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing ConfluentCloud = Pulumi.ConfluentCloud;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var azure = new ConfluentCloud.IdentityProvider(\"azure\", new()\n {\n DisplayName = \"My OIDC Provider: Azure AD\",\n Description = \"My description\",\n Issuer = \"https://login.microsoftonline.com/{tenant_id}/v2.0\",\n JwksUri = \"https://login.microsoftonline.com/common/discovery/v2.0/keys\",\n });\n\n var example = new ConfluentCloud.IdentityPool(\"example\", new()\n {\n IdentityProvider = new ConfluentCloud.Inputs.IdentityPoolIdentityProviderArgs\n {\n Id = azure.Id,\n },\n DisplayName = \"My Identity Pool\",\n Description = \"Prod Access to Kafka clusters to Release Engineering\",\n IdentityClaim = \"claims.sub\",\n Filter = \"claims.aud==\\\"confluent\\\" \u0026\u0026 claims.group!=\\\"invalid_group\\\"\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-confluentcloud/sdk/go/confluentcloud\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tazure, err := confluentcloud.NewIdentityProvider(ctx, \"azure\", \u0026confluentcloud.IdentityProviderArgs{\n\t\t\tDisplayName: pulumi.String(\"My OIDC Provider: Azure AD\"),\n\t\t\tDescription: pulumi.String(\"My description\"),\n\t\t\tIssuer: pulumi.String(\"https://login.microsoftonline.com/{tenant_id}/v2.0\"),\n\t\t\tJwksUri: pulumi.String(\"https://login.microsoftonline.com/common/discovery/v2.0/keys\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = confluentcloud.NewIdentityPool(ctx, \"example\", \u0026confluentcloud.IdentityPoolArgs{\n\t\t\tIdentityProvider: \u0026confluentcloud.IdentityPoolIdentityProviderArgs{\n\t\t\t\tId: azure.ID(),\n\t\t\t},\n\t\t\tDisplayName: pulumi.String(\"My Identity Pool\"),\n\t\t\tDescription: pulumi.String(\"Prod Access to Kafka clusters to Release Engineering\"),\n\t\t\tIdentityClaim: 
pulumi.String(\"claims.sub\"),\n\t\t\tFilter: pulumi.String(\"claims.aud==\\\"confluent\\\" \u0026\u0026 claims.group!=\\\"invalid_group\\\"\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.confluentcloud.IdentityProvider;\nimport com.pulumi.confluentcloud.IdentityProviderArgs;\nimport com.pulumi.confluentcloud.IdentityPool;\nimport com.pulumi.confluentcloud.IdentityPoolArgs;\nimport com.pulumi.confluentcloud.inputs.IdentityPoolIdentityProviderArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var azure = new IdentityProvider(\"azure\", IdentityProviderArgs.builder() \n .displayName(\"My OIDC Provider: Azure AD\")\n .description(\"My description\")\n .issuer(\"https://login.microsoftonline.com/{tenant_id}/v2.0\")\n .jwksUri(\"https://login.microsoftonline.com/common/discovery/v2.0/keys\")\n .build());\n\n var example = new IdentityPool(\"example\", IdentityPoolArgs.builder() \n .identityProvider(IdentityPoolIdentityProviderArgs.builder()\n .id(azure.id())\n .build())\n .displayName(\"My Identity Pool\")\n .description(\"Prod Access to Kafka clusters to Release Engineering\")\n .identityClaim(\"claims.sub\")\n .filter(\"claims.aud==\\\"confluent\\\" \u0026\u0026 claims.group!=\\\"invalid_group\\\"\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n azure:\n type: confluentcloud:IdentityProvider\n properties:\n displayName: 'My OIDC Provider: Azure AD'\n description: My description\n issuer: https://login.microsoftonline.com/{tenant_id}/v2.0\n jwksUri: https://login.microsoftonline.com/common/discovery/v2.0/keys\n example:\n type: 
confluentcloud:IdentityPool\n properties:\n identityProvider:\n id: ${azure.id}\n displayName: My Identity Pool\n description: Prod Access to Kafka clusters to Release Engineering\n identityClaim: claims.sub\n filter: claims.aud==\"confluent\" \u0026\u0026 claims.group!=\"invalid_group\"\n```\n{{% /example %}}\n{{% example %}}\n### Example Identity Pool to be used with Okta\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as confluentcloud from \"@pulumi/confluentcloud\";\n\nconst okta = new confluentcloud.IdentityProvider(\"okta\", {\n displayName: \"My OIDC Provider: Okta\",\n description: \"My description\",\n issuer: \"https://mycompany.okta.com/oauth2/default\",\n jwksUri: \"https://mycompany.okta.com/oauth2/default/v1/keys\",\n});\nconst example = new confluentcloud.IdentityPool(\"example\", {\n identityProvider: {\n id: okta.id,\n },\n displayName: \"My Identity Pool\",\n description: \"Prod Access to Kafka clusters to Release Engineering\",\n identityClaim: \"claims.sub\",\n filter: \"claims.aud==\\\"confluent\\\" \u0026\u0026 claims.group!=\\\"invalid_group\\\"\",\n});\n```\n```python\nimport pulumi\nimport pulumi_confluentcloud as confluentcloud\n\nokta = confluentcloud.IdentityProvider(\"okta\",\n display_name=\"My OIDC Provider: Okta\",\n description=\"My description\",\n issuer=\"https://mycompany.okta.com/oauth2/default\",\n jwks_uri=\"https://mycompany.okta.com/oauth2/default/v1/keys\")\nexample = confluentcloud.IdentityPool(\"example\",\n identity_provider=confluentcloud.IdentityPoolIdentityProviderArgs(\n id=okta.id,\n ),\n display_name=\"My Identity Pool\",\n description=\"Prod Access to Kafka clusters to Release Engineering\",\n identity_claim=\"claims.sub\",\n filter=\"claims.aud==\\\"confluent\\\" \u0026\u0026 claims.group!=\\\"invalid_group\\\"\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing ConfluentCloud = Pulumi.ConfluentCloud;\n\nreturn await Deployment.RunAsync(() 
=\u003e \n{\n var okta = new ConfluentCloud.IdentityProvider(\"okta\", new()\n {\n DisplayName = \"My OIDC Provider: Okta\",\n Description = \"My description\",\n Issuer = \"https://mycompany.okta.com/oauth2/default\",\n JwksUri = \"https://mycompany.okta.com/oauth2/default/v1/keys\",\n });\n\n var example = new ConfluentCloud.IdentityPool(\"example\", new()\n {\n IdentityProvider = new ConfluentCloud.Inputs.IdentityPoolIdentityProviderArgs\n {\n Id = okta.Id,\n },\n DisplayName = \"My Identity Pool\",\n Description = \"Prod Access to Kafka clusters to Release Engineering\",\n IdentityClaim = \"claims.sub\",\n Filter = \"claims.aud==\\\"confluent\\\" \u0026\u0026 claims.group!=\\\"invalid_group\\\"\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-confluentcloud/sdk/go/confluentcloud\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tokta, err := confluentcloud.NewIdentityProvider(ctx, \"okta\", \u0026confluentcloud.IdentityProviderArgs{\n\t\t\tDisplayName: pulumi.String(\"My OIDC Provider: Okta\"),\n\t\t\tDescription: pulumi.String(\"My description\"),\n\t\t\tIssuer: pulumi.String(\"https://mycompany.okta.com/oauth2/default\"),\n\t\t\tJwksUri: pulumi.String(\"https://mycompany.okta.com/oauth2/default/v1/keys\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = confluentcloud.NewIdentityPool(ctx, \"example\", \u0026confluentcloud.IdentityPoolArgs{\n\t\t\tIdentityProvider: \u0026confluentcloud.IdentityPoolIdentityProviderArgs{\n\t\t\t\tId: okta.ID(),\n\t\t\t},\n\t\t\tDisplayName: pulumi.String(\"My Identity Pool\"),\n\t\t\tDescription: pulumi.String(\"Prod Access to Kafka clusters to Release Engineering\"),\n\t\t\tIdentityClaim: pulumi.String(\"claims.sub\"),\n\t\t\tFilter: pulumi.String(\"claims.aud==\\\"confluent\\\" \u0026\u0026 claims.group!=\\\"invalid_group\\\"\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn 
nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.confluentcloud.IdentityProvider;\nimport com.pulumi.confluentcloud.IdentityProviderArgs;\nimport com.pulumi.confluentcloud.IdentityPool;\nimport com.pulumi.confluentcloud.IdentityPoolArgs;\nimport com.pulumi.confluentcloud.inputs.IdentityPoolIdentityProviderArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var okta = new IdentityProvider(\"okta\", IdentityProviderArgs.builder() \n .displayName(\"My OIDC Provider: Okta\")\n .description(\"My description\")\n .issuer(\"https://mycompany.okta.com/oauth2/default\")\n .jwksUri(\"https://mycompany.okta.com/oauth2/default/v1/keys\")\n .build());\n\n var example = new IdentityPool(\"example\", IdentityPoolArgs.builder() \n .identityProvider(IdentityPoolIdentityProviderArgs.builder()\n .id(okta.id())\n .build())\n .displayName(\"My Identity Pool\")\n .description(\"Prod Access to Kafka clusters to Release Engineering\")\n .identityClaim(\"claims.sub\")\n .filter(\"claims.aud==\\\"confluent\\\" \u0026\u0026 claims.group!=\\\"invalid_group\\\"\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n okta:\n type: confluentcloud:IdentityProvider\n properties:\n displayName: 'My OIDC Provider: Okta'\n description: My description\n issuer: https://mycompany.okta.com/oauth2/default\n jwksUri: https://mycompany.okta.com/oauth2/default/v1/keys\n example:\n type: confluentcloud:IdentityPool\n properties:\n identityProvider:\n id: ${okta.id}\n displayName: My Identity Pool\n description: Prod Access to Kafka clusters to Release Engineering\n identityClaim: claims.sub\n filter: claims.aud==\"confluent\" \u0026\u0026 
claims.group!=\"invalid_group\"\n```\n\n{{% /example %}}\n{{% /examples %}}\n## External Documentation\n\n* [Use identity pools with your OAuth provider](https://docs.confluent.io/cloud/current/access-management/authenticate/oauth/identity-pools.html).\n\n\n## Import\n\nYou can import an Identity Pool by using Identity Provider ID and Identity Pool ID, in the format `\u003cIdentity Provider ID\u003e/\u003cIdentity Pool ID\u003e`. The following example shows how to import an Identity Pool$ export CONFLUENT_CLOUD_API_KEY=\"\u003ccloud_api_key\u003e\" $ export CONFLUENT_CLOUD_API_SECRET=\"\u003ccloud_api_secret\u003e\"\n\n```sh\n $ pulumi import confluentcloud:index/identityPool:IdentityPool example op-abc123/pool-xyz456\n```\n\n !\u003e **Warning:** Do not forget to delete terminal command history afterwards for security purposes. ",
"properties": {
@@ -4372,7 +4602,7 @@
}
},
"confluentcloud:index/kafkaAcl:KafkaAcl": {
- "description": "\n\n\n## Import\n\nYou can import Kafka ACLs by using the Kafka cluster ID and attributes of `confluent_kafka_acl` resource in the format `\u003cKafka cluster ID\u003e/\u003cKafka ACL resource type\u003e#\u003cKafka ACL resource name\u003e#\u003cKafka ACL pattern type\u003e#\u003cKafka ACL principal\u003e#\u003cKafka ACL host\u003e#\u003cKafka ACL operation\u003e#\u003cKafka ACL permission\u003e`, for exampleOption #1Manage multiple Kafka clusters in the same Terraform workspace $ export CONFLUENT_CLOUD_API_KEY=\"\u003ccloud_api_key\u003e\" $ export CONFLUENT_CLOUD_API_SECRET=\"\u003ccloud_api_secret\u003e\" $ export IMPORT_KAFKA_API_KEY=\"\u003ckafka_api_key\u003e\" $ export IMPORT_KAFKA_API_SECRET=\"\u003ckafka_api_secret\u003e\" $ export IMPORT_KAFKA_REST_ENDPOINT=\"\u003ckafka_rest_endpoint\u003e\"\n\n```sh\n $ pulumi import confluentcloud:index/kafkaAcl:KafkaAcl describe-cluster \"lkc-12345/CLUSTER#kafka-cluster#LITERAL#User:sa-xyz123#*#DESCRIBE#ALLOW\"\n```\n\n Option #2Manage a single Kafka cluster in the same Terraform workspace $ export CONFLUENT_CLOUD_API_KEY=\"\u003ccloud_api_key\u003e\" $ export CONFLUENT_CLOUD_API_SECRET=\"\u003ccloud_api_secret\u003e\"\n\n```sh\n $ pulumi import confluentcloud:index/kafkaAcl:KafkaAcl describe-cluster \"lkc-12345/CLUSTER#kafka-cluster#LITERAL#User:sa-xyz123#*#DESCRIBE#ALLOW\"\n```\n\n !\u003e **Warning:** Do not forget to delete terminal command history afterwards for security purposes. ",
+ "description": "\n\n\n## Import\n\nYou can import Kafka ACLs by using the Kafka cluster ID and attributes of `confluent_kafka_acl` resource in the format `\u003cKafka cluster ID\u003e/\u003cKafka ACL resource type\u003e#\u003cKafka ACL resource name\u003e#\u003cKafka ACL pattern type\u003e#\u003cKafka ACL principal\u003e#\u003cKafka ACL host\u003e#\u003cKafka ACL operation\u003e#\u003cKafka ACL permission\u003e`, for exampleOption #1Manage multiple Kafka clusters in the same Terraform workspace $ export IMPORT_KAFKA_API_KEY=\"\u003ckafka_api_key\u003e\" $ export IMPORT_KAFKA_API_SECRET=\"\u003ckafka_api_secret\u003e\" $ export IMPORT_KAFKA_REST_ENDPOINT=\"\u003ckafka_rest_endpoint\u003e\"\n\n```sh\n $ pulumi import confluentcloud:index/kafkaAcl:KafkaAcl describe-cluster \"lkc-12345/CLUSTER#kafka-cluster#LITERAL#User:sa-xyz123#*#DESCRIBE#ALLOW\"\n```\n\n Option #2Manage a single Kafka cluster in the same Terraform workspace $ export CONFLUENT_CLOUD_API_KEY=\"\u003ccloud_api_key\u003e\" $ export CONFLUENT_CLOUD_API_SECRET=\"\u003ccloud_api_secret\u003e\"\n\n```sh\n $ pulumi import confluentcloud:index/kafkaAcl:KafkaAcl describe-cluster \"lkc-12345/CLUSTER#kafka-cluster#LITERAL#User:sa-xyz123#*#DESCRIBE#ALLOW\"\n```\n\n !\u003e **Warning:** Do not forget to delete terminal command history afterwards for security purposes. ",
"properties": {
"credentials": {
"$ref": "#/types/confluentcloud:index/KafkaAclCredentials:KafkaAclCredentials",
@@ -6468,6 +6698,197 @@
"type": "object"
}
},
+ "confluentcloud:index/schemaExporter:SchemaExporter": {
+ "description": "\n\n\n## Import\n\nYou can import a Schema Exporter by using the Schema Registry cluster ID, Schema Exporter name in the format `\u003cSchema Registry cluster ID\u003e/\u003cSchema Exporter name\u003e`, for example$ export IMPORT_SCHEMA_REGISTRY_API_KEY=\"\u003cschema_registry_api_key\u003e\" $ export IMPORT_SCHEMA_REGISTRY_API_SECRET=\"\u003cschema_registry_api_secret\u003e\" $ export IMPORT_SCHEMA_REGISTRY_REST_ENDPOINT=\"\u003cschema_registry_rest_endpoint\u003e\"\n\n```sh\n $ pulumi import confluentcloud:index/schemaExporter:SchemaExporter main lsrc-8wrx70/test-exporter\n```\n\n !\u003e **Warning:** Do not forget to delete terminal command history afterwards for security purposes. ",
+ "properties": {
+ "config": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ },
+ "description": "Block for custom *nonsensitive* configuration properties:\n"
+ },
+ "context": {
+ "type": "string",
+ "description": "Customized context of the exporter if `context_type` is set to `CUSTOM`.\n"
+ },
+ "contextType": {
+ "type": "string",
+ "description": "Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`.\n"
+ },
+ "credentials": {
+ "$ref": "#/types/confluentcloud:index/SchemaExporterCredentials:SchemaExporterCredentials",
+ "description": "The Cluster API Credentials.\n",
+ "secret": true
+ },
+ "destinationSchemaRegistryCluster": {
+ "$ref": "#/types/confluentcloud:index/SchemaExporterDestinationSchemaRegistryCluster:SchemaExporterDestinationSchemaRegistryCluster"
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the Schema Exporter, for example, `test-exporter`.\n"
+ },
+ "resetOnUpdate": {
+ "type": "boolean",
+ "description": "The flag to control whether to reset the exporter when updating configs. Defaults to `false`.\n"
+ },
+ "restEndpoint": {
+ "type": "string",
+ "description": "The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`.\n"
+ },
+ "schemaRegistryCluster": {
+ "$ref": "#/types/confluentcloud:index/SchemaExporterSchemaRegistryCluster:SchemaExporterSchemaRegistryCluster"
+ },
+ "status": {
+ "type": "string",
+ "description": "The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`.\n"
+ },
+ "subjectRenameFormat": {
+ "type": "string",
+ "description": "Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dc_orders`.\n"
+ },
+ "subjects": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "description": "Name of each exporter subject.\n"
+ }
+ },
+ "required": [
+ "config",
+ "context",
+ "contextType",
+ "destinationSchemaRegistryCluster",
+ "name",
+ "status",
+ "subjectRenameFormat",
+ "subjects"
+ ],
+ "inputProperties": {
+ "config": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ },
+ "description": "Block for custom *nonsensitive* configuration properties:\n"
+ },
+ "context": {
+ "type": "string",
+ "description": "Customized context of the exporter if `context_type` is set to `CUSTOM`.\n"
+ },
+ "contextType": {
+ "type": "string",
+ "description": "Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`.\n"
+ },
+ "credentials": {
+ "$ref": "#/types/confluentcloud:index/SchemaExporterCredentials:SchemaExporterCredentials",
+ "description": "The Cluster API Credentials.\n",
+ "secret": true
+ },
+ "destinationSchemaRegistryCluster": {
+ "$ref": "#/types/confluentcloud:index/SchemaExporterDestinationSchemaRegistryCluster:SchemaExporterDestinationSchemaRegistryCluster"
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the Schema Exporter, for example, `test-exporter`.\n",
+ "willReplaceOnChanges": true
+ },
+ "resetOnUpdate": {
+ "type": "boolean",
+ "description": "The flag to control whether to reset the exporter when updating configs. Defaults to `false`.\n"
+ },
+ "restEndpoint": {
+ "type": "string",
+ "description": "The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`.\n",
+ "willReplaceOnChanges": true
+ },
+ "schemaRegistryCluster": {
+ "$ref": "#/types/confluentcloud:index/SchemaExporterSchemaRegistryCluster:SchemaExporterSchemaRegistryCluster",
+ "willReplaceOnChanges": true
+ },
+ "status": {
+ "type": "string",
+ "description": "The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`.\n"
+ },
+ "subjectRenameFormat": {
+ "type": "string",
+ "description": "Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dc_orders`.\n"
+ },
+ "subjects": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "description": "Name of each exporter subject.\n"
+ }
+ },
+ "requiredInputs": [
+ "destinationSchemaRegistryCluster"
+ ],
+ "stateInputs": {
+ "description": "Input properties used for looking up and filtering SchemaExporter resources.\n",
+ "properties": {
+ "config": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ },
+ "description": "Block for custom *nonsensitive* configuration properties:\n"
+ },
+ "context": {
+ "type": "string",
+ "description": "Customized context of the exporter if `context_type` is set to `CUSTOM`.\n"
+ },
+ "contextType": {
+ "type": "string",
+ "description": "Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`.\n"
+ },
+ "credentials": {
+ "$ref": "#/types/confluentcloud:index/SchemaExporterCredentials:SchemaExporterCredentials",
+ "description": "The Cluster API Credentials.\n",
+ "secret": true
+ },
+ "destinationSchemaRegistryCluster": {
+ "$ref": "#/types/confluentcloud:index/SchemaExporterDestinationSchemaRegistryCluster:SchemaExporterDestinationSchemaRegistryCluster"
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the Schema Exporter, for example, `test-exporter`.\n",
+ "willReplaceOnChanges": true
+ },
+ "resetOnUpdate": {
+ "type": "boolean",
+ "description": "The flag to control whether to reset the exporter when updating configs. Defaults to `false`.\n"
+ },
+ "restEndpoint": {
+ "type": "string",
+ "description": "The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`.\n",
+ "willReplaceOnChanges": true
+ },
+ "schemaRegistryCluster": {
+ "$ref": "#/types/confluentcloud:index/SchemaExporterSchemaRegistryCluster:SchemaExporterSchemaRegistryCluster",
+ "willReplaceOnChanges": true
+ },
+ "status": {
+ "type": "string",
+ "description": "The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`.\n"
+ },
+ "subjectRenameFormat": {
+ "type": "string",
+ "description": "Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dc_orders`.\n"
+ },
+ "subjects": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "description": "Name of each exporter subject.\n"
+ }
+ },
+ "type": "object"
+ }
+ },
"confluentcloud:index/schemaRegistryCluster:SchemaRegistryCluster": {
"description": "{{% examples %}}\n## Example Usage\n{{% example %}}\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as confluentcloud from \"@pulumi/confluentcloud\";\n\nconst development = new confluentcloud.Environment(\"development\", {});\nconst example = confluentcloud.getSchemaRegistryRegion({\n cloud: \"AWS\",\n region: \"us-east-2\",\n \"package\": \"ESSENTIALS\",\n});\nconst essentials = new confluentcloud.SchemaRegistryCluster(\"essentials\", {\n \"package\": example.then(example =\u003e example[\"package\"]),\n environment: {\n id: development.id,\n },\n region: {\n id: example.then(example =\u003e example.id),\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_confluentcloud as confluentcloud\n\ndevelopment = confluentcloud.Environment(\"development\")\nexample = confluentcloud.get_schema_registry_region(cloud=\"AWS\",\n region=\"us-east-2\",\n package=\"ESSENTIALS\")\nessentials = confluentcloud.SchemaRegistryCluster(\"essentials\",\n package=example.package,\n environment=confluentcloud.SchemaRegistryClusterEnvironmentArgs(\n id=development.id,\n ),\n region=confluentcloud.SchemaRegistryClusterRegionArgs(\n id=example.id,\n ))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing ConfluentCloud = Pulumi.ConfluentCloud;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var development = new ConfluentCloud.Environment(\"development\");\n\n var example = ConfluentCloud.GetSchemaRegistryRegion.Invoke(new()\n {\n Cloud = \"AWS\",\n Region = \"us-east-2\",\n Package = \"ESSENTIALS\",\n });\n\n var essentials = new ConfluentCloud.SchemaRegistryCluster(\"essentials\", new()\n {\n Package = example.Apply(getSchemaRegistryRegionResult =\u003e getSchemaRegistryRegionResult.Package),\n Environment = new ConfluentCloud.Inputs.SchemaRegistryClusterEnvironmentArgs\n {\n Id = development.Id,\n },\n Region = new ConfluentCloud.Inputs.SchemaRegistryClusterRegionArgs\n {\n Id = 
example.Apply(getSchemaRegistryRegionResult =\u003e getSchemaRegistryRegionResult.Id),\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-confluentcloud/sdk/go/confluentcloud\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tdevelopment, err := confluentcloud.NewEnvironment(ctx, \"development\", nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\texample, err := confluentcloud.GetSchemaRegistryRegion(ctx, \u0026confluentcloud.GetSchemaRegistryRegionArgs{\n\t\t\tCloud: \"AWS\",\n\t\t\tRegion: \"us-east-2\",\n\t\t\tPackage: \"ESSENTIALS\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = confluentcloud.NewSchemaRegistryCluster(ctx, \"essentials\", \u0026confluentcloud.SchemaRegistryClusterArgs{\n\t\t\tPackage: *pulumi.String(example.Package),\n\t\t\tEnvironment: \u0026confluentcloud.SchemaRegistryClusterEnvironmentArgs{\n\t\t\t\tId: development.ID(),\n\t\t\t},\n\t\t\tRegion: \u0026confluentcloud.SchemaRegistryClusterRegionArgs{\n\t\t\t\tId: *pulumi.String(example.Id),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.confluentcloud.Environment;\nimport com.pulumi.confluentcloud.ConfluentcloudFunctions;\nimport com.pulumi.confluentcloud.inputs.GetSchemaRegistryRegionArgs;\nimport com.pulumi.confluentcloud.SchemaRegistryCluster;\nimport com.pulumi.confluentcloud.SchemaRegistryClusterArgs;\nimport com.pulumi.confluentcloud.inputs.SchemaRegistryClusterEnvironmentArgs;\nimport com.pulumi.confluentcloud.inputs.SchemaRegistryClusterRegionArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] 
args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var development = new Environment(\"development\");\n\n final var example = ConfluentcloudFunctions.getSchemaRegistryRegion(GetSchemaRegistryRegionArgs.builder()\n .cloud(\"AWS\")\n .region(\"us-east-2\")\n .package_(\"ESSENTIALS\")\n .build());\n\n var essentials = new SchemaRegistryCluster(\"essentials\", SchemaRegistryClusterArgs.builder() \n .package_(example.applyValue(getSchemaRegistryRegionResult -\u003e getSchemaRegistryRegionResult.package()))\n .environment(SchemaRegistryClusterEnvironmentArgs.builder()\n .id(development.id())\n .build())\n .region(SchemaRegistryClusterRegionArgs.builder()\n .id(example.applyValue(getSchemaRegistryRegionResult -\u003e getSchemaRegistryRegionResult.id()))\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n development:\n type: confluentcloud:Environment\n essentials:\n type: confluentcloud:SchemaRegistryCluster\n properties:\n package: ${example.package}\n environment:\n id: ${development.id}\n region:\n id: ${example.id}\nvariables:\n example:\n fn::invoke:\n Function: confluentcloud:getSchemaRegistryRegion\n Arguments:\n cloud: AWS\n region: us-east-2\n package: ESSENTIALS\n```\n{{% /example %}}\n{{% /examples %}}\n\n## Import\n\nYou can import a Schema Registry cluster by using Environment ID and Schema Registry cluster ID, in the format `\u003cEnvironment ID\u003e/\u003cSchema Registry cluster ID\u003e`, for example$ export CONFLUENT_CLOUD_API_KEY=\"\u003ccloud_api_key\u003e\" $ export CONFLUENT_CLOUD_API_SECRET=\"\u003ccloud_api_secret\u003e\"\n\n```sh\n $ pulumi import confluentcloud:index/schemaRegistryCluster:SchemaRegistryCluster example env-abc123/lsrc-abc123\n```\n\n !\u003e **Warning:** Do not forget to delete terminal command history afterwards for security purposes. ",
"properties": {
@@ -7536,6 +7957,93 @@
]
}
},
+ "confluentcloud:index/getFlinkComputePool:getFlinkComputePool": {
+ "description": "[![Early Access](https://img.shields.io/badge/Lifecycle%20Stage-Early%20Access-%2300afba)](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy)\n\n\u003e **Note:** `confluentcloud.FlinkComputePool` data source is available in **Early Access** for early adopters. Early Access features are introduced to gather customer feedback. This feature should be used only for evaluation and non-production testing purposes or to provide feedback to Confluent, particularly as it becomes more widely available in follow-on editions. \n**Early Access** features are intended for evaluation use in development and testing environments only, and not for production use. The warranty, SLA, and Support Services provisions of your agreement with Confluent do not apply to Early Access features. Early Access features are considered to be a Proof of Concept as defined in the Confluent Cloud Terms of Service. Confluent may discontinue providing early access releases of the Early Access features at any time in Confluent’s sole discretion.\n\n`confluentcloud.FlinkComputePool` describes a Flink Compute Pool data source.\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as confluentcloud from \"@pulumi/confluentcloud\";\n\nconst exampleUsingIdFlinkComputePool = confluentcloud.getFlinkComputePool({\n id: \"lfcp-abc123\",\n environment: {\n id: \"env-xyz456\",\n },\n});\nexport const exampleUsingId = exampleUsingIdFlinkComputePool;\nconst exampleUsingNameFlinkComputePool = confluentcloud.getFlinkComputePool({\n displayName: \"my_compute_pool\",\n environment: {\n id: \"env-xyz456\",\n },\n});\nexport const exampleUsingName = exampleUsingNameFlinkComputePool;\n```\n```python\nimport pulumi\nimport pulumi_confluentcloud as confluentcloud\n\nexample_using_id_flink_compute_pool = confluentcloud.get_flink_compute_pool(id=\"lfcp-abc123\",\n 
environment=confluentcloud.GetFlinkComputePoolEnvironmentArgs(\n id=\"env-xyz456\",\n ))\npulumi.export(\"exampleUsingId\", example_using_id_flink_compute_pool)\nexample_using_name_flink_compute_pool = confluentcloud.get_flink_compute_pool(display_name=\"my_compute_pool\",\n environment=confluentcloud.GetFlinkComputePoolEnvironmentArgs(\n id=\"env-xyz456\",\n ))\npulumi.export(\"exampleUsingName\", example_using_name_flink_compute_pool)\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing ConfluentCloud = Pulumi.ConfluentCloud;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var exampleUsingIdFlinkComputePool = ConfluentCloud.GetFlinkComputePool.Invoke(new()\n {\n Id = \"lfcp-abc123\",\n Environment = new ConfluentCloud.Inputs.GetFlinkComputePoolEnvironmentInputArgs\n {\n Id = \"env-xyz456\",\n },\n });\n\n var exampleUsingNameFlinkComputePool = ConfluentCloud.GetFlinkComputePool.Invoke(new()\n {\n DisplayName = \"my_compute_pool\",\n Environment = new ConfluentCloud.Inputs.GetFlinkComputePoolEnvironmentInputArgs\n {\n Id = \"env-xyz456\",\n },\n });\n\n return new Dictionary\u003cstring, object?\u003e\n {\n [\"exampleUsingId\"] = exampleUsingIdFlinkComputePool,\n [\"exampleUsingName\"] = exampleUsingNameFlinkComputePool,\n };\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-confluentcloud/sdk/go/confluentcloud\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\texampleUsingIdFlinkComputePool, err := confluentcloud.LookupFlinkComputePool(ctx, \u0026confluentcloud.LookupFlinkComputePoolArgs{\n\t\t\tId: pulumi.StringRef(\"lfcp-abc123\"),\n\t\t\tEnvironment: confluentcloud.GetFlinkComputePoolEnvironment{\n\t\t\t\tId: \"env-xyz456\",\n\t\t\t},\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tctx.Export(\"exampleUsingId\", exampleUsingIdFlinkComputePool)\n\t\texampleUsingNameFlinkComputePool, err := 
confluentcloud.LookupFlinkComputePool(ctx, \u0026confluentcloud.LookupFlinkComputePoolArgs{\n\t\t\tDisplayName: pulumi.StringRef(\"my_compute_pool\"),\n\t\t\tEnvironment: confluentcloud.GetFlinkComputePoolEnvironment{\n\t\t\t\tId: \"env-xyz456\",\n\t\t\t},\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tctx.Export(\"exampleUsingName\", exampleUsingNameFlinkComputePool)\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.confluentcloud.ConfluentcloudFunctions;\nimport com.pulumi.confluentcloud.inputs.GetFlinkComputePoolArgs;\nimport com.pulumi.confluentcloud.inputs.GetFlinkComputePoolEnvironmentArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var exampleUsingIdFlinkComputePool = ConfluentcloudFunctions.getFlinkComputePool(GetFlinkComputePoolArgs.builder()\n .id(\"lfcp-abc123\")\n .environment(GetFlinkComputePoolEnvironmentArgs.builder()\n .id(\"env-xyz456\")\n .build())\n .build());\n\n ctx.export(\"exampleUsingId\", exampleUsingIdFlinkComputePool.applyValue(getFlinkComputePoolResult -\u003e getFlinkComputePoolResult));\n final var exampleUsingNameFlinkComputePool = ConfluentcloudFunctions.getFlinkComputePool(GetFlinkComputePoolArgs.builder()\n .displayName(\"my_compute_pool\")\n .environment(GetFlinkComputePoolEnvironmentArgs.builder()\n .id(\"env-xyz456\")\n .build())\n .build());\n\n ctx.export(\"exampleUsingName\", exampleUsingNameFlinkComputePool.applyValue(getFlinkComputePoolResult -\u003e getFlinkComputePoolResult));\n }\n}\n```\n```yaml\nvariables:\n exampleUsingIdFlinkComputePool:\n fn::invoke:\n Function: confluentcloud:getFlinkComputePool\n Arguments:\n id: 
lfcp-abc123\n environment:\n id: env-xyz456\n exampleUsingNameFlinkComputePool:\n fn::invoke:\n Function: confluentcloud:getFlinkComputePool\n Arguments:\n displayName: my_compute_pool\n environment:\n id: env-xyz456\noutputs:\n exampleUsingId: ${exampleUsingIdFlinkComputePool}\n exampleUsingName: ${exampleUsingNameFlinkComputePool}\n```\n{{% /example %}}\n{{% /examples %}}",
+ "inputs": {
+ "description": "A collection of arguments for invoking getFlinkComputePool.\n",
+ "properties": {
+ "displayName": {
+ "type": "string",
+ "description": "A human-readable name for the Flink Compute Pool.\n"
+ },
+ "environment": {
+ "$ref": "#/types/confluentcloud:index/getFlinkComputePoolEnvironment:getFlinkComputePoolEnvironment",
+ "description": "(Required Configuration Block) supports the following:\n"
+ },
+ "id": {
+ "type": "string",
+ "description": "The ID of the Flink Compute Pool, for example, `lfcp-abc123`.\n\n\u003e **Note:** Exactly one from the `id` and `display_name` attributes must be specified.\n"
+ }
+ },
+ "type": "object",
+ "required": [
+ "environment"
+ ]
+ },
+ "outputs": {
+ "description": "A collection of values returned by getFlinkComputePool.\n",
+ "properties": {
+ "apiVersion": {
+ "type": "string",
+ "description": "(Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`.\n"
+ },
+ "cloud": {
+ "type": "string",
+ "description": "(Required String) The cloud service provider that runs the Flink Compute Pool.\n"
+ },
+ "currentCfu": {
+ "type": "integer",
+ "description": "(Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool.\n"
+ },
+ "displayName": {
+ "type": "string",
+ "description": "(Required String) The name of the Flink Compute Pool.\n"
+ },
+ "environment": {
+ "$ref": "#/types/confluentcloud:index/getFlinkComputePoolEnvironment:getFlinkComputePoolEnvironment",
+ "description": "(Required Configuration Block) supports the following:\n"
+ },
+ "id": {
+ "type": "string",
+ "description": "(Required String) The ID of the Flink Compute Pool, for example, `lfcp-abc123`.\n"
+ },
+ "kind": {
+ "type": "string",
+ "description": "(Required String) The kind of the Flink Compute Pool, for example, `ComputePool`.\n"
+ },
+ "maxCfu": {
+ "type": "integer",
+ "description": "(Required Integer) Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to.\n"
+ },
+ "region": {
+ "type": "string",
+ "description": "(Required String) The cloud service provider region that hosts the Flink Compute Pool.\n"
+ },
+ "resourceName": {
+ "type": "string",
+ "description": "(Required String) The Confluent Resource Name of the Flink Compute Pool.\n"
+ },
+ "restEndpoint": {
+ "type": "string",
+ "description": "(Required String) The API endpoint of the Flink Compute Pool.\n"
+ }
+ },
+ "type": "object",
+ "required": [
+ "apiVersion",
+ "cloud",
+ "currentCfu",
+ "displayName",
+ "environment",
+ "id",
+ "kind",
+ "maxCfu",
+ "region",
+ "resourceName",
+ "restEndpoint"
+ ]
+ }
+ },
"confluentcloud:index/getIdentityPool:getIdentityPool": {
"description": "[![General Availability](https://img.shields.io/badge/Lifecycle%20Stage-General%20Availability-%2345c6e8)](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy)\n\n`confluentcloud.IdentityPool` describes an Identity Pool data source.\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as confluentcloud from \"@pulumi/confluentcloud\";\n\nconst exampleUsingIdIdentityPool = confluentcloud.getIdentityPool({\n id: \"pool-xyz456\",\n identityProvider: {\n id: \"op-abc123\",\n },\n});\nexport const exampleUsingId = exampleUsingIdIdentityPool;\nconst exampleUsingNameIdentityPool = confluentcloud.getIdentityPool({\n displayName: \"My Identity Pool\",\n identityProvider: {\n id: \"op-abc123\",\n },\n});\nexport const exampleUsingName = exampleUsingNameIdentityPool;\n```\n```python\nimport pulumi\nimport pulumi_confluentcloud as confluentcloud\n\nexample_using_id_identity_pool = confluentcloud.get_identity_pool(id=\"pool-xyz456\",\n identity_provider=confluentcloud.GetIdentityPoolIdentityProviderArgs(\n id=\"op-abc123\",\n ))\npulumi.export(\"exampleUsingId\", example_using_id_identity_pool)\nexample_using_name_identity_pool = confluentcloud.get_identity_pool(display_name=\"My Identity Pool\",\n identity_provider=confluentcloud.GetIdentityPoolIdentityProviderArgs(\n id=\"op-abc123\",\n ))\npulumi.export(\"exampleUsingName\", example_using_name_identity_pool)\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing ConfluentCloud = Pulumi.ConfluentCloud;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var exampleUsingIdIdentityPool = ConfluentCloud.GetIdentityPool.Invoke(new()\n {\n Id = \"pool-xyz456\",\n IdentityProvider = new ConfluentCloud.Inputs.GetIdentityPoolIdentityProviderInputArgs\n {\n Id = \"op-abc123\",\n },\n });\n\n var exampleUsingNameIdentityPool = ConfluentCloud.GetIdentityPool.Invoke(new()\n 
{\n DisplayName = \"My Identity Pool\",\n IdentityProvider = new ConfluentCloud.Inputs.GetIdentityPoolIdentityProviderInputArgs\n {\n Id = \"op-abc123\",\n },\n });\n\n return new Dictionary\u003cstring, object?\u003e\n {\n [\"exampleUsingId\"] = exampleUsingIdIdentityPool,\n [\"exampleUsingName\"] = exampleUsingNameIdentityPool,\n };\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-confluentcloud/sdk/go/confluentcloud\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\texampleUsingIdIdentityPool, err := confluentcloud.LookupIdentityPool(ctx, \u0026confluentcloud.LookupIdentityPoolArgs{\n\t\t\tId: pulumi.StringRef(\"pool-xyz456\"),\n\t\t\tIdentityProvider: confluentcloud.GetIdentityPoolIdentityProvider{\n\t\t\t\tId: \"op-abc123\",\n\t\t\t},\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tctx.Export(\"exampleUsingId\", exampleUsingIdIdentityPool)\n\t\texampleUsingNameIdentityPool, err := confluentcloud.LookupIdentityPool(ctx, \u0026confluentcloud.LookupIdentityPoolArgs{\n\t\t\tDisplayName: pulumi.StringRef(\"My Identity Pool\"),\n\t\t\tIdentityProvider: confluentcloud.GetIdentityPoolIdentityProvider{\n\t\t\t\tId: \"op-abc123\",\n\t\t\t},\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tctx.Export(\"exampleUsingName\", exampleUsingNameIdentityPool)\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.confluentcloud.ConfluentcloudFunctions;\nimport com.pulumi.confluentcloud.inputs.GetIdentityPoolArgs;\nimport com.pulumi.confluentcloud.inputs.GetIdentityPoolIdentityProviderArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n 
}\n\n public static void stack(Context ctx) {\n final var exampleUsingIdIdentityPool = ConfluentcloudFunctions.getIdentityPool(GetIdentityPoolArgs.builder()\n .id(\"pool-xyz456\")\n .identityProvider(GetIdentityPoolIdentityProviderArgs.builder()\n .id(\"op-abc123\")\n .build())\n .build());\n\n ctx.export(\"exampleUsingId\", exampleUsingIdIdentityPool.applyValue(getIdentityPoolResult -\u003e getIdentityPoolResult));\n final var exampleUsingNameIdentityPool = ConfluentcloudFunctions.getIdentityPool(GetIdentityPoolArgs.builder()\n .displayName(\"My Identity Pool\")\n .identityProvider(GetIdentityPoolIdentityProviderArgs.builder()\n .id(\"op-abc123\")\n .build())\n .build());\n\n ctx.export(\"exampleUsingName\", exampleUsingNameIdentityPool.applyValue(getIdentityPoolResult -\u003e getIdentityPoolResult));\n }\n}\n```\n```yaml\nvariables:\n exampleUsingIdIdentityPool:\n fn::invoke:\n Function: confluentcloud:getIdentityPool\n Arguments:\n id: pool-xyz456\n identityProvider:\n id: op-abc123\n exampleUsingNameIdentityPool:\n fn::invoke:\n Function: confluentcloud:getIdentityPool\n Arguments:\n displayName: My Identity Pool\n identityProvider:\n id: op-abc123\noutputs:\n exampleUsingId: ${exampleUsingIdIdentityPool}\n exampleUsingName: ${exampleUsingNameIdentityPool}\n```\n{{% /example %}}\n{{% /examples %}}",
"inputs": {
@@ -8750,7 +9258,7 @@
}
},
"confluentcloud:index/getRoleBinding:getRoleBinding": {
- "description": "[![General Availability](https://img.shields.io/badge/Lifecycle%20Stage-General%20Availability-%2345c6e8)](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy)\n\n`confluentcloud.RoleBinding` describes a Role Binding.\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as confluentcloud from \"@pulumi/confluentcloud\";\n\nconst exampleRoleBinding = confluentcloud.getRoleBinding({\n id: \"rb-abc123\",\n});\nexport const example = exampleRoleBinding;\n```\n```python\nimport pulumi\nimport pulumi_confluentcloud as confluentcloud\n\nexample_role_binding = confluentcloud.get_role_binding(id=\"rb-abc123\")\npulumi.export(\"example\", example_role_binding)\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing ConfluentCloud = Pulumi.ConfluentCloud;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var exampleRoleBinding = ConfluentCloud.GetRoleBinding.Invoke(new()\n {\n Id = \"rb-abc123\",\n });\n\n return new Dictionary\u003cstring, object?\u003e\n {\n [\"example\"] = exampleRoleBinding,\n };\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-confluentcloud/sdk/go/confluentcloud\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\texampleRoleBinding, err := confluentcloud.LookupRoleBinding(ctx, \u0026confluentcloud.LookupRoleBindingArgs{\n\t\t\tId: \"rb-abc123\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tctx.Export(\"example\", exampleRoleBinding)\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.confluentcloud.ConfluentcloudFunctions;\nimport com.pulumi.confluentcloud.inputs.GetRoleBindingArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport 
java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var exampleRoleBinding = ConfluentcloudFunctions.getRoleBinding(GetRoleBindingArgs.builder()\n .id(\"rb-abc123\")\n .build());\n\n ctx.export(\"example\", exampleRoleBinding.applyValue(getRoleBindingResult -\u003e getRoleBindingResult));\n }\n}\n```\n```yaml\nvariables:\n exampleRoleBinding:\n fn::invoke:\n Function: confluentcloud:getRoleBinding\n Arguments:\n id: rb-abc123\noutputs:\n example: ${exampleRoleBinding}\n```\n{{% /example %}}\n{{% /examples %}}",
+ "description": "[![General Availability](https://img.shields.io/badge/Lifecycle%20Stage-General%20Availability-%2345c6e8)](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy)\n\n`confluentcloud.RoleBinding` describes a Role Binding.\n\n\u003e **Note:** For more information on the Role Bindings, see [Predefined RBAC roles in Confluent Cloud](https://docs.confluent.io/cloud/current/access-management/access-control/rbac/predefined-rbac-roles.html).\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as confluentcloud from \"@pulumi/confluentcloud\";\n\nconst exampleRoleBinding = confluentcloud.getRoleBinding({\n id: \"rb-abc123\",\n});\nexport const example = exampleRoleBinding;\n```\n```python\nimport pulumi\nimport pulumi_confluentcloud as confluentcloud\n\nexample_role_binding = confluentcloud.get_role_binding(id=\"rb-abc123\")\npulumi.export(\"example\", example_role_binding)\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing ConfluentCloud = Pulumi.ConfluentCloud;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var exampleRoleBinding = ConfluentCloud.GetRoleBinding.Invoke(new()\n {\n Id = \"rb-abc123\",\n });\n\n return new Dictionary\u003cstring, object?\u003e\n {\n [\"example\"] = exampleRoleBinding,\n };\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-confluentcloud/sdk/go/confluentcloud\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\texampleRoleBinding, err := confluentcloud.LookupRoleBinding(ctx, \u0026confluentcloud.LookupRoleBindingArgs{\n\t\t\tId: \"rb-abc123\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tctx.Export(\"example\", exampleRoleBinding)\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport 
com.pulumi.core.Output;\nimport com.pulumi.confluentcloud.ConfluentcloudFunctions;\nimport com.pulumi.confluentcloud.inputs.GetRoleBindingArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var exampleRoleBinding = ConfluentcloudFunctions.getRoleBinding(GetRoleBindingArgs.builder()\n .id(\"rb-abc123\")\n .build());\n\n ctx.export(\"example\", exampleRoleBinding.applyValue(getRoleBindingResult -\u003e getRoleBindingResult));\n }\n}\n```\n```yaml\nvariables:\n exampleRoleBinding:\n fn::invoke:\n Function: confluentcloud:getRoleBinding\n Arguments:\n id: rb-abc123\noutputs:\n example: ${exampleRoleBinding}\n```\n{{% /example %}}\n{{% /examples %}}",
"inputs": {
"description": "A collection of arguments for invoking getRoleBinding.\n",
"properties": {
diff --git a/provider/go.mod b/provider/go.mod
index 5b77ff27..ab4fcf61 100644
--- a/provider/go.mod
+++ b/provider/go.mod
@@ -10,7 +10,7 @@ replace (
require (
github.com/confluentinc/terraform-provider-confluent v1.32.0
github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1
- github.com/pulumi/pulumi-terraform-bridge/v3 v3.60.0
+ github.com/pulumi/pulumi-terraform-bridge/v3 v3.60.1
github.com/pulumi/pulumi/sdk/v3 v3.81.0
)
@@ -92,7 +92,8 @@ require (
github.com/confluentinc/ccloud-sdk-go-v2/byok v0.0.1 // indirect
github.com/confluentinc/ccloud-sdk-go-v2/cmk v0.10.0 // indirect
github.com/confluentinc/ccloud-sdk-go-v2/connect v0.2.0 // indirect
- github.com/confluentinc/ccloud-sdk-go-v2/data-catalog v0.1.0 // indirect
+ github.com/confluentinc/ccloud-sdk-go-v2/data-catalog v0.2.0 // indirect
+ github.com/confluentinc/ccloud-sdk-go-v2/flink v0.5.0 // indirect
github.com/confluentinc/ccloud-sdk-go-v2/iam v0.10.0 // indirect
github.com/confluentinc/ccloud-sdk-go-v2/identity-provider v0.2.0 // indirect
github.com/confluentinc/ccloud-sdk-go-v2/kafka-quotas v0.4.0 // indirect
@@ -102,7 +103,7 @@ require (
github.com/confluentinc/ccloud-sdk-go-v2/networking v0.7.0 // indirect
github.com/confluentinc/ccloud-sdk-go-v2/networking-privatelink v0.1.0 // indirect
github.com/confluentinc/ccloud-sdk-go-v2/org v0.4.0 // indirect
- github.com/confluentinc/ccloud-sdk-go-v2/schema-registry v0.2.0 // indirect
+ github.com/confluentinc/ccloud-sdk-go-v2/schema-registry v0.3.0 // indirect
github.com/confluentinc/ccloud-sdk-go-v2/srcm v0.2.0 // indirect
github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81 // indirect
github.com/containerd/containerd v1.6.19 // indirect
diff --git a/provider/go.sum b/provider/go.sum
index 643becbc..8e918266 100644
--- a/provider/go.sum
+++ b/provider/go.sum
@@ -982,8 +982,10 @@ github.com/confluentinc/ccloud-sdk-go-v2/cmk v0.10.0 h1:YQEcSvhX5ODllg0mhxLivckK
github.com/confluentinc/ccloud-sdk-go-v2/cmk v0.10.0/go.mod h1:357Zo3HvVAe5iQgUFxUbQPAKJasGm8vFMkOB+krVmR8=
github.com/confluentinc/ccloud-sdk-go-v2/connect v0.2.0 h1:rEb3sxzKCZvZCnEZ10WyGqkVIdlqxJGbmP85/4C4YdE=
github.com/confluentinc/ccloud-sdk-go-v2/connect v0.2.0/go.mod h1:lF4AfDtxoL0V7ZIMOULWiAycPwlfyt9UG659adRNdOM=
-github.com/confluentinc/ccloud-sdk-go-v2/data-catalog v0.1.0 h1:HuU7SAId+hUi/SWbIXXXu6khDyGW/RANsNQxEwMNWmE=
-github.com/confluentinc/ccloud-sdk-go-v2/data-catalog v0.1.0/go.mod h1:27GwI+j82LDFydahgmKVroqw6oFxzbvIj+ZOnksaKGw=
+github.com/confluentinc/ccloud-sdk-go-v2/data-catalog v0.2.0 h1:ySx0jYNGK0XLcSkgPz+hxcH05v1LI5GVb3Rg+TCqBqk=
+github.com/confluentinc/ccloud-sdk-go-v2/data-catalog v0.2.0/go.mod h1:27GwI+j82LDFydahgmKVroqw6oFxzbvIj+ZOnksaKGw=
+github.com/confluentinc/ccloud-sdk-go-v2/flink v0.5.0 h1:5lh7TY2aUlZA0wUL0wAebZeGPp8uEnEhZawjYhnRUxo=
+github.com/confluentinc/ccloud-sdk-go-v2/flink v0.5.0/go.mod h1:x+8kpYsJHRlvGuIB/tV0afPNyjKst3MsNOE6XsjgAl0=
github.com/confluentinc/ccloud-sdk-go-v2/iam v0.10.0 h1:AV0bGk01bGfKzNq5IVqRi2iEc6YTeBbl//IYvQ/j8ag=
github.com/confluentinc/ccloud-sdk-go-v2/iam v0.10.0/go.mod h1:2Lm82ly9Yh5LLhp8OTnUGqjz4JdIXAZ5a0/u9T+rGGU=
github.com/confluentinc/ccloud-sdk-go-v2/identity-provider v0.2.0 h1:9TT8UCFRc5zUdsE7UgMz7hqN+2KYnIkBcAKCaiZJrXw=
@@ -1002,8 +1004,8 @@ github.com/confluentinc/ccloud-sdk-go-v2/networking-privatelink v0.1.0 h1:CA+3m6
github.com/confluentinc/ccloud-sdk-go-v2/networking-privatelink v0.1.0/go.mod h1:uj/ybBJPQbmuuBdSoznMiMGEwW3z/g0Uko8uKWg36I8=
github.com/confluentinc/ccloud-sdk-go-v2/org v0.4.0 h1:WcJs6RbY8nU5HapaG0ZCH9ftFBtZyuKMIuNAkdVmc2o=
github.com/confluentinc/ccloud-sdk-go-v2/org v0.4.0/go.mod h1:zREJ+OOZz0rEXCaPx0JbCVj2EfNnYs/c6qhPDfhldI0=
-github.com/confluentinc/ccloud-sdk-go-v2/schema-registry v0.2.0 h1:qR8cm4OmT/B2g3pINSBxt+dDMFep5wgVJGu6oHIrbdk=
-github.com/confluentinc/ccloud-sdk-go-v2/schema-registry v0.2.0/go.mod h1:uTE8K5/jg75ubJY1Flh6TfBIwVFVOchkLWqVsamwLYc=
+github.com/confluentinc/ccloud-sdk-go-v2/schema-registry v0.3.0 h1:AOqLmXM4nK41OaMaJpYXy5JKaJSZYs0mciFadvdBOQY=
+github.com/confluentinc/ccloud-sdk-go-v2/schema-registry v0.3.0/go.mod h1:uTE8K5/jg75ubJY1Flh6TfBIwVFVOchkLWqVsamwLYc=
github.com/confluentinc/ccloud-sdk-go-v2/srcm v0.2.0 h1:ezCzDCOOavjTPVKvdET0QjAXm1u9iRJjg2s5lkSMrps=
github.com/confluentinc/ccloud-sdk-go-v2/srcm v0.2.0/go.mod h1:qY4Y/QCDKI0eR+HLVJWGFstsTAiI83+sowKOyoRFhF0=
github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
@@ -2227,8 +2229,8 @@ github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40T
github.com/pulumi/pulumi-java/pkg v0.9.6 h1:UJrOAsYHRchwb4QlfI9Q224qg1TOI3rIsI6DDTUnn30=
github.com/pulumi/pulumi-java/pkg v0.9.6/go.mod h1:c6rSw/+q4O0IImgJ9axxoC6QesbPYWBaG5gimbHouUQ=
github.com/pulumi/pulumi-terraform-bridge/testing v0.0.1 h1:SCg1gjfY9N4yn8U8peIUYATifjoDABkyR7H9lmefsfc=
-github.com/pulumi/pulumi-terraform-bridge/v3 v3.60.0 h1:MPhSwNLJJlqLFHGfrXIRXZHzFIu05YLQldAJRYpOHRs=
-github.com/pulumi/pulumi-terraform-bridge/v3 v3.60.0/go.mod h1:o0Vfch2UXtHOnGYpNElzGg4htT6B8X8hS9fa5AguP7g=
+github.com/pulumi/pulumi-terraform-bridge/v3 v3.60.1 h1:+W2JHLi4y+G57jLPLJbDLv1xvm/9L2NO0gWXrtR8MDM=
+github.com/pulumi/pulumi-terraform-bridge/v3 v3.60.1/go.mod h1:o0Vfch2UXtHOnGYpNElzGg4htT6B8X8hS9fa5AguP7g=
github.com/pulumi/pulumi-terraform-bridge/x/muxer v0.0.4 h1:rIzMmtcVpPX8ynaz6/nW5AHNY63DiNfCohqmxWvMpM4=
github.com/pulumi/pulumi-terraform-bridge/x/muxer v0.0.4/go.mod h1:Kt8RIZWa/N8rW3+0g6NrqCBmF3o+HuIhFaZpssEkG6w=
github.com/pulumi/pulumi-yaml v1.2.2 h1:W6BeUBLhDrJ2GSU0em1AUVelG9PBI4ABY61DdhJOO3E=
diff --git a/sdk/dotnet/FlinkComputePool.cs b/sdk/dotnet/FlinkComputePool.cs
new file mode 100644
index 00000000..8b4071c4
--- /dev/null
+++ b/sdk/dotnet/FlinkComputePool.cs
@@ -0,0 +1,262 @@
+// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
+// *** Do not edit by hand unless you're certain you know what you are doing! ***
+
+using System;
+using System.Collections.Generic;
+using System.Collections.Immutable;
+using System.Threading.Tasks;
+using Pulumi.Serialization;
+
+namespace Pulumi.ConfluentCloud
+{
+ ///
+ /// ## Example Usage
+ ///
+ /// ```csharp
+ /// using System.Collections.Generic;
+ /// using System.Linq;
+ /// using Pulumi;
+ /// using ConfluentCloud = Pulumi.ConfluentCloud;
+ ///
+ /// return await Deployment.RunAsync(() =>
+ /// {
+ /// var development = new ConfluentCloud.Environment("development");
+ ///
+ /// var main = new ConfluentCloud.FlinkComputePool("main", new()
+ /// {
+ /// DisplayName = "standard_compute_pool",
+ /// Cloud = "AWS",
+ /// Region = "us-east-1",
+ /// MaxCfu = 5,
+ /// Environment = new ConfluentCloud.Inputs.FlinkComputePoolEnvironmentArgs
+ /// {
+ /// Id = development.Id,
+ /// },
+ /// });
+ ///
+ /// });
+ /// ```
+ ///
+ /// ## Import
+ ///
+ /// You can import a Flink Compute Pool by using Environment ID and Flink Compute Pool ID, in the format `<Environment ID>/<Flink Compute Pool ID>`. The following example shows how to import a Flink Compute Pool$ export CONFLUENT_CLOUD_API_KEY="<cloud_api_key>" $ export CONFLUENT_CLOUD_API_SECRET="<cloud_api_secret>"
+ ///
+ /// ```sh
+ /// $ pulumi import confluentcloud:index/flinkComputePool:FlinkComputePool main env-abc123/lfcp-abc123
+ /// ```
+ ///
+ /// !> **Warning:** Do not forget to delete terminal command history afterwards for security purposes.
+ ///
+ [ConfluentCloudResourceType("confluentcloud:index/flinkComputePool:FlinkComputePool")]
+ public partial class FlinkComputePool : global::Pulumi.CustomResource
+ {
+ ///
+ /// (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`.
+ ///
+ [Output("apiVersion")]
+ public Output ApiVersion { get; private set; } = null!;
+
+ ///
+ /// The cloud service provider that runs the Flink Compute Pool.
+ ///
+ [Output("cloud")]
+ public Output Cloud { get; private set; } = null!;
+
+ ///
+ /// (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool.
+ ///
+ [Output("currentCfu")]
+ public Output CurrentCfu { get; private set; } = null!;
+
+ ///
+ /// The name of the Flink Compute Pool.
+ ///
+ [Output("displayName")]
+ public Output DisplayName { get; private set; } = null!;
+
+ ///
+ /// Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
+ ///
+ [Output("environment")]
+ public Output Environment { get; private set; } = null!;
+
+ ///
+ /// (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`.
+ ///
+ [Output("kind")]
+ public Output Kind { get; private set; } = null!;
+
+ ///
+ /// Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`.
+ ///
+ [Output("maxCfu")]
+ public Output MaxCfu { get; private set; } = null!;
+
+ ///
+ /// The cloud service provider region that hosts the Flink Compute Pool.
+ ///
+ [Output("region")]
+ public Output Region { get; private set; } = null!;
+
+ ///
+ /// (Required String) The Confluent Resource Name of the Flink Compute Pool.
+ ///
+ [Output("resourceName")]
+ public Output ResourceName { get; private set; } = null!;
+
+ ///
+ /// (Required String) The API endpoint of the Flink Compute Pool.
+ ///
+ [Output("restEndpoint")]
+ public Output RestEndpoint { get; private set; } = null!;
+
+
+ ///
+ /// Create a FlinkComputePool resource with the given unique name, arguments, and options.
+ ///
+ ///
+ /// The unique name of the resource
+ /// The arguments used to populate this resource's properties
+ /// A bag of options that control this resource's behavior
+ public FlinkComputePool(string name, FlinkComputePoolArgs args, CustomResourceOptions? options = null)
+ : base("confluentcloud:index/flinkComputePool:FlinkComputePool", name, args ?? new FlinkComputePoolArgs(), MakeResourceOptions(options, ""))
+ {
+ }
+
+ private FlinkComputePool(string name, Input id, FlinkComputePoolState? state = null, CustomResourceOptions? options = null)
+ : base("confluentcloud:index/flinkComputePool:FlinkComputePool", name, state, MakeResourceOptions(options, id))
+ {
+ }
+
+ private static CustomResourceOptions MakeResourceOptions(CustomResourceOptions? options, Input? id)
+ {
+ var defaultOptions = new CustomResourceOptions
+ {
+ Version = Utilities.Version,
+ };
+ var merged = CustomResourceOptions.Merge(defaultOptions, options);
+ // Override the ID if one was specified for consistency with other language SDKs.
+ merged.Id = id ?? merged.Id;
+ return merged;
+ }
+ ///
+ /// Get an existing FlinkComputePool resource's state with the given name, ID, and optional extra
+ /// properties used to qualify the lookup.
+ ///
+ ///
+ /// The unique name of the resulting resource.
+ /// The unique provider ID of the resource to lookup.
+ /// Any extra arguments used during the lookup.
+ /// A bag of options that control this resource's behavior
+ public static FlinkComputePool Get(string name, Input id, FlinkComputePoolState? state = null, CustomResourceOptions? options = null)
+ {
+ return new FlinkComputePool(name, id, state, options);
+ }
+ }
+
+ public sealed class FlinkComputePoolArgs : global::Pulumi.ResourceArgs
+ {
+ ///
+ /// The cloud service provider that runs the Flink Compute Pool.
+ ///
+ [Input("cloud", required: true)]
+ public Input Cloud { get; set; } = null!;
+
+ ///
+ /// The name of the Flink Compute Pool.
+ ///
+ [Input("displayName", required: true)]
+ public Input DisplayName { get; set; } = null!;
+
+ ///
+ /// Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
+ ///
+ [Input("environment", required: true)]
+ public Input Environment { get; set; } = null!;
+
+ ///
+ /// Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`.
+ ///
+ [Input("maxCfu")]
+ public Input? MaxCfu { get; set; }
+
+ ///
+ /// The cloud service provider region that hosts the Flink Compute Pool.
+ ///
+ [Input("region", required: true)]
+ public Input Region { get; set; } = null!;
+
+ public FlinkComputePoolArgs()
+ {
+ }
+ public static new FlinkComputePoolArgs Empty => new FlinkComputePoolArgs();
+ }
+
+ public sealed class FlinkComputePoolState : global::Pulumi.ResourceArgs
+ {
+ ///
+ /// (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`.
+ ///
+ [Input("apiVersion")]
+ public Input? ApiVersion { get; set; }
+
+ ///
+ /// The cloud service provider that runs the Flink Compute Pool.
+ ///
+ [Input("cloud")]
+ public Input? Cloud { get; set; }
+
+ ///
+ /// (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool.
+ ///
+ [Input("currentCfu")]
+ public Input? CurrentCfu { get; set; }
+
+ ///
+ /// The name of the Flink Compute Pool.
+ ///
+ [Input("displayName")]
+ public Input? DisplayName { get; set; }
+
+ ///
+ /// Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
+ ///
+ [Input("environment")]
+ public Input? Environment { get; set; }
+
+ ///
+ /// (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`.
+ ///
+ [Input("kind")]
+ public Input? Kind { get; set; }
+
+ ///
+ /// Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`.
+ ///
+ [Input("maxCfu")]
+ public Input? MaxCfu { get; set; }
+
+ ///
+ /// The cloud service provider region that hosts the Flink Compute Pool.
+ ///
+ [Input("region")]
+ public Input? Region { get; set; }
+
+ ///
+ /// (Required String) The Confluent Resource Name of the Flink Compute Pool.
+ ///
+ [Input("resourceName")]
+ public Input? ResourceName { get; set; }
+
+ ///
+ /// (Required String) The API endpoint of the Flink Compute Pool.
+ ///
+ [Input("restEndpoint")]
+ public Input? RestEndpoint { get; set; }
+
+ public FlinkComputePoolState()
+ {
+ }
+ public static new FlinkComputePoolState Empty => new FlinkComputePoolState();
+ }
+}
diff --git a/sdk/dotnet/GetFlinkComputePool.cs b/sdk/dotnet/GetFlinkComputePool.cs
new file mode 100644
index 00000000..77be0f40
--- /dev/null
+++ b/sdk/dotnet/GetFlinkComputePool.cs
@@ -0,0 +1,260 @@
+// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
+// *** Do not edit by hand unless you're certain you know what you are doing! ***
+
+using System;
+using System.Collections.Generic;
+using System.Collections.Immutable;
+using System.Threading.Tasks;
+using Pulumi.Serialization;
+
+namespace Pulumi.ConfluentCloud
+{
+ public static class GetFlinkComputePool
+ {
+ ///
+ /// [![Early Access](https://img.shields.io/badge/Lifecycle%20Stage-Early%20Access-%2300afba)](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy)
+ ///
+ /// > **Note:** `confluentcloud.FlinkComputePool` data source is available in **Early Access** for early adopters. Early Access features are introduced to gather customer feedback. This feature should be used only for evaluation and non-production testing purposes or to provide feedback to Confluent, particularly as it becomes more widely available in follow-on editions.
+ /// **Early Access** features are intended for evaluation use in development and testing environments only, and not for production use. The warranty, SLA, and Support Services provisions of your agreement with Confluent do not apply to Early Access features. Early Access features are considered to be a Proof of Concept as defined in the Confluent Cloud Terms of Service. Confluent may discontinue providing early access releases of the Early Access features at any time in Confluent’s sole discretion.
+ ///
+ /// `confluentcloud.FlinkComputePool` describes a Flink Compute Pool data source.
+ ///
+ /// {{% examples %}}
+ /// ## Example Usage
+ /// {{% example %}}
+ ///
+ /// ```csharp
+ /// using System.Collections.Generic;
+ /// using System.Linq;
+ /// using Pulumi;
+ /// using ConfluentCloud = Pulumi.ConfluentCloud;
+ ///
+ /// return await Deployment.RunAsync(() =>
+ /// {
+ /// var exampleUsingIdFlinkComputePool = ConfluentCloud.GetFlinkComputePool.Invoke(new()
+ /// {
+ /// Id = "lfcp-abc123",
+ /// Environment = new ConfluentCloud.Inputs.GetFlinkComputePoolEnvironmentInputArgs
+ /// {
+ /// Id = "env-xyz456",
+ /// },
+ /// });
+ ///
+ /// var exampleUsingNameFlinkComputePool = ConfluentCloud.GetFlinkComputePool.Invoke(new()
+ /// {
+ /// DisplayName = "my_compute_pool",
+ /// Environment = new ConfluentCloud.Inputs.GetFlinkComputePoolEnvironmentInputArgs
+ /// {
+ /// Id = "env-xyz456",
+ /// },
+ /// });
+ ///
+ /// return new Dictionary<string, object?>
+ /// {
+ /// ["exampleUsingId"] = exampleUsingIdFlinkComputePool,
+ /// ["exampleUsingName"] = exampleUsingNameFlinkComputePool,
+ /// };
+ /// });
+ /// ```
+ /// {{% /example %}}
+ /// {{% /examples %}}
+ ///
+ public static Task InvokeAsync(GetFlinkComputePoolArgs args, InvokeOptions? options = null)
+ => global::Pulumi.Deployment.Instance.InvokeAsync("confluentcloud:index/getFlinkComputePool:getFlinkComputePool", args ?? new GetFlinkComputePoolArgs(), options.WithDefaults());
+
+ ///
+ /// [![Early Access](https://img.shields.io/badge/Lifecycle%20Stage-Early%20Access-%2300afba)](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy)
+ ///
+ /// > **Note:** `confluentcloud.FlinkComputePool` data source is available in **Early Access** for early adopters. Early Access features are introduced to gather customer feedback. This feature should be used only for evaluation and non-production testing purposes or to provide feedback to Confluent, particularly as it becomes more widely available in follow-on editions.
+ /// **Early Access** features are intended for evaluation use in development and testing environments only, and not for production use. The warranty, SLA, and Support Services provisions of your agreement with Confluent do not apply to Early Access features. Early Access features are considered to be a Proof of Concept as defined in the Confluent Cloud Terms of Service. Confluent may discontinue providing early access releases of the Early Access features at any time in Confluent’s sole discretion.
+ ///
+ /// `confluentcloud.FlinkComputePool` describes a Flink Compute Pool data source.
+ ///
+ /// {{% examples %}}
+ /// ## Example Usage
+ /// {{% example %}}
+ ///
+ /// ```csharp
+ /// using System.Collections.Generic;
+ /// using System.Linq;
+ /// using Pulumi;
+ /// using ConfluentCloud = Pulumi.ConfluentCloud;
+ ///
+ /// return await Deployment.RunAsync(() =>
+ /// {
+ /// var exampleUsingIdFlinkComputePool = ConfluentCloud.GetFlinkComputePool.Invoke(new()
+ /// {
+ /// Id = "lfcp-abc123",
+ /// Environment = new ConfluentCloud.Inputs.GetFlinkComputePoolEnvironmentInputArgs
+ /// {
+ /// Id = "env-xyz456",
+ /// },
+ /// });
+ ///
+ /// var exampleUsingNameFlinkComputePool = ConfluentCloud.GetFlinkComputePool.Invoke(new()
+ /// {
+ /// DisplayName = "my_compute_pool",
+ /// Environment = new ConfluentCloud.Inputs.GetFlinkComputePoolEnvironmentInputArgs
+ /// {
+ /// Id = "env-xyz456",
+ /// },
+ /// });
+ ///
+ /// return new Dictionary<string, object?>
+ /// {
+ /// ["exampleUsingId"] = exampleUsingIdFlinkComputePool,
+ /// ["exampleUsingName"] = exampleUsingNameFlinkComputePool,
+ /// };
+ /// });
+ /// ```
+ /// {{% /example %}}
+ /// {{% /examples %}}
+ ///
+ public static Output Invoke(GetFlinkComputePoolInvokeArgs args, InvokeOptions? options = null)
+ => global::Pulumi.Deployment.Instance.Invoke("confluentcloud:index/getFlinkComputePool:getFlinkComputePool", args ?? new GetFlinkComputePoolInvokeArgs(), options.WithDefaults());
+ }
+
+
+ public sealed class GetFlinkComputePoolArgs : global::Pulumi.InvokeArgs
+ {
+ ///
+ /// A human-readable name for the Flink Compute Pool.
+ ///
+ [Input("displayName")]
+ public string? DisplayName { get; set; }
+
+ ///
+ /// (Required Configuration Block) supports the following:
+ ///
+ [Input("environment", required: true)]
+ public Inputs.GetFlinkComputePoolEnvironmentArgs Environment { get; set; } = null!;
+
+ ///
+ /// The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`.
+ ///
+ /// > **Note:** Exactly one from the `id` and `display_name` attributes must be specified.
+ ///
+ [Input("id")]
+ public string? Id { get; set; }
+
+ public GetFlinkComputePoolArgs()
+ {
+ }
+ public static new GetFlinkComputePoolArgs Empty => new GetFlinkComputePoolArgs();
+ }
+
+ public sealed class GetFlinkComputePoolInvokeArgs : global::Pulumi.InvokeArgs
+ {
+ ///
+ /// A human-readable name for the Flink Compute Pool.
+ ///
+ [Input("displayName")]
+ public Input? DisplayName { get; set; }
+
+ ///
+ /// (Required Configuration Block) supports the following:
+ ///
+ [Input("environment", required: true)]
+ public Input Environment { get; set; } = null!;
+
+ ///
+ /// The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`.
+ ///
+ /// > **Note:** Exactly one from the `id` and `display_name` attributes must be specified.
+ ///
+ [Input("id")]
+ public Input? Id { get; set; }
+
+ public GetFlinkComputePoolInvokeArgs()
+ {
+ }
+ public static new GetFlinkComputePoolInvokeArgs Empty => new GetFlinkComputePoolInvokeArgs();
+ }
+
+
+ [OutputType]
+ public sealed class GetFlinkComputePoolResult
+ {
+ ///
+ /// (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`.
+ ///
+ public readonly string ApiVersion;
+ ///
+ /// (Required String) The cloud service provider that runs the Flink Compute Pool.
+ ///
+ public readonly string Cloud;
+ ///
+ /// (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool.
+ ///
+ public readonly int CurrentCfu;
+ ///
+ /// (Required String) The name of the Flink Compute Pool.
+ ///
+ public readonly string DisplayName;
+ ///
+ /// (Required Configuration Block) supports the following:
+ ///
+ public readonly Outputs.GetFlinkComputePoolEnvironmentResult Environment;
+ ///
+ /// (Required String) The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`.
+ ///
+ public readonly string Id;
+ ///
+ /// (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`.
+ ///
+ public readonly string Kind;
+ ///
+ /// (Required Integer) Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to.
+ ///
+ public readonly int MaxCfu;
+ ///
+ /// (Required String) The cloud service provider region that hosts the Flink Compute Pool.
+ ///
+ public readonly string Region;
+ ///
+ /// (Required String) The Confluent Resource Name of the Flink Compute Pool.
+ ///
+ public readonly string ResourceName;
+ ///
+ /// (Required String) The API endpoint of the Flink Compute Pool.
+ ///
+ public readonly string RestEndpoint;
+
+ [OutputConstructor]
+ private GetFlinkComputePoolResult(
+ string apiVersion,
+
+ string cloud,
+
+ int currentCfu,
+
+ string displayName,
+
+ Outputs.GetFlinkComputePoolEnvironmentResult environment,
+
+ string id,
+
+ string kind,
+
+ int maxCfu,
+
+ string region,
+
+ string resourceName,
+
+ string restEndpoint)
+ {
+ ApiVersion = apiVersion;
+ Cloud = cloud;
+ CurrentCfu = currentCfu;
+ DisplayName = displayName;
+ Environment = environment;
+ Id = id;
+ Kind = kind;
+ MaxCfu = maxCfu;
+ Region = region;
+ ResourceName = resourceName;
+ RestEndpoint = restEndpoint;
+ }
+ }
+}
diff --git a/sdk/dotnet/GetRoleBinding.cs b/sdk/dotnet/GetRoleBinding.cs
index 13558446..31e7abae 100644
--- a/sdk/dotnet/GetRoleBinding.cs
+++ b/sdk/dotnet/GetRoleBinding.cs
@@ -16,6 +16,8 @@ public static class GetRoleBinding
///
/// `confluentcloud.RoleBinding` describes a Role Binding.
///
+ /// > **Note:** For more information on the Role Bindings, see [Predefined RBAC roles in Confluent Cloud](https://docs.confluent.io/cloud/current/access-management/access-control/rbac/predefined-rbac-roles.html).
+ ///
/// {{% examples %}}
/// ## Example Usage
/// {{% example %}}
@@ -50,6 +52,8 @@ public static Task InvokeAsync(GetRoleBindingArgs args, In
///
/// `confluentcloud.RoleBinding` describes a Role Binding.
///
+ /// > **Note:** For more information on the Role Bindings, see [Predefined RBAC roles in Confluent Cloud](https://docs.confluent.io/cloud/current/access-management/access-control/rbac/predefined-rbac-roles.html).
+ ///
/// {{% examples %}}
/// ## Example Usage
/// {{% example %}}
diff --git a/sdk/dotnet/Inputs/ClusterLinkDestinationKafkaClusterCredentialsArgs.cs b/sdk/dotnet/Inputs/ClusterLinkDestinationKafkaClusterCredentialsArgs.cs
index 54e7021f..b805cebd 100644
--- a/sdk/dotnet/Inputs/ClusterLinkDestinationKafkaClusterCredentialsArgs.cs
+++ b/sdk/dotnet/Inputs/ClusterLinkDestinationKafkaClusterCredentialsArgs.cs
@@ -33,8 +33,6 @@ public Input? Key
///
/// The Kafka API Secret.
- ///
- /// > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).
///
public Input? Secret
{
diff --git a/sdk/dotnet/Inputs/ClusterLinkDestinationKafkaClusterCredentialsGetArgs.cs b/sdk/dotnet/Inputs/ClusterLinkDestinationKafkaClusterCredentialsGetArgs.cs
index 2df3f05f..5bf06f7a 100644
--- a/sdk/dotnet/Inputs/ClusterLinkDestinationKafkaClusterCredentialsGetArgs.cs
+++ b/sdk/dotnet/Inputs/ClusterLinkDestinationKafkaClusterCredentialsGetArgs.cs
@@ -33,8 +33,6 @@ public Input? Key
///
/// The Kafka API Secret.
- ///
- /// > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).
///
public Input? Secret
{
diff --git a/sdk/dotnet/Inputs/ClusterLinkLocalKafkaClusterCredentialsArgs.cs b/sdk/dotnet/Inputs/ClusterLinkLocalKafkaClusterCredentialsArgs.cs
index b5530d24..da98936e 100644
--- a/sdk/dotnet/Inputs/ClusterLinkLocalKafkaClusterCredentialsArgs.cs
+++ b/sdk/dotnet/Inputs/ClusterLinkLocalKafkaClusterCredentialsArgs.cs
@@ -33,8 +33,6 @@ public Input? Key
///
/// The Kafka API Secret.
- ///
- /// > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).
///
public Input? Secret
{
diff --git a/sdk/dotnet/Inputs/ClusterLinkLocalKafkaClusterCredentialsGetArgs.cs b/sdk/dotnet/Inputs/ClusterLinkLocalKafkaClusterCredentialsGetArgs.cs
index c1240aa3..f801e43e 100644
--- a/sdk/dotnet/Inputs/ClusterLinkLocalKafkaClusterCredentialsGetArgs.cs
+++ b/sdk/dotnet/Inputs/ClusterLinkLocalKafkaClusterCredentialsGetArgs.cs
@@ -33,8 +33,6 @@ public Input? Key
///
/// The Kafka API Secret.
- ///
- /// > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).
///
public Input? Secret
{
diff --git a/sdk/dotnet/Inputs/ClusterLinkRemoteKafkaClusterCredentialsArgs.cs b/sdk/dotnet/Inputs/ClusterLinkRemoteKafkaClusterCredentialsArgs.cs
index c4ff345b..27fbc309 100644
--- a/sdk/dotnet/Inputs/ClusterLinkRemoteKafkaClusterCredentialsArgs.cs
+++ b/sdk/dotnet/Inputs/ClusterLinkRemoteKafkaClusterCredentialsArgs.cs
@@ -33,8 +33,6 @@ public Input? Key
///
/// The Kafka API Secret.
- ///
- /// > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).
///
public Input? Secret
{
diff --git a/sdk/dotnet/Inputs/ClusterLinkRemoteKafkaClusterCredentialsGetArgs.cs b/sdk/dotnet/Inputs/ClusterLinkRemoteKafkaClusterCredentialsGetArgs.cs
index fa2f85cb..1abbabdd 100644
--- a/sdk/dotnet/Inputs/ClusterLinkRemoteKafkaClusterCredentialsGetArgs.cs
+++ b/sdk/dotnet/Inputs/ClusterLinkRemoteKafkaClusterCredentialsGetArgs.cs
@@ -33,8 +33,6 @@ public Input? Key
///
/// The Kafka API Secret.
- ///
- /// > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).
///
public Input? Secret
{
diff --git a/sdk/dotnet/Inputs/ClusterLinkSourceKafkaClusterCredentialsArgs.cs b/sdk/dotnet/Inputs/ClusterLinkSourceKafkaClusterCredentialsArgs.cs
index 91c07405..d124d70a 100644
--- a/sdk/dotnet/Inputs/ClusterLinkSourceKafkaClusterCredentialsArgs.cs
+++ b/sdk/dotnet/Inputs/ClusterLinkSourceKafkaClusterCredentialsArgs.cs
@@ -33,8 +33,6 @@ public Input? Key
///
/// The Kafka API Secret.
- ///
- /// > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).
///
public Input? Secret
{
diff --git a/sdk/dotnet/Inputs/ClusterLinkSourceKafkaClusterCredentialsGetArgs.cs b/sdk/dotnet/Inputs/ClusterLinkSourceKafkaClusterCredentialsGetArgs.cs
index df949ae0..5a70c8a1 100644
--- a/sdk/dotnet/Inputs/ClusterLinkSourceKafkaClusterCredentialsGetArgs.cs
+++ b/sdk/dotnet/Inputs/ClusterLinkSourceKafkaClusterCredentialsGetArgs.cs
@@ -33,8 +33,6 @@ public Input? Key
///
/// The Kafka API Secret.
- ///
- /// > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).
///
public Input? Secret
{
diff --git a/sdk/dotnet/Inputs/FlinkComputePoolEnvironmentArgs.cs b/sdk/dotnet/Inputs/FlinkComputePoolEnvironmentArgs.cs
new file mode 100644
index 00000000..8204fb28
--- /dev/null
+++ b/sdk/dotnet/Inputs/FlinkComputePoolEnvironmentArgs.cs
@@ -0,0 +1,26 @@
+// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
+// *** Do not edit by hand unless you're certain you know what you are doing! ***
+
+using System;
+using System.Collections.Generic;
+using System.Collections.Immutable;
+using System.Threading.Tasks;
+using Pulumi.Serialization;
+
+namespace Pulumi.ConfluentCloud.Inputs
+{
+
+ public sealed class FlinkComputePoolEnvironmentArgs : global::Pulumi.ResourceArgs
+ {
+        /// &lt;summary&gt;
+        /// The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`.
+        /// &lt;/summary&gt;
+        [Input("id", required: true)]
+        public Input&lt;string&gt; Id { get; set; } = null!;
+
+ public FlinkComputePoolEnvironmentArgs()
+ {
+ }
+ public static new FlinkComputePoolEnvironmentArgs Empty => new FlinkComputePoolEnvironmentArgs();
+ }
+}
diff --git a/sdk/dotnet/Inputs/FlinkComputePoolEnvironmentGetArgs.cs b/sdk/dotnet/Inputs/FlinkComputePoolEnvironmentGetArgs.cs
new file mode 100644
index 00000000..c7681602
--- /dev/null
+++ b/sdk/dotnet/Inputs/FlinkComputePoolEnvironmentGetArgs.cs
@@ -0,0 +1,26 @@
+// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
+// *** Do not edit by hand unless you're certain you know what you are doing! ***
+
+using System;
+using System.Collections.Generic;
+using System.Collections.Immutable;
+using System.Threading.Tasks;
+using Pulumi.Serialization;
+
+namespace Pulumi.ConfluentCloud.Inputs
+{
+
+ public sealed class FlinkComputePoolEnvironmentGetArgs : global::Pulumi.ResourceArgs
+ {
+        /// &lt;summary&gt;
+        /// The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`.
+        /// &lt;/summary&gt;
+        [Input("id", required: true)]
+        public Input&lt;string&gt; Id { get; set; } = null!;
+
+ public FlinkComputePoolEnvironmentGetArgs()
+ {
+ }
+ public static new FlinkComputePoolEnvironmentGetArgs Empty => new FlinkComputePoolEnvironmentGetArgs();
+ }
+}
diff --git a/sdk/dotnet/Inputs/GetFlinkComputePoolEnvironment.cs b/sdk/dotnet/Inputs/GetFlinkComputePoolEnvironment.cs
new file mode 100644
index 00000000..57854b10
--- /dev/null
+++ b/sdk/dotnet/Inputs/GetFlinkComputePoolEnvironment.cs
@@ -0,0 +1,28 @@
+// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
+// *** Do not edit by hand unless you're certain you know what you are doing! ***
+
+using System;
+using System.Collections.Generic;
+using System.Collections.Immutable;
+using System.Threading.Tasks;
+using Pulumi.Serialization;
+
+namespace Pulumi.ConfluentCloud.Inputs
+{
+
+ public sealed class GetFlinkComputePoolEnvironmentArgs : global::Pulumi.InvokeArgs
+ {
+        /// &lt;summary&gt;
+        /// The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`.
+        ///
+        /// &amp;gt; **Note:** Exactly one from the `id` and `display_name` attributes must be specified.
+        /// &lt;/summary&gt;
+        [Input("id", required: true)]
+        public string Id { get; set; } = null!;
+
+ public GetFlinkComputePoolEnvironmentArgs()
+ {
+ }
+ public static new GetFlinkComputePoolEnvironmentArgs Empty => new GetFlinkComputePoolEnvironmentArgs();
+ }
+}
diff --git a/sdk/dotnet/Inputs/GetFlinkComputePoolEnvironmentArgs.cs b/sdk/dotnet/Inputs/GetFlinkComputePoolEnvironmentArgs.cs
new file mode 100644
index 00000000..06f3ed54
--- /dev/null
+++ b/sdk/dotnet/Inputs/GetFlinkComputePoolEnvironmentArgs.cs
@@ -0,0 +1,28 @@
+// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
+// *** Do not edit by hand unless you're certain you know what you are doing! ***
+
+using System;
+using System.Collections.Generic;
+using System.Collections.Immutable;
+using System.Threading.Tasks;
+using Pulumi.Serialization;
+
+namespace Pulumi.ConfluentCloud.Inputs
+{
+
+ public sealed class GetFlinkComputePoolEnvironmentInputArgs : global::Pulumi.ResourceArgs
+ {
+        /// &lt;summary&gt;
+        /// The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`.
+        ///
+        /// &amp;gt; **Note:** Exactly one from the `id` and `display_name` attributes must be specified.
+        /// &lt;/summary&gt;
+        [Input("id", required: true)]
+        public Input&lt;string&gt; Id { get; set; } = null!;
+
+ public GetFlinkComputePoolEnvironmentInputArgs()
+ {
+ }
+ public static new GetFlinkComputePoolEnvironmentInputArgs Empty => new GetFlinkComputePoolEnvironmentInputArgs();
+ }
+}
diff --git a/sdk/dotnet/Inputs/SchemaExporterCredentialsArgs.cs b/sdk/dotnet/Inputs/SchemaExporterCredentialsArgs.cs
new file mode 100644
index 00000000..78d7d413
--- /dev/null
+++ b/sdk/dotnet/Inputs/SchemaExporterCredentialsArgs.cs
@@ -0,0 +1,52 @@
+// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
+// *** Do not edit by hand unless you're certain you know what you are doing! ***
+
+using System;
+using System.Collections.Generic;
+using System.Collections.Immutable;
+using System.Threading.Tasks;
+using Pulumi.Serialization;
+
+namespace Pulumi.ConfluentCloud.Inputs
+{
+
+ public sealed class SchemaExporterCredentialsArgs : global::Pulumi.ResourceArgs
+ {
+        [Input("key", required: true)]
+        private Input&lt;string&gt;? _key;
+
+        /// &lt;summary&gt;
+        /// The Schema Registry API Key.
+        /// &lt;/summary&gt;
+        public Input&lt;string&gt;? Key
+        {
+            get =&gt; _key;
+            set
+            {
+                var emptySecret = Output.CreateSecret(0);
+                _key = Output.Tuple&lt;Input&lt;string&gt;?, int&gt;(value, emptySecret).Apply(t =&gt; t.Item1);
+            }
+        }
+
+        [Input("secret", required: true)]
+        private Input&lt;string&gt;? _secret;
+
+        /// &lt;summary&gt;
+        /// The Schema Registry API Secret.
+        /// &lt;/summary&gt;
+        public Input&lt;string&gt;? Secret
+        {
+            get =&gt; _secret;
+            set
+            {
+                var emptySecret = Output.CreateSecret(0);
+                _secret = Output.Tuple&lt;Input&lt;string&gt;?, int&gt;(value, emptySecret).Apply(t =&gt; t.Item1);
+            }
+        }
+
+ public SchemaExporterCredentialsArgs()
+ {
+ }
+ public static new SchemaExporterCredentialsArgs Empty => new SchemaExporterCredentialsArgs();
+ }
+}
diff --git a/sdk/dotnet/Inputs/SchemaExporterCredentialsGetArgs.cs b/sdk/dotnet/Inputs/SchemaExporterCredentialsGetArgs.cs
new file mode 100644
index 00000000..72960a65
--- /dev/null
+++ b/sdk/dotnet/Inputs/SchemaExporterCredentialsGetArgs.cs
@@ -0,0 +1,52 @@
+// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
+// *** Do not edit by hand unless you're certain you know what you are doing! ***
+
+using System;
+using System.Collections.Generic;
+using System.Collections.Immutable;
+using System.Threading.Tasks;
+using Pulumi.Serialization;
+
+namespace Pulumi.ConfluentCloud.Inputs
+{
+
+ public sealed class SchemaExporterCredentialsGetArgs : global::Pulumi.ResourceArgs
+ {
+        [Input("key", required: true)]
+        private Input&lt;string&gt;? _key;
+
+        /// &lt;summary&gt;
+        /// The Schema Registry API Key.
+        /// &lt;/summary&gt;
+        public Input&lt;string&gt;? Key
+        {
+            get =&gt; _key;
+            set
+            {
+                var emptySecret = Output.CreateSecret(0);
+                _key = Output.Tuple&lt;Input&lt;string&gt;?, int&gt;(value, emptySecret).Apply(t =&gt; t.Item1);
+            }
+        }
+
+        [Input("secret", required: true)]
+        private Input&lt;string&gt;? _secret;
+
+        /// &lt;summary&gt;
+        /// The Schema Registry API Secret.
+        /// &lt;/summary&gt;
+        public Input&lt;string&gt;? Secret
+        {
+            get =&gt; _secret;
+            set
+            {
+                var emptySecret = Output.CreateSecret(0);
+                _secret = Output.Tuple&lt;Input&lt;string&gt;?, int&gt;(value, emptySecret).Apply(t =&gt; t.Item1);
+            }
+        }
+
+ public SchemaExporterCredentialsGetArgs()
+ {
+ }
+ public static new SchemaExporterCredentialsGetArgs Empty => new SchemaExporterCredentialsGetArgs();
+ }
+}
diff --git a/sdk/dotnet/Inputs/SchemaExporterDestinationSchemaRegistryClusterArgs.cs b/sdk/dotnet/Inputs/SchemaExporterDestinationSchemaRegistryClusterArgs.cs
new file mode 100644
index 00000000..aa938216
--- /dev/null
+++ b/sdk/dotnet/Inputs/SchemaExporterDestinationSchemaRegistryClusterArgs.cs
@@ -0,0 +1,38 @@
+// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
+// *** Do not edit by hand unless you're certain you know what you are doing! ***
+
+using System;
+using System.Collections.Generic;
+using System.Collections.Immutable;
+using System.Threading.Tasks;
+using Pulumi.Serialization;
+
+namespace Pulumi.ConfluentCloud.Inputs
+{
+
+ public sealed class SchemaExporterDestinationSchemaRegistryClusterArgs : global::Pulumi.ResourceArgs
+ {
+        [Input("credentials", required: true)]
+        private Input&lt;Inputs.SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs&gt;? _credentials;
+        public Input&lt;Inputs.SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs&gt;? Credentials
+        {
+            get =&gt; _credentials;
+            set
+            {
+                var emptySecret = Output.CreateSecret(0);
+                _credentials = Output.Tuple&lt;Input&lt;Inputs.SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs&gt;?, int&gt;(value, emptySecret).Apply(t =&gt; t.Item1);
+            }
+        }
+
+        /// &lt;summary&gt;
+        /// The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`).
+        /// &lt;/summary&gt;
+        [Input("restEndpoint", required: true)]
+        public Input&lt;string&gt; RestEndpoint { get; set; } = null!;
+
+ public SchemaExporterDestinationSchemaRegistryClusterArgs()
+ {
+ }
+ public static new SchemaExporterDestinationSchemaRegistryClusterArgs Empty => new SchemaExporterDestinationSchemaRegistryClusterArgs();
+ }
+}
diff --git a/sdk/dotnet/Inputs/SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs.cs b/sdk/dotnet/Inputs/SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs.cs
new file mode 100644
index 00000000..b880b2c4
--- /dev/null
+++ b/sdk/dotnet/Inputs/SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs.cs
@@ -0,0 +1,52 @@
+// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
+// *** Do not edit by hand unless you're certain you know what you are doing! ***
+
+using System;
+using System.Collections.Generic;
+using System.Collections.Immutable;
+using System.Threading.Tasks;
+using Pulumi.Serialization;
+
+namespace Pulumi.ConfluentCloud.Inputs
+{
+
+ public sealed class SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs : global::Pulumi.ResourceArgs
+ {
+        [Input("key", required: true)]
+        private Input&lt;string&gt;? _key;
+
+        /// &lt;summary&gt;
+        /// The Schema Registry API Key.
+        /// &lt;/summary&gt;
+        public Input&lt;string&gt;? Key
+        {
+            get =&gt; _key;
+            set
+            {
+                var emptySecret = Output.CreateSecret(0);
+                _key = Output.Tuple&lt;Input&lt;string&gt;?, int&gt;(value, emptySecret).Apply(t =&gt; t.Item1);
+            }
+        }
+
+        [Input("secret", required: true)]
+        private Input&lt;string&gt;? _secret;
+
+        /// &lt;summary&gt;
+        /// The Schema Registry API Secret.
+        /// &lt;/summary&gt;
+        public Input&lt;string&gt;? Secret
+        {
+            get =&gt; _secret;
+            set
+            {
+                var emptySecret = Output.CreateSecret(0);
+                _secret = Output.Tuple&lt;Input&lt;string&gt;?, int&gt;(value, emptySecret).Apply(t =&gt; t.Item1);
+            }
+        }
+
+ public SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs()
+ {
+ }
+ public static new SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs Empty => new SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs();
+ }
+}
diff --git a/sdk/dotnet/Inputs/SchemaExporterDestinationSchemaRegistryClusterCredentialsGetArgs.cs b/sdk/dotnet/Inputs/SchemaExporterDestinationSchemaRegistryClusterCredentialsGetArgs.cs
new file mode 100644
index 00000000..25b0a6b5
--- /dev/null
+++ b/sdk/dotnet/Inputs/SchemaExporterDestinationSchemaRegistryClusterCredentialsGetArgs.cs
@@ -0,0 +1,52 @@
+// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
+// *** Do not edit by hand unless you're certain you know what you are doing! ***
+
+using System;
+using System.Collections.Generic;
+using System.Collections.Immutable;
+using System.Threading.Tasks;
+using Pulumi.Serialization;
+
+namespace Pulumi.ConfluentCloud.Inputs
+{
+
+ public sealed class SchemaExporterDestinationSchemaRegistryClusterCredentialsGetArgs : global::Pulumi.ResourceArgs
+ {
+        [Input("key", required: true)]
+        private Input&lt;string&gt;? _key;
+
+        /// &lt;summary&gt;
+        /// The Schema Registry API Key.
+        /// &lt;/summary&gt;
+        public Input&lt;string&gt;? Key
+        {
+            get =&gt; _key;
+            set
+            {
+                var emptySecret = Output.CreateSecret(0);
+                _key = Output.Tuple&lt;Input&lt;string&gt;?, int&gt;(value, emptySecret).Apply(t =&gt; t.Item1);
+            }
+        }
+
+        [Input("secret", required: true)]
+        private Input&lt;string&gt;? _secret;
+
+        /// &lt;summary&gt;
+        /// The Schema Registry API Secret.
+        /// &lt;/summary&gt;
+        public Input&lt;string&gt;? Secret
+        {
+            get =&gt; _secret;
+            set
+            {
+                var emptySecret = Output.CreateSecret(0);
+                _secret = Output.Tuple&lt;Input&lt;string&gt;?, int&gt;(value, emptySecret).Apply(t =&gt; t.Item1);
+            }
+        }
+
+ public SchemaExporterDestinationSchemaRegistryClusterCredentialsGetArgs()
+ {
+ }
+ public static new SchemaExporterDestinationSchemaRegistryClusterCredentialsGetArgs Empty => new SchemaExporterDestinationSchemaRegistryClusterCredentialsGetArgs();
+ }
+}
diff --git a/sdk/dotnet/Inputs/SchemaExporterDestinationSchemaRegistryClusterGetArgs.cs b/sdk/dotnet/Inputs/SchemaExporterDestinationSchemaRegistryClusterGetArgs.cs
new file mode 100644
index 00000000..202d5fae
--- /dev/null
+++ b/sdk/dotnet/Inputs/SchemaExporterDestinationSchemaRegistryClusterGetArgs.cs
@@ -0,0 +1,38 @@
+// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
+// *** Do not edit by hand unless you're certain you know what you are doing! ***
+
+using System;
+using System.Collections.Generic;
+using System.Collections.Immutable;
+using System.Threading.Tasks;
+using Pulumi.Serialization;
+
+namespace Pulumi.ConfluentCloud.Inputs
+{
+
+ public sealed class SchemaExporterDestinationSchemaRegistryClusterGetArgs : global::Pulumi.ResourceArgs
+ {
+        [Input("credentials", required: true)]
+        private Input&lt;Inputs.SchemaExporterDestinationSchemaRegistryClusterCredentialsGetArgs&gt;? _credentials;
+        public Input&lt;Inputs.SchemaExporterDestinationSchemaRegistryClusterCredentialsGetArgs&gt;? Credentials
+        {
+            get =&gt; _credentials;
+            set
+            {
+                var emptySecret = Output.CreateSecret(0);
+                _credentials = Output.Tuple&lt;Input&lt;Inputs.SchemaExporterDestinationSchemaRegistryClusterCredentialsGetArgs&gt;?, int&gt;(value, emptySecret).Apply(t =&gt; t.Item1);
+            }
+        }
+
+        /// &lt;summary&gt;
+        /// The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`).
+        /// &lt;/summary&gt;
+        [Input("restEndpoint", required: true)]
+        public Input&lt;string&gt; RestEndpoint { get; set; } = null!;
+
+ public SchemaExporterDestinationSchemaRegistryClusterGetArgs()
+ {
+ }
+ public static new SchemaExporterDestinationSchemaRegistryClusterGetArgs Empty => new SchemaExporterDestinationSchemaRegistryClusterGetArgs();
+ }
+}
diff --git a/sdk/dotnet/Inputs/SchemaExporterSchemaRegistryClusterArgs.cs b/sdk/dotnet/Inputs/SchemaExporterSchemaRegistryClusterArgs.cs
new file mode 100644
index 00000000..c294ccbe
--- /dev/null
+++ b/sdk/dotnet/Inputs/SchemaExporterSchemaRegistryClusterArgs.cs
@@ -0,0 +1,26 @@
+// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
+// *** Do not edit by hand unless you're certain you know what you are doing! ***
+
+using System;
+using System.Collections.Generic;
+using System.Collections.Immutable;
+using System.Threading.Tasks;
+using Pulumi.Serialization;
+
+namespace Pulumi.ConfluentCloud.Inputs
+{
+
+ public sealed class SchemaExporterSchemaRegistryClusterArgs : global::Pulumi.ResourceArgs
+ {
+        /// &lt;summary&gt;
+        /// The ID of the Schema Registry cluster, for example, `lsrc-abc123`.
+        /// &lt;/summary&gt;
+        [Input("id", required: true)]
+        public Input&lt;string&gt; Id { get; set; } = null!;
+
+ public SchemaExporterSchemaRegistryClusterArgs()
+ {
+ }
+ public static new SchemaExporterSchemaRegistryClusterArgs Empty => new SchemaExporterSchemaRegistryClusterArgs();
+ }
+}
diff --git a/sdk/dotnet/Inputs/SchemaExporterSchemaRegistryClusterGetArgs.cs b/sdk/dotnet/Inputs/SchemaExporterSchemaRegistryClusterGetArgs.cs
new file mode 100644
index 00000000..7a5a335c
--- /dev/null
+++ b/sdk/dotnet/Inputs/SchemaExporterSchemaRegistryClusterGetArgs.cs
@@ -0,0 +1,26 @@
+// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
+// *** Do not edit by hand unless you're certain you know what you are doing! ***
+
+using System;
+using System.Collections.Generic;
+using System.Collections.Immutable;
+using System.Threading.Tasks;
+using Pulumi.Serialization;
+
+namespace Pulumi.ConfluentCloud.Inputs
+{
+
+ public sealed class SchemaExporterSchemaRegistryClusterGetArgs : global::Pulumi.ResourceArgs
+ {
+        /// &lt;summary&gt;
+        /// The ID of the Schema Registry cluster, for example, `lsrc-abc123`.
+        /// &lt;/summary&gt;
+        [Input("id", required: true)]
+        public Input&lt;string&gt; Id { get; set; } = null!;
+
+ public SchemaExporterSchemaRegistryClusterGetArgs()
+ {
+ }
+ public static new SchemaExporterSchemaRegistryClusterGetArgs Empty => new SchemaExporterSchemaRegistryClusterGetArgs();
+ }
+}
diff --git a/sdk/dotnet/KafkaAcl.cs b/sdk/dotnet/KafkaAcl.cs
index d3053c34..f3eb7a0a 100644
--- a/sdk/dotnet/KafkaAcl.cs
+++ b/sdk/dotnet/KafkaAcl.cs
@@ -12,7 +12,7 @@ namespace Pulumi.ConfluentCloud
///
/// ## Import
///
- /// You can import Kafka ACLs by using the Kafka cluster ID and attributes of `confluent_kafka_acl` resource in the format `<Kafka cluster ID>/<Kafka ACL resource type>#<Kafka ACL resource name>#<Kafka ACL pattern type>#<Kafka ACL principal>#<Kafka ACL host>#<Kafka ACL operation>#<Kafka ACL permission>`, for exampleOption #1Manage multiple Kafka clusters in the same Terraform workspace $ export CONFLUENT_CLOUD_API_KEY="<cloud_api_key>" $ export CONFLUENT_CLOUD_API_SECRET="<cloud_api_secret>" $ export IMPORT_KAFKA_API_KEY="<kafka_api_key>" $ export IMPORT_KAFKA_API_SECRET="<kafka_api_secret>" $ export IMPORT_KAFKA_REST_ENDPOINT="<kafka_rest_endpoint>"
+ /// You can import Kafka ACLs by using the Kafka cluster ID and attributes of `confluent_kafka_acl` resource in the format `<Kafka cluster ID>/<Kafka ACL resource type>#<Kafka ACL resource name>#<Kafka ACL pattern type>#<Kafka ACL principal>#<Kafka ACL host>#<Kafka ACL operation>#<Kafka ACL permission>`, for exampleOption #1Manage multiple Kafka clusters in the same Terraform workspace $ export IMPORT_KAFKA_API_KEY="<kafka_api_key>" $ export IMPORT_KAFKA_API_SECRET="<kafka_api_secret>" $ export IMPORT_KAFKA_REST_ENDPOINT="<kafka_rest_endpoint>"
///
/// ```sh
/// $ pulumi import confluentcloud:index/kafkaAcl:KafkaAcl describe-cluster "lkc-12345/CLUSTER#kafka-cluster#LITERAL#User:sa-xyz123#*#DESCRIBE#ALLOW"
diff --git a/sdk/dotnet/Outputs/ClusterLinkDestinationKafkaClusterCredentials.cs b/sdk/dotnet/Outputs/ClusterLinkDestinationKafkaClusterCredentials.cs
index a872e338..d2a935ef 100644
--- a/sdk/dotnet/Outputs/ClusterLinkDestinationKafkaClusterCredentials.cs
+++ b/sdk/dotnet/Outputs/ClusterLinkDestinationKafkaClusterCredentials.cs
@@ -19,8 +19,6 @@ public sealed class ClusterLinkDestinationKafkaClusterCredentials
public readonly string Key;
///
/// The Kafka API Secret.
- ///
- /// > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).
///
public readonly string Secret;
diff --git a/sdk/dotnet/Outputs/ClusterLinkLocalKafkaClusterCredentials.cs b/sdk/dotnet/Outputs/ClusterLinkLocalKafkaClusterCredentials.cs
index 50b7f0ee..4e3004eb 100644
--- a/sdk/dotnet/Outputs/ClusterLinkLocalKafkaClusterCredentials.cs
+++ b/sdk/dotnet/Outputs/ClusterLinkLocalKafkaClusterCredentials.cs
@@ -19,8 +19,6 @@ public sealed class ClusterLinkLocalKafkaClusterCredentials
public readonly string Key;
///
/// The Kafka API Secret.
- ///
- /// > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).
///
public readonly string Secret;
diff --git a/sdk/dotnet/Outputs/ClusterLinkRemoteKafkaClusterCredentials.cs b/sdk/dotnet/Outputs/ClusterLinkRemoteKafkaClusterCredentials.cs
index d70a4872..78fa7fde 100644
--- a/sdk/dotnet/Outputs/ClusterLinkRemoteKafkaClusterCredentials.cs
+++ b/sdk/dotnet/Outputs/ClusterLinkRemoteKafkaClusterCredentials.cs
@@ -19,8 +19,6 @@ public sealed class ClusterLinkRemoteKafkaClusterCredentials
public readonly string Key;
///
/// The Kafka API Secret.
- ///
- /// > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).
///
public readonly string Secret;
diff --git a/sdk/dotnet/Outputs/ClusterLinkSourceKafkaClusterCredentials.cs b/sdk/dotnet/Outputs/ClusterLinkSourceKafkaClusterCredentials.cs
index 01db8645..75bf982f 100644
--- a/sdk/dotnet/Outputs/ClusterLinkSourceKafkaClusterCredentials.cs
+++ b/sdk/dotnet/Outputs/ClusterLinkSourceKafkaClusterCredentials.cs
@@ -19,8 +19,6 @@ public sealed class ClusterLinkSourceKafkaClusterCredentials
public readonly string Key;
///
/// The Kafka API Secret.
- ///
- /// > **Note:** The `local_kafka_cluster`, `remote_kafka_cluster` configuration block and `link_mode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).
///
public readonly string Secret;
diff --git a/sdk/dotnet/Outputs/FlinkComputePoolEnvironment.cs b/sdk/dotnet/Outputs/FlinkComputePoolEnvironment.cs
new file mode 100644
index 00000000..a6bee702
--- /dev/null
+++ b/sdk/dotnet/Outputs/FlinkComputePoolEnvironment.cs
@@ -0,0 +1,27 @@
+// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
+// *** Do not edit by hand unless you're certain you know what you are doing! ***
+
+using System;
+using System.Collections.Generic;
+using System.Collections.Immutable;
+using System.Threading.Tasks;
+using Pulumi.Serialization;
+
+namespace Pulumi.ConfluentCloud.Outputs
+{
+
+ [OutputType]
+ public sealed class FlinkComputePoolEnvironment
+ {
+        /// &lt;summary&gt;
+        /// The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`.
+        /// &lt;/summary&gt;
+ public readonly string Id;
+
+ [OutputConstructor]
+ private FlinkComputePoolEnvironment(string id)
+ {
+ Id = id;
+ }
+ }
+}
diff --git a/sdk/dotnet/Outputs/GetFlinkComputePoolEnvironmentResult.cs b/sdk/dotnet/Outputs/GetFlinkComputePoolEnvironmentResult.cs
new file mode 100644
index 00000000..e73804d7
--- /dev/null
+++ b/sdk/dotnet/Outputs/GetFlinkComputePoolEnvironmentResult.cs
@@ -0,0 +1,29 @@
+// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
+// *** Do not edit by hand unless you're certain you know what you are doing! ***
+
+using System;
+using System.Collections.Generic;
+using System.Collections.Immutable;
+using System.Threading.Tasks;
+using Pulumi.Serialization;
+
+namespace Pulumi.ConfluentCloud.Outputs
+{
+
+ [OutputType]
+ public sealed class GetFlinkComputePoolEnvironmentResult
+ {
+        /// &lt;summary&gt;
+        /// The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`.
+        ///
+        /// &amp;gt; **Note:** Exactly one from the `id` and `display_name` attributes must be specified.
+        /// &lt;/summary&gt;
+ public readonly string Id;
+
+ [OutputConstructor]
+ private GetFlinkComputePoolEnvironmentResult(string id)
+ {
+ Id = id;
+ }
+ }
+}
diff --git a/sdk/dotnet/Outputs/SchemaExporterCredentials.cs b/sdk/dotnet/Outputs/SchemaExporterCredentials.cs
new file mode 100644
index 00000000..942c8f8f
--- /dev/null
+++ b/sdk/dotnet/Outputs/SchemaExporterCredentials.cs
@@ -0,0 +1,35 @@
+// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
+// *** Do not edit by hand unless you're certain you know what you are doing! ***
+
+using System;
+using System.Collections.Generic;
+using System.Collections.Immutable;
+using System.Threading.Tasks;
+using Pulumi.Serialization;
+
+namespace Pulumi.ConfluentCloud.Outputs
+{
+
+ [OutputType]
+ public sealed class SchemaExporterCredentials
+ {
+        /// &lt;summary&gt;
+        /// The Schema Registry API Key.
+        /// &lt;/summary&gt;
+        public readonly string Key;
+        /// &lt;summary&gt;
+        /// The Schema Registry API Secret.
+        /// &lt;/summary&gt;
+ public readonly string Secret;
+
+ [OutputConstructor]
+ private SchemaExporterCredentials(
+ string key,
+
+ string secret)
+ {
+ Key = key;
+ Secret = secret;
+ }
+ }
+}
diff --git a/sdk/dotnet/Outputs/SchemaExporterDestinationSchemaRegistryCluster.cs b/sdk/dotnet/Outputs/SchemaExporterDestinationSchemaRegistryCluster.cs
new file mode 100644
index 00000000..d28f2a8b
--- /dev/null
+++ b/sdk/dotnet/Outputs/SchemaExporterDestinationSchemaRegistryCluster.cs
@@ -0,0 +1,32 @@
+// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
+// *** Do not edit by hand unless you're certain you know what you are doing! ***
+
+using System;
+using System.Collections.Generic;
+using System.Collections.Immutable;
+using System.Threading.Tasks;
+using Pulumi.Serialization;
+
+namespace Pulumi.ConfluentCloud.Outputs
+{
+
+ [OutputType]
+ public sealed class SchemaExporterDestinationSchemaRegistryCluster
+ {
+ public readonly Outputs.SchemaExporterDestinationSchemaRegistryClusterCredentials Credentials;
+        /// &lt;summary&gt;
+        /// The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`).
+        /// &lt;/summary&gt;
+ public readonly string RestEndpoint;
+
+ [OutputConstructor]
+ private SchemaExporterDestinationSchemaRegistryCluster(
+ Outputs.SchemaExporterDestinationSchemaRegistryClusterCredentials credentials,
+
+ string restEndpoint)
+ {
+ Credentials = credentials;
+ RestEndpoint = restEndpoint;
+ }
+ }
+}
diff --git a/sdk/dotnet/Outputs/SchemaExporterDestinationSchemaRegistryClusterCredentials.cs b/sdk/dotnet/Outputs/SchemaExporterDestinationSchemaRegistryClusterCredentials.cs
new file mode 100644
index 00000000..6059a9c0
--- /dev/null
+++ b/sdk/dotnet/Outputs/SchemaExporterDestinationSchemaRegistryClusterCredentials.cs
@@ -0,0 +1,35 @@
+// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
+// *** Do not edit by hand unless you're certain you know what you are doing! ***
+
+using System;
+using System.Collections.Generic;
+using System.Collections.Immutable;
+using System.Threading.Tasks;
+using Pulumi.Serialization;
+
+namespace Pulumi.ConfluentCloud.Outputs
+{
+
+ [OutputType]
+ public sealed class SchemaExporterDestinationSchemaRegistryClusterCredentials
+ {
+        /// &lt;summary&gt;
+        /// The Schema Registry API Key.
+        /// &lt;/summary&gt;
+        public readonly string Key;
+        /// &lt;summary&gt;
+        /// The Schema Registry API Secret.
+        /// &lt;/summary&gt;
+ public readonly string Secret;
+
+ [OutputConstructor]
+ private SchemaExporterDestinationSchemaRegistryClusterCredentials(
+ string key,
+
+ string secret)
+ {
+ Key = key;
+ Secret = secret;
+ }
+ }
+}
diff --git a/sdk/dotnet/Outputs/SchemaExporterSchemaRegistryCluster.cs b/sdk/dotnet/Outputs/SchemaExporterSchemaRegistryCluster.cs
new file mode 100644
index 00000000..a0a37543
--- /dev/null
+++ b/sdk/dotnet/Outputs/SchemaExporterSchemaRegistryCluster.cs
@@ -0,0 +1,27 @@
+// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
+// *** Do not edit by hand unless you're certain you know what you are doing! ***
+
+using System;
+using System.Collections.Generic;
+using System.Collections.Immutable;
+using System.Threading.Tasks;
+using Pulumi.Serialization;
+
+namespace Pulumi.ConfluentCloud.Outputs
+{
+
+ [OutputType]
+ public sealed class SchemaExporterSchemaRegistryCluster
+ {
+        /// &lt;summary&gt;
+        /// The ID of the Schema Registry cluster, for example, `lsrc-abc123`.
+        /// &lt;/summary&gt;
+ public readonly string Id;
+
+ [OutputConstructor]
+ private SchemaExporterSchemaRegistryCluster(string id)
+ {
+ Id = id;
+ }
+ }
+}
diff --git a/sdk/dotnet/SchemaExporter.cs b/sdk/dotnet/SchemaExporter.cs
new file mode 100644
index 00000000..6ee14744
--- /dev/null
+++ b/sdk/dotnet/SchemaExporter.cs
@@ -0,0 +1,331 @@
+// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
+// *** Do not edit by hand unless you're certain you know what you are doing! ***
+
+using System;
+using System.Collections.Generic;
+using System.Collections.Immutable;
+using System.Threading.Tasks;
+using Pulumi.Serialization;
+
+namespace Pulumi.ConfluentCloud
+{
+    /// <summary>
+    /// ## Import
+    ///
+    /// You can import a Schema Exporter by using the Schema Registry cluster ID, Schema Exporter name in the format `<Schema Registry cluster ID>/<Schema Exporter name>`, for example$ export IMPORT_SCHEMA_REGISTRY_API_KEY="<schema_registry_api_key>" $ export IMPORT_SCHEMA_REGISTRY_API_SECRET="<schema_registry_api_secret>" $ export IMPORT_SCHEMA_REGISTRY_REST_ENDPOINT="<schema_registry_rest_endpoint>"
+    ///
+    /// ```sh
+    ///  $ pulumi import confluentcloud:index/schemaExporter:SchemaExporter main lsrc-8wrx70/test-exporter
+    /// ```
+    ///
+    /// !> **Warning:** Do not forget to delete terminal command history afterwards for security purposes.
+    /// </summary>
+    [ConfluentCloudResourceType("confluentcloud:index/schemaExporter:SchemaExporter")]
+    public partial class SchemaExporter : global::Pulumi.CustomResource
+    {
+        /// <summary>
+        /// Block for custom *nonsensitive* configuration properties:
+        /// </summary>
+        [Output("config")]
+        public Output<ImmutableDictionary<string, string>> Config { get; private set; } = null!;
+
+        /// <summary>
+        /// Customized context of the exporter if `context_type` is set to `CUSTOM`.
+        /// </summary>
+        [Output("context")]
+        public Output<string> Context { get; private set; } = null!;
+
+        /// <summary>
+        /// Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`.
+        /// </summary>
+        [Output("contextType")]
+        public Output<string> ContextType { get; private set; } = null!;
+
+        /// <summary>
+        /// The Cluster API Credentials.
+        /// </summary>
+        [Output("credentials")]
+        public Output<Outputs.SchemaExporterCredentials?> Credentials { get; private set; } = null!;
+
+        [Output("destinationSchemaRegistryCluster")]
+        public Output<Outputs.SchemaExporterDestinationSchemaRegistryCluster> DestinationSchemaRegistryCluster { get; private set; } = null!;
+
+        /// <summary>
+        /// The configuration setting name.
+        /// </summary>
+        [Output("name")]
+        public Output<string> Name { get; private set; } = null!;
+
+        /// <summary>
+        /// The flag to control whether to reset the exporter when updating configs. Defaults to `false`.
+        /// </summary>
+        [Output("resetOnUpdate")]
+        public Output<bool?> ResetOnUpdate { get; private set; } = null!;
+
+        /// <summary>
+        /// The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`).
+        /// </summary>
+        [Output("restEndpoint")]
+        public Output<string?> RestEndpoint { get; private set; } = null!;
+
+        [Output("schemaRegistryCluster")]
+        public Output<Outputs.SchemaExporterSchemaRegistryCluster?> SchemaRegistryCluster { get; private set; } = null!;
+
+        /// <summary>
+        /// The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`.
+        /// </summary>
+        [Output("status")]
+        public Output<string> Status { get; private set; } = null!;
+
+        /// <summary>
+        /// Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dc_orders`.
+        /// </summary>
+        [Output("subjectRenameFormat")]
+        public Output<string?> SubjectRenameFormat { get; private set; } = null!;
+
+        /// <summary>
+        /// Name of each exporter subject.
+        /// </summary>
+        [Output("subjects")]
+        public Output<ImmutableArray<string>> Subjects { get; private set; } = null!;
+
+
+        /// <summary>
+        /// Create a SchemaExporter resource with the given unique name, arguments, and options.
+        /// </summary>
+        ///
+        /// <param name="name">The unique name of the resource</param>
+        /// <param name="args">The arguments used to populate this resource's properties</param>
+        /// <param name="options">A bag of options that control this resource's behavior</param>
+        public SchemaExporter(string name, SchemaExporterArgs args, CustomResourceOptions? options = null)
+            : base("confluentcloud:index/schemaExporter:SchemaExporter", name, args ?? new SchemaExporterArgs(), MakeResourceOptions(options, ""))
+        {
+        }
+
+        private SchemaExporter(string name, Input<string> id, SchemaExporterState? state = null, CustomResourceOptions? options = null)
+            : base("confluentcloud:index/schemaExporter:SchemaExporter", name, state, MakeResourceOptions(options, id))
+        {
+        }
+
+        private static CustomResourceOptions MakeResourceOptions(CustomResourceOptions? options, Input<string>? id)
+        {
+            var defaultOptions = new CustomResourceOptions
+            {
+                Version = Utilities.Version,
+                AdditionalSecretOutputs =
+                {
+                    "credentials",
+                },
+            };
+            var merged = CustomResourceOptions.Merge(defaultOptions, options);
+            // Override the ID if one was specified for consistency with other language SDKs.
+            merged.Id = id ?? merged.Id;
+            return merged;
+        }
+        /// <summary>
+        /// Get an existing SchemaExporter resource's state with the given name, ID, and optional extra
+        /// properties used to qualify the lookup.
+        /// </summary>
+        ///
+        /// <param name="name">The unique name of the resulting resource.</param>
+        /// <param name="id">The unique provider ID of the resource to lookup.</param>
+        /// <param name="state">Any extra arguments used during the lookup.</param>
+        /// <param name="options">A bag of options that control this resource's behavior</param>
+        public static SchemaExporter Get(string name, Input<string> id, SchemaExporterState? state = null, CustomResourceOptions? options = null)
+        {
+            return new SchemaExporter(name, id, state, options);
+        }
+    }
+
+    public sealed class SchemaExporterArgs : global::Pulumi.ResourceArgs
+    {
+        [Input("config")]
+        private InputMap<string>? _config;
+
+        /// <summary>
+        /// Block for custom *nonsensitive* configuration properties:
+        /// </summary>
+        public InputMap<string> Config
+        {
+            get => _config ?? (_config = new InputMap<string>());
+            set => _config = value;
+        }
+
+        /// <summary>
+        /// Customized context of the exporter if `context_type` is set to `CUSTOM`.
+        /// </summary>
+        [Input("context")]
+        public Input<string>? Context { get; set; }
+
+        /// <summary>
+        /// Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`.
+        /// </summary>
+        [Input("contextType")]
+        public Input<string>? ContextType { get; set; }
+
+        [Input("credentials")]
+        private Input<Inputs.SchemaExporterCredentialsArgs>? _credentials;
+
+        /// <summary>
+        /// The Cluster API Credentials.
+        /// </summary>
+        public Input<Inputs.SchemaExporterCredentialsArgs>? Credentials
+        {
+            get => _credentials;
+            set
+            {
+                var emptySecret = Output.CreateSecret(0);
+                _credentials = Output.Tuple<Input<Inputs.SchemaExporterCredentialsArgs>?, int>(value, emptySecret).Apply(t => t.Item1);
+            }
+        }
+
+        [Input("destinationSchemaRegistryCluster", required: true)]
+        public Input<Inputs.SchemaExporterDestinationSchemaRegistryClusterArgs> DestinationSchemaRegistryCluster { get; set; } = null!;
+
+        /// <summary>
+        /// The configuration setting name.
+        /// </summary>
+        [Input("name")]
+        public Input<string>? Name { get; set; }
+
+        /// <summary>
+        /// The flag to control whether to reset the exporter when updating configs. Defaults to `false`.
+        /// </summary>
+        [Input("resetOnUpdate")]
+        public Input<bool>? ResetOnUpdate { get; set; }
+
+        /// <summary>
+        /// The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`).
+        /// </summary>
+        [Input("restEndpoint")]
+        public Input<string>? RestEndpoint { get; set; }
+
+        [Input("schemaRegistryCluster")]
+        public Input<Inputs.SchemaExporterSchemaRegistryClusterArgs>? SchemaRegistryCluster { get; set; }
+
+        /// <summary>
+        /// The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`.
+        /// </summary>
+        [Input("status")]
+        public Input<string>? Status { get; set; }
+
+        /// <summary>
+        /// Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dc_orders`.
+        /// </summary>
+        [Input("subjectRenameFormat")]
+        public Input<string>? SubjectRenameFormat { get; set; }
+
+        [Input("subjects")]
+        private InputList<string>? _subjects;
+
+        /// <summary>
+        /// Name of each exporter subject.
+        /// </summary>
+        public InputList<string> Subjects
+        {
+            get => _subjects ?? (_subjects = new InputList<string>());
+            set => _subjects = value;
+        }
+
+        public SchemaExporterArgs()
+        {
+        }
+        public static new SchemaExporterArgs Empty => new SchemaExporterArgs();
+    }
+
+    public sealed class SchemaExporterState : global::Pulumi.ResourceArgs
+    {
+        [Input("config")]
+        private InputMap<string>? _config;
+
+        /// <summary>
+        /// Block for custom *nonsensitive* configuration properties:
+        /// </summary>
+        public InputMap<string> Config
+        {
+            get => _config ?? (_config = new InputMap<string>());
+            set => _config = value;
+        }
+
+        /// <summary>
+        /// Customized context of the exporter if `context_type` is set to `CUSTOM`.
+        /// </summary>
+        [Input("context")]
+        public Input<string>? Context { get; set; }
+
+        /// <summary>
+        /// Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`.
+        /// </summary>
+        [Input("contextType")]
+        public Input<string>? ContextType { get; set; }
+
+        [Input("credentials")]
+        private Input<Inputs.SchemaExporterCredentialsGetArgs>? _credentials;
+
+        /// <summary>
+        /// The Cluster API Credentials.
+        /// </summary>
+        public Input<Inputs.SchemaExporterCredentialsGetArgs>? Credentials
+        {
+            get => _credentials;
+            set
+            {
+                var emptySecret = Output.CreateSecret(0);
+                _credentials = Output.Tuple<Input<Inputs.SchemaExporterCredentialsGetArgs>?, int>(value, emptySecret).Apply(t => t.Item1);
+            }
+        }
+
+        [Input("destinationSchemaRegistryCluster")]
+        public Input<Inputs.SchemaExporterDestinationSchemaRegistryClusterGetArgs>? DestinationSchemaRegistryCluster { get; set; }
+
+        /// <summary>
+        /// The configuration setting name.
+        /// </summary>
+        [Input("name")]
+        public Input<string>? Name { get; set; }
+
+        /// <summary>
+        /// The flag to control whether to reset the exporter when updating configs. Defaults to `false`.
+        /// </summary>
+        [Input("resetOnUpdate")]
+        public Input<bool>? ResetOnUpdate { get; set; }
+
+        /// <summary>
+        /// The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`).
+        /// </summary>
+        [Input("restEndpoint")]
+        public Input<string>? RestEndpoint { get; set; }
+
+        [Input("schemaRegistryCluster")]
+        public Input<Inputs.SchemaExporterSchemaRegistryClusterGetArgs>? SchemaRegistryCluster { get; set; }
+
+        /// <summary>
+        /// The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`.
+        /// </summary>
+        [Input("status")]
+        public Input<string>? Status { get; set; }
+
+        /// <summary>
+        /// Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dc_orders`.
+        /// </summary>
+        [Input("subjectRenameFormat")]
+        public Input<string>? SubjectRenameFormat { get; set; }
+
+        [Input("subjects")]
+        private InputList<string>? _subjects;
+
+        /// <summary>
+        /// Name of each exporter subject.
+        /// </summary>
+        public InputList<string> Subjects
+        {
+            get => _subjects ?? (_subjects = new InputList<string>());
+            set => _subjects = value;
+        }
+
+        public SchemaExporterState()
+        {
+        }
+        public static new SchemaExporterState Empty => new SchemaExporterState();
+    }
+}
diff --git a/sdk/go/confluentcloud/flinkComputePool.go b/sdk/go/confluentcloud/flinkComputePool.go
new file mode 100644
index 00000000..3f7bad18
--- /dev/null
+++ b/sdk/go/confluentcloud/flinkComputePool.go
@@ -0,0 +1,426 @@
+// Code generated by the Pulumi Terraform Bridge (tfgen) Tool DO NOT EDIT.
+// *** WARNING: Do not edit by hand unless you're certain you know what you are doing! ***
+
+package confluentcloud
+
+import (
+ "context"
+ "reflect"
+
+ "errors"
+ "github.com/pulumi/pulumi-confluentcloud/sdk/go/confluentcloud/internal"
+ "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
+ "github.com/pulumi/pulumi/sdk/v3/go/pulumix"
+)
+
+// ## Example Usage
+//
+// ```go
+// package main
+//
+// import (
+//
+//	"github.com/pulumi/pulumi-confluentcloud/sdk/go/confluentcloud"
+//	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
+//
+// )
+//
+// func main() {
+//	pulumi.Run(func(ctx *pulumi.Context) error {
+//		development, err := confluentcloud.NewEnvironment(ctx, "development", nil)
+//		if err != nil {
+//			return err
+//		}
+//		_, err = confluentcloud.NewFlinkComputePool(ctx, "main", &confluentcloud.FlinkComputePoolArgs{
+//			DisplayName: pulumi.String("standard_compute_pool"),
+//			Cloud:       pulumi.String("AWS"),
+//			Region:      pulumi.String("us-east-1"),
+//			MaxCfu:      pulumi.Int(5),
+//			Environment: &confluentcloud.FlinkComputePoolEnvironmentArgs{
+//				Id: development.ID(),
+//			},
+//		})
+//		if err != nil {
+//			return err
+//		}
+//		return nil
+//	})
+// }
+//
+// ```
+//
+// ## Import
+//
+// You can import a Flink Compute Pool by using Environment ID and Flink Compute Pool ID, in the format `<Environment ID>/<Flink Compute Pool ID>`. The following example shows how to import a Flink Compute Pool$ export CONFLUENT_CLOUD_API_KEY="<cloud_api_key>" $ export CONFLUENT_CLOUD_API_SECRET="<cloud_api_secret>"
+//
+// ```sh
+//
+//	$ pulumi import confluentcloud:index/flinkComputePool:FlinkComputePool main env-abc123/lfcp-abc123
+//
+// ```
+//
+// !> **Warning:** Do not forget to delete terminal command history afterwards for security purposes.
+type FlinkComputePool struct {
+	pulumi.CustomResourceState
+
+	// (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`.
+	ApiVersion pulumi.StringOutput `pulumi:"apiVersion"`
+	// The cloud service provider that runs the Flink Compute Pool.
+	Cloud pulumi.StringOutput `pulumi:"cloud"`
+	// (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool.
+	CurrentCfu pulumi.IntOutput `pulumi:"currentCfu"`
+	// The name of the Flink Compute Pool.
+	DisplayName pulumi.StringOutput `pulumi:"displayName"`
+	// Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
+	Environment FlinkComputePoolEnvironmentOutput `pulumi:"environment"`
+	// (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`.
+	Kind pulumi.StringOutput `pulumi:"kind"`
+	// Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`.
+	MaxCfu pulumi.IntOutput `pulumi:"maxCfu"`
+	// The cloud service provider region that hosts the Flink Compute Pool.
+	Region pulumi.StringOutput `pulumi:"region"`
+	// (Required String) The Confluent Resource Name of the Flink Compute Pool.
+	ResourceName pulumi.StringOutput `pulumi:"resourceName"`
+	// (Required String) The API endpoint of the Flink Compute Pool.
+	RestEndpoint pulumi.StringOutput `pulumi:"restEndpoint"`
+}
+
+// NewFlinkComputePool registers a new FlinkComputePool resource with the given unique name, arguments, and options; it returns an error when args is nil or any required argument (Cloud, DisplayName, Environment, Region) is unset.
+func NewFlinkComputePool(ctx *pulumi.Context,
+	name string, args *FlinkComputePoolArgs, opts ...pulumi.ResourceOption) (*FlinkComputePool, error) {
+	if args == nil {
+		return nil, errors.New("missing one or more required arguments")
+	}
+
+	if args.Cloud == nil {
+		return nil, errors.New("invalid value for required argument 'Cloud'")
+	}
+	if args.DisplayName == nil {
+		return nil, errors.New("invalid value for required argument 'DisplayName'")
+	}
+	if args.Environment == nil {
+		return nil, errors.New("invalid value for required argument 'Environment'")
+	}
+	if args.Region == nil {
+		return nil, errors.New("invalid value for required argument 'Region'")
+	}
+	opts = internal.PkgResourceDefaultOpts(opts)
+	var resource FlinkComputePool
+	err := ctx.RegisterResource("confluentcloud:index/flinkComputePool:FlinkComputePool", name, args, &resource, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return &resource, nil
+}
+
+// GetFlinkComputePool gets an existing FlinkComputePool resource's state with the given name, ID, and optional
+// state properties that are used to uniquely qualify the lookup (nil if not required).
+func GetFlinkComputePool(ctx *pulumi.Context,
+	name string, id pulumi.IDInput, state *FlinkComputePoolState, opts ...pulumi.ResourceOption) (*FlinkComputePool, error) {
+	var resource FlinkComputePool
+	err := ctx.ReadResource("confluentcloud:index/flinkComputePool:FlinkComputePool", name, id, state, &resource, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return &resource, nil
+}
+
+// flinkComputePoolState is the plain-Go-value counterpart of FlinkComputePoolState: input properties used for looking up and filtering FlinkComputePool resources.
+type flinkComputePoolState struct {
+	// (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`.
+	ApiVersion *string `pulumi:"apiVersion"`
+	// The cloud service provider that runs the Flink Compute Pool.
+	Cloud *string `pulumi:"cloud"`
+	// (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool.
+	CurrentCfu *int `pulumi:"currentCfu"`
+	// The name of the Flink Compute Pool.
+	DisplayName *string `pulumi:"displayName"`
+	// Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
+	Environment *FlinkComputePoolEnvironment `pulumi:"environment"`
+	// (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`.
+	Kind *string `pulumi:"kind"`
+	// Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`.
+	MaxCfu *int `pulumi:"maxCfu"`
+	// The cloud service provider region that hosts the Flink Compute Pool.
+	Region *string `pulumi:"region"`
+	// (Required String) The Confluent Resource Name of the Flink Compute Pool.
+	ResourceName *string `pulumi:"resourceName"`
+	// (Required String) The API endpoint of the Flink Compute Pool.
+	RestEndpoint *string `pulumi:"restEndpoint"`
+}
+
+type FlinkComputePoolState struct {
+	// (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`.
+	ApiVersion pulumi.StringPtrInput
+	// The cloud service provider that runs the Flink Compute Pool.
+	Cloud pulumi.StringPtrInput
+	// (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool.
+	CurrentCfu pulumi.IntPtrInput
+	// The name of the Flink Compute Pool.
+	DisplayName pulumi.StringPtrInput
+	// Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
+	Environment FlinkComputePoolEnvironmentPtrInput
+	// (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`.
+	Kind pulumi.StringPtrInput
+	// Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`.
+	MaxCfu pulumi.IntPtrInput
+	// The cloud service provider region that hosts the Flink Compute Pool.
+	Region pulumi.StringPtrInput
+	// (Required String) The Confluent Resource Name of the Flink Compute Pool.
+	ResourceName pulumi.StringPtrInput
+	// (Required String) The API endpoint of the Flink Compute Pool.
+	RestEndpoint pulumi.StringPtrInput
+}
+
+func (FlinkComputePoolState) ElementType() reflect.Type {
+	return reflect.TypeOf((*flinkComputePoolState)(nil)).Elem()
+}
+
+type flinkComputePoolArgs struct {
+	// The cloud service provider that runs the Flink Compute Pool.
+	Cloud string `pulumi:"cloud"`
+	// The name of the Flink Compute Pool.
+	DisplayName string `pulumi:"displayName"`
+	// Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
+	Environment FlinkComputePoolEnvironment `pulumi:"environment"`
+	// Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`.
+	MaxCfu *int `pulumi:"maxCfu"`
+	// The cloud service provider region that hosts the Flink Compute Pool.
+	Region string `pulumi:"region"`
+}
+
+// The set of arguments for constructing a FlinkComputePool resource.
+type FlinkComputePoolArgs struct {
+	// The cloud service provider that runs the Flink Compute Pool.
+	Cloud pulumi.StringInput
+	// The name of the Flink Compute Pool.
+	DisplayName pulumi.StringInput
+	// Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
+	Environment FlinkComputePoolEnvironmentInput
+	// Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`.
+	MaxCfu pulumi.IntPtrInput
+	// The cloud service provider region that hosts the Flink Compute Pool.
+	Region pulumi.StringInput
+}
+
+func (FlinkComputePoolArgs) ElementType() reflect.Type {
+	return reflect.TypeOf((*flinkComputePoolArgs)(nil)).Elem()
+}
+
+type FlinkComputePoolInput interface {
+	pulumi.Input
+
+	ToFlinkComputePoolOutput() FlinkComputePoolOutput
+	ToFlinkComputePoolOutputWithContext(ctx context.Context) FlinkComputePoolOutput
+}
+
+func (*FlinkComputePool) ElementType() reflect.Type {
+	return reflect.TypeOf((**FlinkComputePool)(nil)).Elem()
+}
+
+func (fcp *FlinkComputePool) ToFlinkComputePoolOutput() FlinkComputePoolOutput {
+	return fcp.ToFlinkComputePoolOutputWithContext(context.Background())
+}
+
+func (fcp *FlinkComputePool) ToFlinkComputePoolOutputWithContext(ctx context.Context) FlinkComputePoolOutput {
+	return pulumi.ToOutputWithContext(ctx, fcp).(FlinkComputePoolOutput)
+}
+
+func (fcp *FlinkComputePool) ToOutput(ctx context.Context) pulumix.Output[*FlinkComputePool] {
+	return pulumix.Output[*FlinkComputePool]{
+		OutputState: fcp.ToFlinkComputePoolOutputWithContext(ctx).OutputState,
+	}
+}
+
+// FlinkComputePoolArrayInput is satisfied by FlinkComputePoolArray and FlinkComputePoolArrayOutput values.
+// A concrete instance of `FlinkComputePoolArrayInput` can be built via:
+//
+//	FlinkComputePoolArray{ FlinkComputePoolArgs{...} }
+type FlinkComputePoolArrayInput interface {
+	pulumi.Input
+
+	ToFlinkComputePoolArrayOutput() FlinkComputePoolArrayOutput
+	ToFlinkComputePoolArrayOutputWithContext(context.Context) FlinkComputePoolArrayOutput
+}
+
+type FlinkComputePoolArray []FlinkComputePoolInput
+
+func (FlinkComputePoolArray) ElementType() reflect.Type {
+	return reflect.TypeOf((*[]*FlinkComputePool)(nil)).Elem()
+}
+
+func (arr FlinkComputePoolArray) ToFlinkComputePoolArrayOutput() FlinkComputePoolArrayOutput {
+	return arr.ToFlinkComputePoolArrayOutputWithContext(context.Background())
+}
+
+func (arr FlinkComputePoolArray) ToFlinkComputePoolArrayOutputWithContext(ctx context.Context) FlinkComputePoolArrayOutput {
+	return pulumi.ToOutputWithContext(ctx, arr).(FlinkComputePoolArrayOutput)
+}
+
+func (arr FlinkComputePoolArray) ToOutput(ctx context.Context) pulumix.Output[[]*FlinkComputePool] {
+	return pulumix.Output[[]*FlinkComputePool]{
+		OutputState: arr.ToFlinkComputePoolArrayOutputWithContext(ctx).OutputState,
+	}
+}
+
+// FlinkComputePoolMapInput is satisfied by FlinkComputePoolMap and FlinkComputePoolMapOutput values.
+// A concrete instance of `FlinkComputePoolMapInput` can be built via:
+//
+//	FlinkComputePoolMap{ "key": FlinkComputePoolArgs{...} }
+type FlinkComputePoolMapInput interface {
+	pulumi.Input
+
+	ToFlinkComputePoolMapOutput() FlinkComputePoolMapOutput
+	ToFlinkComputePoolMapOutputWithContext(context.Context) FlinkComputePoolMapOutput
+}
+
+type FlinkComputePoolMap map[string]FlinkComputePoolInput
+
+func (FlinkComputePoolMap) ElementType() reflect.Type {
+	return reflect.TypeOf((*map[string]*FlinkComputePool)(nil)).Elem()
+}
+
+func (mp FlinkComputePoolMap) ToFlinkComputePoolMapOutput() FlinkComputePoolMapOutput {
+	return mp.ToFlinkComputePoolMapOutputWithContext(context.Background())
+}
+
+func (mp FlinkComputePoolMap) ToFlinkComputePoolMapOutputWithContext(ctx context.Context) FlinkComputePoolMapOutput {
+	return pulumi.ToOutputWithContext(ctx, mp).(FlinkComputePoolMapOutput)
+}
+
+func (mp FlinkComputePoolMap) ToOutput(ctx context.Context) pulumix.Output[map[string]*FlinkComputePool] {
+	return pulumix.Output[map[string]*FlinkComputePool]{
+		OutputState: mp.ToFlinkComputePoolMapOutputWithContext(ctx).OutputState,
+	}
+}
+
+type FlinkComputePoolOutput struct{ *pulumi.OutputState }
+
+func (FlinkComputePoolOutput) ElementType() reflect.Type {
+	return reflect.TypeOf((**FlinkComputePool)(nil)).Elem()
+}
+
+func (out FlinkComputePoolOutput) ToFlinkComputePoolOutput() FlinkComputePoolOutput {
+	return out
+}
+
+func (out FlinkComputePoolOutput) ToFlinkComputePoolOutputWithContext(ctx context.Context) FlinkComputePoolOutput {
+	return out
+}
+
+func (out FlinkComputePoolOutput) ToOutput(ctx context.Context) pulumix.Output[*FlinkComputePool] {
+	return pulumix.Output[*FlinkComputePool]{
+		OutputState: out.OutputState,
+	}
+}
+
+// (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`.
+func (out FlinkComputePoolOutput) ApiVersion() pulumi.StringOutput {
+	return out.ApplyT(func(r *FlinkComputePool) pulumi.StringOutput { return r.ApiVersion }).(pulumi.StringOutput)
+}
+
+// The cloud service provider that runs the Flink Compute Pool.
+func (out FlinkComputePoolOutput) Cloud() pulumi.StringOutput {
+	return out.ApplyT(func(r *FlinkComputePool) pulumi.StringOutput { return r.Cloud }).(pulumi.StringOutput)
+}
+
+// (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool.
+func (out FlinkComputePoolOutput) CurrentCfu() pulumi.IntOutput {
+	return out.ApplyT(func(r *FlinkComputePool) pulumi.IntOutput { return r.CurrentCfu }).(pulumi.IntOutput)
+}
+
+// The name of the Flink Compute Pool.
+func (out FlinkComputePoolOutput) DisplayName() pulumi.StringOutput {
+	return out.ApplyT(func(r *FlinkComputePool) pulumi.StringOutput { return r.DisplayName }).(pulumi.StringOutput)
+}
+
+// Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
+func (out FlinkComputePoolOutput) Environment() FlinkComputePoolEnvironmentOutput {
+	return out.ApplyT(func(r *FlinkComputePool) FlinkComputePoolEnvironmentOutput { return r.Environment }).(FlinkComputePoolEnvironmentOutput)
+}
+
+// (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`.
+func (out FlinkComputePoolOutput) Kind() pulumi.StringOutput {
+	return out.ApplyT(func(r *FlinkComputePool) pulumi.StringOutput { return r.Kind }).(pulumi.StringOutput)
+}
+
+// Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`.
+func (out FlinkComputePoolOutput) MaxCfu() pulumi.IntOutput {
+	return out.ApplyT(func(r *FlinkComputePool) pulumi.IntOutput { return r.MaxCfu }).(pulumi.IntOutput)
+}
+
+// The cloud service provider region that hosts the Flink Compute Pool.
+func (out FlinkComputePoolOutput) Region() pulumi.StringOutput {
+	return out.ApplyT(func(r *FlinkComputePool) pulumi.StringOutput { return r.Region }).(pulumi.StringOutput)
+}
+
+// (Required String) The Confluent Resource Name of the Flink Compute Pool.
+func (out FlinkComputePoolOutput) ResourceName() pulumi.StringOutput {
+	return out.ApplyT(func(r *FlinkComputePool) pulumi.StringOutput { return r.ResourceName }).(pulumi.StringOutput)
+}
+
+// (Required String) The API endpoint of the Flink Compute Pool.
+func (out FlinkComputePoolOutput) RestEndpoint() pulumi.StringOutput {
+	return out.ApplyT(func(r *FlinkComputePool) pulumi.StringOutput { return r.RestEndpoint }).(pulumi.StringOutput)
+}
+
+type FlinkComputePoolArrayOutput struct{ *pulumi.OutputState }
+
+func (FlinkComputePoolArrayOutput) ElementType() reflect.Type {
+	return reflect.TypeOf((*[]*FlinkComputePool)(nil)).Elem()
+}
+
+func (out FlinkComputePoolArrayOutput) ToFlinkComputePoolArrayOutput() FlinkComputePoolArrayOutput {
+	return out
+}
+
+func (out FlinkComputePoolArrayOutput) ToFlinkComputePoolArrayOutputWithContext(ctx context.Context) FlinkComputePoolArrayOutput {
+	return out
+}
+
+func (out FlinkComputePoolArrayOutput) ToOutput(ctx context.Context) pulumix.Output[[]*FlinkComputePool] {
+	return pulumix.Output[[]*FlinkComputePool]{
+		OutputState: out.OutputState,
+	}
+}
+
+func (out FlinkComputePoolArrayOutput) Index(idx pulumi.IntInput) FlinkComputePoolOutput {
+	return pulumi.All(out, idx).ApplyT(func(resolved []interface{}) *FlinkComputePool {
+		return resolved[0].([]*FlinkComputePool)[resolved[1].(int)]
+	}).(FlinkComputePoolOutput)
+}
+
+type FlinkComputePoolMapOutput struct{ *pulumi.OutputState }
+
+func (FlinkComputePoolMapOutput) ElementType() reflect.Type {
+	return reflect.TypeOf((*map[string]*FlinkComputePool)(nil)).Elem()
+}
+
+func (out FlinkComputePoolMapOutput) ToFlinkComputePoolMapOutput() FlinkComputePoolMapOutput {
+	return out
+}
+
+func (out FlinkComputePoolMapOutput) ToFlinkComputePoolMapOutputWithContext(ctx context.Context) FlinkComputePoolMapOutput {
+	return out
+}
+
+func (out FlinkComputePoolMapOutput) ToOutput(ctx context.Context) pulumix.Output[map[string]*FlinkComputePool] {
+	return pulumix.Output[map[string]*FlinkComputePool]{
+		OutputState: out.OutputState,
+	}
+}
+
+func (out FlinkComputePoolMapOutput) MapIndex(key pulumi.StringInput) FlinkComputePoolOutput {
+	return pulumi.All(out, key).ApplyT(func(resolved []interface{}) *FlinkComputePool {
+		return resolved[0].(map[string]*FlinkComputePool)[resolved[1].(string)]
+	}).(FlinkComputePoolOutput)
+}
+
+func init() {
+	pulumi.RegisterInputType(reflect.TypeOf((*FlinkComputePoolInput)(nil)).Elem(), &FlinkComputePool{})
+	pulumi.RegisterInputType(reflect.TypeOf((*FlinkComputePoolArrayInput)(nil)).Elem(), FlinkComputePoolArray{})
+	pulumi.RegisterInputType(reflect.TypeOf((*FlinkComputePoolMapInput)(nil)).Elem(), FlinkComputePoolMap{})
+	pulumi.RegisterOutputType(FlinkComputePoolOutput{})
+	pulumi.RegisterOutputType(FlinkComputePoolArrayOutput{})
+	pulumi.RegisterOutputType(FlinkComputePoolMapOutput{})
+}
diff --git a/sdk/go/confluentcloud/getFlinkComputePool.go b/sdk/go/confluentcloud/getFlinkComputePool.go
new file mode 100644
index 00000000..40f8ed2f
--- /dev/null
+++ b/sdk/go/confluentcloud/getFlinkComputePool.go
@@ -0,0 +1,216 @@
+// Code generated by the Pulumi Terraform Bridge (tfgen) Tool DO NOT EDIT.
+// *** WARNING: Do not edit by hand unless you're certain you know what you are doing! ***
+
+package confluentcloud
+
+import (
+ "context"
+ "reflect"
+
+ "github.com/pulumi/pulumi-confluentcloud/sdk/go/confluentcloud/internal"
+ "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
+ "github.com/pulumi/pulumi/sdk/v3/go/pulumix"
+)
+
+// [![Early Access](https://img.shields.io/badge/Lifecycle%20Stage-Early%20Access-%2300afba)](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy)
+//
+// > **Note:** `FlinkComputePool` data source is available in **Early Access** for early adopters. Early Access features are introduced to gather customer feedback. This feature should be used only for evaluation and non-production testing purposes or to provide feedback to Confluent, particularly as it becomes more widely available in follow-on editions.\
+// **Early Access** features are intended for evaluation use in development and testing environments only, and not for production use. The warranty, SLA, and Support Services provisions of your agreement with Confluent do not apply to Early Access features. Early Access features are considered to be a Proof of Concept as defined in the Confluent Cloud Terms of Service. Confluent may discontinue providing early access releases of the Early Access features at any time in Confluent’s sole discretion.
+//
+// `FlinkComputePool` describes a Flink Compute Pool data source.
+//
+// ## Example Usage
+//
+// ```go
+// package main
+//
+// import (
+//
+// "github.com/pulumi/pulumi-confluentcloud/sdk/go/confluentcloud"
+// "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
+//
+// )
+//
+// func main() {
+// pulumi.Run(func(ctx *pulumi.Context) error {
+// exampleUsingIdFlinkComputePool, err := confluentcloud.LookupFlinkComputePool(ctx, &confluentcloud.LookupFlinkComputePoolArgs{
+// Id: pulumi.StringRef("lfcp-abc123"),
+// Environment: confluentcloud.GetFlinkComputePoolEnvironment{
+// Id: "env-xyz456",
+// },
+// }, nil)
+// if err != nil {
+// return err
+// }
+// ctx.Export("exampleUsingId", exampleUsingIdFlinkComputePool)
+// exampleUsingNameFlinkComputePool, err := confluentcloud.LookupFlinkComputePool(ctx, &confluentcloud.LookupFlinkComputePoolArgs{
+// DisplayName: pulumi.StringRef("my_compute_pool"),
+// Environment: confluentcloud.GetFlinkComputePoolEnvironment{
+// Id: "env-xyz456",
+// },
+// }, nil)
+// if err != nil {
+// return err
+// }
+// ctx.Export("exampleUsingName", exampleUsingNameFlinkComputePool)
+// return nil
+// })
+// }
+//
+// ```
+func LookupFlinkComputePool(ctx *pulumi.Context, args *LookupFlinkComputePoolArgs, opts ...pulumi.InvokeOption) (*LookupFlinkComputePoolResult, error) {
+ opts = internal.PkgInvokeDefaultOpts(opts)
+ var rv LookupFlinkComputePoolResult
+ err := ctx.Invoke("confluentcloud:index/getFlinkComputePool:getFlinkComputePool", args, &rv, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return &rv, nil
+}
+
+// A collection of arguments for invoking getFlinkComputePool.
+type LookupFlinkComputePoolArgs struct {
+ // A human-readable name for the Flink Compute Pool.
+ DisplayName *string `pulumi:"displayName"`
+ // (Required Configuration Block) supports the following:
+ Environment GetFlinkComputePoolEnvironment `pulumi:"environment"`
+	// The ID of the Flink Compute Pool, for example, `lfcp-abc123`.
+ //
+ // > **Note:** Exactly one from the `id` and `displayName` attributes must be specified.
+ Id *string `pulumi:"id"`
+}
+
+// A collection of values returned by getFlinkComputePool.
+type LookupFlinkComputePoolResult struct {
+ // (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`.
+ ApiVersion string `pulumi:"apiVersion"`
+ // (Required String) The cloud service provider that runs the Flink Compute Pool.
+ Cloud string `pulumi:"cloud"`
+ // (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool.
+ CurrentCfu int `pulumi:"currentCfu"`
+ // (Required String) The name of the Flink Compute Pool.
+ DisplayName string `pulumi:"displayName"`
+ // (Required Configuration Block) supports the following:
+ Environment GetFlinkComputePoolEnvironment `pulumi:"environment"`
+	// (Required String) The ID of the Flink Compute Pool, for example, `lfcp-abc123`.
+	Id string `pulumi:"id"`
+ // (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`.
+ Kind string `pulumi:"kind"`
+ // (Required Integer) Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to.
+ MaxCfu int `pulumi:"maxCfu"`
+ // (Required String) The cloud service provider region that hosts the Flink Compute Pool.
+ Region string `pulumi:"region"`
+ // (Required String) The Confluent Resource Name of the Flink Compute Pool.
+ ResourceName string `pulumi:"resourceName"`
+ // (Required String) The API endpoint of the Flink Compute Pool.
+ RestEndpoint string `pulumi:"restEndpoint"`
+}
+
+func LookupFlinkComputePoolOutput(ctx *pulumi.Context, args LookupFlinkComputePoolOutputArgs, opts ...pulumi.InvokeOption) LookupFlinkComputePoolResultOutput {
+ return pulumi.ToOutputWithContext(context.Background(), args).
+ ApplyT(func(v interface{}) (LookupFlinkComputePoolResult, error) {
+ args := v.(LookupFlinkComputePoolArgs)
+ r, err := LookupFlinkComputePool(ctx, &args, opts...)
+ var s LookupFlinkComputePoolResult
+ if r != nil {
+ s = *r
+ }
+ return s, err
+ }).(LookupFlinkComputePoolResultOutput)
+}
+
+// A collection of arguments for invoking getFlinkComputePool.
+type LookupFlinkComputePoolOutputArgs struct {
+ // A human-readable name for the Flink Compute Pool.
+ DisplayName pulumi.StringPtrInput `pulumi:"displayName"`
+ // (Required Configuration Block) supports the following:
+ Environment GetFlinkComputePoolEnvironmentInput `pulumi:"environment"`
+	// The ID of the Flink Compute Pool, for example, `lfcp-abc123`.
+ //
+ // > **Note:** Exactly one from the `id` and `displayName` attributes must be specified.
+ Id pulumi.StringPtrInput `pulumi:"id"`
+}
+
+func (LookupFlinkComputePoolOutputArgs) ElementType() reflect.Type {
+ return reflect.TypeOf((*LookupFlinkComputePoolArgs)(nil)).Elem()
+}
+
+// A collection of values returned by getFlinkComputePool.
+type LookupFlinkComputePoolResultOutput struct{ *pulumi.OutputState }
+
+func (LookupFlinkComputePoolResultOutput) ElementType() reflect.Type {
+ return reflect.TypeOf((*LookupFlinkComputePoolResult)(nil)).Elem()
+}
+
+func (o LookupFlinkComputePoolResultOutput) ToLookupFlinkComputePoolResultOutput() LookupFlinkComputePoolResultOutput {
+ return o
+}
+
+func (o LookupFlinkComputePoolResultOutput) ToLookupFlinkComputePoolResultOutputWithContext(ctx context.Context) LookupFlinkComputePoolResultOutput {
+ return o
+}
+
+func (o LookupFlinkComputePoolResultOutput) ToOutput(ctx context.Context) pulumix.Output[LookupFlinkComputePoolResult] {
+ return pulumix.Output[LookupFlinkComputePoolResult]{
+ OutputState: o.OutputState,
+ }
+}
+
+// (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`.
+func (o LookupFlinkComputePoolResultOutput) ApiVersion() pulumi.StringOutput {
+ return o.ApplyT(func(v LookupFlinkComputePoolResult) string { return v.ApiVersion }).(pulumi.StringOutput)
+}
+
+// (Required String) The cloud service provider that runs the Flink Compute Pool.
+func (o LookupFlinkComputePoolResultOutput) Cloud() pulumi.StringOutput {
+ return o.ApplyT(func(v LookupFlinkComputePoolResult) string { return v.Cloud }).(pulumi.StringOutput)
+}
+
+// (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool.
+func (o LookupFlinkComputePoolResultOutput) CurrentCfu() pulumi.IntOutput {
+ return o.ApplyT(func(v LookupFlinkComputePoolResult) int { return v.CurrentCfu }).(pulumi.IntOutput)
+}
+
+// (Required String) The name of the Flink Compute Pool.
+func (o LookupFlinkComputePoolResultOutput) DisplayName() pulumi.StringOutput {
+ return o.ApplyT(func(v LookupFlinkComputePoolResult) string { return v.DisplayName }).(pulumi.StringOutput)
+}
+
+// (Required Configuration Block) supports the following:
+func (o LookupFlinkComputePoolResultOutput) Environment() GetFlinkComputePoolEnvironmentOutput {
+ return o.ApplyT(func(v LookupFlinkComputePoolResult) GetFlinkComputePoolEnvironment { return v.Environment }).(GetFlinkComputePoolEnvironmentOutput)
+}
+
+// (Required String) The ID of the Flink Compute Pool, for example, `lfcp-abc123`.
+func (o LookupFlinkComputePoolResultOutput) Id() pulumi.StringOutput {
+ return o.ApplyT(func(v LookupFlinkComputePoolResult) string { return v.Id }).(pulumi.StringOutput)
+}
+
+// (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`.
+func (o LookupFlinkComputePoolResultOutput) Kind() pulumi.StringOutput {
+ return o.ApplyT(func(v LookupFlinkComputePoolResult) string { return v.Kind }).(pulumi.StringOutput)
+}
+
+// (Required Integer) Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to.
+func (o LookupFlinkComputePoolResultOutput) MaxCfu() pulumi.IntOutput {
+ return o.ApplyT(func(v LookupFlinkComputePoolResult) int { return v.MaxCfu }).(pulumi.IntOutput)
+}
+
+// (Required String) The cloud service provider region that hosts the Flink Compute Pool.
+func (o LookupFlinkComputePoolResultOutput) Region() pulumi.StringOutput {
+ return o.ApplyT(func(v LookupFlinkComputePoolResult) string { return v.Region }).(pulumi.StringOutput)
+}
+
+// (Required String) The Confluent Resource Name of the Flink Compute Pool.
+func (o LookupFlinkComputePoolResultOutput) ResourceName() pulumi.StringOutput {
+ return o.ApplyT(func(v LookupFlinkComputePoolResult) string { return v.ResourceName }).(pulumi.StringOutput)
+}
+
+// (Required String) The API endpoint of the Flink Compute Pool.
+func (o LookupFlinkComputePoolResultOutput) RestEndpoint() pulumi.StringOutput {
+ return o.ApplyT(func(v LookupFlinkComputePoolResult) string { return v.RestEndpoint }).(pulumi.StringOutput)
+}
+
+func init() {
+ pulumi.RegisterOutputType(LookupFlinkComputePoolResultOutput{})
+}
diff --git a/sdk/go/confluentcloud/getRoleBinding.go b/sdk/go/confluentcloud/getRoleBinding.go
index fec23d2b..537355b6 100644
--- a/sdk/go/confluentcloud/getRoleBinding.go
+++ b/sdk/go/confluentcloud/getRoleBinding.go
@@ -16,6 +16,8 @@ import (
//
// `RoleBinding` describes a Role Binding.
//
+// > **Note:** For more information on the Role Bindings, see [Predefined RBAC roles in Confluent Cloud](https://docs.confluent.io/cloud/current/access-management/access-control/rbac/predefined-rbac-roles.html).
+//
// ## Example Usage
//
// ```go
diff --git a/sdk/go/confluentcloud/init.go b/sdk/go/confluentcloud/init.go
index f5401572..c766e361 100644
--- a/sdk/go/confluentcloud/init.go
+++ b/sdk/go/confluentcloud/init.go
@@ -35,6 +35,8 @@ func (m *module) Construct(ctx *pulumi.Context, name, typ, urn string) (r pulumi
r = &Connector{}
case "confluentcloud:index/environment:Environment":
r = &Environment{}
+ case "confluentcloud:index/flinkComputePool:FlinkComputePool":
+ r = &FlinkComputePool{}
case "confluentcloud:index/identityPool:IdentityPool":
r = &IdentityPool{}
case "confluentcloud:index/identityProvider:IdentityProvider":
@@ -73,6 +75,8 @@ func (m *module) Construct(ctx *pulumi.Context, name, typ, urn string) (r pulumi
r = &RoleBinding{}
case "confluentcloud:index/schema:Schema":
r = &Schema{}
+ case "confluentcloud:index/schemaExporter:SchemaExporter":
+ r = &SchemaExporter{}
case "confluentcloud:index/schemaRegistryCluster:SchemaRegistryCluster":
r = &SchemaRegistryCluster{}
case "confluentcloud:index/schemaRegistryClusterConfig:SchemaRegistryClusterConfig":
@@ -159,6 +163,11 @@ func init() {
"index/environment",
&module{version},
)
+ pulumi.RegisterResourceModule(
+ "confluentcloud",
+ "index/flinkComputePool",
+ &module{version},
+ )
pulumi.RegisterResourceModule(
"confluentcloud",
"index/identityPool",
@@ -254,6 +263,11 @@ func init() {
"index/schema",
&module{version},
)
+ pulumi.RegisterResourceModule(
+ "confluentcloud",
+ "index/schemaExporter",
+ &module{version},
+ )
pulumi.RegisterResourceModule(
"confluentcloud",
"index/schemaRegistryCluster",
diff --git a/sdk/go/confluentcloud/kafkaAcl.go b/sdk/go/confluentcloud/kafkaAcl.go
index 96216927..3db16ab8 100644
--- a/sdk/go/confluentcloud/kafkaAcl.go
+++ b/sdk/go/confluentcloud/kafkaAcl.go
@@ -15,7 +15,7 @@ import (
// ## Import
//
-// You can import Kafka ACLs by using the Kafka cluster ID and attributes of `confluent_kafka_acl` resource in the format `/######`, for exampleOption #1Manage multiple Kafka clusters in the same Terraform workspace $ export CONFLUENT_CLOUD_API_KEY="" $ export CONFLUENT_CLOUD_API_SECRET="" $ export IMPORT_KAFKA_API_KEY="" $ export IMPORT_KAFKA_API_SECRET="" $ export IMPORT_KAFKA_REST_ENDPOINT=""
+// You can import Kafka ACLs by using the Kafka cluster ID and attributes of `confluent_kafka_acl` resource in the format `/######`, for exampleOption #1Manage multiple Kafka clusters in the same Terraform workspace $ export IMPORT_KAFKA_API_KEY="" $ export IMPORT_KAFKA_API_SECRET="" $ export IMPORT_KAFKA_REST_ENDPOINT=""
//
// ```sh
//
diff --git a/sdk/go/confluentcloud/pulumiTypes.go b/sdk/go/confluentcloud/pulumiTypes.go
index 82ed34b3..c567d414 100644
--- a/sdk/go/confluentcloud/pulumiTypes.go
+++ b/sdk/go/confluentcloud/pulumiTypes.go
@@ -2050,8 +2050,6 @@ type ClusterLinkDestinationKafkaClusterCredentials struct {
// The Kafka API Key.
Key string `pulumi:"key"`
// The Kafka API Secret.
- //
- // > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).
Secret string `pulumi:"secret"`
}
@@ -2070,8 +2068,6 @@ type ClusterLinkDestinationKafkaClusterCredentialsArgs struct {
// The Kafka API Key.
Key pulumi.StringInput `pulumi:"key"`
// The Kafka API Secret.
- //
- // > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).
Secret pulumi.StringInput `pulumi:"secret"`
}
@@ -2176,8 +2172,6 @@ func (o ClusterLinkDestinationKafkaClusterCredentialsOutput) Key() pulumi.String
}
// The Kafka API Secret.
-//
-// > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).
func (o ClusterLinkDestinationKafkaClusterCredentialsOutput) Secret() pulumi.StringOutput {
return o.ApplyT(func(v ClusterLinkDestinationKafkaClusterCredentials) string { return v.Secret }).(pulumi.StringOutput)
}
@@ -2223,8 +2217,6 @@ func (o ClusterLinkDestinationKafkaClusterCredentialsPtrOutput) Key() pulumi.Str
}
// The Kafka API Secret.
-//
-// > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).
func (o ClusterLinkDestinationKafkaClusterCredentialsPtrOutput) Secret() pulumi.StringPtrOutput {
return o.ApplyT(func(v *ClusterLinkDestinationKafkaClusterCredentials) *string {
if v == nil {
@@ -2452,8 +2444,6 @@ type ClusterLinkLocalKafkaClusterCredentials struct {
// The Kafka API Key.
Key string `pulumi:"key"`
// The Kafka API Secret.
- //
- // > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).
Secret string `pulumi:"secret"`
}
@@ -2472,8 +2462,6 @@ type ClusterLinkLocalKafkaClusterCredentialsArgs struct {
// The Kafka API Key.
Key pulumi.StringInput `pulumi:"key"`
// The Kafka API Secret.
- //
- // > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).
Secret pulumi.StringInput `pulumi:"secret"`
}
@@ -2578,8 +2566,6 @@ func (o ClusterLinkLocalKafkaClusterCredentialsOutput) Key() pulumi.StringOutput
}
// The Kafka API Secret.
-//
-// > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).
func (o ClusterLinkLocalKafkaClusterCredentialsOutput) Secret() pulumi.StringOutput {
return o.ApplyT(func(v ClusterLinkLocalKafkaClusterCredentials) string { return v.Secret }).(pulumi.StringOutput)
}
@@ -2625,8 +2611,6 @@ func (o ClusterLinkLocalKafkaClusterCredentialsPtrOutput) Key() pulumi.StringPtr
}
// The Kafka API Secret.
-//
-// > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).
func (o ClusterLinkLocalKafkaClusterCredentialsPtrOutput) Secret() pulumi.StringPtrOutput {
return o.ApplyT(func(v *ClusterLinkLocalKafkaClusterCredentials) *string {
if v == nil {
@@ -2854,8 +2838,6 @@ type ClusterLinkRemoteKafkaClusterCredentials struct {
// The Kafka API Key.
Key string `pulumi:"key"`
// The Kafka API Secret.
- //
- // > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).
Secret string `pulumi:"secret"`
}
@@ -2874,8 +2856,6 @@ type ClusterLinkRemoteKafkaClusterCredentialsArgs struct {
// The Kafka API Key.
Key pulumi.StringInput `pulumi:"key"`
// The Kafka API Secret.
- //
- // > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).
Secret pulumi.StringInput `pulumi:"secret"`
}
@@ -2980,8 +2960,6 @@ func (o ClusterLinkRemoteKafkaClusterCredentialsOutput) Key() pulumi.StringOutpu
}
// The Kafka API Secret.
-//
-// > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).
func (o ClusterLinkRemoteKafkaClusterCredentialsOutput) Secret() pulumi.StringOutput {
return o.ApplyT(func(v ClusterLinkRemoteKafkaClusterCredentials) string { return v.Secret }).(pulumi.StringOutput)
}
@@ -3027,8 +3005,6 @@ func (o ClusterLinkRemoteKafkaClusterCredentialsPtrOutput) Key() pulumi.StringPt
}
// The Kafka API Secret.
-//
-// > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).
func (o ClusterLinkRemoteKafkaClusterCredentialsPtrOutput) Secret() pulumi.StringPtrOutput {
return o.ApplyT(func(v *ClusterLinkRemoteKafkaClusterCredentials) *string {
if v == nil {
@@ -3256,8 +3232,6 @@ type ClusterLinkSourceKafkaClusterCredentials struct {
// The Kafka API Key.
Key string `pulumi:"key"`
// The Kafka API Secret.
- //
- // > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).
Secret string `pulumi:"secret"`
}
@@ -3276,8 +3250,6 @@ type ClusterLinkSourceKafkaClusterCredentialsArgs struct {
// The Kafka API Key.
Key pulumi.StringInput `pulumi:"key"`
// The Kafka API Secret.
- //
- // > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).
Secret pulumi.StringInput `pulumi:"secret"`
}
@@ -3382,8 +3354,6 @@ func (o ClusterLinkSourceKafkaClusterCredentialsOutput) Key() pulumi.StringOutpu
}
// The Kafka API Secret.
-//
-// > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).
func (o ClusterLinkSourceKafkaClusterCredentialsOutput) Secret() pulumi.StringOutput {
return o.ApplyT(func(v ClusterLinkSourceKafkaClusterCredentials) string { return v.Secret }).(pulumi.StringOutput)
}
@@ -3429,8 +3399,6 @@ func (o ClusterLinkSourceKafkaClusterCredentialsPtrOutput) Key() pulumi.StringPt
}
// The Kafka API Secret.
-//
-// > **Note:** The `localKafkaCluster`, `remoteKafkaCluster` configuration block and `linkMode = BIDIRECTIONAL` are in a [Preview lifecycle stage](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy).
func (o ClusterLinkSourceKafkaClusterCredentialsPtrOutput) Secret() pulumi.StringPtrOutput {
return o.ApplyT(func(v *ClusterLinkSourceKafkaClusterCredentials) *string {
if v == nil {
@@ -3762,6 +3730,167 @@ func (o ConnectorKafkaClusterPtrOutput) Id() pulumi.StringPtrOutput {
}).(pulumi.StringPtrOutput)
}
+type FlinkComputePoolEnvironment struct {
+ // The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`.
+ Id string `pulumi:"id"`
+}
+
+// FlinkComputePoolEnvironmentInput is an input type that accepts FlinkComputePoolEnvironmentArgs and FlinkComputePoolEnvironmentOutput values.
+// You can construct a concrete instance of `FlinkComputePoolEnvironmentInput` via:
+//
+// FlinkComputePoolEnvironmentArgs{...}
+type FlinkComputePoolEnvironmentInput interface {
+ pulumi.Input
+
+ ToFlinkComputePoolEnvironmentOutput() FlinkComputePoolEnvironmentOutput
+ ToFlinkComputePoolEnvironmentOutputWithContext(context.Context) FlinkComputePoolEnvironmentOutput
+}
+
+type FlinkComputePoolEnvironmentArgs struct {
+ // The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`.
+ Id pulumi.StringInput `pulumi:"id"`
+}
+
+func (FlinkComputePoolEnvironmentArgs) ElementType() reflect.Type {
+ return reflect.TypeOf((*FlinkComputePoolEnvironment)(nil)).Elem()
+}
+
+func (i FlinkComputePoolEnvironmentArgs) ToFlinkComputePoolEnvironmentOutput() FlinkComputePoolEnvironmentOutput {
+ return i.ToFlinkComputePoolEnvironmentOutputWithContext(context.Background())
+}
+
+func (i FlinkComputePoolEnvironmentArgs) ToFlinkComputePoolEnvironmentOutputWithContext(ctx context.Context) FlinkComputePoolEnvironmentOutput {
+ return pulumi.ToOutputWithContext(ctx, i).(FlinkComputePoolEnvironmentOutput)
+}
+
+func (i FlinkComputePoolEnvironmentArgs) ToOutput(ctx context.Context) pulumix.Output[FlinkComputePoolEnvironment] {
+ return pulumix.Output[FlinkComputePoolEnvironment]{
+ OutputState: i.ToFlinkComputePoolEnvironmentOutputWithContext(ctx).OutputState,
+ }
+}
+
+func (i FlinkComputePoolEnvironmentArgs) ToFlinkComputePoolEnvironmentPtrOutput() FlinkComputePoolEnvironmentPtrOutput {
+ return i.ToFlinkComputePoolEnvironmentPtrOutputWithContext(context.Background())
+}
+
+func (i FlinkComputePoolEnvironmentArgs) ToFlinkComputePoolEnvironmentPtrOutputWithContext(ctx context.Context) FlinkComputePoolEnvironmentPtrOutput {
+ return pulumi.ToOutputWithContext(ctx, i).(FlinkComputePoolEnvironmentOutput).ToFlinkComputePoolEnvironmentPtrOutputWithContext(ctx)
+}
+
+// FlinkComputePoolEnvironmentPtrInput is an input type that accepts FlinkComputePoolEnvironmentArgs, FlinkComputePoolEnvironmentPtr and FlinkComputePoolEnvironmentPtrOutput values.
+// You can construct a concrete instance of `FlinkComputePoolEnvironmentPtrInput` via:
+//
+// FlinkComputePoolEnvironmentArgs{...}
+//
+// or:
+//
+// nil
+type FlinkComputePoolEnvironmentPtrInput interface {
+ pulumi.Input
+
+ ToFlinkComputePoolEnvironmentPtrOutput() FlinkComputePoolEnvironmentPtrOutput
+ ToFlinkComputePoolEnvironmentPtrOutputWithContext(context.Context) FlinkComputePoolEnvironmentPtrOutput
+}
+
+type flinkComputePoolEnvironmentPtrType FlinkComputePoolEnvironmentArgs
+
+func FlinkComputePoolEnvironmentPtr(v *FlinkComputePoolEnvironmentArgs) FlinkComputePoolEnvironmentPtrInput {
+ return (*flinkComputePoolEnvironmentPtrType)(v)
+}
+
+func (*flinkComputePoolEnvironmentPtrType) ElementType() reflect.Type {
+ return reflect.TypeOf((**FlinkComputePoolEnvironment)(nil)).Elem()
+}
+
+func (i *flinkComputePoolEnvironmentPtrType) ToFlinkComputePoolEnvironmentPtrOutput() FlinkComputePoolEnvironmentPtrOutput {
+ return i.ToFlinkComputePoolEnvironmentPtrOutputWithContext(context.Background())
+}
+
+func (i *flinkComputePoolEnvironmentPtrType) ToFlinkComputePoolEnvironmentPtrOutputWithContext(ctx context.Context) FlinkComputePoolEnvironmentPtrOutput {
+ return pulumi.ToOutputWithContext(ctx, i).(FlinkComputePoolEnvironmentPtrOutput)
+}
+
+func (i *flinkComputePoolEnvironmentPtrType) ToOutput(ctx context.Context) pulumix.Output[*FlinkComputePoolEnvironment] {
+ return pulumix.Output[*FlinkComputePoolEnvironment]{
+ OutputState: i.ToFlinkComputePoolEnvironmentPtrOutputWithContext(ctx).OutputState,
+ }
+}
+
+type FlinkComputePoolEnvironmentOutput struct{ *pulumi.OutputState }
+
+func (FlinkComputePoolEnvironmentOutput) ElementType() reflect.Type {
+ return reflect.TypeOf((*FlinkComputePoolEnvironment)(nil)).Elem()
+}
+
+func (o FlinkComputePoolEnvironmentOutput) ToFlinkComputePoolEnvironmentOutput() FlinkComputePoolEnvironmentOutput {
+ return o
+}
+
+func (o FlinkComputePoolEnvironmentOutput) ToFlinkComputePoolEnvironmentOutputWithContext(ctx context.Context) FlinkComputePoolEnvironmentOutput {
+ return o
+}
+
+func (o FlinkComputePoolEnvironmentOutput) ToFlinkComputePoolEnvironmentPtrOutput() FlinkComputePoolEnvironmentPtrOutput {
+ return o.ToFlinkComputePoolEnvironmentPtrOutputWithContext(context.Background())
+}
+
+func (o FlinkComputePoolEnvironmentOutput) ToFlinkComputePoolEnvironmentPtrOutputWithContext(ctx context.Context) FlinkComputePoolEnvironmentPtrOutput {
+ return o.ApplyTWithContext(ctx, func(_ context.Context, v FlinkComputePoolEnvironment) *FlinkComputePoolEnvironment {
+ return &v
+ }).(FlinkComputePoolEnvironmentPtrOutput)
+}
+
+func (o FlinkComputePoolEnvironmentOutput) ToOutput(ctx context.Context) pulumix.Output[FlinkComputePoolEnvironment] {
+ return pulumix.Output[FlinkComputePoolEnvironment]{
+ OutputState: o.OutputState,
+ }
+}
+
+// The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`.
+func (o FlinkComputePoolEnvironmentOutput) Id() pulumi.StringOutput {
+ return o.ApplyT(func(v FlinkComputePoolEnvironment) string { return v.Id }).(pulumi.StringOutput)
+}
+
+type FlinkComputePoolEnvironmentPtrOutput struct{ *pulumi.OutputState }
+
+func (FlinkComputePoolEnvironmentPtrOutput) ElementType() reflect.Type {
+ return reflect.TypeOf((**FlinkComputePoolEnvironment)(nil)).Elem()
+}
+
+func (o FlinkComputePoolEnvironmentPtrOutput) ToFlinkComputePoolEnvironmentPtrOutput() FlinkComputePoolEnvironmentPtrOutput {
+ return o
+}
+
+func (o FlinkComputePoolEnvironmentPtrOutput) ToFlinkComputePoolEnvironmentPtrOutputWithContext(ctx context.Context) FlinkComputePoolEnvironmentPtrOutput {
+ return o
+}
+
+func (o FlinkComputePoolEnvironmentPtrOutput) ToOutput(ctx context.Context) pulumix.Output[*FlinkComputePoolEnvironment] {
+ return pulumix.Output[*FlinkComputePoolEnvironment]{
+ OutputState: o.OutputState,
+ }
+}
+
+func (o FlinkComputePoolEnvironmentPtrOutput) Elem() FlinkComputePoolEnvironmentOutput {
+ return o.ApplyT(func(v *FlinkComputePoolEnvironment) FlinkComputePoolEnvironment {
+ if v != nil {
+ return *v
+ }
+ var ret FlinkComputePoolEnvironment
+ return ret
+ }).(FlinkComputePoolEnvironmentOutput)
+}
+
+// The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-abc123`.
+func (o FlinkComputePoolEnvironmentPtrOutput) Id() pulumi.StringPtrOutput {
+ return o.ApplyT(func(v *FlinkComputePoolEnvironment) *string {
+ if v == nil {
+ return nil
+ }
+ return &v.Id
+ }).(pulumi.StringPtrOutput)
+}
+
type IdentityPoolIdentityProvider struct {
// The ID of the Identity Provider associated with the Identity Pool, for example, `op-abc123`.
Id string `pulumi:"id"`
@@ -13174,166 +13303,169 @@ func (o SchemaCredentialsPtrOutput) Secret() pulumi.StringPtrOutput {
}).(pulumi.StringPtrOutput)
}
-type SchemaRegistryClusterConfigCredentials struct {
+type SchemaExporterCredentials struct {
// The Schema Registry API Key.
- Key string `pulumi:"key"`
+ Key string `pulumi:"key"`
+ // The Schema Registry API Secret.
Secret string `pulumi:"secret"`
}
-// SchemaRegistryClusterConfigCredentialsInput is an input type that accepts SchemaRegistryClusterConfigCredentialsArgs and SchemaRegistryClusterConfigCredentialsOutput values.
-// You can construct a concrete instance of `SchemaRegistryClusterConfigCredentialsInput` via:
+// SchemaExporterCredentialsInput is an input type that accepts SchemaExporterCredentialsArgs and SchemaExporterCredentialsOutput values.
+// You can construct a concrete instance of `SchemaExporterCredentialsInput` via:
//
-// SchemaRegistryClusterConfigCredentialsArgs{...}
-type SchemaRegistryClusterConfigCredentialsInput interface {
+// SchemaExporterCredentialsArgs{...}
+type SchemaExporterCredentialsInput interface {
pulumi.Input
- ToSchemaRegistryClusterConfigCredentialsOutput() SchemaRegistryClusterConfigCredentialsOutput
- ToSchemaRegistryClusterConfigCredentialsOutputWithContext(context.Context) SchemaRegistryClusterConfigCredentialsOutput
+ ToSchemaExporterCredentialsOutput() SchemaExporterCredentialsOutput
+ ToSchemaExporterCredentialsOutputWithContext(context.Context) SchemaExporterCredentialsOutput
}
-type SchemaRegistryClusterConfigCredentialsArgs struct {
+type SchemaExporterCredentialsArgs struct {
// The Schema Registry API Key.
- Key pulumi.StringInput `pulumi:"key"`
+ Key pulumi.StringInput `pulumi:"key"`
+ // The Schema Registry API Secret.
Secret pulumi.StringInput `pulumi:"secret"`
}
-func (SchemaRegistryClusterConfigCredentialsArgs) ElementType() reflect.Type {
- return reflect.TypeOf((*SchemaRegistryClusterConfigCredentials)(nil)).Elem()
+func (SchemaExporterCredentialsArgs) ElementType() reflect.Type {
+ return reflect.TypeOf((*SchemaExporterCredentials)(nil)).Elem()
}
-func (i SchemaRegistryClusterConfigCredentialsArgs) ToSchemaRegistryClusterConfigCredentialsOutput() SchemaRegistryClusterConfigCredentialsOutput {
- return i.ToSchemaRegistryClusterConfigCredentialsOutputWithContext(context.Background())
+func (i SchemaExporterCredentialsArgs) ToSchemaExporterCredentialsOutput() SchemaExporterCredentialsOutput {
+ return i.ToSchemaExporterCredentialsOutputWithContext(context.Background())
}
-func (i SchemaRegistryClusterConfigCredentialsArgs) ToSchemaRegistryClusterConfigCredentialsOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigCredentialsOutput {
- return pulumi.ToOutputWithContext(ctx, i).(SchemaRegistryClusterConfigCredentialsOutput)
+func (i SchemaExporterCredentialsArgs) ToSchemaExporterCredentialsOutputWithContext(ctx context.Context) SchemaExporterCredentialsOutput {
+ return pulumi.ToOutputWithContext(ctx, i).(SchemaExporterCredentialsOutput)
}
-func (i SchemaRegistryClusterConfigCredentialsArgs) ToOutput(ctx context.Context) pulumix.Output[SchemaRegistryClusterConfigCredentials] {
- return pulumix.Output[SchemaRegistryClusterConfigCredentials]{
- OutputState: i.ToSchemaRegistryClusterConfigCredentialsOutputWithContext(ctx).OutputState,
+func (i SchemaExporterCredentialsArgs) ToOutput(ctx context.Context) pulumix.Output[SchemaExporterCredentials] {
+ return pulumix.Output[SchemaExporterCredentials]{
+ OutputState: i.ToSchemaExporterCredentialsOutputWithContext(ctx).OutputState,
}
}
-func (i SchemaRegistryClusterConfigCredentialsArgs) ToSchemaRegistryClusterConfigCredentialsPtrOutput() SchemaRegistryClusterConfigCredentialsPtrOutput {
- return i.ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(context.Background())
+func (i SchemaExporterCredentialsArgs) ToSchemaExporterCredentialsPtrOutput() SchemaExporterCredentialsPtrOutput {
+ return i.ToSchemaExporterCredentialsPtrOutputWithContext(context.Background())
}
-func (i SchemaRegistryClusterConfigCredentialsArgs) ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigCredentialsPtrOutput {
- return pulumi.ToOutputWithContext(ctx, i).(SchemaRegistryClusterConfigCredentialsOutput).ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(ctx)
+func (i SchemaExporterCredentialsArgs) ToSchemaExporterCredentialsPtrOutputWithContext(ctx context.Context) SchemaExporterCredentialsPtrOutput {
+ return pulumi.ToOutputWithContext(ctx, i).(SchemaExporterCredentialsOutput).ToSchemaExporterCredentialsPtrOutputWithContext(ctx)
}
-// SchemaRegistryClusterConfigCredentialsPtrInput is an input type that accepts SchemaRegistryClusterConfigCredentialsArgs, SchemaRegistryClusterConfigCredentialsPtr and SchemaRegistryClusterConfigCredentialsPtrOutput values.
-// You can construct a concrete instance of `SchemaRegistryClusterConfigCredentialsPtrInput` via:
+// SchemaExporterCredentialsPtrInput is an input type that accepts SchemaExporterCredentialsArgs, SchemaExporterCredentialsPtr and SchemaExporterCredentialsPtrOutput values.
+// You can construct a concrete instance of `SchemaExporterCredentialsPtrInput` via:
//
-// SchemaRegistryClusterConfigCredentialsArgs{...}
+// SchemaExporterCredentialsArgs{...}
//
// or:
//
// nil
-type SchemaRegistryClusterConfigCredentialsPtrInput interface {
+type SchemaExporterCredentialsPtrInput interface {
pulumi.Input
- ToSchemaRegistryClusterConfigCredentialsPtrOutput() SchemaRegistryClusterConfigCredentialsPtrOutput
- ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(context.Context) SchemaRegistryClusterConfigCredentialsPtrOutput
+ ToSchemaExporterCredentialsPtrOutput() SchemaExporterCredentialsPtrOutput
+ ToSchemaExporterCredentialsPtrOutputWithContext(context.Context) SchemaExporterCredentialsPtrOutput
}
-type schemaRegistryClusterConfigCredentialsPtrType SchemaRegistryClusterConfigCredentialsArgs
+type schemaExporterCredentialsPtrType SchemaExporterCredentialsArgs
-func SchemaRegistryClusterConfigCredentialsPtr(v *SchemaRegistryClusterConfigCredentialsArgs) SchemaRegistryClusterConfigCredentialsPtrInput {
- return (*schemaRegistryClusterConfigCredentialsPtrType)(v)
+func SchemaExporterCredentialsPtr(v *SchemaExporterCredentialsArgs) SchemaExporterCredentialsPtrInput {
+ return (*schemaExporterCredentialsPtrType)(v)
}
-func (*schemaRegistryClusterConfigCredentialsPtrType) ElementType() reflect.Type {
- return reflect.TypeOf((**SchemaRegistryClusterConfigCredentials)(nil)).Elem()
+func (*schemaExporterCredentialsPtrType) ElementType() reflect.Type {
+ return reflect.TypeOf((**SchemaExporterCredentials)(nil)).Elem()
}
-func (i *schemaRegistryClusterConfigCredentialsPtrType) ToSchemaRegistryClusterConfigCredentialsPtrOutput() SchemaRegistryClusterConfigCredentialsPtrOutput {
- return i.ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(context.Background())
+func (i *schemaExporterCredentialsPtrType) ToSchemaExporterCredentialsPtrOutput() SchemaExporterCredentialsPtrOutput {
+ return i.ToSchemaExporterCredentialsPtrOutputWithContext(context.Background())
}
-func (i *schemaRegistryClusterConfigCredentialsPtrType) ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigCredentialsPtrOutput {
- return pulumi.ToOutputWithContext(ctx, i).(SchemaRegistryClusterConfigCredentialsPtrOutput)
+func (i *schemaExporterCredentialsPtrType) ToSchemaExporterCredentialsPtrOutputWithContext(ctx context.Context) SchemaExporterCredentialsPtrOutput {
+ return pulumi.ToOutputWithContext(ctx, i).(SchemaExporterCredentialsPtrOutput)
}
-func (i *schemaRegistryClusterConfigCredentialsPtrType) ToOutput(ctx context.Context) pulumix.Output[*SchemaRegistryClusterConfigCredentials] {
- return pulumix.Output[*SchemaRegistryClusterConfigCredentials]{
- OutputState: i.ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(ctx).OutputState,
+func (i *schemaExporterCredentialsPtrType) ToOutput(ctx context.Context) pulumix.Output[*SchemaExporterCredentials] {
+ return pulumix.Output[*SchemaExporterCredentials]{
+ OutputState: i.ToSchemaExporterCredentialsPtrOutputWithContext(ctx).OutputState,
}
}
-type SchemaRegistryClusterConfigCredentialsOutput struct{ *pulumi.OutputState }
+type SchemaExporterCredentialsOutput struct{ *pulumi.OutputState }
-func (SchemaRegistryClusterConfigCredentialsOutput) ElementType() reflect.Type {
- return reflect.TypeOf((*SchemaRegistryClusterConfigCredentials)(nil)).Elem()
+func (SchemaExporterCredentialsOutput) ElementType() reflect.Type {
+ return reflect.TypeOf((*SchemaExporterCredentials)(nil)).Elem()
}
-func (o SchemaRegistryClusterConfigCredentialsOutput) ToSchemaRegistryClusterConfigCredentialsOutput() SchemaRegistryClusterConfigCredentialsOutput {
+func (o SchemaExporterCredentialsOutput) ToSchemaExporterCredentialsOutput() SchemaExporterCredentialsOutput {
return o
}
-func (o SchemaRegistryClusterConfigCredentialsOutput) ToSchemaRegistryClusterConfigCredentialsOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigCredentialsOutput {
+func (o SchemaExporterCredentialsOutput) ToSchemaExporterCredentialsOutputWithContext(ctx context.Context) SchemaExporterCredentialsOutput {
return o
}
-func (o SchemaRegistryClusterConfigCredentialsOutput) ToSchemaRegistryClusterConfigCredentialsPtrOutput() SchemaRegistryClusterConfigCredentialsPtrOutput {
- return o.ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(context.Background())
+func (o SchemaExporterCredentialsOutput) ToSchemaExporterCredentialsPtrOutput() SchemaExporterCredentialsPtrOutput {
+ return o.ToSchemaExporterCredentialsPtrOutputWithContext(context.Background())
}
-func (o SchemaRegistryClusterConfigCredentialsOutput) ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigCredentialsPtrOutput {
- return o.ApplyTWithContext(ctx, func(_ context.Context, v SchemaRegistryClusterConfigCredentials) *SchemaRegistryClusterConfigCredentials {
+func (o SchemaExporterCredentialsOutput) ToSchemaExporterCredentialsPtrOutputWithContext(ctx context.Context) SchemaExporterCredentialsPtrOutput {
+ return o.ApplyTWithContext(ctx, func(_ context.Context, v SchemaExporterCredentials) *SchemaExporterCredentials {
return &v
- }).(SchemaRegistryClusterConfigCredentialsPtrOutput)
+ }).(SchemaExporterCredentialsPtrOutput)
}
-func (o SchemaRegistryClusterConfigCredentialsOutput) ToOutput(ctx context.Context) pulumix.Output[SchemaRegistryClusterConfigCredentials] {
- return pulumix.Output[SchemaRegistryClusterConfigCredentials]{
+func (o SchemaExporterCredentialsOutput) ToOutput(ctx context.Context) pulumix.Output[SchemaExporterCredentials] {
+ return pulumix.Output[SchemaExporterCredentials]{
OutputState: o.OutputState,
}
}
// The Schema Registry API Key.
-func (o SchemaRegistryClusterConfigCredentialsOutput) Key() pulumi.StringOutput {
- return o.ApplyT(func(v SchemaRegistryClusterConfigCredentials) string { return v.Key }).(pulumi.StringOutput)
+func (o SchemaExporterCredentialsOutput) Key() pulumi.StringOutput {
+ return o.ApplyT(func(v SchemaExporterCredentials) string { return v.Key }).(pulumi.StringOutput)
}
-func (o SchemaRegistryClusterConfigCredentialsOutput) Secret() pulumi.StringOutput {
- return o.ApplyT(func(v SchemaRegistryClusterConfigCredentials) string { return v.Secret }).(pulumi.StringOutput)
+// The Schema Registry API Secret.
+func (o SchemaExporterCredentialsOutput) Secret() pulumi.StringOutput {
+ return o.ApplyT(func(v SchemaExporterCredentials) string { return v.Secret }).(pulumi.StringOutput)
}
-type SchemaRegistryClusterConfigCredentialsPtrOutput struct{ *pulumi.OutputState }
+type SchemaExporterCredentialsPtrOutput struct{ *pulumi.OutputState }
-func (SchemaRegistryClusterConfigCredentialsPtrOutput) ElementType() reflect.Type {
- return reflect.TypeOf((**SchemaRegistryClusterConfigCredentials)(nil)).Elem()
+func (SchemaExporterCredentialsPtrOutput) ElementType() reflect.Type {
+ return reflect.TypeOf((**SchemaExporterCredentials)(nil)).Elem()
}
-func (o SchemaRegistryClusterConfigCredentialsPtrOutput) ToSchemaRegistryClusterConfigCredentialsPtrOutput() SchemaRegistryClusterConfigCredentialsPtrOutput {
+func (o SchemaExporterCredentialsPtrOutput) ToSchemaExporterCredentialsPtrOutput() SchemaExporterCredentialsPtrOutput {
return o
}
-func (o SchemaRegistryClusterConfigCredentialsPtrOutput) ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigCredentialsPtrOutput {
+func (o SchemaExporterCredentialsPtrOutput) ToSchemaExporterCredentialsPtrOutputWithContext(ctx context.Context) SchemaExporterCredentialsPtrOutput {
return o
}
-func (o SchemaRegistryClusterConfigCredentialsPtrOutput) ToOutput(ctx context.Context) pulumix.Output[*SchemaRegistryClusterConfigCredentials] {
- return pulumix.Output[*SchemaRegistryClusterConfigCredentials]{
+func (o SchemaExporterCredentialsPtrOutput) ToOutput(ctx context.Context) pulumix.Output[*SchemaExporterCredentials] {
+ return pulumix.Output[*SchemaExporterCredentials]{
OutputState: o.OutputState,
}
}
-func (o SchemaRegistryClusterConfigCredentialsPtrOutput) Elem() SchemaRegistryClusterConfigCredentialsOutput {
- return o.ApplyT(func(v *SchemaRegistryClusterConfigCredentials) SchemaRegistryClusterConfigCredentials {
+func (o SchemaExporterCredentialsPtrOutput) Elem() SchemaExporterCredentialsOutput {
+ return o.ApplyT(func(v *SchemaExporterCredentials) SchemaExporterCredentials {
if v != nil {
return *v
}
- var ret SchemaRegistryClusterConfigCredentials
+ var ret SchemaExporterCredentials
return ret
- }).(SchemaRegistryClusterConfigCredentialsOutput)
+ }).(SchemaExporterCredentialsOutput)
}
// The Schema Registry API Key.
-func (o SchemaRegistryClusterConfigCredentialsPtrOutput) Key() pulumi.StringPtrOutput {
- return o.ApplyT(func(v *SchemaRegistryClusterConfigCredentials) *string {
+func (o SchemaExporterCredentialsPtrOutput) Key() pulumi.StringPtrOutput {
+ return o.ApplyT(func(v *SchemaExporterCredentials) *string {
if v == nil {
return nil
}
@@ -13341,8 +13473,9 @@ func (o SchemaRegistryClusterConfigCredentialsPtrOutput) Key() pulumi.StringPtrO
}).(pulumi.StringPtrOutput)
}
-func (o SchemaRegistryClusterConfigCredentialsPtrOutput) Secret() pulumi.StringPtrOutput {
- return o.ApplyT(func(v *SchemaRegistryClusterConfigCredentials) *string {
+// The Schema Registry API Secret.
+func (o SchemaExporterCredentialsPtrOutput) Secret() pulumi.StringPtrOutput {
+ return o.ApplyT(func(v *SchemaExporterCredentials) *string {
if v == nil {
return nil
}
@@ -13350,148 +13483,843 @@ func (o SchemaRegistryClusterConfigCredentialsPtrOutput) Secret() pulumi.StringP
}).(pulumi.StringPtrOutput)
}
-type SchemaRegistryClusterConfigSchemaRegistryCluster struct {
- // The ID of the Schema Registry cluster, for example, `lsrc-abc123`.
- Id string `pulumi:"id"`
+type SchemaExporterDestinationSchemaRegistryCluster struct {
+ Credentials SchemaExporterDestinationSchemaRegistryClusterCredentials `pulumi:"credentials"`
+ // The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`.
+ RestEndpoint string `pulumi:"restEndpoint"`
}
-// SchemaRegistryClusterConfigSchemaRegistryClusterInput is an input type that accepts SchemaRegistryClusterConfigSchemaRegistryClusterArgs and SchemaRegistryClusterConfigSchemaRegistryClusterOutput values.
-// You can construct a concrete instance of `SchemaRegistryClusterConfigSchemaRegistryClusterInput` via:
+// SchemaExporterDestinationSchemaRegistryClusterInput is an input type that accepts SchemaExporterDestinationSchemaRegistryClusterArgs and SchemaExporterDestinationSchemaRegistryClusterOutput values.
+// You can construct a concrete instance of `SchemaExporterDestinationSchemaRegistryClusterInput` via:
//
-// SchemaRegistryClusterConfigSchemaRegistryClusterArgs{...}
-type SchemaRegistryClusterConfigSchemaRegistryClusterInput interface {
+// SchemaExporterDestinationSchemaRegistryClusterArgs{...}
+type SchemaExporterDestinationSchemaRegistryClusterInput interface {
pulumi.Input
- ToSchemaRegistryClusterConfigSchemaRegistryClusterOutput() SchemaRegistryClusterConfigSchemaRegistryClusterOutput
- ToSchemaRegistryClusterConfigSchemaRegistryClusterOutputWithContext(context.Context) SchemaRegistryClusterConfigSchemaRegistryClusterOutput
+ ToSchemaExporterDestinationSchemaRegistryClusterOutput() SchemaExporterDestinationSchemaRegistryClusterOutput
+ ToSchemaExporterDestinationSchemaRegistryClusterOutputWithContext(context.Context) SchemaExporterDestinationSchemaRegistryClusterOutput
}
-type SchemaRegistryClusterConfigSchemaRegistryClusterArgs struct {
- // The ID of the Schema Registry cluster, for example, `lsrc-abc123`.
- Id pulumi.StringInput `pulumi:"id"`
+type SchemaExporterDestinationSchemaRegistryClusterArgs struct {
+ Credentials SchemaExporterDestinationSchemaRegistryClusterCredentialsInput `pulumi:"credentials"`
+ // The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`.
+ RestEndpoint pulumi.StringInput `pulumi:"restEndpoint"`
}
-func (SchemaRegistryClusterConfigSchemaRegistryClusterArgs) ElementType() reflect.Type {
- return reflect.TypeOf((*SchemaRegistryClusterConfigSchemaRegistryCluster)(nil)).Elem()
+func (SchemaExporterDestinationSchemaRegistryClusterArgs) ElementType() reflect.Type {
+ return reflect.TypeOf((*SchemaExporterDestinationSchemaRegistryCluster)(nil)).Elem()
}
-func (i SchemaRegistryClusterConfigSchemaRegistryClusterArgs) ToSchemaRegistryClusterConfigSchemaRegistryClusterOutput() SchemaRegistryClusterConfigSchemaRegistryClusterOutput {
- return i.ToSchemaRegistryClusterConfigSchemaRegistryClusterOutputWithContext(context.Background())
+func (i SchemaExporterDestinationSchemaRegistryClusterArgs) ToSchemaExporterDestinationSchemaRegistryClusterOutput() SchemaExporterDestinationSchemaRegistryClusterOutput {
+ return i.ToSchemaExporterDestinationSchemaRegistryClusterOutputWithContext(context.Background())
}
-func (i SchemaRegistryClusterConfigSchemaRegistryClusterArgs) ToSchemaRegistryClusterConfigSchemaRegistryClusterOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigSchemaRegistryClusterOutput {
- return pulumi.ToOutputWithContext(ctx, i).(SchemaRegistryClusterConfigSchemaRegistryClusterOutput)
+func (i SchemaExporterDestinationSchemaRegistryClusterArgs) ToSchemaExporterDestinationSchemaRegistryClusterOutputWithContext(ctx context.Context) SchemaExporterDestinationSchemaRegistryClusterOutput {
+ return pulumi.ToOutputWithContext(ctx, i).(SchemaExporterDestinationSchemaRegistryClusterOutput)
}
-func (i SchemaRegistryClusterConfigSchemaRegistryClusterArgs) ToOutput(ctx context.Context) pulumix.Output[SchemaRegistryClusterConfigSchemaRegistryCluster] {
- return pulumix.Output[SchemaRegistryClusterConfigSchemaRegistryCluster]{
- OutputState: i.ToSchemaRegistryClusterConfigSchemaRegistryClusterOutputWithContext(ctx).OutputState,
+func (i SchemaExporterDestinationSchemaRegistryClusterArgs) ToOutput(ctx context.Context) pulumix.Output[SchemaExporterDestinationSchemaRegistryCluster] {
+ return pulumix.Output[SchemaExporterDestinationSchemaRegistryCluster]{
+ OutputState: i.ToSchemaExporterDestinationSchemaRegistryClusterOutputWithContext(ctx).OutputState,
}
}
-func (i SchemaRegistryClusterConfigSchemaRegistryClusterArgs) ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput() SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput {
- return i.ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(context.Background())
+func (i SchemaExporterDestinationSchemaRegistryClusterArgs) ToSchemaExporterDestinationSchemaRegistryClusterPtrOutput() SchemaExporterDestinationSchemaRegistryClusterPtrOutput {
+ return i.ToSchemaExporterDestinationSchemaRegistryClusterPtrOutputWithContext(context.Background())
}
-func (i SchemaRegistryClusterConfigSchemaRegistryClusterArgs) ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput {
- return pulumi.ToOutputWithContext(ctx, i).(SchemaRegistryClusterConfigSchemaRegistryClusterOutput).ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(ctx)
+func (i SchemaExporterDestinationSchemaRegistryClusterArgs) ToSchemaExporterDestinationSchemaRegistryClusterPtrOutputWithContext(ctx context.Context) SchemaExporterDestinationSchemaRegistryClusterPtrOutput {
+ return pulumi.ToOutputWithContext(ctx, i).(SchemaExporterDestinationSchemaRegistryClusterOutput).ToSchemaExporterDestinationSchemaRegistryClusterPtrOutputWithContext(ctx)
}
-// SchemaRegistryClusterConfigSchemaRegistryClusterPtrInput is an input type that accepts SchemaRegistryClusterConfigSchemaRegistryClusterArgs, SchemaRegistryClusterConfigSchemaRegistryClusterPtr and SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput values.
-// You can construct a concrete instance of `SchemaRegistryClusterConfigSchemaRegistryClusterPtrInput` via:
+// SchemaExporterDestinationSchemaRegistryClusterPtrInput is an input type that accepts SchemaExporterDestinationSchemaRegistryClusterArgs, SchemaExporterDestinationSchemaRegistryClusterPtr and SchemaExporterDestinationSchemaRegistryClusterPtrOutput values.
+// You can construct a concrete instance of `SchemaExporterDestinationSchemaRegistryClusterPtrInput` via:
//
-// SchemaRegistryClusterConfigSchemaRegistryClusterArgs{...}
+// SchemaExporterDestinationSchemaRegistryClusterArgs{...}
//
// or:
//
// nil
-type SchemaRegistryClusterConfigSchemaRegistryClusterPtrInput interface {
+type SchemaExporterDestinationSchemaRegistryClusterPtrInput interface {
pulumi.Input
- ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput() SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput
- ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(context.Context) SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput
+ ToSchemaExporterDestinationSchemaRegistryClusterPtrOutput() SchemaExporterDestinationSchemaRegistryClusterPtrOutput
+ ToSchemaExporterDestinationSchemaRegistryClusterPtrOutputWithContext(context.Context) SchemaExporterDestinationSchemaRegistryClusterPtrOutput
}
-type schemaRegistryClusterConfigSchemaRegistryClusterPtrType SchemaRegistryClusterConfigSchemaRegistryClusterArgs
+type schemaExporterDestinationSchemaRegistryClusterPtrType SchemaExporterDestinationSchemaRegistryClusterArgs
-func SchemaRegistryClusterConfigSchemaRegistryClusterPtr(v *SchemaRegistryClusterConfigSchemaRegistryClusterArgs) SchemaRegistryClusterConfigSchemaRegistryClusterPtrInput {
- return (*schemaRegistryClusterConfigSchemaRegistryClusterPtrType)(v)
+func SchemaExporterDestinationSchemaRegistryClusterPtr(v *SchemaExporterDestinationSchemaRegistryClusterArgs) SchemaExporterDestinationSchemaRegistryClusterPtrInput {
+ return (*schemaExporterDestinationSchemaRegistryClusterPtrType)(v)
}
-func (*schemaRegistryClusterConfigSchemaRegistryClusterPtrType) ElementType() reflect.Type {
- return reflect.TypeOf((**SchemaRegistryClusterConfigSchemaRegistryCluster)(nil)).Elem()
+func (*schemaExporterDestinationSchemaRegistryClusterPtrType) ElementType() reflect.Type {
+ return reflect.TypeOf((**SchemaExporterDestinationSchemaRegistryCluster)(nil)).Elem()
}
-func (i *schemaRegistryClusterConfigSchemaRegistryClusterPtrType) ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput() SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput {
- return i.ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(context.Background())
+func (i *schemaExporterDestinationSchemaRegistryClusterPtrType) ToSchemaExporterDestinationSchemaRegistryClusterPtrOutput() SchemaExporterDestinationSchemaRegistryClusterPtrOutput {
+ return i.ToSchemaExporterDestinationSchemaRegistryClusterPtrOutputWithContext(context.Background())
}
-func (i *schemaRegistryClusterConfigSchemaRegistryClusterPtrType) ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput {
- return pulumi.ToOutputWithContext(ctx, i).(SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput)
+func (i *schemaExporterDestinationSchemaRegistryClusterPtrType) ToSchemaExporterDestinationSchemaRegistryClusterPtrOutputWithContext(ctx context.Context) SchemaExporterDestinationSchemaRegistryClusterPtrOutput {
+ return pulumi.ToOutputWithContext(ctx, i).(SchemaExporterDestinationSchemaRegistryClusterPtrOutput)
}
-func (i *schemaRegistryClusterConfigSchemaRegistryClusterPtrType) ToOutput(ctx context.Context) pulumix.Output[*SchemaRegistryClusterConfigSchemaRegistryCluster] {
- return pulumix.Output[*SchemaRegistryClusterConfigSchemaRegistryCluster]{
- OutputState: i.ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(ctx).OutputState,
+func (i *schemaExporterDestinationSchemaRegistryClusterPtrType) ToOutput(ctx context.Context) pulumix.Output[*SchemaExporterDestinationSchemaRegistryCluster] {
+ return pulumix.Output[*SchemaExporterDestinationSchemaRegistryCluster]{
+ OutputState: i.ToSchemaExporterDestinationSchemaRegistryClusterPtrOutputWithContext(ctx).OutputState,
}
}
-type SchemaRegistryClusterConfigSchemaRegistryClusterOutput struct{ *pulumi.OutputState }
+type SchemaExporterDestinationSchemaRegistryClusterOutput struct{ *pulumi.OutputState }
-func (SchemaRegistryClusterConfigSchemaRegistryClusterOutput) ElementType() reflect.Type {
- return reflect.TypeOf((*SchemaRegistryClusterConfigSchemaRegistryCluster)(nil)).Elem()
+func (SchemaExporterDestinationSchemaRegistryClusterOutput) ElementType() reflect.Type {
+ return reflect.TypeOf((*SchemaExporterDestinationSchemaRegistryCluster)(nil)).Elem()
}
-func (o SchemaRegistryClusterConfigSchemaRegistryClusterOutput) ToSchemaRegistryClusterConfigSchemaRegistryClusterOutput() SchemaRegistryClusterConfigSchemaRegistryClusterOutput {
+func (o SchemaExporterDestinationSchemaRegistryClusterOutput) ToSchemaExporterDestinationSchemaRegistryClusterOutput() SchemaExporterDestinationSchemaRegistryClusterOutput {
return o
}
-func (o SchemaRegistryClusterConfigSchemaRegistryClusterOutput) ToSchemaRegistryClusterConfigSchemaRegistryClusterOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigSchemaRegistryClusterOutput {
+func (o SchemaExporterDestinationSchemaRegistryClusterOutput) ToSchemaExporterDestinationSchemaRegistryClusterOutputWithContext(ctx context.Context) SchemaExporterDestinationSchemaRegistryClusterOutput {
return o
}
-func (o SchemaRegistryClusterConfigSchemaRegistryClusterOutput) ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput() SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput {
- return o.ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(context.Background())
+func (o SchemaExporterDestinationSchemaRegistryClusterOutput) ToSchemaExporterDestinationSchemaRegistryClusterPtrOutput() SchemaExporterDestinationSchemaRegistryClusterPtrOutput {
+ return o.ToSchemaExporterDestinationSchemaRegistryClusterPtrOutputWithContext(context.Background())
}
-func (o SchemaRegistryClusterConfigSchemaRegistryClusterOutput) ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput {
- return o.ApplyTWithContext(ctx, func(_ context.Context, v SchemaRegistryClusterConfigSchemaRegistryCluster) *SchemaRegistryClusterConfigSchemaRegistryCluster {
+func (o SchemaExporterDestinationSchemaRegistryClusterOutput) ToSchemaExporterDestinationSchemaRegistryClusterPtrOutputWithContext(ctx context.Context) SchemaExporterDestinationSchemaRegistryClusterPtrOutput {
+ return o.ApplyTWithContext(ctx, func(_ context.Context, v SchemaExporterDestinationSchemaRegistryCluster) *SchemaExporterDestinationSchemaRegistryCluster {
return &v
- }).(SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput)
+ }).(SchemaExporterDestinationSchemaRegistryClusterPtrOutput)
}
-func (o SchemaRegistryClusterConfigSchemaRegistryClusterOutput) ToOutput(ctx context.Context) pulumix.Output[SchemaRegistryClusterConfigSchemaRegistryCluster] {
- return pulumix.Output[SchemaRegistryClusterConfigSchemaRegistryCluster]{
+func (o SchemaExporterDestinationSchemaRegistryClusterOutput) ToOutput(ctx context.Context) pulumix.Output[SchemaExporterDestinationSchemaRegistryCluster] {
+ return pulumix.Output[SchemaExporterDestinationSchemaRegistryCluster]{
OutputState: o.OutputState,
}
}
-// The ID of the Schema Registry cluster, for example, `lsrc-abc123`.
-func (o SchemaRegistryClusterConfigSchemaRegistryClusterOutput) Id() pulumi.StringOutput {
- return o.ApplyT(func(v SchemaRegistryClusterConfigSchemaRegistryCluster) string { return v.Id }).(pulumi.StringOutput)
+func (o SchemaExporterDestinationSchemaRegistryClusterOutput) Credentials() SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput {
+ return o.ApplyT(func(v SchemaExporterDestinationSchemaRegistryCluster) SchemaExporterDestinationSchemaRegistryClusterCredentials {
+ return v.Credentials
+ }).(SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput)
}
-type SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput struct{ *pulumi.OutputState }
+// The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`.
+func (o SchemaExporterDestinationSchemaRegistryClusterOutput) RestEndpoint() pulumi.StringOutput {
+ return o.ApplyT(func(v SchemaExporterDestinationSchemaRegistryCluster) string { return v.RestEndpoint }).(pulumi.StringOutput)
+}
-func (SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput) ElementType() reflect.Type {
- return reflect.TypeOf((**SchemaRegistryClusterConfigSchemaRegistryCluster)(nil)).Elem()
+type SchemaExporterDestinationSchemaRegistryClusterPtrOutput struct{ *pulumi.OutputState }
+
+func (SchemaExporterDestinationSchemaRegistryClusterPtrOutput) ElementType() reflect.Type {
+ return reflect.TypeOf((**SchemaExporterDestinationSchemaRegistryCluster)(nil)).Elem()
}
-func (o SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput) ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput() SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput {
+func (o SchemaExporterDestinationSchemaRegistryClusterPtrOutput) ToSchemaExporterDestinationSchemaRegistryClusterPtrOutput() SchemaExporterDestinationSchemaRegistryClusterPtrOutput {
return o
}
-func (o SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput) ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput {
+func (o SchemaExporterDestinationSchemaRegistryClusterPtrOutput) ToSchemaExporterDestinationSchemaRegistryClusterPtrOutputWithContext(ctx context.Context) SchemaExporterDestinationSchemaRegistryClusterPtrOutput {
return o
}
-func (o SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput) ToOutput(ctx context.Context) pulumix.Output[*SchemaRegistryClusterConfigSchemaRegistryCluster] {
- return pulumix.Output[*SchemaRegistryClusterConfigSchemaRegistryCluster]{
+func (o SchemaExporterDestinationSchemaRegistryClusterPtrOutput) ToOutput(ctx context.Context) pulumix.Output[*SchemaExporterDestinationSchemaRegistryCluster] {
+ return pulumix.Output[*SchemaExporterDestinationSchemaRegistryCluster]{
OutputState: o.OutputState,
}
}
-func (o SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput) Elem() SchemaRegistryClusterConfigSchemaRegistryClusterOutput {
+func (o SchemaExporterDestinationSchemaRegistryClusterPtrOutput) Elem() SchemaExporterDestinationSchemaRegistryClusterOutput {
+ return o.ApplyT(func(v *SchemaExporterDestinationSchemaRegistryCluster) SchemaExporterDestinationSchemaRegistryCluster {
+ if v != nil {
+ return *v
+ }
+ var ret SchemaExporterDestinationSchemaRegistryCluster
+ return ret
+ }).(SchemaExporterDestinationSchemaRegistryClusterOutput)
+}
+
+func (o SchemaExporterDestinationSchemaRegistryClusterPtrOutput) Credentials() SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput {
+ return o.ApplyT(func(v *SchemaExporterDestinationSchemaRegistryCluster) *SchemaExporterDestinationSchemaRegistryClusterCredentials {
+ if v == nil {
+ return nil
+ }
+ return &v.Credentials
+ }).(SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput)
+}
+
+// The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`.
+func (o SchemaExporterDestinationSchemaRegistryClusterPtrOutput) RestEndpoint() pulumi.StringPtrOutput {
+ return o.ApplyT(func(v *SchemaExporterDestinationSchemaRegistryCluster) *string {
+ if v == nil {
+ return nil
+ }
+ return &v.RestEndpoint
+ }).(pulumi.StringPtrOutput)
+}
+
+type SchemaExporterDestinationSchemaRegistryClusterCredentials struct {
+ // The Schema Registry API Key.
+ Key string `pulumi:"key"`
+ // The Schema Registry API Secret.
+ Secret string `pulumi:"secret"`
+}
+
+// SchemaExporterDestinationSchemaRegistryClusterCredentialsInput is an input type that accepts SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs and SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput values.
+// You can construct a concrete instance of `SchemaExporterDestinationSchemaRegistryClusterCredentialsInput` via:
+//
+// SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs{...}
+type SchemaExporterDestinationSchemaRegistryClusterCredentialsInput interface {
+ pulumi.Input
+
+ ToSchemaExporterDestinationSchemaRegistryClusterCredentialsOutput() SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput
+ ToSchemaExporterDestinationSchemaRegistryClusterCredentialsOutputWithContext(context.Context) SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput
+}
+
+type SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs struct {
+ // The Schema Registry API Key.
+ Key pulumi.StringInput `pulumi:"key"`
+ // The Schema Registry API Secret.
+ Secret pulumi.StringInput `pulumi:"secret"`
+}
+
+func (SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs) ElementType() reflect.Type {
+ return reflect.TypeOf((*SchemaExporterDestinationSchemaRegistryClusterCredentials)(nil)).Elem()
+}
+
+func (i SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs) ToSchemaExporterDestinationSchemaRegistryClusterCredentialsOutput() SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput {
+ return i.ToSchemaExporterDestinationSchemaRegistryClusterCredentialsOutputWithContext(context.Background())
+}
+
+func (i SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs) ToSchemaExporterDestinationSchemaRegistryClusterCredentialsOutputWithContext(ctx context.Context) SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput {
+ return pulumi.ToOutputWithContext(ctx, i).(SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput)
+}
+
+func (i SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs) ToOutput(ctx context.Context) pulumix.Output[SchemaExporterDestinationSchemaRegistryClusterCredentials] {
+ return pulumix.Output[SchemaExporterDestinationSchemaRegistryClusterCredentials]{
+ OutputState: i.ToSchemaExporterDestinationSchemaRegistryClusterCredentialsOutputWithContext(ctx).OutputState,
+ }
+}
+
+func (i SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs) ToSchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput() SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput {
+ return i.ToSchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutputWithContext(context.Background())
+}
+
+func (i SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs) ToSchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutputWithContext(ctx context.Context) SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput {
+ return pulumi.ToOutputWithContext(ctx, i).(SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput).ToSchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutputWithContext(ctx)
+}
+
+// SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrInput is an input type that accepts SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs, SchemaExporterDestinationSchemaRegistryClusterCredentialsPtr and SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput values.
+// You can construct a concrete instance of `SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrInput` via:
+//
+// SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs{...}
+//
+// or:
+//
+// nil
+type SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrInput interface {
+ pulumi.Input
+
+ ToSchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput() SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput
+ ToSchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutputWithContext(context.Context) SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput
+}
+
+type schemaExporterDestinationSchemaRegistryClusterCredentialsPtrType SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs
+
+func SchemaExporterDestinationSchemaRegistryClusterCredentialsPtr(v *SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs) SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrInput {
+ return (*schemaExporterDestinationSchemaRegistryClusterCredentialsPtrType)(v)
+}
+
+func (*schemaExporterDestinationSchemaRegistryClusterCredentialsPtrType) ElementType() reflect.Type {
+ return reflect.TypeOf((**SchemaExporterDestinationSchemaRegistryClusterCredentials)(nil)).Elem()
+}
+
+func (i *schemaExporterDestinationSchemaRegistryClusterCredentialsPtrType) ToSchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput() SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput {
+ return i.ToSchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutputWithContext(context.Background())
+}
+
+func (i *schemaExporterDestinationSchemaRegistryClusterCredentialsPtrType) ToSchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutputWithContext(ctx context.Context) SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput {
+ return pulumi.ToOutputWithContext(ctx, i).(SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput)
+}
+
+func (i *schemaExporterDestinationSchemaRegistryClusterCredentialsPtrType) ToOutput(ctx context.Context) pulumix.Output[*SchemaExporterDestinationSchemaRegistryClusterCredentials] {
+ return pulumix.Output[*SchemaExporterDestinationSchemaRegistryClusterCredentials]{
+ OutputState: i.ToSchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutputWithContext(ctx).OutputState,
+ }
+}
+
+type SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput struct{ *pulumi.OutputState }
+
+func (SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput) ElementType() reflect.Type {
+ return reflect.TypeOf((*SchemaExporterDestinationSchemaRegistryClusterCredentials)(nil)).Elem()
+}
+
+func (o SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput) ToSchemaExporterDestinationSchemaRegistryClusterCredentialsOutput() SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput {
+ return o
+}
+
+func (o SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput) ToSchemaExporterDestinationSchemaRegistryClusterCredentialsOutputWithContext(ctx context.Context) SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput {
+ return o
+}
+
+func (o SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput) ToSchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput() SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput {
+ return o.ToSchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutputWithContext(context.Background())
+}
+
+func (o SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput) ToSchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutputWithContext(ctx context.Context) SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput {
+ return o.ApplyTWithContext(ctx, func(_ context.Context, v SchemaExporterDestinationSchemaRegistryClusterCredentials) *SchemaExporterDestinationSchemaRegistryClusterCredentials {
+ return &v
+ }).(SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput)
+}
+
+func (o SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput) ToOutput(ctx context.Context) pulumix.Output[SchemaExporterDestinationSchemaRegistryClusterCredentials] {
+ return pulumix.Output[SchemaExporterDestinationSchemaRegistryClusterCredentials]{
+ OutputState: o.OutputState,
+ }
+}
+
+// The Schema Registry API Key.
+func (o SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput) Key() pulumi.StringOutput {
+ return o.ApplyT(func(v SchemaExporterDestinationSchemaRegistryClusterCredentials) string { return v.Key }).(pulumi.StringOutput)
+}
+
+// The Schema Registry API Secret.
+func (o SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput) Secret() pulumi.StringOutput {
+ return o.ApplyT(func(v SchemaExporterDestinationSchemaRegistryClusterCredentials) string { return v.Secret }).(pulumi.StringOutput)
+}
+
+type SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput struct{ *pulumi.OutputState }
+
+func (SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput) ElementType() reflect.Type {
+ return reflect.TypeOf((**SchemaExporterDestinationSchemaRegistryClusterCredentials)(nil)).Elem()
+}
+
+func (o SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput) ToSchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput() SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput {
+ return o
+}
+
+func (o SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput) ToSchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutputWithContext(ctx context.Context) SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput {
+ return o
+}
+
+func (o SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput) ToOutput(ctx context.Context) pulumix.Output[*SchemaExporterDestinationSchemaRegistryClusterCredentials] {
+ return pulumix.Output[*SchemaExporterDestinationSchemaRegistryClusterCredentials]{
+ OutputState: o.OutputState,
+ }
+}
+
+func (o SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput) Elem() SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput {
+ return o.ApplyT(func(v *SchemaExporterDestinationSchemaRegistryClusterCredentials) SchemaExporterDestinationSchemaRegistryClusterCredentials {
+ if v != nil {
+ return *v
+ }
+ var ret SchemaExporterDestinationSchemaRegistryClusterCredentials
+ return ret
+ }).(SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput)
+}
+
+// The Schema Registry API Key.
+func (o SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput) Key() pulumi.StringPtrOutput {
+ return o.ApplyT(func(v *SchemaExporterDestinationSchemaRegistryClusterCredentials) *string {
+ if v == nil {
+ return nil
+ }
+ return &v.Key
+ }).(pulumi.StringPtrOutput)
+}
+
+// The Schema Registry API Secret.
+func (o SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput) Secret() pulumi.StringPtrOutput {
+ return o.ApplyT(func(v *SchemaExporterDestinationSchemaRegistryClusterCredentials) *string {
+ if v == nil {
+ return nil
+ }
+ return &v.Secret
+ }).(pulumi.StringPtrOutput)
+}
+
+type SchemaExporterSchemaRegistryCluster struct {
+ // The ID of the Schema Registry cluster, for example, `lsrc-abc123`.
+ Id string `pulumi:"id"`
+}
+
+// SchemaExporterSchemaRegistryClusterInput is an input type that accepts SchemaExporterSchemaRegistryClusterArgs and SchemaExporterSchemaRegistryClusterOutput values.
+// You can construct a concrete instance of `SchemaExporterSchemaRegistryClusterInput` via:
+//
+// SchemaExporterSchemaRegistryClusterArgs{...}
+type SchemaExporterSchemaRegistryClusterInput interface {
+ pulumi.Input
+
+ ToSchemaExporterSchemaRegistryClusterOutput() SchemaExporterSchemaRegistryClusterOutput
+ ToSchemaExporterSchemaRegistryClusterOutputWithContext(context.Context) SchemaExporterSchemaRegistryClusterOutput
+}
+
+type SchemaExporterSchemaRegistryClusterArgs struct {
+ // The ID of the Schema Registry cluster, for example, `lsrc-abc123`.
+ Id pulumi.StringInput `pulumi:"id"`
+}
+
+func (SchemaExporterSchemaRegistryClusterArgs) ElementType() reflect.Type {
+ return reflect.TypeOf((*SchemaExporterSchemaRegistryCluster)(nil)).Elem()
+}
+
+func (i SchemaExporterSchemaRegistryClusterArgs) ToSchemaExporterSchemaRegistryClusterOutput() SchemaExporterSchemaRegistryClusterOutput {
+ return i.ToSchemaExporterSchemaRegistryClusterOutputWithContext(context.Background())
+}
+
+func (i SchemaExporterSchemaRegistryClusterArgs) ToSchemaExporterSchemaRegistryClusterOutputWithContext(ctx context.Context) SchemaExporterSchemaRegistryClusterOutput {
+ return pulumi.ToOutputWithContext(ctx, i).(SchemaExporterSchemaRegistryClusterOutput)
+}
+
+func (i SchemaExporterSchemaRegistryClusterArgs) ToOutput(ctx context.Context) pulumix.Output[SchemaExporterSchemaRegistryCluster] {
+ return pulumix.Output[SchemaExporterSchemaRegistryCluster]{
+ OutputState: i.ToSchemaExporterSchemaRegistryClusterOutputWithContext(ctx).OutputState,
+ }
+}
+
+func (i SchemaExporterSchemaRegistryClusterArgs) ToSchemaExporterSchemaRegistryClusterPtrOutput() SchemaExporterSchemaRegistryClusterPtrOutput {
+ return i.ToSchemaExporterSchemaRegistryClusterPtrOutputWithContext(context.Background())
+}
+
+func (i SchemaExporterSchemaRegistryClusterArgs) ToSchemaExporterSchemaRegistryClusterPtrOutputWithContext(ctx context.Context) SchemaExporterSchemaRegistryClusterPtrOutput {
+ return pulumi.ToOutputWithContext(ctx, i).(SchemaExporterSchemaRegistryClusterOutput).ToSchemaExporterSchemaRegistryClusterPtrOutputWithContext(ctx)
+}
+
+// SchemaExporterSchemaRegistryClusterPtrInput is an input type that accepts SchemaExporterSchemaRegistryClusterArgs, SchemaExporterSchemaRegistryClusterPtr and SchemaExporterSchemaRegistryClusterPtrOutput values.
+// You can construct a concrete instance of `SchemaExporterSchemaRegistryClusterPtrInput` via:
+//
+// SchemaExporterSchemaRegistryClusterArgs{...}
+//
+// or:
+//
+// nil
+type SchemaExporterSchemaRegistryClusterPtrInput interface {
+ pulumi.Input
+
+ ToSchemaExporterSchemaRegistryClusterPtrOutput() SchemaExporterSchemaRegistryClusterPtrOutput
+ ToSchemaExporterSchemaRegistryClusterPtrOutputWithContext(context.Context) SchemaExporterSchemaRegistryClusterPtrOutput
+}
+
+type schemaExporterSchemaRegistryClusterPtrType SchemaExporterSchemaRegistryClusterArgs
+
+func SchemaExporterSchemaRegistryClusterPtr(v *SchemaExporterSchemaRegistryClusterArgs) SchemaExporterSchemaRegistryClusterPtrInput {
+ return (*schemaExporterSchemaRegistryClusterPtrType)(v)
+}
+
+func (*schemaExporterSchemaRegistryClusterPtrType) ElementType() reflect.Type {
+ return reflect.TypeOf((**SchemaExporterSchemaRegistryCluster)(nil)).Elem()
+}
+
+func (i *schemaExporterSchemaRegistryClusterPtrType) ToSchemaExporterSchemaRegistryClusterPtrOutput() SchemaExporterSchemaRegistryClusterPtrOutput {
+ return i.ToSchemaExporterSchemaRegistryClusterPtrOutputWithContext(context.Background())
+}
+
+func (i *schemaExporterSchemaRegistryClusterPtrType) ToSchemaExporterSchemaRegistryClusterPtrOutputWithContext(ctx context.Context) SchemaExporterSchemaRegistryClusterPtrOutput {
+ return pulumi.ToOutputWithContext(ctx, i).(SchemaExporterSchemaRegistryClusterPtrOutput)
+}
+
+func (i *schemaExporterSchemaRegistryClusterPtrType) ToOutput(ctx context.Context) pulumix.Output[*SchemaExporterSchemaRegistryCluster] {
+ return pulumix.Output[*SchemaExporterSchemaRegistryCluster]{
+ OutputState: i.ToSchemaExporterSchemaRegistryClusterPtrOutputWithContext(ctx).OutputState,
+ }
+}
+
+type SchemaExporterSchemaRegistryClusterOutput struct{ *pulumi.OutputState }
+
+func (SchemaExporterSchemaRegistryClusterOutput) ElementType() reflect.Type {
+ return reflect.TypeOf((*SchemaExporterSchemaRegistryCluster)(nil)).Elem()
+}
+
+func (o SchemaExporterSchemaRegistryClusterOutput) ToSchemaExporterSchemaRegistryClusterOutput() SchemaExporterSchemaRegistryClusterOutput {
+ return o
+}
+
+func (o SchemaExporterSchemaRegistryClusterOutput) ToSchemaExporterSchemaRegistryClusterOutputWithContext(ctx context.Context) SchemaExporterSchemaRegistryClusterOutput {
+ return o
+}
+
+func (o SchemaExporterSchemaRegistryClusterOutput) ToSchemaExporterSchemaRegistryClusterPtrOutput() SchemaExporterSchemaRegistryClusterPtrOutput {
+ return o.ToSchemaExporterSchemaRegistryClusterPtrOutputWithContext(context.Background())
+}
+
+func (o SchemaExporterSchemaRegistryClusterOutput) ToSchemaExporterSchemaRegistryClusterPtrOutputWithContext(ctx context.Context) SchemaExporterSchemaRegistryClusterPtrOutput {
+ return o.ApplyTWithContext(ctx, func(_ context.Context, v SchemaExporterSchemaRegistryCluster) *SchemaExporterSchemaRegistryCluster {
+ return &v
+ }).(SchemaExporterSchemaRegistryClusterPtrOutput)
+}
+
+func (o SchemaExporterSchemaRegistryClusterOutput) ToOutput(ctx context.Context) pulumix.Output[SchemaExporterSchemaRegistryCluster] {
+ return pulumix.Output[SchemaExporterSchemaRegistryCluster]{
+ OutputState: o.OutputState,
+ }
+}
+
+// The ID of the Schema Registry cluster, for example, `lsrc-abc123`.
+func (o SchemaExporterSchemaRegistryClusterOutput) Id() pulumi.StringOutput {
+ return o.ApplyT(func(v SchemaExporterSchemaRegistryCluster) string { return v.Id }).(pulumi.StringOutput)
+}
+
+type SchemaExporterSchemaRegistryClusterPtrOutput struct{ *pulumi.OutputState }
+
+func (SchemaExporterSchemaRegistryClusterPtrOutput) ElementType() reflect.Type {
+ return reflect.TypeOf((**SchemaExporterSchemaRegistryCluster)(nil)).Elem()
+}
+
+func (o SchemaExporterSchemaRegistryClusterPtrOutput) ToSchemaExporterSchemaRegistryClusterPtrOutput() SchemaExporterSchemaRegistryClusterPtrOutput {
+ return o
+}
+
+func (o SchemaExporterSchemaRegistryClusterPtrOutput) ToSchemaExporterSchemaRegistryClusterPtrOutputWithContext(ctx context.Context) SchemaExporterSchemaRegistryClusterPtrOutput {
+ return o
+}
+
+func (o SchemaExporterSchemaRegistryClusterPtrOutput) ToOutput(ctx context.Context) pulumix.Output[*SchemaExporterSchemaRegistryCluster] {
+ return pulumix.Output[*SchemaExporterSchemaRegistryCluster]{
+ OutputState: o.OutputState,
+ }
+}
+
+func (o SchemaExporterSchemaRegistryClusterPtrOutput) Elem() SchemaExporterSchemaRegistryClusterOutput {
+ return o.ApplyT(func(v *SchemaExporterSchemaRegistryCluster) SchemaExporterSchemaRegistryCluster {
+ if v != nil {
+ return *v
+ }
+ var ret SchemaExporterSchemaRegistryCluster
+ return ret
+ }).(SchemaExporterSchemaRegistryClusterOutput)
+}
+
+// The ID of the Schema Registry cluster, for example, `lsrc-abc123`.
+func (o SchemaExporterSchemaRegistryClusterPtrOutput) Id() pulumi.StringPtrOutput {
+ return o.ApplyT(func(v *SchemaExporterSchemaRegistryCluster) *string {
+ if v == nil {
+ return nil
+ }
+ return &v.Id
+ }).(pulumi.StringPtrOutput)
+}
+
+type SchemaRegistryClusterConfigCredentials struct {
+ // The Schema Registry API Key.
+ Key string `pulumi:"key"`
+ Secret string `pulumi:"secret"`
+}
+
+// SchemaRegistryClusterConfigCredentialsInput is an input type that accepts SchemaRegistryClusterConfigCredentialsArgs and SchemaRegistryClusterConfigCredentialsOutput values.
+// You can construct a concrete instance of `SchemaRegistryClusterConfigCredentialsInput` via:
+//
+// SchemaRegistryClusterConfigCredentialsArgs{...}
+type SchemaRegistryClusterConfigCredentialsInput interface {
+ pulumi.Input
+
+ ToSchemaRegistryClusterConfigCredentialsOutput() SchemaRegistryClusterConfigCredentialsOutput
+ ToSchemaRegistryClusterConfigCredentialsOutputWithContext(context.Context) SchemaRegistryClusterConfigCredentialsOutput
+}
+
+type SchemaRegistryClusterConfigCredentialsArgs struct {
+ // The Schema Registry API Key.
+ Key pulumi.StringInput `pulumi:"key"`
+ Secret pulumi.StringInput `pulumi:"secret"`
+}
+
+func (SchemaRegistryClusterConfigCredentialsArgs) ElementType() reflect.Type {
+ return reflect.TypeOf((*SchemaRegistryClusterConfigCredentials)(nil)).Elem()
+}
+
+func (i SchemaRegistryClusterConfigCredentialsArgs) ToSchemaRegistryClusterConfigCredentialsOutput() SchemaRegistryClusterConfigCredentialsOutput {
+ return i.ToSchemaRegistryClusterConfigCredentialsOutputWithContext(context.Background())
+}
+
+func (i SchemaRegistryClusterConfigCredentialsArgs) ToSchemaRegistryClusterConfigCredentialsOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigCredentialsOutput {
+ return pulumi.ToOutputWithContext(ctx, i).(SchemaRegistryClusterConfigCredentialsOutput)
+}
+
+func (i SchemaRegistryClusterConfigCredentialsArgs) ToOutput(ctx context.Context) pulumix.Output[SchemaRegistryClusterConfigCredentials] {
+ return pulumix.Output[SchemaRegistryClusterConfigCredentials]{
+ OutputState: i.ToSchemaRegistryClusterConfigCredentialsOutputWithContext(ctx).OutputState,
+ }
+}
+
+func (i SchemaRegistryClusterConfigCredentialsArgs) ToSchemaRegistryClusterConfigCredentialsPtrOutput() SchemaRegistryClusterConfigCredentialsPtrOutput {
+ return i.ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(context.Background())
+}
+
+func (i SchemaRegistryClusterConfigCredentialsArgs) ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigCredentialsPtrOutput {
+ return pulumi.ToOutputWithContext(ctx, i).(SchemaRegistryClusterConfigCredentialsOutput).ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(ctx)
+}
+
+// SchemaRegistryClusterConfigCredentialsPtrInput is an input type that accepts SchemaRegistryClusterConfigCredentialsArgs, SchemaRegistryClusterConfigCredentialsPtr and SchemaRegistryClusterConfigCredentialsPtrOutput values.
+// You can construct a concrete instance of `SchemaRegistryClusterConfigCredentialsPtrInput` via:
+//
+// SchemaRegistryClusterConfigCredentialsArgs{...}
+//
+// or:
+//
+// nil
+type SchemaRegistryClusterConfigCredentialsPtrInput interface {
+ pulumi.Input
+
+ ToSchemaRegistryClusterConfigCredentialsPtrOutput() SchemaRegistryClusterConfigCredentialsPtrOutput
+ ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(context.Context) SchemaRegistryClusterConfigCredentialsPtrOutput
+}
+
+type schemaRegistryClusterConfigCredentialsPtrType SchemaRegistryClusterConfigCredentialsArgs
+
+func SchemaRegistryClusterConfigCredentialsPtr(v *SchemaRegistryClusterConfigCredentialsArgs) SchemaRegistryClusterConfigCredentialsPtrInput {
+ return (*schemaRegistryClusterConfigCredentialsPtrType)(v)
+}
+
+func (*schemaRegistryClusterConfigCredentialsPtrType) ElementType() reflect.Type {
+ return reflect.TypeOf((**SchemaRegistryClusterConfigCredentials)(nil)).Elem()
+}
+
+func (i *schemaRegistryClusterConfigCredentialsPtrType) ToSchemaRegistryClusterConfigCredentialsPtrOutput() SchemaRegistryClusterConfigCredentialsPtrOutput {
+ return i.ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(context.Background())
+}
+
+func (i *schemaRegistryClusterConfigCredentialsPtrType) ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigCredentialsPtrOutput {
+ return pulumi.ToOutputWithContext(ctx, i).(SchemaRegistryClusterConfigCredentialsPtrOutput)
+}
+
+func (i *schemaRegistryClusterConfigCredentialsPtrType) ToOutput(ctx context.Context) pulumix.Output[*SchemaRegistryClusterConfigCredentials] {
+ return pulumix.Output[*SchemaRegistryClusterConfigCredentials]{
+ OutputState: i.ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(ctx).OutputState,
+ }
+}
+
+type SchemaRegistryClusterConfigCredentialsOutput struct{ *pulumi.OutputState }
+
+func (SchemaRegistryClusterConfigCredentialsOutput) ElementType() reflect.Type {
+ return reflect.TypeOf((*SchemaRegistryClusterConfigCredentials)(nil)).Elem()
+}
+
+func (o SchemaRegistryClusterConfigCredentialsOutput) ToSchemaRegistryClusterConfigCredentialsOutput() SchemaRegistryClusterConfigCredentialsOutput {
+ return o
+}
+
+func (o SchemaRegistryClusterConfigCredentialsOutput) ToSchemaRegistryClusterConfigCredentialsOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigCredentialsOutput {
+ return o
+}
+
+func (o SchemaRegistryClusterConfigCredentialsOutput) ToSchemaRegistryClusterConfigCredentialsPtrOutput() SchemaRegistryClusterConfigCredentialsPtrOutput {
+ return o.ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(context.Background())
+}
+
+func (o SchemaRegistryClusterConfigCredentialsOutput) ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigCredentialsPtrOutput {
+ return o.ApplyTWithContext(ctx, func(_ context.Context, v SchemaRegistryClusterConfigCredentials) *SchemaRegistryClusterConfigCredentials {
+ return &v
+ }).(SchemaRegistryClusterConfigCredentialsPtrOutput)
+}
+
+func (o SchemaRegistryClusterConfigCredentialsOutput) ToOutput(ctx context.Context) pulumix.Output[SchemaRegistryClusterConfigCredentials] {
+ return pulumix.Output[SchemaRegistryClusterConfigCredentials]{
+ OutputState: o.OutputState,
+ }
+}
+
+// The Schema Registry API Key.
+func (o SchemaRegistryClusterConfigCredentialsOutput) Key() pulumi.StringOutput {
+ return o.ApplyT(func(v SchemaRegistryClusterConfigCredentials) string { return v.Key }).(pulumi.StringOutput)
+}
+
+func (o SchemaRegistryClusterConfigCredentialsOutput) Secret() pulumi.StringOutput {
+ return o.ApplyT(func(v SchemaRegistryClusterConfigCredentials) string { return v.Secret }).(pulumi.StringOutput)
+}
+
+type SchemaRegistryClusterConfigCredentialsPtrOutput struct{ *pulumi.OutputState }
+
+func (SchemaRegistryClusterConfigCredentialsPtrOutput) ElementType() reflect.Type {
+ return reflect.TypeOf((**SchemaRegistryClusterConfigCredentials)(nil)).Elem()
+}
+
+func (o SchemaRegistryClusterConfigCredentialsPtrOutput) ToSchemaRegistryClusterConfigCredentialsPtrOutput() SchemaRegistryClusterConfigCredentialsPtrOutput {
+ return o
+}
+
+func (o SchemaRegistryClusterConfigCredentialsPtrOutput) ToSchemaRegistryClusterConfigCredentialsPtrOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigCredentialsPtrOutput {
+ return o
+}
+
+func (o SchemaRegistryClusterConfigCredentialsPtrOutput) ToOutput(ctx context.Context) pulumix.Output[*SchemaRegistryClusterConfigCredentials] {
+ return pulumix.Output[*SchemaRegistryClusterConfigCredentials]{
+ OutputState: o.OutputState,
+ }
+}
+
+func (o SchemaRegistryClusterConfigCredentialsPtrOutput) Elem() SchemaRegistryClusterConfigCredentialsOutput {
+ return o.ApplyT(func(v *SchemaRegistryClusterConfigCredentials) SchemaRegistryClusterConfigCredentials {
+ if v != nil {
+ return *v
+ }
+ var ret SchemaRegistryClusterConfigCredentials
+ return ret
+ }).(SchemaRegistryClusterConfigCredentialsOutput)
+}
+
+// The Schema Registry API Key.
+func (o SchemaRegistryClusterConfigCredentialsPtrOutput) Key() pulumi.StringPtrOutput {
+ return o.ApplyT(func(v *SchemaRegistryClusterConfigCredentials) *string {
+ if v == nil {
+ return nil
+ }
+ return &v.Key
+ }).(pulumi.StringPtrOutput)
+}
+
+func (o SchemaRegistryClusterConfigCredentialsPtrOutput) Secret() pulumi.StringPtrOutput {
+ return o.ApplyT(func(v *SchemaRegistryClusterConfigCredentials) *string {
+ if v == nil {
+ return nil
+ }
+ return &v.Secret
+ }).(pulumi.StringPtrOutput)
+}
+
+type SchemaRegistryClusterConfigSchemaRegistryCluster struct {
+ // The ID of the Schema Registry cluster, for example, `lsrc-abc123`.
+ Id string `pulumi:"id"`
+}
+
+// SchemaRegistryClusterConfigSchemaRegistryClusterInput is an input type that accepts SchemaRegistryClusterConfigSchemaRegistryClusterArgs and SchemaRegistryClusterConfigSchemaRegistryClusterOutput values.
+// You can construct a concrete instance of `SchemaRegistryClusterConfigSchemaRegistryClusterInput` via:
+//
+// SchemaRegistryClusterConfigSchemaRegistryClusterArgs{...}
+type SchemaRegistryClusterConfigSchemaRegistryClusterInput interface {
+ pulumi.Input
+
+ ToSchemaRegistryClusterConfigSchemaRegistryClusterOutput() SchemaRegistryClusterConfigSchemaRegistryClusterOutput
+ ToSchemaRegistryClusterConfigSchemaRegistryClusterOutputWithContext(context.Context) SchemaRegistryClusterConfigSchemaRegistryClusterOutput
+}
+
+type SchemaRegistryClusterConfigSchemaRegistryClusterArgs struct {
+ // The ID of the Schema Registry cluster, for example, `lsrc-abc123`.
+ Id pulumi.StringInput `pulumi:"id"`
+}
+
+func (SchemaRegistryClusterConfigSchemaRegistryClusterArgs) ElementType() reflect.Type {
+ return reflect.TypeOf((*SchemaRegistryClusterConfigSchemaRegistryCluster)(nil)).Elem()
+}
+
+func (i SchemaRegistryClusterConfigSchemaRegistryClusterArgs) ToSchemaRegistryClusterConfigSchemaRegistryClusterOutput() SchemaRegistryClusterConfigSchemaRegistryClusterOutput {
+ return i.ToSchemaRegistryClusterConfigSchemaRegistryClusterOutputWithContext(context.Background())
+}
+
+func (i SchemaRegistryClusterConfigSchemaRegistryClusterArgs) ToSchemaRegistryClusterConfigSchemaRegistryClusterOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigSchemaRegistryClusterOutput {
+ return pulumi.ToOutputWithContext(ctx, i).(SchemaRegistryClusterConfigSchemaRegistryClusterOutput)
+}
+
+func (i SchemaRegistryClusterConfigSchemaRegistryClusterArgs) ToOutput(ctx context.Context) pulumix.Output[SchemaRegistryClusterConfigSchemaRegistryCluster] {
+ return pulumix.Output[SchemaRegistryClusterConfigSchemaRegistryCluster]{
+ OutputState: i.ToSchemaRegistryClusterConfigSchemaRegistryClusterOutputWithContext(ctx).OutputState,
+ }
+}
+
+func (i SchemaRegistryClusterConfigSchemaRegistryClusterArgs) ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput() SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput {
+ return i.ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(context.Background())
+}
+
+func (i SchemaRegistryClusterConfigSchemaRegistryClusterArgs) ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput {
+ return pulumi.ToOutputWithContext(ctx, i).(SchemaRegistryClusterConfigSchemaRegistryClusterOutput).ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(ctx)
+}
+
+// SchemaRegistryClusterConfigSchemaRegistryClusterPtrInput is an input type that accepts SchemaRegistryClusterConfigSchemaRegistryClusterArgs, SchemaRegistryClusterConfigSchemaRegistryClusterPtr and SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput values.
+// You can construct a concrete instance of `SchemaRegistryClusterConfigSchemaRegistryClusterPtrInput` via:
+//
+// SchemaRegistryClusterConfigSchemaRegistryClusterArgs{...}
+//
+// or:
+//
+// nil
+type SchemaRegistryClusterConfigSchemaRegistryClusterPtrInput interface {
+ pulumi.Input
+
+ ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput() SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput
+ ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(context.Context) SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput
+}
+
+type schemaRegistryClusterConfigSchemaRegistryClusterPtrType SchemaRegistryClusterConfigSchemaRegistryClusterArgs
+
+func SchemaRegistryClusterConfigSchemaRegistryClusterPtr(v *SchemaRegistryClusterConfigSchemaRegistryClusterArgs) SchemaRegistryClusterConfigSchemaRegistryClusterPtrInput {
+ return (*schemaRegistryClusterConfigSchemaRegistryClusterPtrType)(v)
+}
+
+func (*schemaRegistryClusterConfigSchemaRegistryClusterPtrType) ElementType() reflect.Type {
+ return reflect.TypeOf((**SchemaRegistryClusterConfigSchemaRegistryCluster)(nil)).Elem()
+}
+
+func (i *schemaRegistryClusterConfigSchemaRegistryClusterPtrType) ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput() SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput {
+ return i.ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(context.Background())
+}
+
+func (i *schemaRegistryClusterConfigSchemaRegistryClusterPtrType) ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput {
+ return pulumi.ToOutputWithContext(ctx, i).(SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput)
+}
+
+func (i *schemaRegistryClusterConfigSchemaRegistryClusterPtrType) ToOutput(ctx context.Context) pulumix.Output[*SchemaRegistryClusterConfigSchemaRegistryCluster] {
+ return pulumix.Output[*SchemaRegistryClusterConfigSchemaRegistryCluster]{
+ OutputState: i.ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(ctx).OutputState,
+ }
+}
+
+type SchemaRegistryClusterConfigSchemaRegistryClusterOutput struct{ *pulumi.OutputState }
+
+func (SchemaRegistryClusterConfigSchemaRegistryClusterOutput) ElementType() reflect.Type {
+ return reflect.TypeOf((*SchemaRegistryClusterConfigSchemaRegistryCluster)(nil)).Elem()
+}
+
+func (o SchemaRegistryClusterConfigSchemaRegistryClusterOutput) ToSchemaRegistryClusterConfigSchemaRegistryClusterOutput() SchemaRegistryClusterConfigSchemaRegistryClusterOutput {
+ return o
+}
+
+func (o SchemaRegistryClusterConfigSchemaRegistryClusterOutput) ToSchemaRegistryClusterConfigSchemaRegistryClusterOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigSchemaRegistryClusterOutput {
+ return o
+}
+
+func (o SchemaRegistryClusterConfigSchemaRegistryClusterOutput) ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput() SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput {
+ return o.ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(context.Background())
+}
+
+func (o SchemaRegistryClusterConfigSchemaRegistryClusterOutput) ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput {
+ return o.ApplyTWithContext(ctx, func(_ context.Context, v SchemaRegistryClusterConfigSchemaRegistryCluster) *SchemaRegistryClusterConfigSchemaRegistryCluster {
+ return &v
+ }).(SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput)
+}
+
+func (o SchemaRegistryClusterConfigSchemaRegistryClusterOutput) ToOutput(ctx context.Context) pulumix.Output[SchemaRegistryClusterConfigSchemaRegistryCluster] {
+ return pulumix.Output[SchemaRegistryClusterConfigSchemaRegistryCluster]{
+ OutputState: o.OutputState,
+ }
+}
+
+// The ID of the Schema Registry cluster, for example, `lsrc-abc123`.
+func (o SchemaRegistryClusterConfigSchemaRegistryClusterOutput) Id() pulumi.StringOutput {
+ return o.ApplyT(func(v SchemaRegistryClusterConfigSchemaRegistryCluster) string { return v.Id }).(pulumi.StringOutput)
+}
+
+type SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput struct{ *pulumi.OutputState }
+
+func (SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput) ElementType() reflect.Type {
+ return reflect.TypeOf((**SchemaRegistryClusterConfigSchemaRegistryCluster)(nil)).Elem()
+}
+
+func (o SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput) ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput() SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput {
+ return o
+}
+
+func (o SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput) ToSchemaRegistryClusterConfigSchemaRegistryClusterPtrOutputWithContext(ctx context.Context) SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput {
+ return o
+}
+
+func (o SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput) ToOutput(ctx context.Context) pulumix.Output[*SchemaRegistryClusterConfigSchemaRegistryCluster] {
+ return pulumix.Output[*SchemaRegistryClusterConfigSchemaRegistryCluster]{
+ OutputState: o.OutputState,
+ }
+}
+
+func (o SchemaRegistryClusterConfigSchemaRegistryClusterPtrOutput) Elem() SchemaRegistryClusterConfigSchemaRegistryClusterOutput {
return o.ApplyT(func(v *SchemaRegistryClusterConfigSchemaRegistryCluster) SchemaRegistryClusterConfigSchemaRegistryCluster {
if v != nil {
return *v
@@ -17514,6 +18342,76 @@ func (o GetByokKeyAzureArrayOutput) Index(i pulumi.IntInput) GetByokKeyAzureOutp
}).(GetByokKeyAzureOutput)
}
+type GetFlinkComputePoolEnvironment struct {
+ // The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`.
+ //
+ // > **Note:** Exactly one from the `id` and `displayName` attributes must be specified.
+ Id string `pulumi:"id"`
+}
+
+// GetFlinkComputePoolEnvironmentInput is an input type that accepts GetFlinkComputePoolEnvironmentArgs and GetFlinkComputePoolEnvironmentOutput values.
+// You can construct a concrete instance of `GetFlinkComputePoolEnvironmentInput` via:
+//
+// GetFlinkComputePoolEnvironmentArgs{...}
+type GetFlinkComputePoolEnvironmentInput interface {
+ pulumi.Input
+
+ ToGetFlinkComputePoolEnvironmentOutput() GetFlinkComputePoolEnvironmentOutput
+ ToGetFlinkComputePoolEnvironmentOutputWithContext(context.Context) GetFlinkComputePoolEnvironmentOutput
+}
+
+type GetFlinkComputePoolEnvironmentArgs struct {
+ // The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`.
+ //
+ // > **Note:** Exactly one from the `id` and `displayName` attributes must be specified.
+ Id pulumi.StringInput `pulumi:"id"`
+}
+
+func (GetFlinkComputePoolEnvironmentArgs) ElementType() reflect.Type {
+ return reflect.TypeOf((*GetFlinkComputePoolEnvironment)(nil)).Elem()
+}
+
+func (i GetFlinkComputePoolEnvironmentArgs) ToGetFlinkComputePoolEnvironmentOutput() GetFlinkComputePoolEnvironmentOutput {
+ return i.ToGetFlinkComputePoolEnvironmentOutputWithContext(context.Background())
+}
+
+func (i GetFlinkComputePoolEnvironmentArgs) ToGetFlinkComputePoolEnvironmentOutputWithContext(ctx context.Context) GetFlinkComputePoolEnvironmentOutput {
+ return pulumi.ToOutputWithContext(ctx, i).(GetFlinkComputePoolEnvironmentOutput)
+}
+
+func (i GetFlinkComputePoolEnvironmentArgs) ToOutput(ctx context.Context) pulumix.Output[GetFlinkComputePoolEnvironment] {
+ return pulumix.Output[GetFlinkComputePoolEnvironment]{
+ OutputState: i.ToGetFlinkComputePoolEnvironmentOutputWithContext(ctx).OutputState,
+ }
+}
+
+type GetFlinkComputePoolEnvironmentOutput struct{ *pulumi.OutputState }
+
+func (GetFlinkComputePoolEnvironmentOutput) ElementType() reflect.Type {
+ return reflect.TypeOf((*GetFlinkComputePoolEnvironment)(nil)).Elem()
+}
+
+func (o GetFlinkComputePoolEnvironmentOutput) ToGetFlinkComputePoolEnvironmentOutput() GetFlinkComputePoolEnvironmentOutput {
+ return o
+}
+
+func (o GetFlinkComputePoolEnvironmentOutput) ToGetFlinkComputePoolEnvironmentOutputWithContext(ctx context.Context) GetFlinkComputePoolEnvironmentOutput {
+ return o
+}
+
+func (o GetFlinkComputePoolEnvironmentOutput) ToOutput(ctx context.Context) pulumix.Output[GetFlinkComputePoolEnvironment] {
+ return pulumix.Output[GetFlinkComputePoolEnvironment]{
+ OutputState: o.OutputState,
+ }
+}
+
+// The ID of the Environment that the Flink Compute Pool belongs to, for example, `env-xyz456`.
+//
+// > **Note:** Exactly one from the `id` and `displayName` attributes must be specified.
+func (o GetFlinkComputePoolEnvironmentOutput) Id() pulumi.StringOutput {
+ return o.ApplyT(func(v GetFlinkComputePoolEnvironment) string { return v.Id }).(pulumi.StringOutput)
+}
+
type GetIdentityPoolIdentityProvider struct {
// The ID of the Identity Provider associated with the Identity Pool, for example, `op-abc123`.
//
@@ -27629,6 +28527,8 @@ func init() {
pulumi.RegisterInputType(reflect.TypeOf((*ConnectorEnvironmentPtrInput)(nil)).Elem(), ConnectorEnvironmentArgs{})
pulumi.RegisterInputType(reflect.TypeOf((*ConnectorKafkaClusterInput)(nil)).Elem(), ConnectorKafkaClusterArgs{})
pulumi.RegisterInputType(reflect.TypeOf((*ConnectorKafkaClusterPtrInput)(nil)).Elem(), ConnectorKafkaClusterArgs{})
+ pulumi.RegisterInputType(reflect.TypeOf((*FlinkComputePoolEnvironmentInput)(nil)).Elem(), FlinkComputePoolEnvironmentArgs{})
+ pulumi.RegisterInputType(reflect.TypeOf((*FlinkComputePoolEnvironmentPtrInput)(nil)).Elem(), FlinkComputePoolEnvironmentArgs{})
pulumi.RegisterInputType(reflect.TypeOf((*IdentityPoolIdentityProviderInput)(nil)).Elem(), IdentityPoolIdentityProviderArgs{})
pulumi.RegisterInputType(reflect.TypeOf((*IdentityPoolIdentityProviderPtrInput)(nil)).Elem(), IdentityPoolIdentityProviderArgs{})
pulumi.RegisterInputType(reflect.TypeOf((*InvitationCreatorInput)(nil)).Elem(), InvitationCreatorArgs{})
@@ -27745,6 +28645,14 @@ func init() {
pulumi.RegisterInputType(reflect.TypeOf((*PrivateLinkAttachmentGcpArrayInput)(nil)).Elem(), PrivateLinkAttachmentGcpArray{})
pulumi.RegisterInputType(reflect.TypeOf((*SchemaCredentialsInput)(nil)).Elem(), SchemaCredentialsArgs{})
pulumi.RegisterInputType(reflect.TypeOf((*SchemaCredentialsPtrInput)(nil)).Elem(), SchemaCredentialsArgs{})
+ pulumi.RegisterInputType(reflect.TypeOf((*SchemaExporterCredentialsInput)(nil)).Elem(), SchemaExporterCredentialsArgs{})
+ pulumi.RegisterInputType(reflect.TypeOf((*SchemaExporterCredentialsPtrInput)(nil)).Elem(), SchemaExporterCredentialsArgs{})
+ pulumi.RegisterInputType(reflect.TypeOf((*SchemaExporterDestinationSchemaRegistryClusterInput)(nil)).Elem(), SchemaExporterDestinationSchemaRegistryClusterArgs{})
+ pulumi.RegisterInputType(reflect.TypeOf((*SchemaExporterDestinationSchemaRegistryClusterPtrInput)(nil)).Elem(), SchemaExporterDestinationSchemaRegistryClusterArgs{})
+ pulumi.RegisterInputType(reflect.TypeOf((*SchemaExporterDestinationSchemaRegistryClusterCredentialsInput)(nil)).Elem(), SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs{})
+ pulumi.RegisterInputType(reflect.TypeOf((*SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrInput)(nil)).Elem(), SchemaExporterDestinationSchemaRegistryClusterCredentialsArgs{})
+ pulumi.RegisterInputType(reflect.TypeOf((*SchemaExporterSchemaRegistryClusterInput)(nil)).Elem(), SchemaExporterSchemaRegistryClusterArgs{})
+ pulumi.RegisterInputType(reflect.TypeOf((*SchemaExporterSchemaRegistryClusterPtrInput)(nil)).Elem(), SchemaExporterSchemaRegistryClusterArgs{})
pulumi.RegisterInputType(reflect.TypeOf((*SchemaRegistryClusterConfigCredentialsInput)(nil)).Elem(), SchemaRegistryClusterConfigCredentialsArgs{})
pulumi.RegisterInputType(reflect.TypeOf((*SchemaRegistryClusterConfigCredentialsPtrInput)(nil)).Elem(), SchemaRegistryClusterConfigCredentialsArgs{})
pulumi.RegisterInputType(reflect.TypeOf((*SchemaRegistryClusterConfigSchemaRegistryClusterInput)(nil)).Elem(), SchemaRegistryClusterConfigSchemaRegistryClusterArgs{})
@@ -27797,6 +28705,7 @@ func init() {
pulumi.RegisterInputType(reflect.TypeOf((*GetByokKeyAwArrayInput)(nil)).Elem(), GetByokKeyAwArray{})
pulumi.RegisterInputType(reflect.TypeOf((*GetByokKeyAzureInput)(nil)).Elem(), GetByokKeyAzureArgs{})
pulumi.RegisterInputType(reflect.TypeOf((*GetByokKeyAzureArrayInput)(nil)).Elem(), GetByokKeyAzureArray{})
+ pulumi.RegisterInputType(reflect.TypeOf((*GetFlinkComputePoolEnvironmentInput)(nil)).Elem(), GetFlinkComputePoolEnvironmentArgs{})
pulumi.RegisterInputType(reflect.TypeOf((*GetIdentityPoolIdentityProviderInput)(nil)).Elem(), GetIdentityPoolIdentityProviderArgs{})
pulumi.RegisterInputType(reflect.TypeOf((*GetInvitationCreatorInput)(nil)).Elem(), GetInvitationCreatorArgs{})
pulumi.RegisterInputType(reflect.TypeOf((*GetInvitationCreatorArrayInput)(nil)).Elem(), GetInvitationCreatorArray{})
@@ -27978,6 +28887,8 @@ func init() {
pulumi.RegisterOutputType(ConnectorEnvironmentPtrOutput{})
pulumi.RegisterOutputType(ConnectorKafkaClusterOutput{})
pulumi.RegisterOutputType(ConnectorKafkaClusterPtrOutput{})
+ pulumi.RegisterOutputType(FlinkComputePoolEnvironmentOutput{})
+ pulumi.RegisterOutputType(FlinkComputePoolEnvironmentPtrOutput{})
pulumi.RegisterOutputType(IdentityPoolIdentityProviderOutput{})
pulumi.RegisterOutputType(IdentityPoolIdentityProviderPtrOutput{})
pulumi.RegisterOutputType(InvitationCreatorOutput{})
@@ -28094,6 +29005,14 @@ func init() {
pulumi.RegisterOutputType(PrivateLinkAttachmentGcpArrayOutput{})
pulumi.RegisterOutputType(SchemaCredentialsOutput{})
pulumi.RegisterOutputType(SchemaCredentialsPtrOutput{})
+ pulumi.RegisterOutputType(SchemaExporterCredentialsOutput{})
+ pulumi.RegisterOutputType(SchemaExporterCredentialsPtrOutput{})
+ pulumi.RegisterOutputType(SchemaExporterDestinationSchemaRegistryClusterOutput{})
+ pulumi.RegisterOutputType(SchemaExporterDestinationSchemaRegistryClusterPtrOutput{})
+ pulumi.RegisterOutputType(SchemaExporterDestinationSchemaRegistryClusterCredentialsOutput{})
+ pulumi.RegisterOutputType(SchemaExporterDestinationSchemaRegistryClusterCredentialsPtrOutput{})
+ pulumi.RegisterOutputType(SchemaExporterSchemaRegistryClusterOutput{})
+ pulumi.RegisterOutputType(SchemaExporterSchemaRegistryClusterPtrOutput{})
pulumi.RegisterOutputType(SchemaRegistryClusterConfigCredentialsOutput{})
pulumi.RegisterOutputType(SchemaRegistryClusterConfigCredentialsPtrOutput{})
pulumi.RegisterOutputType(SchemaRegistryClusterConfigSchemaRegistryClusterOutput{})
@@ -28146,6 +29065,7 @@ func init() {
pulumi.RegisterOutputType(GetByokKeyAwArrayOutput{})
pulumi.RegisterOutputType(GetByokKeyAzureOutput{})
pulumi.RegisterOutputType(GetByokKeyAzureArrayOutput{})
+ pulumi.RegisterOutputType(GetFlinkComputePoolEnvironmentOutput{})
pulumi.RegisterOutputType(GetIdentityPoolIdentityProviderOutput{})
pulumi.RegisterOutputType(GetInvitationCreatorOutput{})
pulumi.RegisterOutputType(GetInvitationCreatorArrayOutput{})
diff --git a/sdk/go/confluentcloud/schemaExporter.go b/sdk/go/confluentcloud/schemaExporter.go
new file mode 100644
index 00000000..2fd55252
--- /dev/null
+++ b/sdk/go/confluentcloud/schemaExporter.go
@@ -0,0 +1,428 @@
+// Code generated by the Pulumi Terraform Bridge (tfgen) Tool DO NOT EDIT.
+// *** WARNING: Do not edit by hand unless you're certain you know what you are doing! ***
+
+package confluentcloud
+
+import (
+ "context"
+ "reflect"
+
+ "errors"
+ "github.com/pulumi/pulumi-confluentcloud/sdk/go/confluentcloud/internal"
+ "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
+ "github.com/pulumi/pulumi/sdk/v3/go/pulumix"
+)
+
+// ## Import
+//
+// You can import a Schema Exporter by using the Schema Registry cluster ID and the Schema Exporter name in the format `<Schema Registry cluster ID>/<Schema Exporter name>`. Before importing, set: $ export IMPORT_SCHEMA_REGISTRY_API_KEY="<schema_registry_api_key>" $ export IMPORT_SCHEMA_REGISTRY_API_SECRET="<schema_registry_api_secret>" $ export IMPORT_SCHEMA_REGISTRY_REST_ENDPOINT="<schema_registry_rest_endpoint>"
+//
+// ```sh
+//
+// $ pulumi import confluentcloud:index/schemaExporter:SchemaExporter main lsrc-8wrx70/test-exporter
+//
+// ```
+//
+// !> **Warning:** Do not forget to delete terminal command history afterwards for security purposes.
+type SchemaExporter struct {
+ pulumi.CustomResourceState
+
+ // Block for custom *nonsensitive* configuration properties:
+ Config pulumi.StringMapOutput `pulumi:"config"`
+ // Customized context of the exporter if `contextType` is set to `CUSTOM`.
+ Context pulumi.StringOutput `pulumi:"context"`
+ // Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`.
+ ContextType pulumi.StringOutput `pulumi:"contextType"`
+ // The Cluster API Credentials.
+ Credentials SchemaExporterCredentialsPtrOutput `pulumi:"credentials"`
+ DestinationSchemaRegistryCluster SchemaExporterDestinationSchemaRegistryClusterOutput `pulumi:"destinationSchemaRegistryCluster"`
+ // The name of the Schema Exporter.
+ Name pulumi.StringOutput `pulumi:"name"`
+ // The flag to control whether to reset the exporter when updating configs. Defaults to `false`.
+ ResetOnUpdate pulumi.BoolPtrOutput `pulumi:"resetOnUpdate"`
+ // The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`.
+ RestEndpoint pulumi.StringPtrOutput `pulumi:"restEndpoint"`
+ SchemaRegistryCluster SchemaExporterSchemaRegistryClusterPtrOutput `pulumi:"schemaRegistryCluster"`
+ // The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`.
+ Status pulumi.StringOutput `pulumi:"status"`
+ // Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dc_orders`.
+ SubjectRenameFormat pulumi.StringOutput `pulumi:"subjectRenameFormat"`
+ // Name of each exporter subject.
+ Subjects pulumi.StringArrayOutput `pulumi:"subjects"`
+}
+
+// NewSchemaExporter registers a new resource with the given unique name, arguments, and options.
+func NewSchemaExporter(ctx *pulumi.Context,
+ name string, args *SchemaExporterArgs, opts ...pulumi.ResourceOption) (*SchemaExporter, error) {
+ if args == nil {
+ return nil, errors.New("missing one or more required arguments")
+ }
+
+ if args.DestinationSchemaRegistryCluster == nil {
+ return nil, errors.New("invalid value for required argument 'DestinationSchemaRegistryCluster'")
+ }
+ if args.Credentials != nil {
+ args.Credentials = pulumi.ToSecret(args.Credentials).(SchemaExporterCredentialsPtrInput)
+ }
+ secrets := pulumi.AdditionalSecretOutputs([]string{
+ "credentials",
+ })
+ opts = append(opts, secrets)
+ opts = internal.PkgResourceDefaultOpts(opts)
+ var resource SchemaExporter
+ err := ctx.RegisterResource("confluentcloud:index/schemaExporter:SchemaExporter", name, args, &resource, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return &resource, nil
+}
+
+// GetSchemaExporter gets an existing SchemaExporter resource's state with the given name, ID, and optional
+// state properties that are used to uniquely qualify the lookup (nil if not required).
+func GetSchemaExporter(ctx *pulumi.Context,
+ name string, id pulumi.IDInput, state *SchemaExporterState, opts ...pulumi.ResourceOption) (*SchemaExporter, error) {
+ var resource SchemaExporter
+ err := ctx.ReadResource("confluentcloud:index/schemaExporter:SchemaExporter", name, id, state, &resource, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return &resource, nil
+}
+
+// Input properties used for looking up and filtering SchemaExporter resources.
+type schemaExporterState struct {
+ // Block for custom *nonsensitive* configuration properties:
+ Config map[string]string `pulumi:"config"`
+ // Customized context of the exporter if `contextType` is set to `CUSTOM`.
+ Context *string `pulumi:"context"`
+ // Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`.
+ ContextType *string `pulumi:"contextType"`
+ // The Cluster API Credentials.
+ Credentials *SchemaExporterCredentials `pulumi:"credentials"`
+ DestinationSchemaRegistryCluster *SchemaExporterDestinationSchemaRegistryCluster `pulumi:"destinationSchemaRegistryCluster"`
+ // The name of the Schema Exporter.
+ Name *string `pulumi:"name"`
+ // The flag to control whether to reset the exporter when updating configs. Defaults to `false`.
+ ResetOnUpdate *bool `pulumi:"resetOnUpdate"`
+ // The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`.
+ RestEndpoint *string `pulumi:"restEndpoint"`
+ SchemaRegistryCluster *SchemaExporterSchemaRegistryCluster `pulumi:"schemaRegistryCluster"`
+ // The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`.
+ Status *string `pulumi:"status"`
+ // Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dc_orders`.
+ SubjectRenameFormat *string `pulumi:"subjectRenameFormat"`
+ // Name of each exporter subject.
+ Subjects []string `pulumi:"subjects"`
+}
+
+type SchemaExporterState struct {
+ // Block for custom *nonsensitive* configuration properties:
+ Config pulumi.StringMapInput
+ // Customized context of the exporter if `contextType` is set to `CUSTOM`.
+ Context pulumi.StringPtrInput
+ // Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`.
+ ContextType pulumi.StringPtrInput
+ // The Cluster API Credentials.
+ Credentials SchemaExporterCredentialsPtrInput
+ DestinationSchemaRegistryCluster SchemaExporterDestinationSchemaRegistryClusterPtrInput
+ // The configuration setting name.
+ Name pulumi.StringPtrInput
+ // The flag to control whether to reset the exporter when updating configs. Defaults to `false`.
+ ResetOnUpdate pulumi.BoolPtrInput
+ // The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`.
+ RestEndpoint pulumi.StringPtrInput
+ SchemaRegistryCluster SchemaExporterSchemaRegistryClusterPtrInput
+ // The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`.
+ Status pulumi.StringPtrInput
+ // Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dc_orders`.
+ SubjectRenameFormat pulumi.StringPtrInput
+ // Name of each exporter subject.
+ Subjects pulumi.StringArrayInput
+}
+
+func (SchemaExporterState) ElementType() reflect.Type {
+ return reflect.TypeOf((*schemaExporterState)(nil)).Elem()
+}
+
+type schemaExporterArgs struct {
+ // Block for custom *nonsensitive* configuration properties:
+ Config map[string]string `pulumi:"config"`
+ // Customized context of the exporter if `contextType` is set to `CUSTOM`.
+ Context *string `pulumi:"context"`
+ // Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`.
+ ContextType *string `pulumi:"contextType"`
+ // The Cluster API Credentials.
+ Credentials *SchemaExporterCredentials `pulumi:"credentials"`
+ DestinationSchemaRegistryCluster SchemaExporterDestinationSchemaRegistryCluster `pulumi:"destinationSchemaRegistryCluster"`
+ // The name of the Schema Exporter.
+ Name *string `pulumi:"name"`
+ // The flag to control whether to reset the exporter when updating configs. Defaults to `false`.
+ ResetOnUpdate *bool `pulumi:"resetOnUpdate"`
+ // The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`.
+ RestEndpoint *string `pulumi:"restEndpoint"`
+ SchemaRegistryCluster *SchemaExporterSchemaRegistryCluster `pulumi:"schemaRegistryCluster"`
+ // The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`.
+ Status *string `pulumi:"status"`
+ // Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dc_orders`.
+ SubjectRenameFormat *string `pulumi:"subjectRenameFormat"`
+ // Name of each exporter subject.
+ Subjects []string `pulumi:"subjects"`
+}
+
+// The set of arguments for constructing a SchemaExporter resource.
+type SchemaExporterArgs struct {
+ // Block for custom *nonsensitive* configuration properties:
+ Config pulumi.StringMapInput
+ // Customized context of the exporter if `contextType` is set to `CUSTOM`.
+ Context pulumi.StringPtrInput
+ // Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`.
+ ContextType pulumi.StringPtrInput
+ // The Cluster API Credentials.
+ Credentials SchemaExporterCredentialsPtrInput
+ DestinationSchemaRegistryCluster SchemaExporterDestinationSchemaRegistryClusterInput
+ // The name of the Schema Exporter.
+ Name pulumi.StringPtrInput
+ // The flag to control whether to reset the exporter when updating configs. Defaults to `false`.
+ ResetOnUpdate pulumi.BoolPtrInput
+ // The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`.
+ RestEndpoint pulumi.StringPtrInput
+ SchemaRegistryCluster SchemaExporterSchemaRegistryClusterPtrInput
+ // The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`.
+ Status pulumi.StringPtrInput
+ // Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dc_orders`.
+ SubjectRenameFormat pulumi.StringPtrInput
+ // Name of each exporter subject.
+ Subjects pulumi.StringArrayInput
+}
+
+func (SchemaExporterArgs) ElementType() reflect.Type {
+ return reflect.TypeOf((*schemaExporterArgs)(nil)).Elem()
+}
+
+type SchemaExporterInput interface {
+ pulumi.Input
+
+ ToSchemaExporterOutput() SchemaExporterOutput
+ ToSchemaExporterOutputWithContext(ctx context.Context) SchemaExporterOutput
+}
+
+func (*SchemaExporter) ElementType() reflect.Type {
+ return reflect.TypeOf((**SchemaExporter)(nil)).Elem()
+}
+
+func (i *SchemaExporter) ToSchemaExporterOutput() SchemaExporterOutput {
+ return i.ToSchemaExporterOutputWithContext(context.Background())
+}
+
+func (i *SchemaExporter) ToSchemaExporterOutputWithContext(ctx context.Context) SchemaExporterOutput {
+ return pulumi.ToOutputWithContext(ctx, i).(SchemaExporterOutput)
+}
+
+func (i *SchemaExporter) ToOutput(ctx context.Context) pulumix.Output[*SchemaExporter] {
+ return pulumix.Output[*SchemaExporter]{
+ OutputState: i.ToSchemaExporterOutputWithContext(ctx).OutputState,
+ }
+}
+
+// SchemaExporterArrayInput is an input type that accepts SchemaExporterArray and SchemaExporterArrayOutput values.
+// You can construct a concrete instance of `SchemaExporterArrayInput` via:
+//
+// SchemaExporterArray{ SchemaExporterArgs{...} }
+type SchemaExporterArrayInput interface {
+ pulumi.Input
+
+ ToSchemaExporterArrayOutput() SchemaExporterArrayOutput
+ ToSchemaExporterArrayOutputWithContext(context.Context) SchemaExporterArrayOutput
+}
+
+type SchemaExporterArray []SchemaExporterInput
+
+func (SchemaExporterArray) ElementType() reflect.Type {
+ return reflect.TypeOf((*[]*SchemaExporter)(nil)).Elem()
+}
+
+func (i SchemaExporterArray) ToSchemaExporterArrayOutput() SchemaExporterArrayOutput {
+ return i.ToSchemaExporterArrayOutputWithContext(context.Background())
+}
+
+func (i SchemaExporterArray) ToSchemaExporterArrayOutputWithContext(ctx context.Context) SchemaExporterArrayOutput {
+ return pulumi.ToOutputWithContext(ctx, i).(SchemaExporterArrayOutput)
+}
+
+func (i SchemaExporterArray) ToOutput(ctx context.Context) pulumix.Output[[]*SchemaExporter] {
+ return pulumix.Output[[]*SchemaExporter]{
+ OutputState: i.ToSchemaExporterArrayOutputWithContext(ctx).OutputState,
+ }
+}
+
+// SchemaExporterMapInput is an input type that accepts SchemaExporterMap and SchemaExporterMapOutput values.
+// You can construct a concrete instance of `SchemaExporterMapInput` via:
+//
+// SchemaExporterMap{ "key": SchemaExporterArgs{...} }
+type SchemaExporterMapInput interface {
+ pulumi.Input
+
+ ToSchemaExporterMapOutput() SchemaExporterMapOutput
+ ToSchemaExporterMapOutputWithContext(context.Context) SchemaExporterMapOutput
+}
+
+type SchemaExporterMap map[string]SchemaExporterInput
+
+func (SchemaExporterMap) ElementType() reflect.Type {
+ return reflect.TypeOf((*map[string]*SchemaExporter)(nil)).Elem()
+}
+
+func (i SchemaExporterMap) ToSchemaExporterMapOutput() SchemaExporterMapOutput {
+ return i.ToSchemaExporterMapOutputWithContext(context.Background())
+}
+
+func (i SchemaExporterMap) ToSchemaExporterMapOutputWithContext(ctx context.Context) SchemaExporterMapOutput {
+ return pulumi.ToOutputWithContext(ctx, i).(SchemaExporterMapOutput)
+}
+
+func (i SchemaExporterMap) ToOutput(ctx context.Context) pulumix.Output[map[string]*SchemaExporter] {
+ return pulumix.Output[map[string]*SchemaExporter]{
+ OutputState: i.ToSchemaExporterMapOutputWithContext(ctx).OutputState,
+ }
+}
+
+type SchemaExporterOutput struct{ *pulumi.OutputState }
+
+func (SchemaExporterOutput) ElementType() reflect.Type {
+ return reflect.TypeOf((**SchemaExporter)(nil)).Elem()
+}
+
+func (o SchemaExporterOutput) ToSchemaExporterOutput() SchemaExporterOutput {
+ return o
+}
+
+func (o SchemaExporterOutput) ToSchemaExporterOutputWithContext(ctx context.Context) SchemaExporterOutput {
+ return o
+}
+
+func (o SchemaExporterOutput) ToOutput(ctx context.Context) pulumix.Output[*SchemaExporter] {
+ return pulumix.Output[*SchemaExporter]{
+ OutputState: o.OutputState,
+ }
+}
+
+// Block for custom *nonsensitive* configuration properties:
+func (o SchemaExporterOutput) Config() pulumi.StringMapOutput {
+ return o.ApplyT(func(v *SchemaExporter) pulumi.StringMapOutput { return v.Config }).(pulumi.StringMapOutput)
+}
+
+// Customized context of the exporter if `contextType` is set to `CUSTOM`.
+func (o SchemaExporterOutput) Context() pulumi.StringOutput {
+ return o.ApplyT(func(v *SchemaExporter) pulumi.StringOutput { return v.Context }).(pulumi.StringOutput)
+}
+
+// Context type of the exporter. Accepted values are: `CUSTOM`, `NONE` or `AUTO`. Defaults to `AUTO`.
+func (o SchemaExporterOutput) ContextType() pulumi.StringOutput {
+ return o.ApplyT(func(v *SchemaExporter) pulumi.StringOutput { return v.ContextType }).(pulumi.StringOutput)
+}
+
+// The Cluster API Credentials.
+func (o SchemaExporterOutput) Credentials() SchemaExporterCredentialsPtrOutput {
+ return o.ApplyT(func(v *SchemaExporter) SchemaExporterCredentialsPtrOutput { return v.Credentials }).(SchemaExporterCredentialsPtrOutput)
+}
+
+func (o SchemaExporterOutput) DestinationSchemaRegistryCluster() SchemaExporterDestinationSchemaRegistryClusterOutput {
+ return o.ApplyT(func(v *SchemaExporter) SchemaExporterDestinationSchemaRegistryClusterOutput {
+ return v.DestinationSchemaRegistryCluster
+ }).(SchemaExporterDestinationSchemaRegistryClusterOutput)
+}
+
+// The name of the Schema Exporter.
+func (o SchemaExporterOutput) Name() pulumi.StringOutput {
+ return o.ApplyT(func(v *SchemaExporter) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)
+}
+
+// The flag to control whether to reset the exporter when updating configs. Defaults to `false`.
+func (o SchemaExporterOutput) ResetOnUpdate() pulumi.BoolPtrOutput {
+ return o.ApplyT(func(v *SchemaExporter) pulumi.BoolPtrOutput { return v.ResetOnUpdate }).(pulumi.BoolPtrOutput)
+}
+
+// The REST endpoint of the destination Schema Registry cluster, for example, `https://pkc-00000.us-central1.gcp.confluent.cloud:443`.
+func (o SchemaExporterOutput) RestEndpoint() pulumi.StringPtrOutput {
+ return o.ApplyT(func(v *SchemaExporter) pulumi.StringPtrOutput { return v.RestEndpoint }).(pulumi.StringPtrOutput)
+}
+
+func (o SchemaExporterOutput) SchemaRegistryCluster() SchemaExporterSchemaRegistryClusterPtrOutput {
+ return o.ApplyT(func(v *SchemaExporter) SchemaExporterSchemaRegistryClusterPtrOutput { return v.SchemaRegistryCluster }).(SchemaExporterSchemaRegistryClusterPtrOutput)
+}
+
+// The status of the schema exporter. Accepted values are: `RUNNING` and `PAUSED`.
+func (o SchemaExporterOutput) Status() pulumi.StringOutput {
+ return o.ApplyT(func(v *SchemaExporter) pulumi.StringOutput { return v.Status }).(pulumi.StringOutput)
+}
+
+// Format string for the subject name in the destination cluster, which may contain `${subject}` as a placeholder for the originating subject name. For example, `dc_${subject}` for the subject orders will map to the destination subject name `dc_orders`.
+func (o SchemaExporterOutput) SubjectRenameFormat() pulumi.StringOutput {
+ return o.ApplyT(func(v *SchemaExporter) pulumi.StringOutput { return v.SubjectRenameFormat }).(pulumi.StringOutput)
+}
+
+// Name of each exporter subject.
+func (o SchemaExporterOutput) Subjects() pulumi.StringArrayOutput {
+ return o.ApplyT(func(v *SchemaExporter) pulumi.StringArrayOutput { return v.Subjects }).(pulumi.StringArrayOutput)
+}
+
+type SchemaExporterArrayOutput struct{ *pulumi.OutputState }
+
+func (SchemaExporterArrayOutput) ElementType() reflect.Type {
+ return reflect.TypeOf((*[]*SchemaExporter)(nil)).Elem()
+}
+
+func (o SchemaExporterArrayOutput) ToSchemaExporterArrayOutput() SchemaExporterArrayOutput {
+ return o
+}
+
+func (o SchemaExporterArrayOutput) ToSchemaExporterArrayOutputWithContext(ctx context.Context) SchemaExporterArrayOutput {
+ return o
+}
+
+func (o SchemaExporterArrayOutput) ToOutput(ctx context.Context) pulumix.Output[[]*SchemaExporter] {
+ return pulumix.Output[[]*SchemaExporter]{
+ OutputState: o.OutputState,
+ }
+}
+
+func (o SchemaExporterArrayOutput) Index(i pulumi.IntInput) SchemaExporterOutput {
+ return pulumi.All(o, i).ApplyT(func(vs []interface{}) *SchemaExporter {
+ return vs[0].([]*SchemaExporter)[vs[1].(int)]
+ }).(SchemaExporterOutput)
+}
+
+type SchemaExporterMapOutput struct{ *pulumi.OutputState }
+
+func (SchemaExporterMapOutput) ElementType() reflect.Type {
+ return reflect.TypeOf((*map[string]*SchemaExporter)(nil)).Elem()
+}
+
+func (o SchemaExporterMapOutput) ToSchemaExporterMapOutput() SchemaExporterMapOutput {
+ return o
+}
+
+func (o SchemaExporterMapOutput) ToSchemaExporterMapOutputWithContext(ctx context.Context) SchemaExporterMapOutput {
+ return o
+}
+
+func (o SchemaExporterMapOutput) ToOutput(ctx context.Context) pulumix.Output[map[string]*SchemaExporter] {
+ return pulumix.Output[map[string]*SchemaExporter]{
+ OutputState: o.OutputState,
+ }
+}
+
+func (o SchemaExporterMapOutput) MapIndex(k pulumi.StringInput) SchemaExporterOutput {
+ return pulumi.All(o, k).ApplyT(func(vs []interface{}) *SchemaExporter {
+ return vs[0].(map[string]*SchemaExporter)[vs[1].(string)]
+ }).(SchemaExporterOutput)
+}
+
+func init() {
+ pulumi.RegisterInputType(reflect.TypeOf((*SchemaExporterInput)(nil)).Elem(), &SchemaExporter{})
+ pulumi.RegisterInputType(reflect.TypeOf((*SchemaExporterArrayInput)(nil)).Elem(), SchemaExporterArray{})
+ pulumi.RegisterInputType(reflect.TypeOf((*SchemaExporterMapInput)(nil)).Elem(), SchemaExporterMap{})
+ pulumi.RegisterOutputType(SchemaExporterOutput{})
+ pulumi.RegisterOutputType(SchemaExporterArrayOutput{})
+ pulumi.RegisterOutputType(SchemaExporterMapOutput{})
+}
diff --git a/sdk/java/src/main/java/com/pulumi/confluentcloud/ConfluentcloudFunctions.java b/sdk/java/src/main/java/com/pulumi/confluentcloud/ConfluentcloudFunctions.java
index 52df66b8..e2ff5a52 100644
--- a/sdk/java/src/main/java/com/pulumi/confluentcloud/ConfluentcloudFunctions.java
+++ b/sdk/java/src/main/java/com/pulumi/confluentcloud/ConfluentcloudFunctions.java
@@ -12,6 +12,8 @@
import com.pulumi.confluentcloud.inputs.GetByokKeyPlainArgs;
import com.pulumi.confluentcloud.inputs.GetEnvironmentArgs;
import com.pulumi.confluentcloud.inputs.GetEnvironmentPlainArgs;
+import com.pulumi.confluentcloud.inputs.GetFlinkComputePoolArgs;
+import com.pulumi.confluentcloud.inputs.GetFlinkComputePoolPlainArgs;
import com.pulumi.confluentcloud.inputs.GetIdentityPoolArgs;
import com.pulumi.confluentcloud.inputs.GetIdentityPoolPlainArgs;
import com.pulumi.confluentcloud.inputs.GetIdentityProviderArgs;
@@ -73,6 +75,7 @@
import com.pulumi.confluentcloud.outputs.GetByokKeyResult;
import com.pulumi.confluentcloud.outputs.GetEnvironmentResult;
import com.pulumi.confluentcloud.outputs.GetEnvironmentsResult;
+import com.pulumi.confluentcloud.outputs.GetFlinkComputePoolResult;
import com.pulumi.confluentcloud.outputs.GetIdentityPoolResult;
import com.pulumi.confluentcloud.outputs.GetIdentityProviderResult;
import com.pulumi.confluentcloud.outputs.GetInvitationResult;
@@ -878,6 +881,226 @@ public static Output getEnvironments(InvokeArgs args, Inv
public static CompletableFuture getEnvironmentsPlain(InvokeArgs args, InvokeOptions options) {
return Deployment.getInstance().invokeAsync("confluentcloud:index/getEnvironments:getEnvironments", TypeShape.of(GetEnvironmentsResult.class), args, Utilities.withVersion(options));
}
+ /**
+ * [![Early Access](https://img.shields.io/badge/Lifecycle%20Stage-Early%20Access-%2300afba)](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy)
+ *
+ * > **Note:** `confluentcloud.FlinkComputePool` data source is available in **Early Access** for early adopters. Early Access features are introduced to gather customer feedback. This feature should be used only for evaluation and non-production testing purposes or to provide feedback to Confluent, particularly as it becomes more widely available in follow-on editions.\
+ * **Early Access** features are intended for evaluation use in development and testing environments only, and not for production use. The warranty, SLA, and Support Services provisions of your agreement with Confluent do not apply to Early Access features. Early Access features are considered to be a Proof of Concept as defined in the Confluent Cloud Terms of Service. Confluent may discontinue providing early access releases of the Early Access features at any time in Confluent’s sole discretion.
+ *
+ * `confluentcloud.FlinkComputePool` describes a Flink Compute Pool data source.
+ *
+ * ## Example Usage
+ * ```java
+ * package generated_program;
+ *
+ * import com.pulumi.Context;
+ * import com.pulumi.Pulumi;
+ * import com.pulumi.core.Output;
+ * import com.pulumi.confluentcloud.ConfluentcloudFunctions;
+ * import com.pulumi.confluentcloud.inputs.GetFlinkComputePoolArgs;
+ * import com.pulumi.confluentcloud.inputs.GetFlinkComputePoolEnvironmentArgs;
+ * import java.util.List;
+ * import java.util.ArrayList;
+ * import java.util.Map;
+ * import java.io.File;
+ * import java.nio.file.Files;
+ * import java.nio.file.Paths;
+ *
+ * public class App {
+ * public static void main(String[] args) {
+ * Pulumi.run(App::stack);
+ * }
+ *
+ * public static void stack(Context ctx) {
+ * final var exampleUsingIdFlinkComputePool = ConfluentcloudFunctions.getFlinkComputePool(GetFlinkComputePoolArgs.builder()
+ * .id("lfcp-abc123")
+ * .environment(GetFlinkComputePoolEnvironmentArgs.builder()
+ * .id("env-xyz456")
+ * .build())
+ * .build());
+ *
+ * ctx.export("exampleUsingId", exampleUsingIdFlinkComputePool.applyValue(getFlinkComputePoolResult -> getFlinkComputePoolResult));
+ * final var exampleUsingNameFlinkComputePool = ConfluentcloudFunctions.getFlinkComputePool(GetFlinkComputePoolArgs.builder()
+ * .displayName("my_compute_pool")
+ * .environment(GetFlinkComputePoolEnvironmentArgs.builder()
+ * .id("env-xyz456")
+ * .build())
+ * .build());
+ *
+ * ctx.export("exampleUsingName", exampleUsingNameFlinkComputePool.applyValue(getFlinkComputePoolResult -> getFlinkComputePoolResult));
+ * }
+ * }
+ * ```
+ *
+ */
+ public static Output getFlinkComputePool(GetFlinkComputePoolArgs args) {
+ return getFlinkComputePool(args, InvokeOptions.Empty);
+ }
+ /**
+ * [![Early Access](https://img.shields.io/badge/Lifecycle%20Stage-Early%20Access-%2300afba)](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy)
+ *
+ * > **Note:** `confluentcloud.FlinkComputePool` data source is available in **Early Access** for early adopters. Early Access features are introduced to gather customer feedback. This feature should be used only for evaluation and non-production testing purposes or to provide feedback to Confluent, particularly as it becomes more widely available in follow-on editions.\
+ * **Early Access** features are intended for evaluation use in development and testing environments only, and not for production use. The warranty, SLA, and Support Services provisions of your agreement with Confluent do not apply to Early Access features. Early Access features are considered to be a Proof of Concept as defined in the Confluent Cloud Terms of Service. Confluent may discontinue providing early access releases of the Early Access features at any time in Confluent’s sole discretion.
+ *
+ * `confluentcloud.FlinkComputePool` describes a Flink Compute Pool data source.
+ *
+ * ## Example Usage
+ * ```java
+ * package generated_program;
+ *
+ * import com.pulumi.Context;
+ * import com.pulumi.Pulumi;
+ * import com.pulumi.core.Output;
+ * import com.pulumi.confluentcloud.ConfluentcloudFunctions;
+ * import com.pulumi.confluentcloud.inputs.GetFlinkComputePoolArgs;
+ * import com.pulumi.confluentcloud.inputs.GetFlinkComputePoolEnvironmentArgs;
+ * import java.util.List;
+ * import java.util.ArrayList;
+ * import java.util.Map;
+ * import java.io.File;
+ * import java.nio.file.Files;
+ * import java.nio.file.Paths;
+ *
+ * public class App {
+ * public static void main(String[] args) {
+ * Pulumi.run(App::stack);
+ * }
+ *
+ * public static void stack(Context ctx) {
+ * final var exampleUsingIdFlinkComputePool = ConfluentcloudFunctions.getFlinkComputePool(GetFlinkComputePoolArgs.builder()
+ * .id("lfcp-abc123")
+ * .environment(GetFlinkComputePoolEnvironmentArgs.builder()
+ * .id("env-xyz456")
+ * .build())
+ * .build());
+ *
+ * ctx.export("exampleUsingId", exampleUsingIdFlinkComputePool.applyValue(getFlinkComputePoolResult -> getFlinkComputePoolResult));
+ * final var exampleUsingNameFlinkComputePool = ConfluentcloudFunctions.getFlinkComputePool(GetFlinkComputePoolArgs.builder()
+ * .displayName("my_compute_pool")
+ * .environment(GetFlinkComputePoolEnvironmentArgs.builder()
+ * .id("env-xyz456")
+ * .build())
+ * .build());
+ *
+ * ctx.export("exampleUsingName", exampleUsingNameFlinkComputePool.applyValue(getFlinkComputePoolResult -> getFlinkComputePoolResult));
+ * }
+ * }
+ * ```
+ *
+ */
+ public static CompletableFuture getFlinkComputePoolPlain(GetFlinkComputePoolPlainArgs args) {
+ return getFlinkComputePoolPlain(args, InvokeOptions.Empty);
+ }
+ /**
+ * [![Early Access](https://img.shields.io/badge/Lifecycle%20Stage-Early%20Access-%2300afba)](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy)
+ *
+ * > **Note:** `confluentcloud.FlinkComputePool` data source is available in **Early Access** for early adopters. Early Access features are introduced to gather customer feedback. This feature should be used only for evaluation and non-production testing purposes or to provide feedback to Confluent, particularly as it becomes more widely available in follow-on editions.\
+ * **Early Access** features are intended for evaluation use in development and testing environments only, and not for production use. The warranty, SLA, and Support Services provisions of your agreement with Confluent do not apply to Early Access features. Early Access features are considered to be a Proof of Concept as defined in the Confluent Cloud Terms of Service. Confluent may discontinue providing early access releases of the Early Access features at any time in Confluent’s sole discretion.
+ *
+ * `confluentcloud.FlinkComputePool` describes a Flink Compute Pool data source.
+ *
+ * ## Example Usage
+ * ```java
+ * package generated_program;
+ *
+ * import com.pulumi.Context;
+ * import com.pulumi.Pulumi;
+ * import com.pulumi.core.Output;
+ * import com.pulumi.confluentcloud.ConfluentcloudFunctions;
+ * import com.pulumi.confluentcloud.inputs.GetFlinkComputePoolArgs;
+ * import com.pulumi.confluentcloud.inputs.GetFlinkComputePoolEnvironmentArgs;
+ * import java.util.List;
+ * import java.util.ArrayList;
+ * import java.util.Map;
+ * import java.io.File;
+ * import java.nio.file.Files;
+ * import java.nio.file.Paths;
+ *
+ * public class App {
+ * public static void main(String[] args) {
+ * Pulumi.run(App::stack);
+ * }
+ *
+ * public static void stack(Context ctx) {
+ * final var exampleUsingIdFlinkComputePool = ConfluentcloudFunctions.getFlinkComputePool(GetFlinkComputePoolArgs.builder()
+ * .id("lfcp-abc123")
+ * .environment(GetFlinkComputePoolEnvironmentArgs.builder()
+ * .id("env-xyz456")
+ * .build())
+ * .build());
+ *
+ * ctx.export("exampleUsingId", exampleUsingIdFlinkComputePool.applyValue(getFlinkComputePoolResult -> getFlinkComputePoolResult));
+ * final var exampleUsingNameFlinkComputePool = ConfluentcloudFunctions.getFlinkComputePool(GetFlinkComputePoolArgs.builder()
+ * .displayName("my_compute_pool")
+ * .environment(GetFlinkComputePoolEnvironmentArgs.builder()
+ * .id("env-xyz456")
+ * .build())
+ * .build());
+ *
+ * ctx.export("exampleUsingName", exampleUsingNameFlinkComputePool.applyValue(getFlinkComputePoolResult -> getFlinkComputePoolResult));
+ * }
+ * }
+ * ```
+ *
+ */
+ public static Output getFlinkComputePool(GetFlinkComputePoolArgs args, InvokeOptions options) {
+ return Deployment.getInstance().invoke("confluentcloud:index/getFlinkComputePool:getFlinkComputePool", TypeShape.of(GetFlinkComputePoolResult.class), args, Utilities.withVersion(options));
+ }
+ /**
+ * [![Early Access](https://img.shields.io/badge/Lifecycle%20Stage-Early%20Access-%2300afba)](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy)
+ *
+ * > **Note:** `confluentcloud.FlinkComputePool` data source is available in **Early Access** for early adopters. Early Access features are introduced to gather customer feedback. This feature should be used only for evaluation and non-production testing purposes or to provide feedback to Confluent, particularly as it becomes more widely available in follow-on editions.\
+ * **Early Access** features are intended for evaluation use in development and testing environments only, and not for production use. The warranty, SLA, and Support Services provisions of your agreement with Confluent do not apply to Early Access features. Early Access features are considered to be a Proof of Concept as defined in the Confluent Cloud Terms of Service. Confluent may discontinue providing early access releases of the Early Access features at any time in Confluent’s sole discretion.
+ *
+ * `confluentcloud.FlinkComputePool` describes a Flink Compute Pool data source.
+ *
+ * ## Example Usage
+ * ```java
+ * package generated_program;
+ *
+ * import com.pulumi.Context;
+ * import com.pulumi.Pulumi;
+ * import com.pulumi.core.Output;
+ * import com.pulumi.confluentcloud.ConfluentcloudFunctions;
+ * import com.pulumi.confluentcloud.inputs.GetFlinkComputePoolArgs;
+ * import com.pulumi.confluentcloud.inputs.GetFlinkComputePoolEnvironmentArgs;
+ * import java.util.List;
+ * import java.util.ArrayList;
+ * import java.util.Map;
+ * import java.io.File;
+ * import java.nio.file.Files;
+ * import java.nio.file.Paths;
+ *
+ * public class App {
+ * public static void main(String[] args) {
+ * Pulumi.run(App::stack);
+ * }
+ *
+ * public static void stack(Context ctx) {
+ * final var exampleUsingIdFlinkComputePool = ConfluentcloudFunctions.getFlinkComputePool(GetFlinkComputePoolArgs.builder()
+ * .id("lfcp-abc123")
+ * .environment(GetFlinkComputePoolEnvironmentArgs.builder()
+ * .id("env-xyz456")
+ * .build())
+ * .build());
+ *
+ * ctx.export("exampleUsingId", exampleUsingIdFlinkComputePool.applyValue(getFlinkComputePoolResult -> getFlinkComputePoolResult));
+ * final var exampleUsingNameFlinkComputePool = ConfluentcloudFunctions.getFlinkComputePool(GetFlinkComputePoolArgs.builder()
+ * .displayName("my_compute_pool")
+ * .environment(GetFlinkComputePoolEnvironmentArgs.builder()
+ * .id("env-xyz456")
+ * .build())
+ * .build());
+ *
+ * ctx.export("exampleUsingName", exampleUsingNameFlinkComputePool.applyValue(getFlinkComputePoolResult -> getFlinkComputePoolResult));
+ * }
+ * }
+ * ```
+ *
+ */
+ public static CompletableFuture getFlinkComputePoolPlain(GetFlinkComputePoolPlainArgs args, InvokeOptions options) {
+ return Deployment.getInstance().invokeAsync("confluentcloud:index/getFlinkComputePool:getFlinkComputePool", TypeShape.of(GetFlinkComputePoolResult.class), args, Utilities.withVersion(options));
+ }
/**
* [![General Availability](https://img.shields.io/badge/Lifecycle%20Stage-General%20Availability-%2345c6e8)](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy)
*
@@ -3731,6 +3954,8 @@ public static CompletableFuture getPri
*
* `confluentcloud.RoleBinding` describes a Role Binding.
*
+ * > **Note:** For more information on the Role Bindings, see [Predefined RBAC roles in Confluent Cloud](https://docs.confluent.io/cloud/current/access-management/access-control/rbac/predefined-rbac-roles.html).
+ *
* ## Example Usage
* ```java
* package generated_program;
@@ -3771,6 +3996,8 @@ public static Output getRoleBinding(GetRoleBindingArgs arg
*
* `confluentcloud.RoleBinding` describes a Role Binding.
*
+ * > **Note:** For more information on the Role Bindings, see [Predefined RBAC roles in Confluent Cloud](https://docs.confluent.io/cloud/current/access-management/access-control/rbac/predefined-rbac-roles.html).
+ *
* ## Example Usage
* ```java
* package generated_program;
@@ -3811,6 +4038,8 @@ public static CompletableFuture getRoleBindingPlain(GetRol
*
* `confluentcloud.RoleBinding` describes a Role Binding.
*
+ * > **Note:** For more information on the Role Bindings, see [Predefined RBAC roles in Confluent Cloud](https://docs.confluent.io/cloud/current/access-management/access-control/rbac/predefined-rbac-roles.html).
+ *
* ## Example Usage
* ```java
* package generated_program;
@@ -3851,6 +4080,8 @@ public static Output getRoleBinding(GetRoleBindingArgs arg
*
* `confluentcloud.RoleBinding` describes a Role Binding.
*
+ * > **Note:** For more information on the Role Bindings, see [Predefined RBAC roles in Confluent Cloud](https://docs.confluent.io/cloud/current/access-management/access-control/rbac/predefined-rbac-roles.html).
+ *
* ## Example Usage
* ```java
* package generated_program;
diff --git a/sdk/java/src/main/java/com/pulumi/confluentcloud/FlinkComputePool.java b/sdk/java/src/main/java/com/pulumi/confluentcloud/FlinkComputePool.java
new file mode 100644
index 00000000..df0f7f09
--- /dev/null
+++ b/sdk/java/src/main/java/com/pulumi/confluentcloud/FlinkComputePool.java
@@ -0,0 +1,261 @@
+// *** WARNING: this file was generated by pulumi-java-gen. ***
+// *** Do not edit by hand unless you're certain you know what you are doing! ***
+
+package com.pulumi.confluentcloud;
+
+import com.pulumi.confluentcloud.FlinkComputePoolArgs;
+import com.pulumi.confluentcloud.Utilities;
+import com.pulumi.confluentcloud.inputs.FlinkComputePoolState;
+import com.pulumi.confluentcloud.outputs.FlinkComputePoolEnvironment;
+import com.pulumi.core.Output;
+import com.pulumi.core.annotations.Export;
+import com.pulumi.core.annotations.ResourceType;
+import com.pulumi.core.internal.Codegen;
+import java.lang.Integer;
+import java.lang.String;
+import javax.annotation.Nullable;
+
+/**
+ * ## Example Usage
+ * ```java
+ * package generated_program;
+ *
+ * import com.pulumi.Context;
+ * import com.pulumi.Pulumi;
+ * import com.pulumi.core.Output;
+ * import com.pulumi.confluentcloud.Environment;
+ * import com.pulumi.confluentcloud.FlinkComputePool;
+ * import com.pulumi.confluentcloud.FlinkComputePoolArgs;
+ * import com.pulumi.confluentcloud.inputs.FlinkComputePoolEnvironmentArgs;
+ * import java.util.List;
+ * import java.util.ArrayList;
+ * import java.util.Map;
+ * import java.io.File;
+ * import java.nio.file.Files;
+ * import java.nio.file.Paths;
+ *
+ * public class App {
+ * public static void main(String[] args) {
+ * Pulumi.run(App::stack);
+ * }
+ *
+ * public static void stack(Context ctx) {
+ * var development = new Environment("development");
+ *
+ * var main = new FlinkComputePool("main", FlinkComputePoolArgs.builder()
+ * .displayName("standard_compute_pool")
+ * .cloud("AWS")
+ * .region("us-east-1")
+ * .maxCfu(5)
+ * .environment(FlinkComputePoolEnvironmentArgs.builder()
+ * .id(development.id())
+ * .build())
+ * .build());
+ *
+ * }
+ * }
+ * ```
+ *
+ * ## Import
+ *
+ * You can import a Flink Compute Pool by using Environment ID and Flink Compute Pool ID, in the format `&lt;Environment ID&gt;/&lt;Flink Compute Pool ID&gt;`. The following example shows how to import a Flink Compute Pool (first set the `CONFLUENT_CLOUD_API_KEY` and `CONFLUENT_CLOUD_API_SECRET` environment variables to your Cloud API key and secret):
+ *
+ * ```sh
+ * $ pulumi import confluentcloud:index/flinkComputePool:FlinkComputePool main env-abc123/lfcp-abc123
+ * ```
+ *
+ * !> **Warning:** Do not forget to delete terminal command history afterwards for security purposes.
+ *
+ */
+@ResourceType(type="confluentcloud:index/flinkComputePool:FlinkComputePool")
+public class FlinkComputePool extends com.pulumi.resources.CustomResource {
+ /**
+ * (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`.
+ *
+ */
+ @Export(name="apiVersion", refs={String.class}, tree="[0]")
+ private Output apiVersion;
+
+ /**
+ * @return (Required String) The API Version of the schema version of the Flink Compute Pool, for example, `fcpm/v2`.
+ *
+ */
+ public Output apiVersion() {
+ return this.apiVersion;
+ }
+ /**
+ * The cloud service provider that runs the Flink Compute Pool.
+ *
+ */
+ @Export(name="cloud", refs={String.class}, tree="[0]")
+ private Output cloud;
+
+ /**
+ * @return The cloud service provider that runs the Flink Compute Pool.
+ *
+ */
+ public Output cloud() {
+ return this.cloud;
+ }
+ /**
+ * (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool.
+ *
+ */
+ @Export(name="currentCfu", refs={Integer.class}, tree="[0]")
+ private Output currentCfu;
+
+ /**
+ * @return (Required Integer) The number of Confluent Flink Units (CFUs) currently allocated to this Flink compute pool.
+ *
+ */
+ public Output currentCfu() {
+ return this.currentCfu;
+ }
+ /**
+ * The name of the Flink Compute Pool.
+ *
+ */
+ @Export(name="displayName", refs={String.class}, tree="[0]")
+ private Output displayName;
+
+ /**
+ * @return The name of the Flink Compute Pool.
+ *
+ */
+ public Output displayName() {
+ return this.displayName;
+ }
+ /**
+ * Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
+ *
+ */
+ @Export(name="environment", refs={FlinkComputePoolEnvironment.class}, tree="[0]")
+ private Output environment;
+
+ /**
+ * @return Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
+ *
+ */
+ public Output environment() {
+ return this.environment;
+ }
+ /**
+ * (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`.
+ *
+ */
+ @Export(name="kind", refs={String.class}, tree="[0]")
+ private Output kind;
+
+ /**
+ * @return (Required String) The kind of the Flink Compute Pool, for example, `ComputePool`.
+ *
+ */
+ public Output kind() {
+ return this.kind;
+ }
+ /**
+ * Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`.
+ *
+ */
+ @Export(name="maxCfu", refs={Integer.class}, tree="[0]")
+ private Output maxCfu;
+
+ /**
+ * @return Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`.
+ *
+ */
+ public Output maxCfu() {
+ return this.maxCfu;
+ }
+ /**
+ * The cloud service provider region that hosts the Flink Compute Pool.
+ *
+ */
+ @Export(name="region", refs={String.class}, tree="[0]")
+ private Output region;
+
+ /**
+ * @return The cloud service provider region that hosts the Flink Compute Pool.
+ *
+ */
+ public Output region() {
+ return this.region;
+ }
+ /**
+ * (Required String) The Confluent Resource Name of the Flink Compute Pool.
+ *
+ */
+ @Export(name="resourceName", refs={String.class}, tree="[0]")
+ private Output resourceName;
+
+ /**
+ * @return (Required String) The Confluent Resource Name of the Flink Compute Pool.
+ *
+ */
+ public Output resourceName() {
+ return this.resourceName;
+ }
+ /**
+ * (Required String) The API endpoint of the Flink Compute Pool.
+ *
+ */
+ @Export(name="restEndpoint", refs={String.class}, tree="[0]")
+ private Output restEndpoint;
+
+ /**
+ * @return (Required String) The API endpoint of the Flink Compute Pool.
+ *
+ */
+ public Output restEndpoint() {
+ return this.restEndpoint;
+ }
+
+ /**
+ *
+ * @param name The _unique_ name of the resulting resource.
+ */
+ public FlinkComputePool(String name) {
+ this(name, FlinkComputePoolArgs.Empty);
+ }
+ /**
+ *
+ * @param name The _unique_ name of the resulting resource.
+ * @param args The arguments to use to populate this resource's properties.
+ */
+ public FlinkComputePool(String name, FlinkComputePoolArgs args) {
+ this(name, args, null);
+ }
+ /**
+ *
+ * @param name The _unique_ name of the resulting resource.
+ * @param args The arguments to use to populate this resource's properties.
+ * @param options A bag of options that control this resource's behavior.
+ */
+ public FlinkComputePool(String name, FlinkComputePoolArgs args, @Nullable com.pulumi.resources.CustomResourceOptions options) {
+ super("confluentcloud:index/flinkComputePool:FlinkComputePool", name, args == null ? FlinkComputePoolArgs.Empty : args, makeResourceOptions(options, Codegen.empty()));
+ }
+
+ private FlinkComputePool(String name, Output id, @Nullable FlinkComputePoolState state, @Nullable com.pulumi.resources.CustomResourceOptions options) {
+ super("confluentcloud:index/flinkComputePool:FlinkComputePool", name, state, makeResourceOptions(options, id));
+ }
+
+ private static com.pulumi.resources.CustomResourceOptions makeResourceOptions(@Nullable com.pulumi.resources.CustomResourceOptions options, @Nullable Output id) {
+ var defaultOptions = com.pulumi.resources.CustomResourceOptions.builder()
+ .version(Utilities.getVersion())
+ .build();
+ return com.pulumi.resources.CustomResourceOptions.merge(defaultOptions, options, id);
+ }
+
+ /**
+ * Get an existing FlinkComputePool resource's state with the given name, ID, and optional extra
+ * properties used to qualify the lookup.
+ *
+ * @param name The _unique_ name of the resulting resource.
+ * @param id The _unique_ provider ID of the resource to lookup.
+ * @param state
+ * @param options Optional settings to control the behavior of the CustomResource.
+ */
+ public static FlinkComputePool get(String name, Output id, @Nullable FlinkComputePoolState state, @Nullable com.pulumi.resources.CustomResourceOptions options) {
+ return new FlinkComputePool(name, id, state, options);
+ }
+}
diff --git a/sdk/java/src/main/java/com/pulumi/confluentcloud/FlinkComputePoolArgs.java b/sdk/java/src/main/java/com/pulumi/confluentcloud/FlinkComputePoolArgs.java
new file mode 100644
index 00000000..cfb3e5c4
--- /dev/null
+++ b/sdk/java/src/main/java/com/pulumi/confluentcloud/FlinkComputePoolArgs.java
@@ -0,0 +1,237 @@
+// *** WARNING: this file was generated by pulumi-java-gen. ***
+// *** Do not edit by hand unless you're certain you know what you are doing! ***
+
+package com.pulumi.confluentcloud;
+
+import com.pulumi.confluentcloud.inputs.FlinkComputePoolEnvironmentArgs;
+import com.pulumi.core.Output;
+import com.pulumi.core.annotations.Import;
+import java.lang.Integer;
+import java.lang.String;
+import java.util.Objects;
+import java.util.Optional;
+import javax.annotation.Nullable;
+
+
+public final class FlinkComputePoolArgs extends com.pulumi.resources.ResourceArgs {
+
+ public static final FlinkComputePoolArgs Empty = new FlinkComputePoolArgs();
+
+ /**
+ * The cloud service provider that runs the Flink Compute Pool.
+ *
+ */
+ @Import(name="cloud", required=true)
+ private Output cloud;
+
+ /**
+ * @return The cloud service provider that runs the Flink Compute Pool.
+ *
+ */
+ public Output cloud() {
+ return this.cloud;
+ }
+
+ /**
+ * The name of the Flink Compute Pool.
+ *
+ */
+ @Import(name="displayName", required=true)
+ private Output displayName;
+
+ /**
+ * @return The name of the Flink Compute Pool.
+ *
+ */
+ public Output displayName() {
+ return this.displayName;
+ }
+
+ /**
+ * Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
+ *
+ */
+ @Import(name="environment", required=true)
+ private Output environment;
+
+ /**
+ * @return Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
+ *
+ */
+ public Output environment() {
+ return this.environment;
+ }
+
+ /**
+ * Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`.
+ *
+ */
+ @Import(name="maxCfu")
+ private @Nullable Output maxCfu;
+
+ /**
+ * @return Maximum number of Confluent Flink Units (CFUs) that the Flink compute pool should auto-scale to. The accepted values are: `5` and `10`.
+ *
+ */
+ public Optional