Skip to content

Commit

Permalink
Rrf (#1621)
Browse files Browse the repository at this point in the history
* #1618: Block atlassian maven repo so build is faster

* Regen

* Regen
  • Loading branch information
davsclaus authored May 6, 2024
1 parent 4703001 commit ed622b4
Show file tree
Hide file tree
Showing 17 changed files with 1,033 additions and 2 deletions.
2 changes: 2 additions & 0 deletions .mvn/maven.config
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
-Daether.remoteRepositoryFilter.groupId=true
-Daether.remoteRepositoryFilter.groupId.basedir=${session.rootDirectory}/.mvn/rrf/
3 changes: 3 additions & 0 deletions .mvn/rrf/groupId-B_shibboleth.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
net.shibboleth
net.shibboleth.utilities
org.opensaml
7 changes: 7 additions & 0 deletions .mvn/rrf/groupId-atlassian.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
com.atlassian.event
com.atlassian.httpclient
com.atlassian.jira
com.atlassian.platform
com.atlassian.pom
com.atlassian.sal
io.atlassian.fugue
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
{
"connector": {
"class": "org.apache.camel.kafkaconnector.azurestoragefilessink.CamelAzurestoragefilessinkSinkConnector",
"artifactId": "camel-azure-storage-files-sink-kafka-connector",
"groupId": "org.apache.camel.kafkaconnector",
"id": "camel-azure-storage-files-sink-sink",
"type": "sink",
"version": "4.4.2-SNAPSHOT",
"description": "Upload data to Azure Storage Files Share.\n\nIn the header, you can set the `file` \/ `ce-file` property to specify the filename to upload. If you do set property in the header, the Kamelet uses the exchange ID as filename."
},
"properties": {
"camel.kamelet.azure-storage-files-sink.accountName": {
"name": "camel.kamelet.azure-storage-files-sink.accountName",
        "description": "The Azure Storage File Share account name.",
"priority": "HIGH",
"required": "true"
},
"camel.kamelet.azure-storage-files-sink.shareName": {
"name": "camel.kamelet.azure-storage-files-sink.shareName",
"description": "The Azure Storage File Share share name.",
"priority": "HIGH",
"required": "true"
},
"camel.kamelet.azure-storage-files-sink.sharedKey": {
"name": "camel.kamelet.azure-storage-files-sink.sharedKey",
        "description": "The Azure Storage File Share access key.",
"priority": "MEDIUM",
"required": "false"
},
"camel.kamelet.azure-storage-files-sink.credentialType": {
"name": "camel.kamelet.azure-storage-files-sink.credentialType",
"description": "Determines the credential strategy to adopt.",
"defaultValue": "\"SHARED_ACCOUNT_KEY\"",
"priority": "MEDIUM",
"required": "false"
},
"camel.kamelet.azure-storage-files-sink.directoryName": {
"name": "camel.kamelet.azure-storage-files-sink.directoryName",
"description": "The directory from where the producer will upload the file.",
"defaultValue": "\".\"",
"priority": "MEDIUM",
"required": "false"
}
}
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
{
"connector": {
"class": "org.apache.camel.kafkaconnector.azurestoragefilessource.CamelAzurestoragefilessourceSourceConnector",
"artifactId": "camel-azure-storage-files-source-kafka-connector",
"groupId": "org.apache.camel.kafkaconnector",
"id": "camel-azure-storage-files-source-source",
"type": "source",
"version": "4.4.2-SNAPSHOT",
"description": "Consume files from Azure Storage File Shares."
},
"properties": {
"camel.kamelet.azure-storage-files-source.accountName": {
"name": "camel.kamelet.azure-storage-files-source.accountName",
"description": "The Azure Storage File Share account name.",
"priority": "HIGH",
"required": "true"
},
"camel.kamelet.azure-storage-files-source.shareName": {
"name": "camel.kamelet.azure-storage-files-source.shareName",
"description": "The Azure Storage File Share share name.",
"priority": "HIGH",
"required": "true"
},
"camel.kamelet.azure-storage-files-source.sharedKey": {
"name": "camel.kamelet.azure-storage-files-source.sharedKey",
        "description": "The Azure Storage File Share access key.",
"priority": "MEDIUM",
"required": "false"
},
"camel.kamelet.azure-storage-files-source.delay": {
"name": "camel.kamelet.azure-storage-files-source.delay",
        "description": "The number of milliseconds before the next poll of the selected file share.",
"defaultValue": "500",
"priority": "MEDIUM",
"required": "false"
},
"camel.kamelet.azure-storage-files-source.deleteAfterRead": {
"name": "camel.kamelet.azure-storage-files-source.deleteAfterRead",
        "description": "Specifies to delete files after consuming them",
"defaultValue": "false",
"priority": "MEDIUM",
"required": "false"
},
"camel.kamelet.azure-storage-files-source.credentialType": {
"name": "camel.kamelet.azure-storage-files-source.credentialType",
"description": "Determines the credential strategy to adopt.",
"defaultValue": "\"SHARED_ACCOUNT_KEY\"",
"priority": "MEDIUM",
"required": "false"
},
"camel.kamelet.azure-storage-files-source.directoryName": {
"name": "camel.kamelet.azure-storage-files-source.directoryName",
"description": "The directory from where the consumer will start reading files.",
"defaultValue": "\".\"",
"priority": "MEDIUM",
"required": "false"
},
"camel.kamelet.azure-storage-files-source.recursive": {
"name": "camel.kamelet.azure-storage-files-source.recursive",
"description": "If a directory, the consumer will look for files in all the sub-directories as well.",
"defaultValue": "false",
"priority": "MEDIUM",
"required": "false"
}
}
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,106 @@
{
"connector": {
"class": "org.apache.camel.kafkaconnector.kafkabatchapicurioregistrynotsecuredsource.CamelKafkabatchapicurioregistrynotsecuredsourceSourceConnector",
"artifactId": "camel-kafka-batch-apicurio-registry-not-secured-source-kafka-connector",
"groupId": "org.apache.camel.kafkaconnector",
"id": "camel-kafka-batch-apicurio-registry-not-secured-source-source",
"type": "source",
"version": "4.4.2-SNAPSHOT",
"description": "Receive data from Kafka topics in batch on an insecure broker combined with Apicurio Registry and commit them manually through KafkaManualCommit or auto commit."
},
"properties": {
"camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.topic": {
"name": "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.topic",
"description": "Comma separated list of Kafka topic names",
"priority": "HIGH",
"required": "true"
},
"camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.bootstrapServers": {
"name": "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.bootstrapServers",
"description": "Comma separated list of Kafka Broker URLs",
"priority": "HIGH",
"required": "true"
},
"camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.autoCommitEnable": {
"name": "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.autoCommitEnable",
"description": "If true, periodically commit to ZooKeeper the offset of messages already fetched by the consumer",
"defaultValue": "true",
"priority": "MEDIUM",
"required": "false"
},
"camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.allowManualCommit": {
"name": "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.allowManualCommit",
"description": "Whether to allow doing manual commits",
"defaultValue": "false",
"priority": "MEDIUM",
"required": "false"
},
"camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.pollOnError": {
"name": "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.pollOnError",
"description": "What to do if kafka threw an exception while polling for new messages. There are 5 enums and the value can be one of DISCARD, ERROR_HANDLER, RECONNECT, RETRY, STOP",
"defaultValue": "\"ERROR_HANDLER\"",
"priority": "MEDIUM",
"required": "false"
},
"camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.autoOffsetReset": {
"name": "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.autoOffsetReset",
"description": "What to do when there is no initial offset. There are 3 enums and the value can be one of latest, earliest, none",
"defaultValue": "\"latest\"",
"priority": "MEDIUM",
"required": "false"
},
"camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.consumerGroup": {
"name": "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.consumerGroup",
"description": "A string that uniquely identifies the group of consumers to which this source belongs Example: my-group-id",
"priority": "MEDIUM",
"required": "false"
},
"camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.deserializeHeaders": {
"name": "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.deserializeHeaders",
"description": "When enabled the Kamelet source will deserialize all message headers to String representation.",
"defaultValue": "true",
"priority": "MEDIUM",
"required": "false"
},
"camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.valueDeserializer": {
"name": "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.valueDeserializer",
"description": "Deserializer class for value that implements the Deserializer interface.",
"defaultValue": "\"io.apicurio.registry.serde.avro.AvroKafkaDeserializer\"",
"priority": "MEDIUM",
"required": "false"
},
"camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.apicurioRegistryUrl": {
"name": "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.apicurioRegistryUrl",
"description": "The Apicurio Schema Registry URL",
"priority": "HIGH",
"required": "true"
},
"camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.avroDatumProvider": {
"name": "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.avroDatumProvider",
"description": "How to read data with Avro",
"defaultValue": "\"io.apicurio.registry.serde.avro.ReflectAvroDatumProvider\"",
"priority": "MEDIUM",
"required": "false"
},
"camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.batchSize": {
"name": "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.batchSize",
"description": "The maximum number of records returned in a single call to poll()",
"defaultValue": "500",
"priority": "MEDIUM",
"required": "false"
},
"camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.pollTimeout": {
"name": "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.pollTimeout",
"description": "The timeout used when polling the KafkaConsumer",
"defaultValue": "5000",
"priority": "MEDIUM",
"required": "false"
},
"camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.maxPollIntervalMs": {
"name": "camel.kamelet.kafka-batch-apicurio-registry-not-secured-source.maxPollIntervalMs",
"description": "The maximum delay between invocations of poll() when using consumer group management",
"priority": "MEDIUM",
"required": "false"
}
}
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,125 @@
{
"connector": {
"class": "org.apache.camel.kafkaconnector.kafkabatchazureschemaregistrysource.CamelKafkabatchazureschemaregistrysourceSourceConnector",
"artifactId": "camel-kafka-batch-azure-schema-registry-source-kafka-connector",
"groupId": "org.apache.camel.kafkaconnector",
"id": "camel-kafka-batch-azure-schema-registry-source-source",
"type": "source",
"version": "4.4.2-SNAPSHOT",
"description": "Receive data from Kafka topics in batch on Azure Eventhubs combined with Azure Schema Registry and commit them manually through KafkaManualCommit or auto commit."
},
"properties": {
"camel.kamelet.kafka-batch-azure-schema-registry-source.topic": {
"name": "camel.kamelet.kafka-batch-azure-schema-registry-source.topic",
"description": "Comma separated list of Kafka topic names",
"priority": "HIGH",
"required": "true"
},
"camel.kamelet.kafka-batch-azure-schema-registry-source.bootstrapServers": {
"name": "camel.kamelet.kafka-batch-azure-schema-registry-source.bootstrapServers",
"description": "Comma separated list of Kafka Broker URLs",
"priority": "HIGH",
"required": "true"
},
"camel.kamelet.kafka-batch-azure-schema-registry-source.securityProtocol": {
"name": "camel.kamelet.kafka-batch-azure-schema-registry-source.securityProtocol",
"description": "Protocol used to communicate with brokers. SASL_PLAINTEXT, PLAINTEXT, SASL_SSL and SSL are supported",
"defaultValue": "\"SASL_SSL\"",
"priority": "MEDIUM",
"required": "false"
},
"camel.kamelet.kafka-batch-azure-schema-registry-source.saslMechanism": {
"name": "camel.kamelet.kafka-batch-azure-schema-registry-source.saslMechanism",
"description": "The Simple Authentication and Security Layer (SASL) Mechanism used.",
"defaultValue": "\"PLAIN\"",
"priority": "MEDIUM",
"required": "false"
},
"camel.kamelet.kafka-batch-azure-schema-registry-source.password": {
"name": "camel.kamelet.kafka-batch-azure-schema-registry-source.password",
"description": "Password to authenticate to kafka",
"priority": "HIGH",
"required": "true"
},
"camel.kamelet.kafka-batch-azure-schema-registry-source.autoCommitEnable": {
"name": "camel.kamelet.kafka-batch-azure-schema-registry-source.autoCommitEnable",
"description": "If true, periodically commit to ZooKeeper the offset of messages already fetched by the consumer",
"defaultValue": "true",
"priority": "MEDIUM",
"required": "false"
},
"camel.kamelet.kafka-batch-azure-schema-registry-source.allowManualCommit": {
"name": "camel.kamelet.kafka-batch-azure-schema-registry-source.allowManualCommit",
"description": "Whether to allow doing manual commits",
"defaultValue": "false",
"priority": "MEDIUM",
"required": "false"
},
"camel.kamelet.kafka-batch-azure-schema-registry-source.pollOnError": {
"name": "camel.kamelet.kafka-batch-azure-schema-registry-source.pollOnError",
"description": "What to do if kafka threw an exception while polling for new messages. There are 5 enums and the value can be one of DISCARD, ERROR_HANDLER, RECONNECT, RETRY, STOP",
"defaultValue": "\"ERROR_HANDLER\"",
"priority": "MEDIUM",
"required": "false"
},
"camel.kamelet.kafka-batch-azure-schema-registry-source.autoOffsetReset": {
"name": "camel.kamelet.kafka-batch-azure-schema-registry-source.autoOffsetReset",
"description": "What to do when there is no initial offset. There are 3 enums and the value can be one of latest, earliest, none",
"defaultValue": "\"latest\"",
"priority": "MEDIUM",
"required": "false"
},
"camel.kamelet.kafka-batch-azure-schema-registry-source.consumerGroup": {
"name": "camel.kamelet.kafka-batch-azure-schema-registry-source.consumerGroup",
"description": "A string that uniquely identifies the group of consumers to which this source belongs Example: my-group-id",
"priority": "MEDIUM",
"required": "false"
},
"camel.kamelet.kafka-batch-azure-schema-registry-source.deserializeHeaders": {
"name": "camel.kamelet.kafka-batch-azure-schema-registry-source.deserializeHeaders",
"description": "When enabled the Kamelet source will deserialize all message headers to String representation.",
"defaultValue": "true",
"priority": "MEDIUM",
"required": "false"
},
"camel.kamelet.kafka-batch-azure-schema-registry-source.valueDeserializer": {
"name": "camel.kamelet.kafka-batch-azure-schema-registry-source.valueDeserializer",
"description": "Deserializer class for value that implements the Deserializer interface.",
"defaultValue": "\"com.microsoft.azure.schemaregistry.kafka.avro.KafkaAvroDeserializer\"",
"priority": "MEDIUM",
"required": "false"
},
"camel.kamelet.kafka-batch-azure-schema-registry-source.azureRegistryUrl": {
"name": "camel.kamelet.kafka-batch-azure-schema-registry-source.azureRegistryUrl",
        "description": "The Azure Schema Registry URL",
"priority": "HIGH",
"required": "true"
},
"camel.kamelet.kafka-batch-azure-schema-registry-source.specificAvroValueType": {
"name": "camel.kamelet.kafka-batch-azure-schema-registry-source.specificAvroValueType",
"description": "The Specific Type Avro will have to deal with Example: com.example.Order",
"priority": "MEDIUM",
"required": "false"
},
"camel.kamelet.kafka-batch-azure-schema-registry-source.batchSize": {
"name": "camel.kamelet.kafka-batch-azure-schema-registry-source.batchSize",
"description": "The maximum number of records returned in a single call to poll()",
"defaultValue": "500",
"priority": "MEDIUM",
"required": "false"
},
"camel.kamelet.kafka-batch-azure-schema-registry-source.pollTimeout": {
"name": "camel.kamelet.kafka-batch-azure-schema-registry-source.pollTimeout",
"description": "The timeout used when polling the KafkaConsumer",
"defaultValue": "5000",
"priority": "MEDIUM",
"required": "false"
},
"camel.kamelet.kafka-batch-azure-schema-registry-source.maxPollIntervalMs": {
"name": "camel.kamelet.kafka-batch-azure-schema-registry-source.maxPollIntervalMs",
"description": "The maximum delay between invocations of poll() when using consumer group management",
"priority": "MEDIUM",
"required": "false"
}
}
}
Loading

0 comments on commit ed622b4

Please sign in to comment.