From 6fb601594b393868f9330a25e616a15c0008029e Mon Sep 17 00:00:00 2001 From: SanthoshVasabhaktula Date: Wed, 29 Jul 2015 23:23:49 +0530 Subject: [PATCH] staging configuration --- src/main/config/secor.common.properties | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/main/config/secor.common.properties b/src/main/config/secor.common.properties index 1c296212d..9a8e92834 100644 --- a/src/main/config/secor.common.properties +++ b/src/main/config/secor.common.properties @@ -18,11 +18,11 @@ ############ # Regular expression matching names of consumed topics. -secor.kafka.topic_filter=.* +secor.kafka.topic_filter=telemetry # AWS authentication credentials. # SECURITY NOTE(review): never commit real AWS keys to version control — the original values here were live credentials and must be rotated; supply them via environment variables or a secrets manager instead. -aws.access.key= -aws.secret.key= +aws.access.key=<REDACTED_AWS_ACCESS_KEY> +aws.secret.key=<REDACTED_AWS_SECRET_KEY> ################ # END MUST SET # @@ -128,19 +128,19 @@ monitoring.blacklist.topics= statsd.hostport= # Name of field that contains timestamp for JSON, MessagePack, or Thrift message parser. (1405970352123) -message.timestamp.name=timestamp +message.timestamp.name=ts # Name of field that contains a timestamp, as a date Format, for JSON. (2014-08-07, Jul 23 02:16:57 2005, etc...) # Should be used when there is no timestamp in a Long format. Also ignore time zones. -message.timestamp.input.pattern= +message.timestamp.input.pattern=yyyy-MM-dd # To enable compression, set this to a valid compression codec implementing # org.apache.hadoop.io.compress.CompressionCodec interface, such as # 'org.apache.hadoop.io.compress.GzipCodec'. -secor.compression.codec= +secor.compression.codec=org.apache.hadoop.io.compress.GzipCodec # The secor file reader/writer used to read/write the data, by default we write sequence files -secor.file.reader.writer.factory=com.pinterest.secor.io.impl.SequenceFileReaderWriterFactory +secor.file.reader.writer.factory=com.pinterest.secor.io.impl.DelimitedTextFileReaderWriterFactory # Max message size in bytes to retrieve via KafkaClient. 
This is used by ProgressMonitor and PartitionFinalizer. # This should be set large enough to accept the max message size configured in your kafka broker