# Generated by Apache Ambari. Tue Jul 25 17:59:28 2017
auto.create.topics.enable=true
auto.leader.rebalance.enable=true
compression.type=producer
controlled.shutdown.enable=true
controlled.shutdown.max.retries=3
controlled.shutdown.retry.backoff.ms=5000
controller.message.queue.size=10
controller.socket.timeout.ms=30000
default.replication.factor=1
delete.topic.enable=false
external.kafka.metrics.exclude.prefix=kafka.network.RequestMetrics,kafka.server.DelayedOperationPurgatory,kafka.server.BrokerTopicMetrics.BytesRejectedPerSec
external.kafka.metrics.include.prefix=kafka.network.RequestMetrics.ResponseQueueTimeMs.request.OffsetCommit.98percentile,kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Offsets.95percentile,kafka.network.RequestMetrics.ResponseSendTimeMs.request.Fetch.95percentile,kafka.network.RequestMetrics.RequestsPerSec.request
fetch.purgatory.purge.interval.requests=10000
kafka.ganglia.metrics.group=kafka
kafka.ganglia.metrics.host=localhost
kafka.ganglia.metrics.port=8671
kafka.ganglia.metrics.reporter.enabled=true
kafka.metrics.reporters=org.apache.hadoop.metrics2.sink.kafka.KafkaTimelineMetricsReporter
kafka.timeline.metrics.host=nwk2-bdp-hadoop-07.gdcs-qa.apple.com
kafka.timeline.metrics.maxRowCacheSize=10000
kafka.timeline.metrics.port=6188
kafka.timeline.metrics.protocol=http
kafka.timeline.metrics.reporter.enabled=true
kafka.timeline.metrics.reporter.sendInterval=5900
kafka.timeline.metrics.truststore.password=bigdata
kafka.timeline.metrics.truststore.path=/etc/security/clientKeys/all.jks
kafka.timeline.metrics.truststore.type=jks
leader.imbalance.check.interval.seconds=300
leader.imbalance.per.broker.percentage=10
listeners=SSL://nwk2-bdp-kafka-04.gdcs-qa.apple.com:6668,PLAINTEXT://nwk2-bdp-kafka-04.gdcs-qa.apple.com:6667
log.cleanup.interval.mins=10
log.dirs=/kafka-logs
log.index.interval.bytes=4096
log.index.size.max.bytes=10485760
log.retention.bytes=-1
log.retention.hours=168
log.roll.hours=168
log.segment.bytes=1073741824
message.max.bytes=1000000
min.insync.replicas=1
num.io.threads=8
num.network.threads=3
num.partitions=1
num.recovery.threads.per.data.dir=1
num.replica.fetchers=1
offset.metadata.max.bytes=4096
offsets.commit.required.acks=-1
offsets.commit.timeout.ms=5000
offsets.load.buffer.size=5242880
offsets.retention.check.interval.ms=600000
offsets.retention.minutes=86400000
offsets.topic.compression.codec=0
offsets.topic.num.partitions=50
offsets.topic.replication.factor=3
offsets.topic.segment.bytes=104857600
port=6667
producer.purgatory.purge.interval.requests=10000
queued.max.requests=500
replica.fetch.max.bytes=1048576
replica.fetch.min.bytes=1
replica.fetch.wait.max.ms=500
replica.high.watermark.checkpoint.interval.ms=5000
replica.lag.max.messages=4000
replica.lag.time.max.ms=10000
replica.socket.receive.buffer.bytes=65536
replica.socket.timeout.ms=30000
security.inter.broker.protocol=SSL
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
socket.send.buffer.bytes=102400
ssl.client.auth=none
ssl.enabled.protocols=TLSv1.2,TLSv1.1,TLSv1
ssl.endpoint.identification.algorithm=HTTPS
ssl.key.password=changeit
ssl.keystore.location=/tmp/ssl-kafka/server.keystore.jks
ssl.keystore.password=changeit
ssl.keystore.type=JKS
ssl.secure.random.implementation=SHA1PRNG
ssl.truststore.location=/tmp/ssl-kafka/server.truststore.jks
ssl.truststore.password=changeit
ssl.truststore.type=JKS
zookeeper.connect=nwk2-bdp-kafka-05.gdcs-qa.apple.com:2181,nwk2-bdp-kafka-04.gdcs-qa.apple.com:2181,nwk2-bdp-kafka-06.gdcs-qa.apple.com:2181
zookeeper.connection.timeout.ms=25000
zookeeper.session.timeout.ms=30000
zookeeper.sync.time.ms=2000
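
# --- Not part of the Ambari-generated file: minimal client-side SSL sketch (assumption) ---
# This broker exposes an SSL listener on port 6668 alongside the PLAINTEXT listener on 6667.
# A client that trusts the broker's certificate could connect with properties like the
# commented example below; the truststore path and password are placeholders, not values
# taken from this cluster.
# bootstrap.servers=nwk2-bdp-kafka-04.gdcs-qa.apple.com:6668
# security.protocol=SSL
# ssl.truststore.location=/path/to/client.truststore.jks
# ssl.truststore.password=<client-truststore-password>
# ssl.truststore.type=JKS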