<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>question Re: Mapreduce 2 and YARN auto stop after restart a few second, I use Hortonwork 3.1.1 in Support Questions</title>
    <link>https://community.cloudera.com/t5/Support-Questions/Mapreduce-2-and-YARN-auto-stop-after-restart-a-few-second-I/m-p/239690#M201499</link>
    <description>&lt;P&gt;hi @Geoffrey Shelton Okot &lt;/P&gt;&lt;P&gt; My kafka service is currently not working , Can you help me please ?  Here is the log on : /var/log/kafka/server.log and /var/log/kafka/kafka.err&lt;/P&gt;&lt;P&gt;&lt;BR /&gt;&lt;/P&gt;&lt;P&gt;[2019-05-10 16:00:51,711] INFO [GroupMetadataManager brokerId=1001] Removed 0 expired offsets in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:25,008] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:25,518] INFO starting (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:25,519] INFO Connecting to zookeeper on am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181 (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:25,532] INFO [ZooKeeperClient] Initializing a new session to am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:25,551] INFO [ZooKeeperClient] Waiting until connected. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:25,616] INFO [ZooKeeperClient] Connected. 
(kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:25,881] INFO Cluster ID = z-4P_uf-RzmpT2QvMnOD2g (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:25,950] INFO KafkaConfig values: &lt;/P&gt;&lt;P&gt;    advertised.host.name = null&lt;/P&gt;&lt;P&gt;    advertised.listeners = null&lt;/P&gt;&lt;P&gt;    advertised.port = null&lt;/P&gt;&lt;P&gt;    alter.config.policy.class.name = null&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    authorizer.class.name = &lt;/P&gt;&lt;P&gt;    auto.create.topics.enable = true&lt;/P&gt;&lt;P&gt;    auto.leader.rebalance.enable = true&lt;/P&gt;&lt;P&gt;    background.threads = 10&lt;/P&gt;&lt;P&gt;    broker.id = -1&lt;/P&gt;&lt;P&gt;    broker.id.generation.enable = true&lt;/P&gt;&lt;P&gt;    broker.rack = null&lt;/P&gt;&lt;P&gt;    client.quota.callback.class = null&lt;/P&gt;&lt;P&gt;    compression.type = producer&lt;/P&gt;&lt;P&gt;    connections.max.idle.ms = 600000&lt;/P&gt;&lt;P&gt;    controlled.shutdown.enable = true&lt;/P&gt;&lt;P&gt;    controlled.shutdown.max.retries = 3&lt;/P&gt;&lt;P&gt;    controlled.shutdown.retry.backoff.ms = 5000&lt;/P&gt;&lt;P&gt;    controller.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    create.topic.policy.class.name = null&lt;/P&gt;&lt;P&gt;    default.replication.factor = 1&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.check.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.time.ms = 86400000&lt;/P&gt;&lt;P&gt;    delegation.token.master.key = null&lt;/P&gt;&lt;P&gt;    delegation.token.max.lifetime.ms = 604800000&lt;/P&gt;&lt;P&gt;    delete.records.purgatory.purge.interval.requests = 1&lt;/P&gt;&lt;P&gt;    delete.topic.enable = true&lt;/P&gt;&lt;P&gt;    fetch.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    group.initial.rebalance.delay.ms = 3000&lt;/P&gt;&lt;P&gt;    group.max.session.timeout.ms = 
300000&lt;/P&gt;&lt;P&gt;    group.min.session.timeout.ms = 6000&lt;/P&gt;&lt;P&gt;    host.name = &lt;/P&gt;&lt;P&gt;    inter.broker.listener.name = null&lt;/P&gt;&lt;P&gt;    inter.broker.protocol.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    leader.imbalance.check.interval.seconds = 300&lt;/P&gt;&lt;P&gt;    leader.imbalance.per.broker.percentage = 10&lt;/P&gt;&lt;P&gt;    listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL&lt;/P&gt;&lt;P&gt;    listeners = PLAINTEXT://am-bigdata-01.am.local:6667&lt;/P&gt;&lt;P&gt;    log.cleaner.backoff.ms = 15000&lt;/P&gt;&lt;P&gt;    log.cleaner.dedupe.buffer.size = 134217728&lt;/P&gt;&lt;P&gt;    log.cleaner.delete.retention.ms = 86400000&lt;/P&gt;&lt;P&gt;    log.cleaner.enable = true&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.load.factor = 0.9&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.size = 524288&lt;/P&gt;&lt;P&gt;    log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308&lt;/P&gt;&lt;P&gt;    log.cleaner.min.cleanable.ratio = 0.5&lt;/P&gt;&lt;P&gt;    log.cleaner.min.compaction.lag.ms = 0&lt;/P&gt;&lt;P&gt;    log.cleaner.threads = 1&lt;/P&gt;&lt;P&gt;    log.cleanup.policy = [delete]&lt;/P&gt;&lt;P&gt;    log.dir = /tmp/kafka-logs&lt;/P&gt;&lt;P&gt;    log.dirs = /kafka-logs&lt;/P&gt;&lt;P&gt;    log.flush.interval.messages = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.interval.ms = null&lt;/P&gt;&lt;P&gt;    log.flush.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.flush.scheduler.interval.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.start.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.index.interval.bytes = 4096&lt;/P&gt;&lt;P&gt;    log.index.size.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    log.message.downconversion.enable = true&lt;/P&gt;&lt;P&gt;    log.message.format.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    log.message.timestamp.difference.max.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    
log.message.timestamp.type = CreateTime&lt;/P&gt;&lt;P&gt;    log.preallocate = false&lt;/P&gt;&lt;P&gt;    log.retention.bytes = -1&lt;/P&gt;&lt;P&gt;    log.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    log.retention.hours = 168&lt;/P&gt;&lt;P&gt;    log.retention.minutes = null&lt;/P&gt;&lt;P&gt;    log.retention.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.hours = 168&lt;/P&gt;&lt;P&gt;    log.roll.jitter.hours = 0&lt;/P&gt;&lt;P&gt;    log.roll.jitter.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.ms = null&lt;/P&gt;&lt;P&gt;    log.segment.bytes = 1073741824&lt;/P&gt;&lt;P&gt;    log.segment.delete.delay.ms = 60000&lt;/P&gt;&lt;P&gt;    max.connections.per.ip = 2147483647&lt;/P&gt;&lt;P&gt;    max.connections.per.ip.overrides = &lt;/P&gt;&lt;P&gt;    max.incremental.fetch.session.cache.slots = 1000&lt;/P&gt;&lt;P&gt;    message.max.bytes = 1000000&lt;/P&gt;&lt;P&gt;    metric.reporters = []&lt;/P&gt;&lt;P&gt;    metrics.num.samples = 2&lt;/P&gt;&lt;P&gt;    metrics.recording.level = INFO&lt;/P&gt;&lt;P&gt;    metrics.sample.window.ms = 30000&lt;/P&gt;&lt;P&gt;    min.insync.replicas = 1&lt;/P&gt;&lt;P&gt;    num.io.threads = 8&lt;/P&gt;&lt;P&gt;    num.network.threads = 3&lt;/P&gt;&lt;P&gt;    num.partitions = 1&lt;/P&gt;&lt;P&gt;    num.recovery.threads.per.data.dir = 1&lt;/P&gt;&lt;P&gt;    num.replica.alter.log.dirs.threads = null&lt;/P&gt;&lt;P&gt;    num.replica.fetchers = 1&lt;/P&gt;&lt;P&gt;    offset.metadata.max.bytes = 4096&lt;/P&gt;&lt;P&gt;    offsets.commit.required.acks = -1&lt;/P&gt;&lt;P&gt;    offsets.commit.timeout.ms = 5000&lt;/P&gt;&lt;P&gt;    offsets.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    offsets.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    offsets.retention.minutes = 86400000&lt;/P&gt;&lt;P&gt;    offsets.topic.compression.codec = 0&lt;/P&gt;&lt;P&gt;    offsets.topic.num.partitions = 50&lt;/P&gt;&lt;P&gt;    offsets.topic.replication.factor = 3&lt;/P&gt;&lt;P&gt;    offsets.topic.segment.bytes = 
104857600&lt;/P&gt;&lt;P&gt;    password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding&lt;/P&gt;&lt;P&gt;    password.encoder.iterations = 4096&lt;/P&gt;&lt;P&gt;    password.encoder.key.length = 128&lt;/P&gt;&lt;P&gt;    password.encoder.keyfactory.algorithm = null&lt;/P&gt;&lt;P&gt;    password.encoder.old.secret = null&lt;/P&gt;&lt;P&gt;    password.encoder.secret = null&lt;/P&gt;&lt;P&gt;    port = 6667&lt;/P&gt;&lt;P&gt;    principal.builder.class = null&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.entry.expiration.ms = 300000&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.max.size = 1000&lt;/P&gt;&lt;P&gt;    producer.metrics.enable = false&lt;/P&gt;&lt;P&gt;    producer.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    queued.max.request.bytes = -1&lt;/P&gt;&lt;P&gt;    queued.max.requests = 500&lt;/P&gt;&lt;P&gt;    quota.consumer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.producer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.window.num = 11&lt;/P&gt;&lt;P&gt;    quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.backoff.ms = 1000&lt;/P&gt;&lt;P&gt;    replica.fetch.max.bytes = 1048576&lt;/P&gt;&lt;P&gt;    replica.fetch.min.bytes = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.response.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    replica.fetch.wait.max.ms = 500&lt;/P&gt;&lt;P&gt;    replica.high.watermark.checkpoint.interval.ms = 5000&lt;/P&gt;&lt;P&gt;    replica.lag.time.max.ms = 10000&lt;/P&gt;&lt;P&gt;    replica.socket.receive.buffer.bytes = 65536&lt;/P&gt;&lt;P&gt;    replica.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    request.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    reserved.broker.max.id = 1000&lt;/P&gt;&lt;P&gt;    sasl.client.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.enabled.mechanisms = [GSSAPI]&lt;/P&gt;&lt;P&gt;    sasl.jaas.config = null&lt;/P&gt;&lt;P&gt;    
sasl.kerberos.kinit.cmd = /usr/bin/kinit&lt;/P&gt;&lt;P&gt;    sasl.kerberos.min.time.before.relogin = 60000&lt;/P&gt;&lt;P&gt;    sasl.kerberos.principal.to.local.rules = [DEFAULT]&lt;/P&gt;&lt;P&gt;    sasl.kerberos.service.name = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.buffer.seconds = 300&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.min.period.seconds = 60&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.mechanism.inter.broker.protocol = GSSAPI&lt;/P&gt;&lt;P&gt;    sasl.server.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    security.inter.broker.protocol = PLAINTEXT&lt;/P&gt;&lt;P&gt;    socket.receive.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    socket.request.max.bytes = 104857600&lt;/P&gt;&lt;P&gt;    socket.send.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    ssl.cipher.suites = []&lt;/P&gt;&lt;P&gt;    ssl.client.auth = none&lt;/P&gt;&lt;P&gt;    ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]&lt;/P&gt;&lt;P&gt;    ssl.endpoint.identification.algorithm = https&lt;/P&gt;&lt;P&gt;    ssl.key.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keymanager.algorithm = SunX509&lt;/P&gt;&lt;P&gt;    ssl.keystore.location = &lt;/P&gt;&lt;P&gt;    ssl.keystore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keystore.type = JKS&lt;/P&gt;&lt;P&gt;    ssl.protocol = TLS&lt;/P&gt;&lt;P&gt;    ssl.provider = null&lt;/P&gt;&lt;P&gt;    ssl.secure.random.implementation = null&lt;/P&gt;&lt;P&gt;    ssl.trustmanager.algorithm = PKIX&lt;/P&gt;&lt;P&gt;    ssl.truststore.location = &lt;/P&gt;&lt;P&gt;    ssl.truststore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.truststore.type = JKS&lt;/P&gt;&lt;P&gt;    
transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    transaction.max.timeout.ms = 900000&lt;/P&gt;&lt;P&gt;    transaction.remove.expired.transaction.cleanup.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    transaction.state.log.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    transaction.state.log.min.isr = 2&lt;/P&gt;&lt;P&gt;    transaction.state.log.num.partitions = 50&lt;/P&gt;&lt;P&gt;    transaction.state.log.replication.factor = 3&lt;/P&gt;&lt;P&gt;    transaction.state.log.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    transactional.id.expiration.ms = 604800000&lt;/P&gt;&lt;P&gt;    unclean.leader.election.enable = false&lt;/P&gt;&lt;P&gt;    zookeeper.connect = am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181&lt;/P&gt;&lt;P&gt;    zookeeper.connection.timeout.ms = 25000&lt;/P&gt;&lt;P&gt;    zookeeper.max.in.flight.requests = 10&lt;/P&gt;&lt;P&gt;    zookeeper.session.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    zookeeper.set.acl = false&lt;/P&gt;&lt;P&gt;    zookeeper.sync.time.ms = 2000&lt;/P&gt;&lt;P&gt; (kafka.server.KafkaConfig)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:25,956] INFO KafkaConfig values: &lt;/P&gt;&lt;P&gt;    advertised.host.name = null&lt;/P&gt;&lt;P&gt;    advertised.listeners = null&lt;/P&gt;&lt;P&gt;    advertised.port = null&lt;/P&gt;&lt;P&gt;    alter.config.policy.class.name = null&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    authorizer.class.name = &lt;/P&gt;&lt;P&gt;    auto.create.topics.enable = true&lt;/P&gt;&lt;P&gt;    auto.leader.rebalance.enable = true&lt;/P&gt;&lt;P&gt;    background.threads = 10&lt;/P&gt;&lt;P&gt;    broker.id = -1&lt;/P&gt;&lt;P&gt;    broker.id.generation.enable = true&lt;/P&gt;&lt;P&gt;    broker.rack = null&lt;/P&gt;&lt;P&gt;    client.quota.callback.class = null&lt;/P&gt;&lt;P&gt;    compression.type = 
producer&lt;/P&gt;&lt;P&gt;    connections.max.idle.ms = 600000&lt;/P&gt;&lt;P&gt;    controlled.shutdown.enable = true&lt;/P&gt;&lt;P&gt;    controlled.shutdown.max.retries = 3&lt;/P&gt;&lt;P&gt;    controlled.shutdown.retry.backoff.ms = 5000&lt;/P&gt;&lt;P&gt;    controller.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    create.topic.policy.class.name = null&lt;/P&gt;&lt;P&gt;    default.replication.factor = 1&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.check.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.time.ms = 86400000&lt;/P&gt;&lt;P&gt;    delegation.token.master.key = null&lt;/P&gt;&lt;P&gt;    delegation.token.max.lifetime.ms = 604800000&lt;/P&gt;&lt;P&gt;    delete.records.purgatory.purge.interval.requests = 1&lt;/P&gt;&lt;P&gt;    delete.topic.enable = true&lt;/P&gt;&lt;P&gt;    fetch.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    group.initial.rebalance.delay.ms = 3000&lt;/P&gt;&lt;P&gt;    group.max.session.timeout.ms = 300000&lt;/P&gt;&lt;P&gt;    group.min.session.timeout.ms = 6000&lt;/P&gt;&lt;P&gt;    host.name = &lt;/P&gt;&lt;P&gt;    inter.broker.listener.name = null&lt;/P&gt;&lt;P&gt;    inter.broker.protocol.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    leader.imbalance.check.interval.seconds = 300&lt;/P&gt;&lt;P&gt;    leader.imbalance.per.broker.percentage = 10&lt;/P&gt;&lt;P&gt;    listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL&lt;/P&gt;&lt;P&gt;    listeners = PLAINTEXT://am-bigdata-01.am.local:6667&lt;/P&gt;&lt;P&gt;    log.cleaner.backoff.ms = 15000&lt;/P&gt;&lt;P&gt;    log.cleaner.dedupe.buffer.size = 134217728&lt;/P&gt;&lt;P&gt;    log.cleaner.delete.retention.ms = 86400000&lt;/P&gt;&lt;P&gt;    log.cleaner.enable = true&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.load.factor = 0.9&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.size = 524288&lt;/P&gt;&lt;P&gt;    log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308&lt;/P&gt;&lt;P&gt;   
 log.cleaner.min.cleanable.ratio = 0.5&lt;/P&gt;&lt;P&gt;    log.cleaner.min.compaction.lag.ms = 0&lt;/P&gt;&lt;P&gt;    log.cleaner.threads = 1&lt;/P&gt;&lt;P&gt;    log.cleanup.policy = [delete]&lt;/P&gt;&lt;P&gt;    log.dir = /tmp/kafka-logs&lt;/P&gt;&lt;P&gt;    log.dirs = /kafka-logs&lt;/P&gt;&lt;P&gt;    log.flush.interval.messages = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.interval.ms = null&lt;/P&gt;&lt;P&gt;    log.flush.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.flush.scheduler.interval.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.start.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.index.interval.bytes = 4096&lt;/P&gt;&lt;P&gt;    log.index.size.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    log.message.downconversion.enable = true&lt;/P&gt;&lt;P&gt;    log.message.format.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    log.message.timestamp.difference.max.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.message.timestamp.type = CreateTime&lt;/P&gt;&lt;P&gt;    log.preallocate = false&lt;/P&gt;&lt;P&gt;    log.retention.bytes = -1&lt;/P&gt;&lt;P&gt;    log.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    log.retention.hours = 168&lt;/P&gt;&lt;P&gt;    log.retention.minutes = null&lt;/P&gt;&lt;P&gt;    log.retention.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.hours = 168&lt;/P&gt;&lt;P&gt;    log.roll.jitter.hours = 0&lt;/P&gt;&lt;P&gt;    log.roll.jitter.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.ms = null&lt;/P&gt;&lt;P&gt;    log.segment.bytes = 1073741824&lt;/P&gt;&lt;P&gt;    log.segment.delete.delay.ms = 60000&lt;/P&gt;&lt;P&gt;    max.connections.per.ip = 2147483647&lt;/P&gt;&lt;P&gt;    max.connections.per.ip.overrides = &lt;/P&gt;&lt;P&gt;    max.incremental.fetch.session.cache.slots = 1000&lt;/P&gt;&lt;P&gt;    message.max.bytes = 1000000&lt;/P&gt;&lt;P&gt;    metric.reporters = []&lt;/P&gt;&lt;P&gt;    metrics.num.samples = 2&lt;/P&gt;&lt;P&gt;    metrics.recording.level = 
INFO&lt;/P&gt;&lt;P&gt;    metrics.sample.window.ms = 30000&lt;/P&gt;&lt;P&gt;    min.insync.replicas = 1&lt;/P&gt;&lt;P&gt;    num.io.threads = 8&lt;/P&gt;&lt;P&gt;    num.network.threads = 3&lt;/P&gt;&lt;P&gt;    num.partitions = 1&lt;/P&gt;&lt;P&gt;    num.recovery.threads.per.data.dir = 1&lt;/P&gt;&lt;P&gt;    num.replica.alter.log.dirs.threads = null&lt;/P&gt;&lt;P&gt;    num.replica.fetchers = 1&lt;/P&gt;&lt;P&gt;    offset.metadata.max.bytes = 4096&lt;/P&gt;&lt;P&gt;    offsets.commit.required.acks = -1&lt;/P&gt;&lt;P&gt;    offsets.commit.timeout.ms = 5000&lt;/P&gt;&lt;P&gt;    offsets.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    offsets.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    offsets.retention.minutes = 86400000&lt;/P&gt;&lt;P&gt;    offsets.topic.compression.codec = 0&lt;/P&gt;&lt;P&gt;    offsets.topic.num.partitions = 50&lt;/P&gt;&lt;P&gt;    offsets.topic.replication.factor = 3&lt;/P&gt;&lt;P&gt;    offsets.topic.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding&lt;/P&gt;&lt;P&gt;    password.encoder.iterations = 4096&lt;/P&gt;&lt;P&gt;    password.encoder.key.length = 128&lt;/P&gt;&lt;P&gt;    password.encoder.keyfactory.algorithm = null&lt;/P&gt;&lt;P&gt;    password.encoder.old.secret = null&lt;/P&gt;&lt;P&gt;    password.encoder.secret = null&lt;/P&gt;&lt;P&gt;    port = 6667&lt;/P&gt;&lt;P&gt;    principal.builder.class = null&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.entry.expiration.ms = 300000&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.max.size = 1000&lt;/P&gt;&lt;P&gt;    producer.metrics.enable = false&lt;/P&gt;&lt;P&gt;    producer.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    queued.max.request.bytes = -1&lt;/P&gt;&lt;P&gt;    queued.max.requests = 500&lt;/P&gt;&lt;P&gt;    quota.consumer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.producer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.window.num = 
11&lt;/P&gt;&lt;P&gt;    quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.backoff.ms = 1000&lt;/P&gt;&lt;P&gt;    replica.fetch.max.bytes = 1048576&lt;/P&gt;&lt;P&gt;    replica.fetch.min.bytes = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.response.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    replica.fetch.wait.max.ms = 500&lt;/P&gt;&lt;P&gt;    replica.high.watermark.checkpoint.interval.ms = 5000&lt;/P&gt;&lt;P&gt;    replica.lag.time.max.ms = 10000&lt;/P&gt;&lt;P&gt;    replica.socket.receive.buffer.bytes = 65536&lt;/P&gt;&lt;P&gt;    replica.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    request.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    reserved.broker.max.id = 1000&lt;/P&gt;&lt;P&gt;    sasl.client.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.enabled.mechanisms = [GSSAPI]&lt;/P&gt;&lt;P&gt;    sasl.jaas.config = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.kinit.cmd = /usr/bin/kinit&lt;/P&gt;&lt;P&gt;    sasl.kerberos.min.time.before.relogin = 60000&lt;/P&gt;&lt;P&gt;    sasl.kerberos.principal.to.local.rules = [DEFAULT]&lt;/P&gt;&lt;P&gt;    sasl.kerberos.service.name = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.buffer.seconds = 300&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.min.period.seconds = 60&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.mechanism.inter.broker.protocol = GSSAPI&lt;/P&gt;&lt;P&gt;    sasl.server.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    security.inter.broker.protocol = PLAINTEXT&lt;/P&gt;&lt;P&gt;    socket.receive.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt; 
   socket.request.max.bytes = 104857600&lt;/P&gt;&lt;P&gt;    socket.send.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    ssl.cipher.suites = []&lt;/P&gt;&lt;P&gt;    ssl.client.auth = none&lt;/P&gt;&lt;P&gt;    ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]&lt;/P&gt;&lt;P&gt;    ssl.endpoint.identification.algorithm = https&lt;/P&gt;&lt;P&gt;    ssl.key.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keymanager.algorithm = SunX509&lt;/P&gt;&lt;P&gt;    ssl.keystore.location = &lt;/P&gt;&lt;P&gt;    ssl.keystore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keystore.type = JKS&lt;/P&gt;&lt;P&gt;    ssl.protocol = TLS&lt;/P&gt;&lt;P&gt;    ssl.provider = null&lt;/P&gt;&lt;P&gt;    ssl.secure.random.implementation = null&lt;/P&gt;&lt;P&gt;    ssl.trustmanager.algorithm = PKIX&lt;/P&gt;&lt;P&gt;    ssl.truststore.location = &lt;/P&gt;&lt;P&gt;    ssl.truststore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.truststore.type = JKS&lt;/P&gt;&lt;P&gt;    transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    transaction.max.timeout.ms = 900000&lt;/P&gt;&lt;P&gt;    transaction.remove.expired.transaction.cleanup.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    transaction.state.log.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    transaction.state.log.min.isr = 2&lt;/P&gt;&lt;P&gt;    transaction.state.log.num.partitions = 50&lt;/P&gt;&lt;P&gt;    transaction.state.log.replication.factor = 3&lt;/P&gt;&lt;P&gt;    transaction.state.log.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    transactional.id.expiration.ms = 604800000&lt;/P&gt;&lt;P&gt;    unclean.leader.election.enable = false&lt;/P&gt;&lt;P&gt;    zookeeper.connect = am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181&lt;/P&gt;&lt;P&gt;    zookeeper.connection.timeout.ms = 25000&lt;/P&gt;&lt;P&gt;    zookeeper.max.in.flight.requests = 10&lt;/P&gt;&lt;P&gt;    zookeeper.session.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    zookeeper.set.acl = 
false&lt;/P&gt;&lt;P&gt;    zookeeper.sync.time.ms = 2000&lt;/P&gt;&lt;P&gt; (kafka.server.KafkaConfig)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:25,982] INFO [ThrottledChannelReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:25,982] INFO [ThrottledChannelReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:25,983] INFO [ThrottledChannelReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:26,012] ERROR [KafkaServer id=1001] Fatal error during KafkaServer startup. Prepare to shutdown (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;org.apache.kafka.common.KafkaException: Failed to acquire lock on file .lock in /kafka-logs. A Kafka instance in another process or thread is using this directory.&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$$anonfun$lockLogDirs$1.apply(LogManager.scala:240)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$$anonfun$lockLogDirs$1.apply(LogManager.scala:236)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)&lt;/P&gt;&lt;P&gt;    at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$class.flatMap(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.AbstractTraversable.flatMap(Traversable.scala:104)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager.lockLogDirs(LogManager.scala:236)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager.&amp;lt;init&amp;gt;(LogManager.scala:97)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$.apply(LogManager.scala:958)&lt;/P&gt;&lt;P&gt;    at 
kafka.server.KafkaServer.startup(KafkaServer.scala:237)&lt;/P&gt;&lt;P&gt;    at kafka.server.KafkaServerStartable.startup(KafkaServerStartable.scala:38)&lt;/P&gt;&lt;P&gt;    at kafka.Kafka$.main(Kafka.scala:75)&lt;/P&gt;&lt;P&gt;    at kafka.Kafka.main(Kafka.scala)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:26,014] INFO [KafkaServer id=1001] shutting down (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:26,017] INFO [ZooKeeperClient] Closing. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:26,020] INFO [ZooKeeperClient] Closed. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:26,020] INFO [ThrottledChannelReaper-Fetch]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:26,982] INFO [ThrottledChannelReaper-Fetch]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:26,982] INFO [ThrottledChannelReaper-Fetch]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:26,982] INFO [ThrottledChannelReaper-Produce]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:27,982] INFO [ThrottledChannelReaper-Produce]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:27,982] INFO [ThrottledChannelReaper-Produce]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:27,983] INFO [ThrottledChannelReaper-Request]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:27,983] INFO [ThrottledChannelReaper-Request]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:27,983] INFO [ThrottledChannelReaper-Request]: Shutdown completed 
(kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:27,988] INFO [KafkaServer id=1001] shut down completed (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:27,988] ERROR Exiting Kafka. (kafka.server.KafkaServerStartable)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:27,990] INFO [KafkaServer id=1001] shutting down (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:10:51,711] INFO [GroupMetadataManager brokerId=1001] Removed 0 expired offsets in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:34,264] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:34,787] INFO starting (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:34,788] INFO Connecting to zookeeper on am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181 (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:34,801] INFO [ZooKeeperClient] Initializing a new session to am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:34,820] INFO [ZooKeeperClient] Waiting until connected. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:34,893] INFO [ZooKeeperClient] Connected. 
(kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:35,122] INFO Cluster ID = z-4P_uf-RzmpT2QvMnOD2g (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:35,200] INFO KafkaConfig values: &lt;/P&gt;&lt;P&gt;    advertised.host.name = null&lt;/P&gt;&lt;P&gt;    advertised.listeners = null&lt;/P&gt;&lt;P&gt;    advertised.port = null&lt;/P&gt;&lt;P&gt;    alter.config.policy.class.name = null&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    authorizer.class.name = &lt;/P&gt;&lt;P&gt;    auto.create.topics.enable = true&lt;/P&gt;&lt;P&gt;    auto.leader.rebalance.enable = true&lt;/P&gt;&lt;P&gt;    background.threads = 10&lt;/P&gt;&lt;P&gt;    broker.id = -1&lt;/P&gt;&lt;P&gt;    broker.id.generation.enable = true&lt;/P&gt;&lt;P&gt;    broker.rack = null&lt;/P&gt;&lt;P&gt;    client.quota.callback.class = null&lt;/P&gt;&lt;P&gt;    compression.type = producer&lt;/P&gt;&lt;P&gt;    connections.max.idle.ms = 600000&lt;/P&gt;&lt;P&gt;    controlled.shutdown.enable = true&lt;/P&gt;&lt;P&gt;    controlled.shutdown.max.retries = 3&lt;/P&gt;&lt;P&gt;    controlled.shutdown.retry.backoff.ms = 5000&lt;/P&gt;&lt;P&gt;    controller.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    create.topic.policy.class.name = null&lt;/P&gt;&lt;P&gt;    default.replication.factor = 1&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.check.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.time.ms = 86400000&lt;/P&gt;&lt;P&gt;    delegation.token.master.key = null&lt;/P&gt;&lt;P&gt;    delegation.token.max.lifetime.ms = 604800000&lt;/P&gt;&lt;P&gt;    delete.records.purgatory.purge.interval.requests = 1&lt;/P&gt;&lt;P&gt;    delete.topic.enable = true&lt;/P&gt;&lt;P&gt;    fetch.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    group.initial.rebalance.delay.ms = 3000&lt;/P&gt;&lt;P&gt;    group.max.session.timeout.ms = 
300000&lt;/P&gt;&lt;P&gt;    group.min.session.timeout.ms = 6000&lt;/P&gt;&lt;P&gt;    host.name = &lt;/P&gt;&lt;P&gt;    inter.broker.listener.name = null&lt;/P&gt;&lt;P&gt;    inter.broker.protocol.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    leader.imbalance.check.interval.seconds = 300&lt;/P&gt;&lt;P&gt;    leader.imbalance.per.broker.percentage = 10&lt;/P&gt;&lt;P&gt;    listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL&lt;/P&gt;&lt;P&gt;    listeners = PLAINTEXT://am-bigdata-01.am.local:6667&lt;/P&gt;&lt;P&gt;    log.cleaner.backoff.ms = 15000&lt;/P&gt;&lt;P&gt;    log.cleaner.dedupe.buffer.size = 134217728&lt;/P&gt;&lt;P&gt;    log.cleaner.delete.retention.ms = 86400000&lt;/P&gt;&lt;P&gt;    log.cleaner.enable = true&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.load.factor = 0.9&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.size = 524288&lt;/P&gt;&lt;P&gt;    log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308&lt;/P&gt;&lt;P&gt;    log.cleaner.min.cleanable.ratio = 0.5&lt;/P&gt;&lt;P&gt;    log.cleaner.min.compaction.lag.ms = 0&lt;/P&gt;&lt;P&gt;    log.cleaner.threads = 1&lt;/P&gt;&lt;P&gt;    log.cleanup.policy = [delete]&lt;/P&gt;&lt;P&gt;    log.dir = /tmp/kafka-logs&lt;/P&gt;&lt;P&gt;    log.dirs = /kafka-logs&lt;/P&gt;&lt;P&gt;    log.flush.interval.messages = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.interval.ms = null&lt;/P&gt;&lt;P&gt;    log.flush.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.flush.scheduler.interval.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.start.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.index.interval.bytes = 4096&lt;/P&gt;&lt;P&gt;    log.index.size.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    log.message.downconversion.enable = true&lt;/P&gt;&lt;P&gt;    log.message.format.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    log.message.timestamp.difference.max.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    
log.message.timestamp.type = CreateTime&lt;/P&gt;&lt;P&gt;    log.preallocate = false&lt;/P&gt;&lt;P&gt;    log.retention.bytes = -1&lt;/P&gt;&lt;P&gt;    log.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    log.retention.hours = 168&lt;/P&gt;&lt;P&gt;    log.retention.minutes = null&lt;/P&gt;&lt;P&gt;    log.retention.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.hours = 168&lt;/P&gt;&lt;P&gt;    log.roll.jitter.hours = 0&lt;/P&gt;&lt;P&gt;    log.roll.jitter.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.ms = null&lt;/P&gt;&lt;P&gt;    log.segment.bytes = 1073741824&lt;/P&gt;&lt;P&gt;    log.segment.delete.delay.ms = 60000&lt;/P&gt;&lt;P&gt;    max.connections.per.ip = 2147483647&lt;/P&gt;&lt;P&gt;    max.connections.per.ip.overrides = &lt;/P&gt;&lt;P&gt;    max.incremental.fetch.session.cache.slots = 1000&lt;/P&gt;&lt;P&gt;    message.max.bytes = 1000000&lt;/P&gt;&lt;P&gt;    metric.reporters = []&lt;/P&gt;&lt;P&gt;    metrics.num.samples = 2&lt;/P&gt;&lt;P&gt;    metrics.recording.level = INFO&lt;/P&gt;&lt;P&gt;    metrics.sample.window.ms = 30000&lt;/P&gt;&lt;P&gt;    min.insync.replicas = 1&lt;/P&gt;&lt;P&gt;    num.io.threads = 8&lt;/P&gt;&lt;P&gt;    num.network.threads = 3&lt;/P&gt;&lt;P&gt;    num.partitions = 1&lt;/P&gt;&lt;P&gt;    num.recovery.threads.per.data.dir = 1&lt;/P&gt;&lt;P&gt;    num.replica.alter.log.dirs.threads = null&lt;/P&gt;&lt;P&gt;    num.replica.fetchers = 1&lt;/P&gt;&lt;P&gt;    offset.metadata.max.bytes = 4096&lt;/P&gt;&lt;P&gt;    offsets.commit.required.acks = -1&lt;/P&gt;&lt;P&gt;    offsets.commit.timeout.ms = 5000&lt;/P&gt;&lt;P&gt;    offsets.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    offsets.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    offsets.retention.minutes = 86400000&lt;/P&gt;&lt;P&gt;    offsets.topic.compression.codec = 0&lt;/P&gt;&lt;P&gt;    offsets.topic.num.partitions = 50&lt;/P&gt;&lt;P&gt;    offsets.topic.replication.factor = 3&lt;/P&gt;&lt;P&gt;    offsets.topic.segment.bytes = 
104857600&lt;/P&gt;&lt;P&gt;    password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding&lt;/P&gt;&lt;P&gt;    password.encoder.iterations = 4096&lt;/P&gt;&lt;P&gt;    password.encoder.key.length = 128&lt;/P&gt;&lt;P&gt;    password.encoder.keyfactory.algorithm = null&lt;/P&gt;&lt;P&gt;    password.encoder.old.secret = null&lt;/P&gt;&lt;P&gt;    password.encoder.secret = null&lt;/P&gt;&lt;P&gt;    port = 6667&lt;/P&gt;&lt;P&gt;    principal.builder.class = null&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.entry.expiration.ms = 300000&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.max.size = 1000&lt;/P&gt;&lt;P&gt;    producer.metrics.enable = false&lt;/P&gt;&lt;P&gt;    producer.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    queued.max.request.bytes = -1&lt;/P&gt;&lt;P&gt;    queued.max.requests = 500&lt;/P&gt;&lt;P&gt;    quota.consumer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.producer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.window.num = 11&lt;/P&gt;&lt;P&gt;    quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.backoff.ms = 1000&lt;/P&gt;&lt;P&gt;    replica.fetch.max.bytes = 1048576&lt;/P&gt;&lt;P&gt;    replica.fetch.min.bytes = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.response.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    replica.fetch.wait.max.ms = 500&lt;/P&gt;&lt;P&gt;    replica.high.watermark.checkpoint.interval.ms = 5000&lt;/P&gt;&lt;P&gt;    replica.lag.time.max.ms = 10000&lt;/P&gt;&lt;P&gt;    replica.socket.receive.buffer.bytes = 65536&lt;/P&gt;&lt;P&gt;    replica.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    request.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    reserved.broker.max.id = 1000&lt;/P&gt;&lt;P&gt;    sasl.client.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.enabled.mechanisms = [GSSAPI]&lt;/P&gt;&lt;P&gt;    sasl.jaas.config = null&lt;/P&gt;&lt;P&gt;    
sasl.kerberos.kinit.cmd = /usr/bin/kinit&lt;/P&gt;&lt;P&gt;    sasl.kerberos.min.time.before.relogin = 60000&lt;/P&gt;&lt;P&gt;    sasl.kerberos.principal.to.local.rules = [DEFAULT]&lt;/P&gt;&lt;P&gt;    sasl.kerberos.service.name = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.buffer.seconds = 300&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.min.period.seconds = 60&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.mechanism.inter.broker.protocol = GSSAPI&lt;/P&gt;&lt;P&gt;    sasl.server.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    security.inter.broker.protocol = PLAINTEXT&lt;/P&gt;&lt;P&gt;    socket.receive.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    socket.request.max.bytes = 104857600&lt;/P&gt;&lt;P&gt;    socket.send.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    ssl.cipher.suites = []&lt;/P&gt;&lt;P&gt;    ssl.client.auth = none&lt;/P&gt;&lt;P&gt;    ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]&lt;/P&gt;&lt;P&gt;    ssl.endpoint.identification.algorithm = https&lt;/P&gt;&lt;P&gt;    ssl.key.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keymanager.algorithm = SunX509&lt;/P&gt;&lt;P&gt;    ssl.keystore.location = &lt;/P&gt;&lt;P&gt;    ssl.keystore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keystore.type = JKS&lt;/P&gt;&lt;P&gt;    ssl.protocol = TLS&lt;/P&gt;&lt;P&gt;    ssl.provider = null&lt;/P&gt;&lt;P&gt;    ssl.secure.random.implementation = null&lt;/P&gt;&lt;P&gt;    ssl.trustmanager.algorithm = PKIX&lt;/P&gt;&lt;P&gt;    ssl.truststore.location = &lt;/P&gt;&lt;P&gt;    ssl.truststore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.truststore.type = JKS&lt;/P&gt;&lt;P&gt;    
transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    transaction.max.timeout.ms = 900000&lt;/P&gt;&lt;P&gt;    transaction.remove.expired.transaction.cleanup.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    transaction.state.log.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    transaction.state.log.min.isr = 2&lt;/P&gt;&lt;P&gt;    transaction.state.log.num.partitions = 50&lt;/P&gt;&lt;P&gt;    transaction.state.log.replication.factor = 3&lt;/P&gt;&lt;P&gt;    transaction.state.log.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    transactional.id.expiration.ms = 604800000&lt;/P&gt;&lt;P&gt;    unclean.leader.election.enable = false&lt;/P&gt;&lt;P&gt;    zookeeper.connect = am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181&lt;/P&gt;&lt;P&gt;    zookeeper.connection.timeout.ms = 25000&lt;/P&gt;&lt;P&gt;    zookeeper.max.in.flight.requests = 10&lt;/P&gt;&lt;P&gt;    zookeeper.session.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    zookeeper.set.acl = false&lt;/P&gt;&lt;P&gt;    zookeeper.sync.time.ms = 2000&lt;/P&gt;&lt;P&gt; (kafka.server.KafkaConfig)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:35,209] INFO KafkaConfig values: &lt;/P&gt;&lt;P&gt;    advertised.host.name = null&lt;/P&gt;&lt;P&gt;    advertised.listeners = null&lt;/P&gt;&lt;P&gt;    advertised.port = null&lt;/P&gt;&lt;P&gt;    alter.config.policy.class.name = null&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    authorizer.class.name = &lt;/P&gt;&lt;P&gt;    auto.create.topics.enable = true&lt;/P&gt;&lt;P&gt;    auto.leader.rebalance.enable = true&lt;/P&gt;&lt;P&gt;    background.threads = 10&lt;/P&gt;&lt;P&gt;    broker.id = -1&lt;/P&gt;&lt;P&gt;    broker.id.generation.enable = true&lt;/P&gt;&lt;P&gt;    broker.rack = null&lt;/P&gt;&lt;P&gt;    client.quota.callback.class = null&lt;/P&gt;&lt;P&gt;    compression.type = 
producer&lt;/P&gt;&lt;P&gt;    connections.max.idle.ms = 600000&lt;/P&gt;&lt;P&gt;    controlled.shutdown.enable = true&lt;/P&gt;&lt;P&gt;    controlled.shutdown.max.retries = 3&lt;/P&gt;&lt;P&gt;    controlled.shutdown.retry.backoff.ms = 5000&lt;/P&gt;&lt;P&gt;    controller.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    create.topic.policy.class.name = null&lt;/P&gt;&lt;P&gt;    default.replication.factor = 1&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.check.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.time.ms = 86400000&lt;/P&gt;&lt;P&gt;    delegation.token.master.key = null&lt;/P&gt;&lt;P&gt;    delegation.token.max.lifetime.ms = 604800000&lt;/P&gt;&lt;P&gt;    delete.records.purgatory.purge.interval.requests = 1&lt;/P&gt;&lt;P&gt;    delete.topic.enable = true&lt;/P&gt;&lt;P&gt;    fetch.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    group.initial.rebalance.delay.ms = 3000&lt;/P&gt;&lt;P&gt;    group.max.session.timeout.ms = 300000&lt;/P&gt;&lt;P&gt;    group.min.session.timeout.ms = 6000&lt;/P&gt;&lt;P&gt;    host.name = &lt;/P&gt;&lt;P&gt;    inter.broker.listener.name = null&lt;/P&gt;&lt;P&gt;    inter.broker.protocol.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    leader.imbalance.check.interval.seconds = 300&lt;/P&gt;&lt;P&gt;    leader.imbalance.per.broker.percentage = 10&lt;/P&gt;&lt;P&gt;    listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL&lt;/P&gt;&lt;P&gt;    listeners = PLAINTEXT://am-bigdata-01.am.local:6667&lt;/P&gt;&lt;P&gt;    log.cleaner.backoff.ms = 15000&lt;/P&gt;&lt;P&gt;    log.cleaner.dedupe.buffer.size = 134217728&lt;/P&gt;&lt;P&gt;    log.cleaner.delete.retention.ms = 86400000&lt;/P&gt;&lt;P&gt;    log.cleaner.enable = true&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.load.factor = 0.9&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.size = 524288&lt;/P&gt;&lt;P&gt;    log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308&lt;/P&gt;&lt;P&gt;   
 log.cleaner.min.cleanable.ratio = 0.5&lt;/P&gt;&lt;P&gt;    log.cleaner.min.compaction.lag.ms = 0&lt;/P&gt;&lt;P&gt;    log.cleaner.threads = 1&lt;/P&gt;&lt;P&gt;    log.cleanup.policy = [delete]&lt;/P&gt;&lt;P&gt;    log.dir = /tmp/kafka-logs&lt;/P&gt;&lt;P&gt;    log.dirs = /kafka-logs&lt;/P&gt;&lt;P&gt;    log.flush.interval.messages = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.interval.ms = null&lt;/P&gt;&lt;P&gt;    log.flush.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.flush.scheduler.interval.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.start.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.index.interval.bytes = 4096&lt;/P&gt;&lt;P&gt;    log.index.size.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    log.message.downconversion.enable = true&lt;/P&gt;&lt;P&gt;    log.message.format.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    log.message.timestamp.difference.max.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.message.timestamp.type = CreateTime&lt;/P&gt;&lt;P&gt;    log.preallocate = false&lt;/P&gt;&lt;P&gt;    log.retention.bytes = -1&lt;/P&gt;&lt;P&gt;    log.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    log.retention.hours = 168&lt;/P&gt;&lt;P&gt;    log.retention.minutes = null&lt;/P&gt;&lt;P&gt;    log.retention.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.hours = 168&lt;/P&gt;&lt;P&gt;    log.roll.jitter.hours = 0&lt;/P&gt;&lt;P&gt;    log.roll.jitter.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.ms = null&lt;/P&gt;&lt;P&gt;    log.segment.bytes = 1073741824&lt;/P&gt;&lt;P&gt;    log.segment.delete.delay.ms = 60000&lt;/P&gt;&lt;P&gt;    max.connections.per.ip = 2147483647&lt;/P&gt;&lt;P&gt;    max.connections.per.ip.overrides = &lt;/P&gt;&lt;P&gt;    max.incremental.fetch.session.cache.slots = 1000&lt;/P&gt;&lt;P&gt;    message.max.bytes = 1000000&lt;/P&gt;&lt;P&gt;    metric.reporters = []&lt;/P&gt;&lt;P&gt;    metrics.num.samples = 2&lt;/P&gt;&lt;P&gt;    metrics.recording.level = 
INFO&lt;/P&gt;&lt;P&gt;    metrics.sample.window.ms = 30000&lt;/P&gt;&lt;P&gt;    min.insync.replicas = 1&lt;/P&gt;&lt;P&gt;    num.io.threads = 8&lt;/P&gt;&lt;P&gt;    num.network.threads = 3&lt;/P&gt;&lt;P&gt;    num.partitions = 1&lt;/P&gt;&lt;P&gt;    num.recovery.threads.per.data.dir = 1&lt;/P&gt;&lt;P&gt;    num.replica.alter.log.dirs.threads = null&lt;/P&gt;&lt;P&gt;    num.replica.fetchers = 1&lt;/P&gt;&lt;P&gt;    offset.metadata.max.bytes = 4096&lt;/P&gt;&lt;P&gt;    offsets.commit.required.acks = -1&lt;/P&gt;&lt;P&gt;    offsets.commit.timeout.ms = 5000&lt;/P&gt;&lt;P&gt;    offsets.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    offsets.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    offsets.retention.minutes = 86400000&lt;/P&gt;&lt;P&gt;    offsets.topic.compression.codec = 0&lt;/P&gt;&lt;P&gt;    offsets.topic.num.partitions = 50&lt;/P&gt;&lt;P&gt;    offsets.topic.replication.factor = 3&lt;/P&gt;&lt;P&gt;    offsets.topic.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding&lt;/P&gt;&lt;P&gt;    password.encoder.iterations = 4096&lt;/P&gt;&lt;P&gt;    password.encoder.key.length = 128&lt;/P&gt;&lt;P&gt;    password.encoder.keyfactory.algorithm = null&lt;/P&gt;&lt;P&gt;    password.encoder.old.secret = null&lt;/P&gt;&lt;P&gt;    password.encoder.secret = null&lt;/P&gt;&lt;P&gt;    port = 6667&lt;/P&gt;&lt;P&gt;    principal.builder.class = null&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.entry.expiration.ms = 300000&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.max.size = 1000&lt;/P&gt;&lt;P&gt;    producer.metrics.enable = false&lt;/P&gt;&lt;P&gt;    producer.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    queued.max.request.bytes = -1&lt;/P&gt;&lt;P&gt;    queued.max.requests = 500&lt;/P&gt;&lt;P&gt;    quota.consumer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.producer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.window.num = 
11&lt;/P&gt;&lt;P&gt;    quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.backoff.ms = 1000&lt;/P&gt;&lt;P&gt;    replica.fetch.max.bytes = 1048576&lt;/P&gt;&lt;P&gt;    replica.fetch.min.bytes = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.response.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    replica.fetch.wait.max.ms = 500&lt;/P&gt;&lt;P&gt;    replica.high.watermark.checkpoint.interval.ms = 5000&lt;/P&gt;&lt;P&gt;    replica.lag.time.max.ms = 10000&lt;/P&gt;&lt;P&gt;    replica.socket.receive.buffer.bytes = 65536&lt;/P&gt;&lt;P&gt;    replica.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    request.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    reserved.broker.max.id = 1000&lt;/P&gt;&lt;P&gt;    sasl.client.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.enabled.mechanisms = [GSSAPI]&lt;/P&gt;&lt;P&gt;    sasl.jaas.config = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.kinit.cmd = /usr/bin/kinit&lt;/P&gt;&lt;P&gt;    sasl.kerberos.min.time.before.relogin = 60000&lt;/P&gt;&lt;P&gt;    sasl.kerberos.principal.to.local.rules = [DEFAULT]&lt;/P&gt;&lt;P&gt;    sasl.kerberos.service.name = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.buffer.seconds = 300&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.min.period.seconds = 60&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.mechanism.inter.broker.protocol = GSSAPI&lt;/P&gt;&lt;P&gt;    sasl.server.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    security.inter.broker.protocol = PLAINTEXT&lt;/P&gt;&lt;P&gt;    socket.receive.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt; 
   socket.request.max.bytes = 104857600&lt;/P&gt;&lt;P&gt;    socket.send.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    ssl.cipher.suites = []&lt;/P&gt;&lt;P&gt;    ssl.client.auth = none&lt;/P&gt;&lt;P&gt;    ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]&lt;/P&gt;&lt;P&gt;    ssl.endpoint.identification.algorithm = https&lt;/P&gt;&lt;P&gt;    ssl.key.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keymanager.algorithm = SunX509&lt;/P&gt;&lt;P&gt;    ssl.keystore.location = &lt;/P&gt;&lt;P&gt;    ssl.keystore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keystore.type = JKS&lt;/P&gt;&lt;P&gt;    ssl.protocol = TLS&lt;/P&gt;&lt;P&gt;    ssl.provider = null&lt;/P&gt;&lt;P&gt;    ssl.secure.random.implementation = null&lt;/P&gt;&lt;P&gt;    ssl.trustmanager.algorithm = PKIX&lt;/P&gt;&lt;P&gt;    ssl.truststore.location = &lt;/P&gt;&lt;P&gt;    ssl.truststore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.truststore.type = JKS&lt;/P&gt;&lt;P&gt;    transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    transaction.max.timeout.ms = 900000&lt;/P&gt;&lt;P&gt;    transaction.remove.expired.transaction.cleanup.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    transaction.state.log.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    transaction.state.log.min.isr = 2&lt;/P&gt;&lt;P&gt;    transaction.state.log.num.partitions = 50&lt;/P&gt;&lt;P&gt;    transaction.state.log.replication.factor = 3&lt;/P&gt;&lt;P&gt;    transaction.state.log.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    transactional.id.expiration.ms = 604800000&lt;/P&gt;&lt;P&gt;    unclean.leader.election.enable = false&lt;/P&gt;&lt;P&gt;    zookeeper.connect = am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181&lt;/P&gt;&lt;P&gt;    zookeeper.connection.timeout.ms = 25000&lt;/P&gt;&lt;P&gt;    zookeeper.max.in.flight.requests = 10&lt;/P&gt;&lt;P&gt;    zookeeper.session.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    zookeeper.set.acl = 
false&lt;/P&gt;&lt;P&gt;    zookeeper.sync.time.ms = 2000&lt;/P&gt;&lt;P&gt; (kafka.server.KafkaConfig)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:35,236] INFO [ThrottledChannelReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:35,236] INFO [ThrottledChannelReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:35,237] INFO [ThrottledChannelReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:35,269] ERROR [KafkaServer id=1001] Fatal error during KafkaServer startup. Prepare to shutdown (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;org.apache.kafka.common.KafkaException: Failed to acquire lock on file .lock in /kafka-logs. A Kafka instance in another process or thread is using this directory.&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$$anonfun$lockLogDirs$1.apply(LogManager.scala:240)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$$anonfun$lockLogDirs$1.apply(LogManager.scala:236)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)&lt;/P&gt;&lt;P&gt;    at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$class.flatMap(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.AbstractTraversable.flatMap(Traversable.scala:104)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager.lockLogDirs(LogManager.scala:236)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager.&amp;lt;init&amp;gt;(LogManager.scala:97)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$.apply(LogManager.scala:958)&lt;/P&gt;&lt;P&gt;    at 
kafka.server.KafkaServer.startup(KafkaServer.scala:237)&lt;/P&gt;&lt;P&gt;    at kafka.server.KafkaServerStartable.startup(KafkaServerStartable.scala:38)&lt;/P&gt;&lt;P&gt;    at kafka.Kafka$.main(Kafka.scala:75)&lt;/P&gt;&lt;P&gt;    at kafka.Kafka.main(Kafka.scala)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:35,271] INFO [KafkaServer id=1001] shutting down (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:35,274] INFO [ZooKeeperClient] Closing. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:35,277] INFO [ZooKeeperClient] Closed. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:35,278] INFO [ThrottledChannelReaper-Fetch]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:36,237] INFO [ThrottledChannelReaper-Fetch]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:36,237] INFO [ThrottledChannelReaper-Fetch]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:36,237] INFO [ThrottledChannelReaper-Produce]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:37,237] INFO [ThrottledChannelReaper-Produce]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:37,237] INFO [ThrottledChannelReaper-Produce]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:37,237] INFO [ThrottledChannelReaper-Request]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:37,238] INFO [ThrottledChannelReaper-Request]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:37,238] INFO [ThrottledChannelReaper-Request]: Shutdown completed 
(kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:37,245] INFO [KafkaServer id=1001] shut down completed (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:37,245] ERROR Exiting Kafka. (kafka.server.KafkaServerStartable)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:37,248] INFO [KafkaServer id=1001] shutting down (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:16,467] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:16,994] INFO starting (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:16,995] INFO Connecting to zookeeper on am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181 (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:17,009] INFO [ZooKeeperClient] Initializing a new session to am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:17,028] INFO [ZooKeeperClient] Waiting until connected. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:17,088] INFO [ZooKeeperClient] Connected. 
(kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:17,345] INFO Cluster ID = z-4P_uf-RzmpT2QvMnOD2g (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:17,424] INFO KafkaConfig values: &lt;/P&gt;&lt;P&gt;    advertised.host.name = null&lt;/P&gt;&lt;P&gt;    advertised.listeners = null&lt;/P&gt;&lt;P&gt;    advertised.port = null&lt;/P&gt;&lt;P&gt;    alter.config.policy.class.name = null&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    authorizer.class.name = &lt;/P&gt;&lt;P&gt;    auto.create.topics.enable = true&lt;/P&gt;&lt;P&gt;    auto.leader.rebalance.enable = true&lt;/P&gt;&lt;P&gt;    background.threads = 10&lt;/P&gt;&lt;P&gt;    broker.id = -1&lt;/P&gt;&lt;P&gt;    broker.id.generation.enable = true&lt;/P&gt;&lt;P&gt;    broker.rack = null&lt;/P&gt;&lt;P&gt;    client.quota.callback.class = null&lt;/P&gt;&lt;P&gt;    compression.type = producer&lt;/P&gt;&lt;P&gt;    connections.max.idle.ms = 600000&lt;/P&gt;&lt;P&gt;    controlled.shutdown.enable = true&lt;/P&gt;&lt;P&gt;    controlled.shutdown.max.retries = 3&lt;/P&gt;&lt;P&gt;    controlled.shutdown.retry.backoff.ms = 5000&lt;/P&gt;&lt;P&gt;    controller.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    create.topic.policy.class.name = null&lt;/P&gt;&lt;P&gt;    default.replication.factor = 1&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.check.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.time.ms = 86400000&lt;/P&gt;&lt;P&gt;    delegation.token.master.key = null&lt;/P&gt;&lt;P&gt;    delegation.token.max.lifetime.ms = 604800000&lt;/P&gt;&lt;P&gt;    delete.records.purgatory.purge.interval.requests = 1&lt;/P&gt;&lt;P&gt;    delete.topic.enable = true&lt;/P&gt;&lt;P&gt;    fetch.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    group.initial.rebalance.delay.ms = 3000&lt;/P&gt;&lt;P&gt;    group.max.session.timeout.ms = 
300000&lt;/P&gt;&lt;P&gt;    group.min.session.timeout.ms = 6000&lt;/P&gt;&lt;P&gt;    host.name = &lt;/P&gt;&lt;P&gt;    inter.broker.listener.name = null&lt;/P&gt;&lt;P&gt;    inter.broker.protocol.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    leader.imbalance.check.interval.seconds = 300&lt;/P&gt;&lt;P&gt;    leader.imbalance.per.broker.percentage = 10&lt;/P&gt;&lt;P&gt;    listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL&lt;/P&gt;&lt;P&gt;    listeners = PLAINTEXT://am-bigdata-01.am.local:6667&lt;/P&gt;&lt;P&gt;    log.cleaner.backoff.ms = 15000&lt;/P&gt;&lt;P&gt;    log.cleaner.dedupe.buffer.size = 134217728&lt;/P&gt;&lt;P&gt;    log.cleaner.delete.retention.ms = 86400000&lt;/P&gt;&lt;P&gt;    log.cleaner.enable = true&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.load.factor = 0.9&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.size = 524288&lt;/P&gt;&lt;P&gt;    log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308&lt;/P&gt;&lt;P&gt;    log.cleaner.min.cleanable.ratio = 0.5&lt;/P&gt;&lt;P&gt;    log.cleaner.min.compaction.lag.ms = 0&lt;/P&gt;&lt;P&gt;    log.cleaner.threads = 1&lt;/P&gt;&lt;P&gt;    log.cleanup.policy = [delete]&lt;/P&gt;&lt;P&gt;    log.dir = /tmp/kafka-logs&lt;/P&gt;&lt;P&gt;    log.dirs = /kafka-logs&lt;/P&gt;&lt;P&gt;    log.flush.interval.messages = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.interval.ms = null&lt;/P&gt;&lt;P&gt;    log.flush.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.flush.scheduler.interval.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.start.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.index.interval.bytes = 4096&lt;/P&gt;&lt;P&gt;    log.index.size.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    log.message.downconversion.enable = true&lt;/P&gt;&lt;P&gt;    log.message.format.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    log.message.timestamp.difference.max.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    
log.message.timestamp.type = CreateTime&lt;/P&gt;&lt;P&gt;    log.preallocate = false&lt;/P&gt;&lt;P&gt;    log.retention.bytes = -1&lt;/P&gt;&lt;P&gt;    log.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    log.retention.hours = 168&lt;/P&gt;&lt;P&gt;    log.retention.minutes = null&lt;/P&gt;&lt;P&gt;    log.retention.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.hours = 168&lt;/P&gt;&lt;P&gt;    log.roll.jitter.hours = 0&lt;/P&gt;&lt;P&gt;    log.roll.jitter.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.ms = null&lt;/P&gt;&lt;P&gt;    log.segment.bytes = 1073741824&lt;/P&gt;&lt;P&gt;    log.segment.delete.delay.ms = 60000&lt;/P&gt;&lt;P&gt;    max.connections.per.ip = 2147483647&lt;/P&gt;&lt;P&gt;    max.connections.per.ip.overrides = &lt;/P&gt;&lt;P&gt;    max.incremental.fetch.session.cache.slots = 1000&lt;/P&gt;&lt;P&gt;    message.max.bytes = 1000000&lt;/P&gt;&lt;P&gt;    metric.reporters = []&lt;/P&gt;&lt;P&gt;    metrics.num.samples = 2&lt;/P&gt;&lt;P&gt;    metrics.recording.level = INFO&lt;/P&gt;&lt;P&gt;    metrics.sample.window.ms = 30000&lt;/P&gt;&lt;P&gt;    min.insync.replicas = 1&lt;/P&gt;&lt;P&gt;    num.io.threads = 8&lt;/P&gt;&lt;P&gt;    num.network.threads = 3&lt;/P&gt;&lt;P&gt;    num.partitions = 1&lt;/P&gt;&lt;P&gt;    num.recovery.threads.per.data.dir = 1&lt;/P&gt;&lt;P&gt;    num.replica.alter.log.dirs.threads = null&lt;/P&gt;&lt;P&gt;    num.replica.fetchers = 1&lt;/P&gt;&lt;P&gt;    offset.metadata.max.bytes = 4096&lt;/P&gt;&lt;P&gt;    offsets.commit.required.acks = -1&lt;/P&gt;&lt;P&gt;    offsets.commit.timeout.ms = 5000&lt;/P&gt;&lt;P&gt;    offsets.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    offsets.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    offsets.retention.minutes = 86400000&lt;/P&gt;&lt;P&gt;    offsets.topic.compression.codec = 0&lt;/P&gt;&lt;P&gt;    offsets.topic.num.partitions = 50&lt;/P&gt;&lt;P&gt;    offsets.topic.replication.factor = 3&lt;/P&gt;&lt;P&gt;    offsets.topic.segment.bytes = 
104857600&lt;/P&gt;&lt;P&gt;    password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding&lt;/P&gt;&lt;P&gt;    password.encoder.iterations = 4096&lt;/P&gt;&lt;P&gt;    password.encoder.key.length = 128&lt;/P&gt;&lt;P&gt;    password.encoder.keyfactory.algorithm = null&lt;/P&gt;&lt;P&gt;    password.encoder.old.secret = null&lt;/P&gt;&lt;P&gt;    password.encoder.secret = null&lt;/P&gt;&lt;P&gt;    port = 6667&lt;/P&gt;&lt;P&gt;    principal.builder.class = null&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.entry.expiration.ms = 300000&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.max.size = 1000&lt;/P&gt;&lt;P&gt;    producer.metrics.enable = false&lt;/P&gt;&lt;P&gt;    producer.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    queued.max.request.bytes = -1&lt;/P&gt;&lt;P&gt;    queued.max.requests = 500&lt;/P&gt;&lt;P&gt;    quota.consumer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.producer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.window.num = 11&lt;/P&gt;&lt;P&gt;    quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.backoff.ms = 1000&lt;/P&gt;&lt;P&gt;    replica.fetch.max.bytes = 1048576&lt;/P&gt;&lt;P&gt;    replica.fetch.min.bytes = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.response.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    replica.fetch.wait.max.ms = 500&lt;/P&gt;&lt;P&gt;    replica.high.watermark.checkpoint.interval.ms = 5000&lt;/P&gt;&lt;P&gt;    replica.lag.time.max.ms = 10000&lt;/P&gt;&lt;P&gt;    replica.socket.receive.buffer.bytes = 65536&lt;/P&gt;&lt;P&gt;    replica.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    request.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    reserved.broker.max.id = 1000&lt;/P&gt;&lt;P&gt;    sasl.client.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.enabled.mechanisms = [GSSAPI]&lt;/P&gt;&lt;P&gt;    sasl.jaas.config = null&lt;/P&gt;&lt;P&gt;    
sasl.kerberos.kinit.cmd = /usr/bin/kinit&lt;/P&gt;&lt;P&gt;    sasl.kerberos.min.time.before.relogin = 60000&lt;/P&gt;&lt;P&gt;    sasl.kerberos.principal.to.local.rules = [DEFAULT]&lt;/P&gt;&lt;P&gt;    sasl.kerberos.service.name = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.buffer.seconds = 300&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.min.period.seconds = 60&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.mechanism.inter.broker.protocol = GSSAPI&lt;/P&gt;&lt;P&gt;    sasl.server.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    security.inter.broker.protocol = PLAINTEXT&lt;/P&gt;&lt;P&gt;    socket.receive.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    socket.request.max.bytes = 104857600&lt;/P&gt;&lt;P&gt;    socket.send.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    ssl.cipher.suites = []&lt;/P&gt;&lt;P&gt;    ssl.client.auth = none&lt;/P&gt;&lt;P&gt;    ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]&lt;/P&gt;&lt;P&gt;    ssl.endpoint.identification.algorithm = https&lt;/P&gt;&lt;P&gt;    ssl.key.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keymanager.algorithm = SunX509&lt;/P&gt;&lt;P&gt;    ssl.keystore.location = &lt;/P&gt;&lt;P&gt;    ssl.keystore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keystore.type = JKS&lt;/P&gt;&lt;P&gt;    ssl.protocol = TLS&lt;/P&gt;&lt;P&gt;    ssl.provider = null&lt;/P&gt;&lt;P&gt;    ssl.secure.random.implementation = null&lt;/P&gt;&lt;P&gt;    ssl.trustmanager.algorithm = PKIX&lt;/P&gt;&lt;P&gt;    ssl.truststore.location = &lt;/P&gt;&lt;P&gt;    ssl.truststore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.truststore.type = JKS&lt;/P&gt;&lt;P&gt;    
transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    transaction.max.timeout.ms = 900000&lt;/P&gt;&lt;P&gt;    transaction.remove.expired.transaction.cleanup.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    transaction.state.log.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    transaction.state.log.min.isr = 2&lt;/P&gt;&lt;P&gt;    transaction.state.log.num.partitions = 50&lt;/P&gt;&lt;P&gt;    transaction.state.log.replication.factor = 3&lt;/P&gt;&lt;P&gt;    transaction.state.log.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    transactional.id.expiration.ms = 604800000&lt;/P&gt;&lt;P&gt;    unclean.leader.election.enable = false&lt;/P&gt;&lt;P&gt;    zookeeper.connect = am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181&lt;/P&gt;&lt;P&gt;    zookeeper.connection.timeout.ms = 25000&lt;/P&gt;&lt;P&gt;    zookeeper.max.in.flight.requests = 10&lt;/P&gt;&lt;P&gt;    zookeeper.session.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    zookeeper.set.acl = false&lt;/P&gt;&lt;P&gt;    zookeeper.sync.time.ms = 2000&lt;/P&gt;&lt;P&gt; (kafka.server.KafkaConfig)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:17,431] INFO KafkaConfig values: &lt;/P&gt;&lt;P&gt;    advertised.host.name = null&lt;/P&gt;&lt;P&gt;    advertised.listeners = null&lt;/P&gt;&lt;P&gt;    advertised.port = null&lt;/P&gt;&lt;P&gt;    alter.config.policy.class.name = null&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    authorizer.class.name = &lt;/P&gt;&lt;P&gt;    auto.create.topics.enable = true&lt;/P&gt;&lt;P&gt;    auto.leader.rebalance.enable = true&lt;/P&gt;&lt;P&gt;    background.threads = 10&lt;/P&gt;&lt;P&gt;    broker.id = -1&lt;/P&gt;&lt;P&gt;    broker.id.generation.enable = true&lt;/P&gt;&lt;P&gt;    broker.rack = null&lt;/P&gt;&lt;P&gt;    client.quota.callback.class = null&lt;/P&gt;&lt;P&gt;    compression.type = 
producer&lt;/P&gt;&lt;P&gt;    connections.max.idle.ms = 600000&lt;/P&gt;&lt;P&gt;    controlled.shutdown.enable = true&lt;/P&gt;&lt;P&gt;    controlled.shutdown.max.retries = 3&lt;/P&gt;&lt;P&gt;    controlled.shutdown.retry.backoff.ms = 5000&lt;/P&gt;&lt;P&gt;    controller.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    create.topic.policy.class.name = null&lt;/P&gt;&lt;P&gt;    default.replication.factor = 1&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.check.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.time.ms = 86400000&lt;/P&gt;&lt;P&gt;    delegation.token.master.key = null&lt;/P&gt;&lt;P&gt;    delegation.token.max.lifetime.ms = 604800000&lt;/P&gt;&lt;P&gt;    delete.records.purgatory.purge.interval.requests = 1&lt;/P&gt;&lt;P&gt;    delete.topic.enable = true&lt;/P&gt;&lt;P&gt;    fetch.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    group.initial.rebalance.delay.ms = 3000&lt;/P&gt;&lt;P&gt;    group.max.session.timeout.ms = 300000&lt;/P&gt;&lt;P&gt;    group.min.session.timeout.ms = 6000&lt;/P&gt;&lt;P&gt;    host.name = &lt;/P&gt;&lt;P&gt;    inter.broker.listener.name = null&lt;/P&gt;&lt;P&gt;    inter.broker.protocol.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    leader.imbalance.check.interval.seconds = 300&lt;/P&gt;&lt;P&gt;    leader.imbalance.per.broker.percentage = 10&lt;/P&gt;&lt;P&gt;    listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL&lt;/P&gt;&lt;P&gt;    listeners = PLAINTEXT://am-bigdata-01.am.local:6667&lt;/P&gt;&lt;P&gt;    log.cleaner.backoff.ms = 15000&lt;/P&gt;&lt;P&gt;    log.cleaner.dedupe.buffer.size = 134217728&lt;/P&gt;&lt;P&gt;    log.cleaner.delete.retention.ms = 86400000&lt;/P&gt;&lt;P&gt;    log.cleaner.enable = true&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.load.factor = 0.9&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.size = 524288&lt;/P&gt;&lt;P&gt;    log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308&lt;/P&gt;&lt;P&gt;   
 log.cleaner.min.cleanable.ratio = 0.5&lt;/P&gt;&lt;P&gt;    log.cleaner.min.compaction.lag.ms = 0&lt;/P&gt;&lt;P&gt;    log.cleaner.threads = 1&lt;/P&gt;&lt;P&gt;    log.cleanup.policy = [delete]&lt;/P&gt;&lt;P&gt;    log.dir = /tmp/kafka-logs&lt;/P&gt;&lt;P&gt;    log.dirs = /kafka-logs&lt;/P&gt;&lt;P&gt;    log.flush.interval.messages = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.interval.ms = null&lt;/P&gt;&lt;P&gt;    log.flush.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.flush.scheduler.interval.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.start.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.index.interval.bytes = 4096&lt;/P&gt;&lt;P&gt;    log.index.size.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    log.message.downconversion.enable = true&lt;/P&gt;&lt;P&gt;    log.message.format.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    log.message.timestamp.difference.max.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.message.timestamp.type = CreateTime&lt;/P&gt;&lt;P&gt;    log.preallocate = false&lt;/P&gt;&lt;P&gt;    log.retention.bytes = -1&lt;/P&gt;&lt;P&gt;    log.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    log.retention.hours = 168&lt;/P&gt;&lt;P&gt;    log.retention.minutes = null&lt;/P&gt;&lt;P&gt;    log.retention.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.hours = 168&lt;/P&gt;&lt;P&gt;    log.roll.jitter.hours = 0&lt;/P&gt;&lt;P&gt;    log.roll.jitter.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.ms = null&lt;/P&gt;&lt;P&gt;    log.segment.bytes = 1073741824&lt;/P&gt;&lt;P&gt;    log.segment.delete.delay.ms = 60000&lt;/P&gt;&lt;P&gt;    max.connections.per.ip = 2147483647&lt;/P&gt;&lt;P&gt;    max.connections.per.ip.overrides = &lt;/P&gt;&lt;P&gt;    max.incremental.fetch.session.cache.slots = 1000&lt;/P&gt;&lt;P&gt;    message.max.bytes = 1000000&lt;/P&gt;&lt;P&gt;    metric.reporters = []&lt;/P&gt;&lt;P&gt;    metrics.num.samples = 2&lt;/P&gt;&lt;P&gt;    metrics.recording.level = 
INFO&lt;/P&gt;&lt;P&gt;    metrics.sample.window.ms = 30000&lt;/P&gt;&lt;P&gt;    min.insync.replicas = 1&lt;/P&gt;&lt;P&gt;    num.io.threads = 8&lt;/P&gt;&lt;P&gt;    num.network.threads = 3&lt;/P&gt;&lt;P&gt;    num.partitions = 1&lt;/P&gt;&lt;P&gt;    num.recovery.threads.per.data.dir = 1&lt;/P&gt;&lt;P&gt;    num.replica.alter.log.dirs.threads = null&lt;/P&gt;&lt;P&gt;    num.replica.fetchers = 1&lt;/P&gt;&lt;P&gt;    offset.metadata.max.bytes = 4096&lt;/P&gt;&lt;P&gt;    offsets.commit.required.acks = -1&lt;/P&gt;&lt;P&gt;    offsets.commit.timeout.ms = 5000&lt;/P&gt;&lt;P&gt;    offsets.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    offsets.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    offsets.retention.minutes = 86400000&lt;/P&gt;&lt;P&gt;    offsets.topic.compression.codec = 0&lt;/P&gt;&lt;P&gt;    offsets.topic.num.partitions = 50&lt;/P&gt;&lt;P&gt;    offsets.topic.replication.factor = 3&lt;/P&gt;&lt;P&gt;    offsets.topic.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding&lt;/P&gt;&lt;P&gt;    password.encoder.iterations = 4096&lt;/P&gt;&lt;P&gt;    password.encoder.key.length = 128&lt;/P&gt;&lt;P&gt;    password.encoder.keyfactory.algorithm = null&lt;/P&gt;&lt;P&gt;    password.encoder.old.secret = null&lt;/P&gt;&lt;P&gt;    password.encoder.secret = null&lt;/P&gt;&lt;P&gt;    port = 6667&lt;/P&gt;&lt;P&gt;    principal.builder.class = null&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.entry.expiration.ms = 300000&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.max.size = 1000&lt;/P&gt;&lt;P&gt;    producer.metrics.enable = false&lt;/P&gt;&lt;P&gt;    producer.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    queued.max.request.bytes = -1&lt;/P&gt;&lt;P&gt;    queued.max.requests = 500&lt;/P&gt;&lt;P&gt;    quota.consumer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.producer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.window.num = 
11&lt;/P&gt;&lt;P&gt;    quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.backoff.ms = 1000&lt;/P&gt;&lt;P&gt;    replica.fetch.max.bytes = 1048576&lt;/P&gt;&lt;P&gt;    replica.fetch.min.bytes = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.response.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    replica.fetch.wait.max.ms = 500&lt;/P&gt;&lt;P&gt;    replica.high.watermark.checkpoint.interval.ms = 5000&lt;/P&gt;&lt;P&gt;    replica.lag.time.max.ms = 10000&lt;/P&gt;&lt;P&gt;    replica.socket.receive.buffer.bytes = 65536&lt;/P&gt;&lt;P&gt;    replica.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    request.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    reserved.broker.max.id = 1000&lt;/P&gt;&lt;P&gt;    sasl.client.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.enabled.mechanisms = [GSSAPI]&lt;/P&gt;&lt;P&gt;    sasl.jaas.config = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.kinit.cmd = /usr/bin/kinit&lt;/P&gt;&lt;P&gt;    sasl.kerberos.min.time.before.relogin = 60000&lt;/P&gt;&lt;P&gt;    sasl.kerberos.principal.to.local.rules = [DEFAULT]&lt;/P&gt;&lt;P&gt;    sasl.kerberos.service.name = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.buffer.seconds = 300&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.min.period.seconds = 60&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.mechanism.inter.broker.protocol = GSSAPI&lt;/P&gt;&lt;P&gt;    sasl.server.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    security.inter.broker.protocol = PLAINTEXT&lt;/P&gt;&lt;P&gt;    socket.receive.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt; 
   socket.request.max.bytes = 104857600&lt;/P&gt;&lt;P&gt;    socket.send.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    ssl.cipher.suites = []&lt;/P&gt;&lt;P&gt;    ssl.client.auth = none&lt;/P&gt;&lt;P&gt;    ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]&lt;/P&gt;&lt;P&gt;    ssl.endpoint.identification.algorithm = https&lt;/P&gt;&lt;P&gt;    ssl.key.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keymanager.algorithm = SunX509&lt;/P&gt;&lt;P&gt;    ssl.keystore.location = &lt;/P&gt;&lt;P&gt;    ssl.keystore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keystore.type = JKS&lt;/P&gt;&lt;P&gt;    ssl.protocol = TLS&lt;/P&gt;&lt;P&gt;    ssl.provider = null&lt;/P&gt;&lt;P&gt;    ssl.secure.random.implementation = null&lt;/P&gt;&lt;P&gt;    ssl.trustmanager.algorithm = PKIX&lt;/P&gt;&lt;P&gt;    ssl.truststore.location = &lt;/P&gt;&lt;P&gt;    ssl.truststore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.truststore.type = JKS&lt;/P&gt;&lt;P&gt;    transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    transaction.max.timeout.ms = 900000&lt;/P&gt;&lt;P&gt;    transaction.remove.expired.transaction.cleanup.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    transaction.state.log.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    transaction.state.log.min.isr = 2&lt;/P&gt;&lt;P&gt;    transaction.state.log.num.partitions = 50&lt;/P&gt;&lt;P&gt;    transaction.state.log.replication.factor = 3&lt;/P&gt;&lt;P&gt;    transaction.state.log.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    transactional.id.expiration.ms = 604800000&lt;/P&gt;&lt;P&gt;    unclean.leader.election.enable = false&lt;/P&gt;&lt;P&gt;    zookeeper.connect = am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181&lt;/P&gt;&lt;P&gt;    zookeeper.connection.timeout.ms = 25000&lt;/P&gt;&lt;P&gt;    zookeeper.max.in.flight.requests = 10&lt;/P&gt;&lt;P&gt;    zookeeper.session.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    zookeeper.set.acl = 
false&lt;/P&gt;&lt;P&gt;    zookeeper.sync.time.ms = 2000&lt;/P&gt;&lt;P&gt; (kafka.server.KafkaConfig)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:17,458] INFO [ThrottledChannelReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:17,458] INFO [ThrottledChannelReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:17,459] INFO [ThrottledChannelReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:17,492] ERROR [KafkaServer id=1001] Fatal error during KafkaServer startup. Prepare to shutdown (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;org.apache.kafka.common.KafkaException: Failed to acquire lock on file .lock in /kafka-logs. A Kafka instance in another process or thread is using this directory.&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$$anonfun$lockLogDirs$1.apply(LogManager.scala:240)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$$anonfun$lockLogDirs$1.apply(LogManager.scala:236)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)&lt;/P&gt;&lt;P&gt;    at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$class.flatMap(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.AbstractTraversable.flatMap(Traversable.scala:104)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager.lockLogDirs(LogManager.scala:236)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager.&amp;lt;init&amp;gt;(LogManager.scala:97)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$.apply(LogManager.scala:958)&lt;/P&gt;&lt;P&gt;    at 
kafka.server.KafkaServer.startup(KafkaServer.scala:237)&lt;/P&gt;&lt;P&gt;    at kafka.server.KafkaServerStartable.startup(KafkaServerStartable.scala:38)&lt;/P&gt;&lt;P&gt;    at kafka.Kafka$.main(Kafka.scala:75)&lt;/P&gt;&lt;P&gt;    at kafka.Kafka.main(Kafka.scala)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:17,494] INFO [KafkaServer id=1001] shutting down (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:17,497] INFO [ZooKeeperClient] Closing. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:17,501] INFO [ZooKeeperClient] Closed. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:17,501] INFO [ThrottledChannelReaper-Fetch]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:18,459] INFO [ThrottledChannelReaper-Fetch]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:18,459] INFO [ThrottledChannelReaper-Fetch]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:18,459] INFO [ThrottledChannelReaper-Produce]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:19,459] INFO [ThrottledChannelReaper-Produce]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:19,459] INFO [ThrottledChannelReaper-Produce]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:19,459] INFO [ThrottledChannelReaper-Request]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:19,460] INFO [ThrottledChannelReaper-Request]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:19,460] INFO [ThrottledChannelReaper-Request]: Shutdown completed 
(kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:19,465] INFO [KafkaServer id=1001] shut down completed (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:19,466] ERROR Exiting Kafka. (kafka.server.KafkaServerStartable)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:19,468] INFO [KafkaServer id=1001] shutting down (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:20:51,712] INFO [GroupMetadataManager brokerId=1001] Removed 0 expired offsets in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:18,284] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:18,803] INFO starting (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:18,804] INFO Connecting to zookeeper on am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181 (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:18,818] INFO [ZooKeeperClient] Initializing a new session to am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:18,838] INFO [ZooKeeperClient] Waiting until connected. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:18,904] INFO [ZooKeeperClient] Connected. 
(kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:19,210] INFO Cluster ID = z-4P_uf-RzmpT2QvMnOD2g (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:19,288] INFO KafkaConfig values: &lt;/P&gt;&lt;P&gt;    advertised.host.name = null&lt;/P&gt;&lt;P&gt;    advertised.listeners = null&lt;/P&gt;&lt;P&gt;    advertised.port = null&lt;/P&gt;&lt;P&gt;    alter.config.policy.class.name = null&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    authorizer.class.name = &lt;/P&gt;&lt;P&gt;    auto.create.topics.enable = true&lt;/P&gt;&lt;P&gt;    auto.leader.rebalance.enable = true&lt;/P&gt;&lt;P&gt;    background.threads = 10&lt;/P&gt;&lt;P&gt;    broker.id = -1&lt;/P&gt;&lt;P&gt;    broker.id.generation.enable = true&lt;/P&gt;&lt;P&gt;    broker.rack = null&lt;/P&gt;&lt;P&gt;    client.quota.callback.class = null&lt;/P&gt;&lt;P&gt;    compression.type = producer&lt;/P&gt;&lt;P&gt;    connections.max.idle.ms = 600000&lt;/P&gt;&lt;P&gt;    controlled.shutdown.enable = true&lt;/P&gt;&lt;P&gt;    controlled.shutdown.max.retries = 3&lt;/P&gt;&lt;P&gt;    controlled.shutdown.retry.backoff.ms = 5000&lt;/P&gt;&lt;P&gt;    controller.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    create.topic.policy.class.name = null&lt;/P&gt;&lt;P&gt;    default.replication.factor = 1&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.check.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.time.ms = 86400000&lt;/P&gt;&lt;P&gt;    delegation.token.master.key = null&lt;/P&gt;&lt;P&gt;    delegation.token.max.lifetime.ms = 604800000&lt;/P&gt;&lt;P&gt;    delete.records.purgatory.purge.interval.requests = 1&lt;/P&gt;&lt;P&gt;    delete.topic.enable = true&lt;/P&gt;&lt;P&gt;    fetch.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    group.initial.rebalance.delay.ms = 3000&lt;/P&gt;&lt;P&gt;    group.max.session.timeout.ms = 
300000&lt;/P&gt;&lt;P&gt;    group.min.session.timeout.ms = 6000&lt;/P&gt;&lt;P&gt;    host.name = &lt;/P&gt;&lt;P&gt;    inter.broker.listener.name = null&lt;/P&gt;&lt;P&gt;    inter.broker.protocol.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    leader.imbalance.check.interval.seconds = 300&lt;/P&gt;&lt;P&gt;    leader.imbalance.per.broker.percentage = 10&lt;/P&gt;&lt;P&gt;    listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL&lt;/P&gt;&lt;P&gt;    listeners = PLAINTEXT://am-bigdata-01.am.local:6667&lt;/P&gt;&lt;P&gt;    log.cleaner.backoff.ms = 15000&lt;/P&gt;&lt;P&gt;    log.cleaner.dedupe.buffer.size = 134217728&lt;/P&gt;&lt;P&gt;    log.cleaner.delete.retention.ms = 86400000&lt;/P&gt;&lt;P&gt;    log.cleaner.enable = true&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.load.factor = 0.9&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.size = 524288&lt;/P&gt;&lt;P&gt;    log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308&lt;/P&gt;&lt;P&gt;    log.cleaner.min.cleanable.ratio = 0.5&lt;/P&gt;&lt;P&gt;    log.cleaner.min.compaction.lag.ms = 0&lt;/P&gt;&lt;P&gt;    log.cleaner.threads = 1&lt;/P&gt;&lt;P&gt;    log.cleanup.policy = [delete]&lt;/P&gt;&lt;P&gt;    log.dir = /tmp/kafka-logs&lt;/P&gt;&lt;P&gt;    log.dirs = /kafka-logs&lt;/P&gt;&lt;P&gt;    log.flush.interval.messages = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.interval.ms = null&lt;/P&gt;&lt;P&gt;    log.flush.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.flush.scheduler.interval.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.start.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.index.interval.bytes = 4096&lt;/P&gt;&lt;P&gt;    log.index.size.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    log.message.downconversion.enable = true&lt;/P&gt;&lt;P&gt;    log.message.format.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    log.message.timestamp.difference.max.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    
log.message.timestamp.type = CreateTime&lt;/P&gt;&lt;P&gt;    log.preallocate = false&lt;/P&gt;&lt;P&gt;    log.retention.bytes = -1&lt;/P&gt;&lt;P&gt;    log.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    log.retention.hours = 168&lt;/P&gt;&lt;P&gt;    log.retention.minutes = null&lt;/P&gt;&lt;P&gt;    log.retention.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.hours = 168&lt;/P&gt;&lt;P&gt;    log.roll.jitter.hours = 0&lt;/P&gt;&lt;P&gt;    log.roll.jitter.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.ms = null&lt;/P&gt;&lt;P&gt;    log.segment.bytes = 1073741824&lt;/P&gt;&lt;P&gt;    log.segment.delete.delay.ms = 60000&lt;/P&gt;&lt;P&gt;    max.connections.per.ip = 2147483647&lt;/P&gt;&lt;P&gt;    max.connections.per.ip.overrides = &lt;/P&gt;&lt;P&gt;    max.incremental.fetch.session.cache.slots = 1000&lt;/P&gt;&lt;P&gt;    message.max.bytes = 1000000&lt;/P&gt;&lt;P&gt;    metric.reporters = []&lt;/P&gt;&lt;P&gt;    metrics.num.samples = 2&lt;/P&gt;&lt;P&gt;    metrics.recording.level = INFO&lt;/P&gt;&lt;P&gt;    metrics.sample.window.ms = 30000&lt;/P&gt;&lt;P&gt;    min.insync.replicas = 1&lt;/P&gt;&lt;P&gt;    num.io.threads = 8&lt;/P&gt;&lt;P&gt;    num.network.threads = 3&lt;/P&gt;&lt;P&gt;    num.partitions = 1&lt;/P&gt;&lt;P&gt;    num.recovery.threads.per.data.dir = 1&lt;/P&gt;&lt;P&gt;    num.replica.alter.log.dirs.threads = null&lt;/P&gt;&lt;P&gt;    num.replica.fetchers = 1&lt;/P&gt;&lt;P&gt;    offset.metadata.max.bytes = 4096&lt;/P&gt;&lt;P&gt;    offsets.commit.required.acks = -1&lt;/P&gt;&lt;P&gt;    offsets.commit.timeout.ms = 5000&lt;/P&gt;&lt;P&gt;    offsets.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    offsets.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    offsets.retention.minutes = 86400000&lt;/P&gt;&lt;P&gt;    offsets.topic.compression.codec = 0&lt;/P&gt;&lt;P&gt;    offsets.topic.num.partitions = 50&lt;/P&gt;&lt;P&gt;    offsets.topic.replication.factor = 3&lt;/P&gt;&lt;P&gt;    offsets.topic.segment.bytes = 
104857600&lt;/P&gt;&lt;P&gt;    password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding&lt;/P&gt;&lt;P&gt;    password.encoder.iterations = 4096&lt;/P&gt;&lt;P&gt;    password.encoder.key.length = 128&lt;/P&gt;&lt;P&gt;    password.encoder.keyfactory.algorithm = null&lt;/P&gt;&lt;P&gt;    password.encoder.old.secret = null&lt;/P&gt;&lt;P&gt;    password.encoder.secret = null&lt;/P&gt;&lt;P&gt;    port = 6667&lt;/P&gt;&lt;P&gt;    principal.builder.class = null&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.entry.expiration.ms = 300000&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.max.size = 1000&lt;/P&gt;&lt;P&gt;    producer.metrics.enable = false&lt;/P&gt;&lt;P&gt;    producer.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    queued.max.request.bytes = -1&lt;/P&gt;&lt;P&gt;    queued.max.requests = 500&lt;/P&gt;&lt;P&gt;    quota.consumer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.producer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.window.num = 11&lt;/P&gt;&lt;P&gt;    quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.backoff.ms = 1000&lt;/P&gt;&lt;P&gt;    replica.fetch.max.bytes = 1048576&lt;/P&gt;&lt;P&gt;    replica.fetch.min.bytes = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.response.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    replica.fetch.wait.max.ms = 500&lt;/P&gt;&lt;P&gt;    replica.high.watermark.checkpoint.interval.ms = 5000&lt;/P&gt;&lt;P&gt;    replica.lag.time.max.ms = 10000&lt;/P&gt;&lt;P&gt;    replica.socket.receive.buffer.bytes = 65536&lt;/P&gt;&lt;P&gt;    replica.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    request.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    reserved.broker.max.id = 1000&lt;/P&gt;&lt;P&gt;    sasl.client.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.enabled.mechanisms = [GSSAPI]&lt;/P&gt;&lt;P&gt;    sasl.jaas.config = null&lt;/P&gt;&lt;P&gt;    
sasl.kerberos.kinit.cmd = /usr/bin/kinit&lt;/P&gt;&lt;P&gt;    sasl.kerberos.min.time.before.relogin = 60000&lt;/P&gt;&lt;P&gt;    sasl.kerberos.principal.to.local.rules = [DEFAULT]&lt;/P&gt;&lt;P&gt;    sasl.kerberos.service.name = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.buffer.seconds = 300&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.min.period.seconds = 60&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.mechanism.inter.broker.protocol = GSSAPI&lt;/P&gt;&lt;P&gt;    sasl.server.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    security.inter.broker.protocol = PLAINTEXT&lt;/P&gt;&lt;P&gt;    socket.receive.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    socket.request.max.bytes = 104857600&lt;/P&gt;&lt;P&gt;    socket.send.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    ssl.cipher.suites = []&lt;/P&gt;&lt;P&gt;    ssl.client.auth = none&lt;/P&gt;&lt;P&gt;    ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]&lt;/P&gt;&lt;P&gt;    ssl.endpoint.identification.algorithm = https&lt;/P&gt;&lt;P&gt;    ssl.key.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keymanager.algorithm = SunX509&lt;/P&gt;&lt;P&gt;    ssl.keystore.location = &lt;/P&gt;&lt;P&gt;    ssl.keystore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keystore.type = JKS&lt;/P&gt;&lt;P&gt;    ssl.protocol = TLS&lt;/P&gt;&lt;P&gt;    ssl.provider = null&lt;/P&gt;&lt;P&gt;    ssl.secure.random.implementation = null&lt;/P&gt;&lt;P&gt;    ssl.trustmanager.algorithm = PKIX&lt;/P&gt;&lt;P&gt;    ssl.truststore.location = &lt;/P&gt;&lt;P&gt;    ssl.truststore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.truststore.type = JKS&lt;/P&gt;&lt;P&gt;    
transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    transaction.max.timeout.ms = 900000&lt;/P&gt;&lt;P&gt;    transaction.remove.expired.transaction.cleanup.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    transaction.state.log.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    transaction.state.log.min.isr = 2&lt;/P&gt;&lt;P&gt;    transaction.state.log.num.partitions = 50&lt;/P&gt;&lt;P&gt;    transaction.state.log.replication.factor = 3&lt;/P&gt;&lt;P&gt;    transaction.state.log.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    transactional.id.expiration.ms = 604800000&lt;/P&gt;&lt;P&gt;    unclean.leader.election.enable = false&lt;/P&gt;&lt;P&gt;    zookeeper.connect = am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181&lt;/P&gt;&lt;P&gt;    zookeeper.connection.timeout.ms = 25000&lt;/P&gt;&lt;P&gt;    zookeeper.max.in.flight.requests = 10&lt;/P&gt;&lt;P&gt;    zookeeper.session.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    zookeeper.set.acl = false&lt;/P&gt;&lt;P&gt;    zookeeper.sync.time.ms = 2000&lt;/P&gt;&lt;P&gt; (kafka.server.KafkaConfig)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:19,296] INFO KafkaConfig values: &lt;/P&gt;&lt;P&gt;    advertised.host.name = null&lt;/P&gt;&lt;P&gt;    advertised.listeners = null&lt;/P&gt;&lt;P&gt;    advertised.port = null&lt;/P&gt;&lt;P&gt;    alter.config.policy.class.name = null&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    authorizer.class.name = &lt;/P&gt;&lt;P&gt;    auto.create.topics.enable = true&lt;/P&gt;&lt;P&gt;    auto.leader.rebalance.enable = true&lt;/P&gt;&lt;P&gt;    background.threads = 10&lt;/P&gt;&lt;P&gt;    broker.id = -1&lt;/P&gt;&lt;P&gt;    broker.id.generation.enable = true&lt;/P&gt;&lt;P&gt;    broker.rack = null&lt;/P&gt;&lt;P&gt;    client.quota.callback.class = null&lt;/P&gt;&lt;P&gt;    compression.type = 
producer&lt;/P&gt;&lt;P&gt;    connections.max.idle.ms = 600000&lt;/P&gt;&lt;P&gt;    controlled.shutdown.enable = true&lt;/P&gt;&lt;P&gt;    controlled.shutdown.max.retries = 3&lt;/P&gt;&lt;P&gt;    controlled.shutdown.retry.backoff.ms = 5000&lt;/P&gt;&lt;P&gt;    controller.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    create.topic.policy.class.name = null&lt;/P&gt;&lt;P&gt;    default.replication.factor = 1&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.check.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.time.ms = 86400000&lt;/P&gt;&lt;P&gt;    delegation.token.master.key = null&lt;/P&gt;&lt;P&gt;    delegation.token.max.lifetime.ms = 604800000&lt;/P&gt;&lt;P&gt;    delete.records.purgatory.purge.interval.requests = 1&lt;/P&gt;&lt;P&gt;    delete.topic.enable = true&lt;/P&gt;&lt;P&gt;    fetch.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    group.initial.rebalance.delay.ms = 3000&lt;/P&gt;&lt;P&gt;    group.max.session.timeout.ms = 300000&lt;/P&gt;&lt;P&gt;    group.min.session.timeout.ms = 6000&lt;/P&gt;&lt;P&gt;    host.name = &lt;/P&gt;&lt;P&gt;    inter.broker.listener.name = null&lt;/P&gt;&lt;P&gt;    inter.broker.protocol.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    leader.imbalance.check.interval.seconds = 300&lt;/P&gt;&lt;P&gt;    leader.imbalance.per.broker.percentage = 10&lt;/P&gt;&lt;P&gt;    listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL&lt;/P&gt;&lt;P&gt;    listeners = PLAINTEXT://am-bigdata-01.am.local:6667&lt;/P&gt;&lt;P&gt;    log.cleaner.backoff.ms = 15000&lt;/P&gt;&lt;P&gt;    log.cleaner.dedupe.buffer.size = 134217728&lt;/P&gt;&lt;P&gt;    log.cleaner.delete.retention.ms = 86400000&lt;/P&gt;&lt;P&gt;    log.cleaner.enable = true&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.load.factor = 0.9&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.size = 524288&lt;/P&gt;&lt;P&gt;    log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308&lt;/P&gt;&lt;P&gt;   
 log.cleaner.min.cleanable.ratio = 0.5&lt;/P&gt;&lt;P&gt;    log.cleaner.min.compaction.lag.ms = 0&lt;/P&gt;&lt;P&gt;    log.cleaner.threads = 1&lt;/P&gt;&lt;P&gt;    log.cleanup.policy = [delete]&lt;/P&gt;&lt;P&gt;    log.dir = /tmp/kafka-logs&lt;/P&gt;&lt;P&gt;    log.dirs = /kafka-logs&lt;/P&gt;&lt;P&gt;    log.flush.interval.messages = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.interval.ms = null&lt;/P&gt;&lt;P&gt;    log.flush.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.flush.scheduler.interval.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.start.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.index.interval.bytes = 4096&lt;/P&gt;&lt;P&gt;    log.index.size.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    log.message.downconversion.enable = true&lt;/P&gt;&lt;P&gt;    log.message.format.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    log.message.timestamp.difference.max.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.message.timestamp.type = CreateTime&lt;/P&gt;&lt;P&gt;    log.preallocate = false&lt;/P&gt;&lt;P&gt;    log.retention.bytes = -1&lt;/P&gt;&lt;P&gt;    log.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    log.retention.hours = 168&lt;/P&gt;&lt;P&gt;    log.retention.minutes = null&lt;/P&gt;&lt;P&gt;    log.retention.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.hours = 168&lt;/P&gt;&lt;P&gt;    log.roll.jitter.hours = 0&lt;/P&gt;&lt;P&gt;    log.roll.jitter.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.ms = null&lt;/P&gt;&lt;P&gt;    log.segment.bytes = 1073741824&lt;/P&gt;&lt;P&gt;    log.segment.delete.delay.ms = 60000&lt;/P&gt;&lt;P&gt;    max.connections.per.ip = 2147483647&lt;/P&gt;&lt;P&gt;    max.connections.per.ip.overrides = &lt;/P&gt;&lt;P&gt;    max.incremental.fetch.session.cache.slots = 1000&lt;/P&gt;&lt;P&gt;    message.max.bytes = 1000000&lt;/P&gt;&lt;P&gt;    metric.reporters = []&lt;/P&gt;&lt;P&gt;    metrics.num.samples = 2&lt;/P&gt;&lt;P&gt;    metrics.recording.level = 
INFO&lt;/P&gt;&lt;P&gt;    metrics.sample.window.ms = 30000&lt;/P&gt;&lt;P&gt;    min.insync.replicas = 1&lt;/P&gt;&lt;P&gt;    num.io.threads = 8&lt;/P&gt;&lt;P&gt;    num.network.threads = 3&lt;/P&gt;&lt;P&gt;    num.partitions = 1&lt;/P&gt;&lt;P&gt;    num.recovery.threads.per.data.dir = 1&lt;/P&gt;&lt;P&gt;    num.replica.alter.log.dirs.threads = null&lt;/P&gt;&lt;P&gt;    num.replica.fetchers = 1&lt;/P&gt;&lt;P&gt;    offset.metadata.max.bytes = 4096&lt;/P&gt;&lt;P&gt;    offsets.commit.required.acks = -1&lt;/P&gt;&lt;P&gt;    offsets.commit.timeout.ms = 5000&lt;/P&gt;&lt;P&gt;    offsets.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    offsets.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    offsets.retention.minutes = 86400000&lt;/P&gt;&lt;P&gt;    offsets.topic.compression.codec = 0&lt;/P&gt;&lt;P&gt;    offsets.topic.num.partitions = 50&lt;/P&gt;&lt;P&gt;    offsets.topic.replication.factor = 3&lt;/P&gt;&lt;P&gt;    offsets.topic.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding&lt;/P&gt;&lt;P&gt;    password.encoder.iterations = 4096&lt;/P&gt;&lt;P&gt;    password.encoder.key.length = 128&lt;/P&gt;&lt;P&gt;    password.encoder.keyfactory.algorithm = null&lt;/P&gt;&lt;P&gt;    password.encoder.old.secret = null&lt;/P&gt;&lt;P&gt;    password.encoder.secret = null&lt;/P&gt;&lt;P&gt;    port = 6667&lt;/P&gt;&lt;P&gt;    principal.builder.class = null&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.entry.expiration.ms = 300000&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.max.size = 1000&lt;/P&gt;&lt;P&gt;    producer.metrics.enable = false&lt;/P&gt;&lt;P&gt;    producer.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    queued.max.request.bytes = -1&lt;/P&gt;&lt;P&gt;    queued.max.requests = 500&lt;/P&gt;&lt;P&gt;    quota.consumer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.producer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.window.num = 
11&lt;/P&gt;&lt;P&gt;    quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.backoff.ms = 1000&lt;/P&gt;&lt;P&gt;    replica.fetch.max.bytes = 1048576&lt;/P&gt;&lt;P&gt;    replica.fetch.min.bytes = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.response.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    replica.fetch.wait.max.ms = 500&lt;/P&gt;&lt;P&gt;    replica.high.watermark.checkpoint.interval.ms = 5000&lt;/P&gt;&lt;P&gt;    replica.lag.time.max.ms = 10000&lt;/P&gt;&lt;P&gt;    replica.socket.receive.buffer.bytes = 65536&lt;/P&gt;&lt;P&gt;    replica.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    request.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    reserved.broker.max.id = 1000&lt;/P&gt;&lt;P&gt;    sasl.client.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.enabled.mechanisms = [GSSAPI]&lt;/P&gt;&lt;P&gt;    sasl.jaas.config = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.kinit.cmd = /usr/bin/kinit&lt;/P&gt;&lt;P&gt;    sasl.kerberos.min.time.before.relogin = 60000&lt;/P&gt;&lt;P&gt;    sasl.kerberos.principal.to.local.rules = [DEFAULT]&lt;/P&gt;&lt;P&gt;    sasl.kerberos.service.name = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.buffer.seconds = 300&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.min.period.seconds = 60&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.mechanism.inter.broker.protocol = GSSAPI&lt;/P&gt;&lt;P&gt;    sasl.server.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    security.inter.broker.protocol = PLAINTEXT&lt;/P&gt;&lt;P&gt;    socket.receive.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt; 
   socket.request.max.bytes = 104857600&lt;/P&gt;&lt;P&gt;    socket.send.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    ssl.cipher.suites = []&lt;/P&gt;&lt;P&gt;    ssl.client.auth = none&lt;/P&gt;&lt;P&gt;    ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]&lt;/P&gt;&lt;P&gt;    ssl.endpoint.identification.algorithm = https&lt;/P&gt;&lt;P&gt;    ssl.key.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keymanager.algorithm = SunX509&lt;/P&gt;&lt;P&gt;    ssl.keystore.location = &lt;/P&gt;&lt;P&gt;    ssl.keystore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keystore.type = JKS&lt;/P&gt;&lt;P&gt;    ssl.protocol = TLS&lt;/P&gt;&lt;P&gt;    ssl.provider = null&lt;/P&gt;&lt;P&gt;    ssl.secure.random.implementation = null&lt;/P&gt;&lt;P&gt;    ssl.trustmanager.algorithm = PKIX&lt;/P&gt;&lt;P&gt;    ssl.truststore.location = &lt;/P&gt;&lt;P&gt;    ssl.truststore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.truststore.type = JKS&lt;/P&gt;&lt;P&gt;    transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    transaction.max.timeout.ms = 900000&lt;/P&gt;&lt;P&gt;    transaction.remove.expired.transaction.cleanup.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    transaction.state.log.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    transaction.state.log.min.isr = 2&lt;/P&gt;&lt;P&gt;    transaction.state.log.num.partitions = 50&lt;/P&gt;&lt;P&gt;    transaction.state.log.replication.factor = 3&lt;/P&gt;&lt;P&gt;    transaction.state.log.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    transactional.id.expiration.ms = 604800000&lt;/P&gt;&lt;P&gt;    unclean.leader.election.enable = false&lt;/P&gt;&lt;P&gt;    zookeeper.connect = am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181&lt;/P&gt;&lt;P&gt;    zookeeper.connection.timeout.ms = 25000&lt;/P&gt;&lt;P&gt;    zookeeper.max.in.flight.requests = 10&lt;/P&gt;&lt;P&gt;    zookeeper.session.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    zookeeper.set.acl = 
false&lt;/P&gt;&lt;P&gt;    zookeeper.sync.time.ms = 2000&lt;/P&gt;&lt;P&gt; (kafka.server.KafkaConfig)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:19,325] INFO [ThrottledChannelReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:19,325] INFO [ThrottledChannelReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:19,326] INFO [ThrottledChannelReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:19,358] ERROR [KafkaServer id=1001] Fatal error during KafkaServer startup. Prepare to shutdown (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;org.apache.kafka.common.KafkaException: Failed to acquire lock on file .lock in /kafka-logs. A Kafka instance in another process or thread is using this directory.&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$$anonfun$lockLogDirs$1.apply(LogManager.scala:240)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$$anonfun$lockLogDirs$1.apply(LogManager.scala:236)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)&lt;/P&gt;&lt;P&gt;    at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$class.flatMap(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.AbstractTraversable.flatMap(Traversable.scala:104)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager.lockLogDirs(LogManager.scala:236)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager.&amp;lt;init&amp;gt;(LogManager.scala:97)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$.apply(LogManager.scala:958)&lt;/P&gt;&lt;P&gt;    at 
kafka.server.KafkaServer.startup(KafkaServer.scala:237)&lt;/P&gt;&lt;P&gt;    at kafka.server.KafkaServerStartable.startup(KafkaServerStartable.scala:38)&lt;/P&gt;&lt;P&gt;    at kafka.Kafka$.main(Kafka.scala:75)&lt;/P&gt;&lt;P&gt;    at kafka.Kafka.main(Kafka.scala)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:19,361] INFO [KafkaServer id=1001] shutting down (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:19,363] INFO [ZooKeeperClient] Closing. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:19,367] INFO [ZooKeeperClient] Closed. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:19,368] INFO [ThrottledChannelReaper-Fetch]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:20,325] INFO [ThrottledChannelReaper-Fetch]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:20,325] INFO [ThrottledChannelReaper-Fetch]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:20,326] INFO [ThrottledChannelReaper-Produce]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:21,325] INFO [ThrottledChannelReaper-Produce]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:21,325] INFO [ThrottledChannelReaper-Produce]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:21,325] INFO [ThrottledChannelReaper-Request]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:21,326] INFO [ThrottledChannelReaper-Request]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:21,326] INFO [ThrottledChannelReaper-Request]: Shutdown completed 
(kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:21,331] INFO [KafkaServer id=1001] shut down completed (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:21,331] ERROR Exiting Kafka. (kafka.server.KafkaServerStartable)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:21,333] INFO [KafkaServer id=1001] shutting down (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:30:51,711] INFO [GroupMetadataManager brokerId=1001] Removed 0 expired offsets in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:36:58,207] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:36:58,757] INFO starting (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:36:58,758] INFO Connecting to zookeeper on am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181 (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:36:58,777] INFO [ZooKeeperClient] Initializing a new session to am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:36:58,804] INFO [ZooKeeperClient] Waiting until connected. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:36:58,863] INFO [ZooKeeperClient] Connected. 
(kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:36:59,129] INFO Cluster ID = z-4P_uf-RzmpT2QvMnOD2g (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:36:59,204] INFO KafkaConfig values: &lt;/P&gt;&lt;P&gt;    advertised.host.name = null&lt;/P&gt;&lt;P&gt;    advertised.listeners = null&lt;/P&gt;&lt;P&gt;    advertised.port = null&lt;/P&gt;&lt;P&gt;    alter.config.policy.class.name = null&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    authorizer.class.name = &lt;/P&gt;&lt;P&gt;    auto.create.topics.enable = true&lt;/P&gt;&lt;P&gt;    auto.leader.rebalance.enable = true&lt;/P&gt;&lt;P&gt;    background.threads = 10&lt;/P&gt;&lt;P&gt;    broker.id = -1&lt;/P&gt;&lt;P&gt;    broker.id.generation.enable = true&lt;/P&gt;&lt;P&gt;    broker.rack = null&lt;/P&gt;&lt;P&gt;    client.quota.callback.class = null&lt;/P&gt;&lt;P&gt;    compression.type = producer&lt;/P&gt;&lt;P&gt;    connections.max.idle.ms = 600000&lt;/P&gt;&lt;P&gt;    controlled.shutdown.enable = true&lt;/P&gt;&lt;P&gt;    controlled.shutdown.max.retries = 3&lt;/P&gt;&lt;P&gt;    controlled.shutdown.retry.backoff.ms = 5000&lt;/P&gt;&lt;P&gt;    controller.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    create.topic.policy.class.name = null&lt;/P&gt;&lt;P&gt;    default.replication.factor = 1&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.check.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.time.ms = 86400000&lt;/P&gt;&lt;P&gt;    delegation.token.master.key = null&lt;/P&gt;&lt;P&gt;    delegation.token.max.lifetime.ms = 604800000&lt;/P&gt;&lt;P&gt;    delete.records.purgatory.purge.interval.requests = 1&lt;/P&gt;&lt;P&gt;    delete.topic.enable = true&lt;/P&gt;&lt;P&gt;    fetch.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    group.initial.rebalance.delay.ms = 3000&lt;/P&gt;&lt;P&gt;    group.max.session.timeout.ms = 
300000&lt;/P&gt;&lt;P&gt;    group.min.session.timeout.ms = 6000&lt;/P&gt;&lt;P&gt;    host.name = &lt;/P&gt;&lt;P&gt;    inter.broker.listener.name = null&lt;/P&gt;&lt;P&gt;    inter.broker.protocol.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    leader.imbalance.check.interval.seconds = 300&lt;/P&gt;&lt;P&gt;    leader.imbalance.per.broker.percentage = 10&lt;/P&gt;&lt;P&gt;    listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL&lt;/P&gt;&lt;P&gt;    listeners = PLAINTEXT://am-bigdata-01.am.local:6667&lt;/P&gt;&lt;P&gt;    log.cleaner.backoff.ms = 15000&lt;/P&gt;&lt;P&gt;    log.cleaner.dedupe.buffer.size = 134217728&lt;/P&gt;&lt;P&gt;    log.cleaner.delete.retention.ms = 86400000&lt;/P&gt;&lt;P&gt;    log.cleaner.enable = true&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.load.factor = 0.9&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.size = 524288&lt;/P&gt;&lt;P&gt;    log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308&lt;/P&gt;&lt;P&gt;    log.cleaner.min.cleanable.ratio = 0.5&lt;/P&gt;&lt;P&gt;    log.cleaner.min.compaction.lag.ms = 0&lt;/P&gt;&lt;P&gt;    log.cleaner.threads = 1&lt;/P&gt;&lt;P&gt;    log.cleanup.policy = [delete]&lt;/P&gt;&lt;P&gt;    log.dir = /tmp/kafka-logs&lt;/P&gt;&lt;P&gt;    log.dirs = /kafka-logs&lt;/P&gt;&lt;P&gt;    log.flush.interval.messages = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.interval.ms = null&lt;/P&gt;&lt;P&gt;    log.flush.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.flush.scheduler.interval.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.start.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.index.interval.bytes = 4096&lt;/P&gt;&lt;P&gt;    log.index.size.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    log.message.downconversion.enable = true&lt;/P&gt;&lt;P&gt;    log.message.format.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    log.message.timestamp.difference.max.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    
log.message.timestamp.type = CreateTime&lt;/P&gt;&lt;P&gt;    log.preallocate = false&lt;/P&gt;&lt;P&gt;    log.retention.bytes = -1&lt;/P&gt;&lt;P&gt;    log.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    log.retention.hours = 168&lt;/P&gt;&lt;P&gt;    log.retention.minutes = null&lt;/P&gt;&lt;P&gt;    log.retention.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.hours = 168&lt;/P&gt;&lt;P&gt;    log.roll.jitter.hours = 0&lt;/P&gt;&lt;P&gt;    log.roll.jitter.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.ms = null&lt;/P&gt;&lt;P&gt;    log.segment.bytes = 1073741824&lt;/P&gt;&lt;P&gt;    log.segment.delete.delay.ms = 60000&lt;/P&gt;&lt;P&gt;    max.connections.per.ip = 2147483647&lt;/P&gt;&lt;P&gt;    max.connections.per.ip.overrides = &lt;/P&gt;&lt;P&gt;    max.incremental.fetch.session.cache.slots = 1000&lt;/P&gt;&lt;P&gt;    message.max.bytes = 1000000&lt;/P&gt;&lt;P&gt;    metric.reporters = []&lt;/P&gt;&lt;P&gt;    metrics.num.samples = 2&lt;/P&gt;&lt;P&gt;    metrics.recording.level = INFO&lt;/P&gt;&lt;P&gt;    metrics.sample.window.ms = 30000&lt;/P&gt;&lt;P&gt;    min.insync.replicas = 1&lt;/P&gt;&lt;P&gt;    num.io.threads = 8&lt;/P&gt;&lt;P&gt;    num.network.threads = 3&lt;/P&gt;&lt;P&gt;    num.partitions = 1&lt;/P&gt;&lt;P&gt;    num.recovery.threads.per.data.dir = 1&lt;/P&gt;&lt;P&gt;    num.replica.alter.log.dirs.threads = null&lt;/P&gt;&lt;P&gt;    num.replica.fetchers = 1&lt;/P&gt;&lt;P&gt;    offset.metadata.max.bytes = 4096&lt;/P&gt;&lt;P&gt;    offsets.commit.required.acks = -1&lt;/P&gt;&lt;P&gt;    offsets.commit.timeout.ms = 5000&lt;/P&gt;&lt;P&gt;    offsets.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    offsets.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    offsets.retention.minutes = 86400000&lt;/P&gt;&lt;P&gt;    offsets.topic.compression.codec = 0&lt;/P&gt;&lt;P&gt;    offsets.topic.num.partitions = 50&lt;/P&gt;&lt;P&gt;    offsets.topic.replication.factor = 3&lt;/P&gt;&lt;P&gt;    offsets.topic.segment.bytes = 
104857600&lt;/P&gt;&lt;P&gt;    password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding&lt;/P&gt;&lt;P&gt;    password.encoder.iterations = 4096&lt;/P&gt;&lt;P&gt;    password.encoder.key.length = 128&lt;/P&gt;&lt;P&gt;    password.encoder.keyfactory.algorithm = null&lt;/P&gt;&lt;P&gt;    password.encoder.old.secret = null&lt;/P&gt;&lt;P&gt;    password.encoder.secret = null&lt;/P&gt;&lt;P&gt;    port = 6667&lt;/P&gt;&lt;P&gt;    principal.builder.class = null&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.entry.expiration.ms = 300000&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.max.size = 1000&lt;/P&gt;&lt;P&gt;    producer.metrics.enable = false&lt;/P&gt;&lt;P&gt;    producer.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    queued.max.request.bytes = -1&lt;/P&gt;&lt;P&gt;    queued.max.requests = 500&lt;/P&gt;&lt;P&gt;    quota.consumer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.producer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.window.num = 11&lt;/P&gt;&lt;P&gt;    quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.backoff.ms = 1000&lt;/P&gt;&lt;P&gt;    replica.fetch.max.bytes = 1048576&lt;/P&gt;&lt;P&gt;    replica.fetch.min.bytes = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.response.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    replica.fetch.wait.max.ms = 500&lt;/P&gt;&lt;P&gt;    replica.high.watermark.checkpoint.interval.ms = 5000&lt;/P&gt;&lt;P&gt;    replica.lag.time.max.ms = 10000&lt;/P&gt;&lt;P&gt;    replica.socket.receive.buffer.bytes = 65536&lt;/P&gt;&lt;P&gt;    replica.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    request.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    reserved.broker.max.id = 1000&lt;/P&gt;&lt;P&gt;    sasl.client.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.enabled.mechanisms = [GSSAPI]&lt;/P&gt;&lt;P&gt;    sasl.jaas.config = null&lt;/P&gt;&lt;P&gt;    
sasl.kerberos.kinit.cmd = /usr/bin/kinit&lt;/P&gt;&lt;P&gt;    sasl.kerberos.min.time.before.relogin = 60000&lt;/P&gt;&lt;P&gt;    sasl.kerberos.principal.to.local.rules = [DEFAULT]&lt;/P&gt;&lt;P&gt;    sasl.kerberos.service.name = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.buffer.seconds = 300&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.min.period.seconds = 60&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.mechanism.inter.broker.protocol = GSSAPI&lt;/P&gt;&lt;P&gt;    sasl.server.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    security.inter.broker.protocol = PLAINTEXT&lt;/P&gt;&lt;P&gt;    socket.receive.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    socket.request.max.bytes = 104857600&lt;/P&gt;&lt;P&gt;    socket.send.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    ssl.cipher.suites = []&lt;/P&gt;&lt;P&gt;    ssl.client.auth = none&lt;/P&gt;&lt;P&gt;    ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]&lt;/P&gt;&lt;P&gt;    ssl.endpoint.identification.algorithm = https&lt;/P&gt;&lt;P&gt;    ssl.key.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keymanager.algorithm = SunX509&lt;/P&gt;&lt;P&gt;    ssl.keystore.location = &lt;/P&gt;&lt;P&gt;    ssl.keystore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keystore.type = JKS&lt;/P&gt;&lt;P&gt;    ssl.protocol = TLS&lt;/P&gt;&lt;P&gt;    ssl.provider = null&lt;/P&gt;&lt;P&gt;    ssl.secure.random.implementation = null&lt;/P&gt;&lt;P&gt;    ssl.trustmanager.algorithm = PKIX&lt;/P&gt;&lt;P&gt;    ssl.truststore.location = &lt;/P&gt;&lt;P&gt;    ssl.truststore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.truststore.type = JKS&lt;/P&gt;&lt;P&gt;    
transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    transaction.max.timeout.ms = 900000&lt;/P&gt;&lt;P&gt;    transaction.remove.expired.transaction.cleanup.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    transaction.state.log.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    transaction.state.log.min.isr = 2&lt;/P&gt;&lt;P&gt;    transaction.state.log.num.partitions = 50&lt;/P&gt;&lt;P&gt;    transaction.state.log.replication.factor = 3&lt;/P&gt;&lt;P&gt;    transaction.state.log.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    transactional.id.expiration.ms = 604800000&lt;/P&gt;&lt;P&gt;    unclean.leader.election.enable = false&lt;/P&gt;&lt;P&gt;    zookeeper.connect = am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181&lt;/P&gt;&lt;P&gt;    zookeeper.connection.timeout.ms = 25000&lt;/P&gt;&lt;P&gt;    zookeeper.max.in.flight.requests = 10&lt;/P&gt;&lt;P&gt;    zookeeper.session.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    zookeeper.set.acl = false&lt;/P&gt;&lt;P&gt;    zookeeper.sync.time.ms = 2000&lt;/P&gt;&lt;P&gt; (kafka.server.KafkaConfig)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:36:59,212] INFO KafkaConfig values: &lt;/P&gt;&lt;P&gt;    advertised.host.name = null&lt;/P&gt;&lt;P&gt;    advertised.listeners = null&lt;/P&gt;&lt;P&gt;    advertised.port = null&lt;/P&gt;&lt;P&gt;    alter.config.policy.class.name = null&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    authorizer.class.name = &lt;/P&gt;&lt;P&gt;    auto.create.topics.enable = true&lt;/P&gt;&lt;P&gt;    auto.leader.rebalance.enable = true&lt;/P&gt;&lt;P&gt;    background.threads = 10&lt;/P&gt;&lt;P&gt;    broker.id = -1&lt;/P&gt;&lt;P&gt;    broker.id.generation.enable = true&lt;/P&gt;&lt;P&gt;    broker.rack = null&lt;/P&gt;&lt;P&gt;    client.quota.callback.class = null&lt;/P&gt;&lt;P&gt;    compression.type = 
producer&lt;/P&gt;&lt;P&gt;    connections.max.idle.ms = 600000&lt;/P&gt;&lt;P&gt;    controlled.shutdown.enable = true&lt;/P&gt;&lt;P&gt;    controlled.shutdown.max.retries = 3&lt;/P&gt;&lt;P&gt;    controlled.shutdown.retry.backoff.ms = 5000&lt;/P&gt;&lt;P&gt;    controller.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    create.topic.policy.class.name = null&lt;/P&gt;&lt;P&gt;    default.replication.factor = 1&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.check.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.time.ms = 86400000&lt;/P&gt;&lt;P&gt;    delegation.token.master.key = null&lt;/P&gt;&lt;P&gt;    delegation.token.max.lifetime.ms = 604800000&lt;/P&gt;&lt;P&gt;    delete.records.purgatory.purge.interval.requests = 1&lt;/P&gt;&lt;P&gt;    delete.topic.enable = true&lt;/P&gt;&lt;P&gt;    fetch.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    group.initial.rebalance.delay.ms = 3000&lt;/P&gt;&lt;P&gt;    group.max.session.timeout.ms = 300000&lt;/P&gt;&lt;P&gt;    group.min.session.timeout.ms = 6000&lt;/P&gt;&lt;P&gt;    host.name = &lt;/P&gt;&lt;P&gt;    inter.broker.listener.name = null&lt;/P&gt;&lt;P&gt;    inter.broker.protocol.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    leader.imbalance.check.interval.seconds = 300&lt;/P&gt;&lt;P&gt;    leader.imbalance.per.broker.percentage = 10&lt;/P&gt;&lt;P&gt;    listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL&lt;/P&gt;&lt;P&gt;    listeners = PLAINTEXT://am-bigdata-01.am.local:6667&lt;/P&gt;&lt;P&gt;    log.cleaner.backoff.ms = 15000&lt;/P&gt;&lt;P&gt;    log.cleaner.dedupe.buffer.size = 134217728&lt;/P&gt;&lt;P&gt;    log.cleaner.delete.retention.ms = 86400000&lt;/P&gt;&lt;P&gt;    log.cleaner.enable = true&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.load.factor = 0.9&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.size = 524288&lt;/P&gt;&lt;P&gt;    log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308&lt;/P&gt;&lt;P&gt;   
 log.cleaner.min.cleanable.ratio = 0.5&lt;/P&gt;&lt;P&gt;    log.cleaner.min.compaction.lag.ms = 0&lt;/P&gt;&lt;P&gt;    log.cleaner.threads = 1&lt;/P&gt;&lt;P&gt;    log.cleanup.policy = [delete]&lt;/P&gt;&lt;P&gt;    log.dir = /tmp/kafka-logs&lt;/P&gt;&lt;P&gt;    log.dirs = /kafka-logs&lt;/P&gt;&lt;P&gt;    log.flush.interval.messages = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.interval.ms = null&lt;/P&gt;&lt;P&gt;    log.flush.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.flush.scheduler.interval.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.start.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.index.interval.bytes = 4096&lt;/P&gt;&lt;P&gt;    log.index.size.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    log.message.downconversion.enable = true&lt;/P&gt;&lt;P&gt;    log.message.format.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    log.message.timestamp.difference.max.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.message.timestamp.type = CreateTime&lt;/P&gt;&lt;P&gt;    log.preallocate = false&lt;/P&gt;&lt;P&gt;    log.retention.bytes = -1&lt;/P&gt;&lt;P&gt;    log.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    log.retention.hours = 168&lt;/P&gt;&lt;P&gt;    log.retention.minutes = null&lt;/P&gt;&lt;P&gt;    log.retention.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.hours = 168&lt;/P&gt;&lt;P&gt;    log.roll.jitter.hours = 0&lt;/P&gt;&lt;P&gt;    log.roll.jitter.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.ms = null&lt;/P&gt;&lt;P&gt;    log.segment.bytes = 1073741824&lt;/P&gt;&lt;P&gt;    log.segment.delete.delay.ms = 60000&lt;/P&gt;&lt;P&gt;    max.connections.per.ip = 2147483647&lt;/P&gt;&lt;P&gt;    max.connections.per.ip.overrides = &lt;/P&gt;&lt;P&gt;    max.incremental.fetch.session.cache.slots = 1000&lt;/P&gt;&lt;P&gt;    message.max.bytes = 1000000&lt;/P&gt;&lt;P&gt;    metric.reporters = []&lt;/P&gt;&lt;P&gt;    metrics.num.samples = 2&lt;/P&gt;&lt;P&gt;    metrics.recording.level = 
INFO&lt;/P&gt;&lt;P&gt;    metrics.sample.window.ms = 30000&lt;/P&gt;&lt;P&gt;    min.insync.replicas = 1&lt;/P&gt;&lt;P&gt;    num.io.threads = 8&lt;/P&gt;&lt;P&gt;    num.network.threads = 3&lt;/P&gt;&lt;P&gt;    num.partitions = 1&lt;/P&gt;&lt;P&gt;    num.recovery.threads.per.data.dir = 1&lt;/P&gt;&lt;P&gt;    num.replica.alter.log.dirs.threads = null&lt;/P&gt;&lt;P&gt;    num.replica.fetchers = 1&lt;/P&gt;&lt;P&gt;    offset.metadata.max.bytes = 4096&lt;/P&gt;&lt;P&gt;    offsets.commit.required.acks = -1&lt;/P&gt;&lt;P&gt;    offsets.commit.timeout.ms = 5000&lt;/P&gt;&lt;P&gt;    offsets.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    offsets.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    offsets.retention.minutes = 86400000&lt;/P&gt;&lt;P&gt;    offsets.topic.compression.codec = 0&lt;/P&gt;&lt;P&gt;    offsets.topic.num.partitions = 50&lt;/P&gt;&lt;P&gt;    offsets.topic.replication.factor = 3&lt;/P&gt;&lt;P&gt;    offsets.topic.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding&lt;/P&gt;&lt;P&gt;    password.encoder.iterations = 4096&lt;/P&gt;&lt;P&gt;    password.encoder.key.length = 128&lt;/P&gt;&lt;P&gt;    password.encoder.keyfactory.algorithm = null&lt;/P&gt;&lt;P&gt;    password.encoder.old.secret = null&lt;/P&gt;&lt;P&gt;    password.encoder.secret = null&lt;/P&gt;&lt;P&gt;    port = 6667&lt;/P&gt;&lt;P&gt;    principal.builder.class = null&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.entry.expiration.ms = 300000&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.max.size = 1000&lt;/P&gt;&lt;P&gt;    producer.metrics.enable = false&lt;/P&gt;&lt;P&gt;    producer.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    queued.max.request.bytes = -1&lt;/P&gt;&lt;P&gt;    queued.max.requests = 500&lt;/P&gt;&lt;P&gt;    quota.consumer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.producer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.window.num = 
11&lt;/P&gt;&lt;P&gt;    quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.backoff.ms = 1000&lt;/P&gt;&lt;P&gt;    replica.fetch.max.bytes = 1048576&lt;/P&gt;&lt;P&gt;    replica.fetch.min.bytes = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.response.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    replica.fetch.wait.max.ms = 500&lt;/P&gt;&lt;P&gt;    replica.high.watermark.checkpoint.interval.ms = 5000&lt;/P&gt;&lt;P&gt;    replica.lag.time.max.ms = 10000&lt;/P&gt;&lt;P&gt;    replica.socket.receive.buffer.bytes = 65536&lt;/P&gt;&lt;P&gt;    replica.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    request.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    reserved.broker.max.id = 1000&lt;/P&gt;&lt;P&gt;    sasl.client.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.enabled.mechanisms = [GSSAPI]&lt;/P&gt;&lt;P&gt;    sasl.jaas.config = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.kinit.cmd = /usr/bin/kinit&lt;/P&gt;&lt;P&gt;    sasl.kerberos.min.time.before.relogin = 60000&lt;/P&gt;&lt;P&gt;    sasl.kerberos.principal.to.local.rules = [DEFAULT]&lt;/P&gt;&lt;P&gt;    sasl.kerberos.service.name = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.buffer.seconds = 300&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.min.period.seconds = 60&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.mechanism.inter.broker.protocol = GSSAPI&lt;/P&gt;&lt;P&gt;    sasl.server.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    security.inter.broker.protocol = PLAINTEXT&lt;/P&gt;&lt;P&gt;    socket.receive.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt; 
   socket.request.max.bytes = 104857600&lt;/P&gt;&lt;P&gt;    socket.send.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    ssl.cipher.suites = []&lt;/P&gt;&lt;P&gt;    ssl.client.auth = none&lt;/P&gt;&lt;P&gt;    ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]&lt;/P&gt;&lt;P&gt;    ssl.endpoint.identification.algorithm = https&lt;/P&gt;&lt;P&gt;    ssl.key.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keymanager.algorithm = SunX509&lt;/P&gt;&lt;P&gt;    ssl.keystore.location = &lt;/P&gt;&lt;P&gt;    ssl.keystore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keystore.type = JKS&lt;/P&gt;&lt;P&gt;    ssl.protocol = TLS&lt;/P&gt;&lt;P&gt;    ssl.provider = null&lt;/P&gt;&lt;P&gt;    ssl.secure.random.implementation = null&lt;/P&gt;&lt;P&gt;    ssl.trustmanager.algorithm = PKIX&lt;/P&gt;&lt;P&gt;    ssl.truststore.location = &lt;/P&gt;&lt;P&gt;    ssl.truststore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.truststore.type = JKS&lt;/P&gt;&lt;P&gt;    transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    transaction.max.timeout.ms = 900000&lt;/P&gt;&lt;P&gt;    transaction.remove.expired.transaction.cleanup.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    transaction.state.log.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    transaction.state.log.min.isr = 2&lt;/P&gt;&lt;P&gt;    transaction.state.log.num.partitions = 50&lt;/P&gt;&lt;P&gt;    transaction.state.log.replication.factor = 3&lt;/P&gt;&lt;P&gt;    transaction.state.log.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    transactional.id.expiration.ms = 604800000&lt;/P&gt;&lt;P&gt;    unclean.leader.election.enable = false&lt;/P&gt;&lt;P&gt;    zookeeper.connect = am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181&lt;/P&gt;&lt;P&gt;    zookeeper.connection.timeout.ms = 25000&lt;/P&gt;&lt;P&gt;    zookeeper.max.in.flight.requests = 10&lt;/P&gt;&lt;P&gt;    zookeeper.session.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    zookeeper.set.acl = 
false&lt;/P&gt;&lt;P&gt;    zookeeper.sync.time.ms = 2000&lt;/P&gt;&lt;P&gt; (kafka.server.KafkaConfig)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:36:59,237] INFO [ThrottledChannelReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:36:59,237] INFO [ThrottledChannelReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:36:59,238] INFO [ThrottledChannelReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:36:59,268] ERROR [KafkaServer id=1001] Fatal error during KafkaServer startup. Prepare to shutdown (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;org.apache.kafka.common.KafkaException: Failed to acquire lock on file .lock in /kafka-logs. A Kafka instance in another process or thread is using this directory.&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$$anonfun$lockLogDirs$1.apply(LogManager.scala:240)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$$anonfun$lockLogDirs$1.apply(LogManager.scala:236)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)&lt;/P&gt;&lt;P&gt;    at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$class.flatMap(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.AbstractTraversable.flatMap(Traversable.scala:104)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager.lockLogDirs(LogManager.scala:236)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager.&amp;lt;init&amp;gt;(LogManager.scala:97)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$.apply(LogManager.scala:958)&lt;/P&gt;&lt;P&gt;    at 
kafka.server.KafkaServer.startup(KafkaServer.scala:237)&lt;/P&gt;&lt;P&gt;    at kafka.server.KafkaServerStartable.startup(KafkaServerStartable.scala:38)&lt;/P&gt;&lt;P&gt;    at kafka.Kafka$.main(Kafka.scala:75)&lt;/P&gt;&lt;P&gt;    at kafka.Kafka.main(Kafka.scala)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:36:59,271] INFO [KafkaServer id=1001] shutting down (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:36:59,273] INFO [ZooKeeperClient] Closing. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:36:59,276] INFO [ZooKeeperClient] Closed. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:36:59,277] INFO [ThrottledChannelReaper-Fetch]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:37:00,238] INFO [ThrottledChannelReaper-Fetch]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:37:00,238] INFO [ThrottledChannelReaper-Fetch]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:37:00,239] INFO [ThrottledChannelReaper-Produce]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:37:01,238] INFO [ThrottledChannelReaper-Produce]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:37:01,238] INFO [ThrottledChannelReaper-Produce]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:37:01,238] INFO [ThrottledChannelReaper-Request]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:37:01,239] INFO [ThrottledChannelReaper-Request]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:37:01,239] INFO [ThrottledChannelReaper-Request]: Shutdown completed 
(kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:37:01,245] INFO [KafkaServer id=1001] shut down completed (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:37:01,246] ERROR Exiting Kafka. (kafka.server.KafkaServerStartable)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:37:01,248] INFO [KafkaServer id=1001] shutting down (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:40:51,711] INFO [GroupMetadataManager brokerId=1001] Removed 0 expired offsets in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:41:59,912] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:00,411] INFO starting (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:00,412] INFO Connecting to zookeeper on am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181 (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:00,425] INFO [ZooKeeperClient] Initializing a new session to am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:00,444] INFO [ZooKeeperClient] Waiting until connected. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:00,505] INFO [ZooKeeperClient] Connected. 
(kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:00,779] INFO Cluster ID = z-4P_uf-RzmpT2QvMnOD2g (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:00,846] INFO KafkaConfig values: &lt;/P&gt;&lt;P&gt;    advertised.host.name = null&lt;/P&gt;&lt;P&gt;    advertised.listeners = null&lt;/P&gt;&lt;P&gt;    advertised.port = null&lt;/P&gt;&lt;P&gt;    alter.config.policy.class.name = null&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    authorizer.class.name = &lt;/P&gt;&lt;P&gt;    auto.create.topics.enable = true&lt;/P&gt;&lt;P&gt;    auto.leader.rebalance.enable = true&lt;/P&gt;&lt;P&gt;    background.threads = 10&lt;/P&gt;&lt;P&gt;    broker.id = -1&lt;/P&gt;&lt;P&gt;    broker.id.generation.enable = true&lt;/P&gt;&lt;P&gt;    broker.rack = null&lt;/P&gt;&lt;P&gt;    client.quota.callback.class = null&lt;/P&gt;&lt;P&gt;    compression.type = producer&lt;/P&gt;&lt;P&gt;    connections.max.idle.ms = 600000&lt;/P&gt;&lt;P&gt;    controlled.shutdown.enable = true&lt;/P&gt;&lt;P&gt;    controlled.shutdown.max.retries = 3&lt;/P&gt;&lt;P&gt;    controlled.shutdown.retry.backoff.ms = 5000&lt;/P&gt;&lt;P&gt;    controller.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    create.topic.policy.class.name = null&lt;/P&gt;&lt;P&gt;    default.replication.factor = 1&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.check.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.time.ms = 86400000&lt;/P&gt;&lt;P&gt;    delegation.token.master.key = null&lt;/P&gt;&lt;P&gt;    delegation.token.max.lifetime.ms = 604800000&lt;/P&gt;&lt;P&gt;    delete.records.purgatory.purge.interval.requests = 1&lt;/P&gt;&lt;P&gt;    delete.topic.enable = true&lt;/P&gt;&lt;P&gt;    fetch.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    group.initial.rebalance.delay.ms = 3000&lt;/P&gt;&lt;P&gt;    group.max.session.timeout.ms = 
300000&lt;/P&gt;&lt;P&gt;    group.min.session.timeout.ms = 6000&lt;/P&gt;&lt;P&gt;    host.name = &lt;/P&gt;&lt;P&gt;    inter.broker.listener.name = null&lt;/P&gt;&lt;P&gt;    inter.broker.protocol.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    leader.imbalance.check.interval.seconds = 300&lt;/P&gt;&lt;P&gt;    leader.imbalance.per.broker.percentage = 10&lt;/P&gt;&lt;P&gt;    listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL&lt;/P&gt;&lt;P&gt;    listeners = PLAINTEXT://am-bigdata-01.am.local:6667&lt;/P&gt;&lt;P&gt;    log.cleaner.backoff.ms = 15000&lt;/P&gt;&lt;P&gt;    log.cleaner.dedupe.buffer.size = 134217728&lt;/P&gt;&lt;P&gt;    log.cleaner.delete.retention.ms = 86400000&lt;/P&gt;&lt;P&gt;    log.cleaner.enable = true&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.load.factor = 0.9&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.size = 524288&lt;/P&gt;&lt;P&gt;    log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308&lt;/P&gt;&lt;P&gt;    log.cleaner.min.cleanable.ratio = 0.5&lt;/P&gt;&lt;P&gt;    log.cleaner.min.compaction.lag.ms = 0&lt;/P&gt;&lt;P&gt;    log.cleaner.threads = 1&lt;/P&gt;&lt;P&gt;    log.cleanup.policy = [delete]&lt;/P&gt;&lt;P&gt;    log.dir = /tmp/kafka-logs&lt;/P&gt;&lt;P&gt;    log.dirs = /kafka-logs&lt;/P&gt;&lt;P&gt;    log.flush.interval.messages = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.interval.ms = null&lt;/P&gt;&lt;P&gt;    log.flush.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.flush.scheduler.interval.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.start.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.index.interval.bytes = 4096&lt;/P&gt;&lt;P&gt;    log.index.size.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    log.message.downconversion.enable = true&lt;/P&gt;&lt;P&gt;    log.message.format.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    log.message.timestamp.difference.max.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    
log.message.timestamp.type = CreateTime&lt;/P&gt;&lt;P&gt;    log.preallocate = false&lt;/P&gt;&lt;P&gt;    log.retention.bytes = -1&lt;/P&gt;&lt;P&gt;    log.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    log.retention.hours = 168&lt;/P&gt;&lt;P&gt;    log.retention.minutes = null&lt;/P&gt;&lt;P&gt;    log.retention.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.hours = 168&lt;/P&gt;&lt;P&gt;    log.roll.jitter.hours = 0&lt;/P&gt;&lt;P&gt;    log.roll.jitter.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.ms = null&lt;/P&gt;&lt;P&gt;    log.segment.bytes = 1073741824&lt;/P&gt;&lt;P&gt;    log.segment.delete.delay.ms = 60000&lt;/P&gt;&lt;P&gt;    max.connections.per.ip = 2147483647&lt;/P&gt;&lt;P&gt;    max.connections.per.ip.overrides = &lt;/P&gt;&lt;P&gt;    max.incremental.fetch.session.cache.slots = 1000&lt;/P&gt;&lt;P&gt;    message.max.bytes = 1000000&lt;/P&gt;&lt;P&gt;    metric.reporters = []&lt;/P&gt;&lt;P&gt;    metrics.num.samples = 2&lt;/P&gt;&lt;P&gt;    metrics.recording.level = INFO&lt;/P&gt;&lt;P&gt;    metrics.sample.window.ms = 30000&lt;/P&gt;&lt;P&gt;    min.insync.replicas = 1&lt;/P&gt;&lt;P&gt;    num.io.threads = 8&lt;/P&gt;&lt;P&gt;    num.network.threads = 3&lt;/P&gt;&lt;P&gt;    num.partitions = 1&lt;/P&gt;&lt;P&gt;    num.recovery.threads.per.data.dir = 1&lt;/P&gt;&lt;P&gt;    num.replica.alter.log.dirs.threads = null&lt;/P&gt;&lt;P&gt;    num.replica.fetchers = 1&lt;/P&gt;&lt;P&gt;    offset.metadata.max.bytes = 4096&lt;/P&gt;&lt;P&gt;    offsets.commit.required.acks = -1&lt;/P&gt;&lt;P&gt;    offsets.commit.timeout.ms = 5000&lt;/P&gt;&lt;P&gt;    offsets.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    offsets.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    offsets.retention.minutes = 86400000&lt;/P&gt;&lt;P&gt;    offsets.topic.compression.codec = 0&lt;/P&gt;&lt;P&gt;    offsets.topic.num.partitions = 50&lt;/P&gt;&lt;P&gt;    offsets.topic.replication.factor = 3&lt;/P&gt;&lt;P&gt;    offsets.topic.segment.bytes = 
104857600&lt;/P&gt;&lt;P&gt;    password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding&lt;/P&gt;&lt;P&gt;    password.encoder.iterations = 4096&lt;/P&gt;&lt;P&gt;    password.encoder.key.length = 128&lt;/P&gt;&lt;P&gt;    password.encoder.keyfactory.algorithm = null&lt;/P&gt;&lt;P&gt;    password.encoder.old.secret = null&lt;/P&gt;&lt;P&gt;    password.encoder.secret = null&lt;/P&gt;&lt;P&gt;    port = 6667&lt;/P&gt;&lt;P&gt;    principal.builder.class = null&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.entry.expiration.ms = 300000&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.max.size = 1000&lt;/P&gt;&lt;P&gt;    producer.metrics.enable = false&lt;/P&gt;&lt;P&gt;    producer.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    queued.max.request.bytes = -1&lt;/P&gt;&lt;P&gt;    queued.max.requests = 500&lt;/P&gt;&lt;P&gt;    quota.consumer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.producer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.window.num = 11&lt;/P&gt;&lt;P&gt;    quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.backoff.ms = 1000&lt;/P&gt;&lt;P&gt;    replica.fetch.max.bytes = 1048576&lt;/P&gt;&lt;P&gt;    replica.fetch.min.bytes = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.response.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    replica.fetch.wait.max.ms = 500&lt;/P&gt;&lt;P&gt;    replica.high.watermark.checkpoint.interval.ms = 5000&lt;/P&gt;&lt;P&gt;    replica.lag.time.max.ms = 10000&lt;/P&gt;&lt;P&gt;    replica.socket.receive.buffer.bytes = 65536&lt;/P&gt;&lt;P&gt;    replica.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    request.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    reserved.broker.max.id = 1000&lt;/P&gt;&lt;P&gt;    sasl.client.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.enabled.mechanisms = [GSSAPI]&lt;/P&gt;&lt;P&gt;    sasl.jaas.config = null&lt;/P&gt;&lt;P&gt;    
sasl.kerberos.kinit.cmd = /usr/bin/kinit&lt;/P&gt;&lt;P&gt;    sasl.kerberos.min.time.before.relogin = 60000&lt;/P&gt;&lt;P&gt;    sasl.kerberos.principal.to.local.rules = [DEFAULT]&lt;/P&gt;&lt;P&gt;    sasl.kerberos.service.name = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.buffer.seconds = 300&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.min.period.seconds = 60&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.mechanism.inter.broker.protocol = GSSAPI&lt;/P&gt;&lt;P&gt;    sasl.server.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    security.inter.broker.protocol = PLAINTEXT&lt;/P&gt;&lt;P&gt;    socket.receive.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    socket.request.max.bytes = 104857600&lt;/P&gt;&lt;P&gt;    socket.send.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    ssl.cipher.suites = []&lt;/P&gt;&lt;P&gt;    ssl.client.auth = none&lt;/P&gt;&lt;P&gt;    ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]&lt;/P&gt;&lt;P&gt;    ssl.endpoint.identification.algorithm = https&lt;/P&gt;&lt;P&gt;    ssl.key.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keymanager.algorithm = SunX509&lt;/P&gt;&lt;P&gt;    ssl.keystore.location = &lt;/P&gt;&lt;P&gt;    ssl.keystore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keystore.type = JKS&lt;/P&gt;&lt;P&gt;    ssl.protocol = TLS&lt;/P&gt;&lt;P&gt;    ssl.provider = null&lt;/P&gt;&lt;P&gt;    ssl.secure.random.implementation = null&lt;/P&gt;&lt;P&gt;    ssl.trustmanager.algorithm = PKIX&lt;/P&gt;&lt;P&gt;    ssl.truststore.location = &lt;/P&gt;&lt;P&gt;    ssl.truststore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.truststore.type = JKS&lt;/P&gt;&lt;P&gt;    
transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    transaction.max.timeout.ms = 900000&lt;/P&gt;&lt;P&gt;    transaction.remove.expired.transaction.cleanup.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    transaction.state.log.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    transaction.state.log.min.isr = 2&lt;/P&gt;&lt;P&gt;    transaction.state.log.num.partitions = 50&lt;/P&gt;&lt;P&gt;    transaction.state.log.replication.factor = 3&lt;/P&gt;&lt;P&gt;    transaction.state.log.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    transactional.id.expiration.ms = 604800000&lt;/P&gt;&lt;P&gt;    unclean.leader.election.enable = false&lt;/P&gt;&lt;P&gt;    zookeeper.connect = am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181&lt;/P&gt;&lt;P&gt;    zookeeper.connection.timeout.ms = 25000&lt;/P&gt;&lt;P&gt;    zookeeper.max.in.flight.requests = 10&lt;/P&gt;&lt;P&gt;    zookeeper.session.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    zookeeper.set.acl = false&lt;/P&gt;&lt;P&gt;    zookeeper.sync.time.ms = 2000&lt;/P&gt;&lt;P&gt; (kafka.server.KafkaConfig)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:00,853] INFO KafkaConfig values: &lt;/P&gt;&lt;P&gt;    advertised.host.name = null&lt;/P&gt;&lt;P&gt;    advertised.listeners = null&lt;/P&gt;&lt;P&gt;    advertised.port = null&lt;/P&gt;&lt;P&gt;    alter.config.policy.class.name = null&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    authorizer.class.name = &lt;/P&gt;&lt;P&gt;    auto.create.topics.enable = true&lt;/P&gt;&lt;P&gt;    auto.leader.rebalance.enable = true&lt;/P&gt;&lt;P&gt;    background.threads = 10&lt;/P&gt;&lt;P&gt;    broker.id = -1&lt;/P&gt;&lt;P&gt;    broker.id.generation.enable = true&lt;/P&gt;&lt;P&gt;    broker.rack = null&lt;/P&gt;&lt;P&gt;    client.quota.callback.class = null&lt;/P&gt;&lt;P&gt;    compression.type = 
producer&lt;/P&gt;&lt;P&gt;    connections.max.idle.ms = 600000&lt;/P&gt;&lt;P&gt;    controlled.shutdown.enable = true&lt;/P&gt;&lt;P&gt;    controlled.shutdown.max.retries = 3&lt;/P&gt;&lt;P&gt;    controlled.shutdown.retry.backoff.ms = 5000&lt;/P&gt;&lt;P&gt;    controller.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    create.topic.policy.class.name = null&lt;/P&gt;&lt;P&gt;    default.replication.factor = 1&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.check.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.time.ms = 86400000&lt;/P&gt;&lt;P&gt;    delegation.token.master.key = null&lt;/P&gt;&lt;P&gt;    delegation.token.max.lifetime.ms = 604800000&lt;/P&gt;&lt;P&gt;    delete.records.purgatory.purge.interval.requests = 1&lt;/P&gt;&lt;P&gt;    delete.topic.enable = true&lt;/P&gt;&lt;P&gt;    fetch.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    group.initial.rebalance.delay.ms = 3000&lt;/P&gt;&lt;P&gt;    group.max.session.timeout.ms = 300000&lt;/P&gt;&lt;P&gt;    group.min.session.timeout.ms = 6000&lt;/P&gt;&lt;P&gt;    host.name = &lt;/P&gt;&lt;P&gt;    inter.broker.listener.name = null&lt;/P&gt;&lt;P&gt;    inter.broker.protocol.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    leader.imbalance.check.interval.seconds = 300&lt;/P&gt;&lt;P&gt;    leader.imbalance.per.broker.percentage = 10&lt;/P&gt;&lt;P&gt;    listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL&lt;/P&gt;&lt;P&gt;    listeners = PLAINTEXT://am-bigdata-01.am.local:6667&lt;/P&gt;&lt;P&gt;    log.cleaner.backoff.ms = 15000&lt;/P&gt;&lt;P&gt;    log.cleaner.dedupe.buffer.size = 134217728&lt;/P&gt;&lt;P&gt;    log.cleaner.delete.retention.ms = 86400000&lt;/P&gt;&lt;P&gt;    log.cleaner.enable = true&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.load.factor = 0.9&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.size = 524288&lt;/P&gt;&lt;P&gt;    log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308&lt;/P&gt;&lt;P&gt;   
 log.cleaner.min.cleanable.ratio = 0.5&lt;/P&gt;&lt;P&gt;    log.cleaner.min.compaction.lag.ms = 0&lt;/P&gt;&lt;P&gt;    log.cleaner.threads = 1&lt;/P&gt;&lt;P&gt;    log.cleanup.policy = [delete]&lt;/P&gt;&lt;P&gt;    log.dir = /tmp/kafka-logs&lt;/P&gt;&lt;P&gt;    log.dirs = /kafka-logs&lt;/P&gt;&lt;P&gt;    log.flush.interval.messages = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.interval.ms = null&lt;/P&gt;&lt;P&gt;    log.flush.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.flush.scheduler.interval.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.start.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.index.interval.bytes = 4096&lt;/P&gt;&lt;P&gt;    log.index.size.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    log.message.downconversion.enable = true&lt;/P&gt;&lt;P&gt;    log.message.format.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    log.message.timestamp.difference.max.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.message.timestamp.type = CreateTime&lt;/P&gt;&lt;P&gt;    log.preallocate = false&lt;/P&gt;&lt;P&gt;    log.retention.bytes = -1&lt;/P&gt;&lt;P&gt;    log.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    log.retention.hours = 168&lt;/P&gt;&lt;P&gt;    log.retention.minutes = null&lt;/P&gt;&lt;P&gt;    log.retention.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.hours = 168&lt;/P&gt;&lt;P&gt;    log.roll.jitter.hours = 0&lt;/P&gt;&lt;P&gt;    log.roll.jitter.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.ms = null&lt;/P&gt;&lt;P&gt;    log.segment.bytes = 1073741824&lt;/P&gt;&lt;P&gt;    log.segment.delete.delay.ms = 60000&lt;/P&gt;&lt;P&gt;    max.connections.per.ip = 2147483647&lt;/P&gt;&lt;P&gt;    max.connections.per.ip.overrides = &lt;/P&gt;&lt;P&gt;    max.incremental.fetch.session.cache.slots = 1000&lt;/P&gt;&lt;P&gt;    message.max.bytes = 1000000&lt;/P&gt;&lt;P&gt;    metric.reporters = []&lt;/P&gt;&lt;P&gt;    metrics.num.samples = 2&lt;/P&gt;&lt;P&gt;    metrics.recording.level = 
INFO&lt;/P&gt;&lt;P&gt;    metrics.sample.window.ms = 30000&lt;/P&gt;&lt;P&gt;    min.insync.replicas = 1&lt;/P&gt;&lt;P&gt;    num.io.threads = 8&lt;/P&gt;&lt;P&gt;    num.network.threads = 3&lt;/P&gt;&lt;P&gt;    num.partitions = 1&lt;/P&gt;&lt;P&gt;    num.recovery.threads.per.data.dir = 1&lt;/P&gt;&lt;P&gt;    num.replica.alter.log.dirs.threads = null&lt;/P&gt;&lt;P&gt;    num.replica.fetchers = 1&lt;/P&gt;&lt;P&gt;    offset.metadata.max.bytes = 4096&lt;/P&gt;&lt;P&gt;    offsets.commit.required.acks = -1&lt;/P&gt;&lt;P&gt;    offsets.commit.timeout.ms = 5000&lt;/P&gt;&lt;P&gt;    offsets.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    offsets.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    offsets.retention.minutes = 86400000&lt;/P&gt;&lt;P&gt;    offsets.topic.compression.codec = 0&lt;/P&gt;&lt;P&gt;    offsets.topic.num.partitions = 50&lt;/P&gt;&lt;P&gt;    offsets.topic.replication.factor = 3&lt;/P&gt;&lt;P&gt;    offsets.topic.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding&lt;/P&gt;&lt;P&gt;    password.encoder.iterations = 4096&lt;/P&gt;&lt;P&gt;    password.encoder.key.length = 128&lt;/P&gt;&lt;P&gt;    password.encoder.keyfactory.algorithm = null&lt;/P&gt;&lt;P&gt;    password.encoder.old.secret = null&lt;/P&gt;&lt;P&gt;    password.encoder.secret = null&lt;/P&gt;&lt;P&gt;    port = 6667&lt;/P&gt;&lt;P&gt;    principal.builder.class = null&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.entry.expiration.ms = 300000&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.max.size = 1000&lt;/P&gt;&lt;P&gt;    producer.metrics.enable = false&lt;/P&gt;&lt;P&gt;    producer.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    queued.max.request.bytes = -1&lt;/P&gt;&lt;P&gt;    queued.max.requests = 500&lt;/P&gt;&lt;P&gt;    quota.consumer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.producer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.window.num = 
11&lt;/P&gt;&lt;P&gt;    quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.backoff.ms = 1000&lt;/P&gt;&lt;P&gt;    replica.fetch.max.bytes = 1048576&lt;/P&gt;&lt;P&gt;    replica.fetch.min.bytes = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.response.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    replica.fetch.wait.max.ms = 500&lt;/P&gt;&lt;P&gt;    replica.high.watermark.checkpoint.interval.ms = 5000&lt;/P&gt;&lt;P&gt;    replica.lag.time.max.ms = 10000&lt;/P&gt;&lt;P&gt;    replica.socket.receive.buffer.bytes = 65536&lt;/P&gt;&lt;P&gt;    replica.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    request.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    reserved.broker.max.id = 1000&lt;/P&gt;&lt;P&gt;    sasl.client.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.enabled.mechanisms = [GSSAPI]&lt;/P&gt;&lt;P&gt;    sasl.jaas.config = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.kinit.cmd = /usr/bin/kinit&lt;/P&gt;&lt;P&gt;    sasl.kerberos.min.time.before.relogin = 60000&lt;/P&gt;&lt;P&gt;    sasl.kerberos.principal.to.local.rules = [DEFAULT]&lt;/P&gt;&lt;P&gt;    sasl.kerberos.service.name = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.buffer.seconds = 300&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.min.period.seconds = 60&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.mechanism.inter.broker.protocol = GSSAPI&lt;/P&gt;&lt;P&gt;    sasl.server.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    security.inter.broker.protocol = PLAINTEXT&lt;/P&gt;&lt;P&gt;    socket.receive.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt; 
   socket.request.max.bytes = 104857600&lt;/P&gt;&lt;P&gt;    socket.send.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    ssl.cipher.suites = []&lt;/P&gt;&lt;P&gt;    ssl.client.auth = none&lt;/P&gt;&lt;P&gt;    ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]&lt;/P&gt;&lt;P&gt;    ssl.endpoint.identification.algorithm = https&lt;/P&gt;&lt;P&gt;    ssl.key.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keymanager.algorithm = SunX509&lt;/P&gt;&lt;P&gt;    ssl.keystore.location = &lt;/P&gt;&lt;P&gt;    ssl.keystore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keystore.type = JKS&lt;/P&gt;&lt;P&gt;    ssl.protocol = TLS&lt;/P&gt;&lt;P&gt;    ssl.provider = null&lt;/P&gt;&lt;P&gt;    ssl.secure.random.implementation = null&lt;/P&gt;&lt;P&gt;    ssl.trustmanager.algorithm = PKIX&lt;/P&gt;&lt;P&gt;    ssl.truststore.location = &lt;/P&gt;&lt;P&gt;    ssl.truststore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.truststore.type = JKS&lt;/P&gt;&lt;P&gt;    transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    transaction.max.timeout.ms = 900000&lt;/P&gt;&lt;P&gt;    transaction.remove.expired.transaction.cleanup.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    transaction.state.log.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    transaction.state.log.min.isr = 2&lt;/P&gt;&lt;P&gt;    transaction.state.log.num.partitions = 50&lt;/P&gt;&lt;P&gt;    transaction.state.log.replication.factor = 3&lt;/P&gt;&lt;P&gt;    transaction.state.log.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    transactional.id.expiration.ms = 604800000&lt;/P&gt;&lt;P&gt;    unclean.leader.election.enable = false&lt;/P&gt;&lt;P&gt;    zookeeper.connect = am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181&lt;/P&gt;&lt;P&gt;    zookeeper.connection.timeout.ms = 25000&lt;/P&gt;&lt;P&gt;    zookeeper.max.in.flight.requests = 10&lt;/P&gt;&lt;P&gt;    zookeeper.session.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    zookeeper.set.acl = 
false&lt;/P&gt;&lt;P&gt;    zookeeper.sync.time.ms = 2000&lt;/P&gt;&lt;P&gt; (kafka.server.KafkaConfig)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:00,879] INFO [ThrottledChannelReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:00,879] INFO [ThrottledChannelReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:00,880] INFO [ThrottledChannelReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:00,909] ERROR [KafkaServer id=1001] Fatal error during KafkaServer startup. Prepare to shutdown (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;org.apache.kafka.common.KafkaException: Failed to acquire lock on file .lock in /kafka-logs. A Kafka instance in another process or thread is using this directory.&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$$anonfun$lockLogDirs$1.apply(LogManager.scala:240)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$$anonfun$lockLogDirs$1.apply(LogManager.scala:236)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)&lt;/P&gt;&lt;P&gt;    at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$class.flatMap(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.AbstractTraversable.flatMap(Traversable.scala:104)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager.lockLogDirs(LogManager.scala:236)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager.&amp;lt;init&amp;gt;(LogManager.scala:97)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$.apply(LogManager.scala:958)&lt;/P&gt;&lt;P&gt;    at 
kafka.server.KafkaServer.startup(KafkaServer.scala:237)&lt;/P&gt;&lt;P&gt;    at kafka.server.KafkaServerStartable.startup(KafkaServerStartable.scala:38)&lt;/P&gt;&lt;P&gt;    at kafka.Kafka$.main(Kafka.scala:75)&lt;/P&gt;&lt;P&gt;    at kafka.Kafka.main(Kafka.scala)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:00,912] INFO [KafkaServer id=1001] shutting down (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:00,914] INFO [ZooKeeperClient] Closing. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:00,917] INFO [ZooKeeperClient] Closed. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:00,918] INFO [ThrottledChannelReaper-Fetch]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:01,880] INFO [ThrottledChannelReaper-Fetch]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:01,880] INFO [ThrottledChannelReaper-Fetch]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:01,880] INFO [ThrottledChannelReaper-Produce]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:02,879] INFO [ThrottledChannelReaper-Produce]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:02,879] INFO [ThrottledChannelReaper-Produce]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:02,880] INFO [ThrottledChannelReaper-Request]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:02,880] INFO [ThrottledChannelReaper-Request]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:02,880] INFO [ThrottledChannelReaper-Request]: Shutdown completed 
(kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:02,885] INFO [KafkaServer id=1001] shut down completed (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:02,885] ERROR Exiting Kafka. (kafka.server.KafkaServerStartable)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:02,887] INFO [KafkaServer id=1001] shutting down (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;&lt;BR /&gt;&lt;/P&gt;</description>
    <pubDate>Fri, 10 May 2019 22:59:50 GMT</pubDate>
    <dc:creator>anhdt061091</dc:creator>
    <dc:date>2019-05-10T22:59:50Z</dc:date>
    <item>
      <title>Mapreduce 2 and YARN auto stop after restart a few second, I use Hortonwork 3.1.1</title>
      <link>https://community.cloudera.com/t5/Support-Questions/Mapreduce-2-and-YARN-auto-stop-after-restart-a-few-second-I/m-p/239682#M201491</link>
      <description>&lt;P&gt;Hi everyone,&lt;/P&gt;&lt;P&gt;I use Hortonworks 3.1.1 on CentOS 7. Everything started normally after the install, but yesterday the YARN and MapReduce 2 services stopped; I try to restart them, but after a few seconds they automatically stop again. Please help me!&lt;/P&gt;&lt;P&gt;Here is the log from /var/log/hadoop-yarn/yarn/hadoop-mapreduce.jobsummary.log:&lt;/P&gt;&lt;P&gt;2019-04-27 15:57:23,150 INFO resourcemanager.RMAppManager$ApplicationSummary: appId=application_1554293667897_0131,name=JavaHBaseDistributedScan demo_kafka,user=hbase,queue=default,state=FINISHED,trackingUrl=http://bigdata-01.am.local:8088/proxy/application_1554293667897_0131/,appMasterHost=N/A,submitTime=1555412153616,startTime=1555412153617,finishTime=1555412160200,finalStatus=SUCCEEDED,memorySeconds=18035,vcoreSeconds=10,preemptedMemorySeconds=18035,preemptedVcoreSeconds=10,preemptedAMContainers=0,preemptedNonAMContainers=0,preemptedResources=&amp;lt;memory:0\, vCores:0&amp;gt;,applicationType=SPARK,resourceSeconds=18035 MB-seconds\, 10 vcore-seconds,preemptedResourceSeconds=18035 MB-seconds\, 10 vcore-seconds&lt;/P&gt;&lt;P&gt;2019-04-27 15:57:23,153 INFO resourcemanager.RMAppManager$ApplicationSummary: appId=application_1554293667897_0132,name=Thrift JDBC/ODBC Server,user=spark,queue=default,state=FAILED,trackingUrl=http://bigdata-01.am.local:8088/proxy/application_1554293667897_0132/,appMasterHost=N/A,submitTime=1555590937105,startTime=1555590937205,finishTime=1556006590180,finalStatus=FAILED,memorySeconds=425628448,vcoreSeconds=415652,preemptedMemorySeconds=425628448,preemptedVcoreSeconds=415652,preemptedAMContainers=0,preemptedNonAMContainers=0,preemptedResources=&amp;lt;memory:0\, vCores:0&amp;gt;,applicationType=SPARK,resourceSeconds=425628448 MB-seconds\, 415652 vcore-seconds,preemptedResourceSeconds=425628448 MB-seconds\, 415652 vcore-seconds&lt;/P&gt;&lt;P&gt;2019-04-27 15:57:23,153 INFO resourcemanager.RMAppManager$ApplicationSummary: appId=application_1554293667897_0134,name=Wordcount 
Background,user=hdfs,queue=default,state=FINISHED,trackingUrl=http://bigdata-01.am.local:8088/proxy/application_1554293667897_0134/,appMasterHost=N/A,submitTime=1555919241009,startTime=1555919241011,finishTime=1555930274213,finalStatus=SUCCEEDED,memorySeconds=56459868,vcoreSeconds=33083,preemptedMemorySeconds=56459868,preemptedVcoreSeconds=33083,preemptedAMContainers=0,preemptedNonAMContainers=0,preemptedResources=&amp;lt;memory:0\, vCores:0&amp;gt;,applicationType=SPARK,resourceSeconds=56459868 MB-seconds\, 33083 vcore-seconds,preemptedResourceSeconds=56459868 MB-seconds\, 33083 vcore-seconds&lt;/P&gt;&lt;P&gt;2019-04-27 15:57:23,153 INFO resourcemanager.RMAppManager$ApplicationSummary: appId=application_1556006587747_0001,name=HIVE-d222fe43-47e8-4777-99eb-1d626db7b1a9,user=hive,queue=default,state=FINISHED,trackingUrl=http://bigdata-01.am.local:8088/proxy/application_1556006587747_0001/,appMasterHost=N/A,submitTime=1556006598895,startTime=1556006598908,finishTime=1556007209543,finalStatus=SUCCEEDED,memorySeconds=1874359,vcoreSeconds=610,preemptedMemorySeconds=1874359,preemptedVcoreSeconds=610,preemptedAMContainers=0,preemptedNonAMContainers=0,preemptedResources=&amp;lt;memory:0\, vCores:0&amp;gt;,applicationType=TEZ,resourceSeconds=1874359 MB-seconds\, 610 vcore-seconds,preemptedResourceSeconds=1874359 MB-seconds\, 610 vcore-seconds&lt;/P&gt;&lt;P&gt;2019-04-27 15:57:23,153 INFO resourcemanager.RMAppManager$ApplicationSummary: appId=application_1556006587747_0002,name=Thrift JDBC/ODBC Server,user=spark,queue=default,state=FAILED,trackingUrl=http://bigdata-01.am.local:8088/proxy/application_1556006587747_0002/,appMasterHost=N/A,submitTime=1556006610698,startTime=1556006610699,finishTime=1556046968256,finalStatus=FAILED,memorySeconds=40712387,vcoreSeconds=39758,preemptedMemorySeconds=40712387,preemptedVcoreSeconds=39758,preemptedAMContainers=0,preemptedNonAMContainers=0,preemptedResources=&amp;lt;memory:0\, 
vCores:0&amp;gt;,applicationType=SPARK,resourceSeconds=40712387 MB-seconds\, 39758 vcore-seconds,preemptedResourceSeconds=40712387 MB-seconds\, 39758 vcore-seconds&lt;/P&gt;&lt;P&gt;2019-04-27 15:57:23,154 INFO resourcemanager.RMAppManager$ApplicationSummary: appId=application_1556006587747_0003,name=Thrift JDBC/ODBC Server,user=spark,queue=default,state=FAILED,trackingUrl=http://bigdata-01.am.local:8088/proxy/application_1556006587747_0003/,appMasterHost=N/A,submitTime=1556050435549,startTime=1556050435552,finishTime=1556102048938,finalStatus=FAILED,memorySeconds=52852082,vcoreSeconds=51613,preemptedMemorySeconds=52852082,preemptedVcoreSeconds=51613,preemptedAMContainers=0,preemptedNonAMContainers=0,preemptedResources=&amp;lt;memory:0\, vCores:0&amp;gt;,applicationType=SPARK,resourceSeconds=52852082 MB-seconds\, 51613 vcore-seconds,preemptedResourceSeconds=52852082 MB-seconds\, 51613 vcore-seconds&lt;/P&gt;&lt;P&gt;2019-04-27 15:57:23,155 INFO resourcemanager.RMAppManager$ApplicationSummary: appId=application_1556006587747_0004,name=Thrift JDBC/ODBC Server,user=spark,queue=default,state=FAILED,trackingUrl=http://bigdata-01.am.local:8088/proxy/application_1556006587747_0004/,appMasterHost=N/A,submitTime=1556115260583,startTime=1556115260585,finishTime=1556126768579,finalStatus=FAILED,memorySeconds=11784135,vcoreSeconds=11507,preemptedMemorySeconds=11784135,preemptedVcoreSeconds=11507,preemptedAMContainers=0,preemptedNonAMContainers=0,preemptedResources=&amp;lt;memory:0\, vCores:0&amp;gt;,applicationType=SPARK,resourceSeconds=11784135 MB-seconds\, 11507 vcore-seconds,preemptedResourceSeconds=11784135 MB-seconds\, 11507 vcore-seconds&lt;/P&gt;&lt;P&gt;2019-04-27 15:57:23,156 INFO resourcemanager.RMAppManager$ApplicationSummary: appId=application_1556006587747_0005,name=Thrift JDBC/ODBC 
Server,user=spark,queue=default,state=FAILED,trackingUrl=http://bigdata-01.am.local:8088/proxy/application_1556006587747_0005/,appMasterHost=N/A,submitTime=1556136892217,startTime=1556136892219,finishTime=1556151248704,finalStatus=FAILED,memorySeconds=14700999,vcoreSeconds=14356,preemptedMemorySeconds=14700999,preemptedVcoreSeconds=14356,preemptedAMContainers=0,preemptedNonAMContainers=0,preemptedResources=&amp;lt;memory:0\, vCores:0&amp;gt;,applicationType=SPARK,resourceSeconds=14700999 MB-seconds\, 14356 vcore-seconds,preemptedResourceSeconds=14700999 MB-seconds\, 14356 vcore-seconds&lt;/P&gt;&lt;P&gt;2019-04-27 15:57:23,157 INFO resourcemanager.RMAppManager$ApplicationSummary: appId=application_1556006587747_0006,name=Thrift JDBC/ODBC Server,user=spark,queue=default,state=FAILED,trackingUrl=http://bigdata-01.am.local:8088/proxy/application_1556006587747_0006/,appMasterHost=N/A,submitTime=1556158512111,startTime=1556158512113,finishTime=1556182808281,finalStatus=FAILED,memorySeconds=24879206,vcoreSeconds=24296,preemptedMemorySeconds=24879206,preemptedVcoreSeconds=24296,preemptedAMContainers=0,preemptedNonAMContainers=0,preemptedResources=&amp;lt;memory:0\, vCores:0&amp;gt;,applicationType=SPARK,resourceSeconds=24879206 MB-seconds\, 24296 vcore-seconds,preemptedResourceSeconds=24879206 MB-seconds\, 24296 vcore-seconds&lt;/P&gt;&lt;P&gt;&lt;BR /&gt;&lt;/P&gt;</description>
      <pubDate>Fri, 03 May 2019 06:19:01 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Support-Questions/Mapreduce-2-and-YARN-auto-stop-after-restart-a-few-second-I/m-p/239682#M201491</guid>
      <dc:creator>anhdt061091</dc:creator>
      <dc:date>2019-05-03T06:19:01Z</dc:date>
    </item>
    <item>
      <title>Re: Mapreduce 2 and YARN auto stop after restart a few second, I use Hortonwork 3.1.1</title>
      <link>https://community.cloudera.com/t5/Support-Questions/Mapreduce-2-and-YARN-auto-stop-after-restart-a-few-second-I/m-p/239683#M201492</link>
      <description>&lt;P&gt;&lt;A rel="noopener noreferrer noopener noreferrer" href="http://@duong%20tuan%20anh" target="_blank"&gt;&lt;EM&gt;@duong tuan anh&lt;/EM&gt;&lt;/A&gt;&lt;/P&gt;&lt;P&gt;&lt;EM&gt;Can you also attach the below recent logs&lt;/EM&gt;&lt;/P&gt;&lt;PRE&gt;&lt;EM&gt;hadoop-yarn-resourcemanager-xxxx.log
hadoop-yarn-nodemanager-xxxx.log
hadoop-yarn-root-registrydns-xxxx.log
hbase-yarn-ats-master-xxxx.log&lt;/EM&gt;&lt;/PRE&gt;&lt;P&gt;&lt;EM&gt;&lt;BR /&gt;&lt;/EM&gt;&lt;/P&gt;&lt;P&gt;&lt;EM&gt;Thank you &lt;/EM&gt;&lt;/P&gt;</description>
      <pubDate>Fri, 03 May 2019 07:46:22 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Support-Questions/Mapreduce-2-and-YARN-auto-stop-after-restart-a-few-second-I/m-p/239683#M201492</guid>
      <dc:creator>Shelton</dc:creator>
      <dc:date>2019-05-03T07:46:22Z</dc:date>
    </item>
    <item>
      <title>Re: Mapreduce 2 and YARN auto stop after restart a few second, I use Hortonwork 3.1.1</title>
      <link>https://community.cloudera.com/t5/Support-Questions/Mapreduce-2-and-YARN-auto-stop-after-restart-a-few-second-I/m-p/239684#M201493</link>
      <description>&lt;P&gt;hi  &lt;A rel="user" href="https://community.hortonworks.com/users/1271/sheltong.html"&gt;Geoffrey Shelton Okot&lt;/A&gt;  here is the below recent logs you need but i can't upload in here because is too large , you can download on here or can you give me your mail, i will send it to you . &lt;/P&gt;&lt;P&gt;Link : &lt;A href="https://www.fshare.vn/file/Y38M7S51FSGK?token=1556863604"&gt;https://www.fshare.vn/file/Y38M7S51FSGK?token=1556863604&lt;/A&gt;  &lt;/P&gt;&lt;P&gt;Many thanks for your help &lt;/P&gt;&lt;P&gt;&lt;BR /&gt;&lt;/P&gt;</description>
      <pubDate>Fri, 03 May 2019 13:07:13 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Support-Questions/Mapreduce-2-and-YARN-auto-stop-after-restart-a-few-second-I/m-p/239684#M201493</guid>
      <dc:creator>anhdt061091</dc:creator>
      <dc:date>2019-05-03T13:07:13Z</dc:date>
    </item>
    <item>
      <title>Re: Mapreduce 2 and YARN auto stop after restart a few second, I use Hortonwork 3.1.1</title>
      <link>https://community.cloudera.com/t5/Support-Questions/Mapreduce-2-and-YARN-auto-stop-after-restart-a-few-second-I/m-p/239685#M201494</link>
      <description>&lt;P&gt;&lt;A rel="noopener noreferrer noopener noreferrer noopener noreferrer noopener noreferrer" href="http://duong%20tuan%20anh/" target="_blank"&gt;&lt;EM&gt;@duong tuan anh&lt;/EM&gt;&lt;/A&gt;&lt;EM&gt; &lt;/EM&gt;&lt;/P&gt;&lt;P&gt;&lt;EM&gt;Indeed the files are huge can you do a quick solution I saw after reading your logs, &lt;/EM&gt;&lt;/P&gt;&lt;P&gt;&lt;EM&gt;&lt;STRONG&gt;Caused by: org.apache.hadoop.security.AccessControlException&lt;/STRONG&gt;&lt;BR /&gt;&lt;/EM&gt;&lt;/P&gt;&lt;P&gt;&lt;EM&gt;As the root user switch to hdfs&lt;/EM&gt;&lt;/P&gt;&lt;PRE&gt;&lt;EM&gt;# su - hdfs&lt;/EM&gt;&lt;/PRE&gt;&lt;P&gt;&lt;EM&gt;Change ownership of the mapred directory&lt;/EM&gt;&lt;/P&gt;&lt;PRE&gt;&lt;EM&gt;$ hdfs dfs -chown -R mapred:hadoop /mr-history&lt;/EM&gt;&lt;/PRE&gt;&lt;P&gt;&lt;EM&gt;That should resolve the problem.&lt;/EM&gt;&lt;/P&gt;&lt;P&gt;&lt;EM&gt;&lt;BR /&gt;Keep me posted&lt;/EM&gt;&lt;/P&gt;&lt;P&gt;&lt;BR /&gt;&lt;/P&gt;</description>
      <pubDate>Fri, 03 May 2019 15:03:20 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Support-Questions/Mapreduce-2-and-YARN-auto-stop-after-restart-a-few-second-I/m-p/239685#M201494</guid>
      <dc:creator>Shelton</dc:creator>
      <dc:date>2019-05-03T15:03:20Z</dc:date>
    </item>
    <item>
      <title>Re: Mapreduce 2 and YARN auto stop after restart a few second, I use Hortonwork 3.1.1</title>
      <link>https://community.cloudera.com/t5/Support-Questions/Mapreduce-2-and-YARN-auto-stop-after-restart-a-few-second-I/m-p/239686#M201495</link>
      <description>&lt;P&gt;&lt;A rel="noopener noreferrer noopener noreferrer noopener noreferrer noopener noreferrer noopener noreferrer noopener noreferrer" href="http://duong%20tuan%20anh/" target="_blank"&gt;&lt;EM&gt;@duong tuan anh&lt;/EM&gt;&lt;/A&gt;&lt;EM&gt; &lt;/EM&gt;&lt;/P&gt;&lt;P&gt;&lt;EM&gt;Any updates&lt;/EM&gt;&lt;/P&gt;</description>
      <pubDate>Fri, 03 May 2019 18:05:41 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Support-Questions/Mapreduce-2-and-YARN-auto-stop-after-restart-a-few-second-I/m-p/239686#M201495</guid>
      <dc:creator>Shelton</dc:creator>
      <dc:date>2019-05-03T18:05:41Z</dc:date>
    </item>
    <item>
      <title>Re: Mapreduce 2 and YARN auto stop after restart a few second, I use Hortonwork 3.1.1</title>
      <link>https://community.cloudera.com/t5/Support-Questions/Mapreduce-2-and-YARN-auto-stop-after-restart-a-few-second-I/m-p/239687#M201496</link>
      <description>&lt;P&gt;Hi &lt;a href="https://community.cloudera.com/t5/user/viewprofilepage/user-id/31920"&gt;@geoffrey&lt;/a&gt; Shelton Okot  i use command " &lt;EM&gt;$ hdfs dfs -chown -R mapred:hadoop /mr-history&lt;/EM&gt;" and mapreduce Service has worked normally, but YARN service still failed, Timeline Service V2.0 Stopped. I have attached the image below &lt;/P&gt;&lt;P&gt;&lt;span class="lia-inline-image-display-wrapper lia-image-align-inline" image-alt="108464-1.png" style="width: 1920px;"&gt;&lt;img src="https://community.cloudera.com/t5/image/serverpage/image-id/13914iCD7095EC19E9C781/image-size/medium?v=v2&amp;amp;px=400" role="button" title="108464-1.png" alt="108464-1.png" /&gt;&lt;/span&gt;&lt;BR /&gt;&lt;/P&gt;</description>
      <pubDate>Sat, 17 Aug 2019 22:37:01 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Support-Questions/Mapreduce-2-and-YARN-auto-stop-after-restart-a-few-second-I/m-p/239687#M201496</guid>
      <dc:creator>anhdt061091</dc:creator>
      <dc:date>2019-08-17T22:37:01Z</dc:date>
    </item>
    <item>
      <title>Re: Mapreduce 2 and YARN auto stop after restart a few second, I use Hortonwork 3.1.1</title>
      <link>https://community.cloudera.com/t5/Support-Questions/Mapreduce-2-and-YARN-auto-stop-after-restart-a-few-second-I/m-p/239688#M201497</link>
      <description>&lt;P&gt;&lt;A rel="noopener noreferrer noopener noreferrer noopener noreferrer noopener noreferrer noopener noreferrer noopener noreferrer noopener noreferrer noopener noreferrer noopener noreferrer noopener noreferrer" href="http://duong%20tuan%20anh/" target="_blank"&gt;&lt;EM&gt;@duong tuan anh&lt;/EM&gt;&lt;/A&gt;&lt;/P&gt;&lt;P&gt;&lt;EM&gt;I can see hiveServer2 also has an issue can you resolve that or what is the problem. It's the TSv2 which is not starting can you share specifically those logs?&lt;/EM&gt;&lt;/P&gt;&lt;P&gt;Change you run the below snippets&lt;/P&gt;&lt;PRE&gt;$ hdfs dfs -chown -R yarn:hadoop /ats&lt;/PRE&gt;&lt;P&gt;&lt;EM&gt;Finally&lt;/EM&gt;&lt;/P&gt;&lt;PRE&gt;&lt;EM&gt;$ hdfs dfs -chown -R yarn-ats:hdfs /atsv2/hbase&lt;/EM&gt;&lt;/PRE&gt;&lt;P&gt;&lt;EM&gt;Restart the services and revert&lt;/EM&gt;&lt;/P&gt;&lt;P&gt;&lt;EM&gt;HTH&lt;/EM&gt;&lt;/P&gt;</description>
      <pubDate>Sat, 04 May 2019 14:23:46 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Support-Questions/Mapreduce-2-and-YARN-auto-stop-after-restart-a-few-second-I/m-p/239688#M201497</guid>
      <dc:creator>Shelton</dc:creator>
      <dc:date>2019-05-04T14:23:46Z</dc:date>
    </item>
    <item>
      <title>Re: Mapreduce 2 and YARN auto stop after restart a few second, I use Hortonwork 3.1.1</title>
      <link>https://community.cloudera.com/t5/Support-Questions/Mapreduce-2-and-YARN-auto-stop-after-restart-a-few-second-I/m-p/239689#M201498</link>
      <description>&lt;P&gt;Thank &lt;A rel="user" href="https://community.hortonworks.com/users/1271/sheltong.html"&gt;Geoffrey Shelton Okot&lt;/A&gt;   &lt;/P&gt;&lt;P&gt;I have fixed that error . &lt;/P&gt;&lt;P&gt;Thank you !&lt;/P&gt;</description>
      <pubDate>Fri, 10 May 2019 14:04:56 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Support-Questions/Mapreduce-2-and-YARN-auto-stop-after-restart-a-few-second-I/m-p/239689#M201498</guid>
      <dc:creator>anhdt061091</dc:creator>
      <dc:date>2019-05-10T14:04:56Z</dc:date>
    </item>
    <item>
      <title>Re: Mapreduce 2 and YARN auto stop after restart a few second, I use Hortonwork 3.1.1</title>
      <link>https://community.cloudera.com/t5/Support-Questions/Mapreduce-2-and-YARN-auto-stop-after-restart-a-few-second-I/m-p/239690#M201499</link>
      <description>&lt;P&gt;hi @Geoffrey Shelton Okot &lt;/P&gt;&lt;P&gt; My kafka service is currently not working , Can you help me please ?  Here is the log on : /var/log/kafka/server.log and /var/log/kafka/kafka.err&lt;/P&gt;&lt;P&gt;&lt;BR /&gt;&lt;/P&gt;&lt;P&gt;[2019-05-10 16:00:51,711] INFO [GroupMetadataManager brokerId=1001] Removed 0 expired offsets in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:25,008] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:25,518] INFO starting (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:25,519] INFO Connecting to zookeeper on am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181 (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:25,532] INFO [ZooKeeperClient] Initializing a new session to am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:25,551] INFO [ZooKeeperClient] Waiting until connected. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:25,616] INFO [ZooKeeperClient] Connected. 
(kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:25,881] INFO Cluster ID = z-4P_uf-RzmpT2QvMnOD2g (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:25,950] INFO KafkaConfig values: &lt;/P&gt;&lt;P&gt;    advertised.host.name = null&lt;/P&gt;&lt;P&gt;    advertised.listeners = null&lt;/P&gt;&lt;P&gt;    advertised.port = null&lt;/P&gt;&lt;P&gt;    alter.config.policy.class.name = null&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    authorizer.class.name = &lt;/P&gt;&lt;P&gt;    auto.create.topics.enable = true&lt;/P&gt;&lt;P&gt;    auto.leader.rebalance.enable = true&lt;/P&gt;&lt;P&gt;    background.threads = 10&lt;/P&gt;&lt;P&gt;    broker.id = -1&lt;/P&gt;&lt;P&gt;    broker.id.generation.enable = true&lt;/P&gt;&lt;P&gt;    broker.rack = null&lt;/P&gt;&lt;P&gt;    client.quota.callback.class = null&lt;/P&gt;&lt;P&gt;    compression.type = producer&lt;/P&gt;&lt;P&gt;    connections.max.idle.ms = 600000&lt;/P&gt;&lt;P&gt;    controlled.shutdown.enable = true&lt;/P&gt;&lt;P&gt;    controlled.shutdown.max.retries = 3&lt;/P&gt;&lt;P&gt;    controlled.shutdown.retry.backoff.ms = 5000&lt;/P&gt;&lt;P&gt;    controller.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    create.topic.policy.class.name = null&lt;/P&gt;&lt;P&gt;    default.replication.factor = 1&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.check.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.time.ms = 86400000&lt;/P&gt;&lt;P&gt;    delegation.token.master.key = null&lt;/P&gt;&lt;P&gt;    delegation.token.max.lifetime.ms = 604800000&lt;/P&gt;&lt;P&gt;    delete.records.purgatory.purge.interval.requests = 1&lt;/P&gt;&lt;P&gt;    delete.topic.enable = true&lt;/P&gt;&lt;P&gt;    fetch.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    group.initial.rebalance.delay.ms = 3000&lt;/P&gt;&lt;P&gt;    group.max.session.timeout.ms = 
300000&lt;/P&gt;&lt;P&gt;    group.min.session.timeout.ms = 6000&lt;/P&gt;&lt;P&gt;    host.name = &lt;/P&gt;&lt;P&gt;    inter.broker.listener.name = null&lt;/P&gt;&lt;P&gt;    inter.broker.protocol.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    leader.imbalance.check.interval.seconds = 300&lt;/P&gt;&lt;P&gt;    leader.imbalance.per.broker.percentage = 10&lt;/P&gt;&lt;P&gt;    listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL&lt;/P&gt;&lt;P&gt;    listeners = PLAINTEXT://am-bigdata-01.am.local:6667&lt;/P&gt;&lt;P&gt;    log.cleaner.backoff.ms = 15000&lt;/P&gt;&lt;P&gt;    log.cleaner.dedupe.buffer.size = 134217728&lt;/P&gt;&lt;P&gt;    log.cleaner.delete.retention.ms = 86400000&lt;/P&gt;&lt;P&gt;    log.cleaner.enable = true&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.load.factor = 0.9&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.size = 524288&lt;/P&gt;&lt;P&gt;    log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308&lt;/P&gt;&lt;P&gt;    log.cleaner.min.cleanable.ratio = 0.5&lt;/P&gt;&lt;P&gt;    log.cleaner.min.compaction.lag.ms = 0&lt;/P&gt;&lt;P&gt;    log.cleaner.threads = 1&lt;/P&gt;&lt;P&gt;    log.cleanup.policy = [delete]&lt;/P&gt;&lt;P&gt;    log.dir = /tmp/kafka-logs&lt;/P&gt;&lt;P&gt;    log.dirs = /kafka-logs&lt;/P&gt;&lt;P&gt;    log.flush.interval.messages = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.interval.ms = null&lt;/P&gt;&lt;P&gt;    log.flush.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.flush.scheduler.interval.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.start.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.index.interval.bytes = 4096&lt;/P&gt;&lt;P&gt;    log.index.size.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    log.message.downconversion.enable = true&lt;/P&gt;&lt;P&gt;    log.message.format.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    log.message.timestamp.difference.max.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    
log.message.timestamp.type = CreateTime&lt;/P&gt;&lt;P&gt;    log.preallocate = false&lt;/P&gt;&lt;P&gt;    log.retention.bytes = -1&lt;/P&gt;&lt;P&gt;    log.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    log.retention.hours = 168&lt;/P&gt;&lt;P&gt;    log.retention.minutes = null&lt;/P&gt;&lt;P&gt;    log.retention.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.hours = 168&lt;/P&gt;&lt;P&gt;    log.roll.jitter.hours = 0&lt;/P&gt;&lt;P&gt;    log.roll.jitter.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.ms = null&lt;/P&gt;&lt;P&gt;    log.segment.bytes = 1073741824&lt;/P&gt;&lt;P&gt;    log.segment.delete.delay.ms = 60000&lt;/P&gt;&lt;P&gt;    max.connections.per.ip = 2147483647&lt;/P&gt;&lt;P&gt;    max.connections.per.ip.overrides = &lt;/P&gt;&lt;P&gt;    max.incremental.fetch.session.cache.slots = 1000&lt;/P&gt;&lt;P&gt;    message.max.bytes = 1000000&lt;/P&gt;&lt;P&gt;    metric.reporters = []&lt;/P&gt;&lt;P&gt;    metrics.num.samples = 2&lt;/P&gt;&lt;P&gt;    metrics.recording.level = INFO&lt;/P&gt;&lt;P&gt;    metrics.sample.window.ms = 30000&lt;/P&gt;&lt;P&gt;    min.insync.replicas = 1&lt;/P&gt;&lt;P&gt;    num.io.threads = 8&lt;/P&gt;&lt;P&gt;    num.network.threads = 3&lt;/P&gt;&lt;P&gt;    num.partitions = 1&lt;/P&gt;&lt;P&gt;    num.recovery.threads.per.data.dir = 1&lt;/P&gt;&lt;P&gt;    num.replica.alter.log.dirs.threads = null&lt;/P&gt;&lt;P&gt;    num.replica.fetchers = 1&lt;/P&gt;&lt;P&gt;    offset.metadata.max.bytes = 4096&lt;/P&gt;&lt;P&gt;    offsets.commit.required.acks = -1&lt;/P&gt;&lt;P&gt;    offsets.commit.timeout.ms = 5000&lt;/P&gt;&lt;P&gt;    offsets.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    offsets.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    offsets.retention.minutes = 86400000&lt;/P&gt;&lt;P&gt;    offsets.topic.compression.codec = 0&lt;/P&gt;&lt;P&gt;    offsets.topic.num.partitions = 50&lt;/P&gt;&lt;P&gt;    offsets.topic.replication.factor = 3&lt;/P&gt;&lt;P&gt;    offsets.topic.segment.bytes = 
104857600&lt;/P&gt;&lt;P&gt;    password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding&lt;/P&gt;&lt;P&gt;    password.encoder.iterations = 4096&lt;/P&gt;&lt;P&gt;    password.encoder.key.length = 128&lt;/P&gt;&lt;P&gt;    password.encoder.keyfactory.algorithm = null&lt;/P&gt;&lt;P&gt;    password.encoder.old.secret = null&lt;/P&gt;&lt;P&gt;    password.encoder.secret = null&lt;/P&gt;&lt;P&gt;    port = 6667&lt;/P&gt;&lt;P&gt;    principal.builder.class = null&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.entry.expiration.ms = 300000&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.max.size = 1000&lt;/P&gt;&lt;P&gt;    producer.metrics.enable = false&lt;/P&gt;&lt;P&gt;    producer.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    queued.max.request.bytes = -1&lt;/P&gt;&lt;P&gt;    queued.max.requests = 500&lt;/P&gt;&lt;P&gt;    quota.consumer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.producer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.window.num = 11&lt;/P&gt;&lt;P&gt;    quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.backoff.ms = 1000&lt;/P&gt;&lt;P&gt;    replica.fetch.max.bytes = 1048576&lt;/P&gt;&lt;P&gt;    replica.fetch.min.bytes = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.response.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    replica.fetch.wait.max.ms = 500&lt;/P&gt;&lt;P&gt;    replica.high.watermark.checkpoint.interval.ms = 5000&lt;/P&gt;&lt;P&gt;    replica.lag.time.max.ms = 10000&lt;/P&gt;&lt;P&gt;    replica.socket.receive.buffer.bytes = 65536&lt;/P&gt;&lt;P&gt;    replica.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    request.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    reserved.broker.max.id = 1000&lt;/P&gt;&lt;P&gt;    sasl.client.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.enabled.mechanisms = [GSSAPI]&lt;/P&gt;&lt;P&gt;    sasl.jaas.config = null&lt;/P&gt;&lt;P&gt;    
sasl.kerberos.kinit.cmd = /usr/bin/kinit&lt;/P&gt;&lt;P&gt;    sasl.kerberos.min.time.before.relogin = 60000&lt;/P&gt;&lt;P&gt;    sasl.kerberos.principal.to.local.rules = [DEFAULT]&lt;/P&gt;&lt;P&gt;    sasl.kerberos.service.name = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.buffer.seconds = 300&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.min.period.seconds = 60&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.mechanism.inter.broker.protocol = GSSAPI&lt;/P&gt;&lt;P&gt;    sasl.server.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    security.inter.broker.protocol = PLAINTEXT&lt;/P&gt;&lt;P&gt;    socket.receive.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    socket.request.max.bytes = 104857600&lt;/P&gt;&lt;P&gt;    socket.send.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    ssl.cipher.suites = []&lt;/P&gt;&lt;P&gt;    ssl.client.auth = none&lt;/P&gt;&lt;P&gt;    ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]&lt;/P&gt;&lt;P&gt;    ssl.endpoint.identification.algorithm = https&lt;/P&gt;&lt;P&gt;    ssl.key.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keymanager.algorithm = SunX509&lt;/P&gt;&lt;P&gt;    ssl.keystore.location = &lt;/P&gt;&lt;P&gt;    ssl.keystore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keystore.type = JKS&lt;/P&gt;&lt;P&gt;    ssl.protocol = TLS&lt;/P&gt;&lt;P&gt;    ssl.provider = null&lt;/P&gt;&lt;P&gt;    ssl.secure.random.implementation = null&lt;/P&gt;&lt;P&gt;    ssl.trustmanager.algorithm = PKIX&lt;/P&gt;&lt;P&gt;    ssl.truststore.location = &lt;/P&gt;&lt;P&gt;    ssl.truststore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.truststore.type = JKS&lt;/P&gt;&lt;P&gt;    
transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    transaction.max.timeout.ms = 900000&lt;/P&gt;&lt;P&gt;    transaction.remove.expired.transaction.cleanup.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    transaction.state.log.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    transaction.state.log.min.isr = 2&lt;/P&gt;&lt;P&gt;    transaction.state.log.num.partitions = 50&lt;/P&gt;&lt;P&gt;    transaction.state.log.replication.factor = 3&lt;/P&gt;&lt;P&gt;    transaction.state.log.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    transactional.id.expiration.ms = 604800000&lt;/P&gt;&lt;P&gt;    unclean.leader.election.enable = false&lt;/P&gt;&lt;P&gt;    zookeeper.connect = am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181&lt;/P&gt;&lt;P&gt;    zookeeper.connection.timeout.ms = 25000&lt;/P&gt;&lt;P&gt;    zookeeper.max.in.flight.requests = 10&lt;/P&gt;&lt;P&gt;    zookeeper.session.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    zookeeper.set.acl = false&lt;/P&gt;&lt;P&gt;    zookeeper.sync.time.ms = 2000&lt;/P&gt;&lt;P&gt; (kafka.server.KafkaConfig)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:25,956] INFO KafkaConfig values: &lt;/P&gt;&lt;P&gt;    advertised.host.name = null&lt;/P&gt;&lt;P&gt;    advertised.listeners = null&lt;/P&gt;&lt;P&gt;    advertised.port = null&lt;/P&gt;&lt;P&gt;    alter.config.policy.class.name = null&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    authorizer.class.name = &lt;/P&gt;&lt;P&gt;    auto.create.topics.enable = true&lt;/P&gt;&lt;P&gt;    auto.leader.rebalance.enable = true&lt;/P&gt;&lt;P&gt;    background.threads = 10&lt;/P&gt;&lt;P&gt;    broker.id = -1&lt;/P&gt;&lt;P&gt;    broker.id.generation.enable = true&lt;/P&gt;&lt;P&gt;    broker.rack = null&lt;/P&gt;&lt;P&gt;    client.quota.callback.class = null&lt;/P&gt;&lt;P&gt;    compression.type = 
producer&lt;/P&gt;&lt;P&gt;    connections.max.idle.ms = 600000&lt;/P&gt;&lt;P&gt;    controlled.shutdown.enable = true&lt;/P&gt;&lt;P&gt;    controlled.shutdown.max.retries = 3&lt;/P&gt;&lt;P&gt;    controlled.shutdown.retry.backoff.ms = 5000&lt;/P&gt;&lt;P&gt;    controller.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    create.topic.policy.class.name = null&lt;/P&gt;&lt;P&gt;    default.replication.factor = 1&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.check.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.time.ms = 86400000&lt;/P&gt;&lt;P&gt;    delegation.token.master.key = null&lt;/P&gt;&lt;P&gt;    delegation.token.max.lifetime.ms = 604800000&lt;/P&gt;&lt;P&gt;    delete.records.purgatory.purge.interval.requests = 1&lt;/P&gt;&lt;P&gt;    delete.topic.enable = true&lt;/P&gt;&lt;P&gt;    fetch.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    group.initial.rebalance.delay.ms = 3000&lt;/P&gt;&lt;P&gt;    group.max.session.timeout.ms = 300000&lt;/P&gt;&lt;P&gt;    group.min.session.timeout.ms = 6000&lt;/P&gt;&lt;P&gt;    host.name = &lt;/P&gt;&lt;P&gt;    inter.broker.listener.name = null&lt;/P&gt;&lt;P&gt;    inter.broker.protocol.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    leader.imbalance.check.interval.seconds = 300&lt;/P&gt;&lt;P&gt;    leader.imbalance.per.broker.percentage = 10&lt;/P&gt;&lt;P&gt;    listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL&lt;/P&gt;&lt;P&gt;    listeners = PLAINTEXT://am-bigdata-01.am.local:6667&lt;/P&gt;&lt;P&gt;    log.cleaner.backoff.ms = 15000&lt;/P&gt;&lt;P&gt;    log.cleaner.dedupe.buffer.size = 134217728&lt;/P&gt;&lt;P&gt;    log.cleaner.delete.retention.ms = 86400000&lt;/P&gt;&lt;P&gt;    log.cleaner.enable = true&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.load.factor = 0.9&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.size = 524288&lt;/P&gt;&lt;P&gt;    log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308&lt;/P&gt;&lt;P&gt;   
 log.cleaner.min.cleanable.ratio = 0.5&lt;/P&gt;&lt;P&gt;    log.cleaner.min.compaction.lag.ms = 0&lt;/P&gt;&lt;P&gt;    log.cleaner.threads = 1&lt;/P&gt;&lt;P&gt;    log.cleanup.policy = [delete]&lt;/P&gt;&lt;P&gt;    log.dir = /tmp/kafka-logs&lt;/P&gt;&lt;P&gt;    log.dirs = /kafka-logs&lt;/P&gt;&lt;P&gt;    log.flush.interval.messages = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.interval.ms = null&lt;/P&gt;&lt;P&gt;    log.flush.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.flush.scheduler.interval.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.start.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.index.interval.bytes = 4096&lt;/P&gt;&lt;P&gt;    log.index.size.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    log.message.downconversion.enable = true&lt;/P&gt;&lt;P&gt;    log.message.format.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    log.message.timestamp.difference.max.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.message.timestamp.type = CreateTime&lt;/P&gt;&lt;P&gt;    log.preallocate = false&lt;/P&gt;&lt;P&gt;    log.retention.bytes = -1&lt;/P&gt;&lt;P&gt;    log.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    log.retention.hours = 168&lt;/P&gt;&lt;P&gt;    log.retention.minutes = null&lt;/P&gt;&lt;P&gt;    log.retention.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.hours = 168&lt;/P&gt;&lt;P&gt;    log.roll.jitter.hours = 0&lt;/P&gt;&lt;P&gt;    log.roll.jitter.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.ms = null&lt;/P&gt;&lt;P&gt;    log.segment.bytes = 1073741824&lt;/P&gt;&lt;P&gt;    log.segment.delete.delay.ms = 60000&lt;/P&gt;&lt;P&gt;    max.connections.per.ip = 2147483647&lt;/P&gt;&lt;P&gt;    max.connections.per.ip.overrides = &lt;/P&gt;&lt;P&gt;    max.incremental.fetch.session.cache.slots = 1000&lt;/P&gt;&lt;P&gt;    message.max.bytes = 1000000&lt;/P&gt;&lt;P&gt;    metric.reporters = []&lt;/P&gt;&lt;P&gt;    metrics.num.samples = 2&lt;/P&gt;&lt;P&gt;    metrics.recording.level = 
INFO&lt;/P&gt;&lt;P&gt;    metrics.sample.window.ms = 30000&lt;/P&gt;&lt;P&gt;    min.insync.replicas = 1&lt;/P&gt;&lt;P&gt;    num.io.threads = 8&lt;/P&gt;&lt;P&gt;    num.network.threads = 3&lt;/P&gt;&lt;P&gt;    num.partitions = 1&lt;/P&gt;&lt;P&gt;    num.recovery.threads.per.data.dir = 1&lt;/P&gt;&lt;P&gt;    num.replica.alter.log.dirs.threads = null&lt;/P&gt;&lt;P&gt;    num.replica.fetchers = 1&lt;/P&gt;&lt;P&gt;    offset.metadata.max.bytes = 4096&lt;/P&gt;&lt;P&gt;    offsets.commit.required.acks = -1&lt;/P&gt;&lt;P&gt;    offsets.commit.timeout.ms = 5000&lt;/P&gt;&lt;P&gt;    offsets.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    offsets.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    offsets.retention.minutes = 86400000&lt;/P&gt;&lt;P&gt;    offsets.topic.compression.codec = 0&lt;/P&gt;&lt;P&gt;    offsets.topic.num.partitions = 50&lt;/P&gt;&lt;P&gt;    offsets.topic.replication.factor = 3&lt;/P&gt;&lt;P&gt;    offsets.topic.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding&lt;/P&gt;&lt;P&gt;    password.encoder.iterations = 4096&lt;/P&gt;&lt;P&gt;    password.encoder.key.length = 128&lt;/P&gt;&lt;P&gt;    password.encoder.keyfactory.algorithm = null&lt;/P&gt;&lt;P&gt;    password.encoder.old.secret = null&lt;/P&gt;&lt;P&gt;    password.encoder.secret = null&lt;/P&gt;&lt;P&gt;    port = 6667&lt;/P&gt;&lt;P&gt;    principal.builder.class = null&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.entry.expiration.ms = 300000&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.max.size = 1000&lt;/P&gt;&lt;P&gt;    producer.metrics.enable = false&lt;/P&gt;&lt;P&gt;    producer.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    queued.max.request.bytes = -1&lt;/P&gt;&lt;P&gt;    queued.max.requests = 500&lt;/P&gt;&lt;P&gt;    quota.consumer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.producer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.window.num = 
11&lt;/P&gt;&lt;P&gt;    quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.backoff.ms = 1000&lt;/P&gt;&lt;P&gt;    replica.fetch.max.bytes = 1048576&lt;/P&gt;&lt;P&gt;    replica.fetch.min.bytes = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.response.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    replica.fetch.wait.max.ms = 500&lt;/P&gt;&lt;P&gt;    replica.high.watermark.checkpoint.interval.ms = 5000&lt;/P&gt;&lt;P&gt;    replica.lag.time.max.ms = 10000&lt;/P&gt;&lt;P&gt;    replica.socket.receive.buffer.bytes = 65536&lt;/P&gt;&lt;P&gt;    replica.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    request.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    reserved.broker.max.id = 1000&lt;/P&gt;&lt;P&gt;    sasl.client.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.enabled.mechanisms = [GSSAPI]&lt;/P&gt;&lt;P&gt;    sasl.jaas.config = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.kinit.cmd = /usr/bin/kinit&lt;/P&gt;&lt;P&gt;    sasl.kerberos.min.time.before.relogin = 60000&lt;/P&gt;&lt;P&gt;    sasl.kerberos.principal.to.local.rules = [DEFAULT]&lt;/P&gt;&lt;P&gt;    sasl.kerberos.service.name = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.buffer.seconds = 300&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.min.period.seconds = 60&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.mechanism.inter.broker.protocol = GSSAPI&lt;/P&gt;&lt;P&gt;    sasl.server.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    security.inter.broker.protocol = PLAINTEXT&lt;/P&gt;&lt;P&gt;    socket.receive.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt; 
   socket.request.max.bytes = 104857600&lt;/P&gt;&lt;P&gt;    socket.send.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    ssl.cipher.suites = []&lt;/P&gt;&lt;P&gt;    ssl.client.auth = none&lt;/P&gt;&lt;P&gt;    ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]&lt;/P&gt;&lt;P&gt;    ssl.endpoint.identification.algorithm = https&lt;/P&gt;&lt;P&gt;    ssl.key.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keymanager.algorithm = SunX509&lt;/P&gt;&lt;P&gt;    ssl.keystore.location = &lt;/P&gt;&lt;P&gt;    ssl.keystore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keystore.type = JKS&lt;/P&gt;&lt;P&gt;    ssl.protocol = TLS&lt;/P&gt;&lt;P&gt;    ssl.provider = null&lt;/P&gt;&lt;P&gt;    ssl.secure.random.implementation = null&lt;/P&gt;&lt;P&gt;    ssl.trustmanager.algorithm = PKIX&lt;/P&gt;&lt;P&gt;    ssl.truststore.location = &lt;/P&gt;&lt;P&gt;    ssl.truststore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.truststore.type = JKS&lt;/P&gt;&lt;P&gt;    transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    transaction.max.timeout.ms = 900000&lt;/P&gt;&lt;P&gt;    transaction.remove.expired.transaction.cleanup.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    transaction.state.log.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    transaction.state.log.min.isr = 2&lt;/P&gt;&lt;P&gt;    transaction.state.log.num.partitions = 50&lt;/P&gt;&lt;P&gt;    transaction.state.log.replication.factor = 3&lt;/P&gt;&lt;P&gt;    transaction.state.log.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    transactional.id.expiration.ms = 604800000&lt;/P&gt;&lt;P&gt;    unclean.leader.election.enable = false&lt;/P&gt;&lt;P&gt;    zookeeper.connect = am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181&lt;/P&gt;&lt;P&gt;    zookeeper.connection.timeout.ms = 25000&lt;/P&gt;&lt;P&gt;    zookeeper.max.in.flight.requests = 10&lt;/P&gt;&lt;P&gt;    zookeeper.session.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    zookeeper.set.acl = 
false&lt;/P&gt;&lt;P&gt;    zookeeper.sync.time.ms = 2000&lt;/P&gt;&lt;P&gt; (kafka.server.KafkaConfig)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:25,982] INFO [ThrottledChannelReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:25,982] INFO [ThrottledChannelReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:25,983] INFO [ThrottledChannelReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:26,012] ERROR [KafkaServer id=1001] Fatal error during KafkaServer startup. Prepare to shutdown (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;org.apache.kafka.common.KafkaException: Failed to acquire lock on file .lock in /kafka-logs. A Kafka instance in another process or thread is using this directory.&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$$anonfun$lockLogDirs$1.apply(LogManager.scala:240)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$$anonfun$lockLogDirs$1.apply(LogManager.scala:236)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)&lt;/P&gt;&lt;P&gt;    at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$class.flatMap(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.AbstractTraversable.flatMap(Traversable.scala:104)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager.lockLogDirs(LogManager.scala:236)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager.&amp;lt;init&amp;gt;(LogManager.scala:97)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$.apply(LogManager.scala:958)&lt;/P&gt;&lt;P&gt;    at 
kafka.server.KafkaServer.startup(KafkaServer.scala:237)&lt;/P&gt;&lt;P&gt;    at kafka.server.KafkaServerStartable.startup(KafkaServerStartable.scala:38)&lt;/P&gt;&lt;P&gt;    at kafka.Kafka$.main(Kafka.scala:75)&lt;/P&gt;&lt;P&gt;    at kafka.Kafka.main(Kafka.scala)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:26,014] INFO [KafkaServer id=1001] shutting down (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:26,017] INFO [ZooKeeperClient] Closing. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:26,020] INFO [ZooKeeperClient] Closed. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:26,020] INFO [ThrottledChannelReaper-Fetch]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:26,982] INFO [ThrottledChannelReaper-Fetch]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:26,982] INFO [ThrottledChannelReaper-Fetch]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:26,982] INFO [ThrottledChannelReaper-Produce]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:27,982] INFO [ThrottledChannelReaper-Produce]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:27,982] INFO [ThrottledChannelReaper-Produce]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:27,983] INFO [ThrottledChannelReaper-Request]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:27,983] INFO [ThrottledChannelReaper-Request]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:27,983] INFO [ThrottledChannelReaper-Request]: Shutdown completed 
(kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:27,988] INFO [KafkaServer id=1001] shut down completed (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:27,988] ERROR Exiting Kafka. (kafka.server.KafkaServerStartable)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:07:27,990] INFO [KafkaServer id=1001] shutting down (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:10:51,711] INFO [GroupMetadataManager brokerId=1001] Removed 0 expired offsets in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:34,264] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:34,787] INFO starting (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:34,788] INFO Connecting to zookeeper on am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181 (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:34,801] INFO [ZooKeeperClient] Initializing a new session to am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:34,820] INFO [ZooKeeperClient] Waiting until connected. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:34,893] INFO [ZooKeeperClient] Connected. 
(kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:35,122] INFO Cluster ID = z-4P_uf-RzmpT2QvMnOD2g (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:35,200] INFO KafkaConfig values: &lt;/P&gt;&lt;P&gt;    advertised.host.name = null&lt;/P&gt;&lt;P&gt;    advertised.listeners = null&lt;/P&gt;&lt;P&gt;    advertised.port = null&lt;/P&gt;&lt;P&gt;    alter.config.policy.class.name = null&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    authorizer.class.name = &lt;/P&gt;&lt;P&gt;    auto.create.topics.enable = true&lt;/P&gt;&lt;P&gt;    auto.leader.rebalance.enable = true&lt;/P&gt;&lt;P&gt;    background.threads = 10&lt;/P&gt;&lt;P&gt;    broker.id = -1&lt;/P&gt;&lt;P&gt;    broker.id.generation.enable = true&lt;/P&gt;&lt;P&gt;    broker.rack = null&lt;/P&gt;&lt;P&gt;    client.quota.callback.class = null&lt;/P&gt;&lt;P&gt;    compression.type = producer&lt;/P&gt;&lt;P&gt;    connections.max.idle.ms = 600000&lt;/P&gt;&lt;P&gt;    controlled.shutdown.enable = true&lt;/P&gt;&lt;P&gt;    controlled.shutdown.max.retries = 3&lt;/P&gt;&lt;P&gt;    controlled.shutdown.retry.backoff.ms = 5000&lt;/P&gt;&lt;P&gt;    controller.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    create.topic.policy.class.name = null&lt;/P&gt;&lt;P&gt;    default.replication.factor = 1&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.check.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.time.ms = 86400000&lt;/P&gt;&lt;P&gt;    delegation.token.master.key = null&lt;/P&gt;&lt;P&gt;    delegation.token.max.lifetime.ms = 604800000&lt;/P&gt;&lt;P&gt;    delete.records.purgatory.purge.interval.requests = 1&lt;/P&gt;&lt;P&gt;    delete.topic.enable = true&lt;/P&gt;&lt;P&gt;    fetch.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    group.initial.rebalance.delay.ms = 3000&lt;/P&gt;&lt;P&gt;    group.max.session.timeout.ms = 
300000&lt;/P&gt;&lt;P&gt;    group.min.session.timeout.ms = 6000&lt;/P&gt;&lt;P&gt;    host.name = &lt;/P&gt;&lt;P&gt;    inter.broker.listener.name = null&lt;/P&gt;&lt;P&gt;    inter.broker.protocol.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    leader.imbalance.check.interval.seconds = 300&lt;/P&gt;&lt;P&gt;    leader.imbalance.per.broker.percentage = 10&lt;/P&gt;&lt;P&gt;    listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL&lt;/P&gt;&lt;P&gt;    listeners = PLAINTEXT://am-bigdata-01.am.local:6667&lt;/P&gt;&lt;P&gt;    log.cleaner.backoff.ms = 15000&lt;/P&gt;&lt;P&gt;    log.cleaner.dedupe.buffer.size = 134217728&lt;/P&gt;&lt;P&gt;    log.cleaner.delete.retention.ms = 86400000&lt;/P&gt;&lt;P&gt;    log.cleaner.enable = true&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.load.factor = 0.9&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.size = 524288&lt;/P&gt;&lt;P&gt;    log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308&lt;/P&gt;&lt;P&gt;    log.cleaner.min.cleanable.ratio = 0.5&lt;/P&gt;&lt;P&gt;    log.cleaner.min.compaction.lag.ms = 0&lt;/P&gt;&lt;P&gt;    log.cleaner.threads = 1&lt;/P&gt;&lt;P&gt;    log.cleanup.policy = [delete]&lt;/P&gt;&lt;P&gt;    log.dir = /tmp/kafka-logs&lt;/P&gt;&lt;P&gt;    log.dirs = /kafka-logs&lt;/P&gt;&lt;P&gt;    log.flush.interval.messages = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.interval.ms = null&lt;/P&gt;&lt;P&gt;    log.flush.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.flush.scheduler.interval.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.start.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.index.interval.bytes = 4096&lt;/P&gt;&lt;P&gt;    log.index.size.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    log.message.downconversion.enable = true&lt;/P&gt;&lt;P&gt;    log.message.format.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    log.message.timestamp.difference.max.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    
log.message.timestamp.type = CreateTime&lt;/P&gt;&lt;P&gt;    log.preallocate = false&lt;/P&gt;&lt;P&gt;    log.retention.bytes = -1&lt;/P&gt;&lt;P&gt;    log.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    log.retention.hours = 168&lt;/P&gt;&lt;P&gt;    log.retention.minutes = null&lt;/P&gt;&lt;P&gt;    log.retention.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.hours = 168&lt;/P&gt;&lt;P&gt;    log.roll.jitter.hours = 0&lt;/P&gt;&lt;P&gt;    log.roll.jitter.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.ms = null&lt;/P&gt;&lt;P&gt;    log.segment.bytes = 1073741824&lt;/P&gt;&lt;P&gt;    log.segment.delete.delay.ms = 60000&lt;/P&gt;&lt;P&gt;    max.connections.per.ip = 2147483647&lt;/P&gt;&lt;P&gt;    max.connections.per.ip.overrides = &lt;/P&gt;&lt;P&gt;    max.incremental.fetch.session.cache.slots = 1000&lt;/P&gt;&lt;P&gt;    message.max.bytes = 1000000&lt;/P&gt;&lt;P&gt;    metric.reporters = []&lt;/P&gt;&lt;P&gt;    metrics.num.samples = 2&lt;/P&gt;&lt;P&gt;    metrics.recording.level = INFO&lt;/P&gt;&lt;P&gt;    metrics.sample.window.ms = 30000&lt;/P&gt;&lt;P&gt;    min.insync.replicas = 1&lt;/P&gt;&lt;P&gt;    num.io.threads = 8&lt;/P&gt;&lt;P&gt;    num.network.threads = 3&lt;/P&gt;&lt;P&gt;    num.partitions = 1&lt;/P&gt;&lt;P&gt;    num.recovery.threads.per.data.dir = 1&lt;/P&gt;&lt;P&gt;    num.replica.alter.log.dirs.threads = null&lt;/P&gt;&lt;P&gt;    num.replica.fetchers = 1&lt;/P&gt;&lt;P&gt;    offset.metadata.max.bytes = 4096&lt;/P&gt;&lt;P&gt;    offsets.commit.required.acks = -1&lt;/P&gt;&lt;P&gt;    offsets.commit.timeout.ms = 5000&lt;/P&gt;&lt;P&gt;    offsets.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    offsets.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    offsets.retention.minutes = 86400000&lt;/P&gt;&lt;P&gt;    offsets.topic.compression.codec = 0&lt;/P&gt;&lt;P&gt;    offsets.topic.num.partitions = 50&lt;/P&gt;&lt;P&gt;    offsets.topic.replication.factor = 3&lt;/P&gt;&lt;P&gt;    offsets.topic.segment.bytes = 
104857600&lt;/P&gt;&lt;P&gt;    password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding&lt;/P&gt;&lt;P&gt;    password.encoder.iterations = 4096&lt;/P&gt;&lt;P&gt;    password.encoder.key.length = 128&lt;/P&gt;&lt;P&gt;    password.encoder.keyfactory.algorithm = null&lt;/P&gt;&lt;P&gt;    password.encoder.old.secret = null&lt;/P&gt;&lt;P&gt;    password.encoder.secret = null&lt;/P&gt;&lt;P&gt;    port = 6667&lt;/P&gt;&lt;P&gt;    principal.builder.class = null&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.entry.expiration.ms = 300000&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.max.size = 1000&lt;/P&gt;&lt;P&gt;    producer.metrics.enable = false&lt;/P&gt;&lt;P&gt;    producer.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    queued.max.request.bytes = -1&lt;/P&gt;&lt;P&gt;    queued.max.requests = 500&lt;/P&gt;&lt;P&gt;    quota.consumer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.producer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.window.num = 11&lt;/P&gt;&lt;P&gt;    quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.backoff.ms = 1000&lt;/P&gt;&lt;P&gt;    replica.fetch.max.bytes = 1048576&lt;/P&gt;&lt;P&gt;    replica.fetch.min.bytes = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.response.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    replica.fetch.wait.max.ms = 500&lt;/P&gt;&lt;P&gt;    replica.high.watermark.checkpoint.interval.ms = 5000&lt;/P&gt;&lt;P&gt;    replica.lag.time.max.ms = 10000&lt;/P&gt;&lt;P&gt;    replica.socket.receive.buffer.bytes = 65536&lt;/P&gt;&lt;P&gt;    replica.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    request.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    reserved.broker.max.id = 1000&lt;/P&gt;&lt;P&gt;    sasl.client.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.enabled.mechanisms = [GSSAPI]&lt;/P&gt;&lt;P&gt;    sasl.jaas.config = null&lt;/P&gt;&lt;P&gt;    
sasl.kerberos.kinit.cmd = /usr/bin/kinit&lt;/P&gt;&lt;P&gt;    sasl.kerberos.min.time.before.relogin = 60000&lt;/P&gt;&lt;P&gt;    sasl.kerberos.principal.to.local.rules = [DEFAULT]&lt;/P&gt;&lt;P&gt;    sasl.kerberos.service.name = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.buffer.seconds = 300&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.min.period.seconds = 60&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.mechanism.inter.broker.protocol = GSSAPI&lt;/P&gt;&lt;P&gt;    sasl.server.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    security.inter.broker.protocol = PLAINTEXT&lt;/P&gt;&lt;P&gt;    socket.receive.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    socket.request.max.bytes = 104857600&lt;/P&gt;&lt;P&gt;    socket.send.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    ssl.cipher.suites = []&lt;/P&gt;&lt;P&gt;    ssl.client.auth = none&lt;/P&gt;&lt;P&gt;    ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]&lt;/P&gt;&lt;P&gt;    ssl.endpoint.identification.algorithm = https&lt;/P&gt;&lt;P&gt;    ssl.key.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keymanager.algorithm = SunX509&lt;/P&gt;&lt;P&gt;    ssl.keystore.location = &lt;/P&gt;&lt;P&gt;    ssl.keystore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keystore.type = JKS&lt;/P&gt;&lt;P&gt;    ssl.protocol = TLS&lt;/P&gt;&lt;P&gt;    ssl.provider = null&lt;/P&gt;&lt;P&gt;    ssl.secure.random.implementation = null&lt;/P&gt;&lt;P&gt;    ssl.trustmanager.algorithm = PKIX&lt;/P&gt;&lt;P&gt;    ssl.truststore.location = &lt;/P&gt;&lt;P&gt;    ssl.truststore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.truststore.type = JKS&lt;/P&gt;&lt;P&gt;    
transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    transaction.max.timeout.ms = 900000&lt;/P&gt;&lt;P&gt;    transaction.remove.expired.transaction.cleanup.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    transaction.state.log.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    transaction.state.log.min.isr = 2&lt;/P&gt;&lt;P&gt;    transaction.state.log.num.partitions = 50&lt;/P&gt;&lt;P&gt;    transaction.state.log.replication.factor = 3&lt;/P&gt;&lt;P&gt;    transaction.state.log.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    transactional.id.expiration.ms = 604800000&lt;/P&gt;&lt;P&gt;    unclean.leader.election.enable = false&lt;/P&gt;&lt;P&gt;    zookeeper.connect = am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181&lt;/P&gt;&lt;P&gt;    zookeeper.connection.timeout.ms = 25000&lt;/P&gt;&lt;P&gt;    zookeeper.max.in.flight.requests = 10&lt;/P&gt;&lt;P&gt;    zookeeper.session.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    zookeeper.set.acl = false&lt;/P&gt;&lt;P&gt;    zookeeper.sync.time.ms = 2000&lt;/P&gt;&lt;P&gt; (kafka.server.KafkaConfig)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:35,209] INFO KafkaConfig values: &lt;/P&gt;&lt;P&gt;    advertised.host.name = null&lt;/P&gt;&lt;P&gt;    advertised.listeners = null&lt;/P&gt;&lt;P&gt;    advertised.port = null&lt;/P&gt;&lt;P&gt;    alter.config.policy.class.name = null&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    authorizer.class.name = &lt;/P&gt;&lt;P&gt;    auto.create.topics.enable = true&lt;/P&gt;&lt;P&gt;    auto.leader.rebalance.enable = true&lt;/P&gt;&lt;P&gt;    background.threads = 10&lt;/P&gt;&lt;P&gt;    broker.id = -1&lt;/P&gt;&lt;P&gt;    broker.id.generation.enable = true&lt;/P&gt;&lt;P&gt;    broker.rack = null&lt;/P&gt;&lt;P&gt;    client.quota.callback.class = null&lt;/P&gt;&lt;P&gt;    compression.type = 
producer&lt;/P&gt;&lt;P&gt;    connections.max.idle.ms = 600000&lt;/P&gt;&lt;P&gt;    controlled.shutdown.enable = true&lt;/P&gt;&lt;P&gt;    controlled.shutdown.max.retries = 3&lt;/P&gt;&lt;P&gt;    controlled.shutdown.retry.backoff.ms = 5000&lt;/P&gt;&lt;P&gt;    controller.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    create.topic.policy.class.name = null&lt;/P&gt;&lt;P&gt;    default.replication.factor = 1&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.check.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.time.ms = 86400000&lt;/P&gt;&lt;P&gt;    delegation.token.master.key = null&lt;/P&gt;&lt;P&gt;    delegation.token.max.lifetime.ms = 604800000&lt;/P&gt;&lt;P&gt;    delete.records.purgatory.purge.interval.requests = 1&lt;/P&gt;&lt;P&gt;    delete.topic.enable = true&lt;/P&gt;&lt;P&gt;    fetch.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    group.initial.rebalance.delay.ms = 3000&lt;/P&gt;&lt;P&gt;    group.max.session.timeout.ms = 300000&lt;/P&gt;&lt;P&gt;    group.min.session.timeout.ms = 6000&lt;/P&gt;&lt;P&gt;    host.name = &lt;/P&gt;&lt;P&gt;    inter.broker.listener.name = null&lt;/P&gt;&lt;P&gt;    inter.broker.protocol.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    leader.imbalance.check.interval.seconds = 300&lt;/P&gt;&lt;P&gt;    leader.imbalance.per.broker.percentage = 10&lt;/P&gt;&lt;P&gt;    listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL&lt;/P&gt;&lt;P&gt;    listeners = PLAINTEXT://am-bigdata-01.am.local:6667&lt;/P&gt;&lt;P&gt;    log.cleaner.backoff.ms = 15000&lt;/P&gt;&lt;P&gt;    log.cleaner.dedupe.buffer.size = 134217728&lt;/P&gt;&lt;P&gt;    log.cleaner.delete.retention.ms = 86400000&lt;/P&gt;&lt;P&gt;    log.cleaner.enable = true&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.load.factor = 0.9&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.size = 524288&lt;/P&gt;&lt;P&gt;    log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308&lt;/P&gt;&lt;P&gt;   
 log.cleaner.min.cleanable.ratio = 0.5&lt;/P&gt;&lt;P&gt;    log.cleaner.min.compaction.lag.ms = 0&lt;/P&gt;&lt;P&gt;    log.cleaner.threads = 1&lt;/P&gt;&lt;P&gt;    log.cleanup.policy = [delete]&lt;/P&gt;&lt;P&gt;    log.dir = /tmp/kafka-logs&lt;/P&gt;&lt;P&gt;    log.dirs = /kafka-logs&lt;/P&gt;&lt;P&gt;    log.flush.interval.messages = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.interval.ms = null&lt;/P&gt;&lt;P&gt;    log.flush.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.flush.scheduler.interval.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.start.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.index.interval.bytes = 4096&lt;/P&gt;&lt;P&gt;    log.index.size.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    log.message.downconversion.enable = true&lt;/P&gt;&lt;P&gt;    log.message.format.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    log.message.timestamp.difference.max.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.message.timestamp.type = CreateTime&lt;/P&gt;&lt;P&gt;    log.preallocate = false&lt;/P&gt;&lt;P&gt;    log.retention.bytes = -1&lt;/P&gt;&lt;P&gt;    log.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    log.retention.hours = 168&lt;/P&gt;&lt;P&gt;    log.retention.minutes = null&lt;/P&gt;&lt;P&gt;    log.retention.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.hours = 168&lt;/P&gt;&lt;P&gt;    log.roll.jitter.hours = 0&lt;/P&gt;&lt;P&gt;    log.roll.jitter.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.ms = null&lt;/P&gt;&lt;P&gt;    log.segment.bytes = 1073741824&lt;/P&gt;&lt;P&gt;    log.segment.delete.delay.ms = 60000&lt;/P&gt;&lt;P&gt;    max.connections.per.ip = 2147483647&lt;/P&gt;&lt;P&gt;    max.connections.per.ip.overrides = &lt;/P&gt;&lt;P&gt;    max.incremental.fetch.session.cache.slots = 1000&lt;/P&gt;&lt;P&gt;    message.max.bytes = 1000000&lt;/P&gt;&lt;P&gt;    metric.reporters = []&lt;/P&gt;&lt;P&gt;    metrics.num.samples = 2&lt;/P&gt;&lt;P&gt;    metrics.recording.level = 
INFO&lt;/P&gt;&lt;P&gt;    metrics.sample.window.ms = 30000&lt;/P&gt;&lt;P&gt;    min.insync.replicas = 1&lt;/P&gt;&lt;P&gt;    num.io.threads = 8&lt;/P&gt;&lt;P&gt;    num.network.threads = 3&lt;/P&gt;&lt;P&gt;    num.partitions = 1&lt;/P&gt;&lt;P&gt;    num.recovery.threads.per.data.dir = 1&lt;/P&gt;&lt;P&gt;    num.replica.alter.log.dirs.threads = null&lt;/P&gt;&lt;P&gt;    num.replica.fetchers = 1&lt;/P&gt;&lt;P&gt;    offset.metadata.max.bytes = 4096&lt;/P&gt;&lt;P&gt;    offsets.commit.required.acks = -1&lt;/P&gt;&lt;P&gt;    offsets.commit.timeout.ms = 5000&lt;/P&gt;&lt;P&gt;    offsets.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    offsets.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    offsets.retention.minutes = 86400000&lt;/P&gt;&lt;P&gt;    offsets.topic.compression.codec = 0&lt;/P&gt;&lt;P&gt;    offsets.topic.num.partitions = 50&lt;/P&gt;&lt;P&gt;    offsets.topic.replication.factor = 3&lt;/P&gt;&lt;P&gt;    offsets.topic.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding&lt;/P&gt;&lt;P&gt;    password.encoder.iterations = 4096&lt;/P&gt;&lt;P&gt;    password.encoder.key.length = 128&lt;/P&gt;&lt;P&gt;    password.encoder.keyfactory.algorithm = null&lt;/P&gt;&lt;P&gt;    password.encoder.old.secret = null&lt;/P&gt;&lt;P&gt;    password.encoder.secret = null&lt;/P&gt;&lt;P&gt;    port = 6667&lt;/P&gt;&lt;P&gt;    principal.builder.class = null&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.entry.expiration.ms = 300000&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.max.size = 1000&lt;/P&gt;&lt;P&gt;    producer.metrics.enable = false&lt;/P&gt;&lt;P&gt;    producer.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    queued.max.request.bytes = -1&lt;/P&gt;&lt;P&gt;    queued.max.requests = 500&lt;/P&gt;&lt;P&gt;    quota.consumer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.producer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.window.num = 
11&lt;/P&gt;&lt;P&gt;    quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.backoff.ms = 1000&lt;/P&gt;&lt;P&gt;    replica.fetch.max.bytes = 1048576&lt;/P&gt;&lt;P&gt;    replica.fetch.min.bytes = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.response.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    replica.fetch.wait.max.ms = 500&lt;/P&gt;&lt;P&gt;    replica.high.watermark.checkpoint.interval.ms = 5000&lt;/P&gt;&lt;P&gt;    replica.lag.time.max.ms = 10000&lt;/P&gt;&lt;P&gt;    replica.socket.receive.buffer.bytes = 65536&lt;/P&gt;&lt;P&gt;    replica.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    request.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    reserved.broker.max.id = 1000&lt;/P&gt;&lt;P&gt;    sasl.client.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.enabled.mechanisms = [GSSAPI]&lt;/P&gt;&lt;P&gt;    sasl.jaas.config = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.kinit.cmd = /usr/bin/kinit&lt;/P&gt;&lt;P&gt;    sasl.kerberos.min.time.before.relogin = 60000&lt;/P&gt;&lt;P&gt;    sasl.kerberos.principal.to.local.rules = [DEFAULT]&lt;/P&gt;&lt;P&gt;    sasl.kerberos.service.name = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.buffer.seconds = 300&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.min.period.seconds = 60&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.mechanism.inter.broker.protocol = GSSAPI&lt;/P&gt;&lt;P&gt;    sasl.server.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    security.inter.broker.protocol = PLAINTEXT&lt;/P&gt;&lt;P&gt;    socket.receive.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt; 
   socket.request.max.bytes = 104857600&lt;/P&gt;&lt;P&gt;    socket.send.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    ssl.cipher.suites = []&lt;/P&gt;&lt;P&gt;    ssl.client.auth = none&lt;/P&gt;&lt;P&gt;    ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]&lt;/P&gt;&lt;P&gt;    ssl.endpoint.identification.algorithm = https&lt;/P&gt;&lt;P&gt;    ssl.key.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keymanager.algorithm = SunX509&lt;/P&gt;&lt;P&gt;    ssl.keystore.location = &lt;/P&gt;&lt;P&gt;    ssl.keystore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keystore.type = JKS&lt;/P&gt;&lt;P&gt;    ssl.protocol = TLS&lt;/P&gt;&lt;P&gt;    ssl.provider = null&lt;/P&gt;&lt;P&gt;    ssl.secure.random.implementation = null&lt;/P&gt;&lt;P&gt;    ssl.trustmanager.algorithm = PKIX&lt;/P&gt;&lt;P&gt;    ssl.truststore.location = &lt;/P&gt;&lt;P&gt;    ssl.truststore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.truststore.type = JKS&lt;/P&gt;&lt;P&gt;    transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    transaction.max.timeout.ms = 900000&lt;/P&gt;&lt;P&gt;    transaction.remove.expired.transaction.cleanup.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    transaction.state.log.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    transaction.state.log.min.isr = 2&lt;/P&gt;&lt;P&gt;    transaction.state.log.num.partitions = 50&lt;/P&gt;&lt;P&gt;    transaction.state.log.replication.factor = 3&lt;/P&gt;&lt;P&gt;    transaction.state.log.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    transactional.id.expiration.ms = 604800000&lt;/P&gt;&lt;P&gt;    unclean.leader.election.enable = false&lt;/P&gt;&lt;P&gt;    zookeeper.connect = am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181&lt;/P&gt;&lt;P&gt;    zookeeper.connection.timeout.ms = 25000&lt;/P&gt;&lt;P&gt;    zookeeper.max.in.flight.requests = 10&lt;/P&gt;&lt;P&gt;    zookeeper.session.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    zookeeper.set.acl = 
false&lt;/P&gt;&lt;P&gt;    zookeeper.sync.time.ms = 2000&lt;/P&gt;&lt;P&gt; (kafka.server.KafkaConfig)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:35,236] INFO [ThrottledChannelReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:35,236] INFO [ThrottledChannelReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:35,237] INFO [ThrottledChannelReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:35,269] ERROR [KafkaServer id=1001] Fatal error during KafkaServer startup. Prepare to shutdown (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;org.apache.kafka.common.KafkaException: Failed to acquire lock on file .lock in /kafka-logs. A Kafka instance in another process or thread is using this directory.&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$$anonfun$lockLogDirs$1.apply(LogManager.scala:240)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$$anonfun$lockLogDirs$1.apply(LogManager.scala:236)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)&lt;/P&gt;&lt;P&gt;    at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$class.flatMap(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.AbstractTraversable.flatMap(Traversable.scala:104)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager.lockLogDirs(LogManager.scala:236)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager.&amp;lt;init&amp;gt;(LogManager.scala:97)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$.apply(LogManager.scala:958)&lt;/P&gt;&lt;P&gt;    at 
kafka.server.KafkaServer.startup(KafkaServer.scala:237)&lt;/P&gt;&lt;P&gt;    at kafka.server.KafkaServerStartable.startup(KafkaServerStartable.scala:38)&lt;/P&gt;&lt;P&gt;    at kafka.Kafka$.main(Kafka.scala:75)&lt;/P&gt;&lt;P&gt;    at kafka.Kafka.main(Kafka.scala)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:35,271] INFO [KafkaServer id=1001] shutting down (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:35,274] INFO [ZooKeeperClient] Closing. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:35,277] INFO [ZooKeeperClient] Closed. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:35,278] INFO [ThrottledChannelReaper-Fetch]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:36,237] INFO [ThrottledChannelReaper-Fetch]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:36,237] INFO [ThrottledChannelReaper-Fetch]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:36,237] INFO [ThrottledChannelReaper-Produce]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:37,237] INFO [ThrottledChannelReaper-Produce]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:37,237] INFO [ThrottledChannelReaper-Produce]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:37,237] INFO [ThrottledChannelReaper-Request]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:37,238] INFO [ThrottledChannelReaper-Request]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:37,238] INFO [ThrottledChannelReaper-Request]: Shutdown completed 
(kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:37,245] INFO [KafkaServer id=1001] shut down completed (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:37,245] ERROR Exiting Kafka. (kafka.server.KafkaServerStartable)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:11:37,248] INFO [KafkaServer id=1001] shutting down (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:16,467] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:16,994] INFO starting (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:16,995] INFO Connecting to zookeeper on am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181 (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:17,009] INFO [ZooKeeperClient] Initializing a new session to am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:17,028] INFO [ZooKeeperClient] Waiting until connected. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:17,088] INFO [ZooKeeperClient] Connected. 
(kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:17,345] INFO Cluster ID = z-4P_uf-RzmpT2QvMnOD2g (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:17,424] INFO KafkaConfig values: &lt;/P&gt;&lt;P&gt;    advertised.host.name = null&lt;/P&gt;&lt;P&gt;    advertised.listeners = null&lt;/P&gt;&lt;P&gt;    advertised.port = null&lt;/P&gt;&lt;P&gt;    alter.config.policy.class.name = null&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    authorizer.class.name = &lt;/P&gt;&lt;P&gt;    auto.create.topics.enable = true&lt;/P&gt;&lt;P&gt;    auto.leader.rebalance.enable = true&lt;/P&gt;&lt;P&gt;    background.threads = 10&lt;/P&gt;&lt;P&gt;    broker.id = -1&lt;/P&gt;&lt;P&gt;    broker.id.generation.enable = true&lt;/P&gt;&lt;P&gt;    broker.rack = null&lt;/P&gt;&lt;P&gt;    client.quota.callback.class = null&lt;/P&gt;&lt;P&gt;    compression.type = producer&lt;/P&gt;&lt;P&gt;    connections.max.idle.ms = 600000&lt;/P&gt;&lt;P&gt;    controlled.shutdown.enable = true&lt;/P&gt;&lt;P&gt;    controlled.shutdown.max.retries = 3&lt;/P&gt;&lt;P&gt;    controlled.shutdown.retry.backoff.ms = 5000&lt;/P&gt;&lt;P&gt;    controller.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    create.topic.policy.class.name = null&lt;/P&gt;&lt;P&gt;    default.replication.factor = 1&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.check.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.time.ms = 86400000&lt;/P&gt;&lt;P&gt;    delegation.token.master.key = null&lt;/P&gt;&lt;P&gt;    delegation.token.max.lifetime.ms = 604800000&lt;/P&gt;&lt;P&gt;    delete.records.purgatory.purge.interval.requests = 1&lt;/P&gt;&lt;P&gt;    delete.topic.enable = true&lt;/P&gt;&lt;P&gt;    fetch.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    group.initial.rebalance.delay.ms = 3000&lt;/P&gt;&lt;P&gt;    group.max.session.timeout.ms = 
300000&lt;/P&gt;&lt;P&gt;    group.min.session.timeout.ms = 6000&lt;/P&gt;&lt;P&gt;    host.name = &lt;/P&gt;&lt;P&gt;    inter.broker.listener.name = null&lt;/P&gt;&lt;P&gt;    inter.broker.protocol.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    leader.imbalance.check.interval.seconds = 300&lt;/P&gt;&lt;P&gt;    leader.imbalance.per.broker.percentage = 10&lt;/P&gt;&lt;P&gt;    listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL&lt;/P&gt;&lt;P&gt;    listeners = PLAINTEXT://am-bigdata-01.am.local:6667&lt;/P&gt;&lt;P&gt;    log.cleaner.backoff.ms = 15000&lt;/P&gt;&lt;P&gt;    log.cleaner.dedupe.buffer.size = 134217728&lt;/P&gt;&lt;P&gt;    log.cleaner.delete.retention.ms = 86400000&lt;/P&gt;&lt;P&gt;    log.cleaner.enable = true&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.load.factor = 0.9&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.size = 524288&lt;/P&gt;&lt;P&gt;    log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308&lt;/P&gt;&lt;P&gt;    log.cleaner.min.cleanable.ratio = 0.5&lt;/P&gt;&lt;P&gt;    log.cleaner.min.compaction.lag.ms = 0&lt;/P&gt;&lt;P&gt;    log.cleaner.threads = 1&lt;/P&gt;&lt;P&gt;    log.cleanup.policy = [delete]&lt;/P&gt;&lt;P&gt;    log.dir = /tmp/kafka-logs&lt;/P&gt;&lt;P&gt;    log.dirs = /kafka-logs&lt;/P&gt;&lt;P&gt;    log.flush.interval.messages = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.interval.ms = null&lt;/P&gt;&lt;P&gt;    log.flush.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.flush.scheduler.interval.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.start.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.index.interval.bytes = 4096&lt;/P&gt;&lt;P&gt;    log.index.size.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    log.message.downconversion.enable = true&lt;/P&gt;&lt;P&gt;    log.message.format.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    log.message.timestamp.difference.max.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    
log.message.timestamp.type = CreateTime&lt;/P&gt;&lt;P&gt;    log.preallocate = false&lt;/P&gt;&lt;P&gt;    log.retention.bytes = -1&lt;/P&gt;&lt;P&gt;    log.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    log.retention.hours = 168&lt;/P&gt;&lt;P&gt;    log.retention.minutes = null&lt;/P&gt;&lt;P&gt;    log.retention.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.hours = 168&lt;/P&gt;&lt;P&gt;    log.roll.jitter.hours = 0&lt;/P&gt;&lt;P&gt;    log.roll.jitter.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.ms = null&lt;/P&gt;&lt;P&gt;    log.segment.bytes = 1073741824&lt;/P&gt;&lt;P&gt;    log.segment.delete.delay.ms = 60000&lt;/P&gt;&lt;P&gt;    max.connections.per.ip = 2147483647&lt;/P&gt;&lt;P&gt;    max.connections.per.ip.overrides = &lt;/P&gt;&lt;P&gt;    max.incremental.fetch.session.cache.slots = 1000&lt;/P&gt;&lt;P&gt;    message.max.bytes = 1000000&lt;/P&gt;&lt;P&gt;    metric.reporters = []&lt;/P&gt;&lt;P&gt;    metrics.num.samples = 2&lt;/P&gt;&lt;P&gt;    metrics.recording.level = INFO&lt;/P&gt;&lt;P&gt;    metrics.sample.window.ms = 30000&lt;/P&gt;&lt;P&gt;    min.insync.replicas = 1&lt;/P&gt;&lt;P&gt;    num.io.threads = 8&lt;/P&gt;&lt;P&gt;    num.network.threads = 3&lt;/P&gt;&lt;P&gt;    num.partitions = 1&lt;/P&gt;&lt;P&gt;    num.recovery.threads.per.data.dir = 1&lt;/P&gt;&lt;P&gt;    num.replica.alter.log.dirs.threads = null&lt;/P&gt;&lt;P&gt;    num.replica.fetchers = 1&lt;/P&gt;&lt;P&gt;    offset.metadata.max.bytes = 4096&lt;/P&gt;&lt;P&gt;    offsets.commit.required.acks = -1&lt;/P&gt;&lt;P&gt;    offsets.commit.timeout.ms = 5000&lt;/P&gt;&lt;P&gt;    offsets.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    offsets.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    offsets.retention.minutes = 86400000&lt;/P&gt;&lt;P&gt;    offsets.topic.compression.codec = 0&lt;/P&gt;&lt;P&gt;    offsets.topic.num.partitions = 50&lt;/P&gt;&lt;P&gt;    offsets.topic.replication.factor = 3&lt;/P&gt;&lt;P&gt;    offsets.topic.segment.bytes = 
104857600&lt;/P&gt;&lt;P&gt;    password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding&lt;/P&gt;&lt;P&gt;    password.encoder.iterations = 4096&lt;/P&gt;&lt;P&gt;    password.encoder.key.length = 128&lt;/P&gt;&lt;P&gt;    password.encoder.keyfactory.algorithm = null&lt;/P&gt;&lt;P&gt;    password.encoder.old.secret = null&lt;/P&gt;&lt;P&gt;    password.encoder.secret = null&lt;/P&gt;&lt;P&gt;    port = 6667&lt;/P&gt;&lt;P&gt;    principal.builder.class = null&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.entry.expiration.ms = 300000&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.max.size = 1000&lt;/P&gt;&lt;P&gt;    producer.metrics.enable = false&lt;/P&gt;&lt;P&gt;    producer.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    queued.max.request.bytes = -1&lt;/P&gt;&lt;P&gt;    queued.max.requests = 500&lt;/P&gt;&lt;P&gt;    quota.consumer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.producer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.window.num = 11&lt;/P&gt;&lt;P&gt;    quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.backoff.ms = 1000&lt;/P&gt;&lt;P&gt;    replica.fetch.max.bytes = 1048576&lt;/P&gt;&lt;P&gt;    replica.fetch.min.bytes = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.response.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    replica.fetch.wait.max.ms = 500&lt;/P&gt;&lt;P&gt;    replica.high.watermark.checkpoint.interval.ms = 5000&lt;/P&gt;&lt;P&gt;    replica.lag.time.max.ms = 10000&lt;/P&gt;&lt;P&gt;    replica.socket.receive.buffer.bytes = 65536&lt;/P&gt;&lt;P&gt;    replica.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    request.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    reserved.broker.max.id = 1000&lt;/P&gt;&lt;P&gt;    sasl.client.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.enabled.mechanisms = [GSSAPI]&lt;/P&gt;&lt;P&gt;    sasl.jaas.config = null&lt;/P&gt;&lt;P&gt;    
sasl.kerberos.kinit.cmd = /usr/bin/kinit&lt;/P&gt;&lt;P&gt;    sasl.kerberos.min.time.before.relogin = 60000&lt;/P&gt;&lt;P&gt;    sasl.kerberos.principal.to.local.rules = [DEFAULT]&lt;/P&gt;&lt;P&gt;    sasl.kerberos.service.name = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.buffer.seconds = 300&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.min.period.seconds = 60&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.mechanism.inter.broker.protocol = GSSAPI&lt;/P&gt;&lt;P&gt;    sasl.server.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    security.inter.broker.protocol = PLAINTEXT&lt;/P&gt;&lt;P&gt;    socket.receive.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    socket.request.max.bytes = 104857600&lt;/P&gt;&lt;P&gt;    socket.send.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    ssl.cipher.suites = []&lt;/P&gt;&lt;P&gt;    ssl.client.auth = none&lt;/P&gt;&lt;P&gt;    ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]&lt;/P&gt;&lt;P&gt;    ssl.endpoint.identification.algorithm = https&lt;/P&gt;&lt;P&gt;    ssl.key.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keymanager.algorithm = SunX509&lt;/P&gt;&lt;P&gt;    ssl.keystore.location = &lt;/P&gt;&lt;P&gt;    ssl.keystore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keystore.type = JKS&lt;/P&gt;&lt;P&gt;    ssl.protocol = TLS&lt;/P&gt;&lt;P&gt;    ssl.provider = null&lt;/P&gt;&lt;P&gt;    ssl.secure.random.implementation = null&lt;/P&gt;&lt;P&gt;    ssl.trustmanager.algorithm = PKIX&lt;/P&gt;&lt;P&gt;    ssl.truststore.location = &lt;/P&gt;&lt;P&gt;    ssl.truststore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.truststore.type = JKS&lt;/P&gt;&lt;P&gt;    
transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    transaction.max.timeout.ms = 900000&lt;/P&gt;&lt;P&gt;    transaction.remove.expired.transaction.cleanup.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    transaction.state.log.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    transaction.state.log.min.isr = 2&lt;/P&gt;&lt;P&gt;    transaction.state.log.num.partitions = 50&lt;/P&gt;&lt;P&gt;    transaction.state.log.replication.factor = 3&lt;/P&gt;&lt;P&gt;    transaction.state.log.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    transactional.id.expiration.ms = 604800000&lt;/P&gt;&lt;P&gt;    unclean.leader.election.enable = false&lt;/P&gt;&lt;P&gt;    zookeeper.connect = am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181&lt;/P&gt;&lt;P&gt;    zookeeper.connection.timeout.ms = 25000&lt;/P&gt;&lt;P&gt;    zookeeper.max.in.flight.requests = 10&lt;/P&gt;&lt;P&gt;    zookeeper.session.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    zookeeper.set.acl = false&lt;/P&gt;&lt;P&gt;    zookeeper.sync.time.ms = 2000&lt;/P&gt;&lt;P&gt; (kafka.server.KafkaConfig)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:17,431] INFO KafkaConfig values: &lt;/P&gt;&lt;P&gt;    advertised.host.name = null&lt;/P&gt;&lt;P&gt;    advertised.listeners = null&lt;/P&gt;&lt;P&gt;    advertised.port = null&lt;/P&gt;&lt;P&gt;    alter.config.policy.class.name = null&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    authorizer.class.name = &lt;/P&gt;&lt;P&gt;    auto.create.topics.enable = true&lt;/P&gt;&lt;P&gt;    auto.leader.rebalance.enable = true&lt;/P&gt;&lt;P&gt;    background.threads = 10&lt;/P&gt;&lt;P&gt;    broker.id = -1&lt;/P&gt;&lt;P&gt;    broker.id.generation.enable = true&lt;/P&gt;&lt;P&gt;    broker.rack = null&lt;/P&gt;&lt;P&gt;    client.quota.callback.class = null&lt;/P&gt;&lt;P&gt;    compression.type = 
producer&lt;/P&gt;&lt;P&gt;    connections.max.idle.ms = 600000&lt;/P&gt;&lt;P&gt;    controlled.shutdown.enable = true&lt;/P&gt;&lt;P&gt;    controlled.shutdown.max.retries = 3&lt;/P&gt;&lt;P&gt;    controlled.shutdown.retry.backoff.ms = 5000&lt;/P&gt;&lt;P&gt;    controller.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    create.topic.policy.class.name = null&lt;/P&gt;&lt;P&gt;    default.replication.factor = 1&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.check.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.time.ms = 86400000&lt;/P&gt;&lt;P&gt;    delegation.token.master.key = null&lt;/P&gt;&lt;P&gt;    delegation.token.max.lifetime.ms = 604800000&lt;/P&gt;&lt;P&gt;    delete.records.purgatory.purge.interval.requests = 1&lt;/P&gt;&lt;P&gt;    delete.topic.enable = true&lt;/P&gt;&lt;P&gt;    fetch.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    group.initial.rebalance.delay.ms = 3000&lt;/P&gt;&lt;P&gt;    group.max.session.timeout.ms = 300000&lt;/P&gt;&lt;P&gt;    group.min.session.timeout.ms = 6000&lt;/P&gt;&lt;P&gt;    host.name = &lt;/P&gt;&lt;P&gt;    inter.broker.listener.name = null&lt;/P&gt;&lt;P&gt;    inter.broker.protocol.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    leader.imbalance.check.interval.seconds = 300&lt;/P&gt;&lt;P&gt;    leader.imbalance.per.broker.percentage = 10&lt;/P&gt;&lt;P&gt;    listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL&lt;/P&gt;&lt;P&gt;    listeners = PLAINTEXT://am-bigdata-01.am.local:6667&lt;/P&gt;&lt;P&gt;    log.cleaner.backoff.ms = 15000&lt;/P&gt;&lt;P&gt;    log.cleaner.dedupe.buffer.size = 134217728&lt;/P&gt;&lt;P&gt;    log.cleaner.delete.retention.ms = 86400000&lt;/P&gt;&lt;P&gt;    log.cleaner.enable = true&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.load.factor = 0.9&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.size = 524288&lt;/P&gt;&lt;P&gt;    log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308&lt;/P&gt;&lt;P&gt;   
 log.cleaner.min.cleanable.ratio = 0.5&lt;/P&gt;&lt;P&gt;    log.cleaner.min.compaction.lag.ms = 0&lt;/P&gt;&lt;P&gt;    log.cleaner.threads = 1&lt;/P&gt;&lt;P&gt;    log.cleanup.policy = [delete]&lt;/P&gt;&lt;P&gt;    log.dir = /tmp/kafka-logs&lt;/P&gt;&lt;P&gt;    log.dirs = /kafka-logs&lt;/P&gt;&lt;P&gt;    log.flush.interval.messages = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.interval.ms = null&lt;/P&gt;&lt;P&gt;    log.flush.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.flush.scheduler.interval.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.start.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.index.interval.bytes = 4096&lt;/P&gt;&lt;P&gt;    log.index.size.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    log.message.downconversion.enable = true&lt;/P&gt;&lt;P&gt;    log.message.format.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    log.message.timestamp.difference.max.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.message.timestamp.type = CreateTime&lt;/P&gt;&lt;P&gt;    log.preallocate = false&lt;/P&gt;&lt;P&gt;    log.retention.bytes = -1&lt;/P&gt;&lt;P&gt;    log.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    log.retention.hours = 168&lt;/P&gt;&lt;P&gt;    log.retention.minutes = null&lt;/P&gt;&lt;P&gt;    log.retention.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.hours = 168&lt;/P&gt;&lt;P&gt;    log.roll.jitter.hours = 0&lt;/P&gt;&lt;P&gt;    log.roll.jitter.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.ms = null&lt;/P&gt;&lt;P&gt;    log.segment.bytes = 1073741824&lt;/P&gt;&lt;P&gt;    log.segment.delete.delay.ms = 60000&lt;/P&gt;&lt;P&gt;    max.connections.per.ip = 2147483647&lt;/P&gt;&lt;P&gt;    max.connections.per.ip.overrides = &lt;/P&gt;&lt;P&gt;    max.incremental.fetch.session.cache.slots = 1000&lt;/P&gt;&lt;P&gt;    message.max.bytes = 1000000&lt;/P&gt;&lt;P&gt;    metric.reporters = []&lt;/P&gt;&lt;P&gt;    metrics.num.samples = 2&lt;/P&gt;&lt;P&gt;    metrics.recording.level = 
INFO&lt;/P&gt;&lt;P&gt;    metrics.sample.window.ms = 30000&lt;/P&gt;&lt;P&gt;    min.insync.replicas = 1&lt;/P&gt;&lt;P&gt;    num.io.threads = 8&lt;/P&gt;&lt;P&gt;    num.network.threads = 3&lt;/P&gt;&lt;P&gt;    num.partitions = 1&lt;/P&gt;&lt;P&gt;    num.recovery.threads.per.data.dir = 1&lt;/P&gt;&lt;P&gt;    num.replica.alter.log.dirs.threads = null&lt;/P&gt;&lt;P&gt;    num.replica.fetchers = 1&lt;/P&gt;&lt;P&gt;    offset.metadata.max.bytes = 4096&lt;/P&gt;&lt;P&gt;    offsets.commit.required.acks = -1&lt;/P&gt;&lt;P&gt;    offsets.commit.timeout.ms = 5000&lt;/P&gt;&lt;P&gt;    offsets.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    offsets.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    offsets.retention.minutes = 86400000&lt;/P&gt;&lt;P&gt;    offsets.topic.compression.codec = 0&lt;/P&gt;&lt;P&gt;    offsets.topic.num.partitions = 50&lt;/P&gt;&lt;P&gt;    offsets.topic.replication.factor = 3&lt;/P&gt;&lt;P&gt;    offsets.topic.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding&lt;/P&gt;&lt;P&gt;    password.encoder.iterations = 4096&lt;/P&gt;&lt;P&gt;    password.encoder.key.length = 128&lt;/P&gt;&lt;P&gt;    password.encoder.keyfactory.algorithm = null&lt;/P&gt;&lt;P&gt;    password.encoder.old.secret = null&lt;/P&gt;&lt;P&gt;    password.encoder.secret = null&lt;/P&gt;&lt;P&gt;    port = 6667&lt;/P&gt;&lt;P&gt;    principal.builder.class = null&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.entry.expiration.ms = 300000&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.max.size = 1000&lt;/P&gt;&lt;P&gt;    producer.metrics.enable = false&lt;/P&gt;&lt;P&gt;    producer.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    queued.max.request.bytes = -1&lt;/P&gt;&lt;P&gt;    queued.max.requests = 500&lt;/P&gt;&lt;P&gt;    quota.consumer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.producer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.window.num = 
11&lt;/P&gt;&lt;P&gt;    quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.backoff.ms = 1000&lt;/P&gt;&lt;P&gt;    replica.fetch.max.bytes = 1048576&lt;/P&gt;&lt;P&gt;    replica.fetch.min.bytes = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.response.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    replica.fetch.wait.max.ms = 500&lt;/P&gt;&lt;P&gt;    replica.high.watermark.checkpoint.interval.ms = 5000&lt;/P&gt;&lt;P&gt;    replica.lag.time.max.ms = 10000&lt;/P&gt;&lt;P&gt;    replica.socket.receive.buffer.bytes = 65536&lt;/P&gt;&lt;P&gt;    replica.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    request.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    reserved.broker.max.id = 1000&lt;/P&gt;&lt;P&gt;    sasl.client.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.enabled.mechanisms = [GSSAPI]&lt;/P&gt;&lt;P&gt;    sasl.jaas.config = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.kinit.cmd = /usr/bin/kinit&lt;/P&gt;&lt;P&gt;    sasl.kerberos.min.time.before.relogin = 60000&lt;/P&gt;&lt;P&gt;    sasl.kerberos.principal.to.local.rules = [DEFAULT]&lt;/P&gt;&lt;P&gt;    sasl.kerberos.service.name = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.buffer.seconds = 300&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.min.period.seconds = 60&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.mechanism.inter.broker.protocol = GSSAPI&lt;/P&gt;&lt;P&gt;    sasl.server.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    security.inter.broker.protocol = PLAINTEXT&lt;/P&gt;&lt;P&gt;    socket.receive.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt; 
   socket.request.max.bytes = 104857600&lt;/P&gt;&lt;P&gt;    socket.send.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    ssl.cipher.suites = []&lt;/P&gt;&lt;P&gt;    ssl.client.auth = none&lt;/P&gt;&lt;P&gt;    ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]&lt;/P&gt;&lt;P&gt;    ssl.endpoint.identification.algorithm = https&lt;/P&gt;&lt;P&gt;    ssl.key.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keymanager.algorithm = SunX509&lt;/P&gt;&lt;P&gt;    ssl.keystore.location = &lt;/P&gt;&lt;P&gt;    ssl.keystore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keystore.type = JKS&lt;/P&gt;&lt;P&gt;    ssl.protocol = TLS&lt;/P&gt;&lt;P&gt;    ssl.provider = null&lt;/P&gt;&lt;P&gt;    ssl.secure.random.implementation = null&lt;/P&gt;&lt;P&gt;    ssl.trustmanager.algorithm = PKIX&lt;/P&gt;&lt;P&gt;    ssl.truststore.location = &lt;/P&gt;&lt;P&gt;    ssl.truststore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.truststore.type = JKS&lt;/P&gt;&lt;P&gt;    transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    transaction.max.timeout.ms = 900000&lt;/P&gt;&lt;P&gt;    transaction.remove.expired.transaction.cleanup.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    transaction.state.log.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    transaction.state.log.min.isr = 2&lt;/P&gt;&lt;P&gt;    transaction.state.log.num.partitions = 50&lt;/P&gt;&lt;P&gt;    transaction.state.log.replication.factor = 3&lt;/P&gt;&lt;P&gt;    transaction.state.log.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    transactional.id.expiration.ms = 604800000&lt;/P&gt;&lt;P&gt;    unclean.leader.election.enable = false&lt;/P&gt;&lt;P&gt;    zookeeper.connect = am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181&lt;/P&gt;&lt;P&gt;    zookeeper.connection.timeout.ms = 25000&lt;/P&gt;&lt;P&gt;    zookeeper.max.in.flight.requests = 10&lt;/P&gt;&lt;P&gt;    zookeeper.session.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    zookeeper.set.acl = 
false&lt;/P&gt;&lt;P&gt;    zookeeper.sync.time.ms = 2000&lt;/P&gt;&lt;P&gt; (kafka.server.KafkaConfig)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:17,458] INFO [ThrottledChannelReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:17,458] INFO [ThrottledChannelReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:17,459] INFO [ThrottledChannelReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:17,492] ERROR [KafkaServer id=1001] Fatal error during KafkaServer startup. Prepare to shutdown (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;org.apache.kafka.common.KafkaException: Failed to acquire lock on file .lock in /kafka-logs. A Kafka instance in another process or thread is using this directory.&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$$anonfun$lockLogDirs$1.apply(LogManager.scala:240)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$$anonfun$lockLogDirs$1.apply(LogManager.scala:236)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)&lt;/P&gt;&lt;P&gt;    at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$class.flatMap(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.AbstractTraversable.flatMap(Traversable.scala:104)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager.lockLogDirs(LogManager.scala:236)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager.&amp;lt;init&amp;gt;(LogManager.scala:97)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$.apply(LogManager.scala:958)&lt;/P&gt;&lt;P&gt;    at 
kafka.server.KafkaServer.startup(KafkaServer.scala:237)&lt;/P&gt;&lt;P&gt;    at kafka.server.KafkaServerStartable.startup(KafkaServerStartable.scala:38)&lt;/P&gt;&lt;P&gt;    at kafka.Kafka$.main(Kafka.scala:75)&lt;/P&gt;&lt;P&gt;    at kafka.Kafka.main(Kafka.scala)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:17,494] INFO [KafkaServer id=1001] shutting down (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:17,497] INFO [ZooKeeperClient] Closing. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:17,501] INFO [ZooKeeperClient] Closed. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:17,501] INFO [ThrottledChannelReaper-Fetch]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:18,459] INFO [ThrottledChannelReaper-Fetch]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:18,459] INFO [ThrottledChannelReaper-Fetch]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:18,459] INFO [ThrottledChannelReaper-Produce]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:19,459] INFO [ThrottledChannelReaper-Produce]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:19,459] INFO [ThrottledChannelReaper-Produce]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:19,459] INFO [ThrottledChannelReaper-Request]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:19,460] INFO [ThrottledChannelReaper-Request]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:19,460] INFO [ThrottledChannelReaper-Request]: Shutdown completed 
(kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:19,465] INFO [KafkaServer id=1001] shut down completed (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:19,466] ERROR Exiting Kafka. (kafka.server.KafkaServerStartable)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:16:19,468] INFO [KafkaServer id=1001] shutting down (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:20:51,712] INFO [GroupMetadataManager brokerId=1001] Removed 0 expired offsets in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:18,284] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:18,803] INFO starting (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:18,804] INFO Connecting to zookeeper on am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181 (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:18,818] INFO [ZooKeeperClient] Initializing a new session to am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:18,838] INFO [ZooKeeperClient] Waiting until connected. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:18,904] INFO [ZooKeeperClient] Connected. 
(kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:19,210] INFO Cluster ID = z-4P_uf-RzmpT2QvMnOD2g (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:19,288] INFO KafkaConfig values: &lt;/P&gt;&lt;P&gt;    advertised.host.name = null&lt;/P&gt;&lt;P&gt;    advertised.listeners = null&lt;/P&gt;&lt;P&gt;    advertised.port = null&lt;/P&gt;&lt;P&gt;    alter.config.policy.class.name = null&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    authorizer.class.name = &lt;/P&gt;&lt;P&gt;    auto.create.topics.enable = true&lt;/P&gt;&lt;P&gt;    auto.leader.rebalance.enable = true&lt;/P&gt;&lt;P&gt;    background.threads = 10&lt;/P&gt;&lt;P&gt;    broker.id = -1&lt;/P&gt;&lt;P&gt;    broker.id.generation.enable = true&lt;/P&gt;&lt;P&gt;    broker.rack = null&lt;/P&gt;&lt;P&gt;    client.quota.callback.class = null&lt;/P&gt;&lt;P&gt;    compression.type = producer&lt;/P&gt;&lt;P&gt;    connections.max.idle.ms = 600000&lt;/P&gt;&lt;P&gt;    controlled.shutdown.enable = true&lt;/P&gt;&lt;P&gt;    controlled.shutdown.max.retries = 3&lt;/P&gt;&lt;P&gt;    controlled.shutdown.retry.backoff.ms = 5000&lt;/P&gt;&lt;P&gt;    controller.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    create.topic.policy.class.name = null&lt;/P&gt;&lt;P&gt;    default.replication.factor = 1&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.check.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.time.ms = 86400000&lt;/P&gt;&lt;P&gt;    delegation.token.master.key = null&lt;/P&gt;&lt;P&gt;    delegation.token.max.lifetime.ms = 604800000&lt;/P&gt;&lt;P&gt;    delete.records.purgatory.purge.interval.requests = 1&lt;/P&gt;&lt;P&gt;    delete.topic.enable = true&lt;/P&gt;&lt;P&gt;    fetch.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    group.initial.rebalance.delay.ms = 3000&lt;/P&gt;&lt;P&gt;    group.max.session.timeout.ms = 
300000&lt;/P&gt;&lt;P&gt;    group.min.session.timeout.ms = 6000&lt;/P&gt;&lt;P&gt;    host.name = &lt;/P&gt;&lt;P&gt;    inter.broker.listener.name = null&lt;/P&gt;&lt;P&gt;    inter.broker.protocol.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    leader.imbalance.check.interval.seconds = 300&lt;/P&gt;&lt;P&gt;    leader.imbalance.per.broker.percentage = 10&lt;/P&gt;&lt;P&gt;    listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL&lt;/P&gt;&lt;P&gt;    listeners = PLAINTEXT://am-bigdata-01.am.local:6667&lt;/P&gt;&lt;P&gt;    log.cleaner.backoff.ms = 15000&lt;/P&gt;&lt;P&gt;    log.cleaner.dedupe.buffer.size = 134217728&lt;/P&gt;&lt;P&gt;    log.cleaner.delete.retention.ms = 86400000&lt;/P&gt;&lt;P&gt;    log.cleaner.enable = true&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.load.factor = 0.9&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.size = 524288&lt;/P&gt;&lt;P&gt;    log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308&lt;/P&gt;&lt;P&gt;    log.cleaner.min.cleanable.ratio = 0.5&lt;/P&gt;&lt;P&gt;    log.cleaner.min.compaction.lag.ms = 0&lt;/P&gt;&lt;P&gt;    log.cleaner.threads = 1&lt;/P&gt;&lt;P&gt;    log.cleanup.policy = [delete]&lt;/P&gt;&lt;P&gt;    log.dir = /tmp/kafka-logs&lt;/P&gt;&lt;P&gt;    log.dirs = /kafka-logs&lt;/P&gt;&lt;P&gt;    log.flush.interval.messages = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.interval.ms = null&lt;/P&gt;&lt;P&gt;    log.flush.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.flush.scheduler.interval.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.start.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.index.interval.bytes = 4096&lt;/P&gt;&lt;P&gt;    log.index.size.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    log.message.downconversion.enable = true&lt;/P&gt;&lt;P&gt;    log.message.format.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    log.message.timestamp.difference.max.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    
log.message.timestamp.type = CreateTime&lt;/P&gt;&lt;P&gt;    log.preallocate = false&lt;/P&gt;&lt;P&gt;    log.retention.bytes = -1&lt;/P&gt;&lt;P&gt;    log.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    log.retention.hours = 168&lt;/P&gt;&lt;P&gt;    log.retention.minutes = null&lt;/P&gt;&lt;P&gt;    log.retention.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.hours = 168&lt;/P&gt;&lt;P&gt;    log.roll.jitter.hours = 0&lt;/P&gt;&lt;P&gt;    log.roll.jitter.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.ms = null&lt;/P&gt;&lt;P&gt;    log.segment.bytes = 1073741824&lt;/P&gt;&lt;P&gt;    log.segment.delete.delay.ms = 60000&lt;/P&gt;&lt;P&gt;    max.connections.per.ip = 2147483647&lt;/P&gt;&lt;P&gt;    max.connections.per.ip.overrides = &lt;/P&gt;&lt;P&gt;    max.incremental.fetch.session.cache.slots = 1000&lt;/P&gt;&lt;P&gt;    message.max.bytes = 1000000&lt;/P&gt;&lt;P&gt;    metric.reporters = []&lt;/P&gt;&lt;P&gt;    metrics.num.samples = 2&lt;/P&gt;&lt;P&gt;    metrics.recording.level = INFO&lt;/P&gt;&lt;P&gt;    metrics.sample.window.ms = 30000&lt;/P&gt;&lt;P&gt;    min.insync.replicas = 1&lt;/P&gt;&lt;P&gt;    num.io.threads = 8&lt;/P&gt;&lt;P&gt;    num.network.threads = 3&lt;/P&gt;&lt;P&gt;    num.partitions = 1&lt;/P&gt;&lt;P&gt;    num.recovery.threads.per.data.dir = 1&lt;/P&gt;&lt;P&gt;    num.replica.alter.log.dirs.threads = null&lt;/P&gt;&lt;P&gt;    num.replica.fetchers = 1&lt;/P&gt;&lt;P&gt;    offset.metadata.max.bytes = 4096&lt;/P&gt;&lt;P&gt;    offsets.commit.required.acks = -1&lt;/P&gt;&lt;P&gt;    offsets.commit.timeout.ms = 5000&lt;/P&gt;&lt;P&gt;    offsets.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    offsets.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    offsets.retention.minutes = 86400000&lt;/P&gt;&lt;P&gt;    offsets.topic.compression.codec = 0&lt;/P&gt;&lt;P&gt;    offsets.topic.num.partitions = 50&lt;/P&gt;&lt;P&gt;    offsets.topic.replication.factor = 3&lt;/P&gt;&lt;P&gt;    offsets.topic.segment.bytes = 
104857600&lt;/P&gt;&lt;P&gt;    password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding&lt;/P&gt;&lt;P&gt;    password.encoder.iterations = 4096&lt;/P&gt;&lt;P&gt;    password.encoder.key.length = 128&lt;/P&gt;&lt;P&gt;    password.encoder.keyfactory.algorithm = null&lt;/P&gt;&lt;P&gt;    password.encoder.old.secret = null&lt;/P&gt;&lt;P&gt;    password.encoder.secret = null&lt;/P&gt;&lt;P&gt;    port = 6667&lt;/P&gt;&lt;P&gt;    principal.builder.class = null&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.entry.expiration.ms = 300000&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.max.size = 1000&lt;/P&gt;&lt;P&gt;    producer.metrics.enable = false&lt;/P&gt;&lt;P&gt;    producer.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    queued.max.request.bytes = -1&lt;/P&gt;&lt;P&gt;    queued.max.requests = 500&lt;/P&gt;&lt;P&gt;    quota.consumer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.producer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.window.num = 11&lt;/P&gt;&lt;P&gt;    quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.backoff.ms = 1000&lt;/P&gt;&lt;P&gt;    replica.fetch.max.bytes = 1048576&lt;/P&gt;&lt;P&gt;    replica.fetch.min.bytes = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.response.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    replica.fetch.wait.max.ms = 500&lt;/P&gt;&lt;P&gt;    replica.high.watermark.checkpoint.interval.ms = 5000&lt;/P&gt;&lt;P&gt;    replica.lag.time.max.ms = 10000&lt;/P&gt;&lt;P&gt;    replica.socket.receive.buffer.bytes = 65536&lt;/P&gt;&lt;P&gt;    replica.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    request.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    reserved.broker.max.id = 1000&lt;/P&gt;&lt;P&gt;    sasl.client.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.enabled.mechanisms = [GSSAPI]&lt;/P&gt;&lt;P&gt;    sasl.jaas.config = null&lt;/P&gt;&lt;P&gt;    
sasl.kerberos.kinit.cmd = /usr/bin/kinit&lt;/P&gt;&lt;P&gt;    sasl.kerberos.min.time.before.relogin = 60000&lt;/P&gt;&lt;P&gt;    sasl.kerberos.principal.to.local.rules = [DEFAULT]&lt;/P&gt;&lt;P&gt;    sasl.kerberos.service.name = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.buffer.seconds = 300&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.min.period.seconds = 60&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.mechanism.inter.broker.protocol = GSSAPI&lt;/P&gt;&lt;P&gt;    sasl.server.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    security.inter.broker.protocol = PLAINTEXT&lt;/P&gt;&lt;P&gt;    socket.receive.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    socket.request.max.bytes = 104857600&lt;/P&gt;&lt;P&gt;    socket.send.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    ssl.cipher.suites = []&lt;/P&gt;&lt;P&gt;    ssl.client.auth = none&lt;/P&gt;&lt;P&gt;    ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]&lt;/P&gt;&lt;P&gt;    ssl.endpoint.identification.algorithm = https&lt;/P&gt;&lt;P&gt;    ssl.key.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keymanager.algorithm = SunX509&lt;/P&gt;&lt;P&gt;    ssl.keystore.location = &lt;/P&gt;&lt;P&gt;    ssl.keystore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keystore.type = JKS&lt;/P&gt;&lt;P&gt;    ssl.protocol = TLS&lt;/P&gt;&lt;P&gt;    ssl.provider = null&lt;/P&gt;&lt;P&gt;    ssl.secure.random.implementation = null&lt;/P&gt;&lt;P&gt;    ssl.trustmanager.algorithm = PKIX&lt;/P&gt;&lt;P&gt;    ssl.truststore.location = &lt;/P&gt;&lt;P&gt;    ssl.truststore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.truststore.type = JKS&lt;/P&gt;&lt;P&gt;    
transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    transaction.max.timeout.ms = 900000&lt;/P&gt;&lt;P&gt;    transaction.remove.expired.transaction.cleanup.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    transaction.state.log.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    transaction.state.log.min.isr = 2&lt;/P&gt;&lt;P&gt;    transaction.state.log.num.partitions = 50&lt;/P&gt;&lt;P&gt;    transaction.state.log.replication.factor = 3&lt;/P&gt;&lt;P&gt;    transaction.state.log.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    transactional.id.expiration.ms = 604800000&lt;/P&gt;&lt;P&gt;    unclean.leader.election.enable = false&lt;/P&gt;&lt;P&gt;    zookeeper.connect = am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181&lt;/P&gt;&lt;P&gt;    zookeeper.connection.timeout.ms = 25000&lt;/P&gt;&lt;P&gt;    zookeeper.max.in.flight.requests = 10&lt;/P&gt;&lt;P&gt;    zookeeper.session.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    zookeeper.set.acl = false&lt;/P&gt;&lt;P&gt;    zookeeper.sync.time.ms = 2000&lt;/P&gt;&lt;P&gt; (kafka.server.KafkaConfig)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:19,296] INFO KafkaConfig values: &lt;/P&gt;&lt;P&gt;    advertised.host.name = null&lt;/P&gt;&lt;P&gt;    advertised.listeners = null&lt;/P&gt;&lt;P&gt;    advertised.port = null&lt;/P&gt;&lt;P&gt;    alter.config.policy.class.name = null&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    authorizer.class.name = &lt;/P&gt;&lt;P&gt;    auto.create.topics.enable = true&lt;/P&gt;&lt;P&gt;    auto.leader.rebalance.enable = true&lt;/P&gt;&lt;P&gt;    background.threads = 10&lt;/P&gt;&lt;P&gt;    broker.id = -1&lt;/P&gt;&lt;P&gt;    broker.id.generation.enable = true&lt;/P&gt;&lt;P&gt;    broker.rack = null&lt;/P&gt;&lt;P&gt;    client.quota.callback.class = null&lt;/P&gt;&lt;P&gt;    compression.type = 
producer&lt;/P&gt;&lt;P&gt;    connections.max.idle.ms = 600000&lt;/P&gt;&lt;P&gt;    controlled.shutdown.enable = true&lt;/P&gt;&lt;P&gt;    controlled.shutdown.max.retries = 3&lt;/P&gt;&lt;P&gt;    controlled.shutdown.retry.backoff.ms = 5000&lt;/P&gt;&lt;P&gt;    controller.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    create.topic.policy.class.name = null&lt;/P&gt;&lt;P&gt;    default.replication.factor = 1&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.check.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.time.ms = 86400000&lt;/P&gt;&lt;P&gt;    delegation.token.master.key = null&lt;/P&gt;&lt;P&gt;    delegation.token.max.lifetime.ms = 604800000&lt;/P&gt;&lt;P&gt;    delete.records.purgatory.purge.interval.requests = 1&lt;/P&gt;&lt;P&gt;    delete.topic.enable = true&lt;/P&gt;&lt;P&gt;    fetch.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    group.initial.rebalance.delay.ms = 3000&lt;/P&gt;&lt;P&gt;    group.max.session.timeout.ms = 300000&lt;/P&gt;&lt;P&gt;    group.min.session.timeout.ms = 6000&lt;/P&gt;&lt;P&gt;    host.name = &lt;/P&gt;&lt;P&gt;    inter.broker.listener.name = null&lt;/P&gt;&lt;P&gt;    inter.broker.protocol.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    leader.imbalance.check.interval.seconds = 300&lt;/P&gt;&lt;P&gt;    leader.imbalance.per.broker.percentage = 10&lt;/P&gt;&lt;P&gt;    listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL&lt;/P&gt;&lt;P&gt;    listeners = PLAINTEXT://am-bigdata-01.am.local:6667&lt;/P&gt;&lt;P&gt;    log.cleaner.backoff.ms = 15000&lt;/P&gt;&lt;P&gt;    log.cleaner.dedupe.buffer.size = 134217728&lt;/P&gt;&lt;P&gt;    log.cleaner.delete.retention.ms = 86400000&lt;/P&gt;&lt;P&gt;    log.cleaner.enable = true&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.load.factor = 0.9&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.size = 524288&lt;/P&gt;&lt;P&gt;    log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308&lt;/P&gt;&lt;P&gt;   
 log.cleaner.min.cleanable.ratio = 0.5&lt;/P&gt;&lt;P&gt;    log.cleaner.min.compaction.lag.ms = 0&lt;/P&gt;&lt;P&gt;    log.cleaner.threads = 1&lt;/P&gt;&lt;P&gt;    log.cleanup.policy = [delete]&lt;/P&gt;&lt;P&gt;    log.dir = /tmp/kafka-logs&lt;/P&gt;&lt;P&gt;    log.dirs = /kafka-logs&lt;/P&gt;&lt;P&gt;    log.flush.interval.messages = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.interval.ms = null&lt;/P&gt;&lt;P&gt;    log.flush.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.flush.scheduler.interval.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.start.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.index.interval.bytes = 4096&lt;/P&gt;&lt;P&gt;    log.index.size.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    log.message.downconversion.enable = true&lt;/P&gt;&lt;P&gt;    log.message.format.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    log.message.timestamp.difference.max.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.message.timestamp.type = CreateTime&lt;/P&gt;&lt;P&gt;    log.preallocate = false&lt;/P&gt;&lt;P&gt;    log.retention.bytes = -1&lt;/P&gt;&lt;P&gt;    log.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    log.retention.hours = 168&lt;/P&gt;&lt;P&gt;    log.retention.minutes = null&lt;/P&gt;&lt;P&gt;    log.retention.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.hours = 168&lt;/P&gt;&lt;P&gt;    log.roll.jitter.hours = 0&lt;/P&gt;&lt;P&gt;    log.roll.jitter.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.ms = null&lt;/P&gt;&lt;P&gt;    log.segment.bytes = 1073741824&lt;/P&gt;&lt;P&gt;    log.segment.delete.delay.ms = 60000&lt;/P&gt;&lt;P&gt;    max.connections.per.ip = 2147483647&lt;/P&gt;&lt;P&gt;    max.connections.per.ip.overrides = &lt;/P&gt;&lt;P&gt;    max.incremental.fetch.session.cache.slots = 1000&lt;/P&gt;&lt;P&gt;    message.max.bytes = 1000000&lt;/P&gt;&lt;P&gt;    metric.reporters = []&lt;/P&gt;&lt;P&gt;    metrics.num.samples = 2&lt;/P&gt;&lt;P&gt;    metrics.recording.level = 
INFO&lt;/P&gt;&lt;P&gt;    metrics.sample.window.ms = 30000&lt;/P&gt;&lt;P&gt;    min.insync.replicas = 1&lt;/P&gt;&lt;P&gt;    num.io.threads = 8&lt;/P&gt;&lt;P&gt;    num.network.threads = 3&lt;/P&gt;&lt;P&gt;    num.partitions = 1&lt;/P&gt;&lt;P&gt;    num.recovery.threads.per.data.dir = 1&lt;/P&gt;&lt;P&gt;    num.replica.alter.log.dirs.threads = null&lt;/P&gt;&lt;P&gt;    num.replica.fetchers = 1&lt;/P&gt;&lt;P&gt;    offset.metadata.max.bytes = 4096&lt;/P&gt;&lt;P&gt;    offsets.commit.required.acks = -1&lt;/P&gt;&lt;P&gt;    offsets.commit.timeout.ms = 5000&lt;/P&gt;&lt;P&gt;    offsets.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    offsets.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    offsets.retention.minutes = 86400000&lt;/P&gt;&lt;P&gt;    offsets.topic.compression.codec = 0&lt;/P&gt;&lt;P&gt;    offsets.topic.num.partitions = 50&lt;/P&gt;&lt;P&gt;    offsets.topic.replication.factor = 3&lt;/P&gt;&lt;P&gt;    offsets.topic.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding&lt;/P&gt;&lt;P&gt;    password.encoder.iterations = 4096&lt;/P&gt;&lt;P&gt;    password.encoder.key.length = 128&lt;/P&gt;&lt;P&gt;    password.encoder.keyfactory.algorithm = null&lt;/P&gt;&lt;P&gt;    password.encoder.old.secret = null&lt;/P&gt;&lt;P&gt;    password.encoder.secret = null&lt;/P&gt;&lt;P&gt;    port = 6667&lt;/P&gt;&lt;P&gt;    principal.builder.class = null&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.entry.expiration.ms = 300000&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.max.size = 1000&lt;/P&gt;&lt;P&gt;    producer.metrics.enable = false&lt;/P&gt;&lt;P&gt;    producer.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    queued.max.request.bytes = -1&lt;/P&gt;&lt;P&gt;    queued.max.requests = 500&lt;/P&gt;&lt;P&gt;    quota.consumer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.producer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.window.num = 
11&lt;/P&gt;&lt;P&gt;    quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.backoff.ms = 1000&lt;/P&gt;&lt;P&gt;    replica.fetch.max.bytes = 1048576&lt;/P&gt;&lt;P&gt;    replica.fetch.min.bytes = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.response.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    replica.fetch.wait.max.ms = 500&lt;/P&gt;&lt;P&gt;    replica.high.watermark.checkpoint.interval.ms = 5000&lt;/P&gt;&lt;P&gt;    replica.lag.time.max.ms = 10000&lt;/P&gt;&lt;P&gt;    replica.socket.receive.buffer.bytes = 65536&lt;/P&gt;&lt;P&gt;    replica.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    request.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    reserved.broker.max.id = 1000&lt;/P&gt;&lt;P&gt;    sasl.client.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.enabled.mechanisms = [GSSAPI]&lt;/P&gt;&lt;P&gt;    sasl.jaas.config = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.kinit.cmd = /usr/bin/kinit&lt;/P&gt;&lt;P&gt;    sasl.kerberos.min.time.before.relogin = 60000&lt;/P&gt;&lt;P&gt;    sasl.kerberos.principal.to.local.rules = [DEFAULT]&lt;/P&gt;&lt;P&gt;    sasl.kerberos.service.name = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.buffer.seconds = 300&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.min.period.seconds = 60&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.mechanism.inter.broker.protocol = GSSAPI&lt;/P&gt;&lt;P&gt;    sasl.server.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    security.inter.broker.protocol = PLAINTEXT&lt;/P&gt;&lt;P&gt;    socket.receive.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt; 
   socket.request.max.bytes = 104857600&lt;/P&gt;&lt;P&gt;    socket.send.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    ssl.cipher.suites = []&lt;/P&gt;&lt;P&gt;    ssl.client.auth = none&lt;/P&gt;&lt;P&gt;    ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]&lt;/P&gt;&lt;P&gt;    ssl.endpoint.identification.algorithm = https&lt;/P&gt;&lt;P&gt;    ssl.key.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keymanager.algorithm = SunX509&lt;/P&gt;&lt;P&gt;    ssl.keystore.location = &lt;/P&gt;&lt;P&gt;    ssl.keystore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keystore.type = JKS&lt;/P&gt;&lt;P&gt;    ssl.protocol = TLS&lt;/P&gt;&lt;P&gt;    ssl.provider = null&lt;/P&gt;&lt;P&gt;    ssl.secure.random.implementation = null&lt;/P&gt;&lt;P&gt;    ssl.trustmanager.algorithm = PKIX&lt;/P&gt;&lt;P&gt;    ssl.truststore.location = &lt;/P&gt;&lt;P&gt;    ssl.truststore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.truststore.type = JKS&lt;/P&gt;&lt;P&gt;    transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    transaction.max.timeout.ms = 900000&lt;/P&gt;&lt;P&gt;    transaction.remove.expired.transaction.cleanup.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    transaction.state.log.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    transaction.state.log.min.isr = 2&lt;/P&gt;&lt;P&gt;    transaction.state.log.num.partitions = 50&lt;/P&gt;&lt;P&gt;    transaction.state.log.replication.factor = 3&lt;/P&gt;&lt;P&gt;    transaction.state.log.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    transactional.id.expiration.ms = 604800000&lt;/P&gt;&lt;P&gt;    unclean.leader.election.enable = false&lt;/P&gt;&lt;P&gt;    zookeeper.connect = am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181&lt;/P&gt;&lt;P&gt;    zookeeper.connection.timeout.ms = 25000&lt;/P&gt;&lt;P&gt;    zookeeper.max.in.flight.requests = 10&lt;/P&gt;&lt;P&gt;    zookeeper.session.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    zookeeper.set.acl = 
false&lt;/P&gt;&lt;P&gt;    zookeeper.sync.time.ms = 2000&lt;/P&gt;&lt;P&gt; (kafka.server.KafkaConfig)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:19,325] INFO [ThrottledChannelReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:19,325] INFO [ThrottledChannelReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:19,326] INFO [ThrottledChannelReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:19,358] ERROR [KafkaServer id=1001] Fatal error during KafkaServer startup. Prepare to shutdown (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;org.apache.kafka.common.KafkaException: Failed to acquire lock on file .lock in /kafka-logs. A Kafka instance in another process or thread is using this directory.&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$$anonfun$lockLogDirs$1.apply(LogManager.scala:240)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$$anonfun$lockLogDirs$1.apply(LogManager.scala:236)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)&lt;/P&gt;&lt;P&gt;    at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$class.flatMap(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.AbstractTraversable.flatMap(Traversable.scala:104)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager.lockLogDirs(LogManager.scala:236)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager.&amp;lt;init&amp;gt;(LogManager.scala:97)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$.apply(LogManager.scala:958)&lt;/P&gt;&lt;P&gt;    at 
kafka.server.KafkaServer.startup(KafkaServer.scala:237)&lt;/P&gt;&lt;P&gt;    at kafka.server.KafkaServerStartable.startup(KafkaServerStartable.scala:38)&lt;/P&gt;&lt;P&gt;    at kafka.Kafka$.main(Kafka.scala:75)&lt;/P&gt;&lt;P&gt;    at kafka.Kafka.main(Kafka.scala)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:19,361] INFO [KafkaServer id=1001] shutting down (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:19,363] INFO [ZooKeeperClient] Closing. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:19,367] INFO [ZooKeeperClient] Closed. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:19,368] INFO [ThrottledChannelReaper-Fetch]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:20,325] INFO [ThrottledChannelReaper-Fetch]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:20,325] INFO [ThrottledChannelReaper-Fetch]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:20,326] INFO [ThrottledChannelReaper-Produce]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:21,325] INFO [ThrottledChannelReaper-Produce]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:21,325] INFO [ThrottledChannelReaper-Produce]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:21,325] INFO [ThrottledChannelReaper-Request]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:21,326] INFO [ThrottledChannelReaper-Request]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:21,326] INFO [ThrottledChannelReaper-Request]: Shutdown completed 
(kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:21,331] INFO [KafkaServer id=1001] shut down completed (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:21,331] ERROR Exiting Kafka. (kafka.server.KafkaServerStartable)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:21:21,333] INFO [KafkaServer id=1001] shutting down (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:30:51,711] INFO [GroupMetadataManager brokerId=1001] Removed 0 expired offsets in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:36:58,207] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:36:58,757] INFO starting (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:36:58,758] INFO Connecting to zookeeper on am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181 (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:36:58,777] INFO [ZooKeeperClient] Initializing a new session to am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:36:58,804] INFO [ZooKeeperClient] Waiting until connected. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:36:58,863] INFO [ZooKeeperClient] Connected. 
(kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:36:59,129] INFO Cluster ID = z-4P_uf-RzmpT2QvMnOD2g (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:36:59,204] INFO KafkaConfig values: &lt;/P&gt;&lt;P&gt;    advertised.host.name = null&lt;/P&gt;&lt;P&gt;    advertised.listeners = null&lt;/P&gt;&lt;P&gt;    advertised.port = null&lt;/P&gt;&lt;P&gt;    alter.config.policy.class.name = null&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    authorizer.class.name = &lt;/P&gt;&lt;P&gt;    auto.create.topics.enable = true&lt;/P&gt;&lt;P&gt;    auto.leader.rebalance.enable = true&lt;/P&gt;&lt;P&gt;    background.threads = 10&lt;/P&gt;&lt;P&gt;    broker.id = -1&lt;/P&gt;&lt;P&gt;    broker.id.generation.enable = true&lt;/P&gt;&lt;P&gt;    broker.rack = null&lt;/P&gt;&lt;P&gt;    client.quota.callback.class = null&lt;/P&gt;&lt;P&gt;    compression.type = producer&lt;/P&gt;&lt;P&gt;    connections.max.idle.ms = 600000&lt;/P&gt;&lt;P&gt;    controlled.shutdown.enable = true&lt;/P&gt;&lt;P&gt;    controlled.shutdown.max.retries = 3&lt;/P&gt;&lt;P&gt;    controlled.shutdown.retry.backoff.ms = 5000&lt;/P&gt;&lt;P&gt;    controller.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    create.topic.policy.class.name = null&lt;/P&gt;&lt;P&gt;    default.replication.factor = 1&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.check.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.time.ms = 86400000&lt;/P&gt;&lt;P&gt;    delegation.token.master.key = null&lt;/P&gt;&lt;P&gt;    delegation.token.max.lifetime.ms = 604800000&lt;/P&gt;&lt;P&gt;    delete.records.purgatory.purge.interval.requests = 1&lt;/P&gt;&lt;P&gt;    delete.topic.enable = true&lt;/P&gt;&lt;P&gt;    fetch.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    group.initial.rebalance.delay.ms = 3000&lt;/P&gt;&lt;P&gt;    group.max.session.timeout.ms = 
300000&lt;/P&gt;&lt;P&gt;    group.min.session.timeout.ms = 6000&lt;/P&gt;&lt;P&gt;    host.name = &lt;/P&gt;&lt;P&gt;    inter.broker.listener.name = null&lt;/P&gt;&lt;P&gt;    inter.broker.protocol.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    leader.imbalance.check.interval.seconds = 300&lt;/P&gt;&lt;P&gt;    leader.imbalance.per.broker.percentage = 10&lt;/P&gt;&lt;P&gt;    listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL&lt;/P&gt;&lt;P&gt;    listeners = PLAINTEXT://am-bigdata-01.am.local:6667&lt;/P&gt;&lt;P&gt;    log.cleaner.backoff.ms = 15000&lt;/P&gt;&lt;P&gt;    log.cleaner.dedupe.buffer.size = 134217728&lt;/P&gt;&lt;P&gt;    log.cleaner.delete.retention.ms = 86400000&lt;/P&gt;&lt;P&gt;    log.cleaner.enable = true&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.load.factor = 0.9&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.size = 524288&lt;/P&gt;&lt;P&gt;    log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308&lt;/P&gt;&lt;P&gt;    log.cleaner.min.cleanable.ratio = 0.5&lt;/P&gt;&lt;P&gt;    log.cleaner.min.compaction.lag.ms = 0&lt;/P&gt;&lt;P&gt;    log.cleaner.threads = 1&lt;/P&gt;&lt;P&gt;    log.cleanup.policy = [delete]&lt;/P&gt;&lt;P&gt;    log.dir = /tmp/kafka-logs&lt;/P&gt;&lt;P&gt;    log.dirs = /kafka-logs&lt;/P&gt;&lt;P&gt;    log.flush.interval.messages = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.interval.ms = null&lt;/P&gt;&lt;P&gt;    log.flush.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.flush.scheduler.interval.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.start.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.index.interval.bytes = 4096&lt;/P&gt;&lt;P&gt;    log.index.size.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    log.message.downconversion.enable = true&lt;/P&gt;&lt;P&gt;    log.message.format.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    log.message.timestamp.difference.max.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    
log.message.timestamp.type = CreateTime&lt;/P&gt;&lt;P&gt;    log.preallocate = false&lt;/P&gt;&lt;P&gt;    log.retention.bytes = -1&lt;/P&gt;&lt;P&gt;    log.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    log.retention.hours = 168&lt;/P&gt;&lt;P&gt;    log.retention.minutes = null&lt;/P&gt;&lt;P&gt;    log.retention.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.hours = 168&lt;/P&gt;&lt;P&gt;    log.roll.jitter.hours = 0&lt;/P&gt;&lt;P&gt;    log.roll.jitter.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.ms = null&lt;/P&gt;&lt;P&gt;    log.segment.bytes = 1073741824&lt;/P&gt;&lt;P&gt;    log.segment.delete.delay.ms = 60000&lt;/P&gt;&lt;P&gt;    max.connections.per.ip = 2147483647&lt;/P&gt;&lt;P&gt;    max.connections.per.ip.overrides = &lt;/P&gt;&lt;P&gt;    max.incremental.fetch.session.cache.slots = 1000&lt;/P&gt;&lt;P&gt;    message.max.bytes = 1000000&lt;/P&gt;&lt;P&gt;    metric.reporters = []&lt;/P&gt;&lt;P&gt;    metrics.num.samples = 2&lt;/P&gt;&lt;P&gt;    metrics.recording.level = INFO&lt;/P&gt;&lt;P&gt;    metrics.sample.window.ms = 30000&lt;/P&gt;&lt;P&gt;    min.insync.replicas = 1&lt;/P&gt;&lt;P&gt;    num.io.threads = 8&lt;/P&gt;&lt;P&gt;    num.network.threads = 3&lt;/P&gt;&lt;P&gt;    num.partitions = 1&lt;/P&gt;&lt;P&gt;    num.recovery.threads.per.data.dir = 1&lt;/P&gt;&lt;P&gt;    num.replica.alter.log.dirs.threads = null&lt;/P&gt;&lt;P&gt;    num.replica.fetchers = 1&lt;/P&gt;&lt;P&gt;    offset.metadata.max.bytes = 4096&lt;/P&gt;&lt;P&gt;    offsets.commit.required.acks = -1&lt;/P&gt;&lt;P&gt;    offsets.commit.timeout.ms = 5000&lt;/P&gt;&lt;P&gt;    offsets.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    offsets.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    offsets.retention.minutes = 86400000&lt;/P&gt;&lt;P&gt;    offsets.topic.compression.codec = 0&lt;/P&gt;&lt;P&gt;    offsets.topic.num.partitions = 50&lt;/P&gt;&lt;P&gt;    offsets.topic.replication.factor = 3&lt;/P&gt;&lt;P&gt;    offsets.topic.segment.bytes = 
104857600&lt;/P&gt;&lt;P&gt;    password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding&lt;/P&gt;&lt;P&gt;    password.encoder.iterations = 4096&lt;/P&gt;&lt;P&gt;    password.encoder.key.length = 128&lt;/P&gt;&lt;P&gt;    password.encoder.keyfactory.algorithm = null&lt;/P&gt;&lt;P&gt;    password.encoder.old.secret = null&lt;/P&gt;&lt;P&gt;    password.encoder.secret = null&lt;/P&gt;&lt;P&gt;    port = 6667&lt;/P&gt;&lt;P&gt;    principal.builder.class = null&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.entry.expiration.ms = 300000&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.max.size = 1000&lt;/P&gt;&lt;P&gt;    producer.metrics.enable = false&lt;/P&gt;&lt;P&gt;    producer.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    queued.max.request.bytes = -1&lt;/P&gt;&lt;P&gt;    queued.max.requests = 500&lt;/P&gt;&lt;P&gt;    quota.consumer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.producer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.window.num = 11&lt;/P&gt;&lt;P&gt;    quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.backoff.ms = 1000&lt;/P&gt;&lt;P&gt;    replica.fetch.max.bytes = 1048576&lt;/P&gt;&lt;P&gt;    replica.fetch.min.bytes = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.response.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    replica.fetch.wait.max.ms = 500&lt;/P&gt;&lt;P&gt;    replica.high.watermark.checkpoint.interval.ms = 5000&lt;/P&gt;&lt;P&gt;    replica.lag.time.max.ms = 10000&lt;/P&gt;&lt;P&gt;    replica.socket.receive.buffer.bytes = 65536&lt;/P&gt;&lt;P&gt;    replica.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    request.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    reserved.broker.max.id = 1000&lt;/P&gt;&lt;P&gt;    sasl.client.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.enabled.mechanisms = [GSSAPI]&lt;/P&gt;&lt;P&gt;    sasl.jaas.config = null&lt;/P&gt;&lt;P&gt;    
sasl.kerberos.kinit.cmd = /usr/bin/kinit&lt;/P&gt;&lt;P&gt;    sasl.kerberos.min.time.before.relogin = 60000&lt;/P&gt;&lt;P&gt;    sasl.kerberos.principal.to.local.rules = [DEFAULT]&lt;/P&gt;&lt;P&gt;    sasl.kerberos.service.name = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.buffer.seconds = 300&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.min.period.seconds = 60&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.mechanism.inter.broker.protocol = GSSAPI&lt;/P&gt;&lt;P&gt;    sasl.server.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    security.inter.broker.protocol = PLAINTEXT&lt;/P&gt;&lt;P&gt;    socket.receive.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    socket.request.max.bytes = 104857600&lt;/P&gt;&lt;P&gt;    socket.send.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    ssl.cipher.suites = []&lt;/P&gt;&lt;P&gt;    ssl.client.auth = none&lt;/P&gt;&lt;P&gt;    ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]&lt;/P&gt;&lt;P&gt;    ssl.endpoint.identification.algorithm = https&lt;/P&gt;&lt;P&gt;    ssl.key.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keymanager.algorithm = SunX509&lt;/P&gt;&lt;P&gt;    ssl.keystore.location = &lt;/P&gt;&lt;P&gt;    ssl.keystore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keystore.type = JKS&lt;/P&gt;&lt;P&gt;    ssl.protocol = TLS&lt;/P&gt;&lt;P&gt;    ssl.provider = null&lt;/P&gt;&lt;P&gt;    ssl.secure.random.implementation = null&lt;/P&gt;&lt;P&gt;    ssl.trustmanager.algorithm = PKIX&lt;/P&gt;&lt;P&gt;    ssl.truststore.location = &lt;/P&gt;&lt;P&gt;    ssl.truststore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.truststore.type = JKS&lt;/P&gt;&lt;P&gt;    
transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    transaction.max.timeout.ms = 900000&lt;/P&gt;&lt;P&gt;    transaction.remove.expired.transaction.cleanup.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    transaction.state.log.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    transaction.state.log.min.isr = 2&lt;/P&gt;&lt;P&gt;    transaction.state.log.num.partitions = 50&lt;/P&gt;&lt;P&gt;    transaction.state.log.replication.factor = 3&lt;/P&gt;&lt;P&gt;    transaction.state.log.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    transactional.id.expiration.ms = 604800000&lt;/P&gt;&lt;P&gt;    unclean.leader.election.enable = false&lt;/P&gt;&lt;P&gt;    zookeeper.connect = am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181&lt;/P&gt;&lt;P&gt;    zookeeper.connection.timeout.ms = 25000&lt;/P&gt;&lt;P&gt;    zookeeper.max.in.flight.requests = 10&lt;/P&gt;&lt;P&gt;    zookeeper.session.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    zookeeper.set.acl = false&lt;/P&gt;&lt;P&gt;    zookeeper.sync.time.ms = 2000&lt;/P&gt;&lt;P&gt; (kafka.server.KafkaConfig)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:36:59,212] INFO KafkaConfig values: &lt;/P&gt;&lt;P&gt;    advertised.host.name = null&lt;/P&gt;&lt;P&gt;    advertised.listeners = null&lt;/P&gt;&lt;P&gt;    advertised.port = null&lt;/P&gt;&lt;P&gt;    alter.config.policy.class.name = null&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    authorizer.class.name = &lt;/P&gt;&lt;P&gt;    auto.create.topics.enable = true&lt;/P&gt;&lt;P&gt;    auto.leader.rebalance.enable = true&lt;/P&gt;&lt;P&gt;    background.threads = 10&lt;/P&gt;&lt;P&gt;    broker.id = -1&lt;/P&gt;&lt;P&gt;    broker.id.generation.enable = true&lt;/P&gt;&lt;P&gt;    broker.rack = null&lt;/P&gt;&lt;P&gt;    client.quota.callback.class = null&lt;/P&gt;&lt;P&gt;    compression.type = 
producer&lt;/P&gt;&lt;P&gt;    connections.max.idle.ms = 600000&lt;/P&gt;&lt;P&gt;    controlled.shutdown.enable = true&lt;/P&gt;&lt;P&gt;    controlled.shutdown.max.retries = 3&lt;/P&gt;&lt;P&gt;    controlled.shutdown.retry.backoff.ms = 5000&lt;/P&gt;&lt;P&gt;    controller.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    create.topic.policy.class.name = null&lt;/P&gt;&lt;P&gt;    default.replication.factor = 1&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.check.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.time.ms = 86400000&lt;/P&gt;&lt;P&gt;    delegation.token.master.key = null&lt;/P&gt;&lt;P&gt;    delegation.token.max.lifetime.ms = 604800000&lt;/P&gt;&lt;P&gt;    delete.records.purgatory.purge.interval.requests = 1&lt;/P&gt;&lt;P&gt;    delete.topic.enable = true&lt;/P&gt;&lt;P&gt;    fetch.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    group.initial.rebalance.delay.ms = 3000&lt;/P&gt;&lt;P&gt;    group.max.session.timeout.ms = 300000&lt;/P&gt;&lt;P&gt;    group.min.session.timeout.ms = 6000&lt;/P&gt;&lt;P&gt;    host.name = &lt;/P&gt;&lt;P&gt;    inter.broker.listener.name = null&lt;/P&gt;&lt;P&gt;    inter.broker.protocol.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    leader.imbalance.check.interval.seconds = 300&lt;/P&gt;&lt;P&gt;    leader.imbalance.per.broker.percentage = 10&lt;/P&gt;&lt;P&gt;    listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL&lt;/P&gt;&lt;P&gt;    listeners = PLAINTEXT://am-bigdata-01.am.local:6667&lt;/P&gt;&lt;P&gt;    log.cleaner.backoff.ms = 15000&lt;/P&gt;&lt;P&gt;    log.cleaner.dedupe.buffer.size = 134217728&lt;/P&gt;&lt;P&gt;    log.cleaner.delete.retention.ms = 86400000&lt;/P&gt;&lt;P&gt;    log.cleaner.enable = true&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.load.factor = 0.9&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.size = 524288&lt;/P&gt;&lt;P&gt;    log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308&lt;/P&gt;&lt;P&gt;   
 log.cleaner.min.cleanable.ratio = 0.5&lt;/P&gt;&lt;P&gt;    log.cleaner.min.compaction.lag.ms = 0&lt;/P&gt;&lt;P&gt;    log.cleaner.threads = 1&lt;/P&gt;&lt;P&gt;    log.cleanup.policy = [delete]&lt;/P&gt;&lt;P&gt;    log.dir = /tmp/kafka-logs&lt;/P&gt;&lt;P&gt;    log.dirs = /kafka-logs&lt;/P&gt;&lt;P&gt;    log.flush.interval.messages = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.interval.ms = null&lt;/P&gt;&lt;P&gt;    log.flush.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.flush.scheduler.interval.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.start.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.index.interval.bytes = 4096&lt;/P&gt;&lt;P&gt;    log.index.size.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    log.message.downconversion.enable = true&lt;/P&gt;&lt;P&gt;    log.message.format.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    log.message.timestamp.difference.max.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.message.timestamp.type = CreateTime&lt;/P&gt;&lt;P&gt;    log.preallocate = false&lt;/P&gt;&lt;P&gt;    log.retention.bytes = -1&lt;/P&gt;&lt;P&gt;    log.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    log.retention.hours = 168&lt;/P&gt;&lt;P&gt;    log.retention.minutes = null&lt;/P&gt;&lt;P&gt;    log.retention.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.hours = 168&lt;/P&gt;&lt;P&gt;    log.roll.jitter.hours = 0&lt;/P&gt;&lt;P&gt;    log.roll.jitter.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.ms = null&lt;/P&gt;&lt;P&gt;    log.segment.bytes = 1073741824&lt;/P&gt;&lt;P&gt;    log.segment.delete.delay.ms = 60000&lt;/P&gt;&lt;P&gt;    max.connections.per.ip = 2147483647&lt;/P&gt;&lt;P&gt;    max.connections.per.ip.overrides = &lt;/P&gt;&lt;P&gt;    max.incremental.fetch.session.cache.slots = 1000&lt;/P&gt;&lt;P&gt;    message.max.bytes = 1000000&lt;/P&gt;&lt;P&gt;    metric.reporters = []&lt;/P&gt;&lt;P&gt;    metrics.num.samples = 2&lt;/P&gt;&lt;P&gt;    metrics.recording.level = 
INFO&lt;/P&gt;&lt;P&gt;    metrics.sample.window.ms = 30000&lt;/P&gt;&lt;P&gt;    min.insync.replicas = 1&lt;/P&gt;&lt;P&gt;    num.io.threads = 8&lt;/P&gt;&lt;P&gt;    num.network.threads = 3&lt;/P&gt;&lt;P&gt;    num.partitions = 1&lt;/P&gt;&lt;P&gt;    num.recovery.threads.per.data.dir = 1&lt;/P&gt;&lt;P&gt;    num.replica.alter.log.dirs.threads = null&lt;/P&gt;&lt;P&gt;    num.replica.fetchers = 1&lt;/P&gt;&lt;P&gt;    offset.metadata.max.bytes = 4096&lt;/P&gt;&lt;P&gt;    offsets.commit.required.acks = -1&lt;/P&gt;&lt;P&gt;    offsets.commit.timeout.ms = 5000&lt;/P&gt;&lt;P&gt;    offsets.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    offsets.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    offsets.retention.minutes = 86400000&lt;/P&gt;&lt;P&gt;    offsets.topic.compression.codec = 0&lt;/P&gt;&lt;P&gt;    offsets.topic.num.partitions = 50&lt;/P&gt;&lt;P&gt;    offsets.topic.replication.factor = 3&lt;/P&gt;&lt;P&gt;    offsets.topic.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding&lt;/P&gt;&lt;P&gt;    password.encoder.iterations = 4096&lt;/P&gt;&lt;P&gt;    password.encoder.key.length = 128&lt;/P&gt;&lt;P&gt;    password.encoder.keyfactory.algorithm = null&lt;/P&gt;&lt;P&gt;    password.encoder.old.secret = null&lt;/P&gt;&lt;P&gt;    password.encoder.secret = null&lt;/P&gt;&lt;P&gt;    port = 6667&lt;/P&gt;&lt;P&gt;    principal.builder.class = null&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.entry.expiration.ms = 300000&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.max.size = 1000&lt;/P&gt;&lt;P&gt;    producer.metrics.enable = false&lt;/P&gt;&lt;P&gt;    producer.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    queued.max.request.bytes = -1&lt;/P&gt;&lt;P&gt;    queued.max.requests = 500&lt;/P&gt;&lt;P&gt;    quota.consumer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.producer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.window.num = 
11&lt;/P&gt;&lt;P&gt;    quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.backoff.ms = 1000&lt;/P&gt;&lt;P&gt;    replica.fetch.max.bytes = 1048576&lt;/P&gt;&lt;P&gt;    replica.fetch.min.bytes = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.response.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    replica.fetch.wait.max.ms = 500&lt;/P&gt;&lt;P&gt;    replica.high.watermark.checkpoint.interval.ms = 5000&lt;/P&gt;&lt;P&gt;    replica.lag.time.max.ms = 10000&lt;/P&gt;&lt;P&gt;    replica.socket.receive.buffer.bytes = 65536&lt;/P&gt;&lt;P&gt;    replica.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    request.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    reserved.broker.max.id = 1000&lt;/P&gt;&lt;P&gt;    sasl.client.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.enabled.mechanisms = [GSSAPI]&lt;/P&gt;&lt;P&gt;    sasl.jaas.config = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.kinit.cmd = /usr/bin/kinit&lt;/P&gt;&lt;P&gt;    sasl.kerberos.min.time.before.relogin = 60000&lt;/P&gt;&lt;P&gt;    sasl.kerberos.principal.to.local.rules = [DEFAULT]&lt;/P&gt;&lt;P&gt;    sasl.kerberos.service.name = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.buffer.seconds = 300&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.min.period.seconds = 60&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.mechanism.inter.broker.protocol = GSSAPI&lt;/P&gt;&lt;P&gt;    sasl.server.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    security.inter.broker.protocol = PLAINTEXT&lt;/P&gt;&lt;P&gt;    socket.receive.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt; 
   socket.request.max.bytes = 104857600&lt;/P&gt;&lt;P&gt;    socket.send.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    ssl.cipher.suites = []&lt;/P&gt;&lt;P&gt;    ssl.client.auth = none&lt;/P&gt;&lt;P&gt;    ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]&lt;/P&gt;&lt;P&gt;    ssl.endpoint.identification.algorithm = https&lt;/P&gt;&lt;P&gt;    ssl.key.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keymanager.algorithm = SunX509&lt;/P&gt;&lt;P&gt;    ssl.keystore.location = &lt;/P&gt;&lt;P&gt;    ssl.keystore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keystore.type = JKS&lt;/P&gt;&lt;P&gt;    ssl.protocol = TLS&lt;/P&gt;&lt;P&gt;    ssl.provider = null&lt;/P&gt;&lt;P&gt;    ssl.secure.random.implementation = null&lt;/P&gt;&lt;P&gt;    ssl.trustmanager.algorithm = PKIX&lt;/P&gt;&lt;P&gt;    ssl.truststore.location = &lt;/P&gt;&lt;P&gt;    ssl.truststore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.truststore.type = JKS&lt;/P&gt;&lt;P&gt;    transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    transaction.max.timeout.ms = 900000&lt;/P&gt;&lt;P&gt;    transaction.remove.expired.transaction.cleanup.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    transaction.state.log.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    transaction.state.log.min.isr = 2&lt;/P&gt;&lt;P&gt;    transaction.state.log.num.partitions = 50&lt;/P&gt;&lt;P&gt;    transaction.state.log.replication.factor = 3&lt;/P&gt;&lt;P&gt;    transaction.state.log.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    transactional.id.expiration.ms = 604800000&lt;/P&gt;&lt;P&gt;    unclean.leader.election.enable = false&lt;/P&gt;&lt;P&gt;    zookeeper.connect = am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181&lt;/P&gt;&lt;P&gt;    zookeeper.connection.timeout.ms = 25000&lt;/P&gt;&lt;P&gt;    zookeeper.max.in.flight.requests = 10&lt;/P&gt;&lt;P&gt;    zookeeper.session.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    zookeeper.set.acl = 
false&lt;/P&gt;&lt;P&gt;    zookeeper.sync.time.ms = 2000&lt;/P&gt;&lt;P&gt; (kafka.server.KafkaConfig)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:36:59,237] INFO [ThrottledChannelReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:36:59,237] INFO [ThrottledChannelReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:36:59,238] INFO [ThrottledChannelReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:36:59,268] ERROR [KafkaServer id=1001] Fatal error during KafkaServer startup. Prepare to shutdown (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;org.apache.kafka.common.KafkaException: Failed to acquire lock on file .lock in /kafka-logs. A Kafka instance in another process or thread is using this directory.&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$$anonfun$lockLogDirs$1.apply(LogManager.scala:240)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$$anonfun$lockLogDirs$1.apply(LogManager.scala:236)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)&lt;/P&gt;&lt;P&gt;    at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$class.flatMap(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.AbstractTraversable.flatMap(Traversable.scala:104)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager.lockLogDirs(LogManager.scala:236)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager.&amp;lt;init&amp;gt;(LogManager.scala:97)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$.apply(LogManager.scala:958)&lt;/P&gt;&lt;P&gt;    at 
kafka.server.KafkaServer.startup(KafkaServer.scala:237)&lt;/P&gt;&lt;P&gt;    at kafka.server.KafkaServerStartable.startup(KafkaServerStartable.scala:38)&lt;/P&gt;&lt;P&gt;    at kafka.Kafka$.main(Kafka.scala:75)&lt;/P&gt;&lt;P&gt;    at kafka.Kafka.main(Kafka.scala)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:36:59,271] INFO [KafkaServer id=1001] shutting down (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:36:59,273] INFO [ZooKeeperClient] Closing. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:36:59,276] INFO [ZooKeeperClient] Closed. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:36:59,277] INFO [ThrottledChannelReaper-Fetch]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:37:00,238] INFO [ThrottledChannelReaper-Fetch]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:37:00,238] INFO [ThrottledChannelReaper-Fetch]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:37:00,239] INFO [ThrottledChannelReaper-Produce]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:37:01,238] INFO [ThrottledChannelReaper-Produce]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:37:01,238] INFO [ThrottledChannelReaper-Produce]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:37:01,238] INFO [ThrottledChannelReaper-Request]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:37:01,239] INFO [ThrottledChannelReaper-Request]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:37:01,239] INFO [ThrottledChannelReaper-Request]: Shutdown completed 
(kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:37:01,245] INFO [KafkaServer id=1001] shut down completed (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:37:01,246] ERROR Exiting Kafka. (kafka.server.KafkaServerStartable)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:37:01,248] INFO [KafkaServer id=1001] shutting down (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:40:51,711] INFO [GroupMetadataManager brokerId=1001] Removed 0 expired offsets in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:41:59,912] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:00,411] INFO starting (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:00,412] INFO Connecting to zookeeper on am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181 (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:00,425] INFO [ZooKeeperClient] Initializing a new session to am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:00,444] INFO [ZooKeeperClient] Waiting until connected. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:00,505] INFO [ZooKeeperClient] Connected. 
(kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:00,779] INFO Cluster ID = z-4P_uf-RzmpT2QvMnOD2g (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:00,846] INFO KafkaConfig values: &lt;/P&gt;&lt;P&gt;    advertised.host.name = null&lt;/P&gt;&lt;P&gt;    advertised.listeners = null&lt;/P&gt;&lt;P&gt;    advertised.port = null&lt;/P&gt;&lt;P&gt;    alter.config.policy.class.name = null&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    authorizer.class.name = &lt;/P&gt;&lt;P&gt;    auto.create.topics.enable = true&lt;/P&gt;&lt;P&gt;    auto.leader.rebalance.enable = true&lt;/P&gt;&lt;P&gt;    background.threads = 10&lt;/P&gt;&lt;P&gt;    broker.id = -1&lt;/P&gt;&lt;P&gt;    broker.id.generation.enable = true&lt;/P&gt;&lt;P&gt;    broker.rack = null&lt;/P&gt;&lt;P&gt;    client.quota.callback.class = null&lt;/P&gt;&lt;P&gt;    compression.type = producer&lt;/P&gt;&lt;P&gt;    connections.max.idle.ms = 600000&lt;/P&gt;&lt;P&gt;    controlled.shutdown.enable = true&lt;/P&gt;&lt;P&gt;    controlled.shutdown.max.retries = 3&lt;/P&gt;&lt;P&gt;    controlled.shutdown.retry.backoff.ms = 5000&lt;/P&gt;&lt;P&gt;    controller.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    create.topic.policy.class.name = null&lt;/P&gt;&lt;P&gt;    default.replication.factor = 1&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.check.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.time.ms = 86400000&lt;/P&gt;&lt;P&gt;    delegation.token.master.key = null&lt;/P&gt;&lt;P&gt;    delegation.token.max.lifetime.ms = 604800000&lt;/P&gt;&lt;P&gt;    delete.records.purgatory.purge.interval.requests = 1&lt;/P&gt;&lt;P&gt;    delete.topic.enable = true&lt;/P&gt;&lt;P&gt;    fetch.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    group.initial.rebalance.delay.ms = 3000&lt;/P&gt;&lt;P&gt;    group.max.session.timeout.ms = 
300000&lt;/P&gt;&lt;P&gt;    group.min.session.timeout.ms = 6000&lt;/P&gt;&lt;P&gt;    host.name = &lt;/P&gt;&lt;P&gt;    inter.broker.listener.name = null&lt;/P&gt;&lt;P&gt;    inter.broker.protocol.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    leader.imbalance.check.interval.seconds = 300&lt;/P&gt;&lt;P&gt;    leader.imbalance.per.broker.percentage = 10&lt;/P&gt;&lt;P&gt;    listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL&lt;/P&gt;&lt;P&gt;    listeners = PLAINTEXT://am-bigdata-01.am.local:6667&lt;/P&gt;&lt;P&gt;    log.cleaner.backoff.ms = 15000&lt;/P&gt;&lt;P&gt;    log.cleaner.dedupe.buffer.size = 134217728&lt;/P&gt;&lt;P&gt;    log.cleaner.delete.retention.ms = 86400000&lt;/P&gt;&lt;P&gt;    log.cleaner.enable = true&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.load.factor = 0.9&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.size = 524288&lt;/P&gt;&lt;P&gt;    log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308&lt;/P&gt;&lt;P&gt;    log.cleaner.min.cleanable.ratio = 0.5&lt;/P&gt;&lt;P&gt;    log.cleaner.min.compaction.lag.ms = 0&lt;/P&gt;&lt;P&gt;    log.cleaner.threads = 1&lt;/P&gt;&lt;P&gt;    log.cleanup.policy = [delete]&lt;/P&gt;&lt;P&gt;    log.dir = /tmp/kafka-logs&lt;/P&gt;&lt;P&gt;    log.dirs = /kafka-logs&lt;/P&gt;&lt;P&gt;    log.flush.interval.messages = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.interval.ms = null&lt;/P&gt;&lt;P&gt;    log.flush.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.flush.scheduler.interval.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.start.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.index.interval.bytes = 4096&lt;/P&gt;&lt;P&gt;    log.index.size.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    log.message.downconversion.enable = true&lt;/P&gt;&lt;P&gt;    log.message.format.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    log.message.timestamp.difference.max.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    
log.message.timestamp.type = CreateTime&lt;/P&gt;&lt;P&gt;    log.preallocate = false&lt;/P&gt;&lt;P&gt;    log.retention.bytes = -1&lt;/P&gt;&lt;P&gt;    log.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    log.retention.hours = 168&lt;/P&gt;&lt;P&gt;    log.retention.minutes = null&lt;/P&gt;&lt;P&gt;    log.retention.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.hours = 168&lt;/P&gt;&lt;P&gt;    log.roll.jitter.hours = 0&lt;/P&gt;&lt;P&gt;    log.roll.jitter.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.ms = null&lt;/P&gt;&lt;P&gt;    log.segment.bytes = 1073741824&lt;/P&gt;&lt;P&gt;    log.segment.delete.delay.ms = 60000&lt;/P&gt;&lt;P&gt;    max.connections.per.ip = 2147483647&lt;/P&gt;&lt;P&gt;    max.connections.per.ip.overrides = &lt;/P&gt;&lt;P&gt;    max.incremental.fetch.session.cache.slots = 1000&lt;/P&gt;&lt;P&gt;    message.max.bytes = 1000000&lt;/P&gt;&lt;P&gt;    metric.reporters = []&lt;/P&gt;&lt;P&gt;    metrics.num.samples = 2&lt;/P&gt;&lt;P&gt;    metrics.recording.level = INFO&lt;/P&gt;&lt;P&gt;    metrics.sample.window.ms = 30000&lt;/P&gt;&lt;P&gt;    min.insync.replicas = 1&lt;/P&gt;&lt;P&gt;    num.io.threads = 8&lt;/P&gt;&lt;P&gt;    num.network.threads = 3&lt;/P&gt;&lt;P&gt;    num.partitions = 1&lt;/P&gt;&lt;P&gt;    num.recovery.threads.per.data.dir = 1&lt;/P&gt;&lt;P&gt;    num.replica.alter.log.dirs.threads = null&lt;/P&gt;&lt;P&gt;    num.replica.fetchers = 1&lt;/P&gt;&lt;P&gt;    offset.metadata.max.bytes = 4096&lt;/P&gt;&lt;P&gt;    offsets.commit.required.acks = -1&lt;/P&gt;&lt;P&gt;    offsets.commit.timeout.ms = 5000&lt;/P&gt;&lt;P&gt;    offsets.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    offsets.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    offsets.retention.minutes = 86400000&lt;/P&gt;&lt;P&gt;    offsets.topic.compression.codec = 0&lt;/P&gt;&lt;P&gt;    offsets.topic.num.partitions = 50&lt;/P&gt;&lt;P&gt;    offsets.topic.replication.factor = 3&lt;/P&gt;&lt;P&gt;    offsets.topic.segment.bytes = 
104857600&lt;/P&gt;&lt;P&gt;    password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding&lt;/P&gt;&lt;P&gt;    password.encoder.iterations = 4096&lt;/P&gt;&lt;P&gt;    password.encoder.key.length = 128&lt;/P&gt;&lt;P&gt;    password.encoder.keyfactory.algorithm = null&lt;/P&gt;&lt;P&gt;    password.encoder.old.secret = null&lt;/P&gt;&lt;P&gt;    password.encoder.secret = null&lt;/P&gt;&lt;P&gt;    port = 6667&lt;/P&gt;&lt;P&gt;    principal.builder.class = null&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.entry.expiration.ms = 300000&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.max.size = 1000&lt;/P&gt;&lt;P&gt;    producer.metrics.enable = false&lt;/P&gt;&lt;P&gt;    producer.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    queued.max.request.bytes = -1&lt;/P&gt;&lt;P&gt;    queued.max.requests = 500&lt;/P&gt;&lt;P&gt;    quota.consumer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.producer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.window.num = 11&lt;/P&gt;&lt;P&gt;    quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.backoff.ms = 1000&lt;/P&gt;&lt;P&gt;    replica.fetch.max.bytes = 1048576&lt;/P&gt;&lt;P&gt;    replica.fetch.min.bytes = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.response.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    replica.fetch.wait.max.ms = 500&lt;/P&gt;&lt;P&gt;    replica.high.watermark.checkpoint.interval.ms = 5000&lt;/P&gt;&lt;P&gt;    replica.lag.time.max.ms = 10000&lt;/P&gt;&lt;P&gt;    replica.socket.receive.buffer.bytes = 65536&lt;/P&gt;&lt;P&gt;    replica.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    request.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    reserved.broker.max.id = 1000&lt;/P&gt;&lt;P&gt;    sasl.client.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.enabled.mechanisms = [GSSAPI]&lt;/P&gt;&lt;P&gt;    sasl.jaas.config = null&lt;/P&gt;&lt;P&gt;    
sasl.kerberos.kinit.cmd = /usr/bin/kinit&lt;/P&gt;&lt;P&gt;    sasl.kerberos.min.time.before.relogin = 60000&lt;/P&gt;&lt;P&gt;    sasl.kerberos.principal.to.local.rules = [DEFAULT]&lt;/P&gt;&lt;P&gt;    sasl.kerberos.service.name = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.buffer.seconds = 300&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.min.period.seconds = 60&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.mechanism.inter.broker.protocol = GSSAPI&lt;/P&gt;&lt;P&gt;    sasl.server.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    security.inter.broker.protocol = PLAINTEXT&lt;/P&gt;&lt;P&gt;    socket.receive.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    socket.request.max.bytes = 104857600&lt;/P&gt;&lt;P&gt;    socket.send.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    ssl.cipher.suites = []&lt;/P&gt;&lt;P&gt;    ssl.client.auth = none&lt;/P&gt;&lt;P&gt;    ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]&lt;/P&gt;&lt;P&gt;    ssl.endpoint.identification.algorithm = https&lt;/P&gt;&lt;P&gt;    ssl.key.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keymanager.algorithm = SunX509&lt;/P&gt;&lt;P&gt;    ssl.keystore.location = &lt;/P&gt;&lt;P&gt;    ssl.keystore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keystore.type = JKS&lt;/P&gt;&lt;P&gt;    ssl.protocol = TLS&lt;/P&gt;&lt;P&gt;    ssl.provider = null&lt;/P&gt;&lt;P&gt;    ssl.secure.random.implementation = null&lt;/P&gt;&lt;P&gt;    ssl.trustmanager.algorithm = PKIX&lt;/P&gt;&lt;P&gt;    ssl.truststore.location = &lt;/P&gt;&lt;P&gt;    ssl.truststore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.truststore.type = JKS&lt;/P&gt;&lt;P&gt;    
transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    transaction.max.timeout.ms = 900000&lt;/P&gt;&lt;P&gt;    transaction.remove.expired.transaction.cleanup.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    transaction.state.log.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    transaction.state.log.min.isr = 2&lt;/P&gt;&lt;P&gt;    transaction.state.log.num.partitions = 50&lt;/P&gt;&lt;P&gt;    transaction.state.log.replication.factor = 3&lt;/P&gt;&lt;P&gt;    transaction.state.log.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    transactional.id.expiration.ms = 604800000&lt;/P&gt;&lt;P&gt;    unclean.leader.election.enable = false&lt;/P&gt;&lt;P&gt;    zookeeper.connect = am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181&lt;/P&gt;&lt;P&gt;    zookeeper.connection.timeout.ms = 25000&lt;/P&gt;&lt;P&gt;    zookeeper.max.in.flight.requests = 10&lt;/P&gt;&lt;P&gt;    zookeeper.session.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    zookeeper.set.acl = false&lt;/P&gt;&lt;P&gt;    zookeeper.sync.time.ms = 2000&lt;/P&gt;&lt;P&gt; (kafka.server.KafkaConfig)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:00,853] INFO KafkaConfig values: &lt;/P&gt;&lt;P&gt;    advertised.host.name = null&lt;/P&gt;&lt;P&gt;    advertised.listeners = null&lt;/P&gt;&lt;P&gt;    advertised.port = null&lt;/P&gt;&lt;P&gt;    alter.config.policy.class.name = null&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    alter.log.dirs.replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    authorizer.class.name = &lt;/P&gt;&lt;P&gt;    auto.create.topics.enable = true&lt;/P&gt;&lt;P&gt;    auto.leader.rebalance.enable = true&lt;/P&gt;&lt;P&gt;    background.threads = 10&lt;/P&gt;&lt;P&gt;    broker.id = -1&lt;/P&gt;&lt;P&gt;    broker.id.generation.enable = true&lt;/P&gt;&lt;P&gt;    broker.rack = null&lt;/P&gt;&lt;P&gt;    client.quota.callback.class = null&lt;/P&gt;&lt;P&gt;    compression.type = 
producer&lt;/P&gt;&lt;P&gt;    connections.max.idle.ms = 600000&lt;/P&gt;&lt;P&gt;    controlled.shutdown.enable = true&lt;/P&gt;&lt;P&gt;    controlled.shutdown.max.retries = 3&lt;/P&gt;&lt;P&gt;    controlled.shutdown.retry.backoff.ms = 5000&lt;/P&gt;&lt;P&gt;    controller.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    create.topic.policy.class.name = null&lt;/P&gt;&lt;P&gt;    default.replication.factor = 1&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.check.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    delegation.token.expiry.time.ms = 86400000&lt;/P&gt;&lt;P&gt;    delegation.token.master.key = null&lt;/P&gt;&lt;P&gt;    delegation.token.max.lifetime.ms = 604800000&lt;/P&gt;&lt;P&gt;    delete.records.purgatory.purge.interval.requests = 1&lt;/P&gt;&lt;P&gt;    delete.topic.enable = true&lt;/P&gt;&lt;P&gt;    fetch.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    group.initial.rebalance.delay.ms = 3000&lt;/P&gt;&lt;P&gt;    group.max.session.timeout.ms = 300000&lt;/P&gt;&lt;P&gt;    group.min.session.timeout.ms = 6000&lt;/P&gt;&lt;P&gt;    host.name = &lt;/P&gt;&lt;P&gt;    inter.broker.listener.name = null&lt;/P&gt;&lt;P&gt;    inter.broker.protocol.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    leader.imbalance.check.interval.seconds = 300&lt;/P&gt;&lt;P&gt;    leader.imbalance.per.broker.percentage = 10&lt;/P&gt;&lt;P&gt;    listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL&lt;/P&gt;&lt;P&gt;    listeners = PLAINTEXT://am-bigdata-01.am.local:6667&lt;/P&gt;&lt;P&gt;    log.cleaner.backoff.ms = 15000&lt;/P&gt;&lt;P&gt;    log.cleaner.dedupe.buffer.size = 134217728&lt;/P&gt;&lt;P&gt;    log.cleaner.delete.retention.ms = 86400000&lt;/P&gt;&lt;P&gt;    log.cleaner.enable = true&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.load.factor = 0.9&lt;/P&gt;&lt;P&gt;    log.cleaner.io.buffer.size = 524288&lt;/P&gt;&lt;P&gt;    log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308&lt;/P&gt;&lt;P&gt;   
 log.cleaner.min.cleanable.ratio = 0.5&lt;/P&gt;&lt;P&gt;    log.cleaner.min.compaction.lag.ms = 0&lt;/P&gt;&lt;P&gt;    log.cleaner.threads = 1&lt;/P&gt;&lt;P&gt;    log.cleanup.policy = [delete]&lt;/P&gt;&lt;P&gt;    log.dir = /tmp/kafka-logs&lt;/P&gt;&lt;P&gt;    log.dirs = /kafka-logs&lt;/P&gt;&lt;P&gt;    log.flush.interval.messages = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.interval.ms = null&lt;/P&gt;&lt;P&gt;    log.flush.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.flush.scheduler.interval.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.flush.start.offset.checkpoint.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    log.index.interval.bytes = 4096&lt;/P&gt;&lt;P&gt;    log.index.size.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    log.message.downconversion.enable = true&lt;/P&gt;&lt;P&gt;    log.message.format.version = 2.0-IV1&lt;/P&gt;&lt;P&gt;    log.message.timestamp.difference.max.ms = 9223372036854775807&lt;/P&gt;&lt;P&gt;    log.message.timestamp.type = CreateTime&lt;/P&gt;&lt;P&gt;    log.preallocate = false&lt;/P&gt;&lt;P&gt;    log.retention.bytes = -1&lt;/P&gt;&lt;P&gt;    log.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    log.retention.hours = 168&lt;/P&gt;&lt;P&gt;    log.retention.minutes = null&lt;/P&gt;&lt;P&gt;    log.retention.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.hours = 168&lt;/P&gt;&lt;P&gt;    log.roll.jitter.hours = 0&lt;/P&gt;&lt;P&gt;    log.roll.jitter.ms = null&lt;/P&gt;&lt;P&gt;    log.roll.ms = null&lt;/P&gt;&lt;P&gt;    log.segment.bytes = 1073741824&lt;/P&gt;&lt;P&gt;    log.segment.delete.delay.ms = 60000&lt;/P&gt;&lt;P&gt;    max.connections.per.ip = 2147483647&lt;/P&gt;&lt;P&gt;    max.connections.per.ip.overrides = &lt;/P&gt;&lt;P&gt;    max.incremental.fetch.session.cache.slots = 1000&lt;/P&gt;&lt;P&gt;    message.max.bytes = 1000000&lt;/P&gt;&lt;P&gt;    metric.reporters = []&lt;/P&gt;&lt;P&gt;    metrics.num.samples = 2&lt;/P&gt;&lt;P&gt;    metrics.recording.level = 
INFO&lt;/P&gt;&lt;P&gt;    metrics.sample.window.ms = 30000&lt;/P&gt;&lt;P&gt;    min.insync.replicas = 1&lt;/P&gt;&lt;P&gt;    num.io.threads = 8&lt;/P&gt;&lt;P&gt;    num.network.threads = 3&lt;/P&gt;&lt;P&gt;    num.partitions = 1&lt;/P&gt;&lt;P&gt;    num.recovery.threads.per.data.dir = 1&lt;/P&gt;&lt;P&gt;    num.replica.alter.log.dirs.threads = null&lt;/P&gt;&lt;P&gt;    num.replica.fetchers = 1&lt;/P&gt;&lt;P&gt;    offset.metadata.max.bytes = 4096&lt;/P&gt;&lt;P&gt;    offsets.commit.required.acks = -1&lt;/P&gt;&lt;P&gt;    offsets.commit.timeout.ms = 5000&lt;/P&gt;&lt;P&gt;    offsets.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    offsets.retention.check.interval.ms = 600000&lt;/P&gt;&lt;P&gt;    offsets.retention.minutes = 86400000&lt;/P&gt;&lt;P&gt;    offsets.topic.compression.codec = 0&lt;/P&gt;&lt;P&gt;    offsets.topic.num.partitions = 50&lt;/P&gt;&lt;P&gt;    offsets.topic.replication.factor = 3&lt;/P&gt;&lt;P&gt;    offsets.topic.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding&lt;/P&gt;&lt;P&gt;    password.encoder.iterations = 4096&lt;/P&gt;&lt;P&gt;    password.encoder.key.length = 128&lt;/P&gt;&lt;P&gt;    password.encoder.keyfactory.algorithm = null&lt;/P&gt;&lt;P&gt;    password.encoder.old.secret = null&lt;/P&gt;&lt;P&gt;    password.encoder.secret = null&lt;/P&gt;&lt;P&gt;    port = 6667&lt;/P&gt;&lt;P&gt;    principal.builder.class = null&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.entry.expiration.ms = 300000&lt;/P&gt;&lt;P&gt;    producer.metrics.cache.max.size = 1000&lt;/P&gt;&lt;P&gt;    producer.metrics.enable = false&lt;/P&gt;&lt;P&gt;    producer.purgatory.purge.interval.requests = 10000&lt;/P&gt;&lt;P&gt;    queued.max.request.bytes = -1&lt;/P&gt;&lt;P&gt;    queued.max.requests = 500&lt;/P&gt;&lt;P&gt;    quota.consumer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.producer.default = 9223372036854775807&lt;/P&gt;&lt;P&gt;    quota.window.num = 
11&lt;/P&gt;&lt;P&gt;    quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.backoff.ms = 1000&lt;/P&gt;&lt;P&gt;    replica.fetch.max.bytes = 1048576&lt;/P&gt;&lt;P&gt;    replica.fetch.min.bytes = 1&lt;/P&gt;&lt;P&gt;    replica.fetch.response.max.bytes = 10485760&lt;/P&gt;&lt;P&gt;    replica.fetch.wait.max.ms = 500&lt;/P&gt;&lt;P&gt;    replica.high.watermark.checkpoint.interval.ms = 5000&lt;/P&gt;&lt;P&gt;    replica.lag.time.max.ms = 10000&lt;/P&gt;&lt;P&gt;    replica.socket.receive.buffer.bytes = 65536&lt;/P&gt;&lt;P&gt;    replica.socket.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    replication.quota.window.num = 11&lt;/P&gt;&lt;P&gt;    replication.quota.window.size.seconds = 1&lt;/P&gt;&lt;P&gt;    request.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    reserved.broker.max.id = 1000&lt;/P&gt;&lt;P&gt;    sasl.client.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.enabled.mechanisms = [GSSAPI]&lt;/P&gt;&lt;P&gt;    sasl.jaas.config = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.kinit.cmd = /usr/bin/kinit&lt;/P&gt;&lt;P&gt;    sasl.kerberos.min.time.before.relogin = 60000&lt;/P&gt;&lt;P&gt;    sasl.kerberos.principal.to.local.rules = [DEFAULT]&lt;/P&gt;&lt;P&gt;    sasl.kerberos.service.name = null&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.kerberos.ticket.renew.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.class = null&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.buffer.seconds = 300&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.min.period.seconds = 60&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.factor = 0.8&lt;/P&gt;&lt;P&gt;    sasl.login.refresh.window.jitter = 0.05&lt;/P&gt;&lt;P&gt;    sasl.mechanism.inter.broker.protocol = GSSAPI&lt;/P&gt;&lt;P&gt;    sasl.server.callback.handler.class = null&lt;/P&gt;&lt;P&gt;    security.inter.broker.protocol = PLAINTEXT&lt;/P&gt;&lt;P&gt;    socket.receive.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt; 
   socket.request.max.bytes = 104857600&lt;/P&gt;&lt;P&gt;    socket.send.buffer.bytes = 102400&lt;/P&gt;&lt;P&gt;    ssl.cipher.suites = []&lt;/P&gt;&lt;P&gt;    ssl.client.auth = none&lt;/P&gt;&lt;P&gt;    ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]&lt;/P&gt;&lt;P&gt;    ssl.endpoint.identification.algorithm = https&lt;/P&gt;&lt;P&gt;    ssl.key.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keymanager.algorithm = SunX509&lt;/P&gt;&lt;P&gt;    ssl.keystore.location = &lt;/P&gt;&lt;P&gt;    ssl.keystore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.keystore.type = JKS&lt;/P&gt;&lt;P&gt;    ssl.protocol = TLS&lt;/P&gt;&lt;P&gt;    ssl.provider = null&lt;/P&gt;&lt;P&gt;    ssl.secure.random.implementation = null&lt;/P&gt;&lt;P&gt;    ssl.trustmanager.algorithm = PKIX&lt;/P&gt;&lt;P&gt;    ssl.truststore.location = &lt;/P&gt;&lt;P&gt;    ssl.truststore.password = [hidden]&lt;/P&gt;&lt;P&gt;    ssl.truststore.type = JKS&lt;/P&gt;&lt;P&gt;    transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000&lt;/P&gt;&lt;P&gt;    transaction.max.timeout.ms = 900000&lt;/P&gt;&lt;P&gt;    transaction.remove.expired.transaction.cleanup.interval.ms = 3600000&lt;/P&gt;&lt;P&gt;    transaction.state.log.load.buffer.size = 5242880&lt;/P&gt;&lt;P&gt;    transaction.state.log.min.isr = 2&lt;/P&gt;&lt;P&gt;    transaction.state.log.num.partitions = 50&lt;/P&gt;&lt;P&gt;    transaction.state.log.replication.factor = 3&lt;/P&gt;&lt;P&gt;    transaction.state.log.segment.bytes = 104857600&lt;/P&gt;&lt;P&gt;    transactional.id.expiration.ms = 604800000&lt;/P&gt;&lt;P&gt;    unclean.leader.election.enable = false&lt;/P&gt;&lt;P&gt;    zookeeper.connect = am-bigdata-03.am.local:2181,am-bigdata-01.am.local:2181,am-bigdata-02.am.local:2181&lt;/P&gt;&lt;P&gt;    zookeeper.connection.timeout.ms = 25000&lt;/P&gt;&lt;P&gt;    zookeeper.max.in.flight.requests = 10&lt;/P&gt;&lt;P&gt;    zookeeper.session.timeout.ms = 30000&lt;/P&gt;&lt;P&gt;    zookeeper.set.acl = 
false&lt;/P&gt;&lt;P&gt;    zookeeper.sync.time.ms = 2000&lt;/P&gt;&lt;P&gt; (kafka.server.KafkaConfig)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:00,879] INFO [ThrottledChannelReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:00,879] INFO [ThrottledChannelReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:00,880] INFO [ThrottledChannelReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:00,909] ERROR [KafkaServer id=1001] Fatal error during KafkaServer startup. Prepare to shutdown (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;org.apache.kafka.common.KafkaException: Failed to acquire lock on file .lock in /kafka-logs. A Kafka instance in another process or thread is using this directory.&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$$anonfun$lockLogDirs$1.apply(LogManager.scala:240)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$$anonfun$lockLogDirs$1.apply(LogManager.scala:236)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)&lt;/P&gt;&lt;P&gt;    at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)&lt;/P&gt;&lt;P&gt;    at scala.collection.TraversableLike$class.flatMap(TraversableLike.scala:241)&lt;/P&gt;&lt;P&gt;    at scala.collection.AbstractTraversable.flatMap(Traversable.scala:104)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager.lockLogDirs(LogManager.scala:236)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager.&amp;lt;init&amp;gt;(LogManager.scala:97)&lt;/P&gt;&lt;P&gt;    at kafka.log.LogManager$.apply(LogManager.scala:958)&lt;/P&gt;&lt;P&gt;    at 
kafka.server.KafkaServer.startup(KafkaServer.scala:237)&lt;/P&gt;&lt;P&gt;    at kafka.server.KafkaServerStartable.startup(KafkaServerStartable.scala:38)&lt;/P&gt;&lt;P&gt;    at kafka.Kafka$.main(Kafka.scala:75)&lt;/P&gt;&lt;P&gt;    at kafka.Kafka.main(Kafka.scala)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:00,912] INFO [KafkaServer id=1001] shutting down (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:00,914] INFO [ZooKeeperClient] Closing. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:00,917] INFO [ZooKeeperClient] Closed. (kafka.zookeeper.ZooKeeperClient)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:00,918] INFO [ThrottledChannelReaper-Fetch]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:01,880] INFO [ThrottledChannelReaper-Fetch]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:01,880] INFO [ThrottledChannelReaper-Fetch]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:01,880] INFO [ThrottledChannelReaper-Produce]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:02,879] INFO [ThrottledChannelReaper-Produce]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:02,879] INFO [ThrottledChannelReaper-Produce]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:02,880] INFO [ThrottledChannelReaper-Request]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:02,880] INFO [ThrottledChannelReaper-Request]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:02,880] INFO [ThrottledChannelReaper-Request]: Shutdown completed 
(kafka.server.ClientQuotaManager$ThrottledChannelReaper)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:02,885] INFO [KafkaServer id=1001] shut down completed (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:02,885] ERROR Exiting Kafka. (kafka.server.KafkaServerStartable)&lt;/P&gt;&lt;P&gt;[2019-05-10 16:42:02,887] INFO [KafkaServer id=1001] shutting down (kafka.server.KafkaServer)&lt;/P&gt;&lt;P&gt;&lt;BR /&gt;&lt;/P&gt;</description>
      <pubDate>Fri, 10 May 2019 22:59:50 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Support-Questions/Mapreduce-2-and-YARN-auto-stop-after-restart-a-few-second-I/m-p/239690#M201499</guid>
      <dc:creator>anhdt061091</dc:creator>
      <dc:date>2019-05-10T22:59:50Z</dc:date>
    </item>
  </channel>
</rss>

