<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>question Re: How to reissue a delegated token after max lifetime passes for a spark streaming application on a Kerberized cluster in Support Questions</title>
    <link>https://community.cloudera.com/t5/Support-Questions/How-to-reissue-a-delegated-token-after-max-lifetime-passes/m-p/240825#M202629</link>
    <description>&lt;P&gt;Spark submit: &lt;/P&gt;&lt;P&gt;spark-submit \
         --master yarn \
         --deploy-mode cluster \
         --conf "spark.executor.extraJavaOptions=-Djava.security.auth.login.config=kafka_client_jaas.conf -Dlog4j.configuration=xxx -Djava.util.Arrays.useLegacyMergeSort=true" \
         --conf "spark.driver.extraJavaOptions=-Djava.security.auth.login.config=kafka_client_jaas.conf  -Dlog4j.configuration=xxx -Djava.util.Arrays.useLegacyMergeSort=true" \
         --conf spark.ui.port=18086 \
         --conf spark.executor.memory=${executor_memory} \
         --conf spark.executor.instances=${num_executors} \
         --conf spark.executor.cores=${executor_cores} \
         --conf spark.driver.memory=4g \
         --conf spark.driver.maxResultSize=3g \
         --conf spark.kafka.broker.ingest=xxx \
         --conf spark.kafka.zookeeper.ingest=xxx \
         --conf spark.kafka.broker.egest=xxx \
         --conf spark.kafka.topic.input=xxx \
         --conf spark.kafka.topic.output=xxx \
         --conf spark.kafka.input.interval=10 \
         --conf spark.kafka.group=xxx \
         --conf spark.streaming.kafka.maxRetries=10 \
         --conf spark.kafka.security.protocol.ingress=SASL_PLAINTEXT \
         --conf spark.kafka.security.protocol.egress=SASL_PLAINTEXT \
         --conf spark.fetch.message.max.bytes=104857600 \
         --conf spark.hive.enable.stats=true \
         --conf spark.streaming.backpressure.enabled=true \
         --conf spark.streaming.kafka.maxRatePerPartition=1 \
         --conf spark.streaming.receiver.maxRate=10 \
         --conf spark.executor.heartbeatInterval=120s \
         --conf spark.network.timeout=600s \
         --conf spark.yarn.scheduler.heartbeat.interval-ms=1000 \
         --conf spark.sql.parquet.compression.codec=snappy \
         --conf spark.scheduler.minRegisteredResourcesRatio=1 \
         --conf spark.yarn.maxAppAttempts=10 \
         --conf spark.yarn.am.attemptFailuresValidityInterval=1h \
         --conf spark.yarn.max.executor.failures=$((8 * ${num_executors})) `# Increase max executor failures (Default: max(numExecutors * 2, 3))` \
         --conf spark.yarn.executor.failuresValidityInterval=1h \
         --conf spark.task.maxFailures=8 \
         --conf spark.yarn.submit.waitAppCompletion=false \
         --conf spark.yarn.principal=xxx \
         --conf spark.yarn.keytab=xxx \
         --conf spark.hadoop.fs.hdfs.impl.disable.cache=true \
         --queue default \
         ${APP_HOME}/xxx.jar&lt;/P&gt;</description>
    <pubDate>Tue, 08 Jan 2019 14:00:00 GMT</pubDate>
    <dc:creator>alinazemian</dc:creator>
    <dc:date>2019-01-08T14:00:00Z</dc:date>
  </channel>
</rss>

