Oozie job always in running state

My Oozie workflow (a single Sqoop action, sqoop-node in the old-movies workflow) never finishes; the job stays in RUNNING state. Here is the full job configuration of the stuck action:
<?xml version="1.0" encoding="UTF-8" standalone="no"?><configuration>
<property><name>dfs.journalnode.rpc-address</name><value>0.0.0.0:8485</value><source>hdfs-default.xml</source></property>
<property><name>yarn.ipc.rpc.class</name><value>org.apache.hadoop.yarn.ipc.HadoopYarnProtoRPC</value><source>yarn-default.xml</source></property>
<property><name>mapreduce.job.maxtaskfailures.per.tracker</name><value>3</value><source>mapred-default.xml</source></property>
<property><name>yarn.client.max-cached-nodemanagers-proxies</name><value>0</value><source>yarn-default.xml</source></property>
<property><name>mapreduce.job.speculative.retry-after-speculate</name><value>15000</value><source>mapred-default.xml</source></property>
<property><name>ha.health-monitor.connect-retry-interval.ms</name><value>1000</value><source>core-default.xml</source></property>
<property><name>yarn.resourcemanager.work-preserving-recovery.enabled</name><value>true</value><source>programatically</source></property>
<property><name>yarn.resourcemanager.monitor.capacity.preemption.total_preemption_per_round</name><value>0.1</value><source>programatically</source></property>
<property><name>dfs.client.mmap.cache.size</name><value>256</value><source>hdfs-default.xml</source></property>
<property><name>dfs.namenode.read-lock-reporting-threshold-ms</name><value>5000</value><source>hdfs-default.xml</source></property>
<property><name>mapreduce.reduce.markreset.buffer.percent</name><value>0.0</value><source>mapred-default.xml</source></property>
<property><name>dfs.datanode.data.dir</name><value>/hadoop/hdfs/data</value><source>programatically</source></property>
<property><name>mapreduce.jobhistory.max-age-ms</name><value>604800000</value><source>mapred-default.xml</source></property>
<property><name>dfs.namenode.lazypersist.file.scrub.interval.sec</name><value>300</value><source>hdfs-default.xml</source></property>
<property><name>mapreduce.job.ubertask.enable</name><value>false</value><source>mapred-default.xml</source></property>
<property><name>dfs.namenode.delegation.token.renew-interval</name><value>86400000</value><source>hdfs-default.xml</source></property>
<property><name>yarn.nodemanager.log-aggregation.compression-type</name><value>gz</value><source>programatically</source></property>
<property><name>dfs.namenode.replication.considerLoad</name><value>true</value><source>hdfs-default.xml</source></property>
<property><name>mapreduce.job.complete.cancel.delegation.tokens</name><value>false</value><source>programatically</source></property>
<property><name>mapreduce.jobhistory.datestring.cache.size</name><value>200000</value><source>mapred-default.xml</source></property>
<property><name>hadoop.security.kms.client.authentication.retry-count</name><value>1</value><source>core-default.xml</source></property>
<property><name>hadoop.ssl.enabled.protocols</name><value>TLSv1,SSLv2Hello,TLSv1.1,TLSv1.2</value><source>core-default.xml</source></property>
<property><name>dfs.namenode.retrycache.heap.percent</name><value>0.03f</value><source>hdfs-default.xml</source></property>
<property><name>dfs.namenode.top.window.num.buckets</name><value>10</value><source>hdfs-default.xml</source></property>
<property><name>yarn.resourcemanager.scheduler.address</name><value>sandbox-hdp.hortonworks.com:8030</value><source>programatically</source></property>
<property><name>hadoop.http.cross-origin.enabled</name><value>false</value><source>core-default.xml</source></property>
<property><name>ssl.client.keystore.password</name><value>bigdata</value><source>programatically</source></property>
<property><name>dfs.client.file-block-storage-locations.num-threads</name><value>10</value><source>hdfs-default.xml</source></property>
<property><name>dfs.datanode.balance.bandwidthPerSec</name><value>6250000</value><source>programatically</source></property>
<property><name>yarn.resourcemanager.proxy-user-privileges.enabled</name><value>false</value><source>yarn-default.xml</source></property>
<property><name>dfs.namenode.decommission.max.concurrent.tracked.nodes</name><value>100</value><source>hdfs-default.xml</source></property>
<property><name>mapreduce.reduce.shuffle.fetch.retry.enabled</name><value>1</value><source>programatically</source></property>
<property><name>io.mapfile.bloom.error.rate</name><value>0.005</value><source>core-default.xml</source></property>
<property><name>yarn.nodemanager.resourcemanager.minimum.version</name><value>NONE</value><source>yarn-default.xml</source></property>
<property><name>yarn.resourcemanager.nodemanagers.heartbeat-interval-ms</name><value>1000</value><source>yarn-default.xml</source></property>
<property><name>fs.azure.user.agent.prefix</name><value>User-Agent: APN/1.0 Hortonworks/1.0 HDP/</value><source>programatically</source></property>
<property><name>dfs.secondary.namenode.kerberos.internal.spnego.principal</name><value>${dfs.web.authentication.kerberos.principal}</value><source>hdfs-default.xml</source></property>
<property><name>hadoop.http.cross-origin.allowed-headers</name><value>X-Requested-With,Content-Type,Accept,Origin</value><source>core-default.xml</source></property>
<property><name>yarn.nodemanager.delete.debug-delay-sec</name><value>0</value><source>programatically</source></property>
<property><name>hadoop.proxyuser.hue.hosts</name><value>*</value><source>programatically</source></property>
<property><name>dfs.namenode.write-lock-reporting-threshold-ms</name><value>1000</value><source>hdfs-default.xml</source></property>
<property><name>dfs.client.read.shortcircuit.streams.cache.size</name><value>4096</value><source>programatically</source></property>
<property><name>dfs.image.transfer.bandwidthPerSec</name><value>0</value><source>hdfs-default.xml</source></property>
<property><name>yarn.scheduler.maximum-allocation-vcores</name><value>8</value><source>programatically</source></property>
<property><name>yarn.resourcemanager.webapp.rest-csrf.enabled</name><value>false</value><source>yarn-default.xml</source></property>
<property><name>yarn.timeline-service.address</name><value>sandbox-hdp.hortonworks.com:10200</value><source>programatically</source></property>
<property><name>yarn.webapp.xfs-filter.enabled</name><value>true</value><source>yarn-default.xml</source></property>
<property><name>yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb</name><value>1000</value><source>programatically</source></property>
<property><name>mapreduce.job.hdfs-servers</name><value>${fs.defaultFS}</value><source>yarn-default.xml</source></property>
<property><name>mapreduce.task.profile.reduce.params</name><value>${mapreduce.task.profile.params}</value><source>mapred-default.xml</source></property>
<property><name>dfs.namenode.fs-limits.min-block-size</name><value>1048576</value><source>hdfs-default.xml</source></property>
<property><name>ftp.stream-buffer-size</name><value>4096</value><source>core-default.xml</source></property>
<property><name>dfs.client.use.legacy.blockreader.local</name><value>false</value><source>hdfs-default.xml</source></property>
<property><name>hadoop.http.cross-origin.allowed-methods</name><value>GET,POST,HEAD</value><source>core-default.xml</source></property>
<property><name>dfs.short.circuit.shared.memory.watcher.interrupt.check.ms</name><value>60000</value><source>hdfs-default.xml</source></property>
<property><name>dfs.datanode.directoryscan.threads</name><value>1</value><source>hdfs-default.xml</source></property>
<property><name>fs.s3a.buffer.dir</name><value>${hadoop.tmp.dir}/s3a</value><source>core-default.xml</source></property>
<property><name>yarn.client.application-client-protocol.poll-interval-ms</name><value>200</value><source>yarn-default.xml</source></property>
<property><name>yarn.timeline-service.leveldb-timeline-store.path</name><value>/hadoop/yarn/timeline</value><source>programatically</source></property>
<property><name>mapreduce.job.split.metainfo.maxsize</name><value>10000000</value><source>mapred-default.xml</source></property>
<property><name>dfs.namenode.edits.noeditlogchannelflush</name><value>false</value><source>hdfs-default.xml</source></property>
<property><name>fs.s3a.fast.upload.buffer</name><value>disk</value><source>programatically</source></property>
<property><name>s3native.bytes-per-checksum</name><value>512</value><source>core-default.xml</source></property>
<property><name>yarn.client.failover-retries-on-socket-timeouts</name><value>0</value><source>yarn-default.xml</source></property>
<property><name>hadoop.security.sensitive-config-keys</name><value>
    secret$
    password$
    ssl.keystore.pass$
    fs.s3.*[Ss]ecret.?[Kk]ey
    fs.s3a.*.server-side-encryption.key
    fs.azure.account.key.*
    credential$
    oauth.*token$
    hadoop.security.sensitive-config-keys
  </value><source>core-default.xml</source></property>
<property><name>dfs.namenode.startup.delay.block.deletion.sec</name><value>3600</value><source>programatically</source></property>
<property><name>dfs.webhdfs.user.provider.user.pattern</name><value>^[A-Za-z_][A-Za-z0-9._-]*[$]?$</value><source>hdfs-default.xml</source></property>
<property><name>yarn.nodemanager.webapp.rest-csrf.custom-header</name><value>X-XSRF-Header</value><source>yarn-default.xml</source></property>
<property><name>mapreduce.tasktracker.tasks.sleeptimebeforesigkill</name><value>5000</value><source>mapred-default.xml</source></property>
<property><name>yarn.timeline-service.client.retry-interval-ms</name><value>1000</value><source>programatically</source></property>
<property><name>dfs.encrypt.data.transfer.cipher.key.bitlength</name><value>128</value><source>hdfs-default.xml</source></property>
<property><name>yarn.timeline-service.entity-group-fs-store.with-user-dir</name><value>false</value><source>yarn-default.xml</source></property>
<property><name>hadoop.http.authentication.type</name><value>simple</value><source>core-default.xml</source></property>
<property><name>dfs.namenode.path.based.cache.refresh.interval.ms</name><value>30000</value><source>hdfs-default.xml</source></property>
<property><name>yarn.nodemanager.linux-container-executor.cgroups.mount-path</name><value>/cgroup</value><source>programatically</source></property>
<property><name>mapreduce.local.clientfactory.class.name</name><value>org.apache.hadoop.mapred.LocalClientFactory</value><source>mapred-default.xml</source></property>
<property><name>dfs.namenode.max.full.block.report.leases</name><value>6</value><source>hdfs-default.xml</source></property>
<property><name>dfs.datanode.cache.revocation.timeout.ms</name><value>900000</value><source>hdfs-default.xml</source></property>
<property><name>ipc.client.connection.maxidletime</name><value>30000</value><source>programatically</source></property>
<property><name>ipc.server.max.connections</name><value>0</value><source>core-default.xml</source></property>
<property><name>mapreduce.jobhistory.recovery.store.leveldb.path</name><value>/hadoop/mapreduce/jhs</value><source>programatically</source></property>
<property><name>dfs.namenode.safemode.threshold-pct</name><value>1</value><source>programatically</source></property>
<property><name>fs.s3a.multipart.purge.age</name><value>86400</value><source>core-default.xml</source></property>
<property><name>dfs.namenode.num.checkpoints.retained</name><value>2</value><source>hdfs-default.xml</source></property>
<property><name>mapreduce.jobhistory.webapp.xfs-filter.xframe-options</name><value>SAMEORIGIN</value><source>mapred-default.xml</source></property>
<property><name>yarn.timeline-service.client.best-effort</name><value>false</value><source>yarn-default.xml</source></property>
<property><name>fs.azure.authorization</name><value>false</value><source>core-default.xml</source></property>
<property><name>yarn.timeline-service.bind-host</name><value>0.0.0.0</value><source>programatically</source></property>
<property><name>mapreduce.job.ubertask.maxmaps</name><value>9</value><source>mapred-default.xml</source></property>
<property><name>dfs.namenode.stale.datanode.interval</name><value>30000</value><source>programatically</source></property>
<property><name>yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage</name><value>90</value><source>programatically</source></property>
<property><name>mapreduce.tasktracker.http.address</name><value>0.0.0.0:50060</value><source>mapred-default.xml</source></property>
<property><name>mapreduce.ifile.readahead.bytes</name><value>4194304</value><source>mapred-default.xml</source></property>
<property><name>mapreduce.jobhistory.webapp.rest-csrf.enabled</name><value>false</value><source>mapred-default.xml</source></property>
<property><name>yarn.sharedcache.uploader.server.thread-count</name><value>50</value><source>yarn-default.xml</source></property>
<property><name>mapreduce.jobhistory.admin.address</name><value>0.0.0.0:10033</value><source>mapred-default.xml</source></property>
<property><name>s3.client-write-packet-size</name><value>65536</value><source>core-default.xml</source></property>
<property><name>dfs.block.access.token.lifetime</name><value>600</value><source>hdfs-default.xml</source></property>
<property><name>yarn.app.mapreduce.am.resource.cpu-vcores</name><value>1</value><source>mapred-default.xml</source></property>
<property><name>mapreduce.input.lineinputformat.linespermap</name><value>1</value><source>mapred-default.xml</source></property>
<property><name>dfs.namenode.num.extra.edits.retained</name><value>1000000</value><source>hdfs-default.xml</source></property>
<property><name>mapreduce.reduce.shuffle.input.buffer.percent</name><value>0.7</value><source>programatically</source></property>
<property><name>hadoop.http.staticuser.user</name><value>dr.who</value><source>core-default.xml</source></property>
<property><name>mapreduce.reduce.maxattempts</name><value>4</value><source>mapred-default.xml</source></property>
<property><name>hadoop.security.group.mapping.ldap.search.filter.user</name><value>(&amp;(objectClass=user)(sAMAccountName={0}))</value><source>core-default.xml</source></property>
<property><name>mapreduce.jobhistory.admin.acl</name><value>*</value><source>mapred-default.xml</source></property>
<property><name>hadoop.workaround.non.threadsafe.getpwuid</name><value>false</value><source>core-default.xml</source></property>
<property><name>dfs.client.context</name><value>default</value><source>hdfs-default.xml</source></property>
<property><name>mapreduce.map.maxattempts</name><value>4</value><source>mapred-default.xml</source></property>
<property><name>yarn.timeline-service.entity-group-fs-store.active-dir</name><value>/ats/active/</value><source>programatically</source></property>
<property><name>yarn.resourcemanager.zk-retry-interval-ms</name><value>1000</value><source>programatically</source></property>
<property><name>mapreduce.jobhistory.cleaner.interval-ms</name><value>86400000</value><source>mapred-default.xml</source></property>
<property><name>dfs.datanode.drop.cache.behind.reads</name><value>false</value><source>hdfs-default.xml</source></property>
<property><name>dfs.permissions.superusergroup</name><value>hdfs</value><source>programatically</source></property>
<property><name>yarn.application.classpath</name><value>$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*</value><source>programatically</source></property>
<property><name>mapreduce.jobhistory.bind-host</name><value>0.0.0.0</value><source>programatically</source></property>
<property><name>fs.s3n.block.size</name><value>67108864</value><source>core-default.xml</source></property>
<property><name>hadoop.registry.system.acls</name><value>sasl:yarn@, sasl:mapred@, sasl:hdfs@</value><source>core-default.xml</source></property>
<property><name>yarn.nodemanager.kill-escape.user</name><value>hive</value><source>programatically</source></property>
<property><name>dfs.namenode.list.cache.pools.num.responses</name><value>100</value><source>hdfs-default.xml</source></property>
<property><name>dfs.datanode.slow.io.warning.threshold.ms</name><value>300</value><source>hdfs-default.xml</source></property>
<property><name>yarn.sharedcache.store.in-memory.check-period-mins</name><value>720</value><source>yarn-default.xml</source></property>
<property><name>fs.s3a.multiobjectdelete.enable</name><value>true</value><source>core-default.xml</source></property>
<property><name>yarn.nodemanager.bind-host</name><value>0.0.0.0</value><source>programatically</source></property>
<property><name>dfs.namenode.fs-limits.max-blocks-per-file</name><value>1048576</value><source>hdfs-default.xml</source></property>
<property><name>yarn.nodemanager.vmem-check-enabled</name><value>false</value><source>programatically</source></property>
<property><name>hadoop.security.authentication</name><value>simple</value><source>programatically</source></property>
<property><name>mapreduce.reduce.cpu.vcores</name><value>1</value><source>mapred-default.xml</source></property>
<property><name>net.topology.node.switch.mapping.impl</name><value>org.apache.hadoop.net.ScriptBasedMapping</value><source>core-default.xml</source></property>
<property><name>fs.s3.sleepTimeSeconds</name><value>10</value><source>core-default.xml</source></property>
<property><name>dfs.datanode.peer.stats.enabled</name><value>false</value><source>hdfs-default.xml</source></property>
<property><name>yarn.timeline-service.ttl-ms</name><value>2678400000</value><source>programatically</source></property>
<property><name>yarn.sharedcache.root-dir</name><value>/sharedcache</value><source>yarn-default.xml</source></property>
<property><name>yarn.resourcemanager.keytab</name><value>/etc/krb5.keytab</value><source>yarn-default.xml</source></property>
<property><name>yarn.resourcemanager.container.liveness-monitor.interval-ms</name><value>600000</value><source>yarn-default.xml</source></property>
<property><name>mapreduce.jobtracker.heartbeats.in.second</name><value>100</value><source>mapred-default.xml</source></property>
<property><name>yarn.node-labels.fs-store.root-dir</name><value>/system/yarn/node-labels</value><source>programatically</source></property>
<property><name>hadoop.security.group.mapping.ldap.posix.attr.gid.name</name><value>gidNumber</value><source>core-default.xml</source></property>
<property><name>yarn.app.mapreduce.am.scheduler.heartbeat.interval-ms</name><value>1000</value><source>mapred-default.xml</source></property>
<property><name>yarn.app.mapreduce.client-am.ipc.max-retries-on-timeouts</name><value>3</value><source>mapred-default.xml</source></property>
<property><name>yarn.nodemanager.linux-container-executor.cgroups.hierarchy</name><value>hadoop-yarn</value><source>programatically</source></property>
<property><name>yarn.resourcemanager.delegation-token.max-conf-size-bytes</name><value>12800</value><source>yarn-default.xml</source></property>
<property><name>s3.bytes-per-checksum</name><value>512</value><source>core-default.xml</source></property>
<property><name>hadoop.ssl.require.client.cert</name><value>false</value><source>core-default.xml</source></property>
<property><name>dfs.journalnode.http-address</name><value>0.0.0.0:8480</value><source>programatically</source></property>
<property><name>mapreduce.output.fileoutputformat.compress</name><value>false</value><source>programatically</source></property>
<property><name>fs.default.name</name><value>hdfs://sandbox-hdp.hortonworks.com:8020</value></property>
<property><name>dfs.ha.automatic-failover.enabled</name><value>false</value><source>hdfs-default.xml</source></property>
<property><name>dfs.namenode.metrics.logger.period.seconds</name><value>600</value><source>hdfs-default.xml</source></property>
<property><name>yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled</name><value>false</value><source>programatically</source></property>
<property><name>mapreduce.shuffle.max.threads</name><value>0</value><source>mapred-default.xml</source></property>
<property><name>mapred.job.tracker</name><value>http://sandbox-hdp.hortonworks.com:8050</value><source>programatically</source></property>
<property><name>mapreduce.jobhistory.webapp.rest-csrf.custom-header</name><value>X-XSRF-Header</value><source>mapred-default.xml</source></property>
<property><name>dfs.namenode.invalidate.work.pct.per.iteration</name><value>0.32f</value><source>hdfs-default.xml</source></property>
<property><name>s3native.client-write-packet-size</name><value>65536</value><source>core-default.xml</source></property>
<property><name>dfs.namenode.max-lock-hold-to-release-lease-ms</name><value>25</value><source>hdfs-default.xml</source></property>
<property><name>dfs.client.block.write.replace-datanode-on-failure.policy</name><value>DEFAULT</value><source>hdfs-default.xml</source></property>
<property><name>mapreduce.client.submit.file.replication</name><value>10</value><source>mapred-default.xml</source></property>
<property><name>yarn.app.mapreduce.am.job.committer.commit-window</name><value>10000</value><source>mapred-default.xml</source></property>
<property><name>dfs.namenode.audit.log.async</name><value>true</value><source>programatically</source></property>
<property><name>yarn.nodemanager.sleep-delay-before-sigkill.ms</name><value>250</value><source>yarn-default.xml</source></property>
<property><name>ssl.client.truststore.password</name><value>bigdata</value><source>programatically</source></property>
<property><name>yarn.nodemanager.env-whitelist</name><value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,HADOOP_YARN_HOME,HADOOP_HOME,PATH,LANG,TZ</value><source>yarn-default.xml</source></property>
<property><name>dfs.namenode.acls.enabled</name><value>false</value><source>hdfs-default.xml</source></property>
<property><name>dfs.namenode.secondary.http-address</name><value>sandbox-hdp.hortonworks.com:50090</value><source>programatically</source></property>
<property><name>mapreduce.map.speculative</name><value>false</value><source>programatically</source></property>
<property><name>mapreduce.job.speculative.slowtaskthreshold</name><value>1.0</value><source>mapred-default.xml</source></property>
<property><name>yarn.nodemanager.linux-container-executor.cgroups.mount</name><value>false</value><source>programatically</source></property>
<property><name>mapreduce.tasktracker.http.threads</name><value>40</value><source>mapred-default.xml</source></property>
<property><name>mapreduce.jobtracker.webinterface.trusted</name><value>false</value><source>programatically</source></property>
<property><name>mapreduce.jobhistory.http.policy</name><value>HTTP_ONLY</value><source>programatically</source></property>
<property><name>fs.s3a.paging.maximum</name><value>5000</value><source>core-default.xml</source></property>
<property><name>hadoop.kerberos.min.seconds.before.relogin</name><value>60</value><source>core-default.xml</source></property>
<property><name>yarn.resourcemanager.nodemanager-connect-retries</name><value>10</value><source>yarn-default.xml</source></property>
<property><name>fs.s3.buffer.dir</name><value>${hadoop.tmp.dir}/s3</value><source>core-default.xml</source></property>
<property><name>io.native.lib.available</name><value>true</value><source>core-default.xml</source></property>
<property><name>dfs.namenode.heartbeat.recheck-interval</name><value>300000</value><source>hdfs-default.xml</source></property>
<property><name>mapreduce.jobhistory.done-dir</name><value>/mr-history/done</value><source>programatically</source></property>
<property><name>hadoop.registry.zk.retry.interval.ms</name><value>1000</value><source>core-default.xml</source></property>
<property><name>mapreduce.job.reducer.unconditional-preempt.delay.sec</name><value>300</value><source>mapred-default.xml</source></property>
<property><name>dfs.namenode.avoid.write.stale.datanode</name><value>true</value><source>programatically</source></property>
<property><name>dfs.namenode.checkpoint.txns</name><value>1000000</value><source>programatically</source></property>
<property><name>hadoop.ssl.hostname.verifier</name><value>DEFAULT</value><source>core-default.xml</source></property>
<property><name>mapreduce.task.timeout</name><value>300000</value><source>programatically</source></property>
<property><name>yarn.nodemanager.disk-health-checker.interval-ms</name><value>120000</value><source>yarn-default.xml</source></property>
<property><name>adl.feature.ownerandgroup.enableupn</name><value>false</value><source>core-default.xml</source></property>
<property><name>dfs.journalnode.https-address</name><value>0.0.0.0:8481</value><source>programatically</source></property>
<property><name>hadoop.security.groups.cache.secs</name><value>300</value><source>core-default.xml</source></property>
<property><name>mapreduce.input.fileinputformat.split.minsize</name><value>0</value><source>mapred-default.xml</source></property>
<property><name>dfs.datanode.sync.behind.writes</name><value>false</value><source>hdfs-default.xml</source></property>
<property><name>yarn.resourcemanager.fail-fast</name><value>${yarn.fail-fast}</value><source>yarn-default.xml</source></property>
<property><name>dfs.namenode.full.block.report.lease.length.ms</name><value>300000</value><source>hdfs-default.xml</source></property>
<property><name>hadoop.proxyuser.hue.groups</name><value>*</value><source>programatically</source></property>
<property><name>ipc.server.tcpnodelay</name><value>true</value><source>programatically</source></property>
<property><name>mapreduce.shuffle.port</name><value>13562</value><source>programatically</source></property>
<property><name>hadoop.rpc.protection</name><value>authentication</value><source>core-default.xml</source></property>
<property><name>dfs.client.https.keystore.resource</name><value>ssl-client.xml</value><source>hdfs-default.xml</source></property>
<property><name>dfs.namenode.list.encryption.zones.num.responses</name><value>100</value><source>hdfs-default.xml</source></property>
<property><name>yarn.client.failover-proxy-provider</name><value>org.apache.hadoop.yarn.client.RequestHedgingRMFailoverProxyProvider</value><source>programatically</source></property>
<property><name>yarn.timeline-service.recovery.enabled</name><value>true</value><source>programatically</source></property>
<property><name>mapreduce.jobtracker.retiredjobs.cache.size</name><value>1000</value><source>mapred-default.xml</source></property>
<property><name>dfs.ha.tail-edits.period</name><value>60</value><source>hdfs-default.xml</source></property>
<property><name>dfs.datanode.drop.cache.behind.writes</name><value>false</value><source>hdfs-default.xml</source></property>
<property><name>fs.s3.maxRetries</name><value>4</value><source>core-default.xml</source></property>
<property><name>mapreduce.jobtracker.address</name><value>http://sandbox-hdp.hortonworks.com:8050</value><source>programatically</source></property>
<property><name>hadoop.http.authentication.kerberos.principal</name><value>HTTP/_HOST@LOCALHOST</value><source>core-default.xml</source></property>
<property><name>hadoop.security.group.mapping.ldap.posix.attr.uid.name</name><value>uidNumber</value><source>core-default.xml</source></property>
<property><name>nfs.server.port</name><value>2049</value><source>hdfs-default.xml</source></property>
<property><name>yarn.resourcemanager.webapp.address</name><value>sandbox-hdp.hortonworks.com:8088</value><source>programatically</source></property>
<property><name>mapreduce.task.profile.reduces</name><value>0-2</value><source>mapred-default.xml</source></property>
<property><name>yarn.timeline-service.client.max-retries</name><value>30</value><source>programatically</source></property>
<property><name>yarn.resourcemanager.am.max-attempts</name><value>2</value><source>programatically</source></property>
<property><name>ssl.client.truststore.type</name><value>jks</value><source>programatically</source></property>
<property><name>nfs.dump.dir</name><value>/tmp/.hdfs-nfs</value><source>hdfs-default.xml</source></property>
<property><name>mapred.job.name</name><value>oozie:action:T=sqoop:W=old-movies:A=sqoop-node:ID=0000000-180823180623663-oozie-oozi-W</value></property>
<property><name>dfs.bytes-per-checksum</name><value>512</value><source>hdfs-default.xml</source></property>
<property><name>mapreduce.job.end-notification.max.retry.interval</name><value>5000</value><source>mapred-default.xml</source></property>
<property><name>ipc.client.connect.retry.interval</name><value>1000</value><source>core-default.xml</source></property>
<property><name>fs.s3a.multipart.size</name><value>67108864</value><source>programatically</source></property>
<property><name>yarn.app.mapreduce.am.command-opts</name><value>-Xmx200m -Djava.io.tmpdir=./tmp</value><source>programatically</source></property>
<property><name>yarn.nodemanager.process-kill-wait.ms</name><value>2000</value><source>yarn-default.xml</source></property>
<property><name>yarn.timeline-service.state-store-class</name><value>org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore</value><source>programatically</source></property>
<property><name>yarn.nodemanager.container.stderr.tail.bytes</name><value>4096</value><source>yarn-default.xml</source></property>
<property><name>dfs.namenode.safemode.min.datanodes</name><value>0</value><source>hdfs-default.xml</source></property>
<property><name>yarn.timeline-service.client.fd-clean-interval-secs</name><value>60</value><source>yarn-default.xml</source></property>
<property><name>mapreduce.job.speculative.minimum-allowed-tasks</name><value>10</value><source>mapred-default.xml</source></property>
<property><name>dfs.namenode.write.stale.datanode.ratio</name><value>1.0f</value><source>programatically</source></property>
<property><name>hadoop.jetty.logs.serve.aliases</name><value>true</value><source>core-default.xml</source></property>
<property><name>oozie.sqoop.args.size</name><value>13</value><source>programatically</source></property>
<property><name>yarn.resourcemanager.webapp.proxyuser.hcat.groups</name><value>*</value><source>programatically</source></property>
<property><name>mapreduce.reduce.shuffle.fetch.retry.timeout-ms</name><value>30000</value><source>programatically</source></property>
<property><name>fs.du.interval</name><value>600000</value><source>core-default.xml</source></property>
<property><name>yarn.resourcemanager.webapp.proxyuser.hcat.hosts</name><value>*</value><source>programatically</source></property>
<property><name>mapreduce.tasktracker.dns.nameserver</name><value>default</value><source>mapred-default.xml</source></property>
<property><name>yarn.sharedcache.admin.address</name><value>0.0.0.0:8047</value><source>yarn-default.xml</source></property>
<property><name>mapreduce.admin.reduce.child.java.opts</name><value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=2.6.5.0-292</value><source>programatically</source></property>
<property><name>hadoop.custom-extensions.root</name><value>/hdp/ext/2.6/hadoop</value><source>programatically</source></property>
<property><name>mapred.job.reduce.memory.mb</name><value>250</value><source>programatically</source></property>
<property><name>hadoop.security.random.device.file.path</name><value>/dev/urandom</value><source>core-default.xml</source></property>
<property><name>mapreduce.task.merge.progress.records</name><value>10000</value><source>mapred-default.xml</source></property>
<property><name>dfs.webhdfs.enabled</name><value>true</value><source>programatically</source></property>
<property><name>hadoop.registry.secure</name><value>false</value><source>core-default.xml</source></property>
<property><name>hadoop.ssl.client.conf</name><value>ssl-client.xml</value><source>core-default.xml</source></property>
<property><name>mapreduce.job.counters.max</name><value>130</value><source>programatically</source></property>
<property><name>yarn.nodemanager.localizer.fetch.thread-count</name><value>4</value><source>yarn-default.xml</source></property>
<property><name>io.mapfile.bloom.size</name><value>1048576</value><source>core-default.xml</source></property>
<property><name>yarn.nodemanager.localizer.client.thread-count</name><value>5</value><source>yarn-default.xml</source></property>
<property><name>fs.automatic.close</name><value>true</value><source>core-default.xml</source></property>
<property><name>mapreduce.task.profile</name><value>false</value><source>mapred-default.xml</source></property>
<property><name>yarn.nodemanager.recovery.compaction-interval-secs</name><value>3600</value><source>yarn-default.xml</source></property>
<property><name>dfs.namenode.edit.log.autoroll.multiplier.threshold</name><value>2.0</value><source>hdfs-default.xml</source></property>
<property><name>mapreduce.task.combine.progress.records</name><value>10000</value><source>mapred-default.xml</source></property>
<property><name>mapreduce.shuffle.ssl.file.buffer.size</name><value>65536</value><source>mapred-default.xml</source></property>
<property><name>yarn.app.mapreduce.client.job.max-retries</name><value>30</value><source>programatically</source></property>
<property><name>fs.swift.impl</name><value>org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem</value><source>core-default.xml</source></property>
<property><name>yarn.app.mapreduce.am.container.log.backups</name><value>0</value><source>mapred-default.xml</source></property>
<property><name>dfs.datanode.available-space-volume-choosing-policy.balanced-space-preference-fraction</name><value>0.75f</value><source>hdfs-default.xml</source></property>
<property><name>dfs.namenode.backup.address</name><value>0.0.0.0:50100</value><source>hdfs-default.xml</source></property>
<property><name>dfs.client.https.need-auth</name><value>false</value><source>hdfs-default.xml</source></property>
<property><name>mapreduce.app-submission.cross-platform</name><value>false</value><source>mapred-default.xml</source></property>
<property><name>mapreduce.job.name</name><value>oozie:action:T=sqoop:W=old-movies:A=sqoop-node:ID=0000000-180823180623663-oozie-oozi-W</value><source>because mapred.job.name is deprecated</source></property>
<property><name>yarn.timeline-service.ttl-enable</name><value>true</value><source>programatically</source></property>
<property><name>hadoop.security.group.mapping.ldap.conversion.rule</name><value>none</value><source>core-default.xml</source></property>
<property><name>dfs.user.home.dir.prefix</name><value>/user</value><source>hdfs-default.xml</source></property>
<property><name>yarn.nodemanager.container-monitor.procfs-tree.smaps-based-rss.enabled</name><value>false</value><source>yarn-default.xml</source></property>
<property><name>yarn.nodemanager.keytab</name><value>/etc/krb5.keytab</value><source>yarn-default.xml</source></property>
<property><name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name><value>true</value><source>programatically</source></property>
<property><name>fs.azure.authorization.caching.enable</name><value>true</value><source>core-default.xml</source></property>
<property><name>dfs.namenode.xattrs.enabled</name><value>true</value><source>hdfs-default.xml</source></property>
<property><name>yarn.app.mapreduce.am.admin-command-opts</name><value>-Dhdp.version=2.6.5.0-292</value><source>programatically</source></property>
<property><name>nfs.file.dump.dir</name><value>/tmp/.hdfs-nfs</value><source>programatically</source></property>
<property><name>dfs.client.write.exclude.nodes.cache.expiry.interval.millis</name><value>600000</value><source>hdfs-default.xml</source></property>
<property><name>dfs.datanode.fileio.profiling.sampling.percentage</name><value>0</value><source>hdfs-default.xml</source></property>
<property><name>yarn.sharedcache.client-server.address</name><value>0.0.0.0:8045</value><source>yarn-default.xml</source></property>
<property><name>mapreduce.jobtracker.restart.recover</name><value>false</value><source>mapred-default.xml</source></property>
<property><name>mapreduce.map.skip.proc.count.autoincr</name><value>true</value><source>mapred-default.xml</source></property>
<property><name>dfs.namenode.datanode.registration.ip-hostname-check</name><value>true</value><source>hdfs-default.xml</source></property>
<property><name>dfs.image.transfer.chunksize</name><value>65536</value><source>hdfs-default.xml</source></property>
<property><name>yarn.nodemanager.webapp.cross-origin.enabled</name><value>false</value><source>yarn-default.xml</source></property>
<property><name>yarn.nodemanager.runtime.linux.docker.privileged-containers.allowed</name><value>false</value><source>yarn-default.xml</source></property>
<property><name>hadoop.security.instrumentation.requires.admin</name><value>false</value><source>core-default.xml</source></property>
<property><name>io.compression.codec.bzip2.library</name><value>system-native</value><source>core-default.xml</source></property>
<property><name>yarn.nodemanager.webapp.rest-csrf.methods-to-ignore</name><value>GET,OPTIONS,HEAD</value><source>yarn-default.xml</source></property>
<property><name>dfs.namenode.name.dir.restore</name><value>true</value><source>programatically</source></property>
<property><name>dfs.datanode.outliers.report.interval</name><value>1800000</value><source>hdfs-default.xml</source></property>
<property><name>dfs.namenode.resource.checked.volumes.minimum</name><value>1</value><source>hdfs-default.xml</source></property>
<property><name>hadoop.ssl.keystores.factory.class</name><value>org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory</value><source>core-default.xml</source></property>
<property><name>dfs.namenode.list.cache.directives.num.responses</name><value>100</value><source>hdfs-default.xml</source></property>
<property><name>fs.ftp.host</name><value>0.0.0.0</value><source>core-default.xml</source></property>
<property><name>yarn.app.mapreduce.am.containerlauncher.threadpool-initial-size</name><value>10</value><source>mapred-default.xml</source></property>
<property><name>yarn.nodemanager.log-aggregation.debug-enabled</name><value>false</value><source>programatically</source></property>
<property><name>s3.blocksize</name><value>67108864</value><source>core-default.xml</source></property>
<property><name>s3native.stream-buffer-size</name><value>4096</value><source>core-default.xml</source></property>
<property><name>mapreduce.jobtracker.taskscheduler</name><value>org.apache.hadoop.mapred.JobQueueTaskScheduler</value><source>mapred-default.xml</source></property>
<property><name>dfs.datanode.dns.nameserver</name><value>default</value><source>hdfs-default.xml</source></property>
<property><name>yarn.nodemanager.resource.memory-mb</name><value>3000</value><source>programatically</source></property>
<property><name>yarn.log.server.web-service.url</name><value>http://sandbox-hdp.hortonworks.com:8188/ws/v1/applicationhistory</value><source>programatically</source></property>
<property><name>mapreduce.task.userlog.limit.kb</name><value>0</value><source>mapred-default.xml</source></property>
<property><name>hadoop.security.crypto.codec.classes.aes.ctr.nopadding</name><value>org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec,org.apache.hadoop.crypto.JceAesCtrCryptoCodec</value><source>core-default.xml</source></property>
<property><name>mapreduce.reduce.speculative</name><value>false</value><source>programatically</source></property>
<property><name>yarn.nodemanager.container-monitor.interval-ms</name><value>3000</value><source>programatically</source></property>
<property><name>yarn.node-labels.fs-store.impl.class</name><value>org.apache.hadoop.yarn.nodelabels.FileSystemNodeLabelsStore</value><source>yarn-default.xml</source></property>
<property><name>net.topology.script.file.name</name><value>/etc/hadoop/conf/topology_script.py</value><source>programatically</source></property>
<property><name>yarn.nodemanager.kill-escape.launch-command-line</name><value>slider-agent,LLAP</value><source>programatically</source></property>
<property><name>dfs.replication.max</name><value>50</value><source>programatically</source></property>
<property><name>dfs.replication</name><value>1</value><source>programatically</source></property>
<property><name>yarn.client.failover-retries</name><value>0</value><source>yarn-default.xml</source></property>
<property><name>yarn.nodemanager.resource.cpu-vcores</name><value>8</value><source>programatically</source></property>
<property><name>mapreduce.jobhistory.recovery.enable</name><value>true</value><source>programatically</source></property>
<property><name>mapreduce.job.classpath.files</name><value>hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/sqoop/hadoop-azure-datalake-2.7.3.2.6.5.0-292.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/sqoop/snappy-java-1.1.1.3.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/sqoop/aws-java-sdk-core-1.10.6.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/sqoop/avro-1.8.0.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/sqoop/aws-java-sdk-kms-1.10.6.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/sqoop/sqoop-1.4.6.2.6.5.0-292.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/sqoop/azure-storage-5.4.0.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/sqoop/jsr305-3.0.2.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/sqoop/commons-io-2.4.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/sqoop/jackson-annotations-2.4.0.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/sqoop/jackson-core-2.4.4.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/sqoop/mysql-connector-java.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/sqoop/aws-java-sdk-s3-1.10.6.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/sqoop/hadoop-azure-2.7.3.2.6.5.0-292.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/sqoop/azure-keyvault-core-0.8.0.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/sqoop/jackson-databind-2.4.4.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/sqoop/okhttp-2.7.5.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/sqoop/gcs-connector-1.8.1.2.6.5.0-292-shaded.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/sqoop/hadoop-aws-2.7.3.2.6.5.0-292.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/sqoop/guava-11.0.2.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/sqoop/okio-1.6.0.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/sqoop/xz-1.5.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/sqoop/commons-compress-1.8.1.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/sqoop/paranamer-2.7.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/sqoop/azure-data-lake-store-sdk-2.2.5.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/sqoop/hsqldb-1.8.0.7.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/sqoop/commons-lang3-3.4.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/sqoop/joda-time-2.9.6.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/sqoop/oozie-sharelib-sqoop-4.2.0.2.6.5.0-292.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/oozie/aws-java-sdk-core-1.10.6.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/oozie/aws-java-sdk-kms-1.10.6.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/oozie/aws-java-sdk-s3-1.10.6.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/oozie/azure-data-lake-store-sdk-2.2.5.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/oozie/azure-keyvault-core-0.8.0.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/oozie/azure-storage-5.4.0.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/oozie/commons-lang3-3.4.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/oozie/gcs-connector-1.8.1.2.6.5.0-292-shaded.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/oozie/guava-11.0.2.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/oozie/hadoop-aws-2.7.3.2.6.5.0-292.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/oozie/hadoop-azure-2.7.3.2.6.5.0-292.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/oozie/hadoop-azure-datalake-2.7.3.2.6.5.0-292.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/oozie/jackson-annotations-2.4.0.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/oozie/jackson-core-2.4.4.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/oozie/jackson-databind-2.4.4.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/oozie/joda-time-2.9.6.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/oozie/json-simple-1.1.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/oozie/jsr305-3.0.2.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/oozie/okhttp-2.7.5.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/oozie/okio-1.6.0.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/oozie/oozie-hadoop-utils-hadoop-2-4.2.0.2.6.5.0-292.jar,hdfs://sandbox-hdp.hortonworks.com:8020/user/oozie/share/lib/lib_20180618160835/oozie/oozie-sharelib-oozie-4.2.0.2.6.5.0-292.jar</value><source>programatically</source></property>
<property><name>nfs.exports.allowed.hosts</name><value>* rw</value><source>programatically</source></property>
<property><name>yarn.sharedcache.checksum.algo.impl</name><value>org.apache.hadoop.yarn.sharedcache.ChecksumSHA256Impl</value><source>yarn-default.xml</source></property>
<property><name>mapreduce.reduce.shuffle.memory.limit.percent</name><value>0.25</value><source>mapred-default.xml</source></property>
<property><name>file.replication</name><value>1</value><source>core-default.xml</source></property>
<property><name>mapreduce.job.reduce.shuffle.consumer.plugin.class</name><value>org.apache.hadoop.mapreduce.task.reduce.Shuffle</value><source>mapred-default.xml</source></property>
<property><name>yarn.app.mapreduce.am.log.level</name><value>INFO</value><source>programatically</source></property>
<property><name>yarn.nodemanager.webapp.rest-csrf.enabled</name><value>false</value><source>yarn-default.xml</source></property>
<property><name>mapreduce.job.jvm.numtasks</name><value>1</value><source>mapred-default.xml</source></property>
<property><name>dfs.datanode.fsdatasetcache.max.threads.per.volume</name><value>4</value><source>hdfs-default.xml</source></property>
<property><name>mapreduce.am.max-attempts</name><value>2</value><source>programatically</source></property>
<property><name>mapreduce.shuffle.connection-keep-alive.timeout</name><value>5</value><source>mapred-default.xml</source></property>
<property><name>yarn.timeline-service.plugin.enabled</name><value>true</value><source>programatically</source></property>
<property><name>hadoop.fuse.timer.period</name><value>5</value><source>hdfs-default.xml</source></property>
<property><name>mapreduce.job.reduces</name><value>1</value><source>mapred-default.xml</source></property>
<property><name>hadoop.security.group.mapping.ldap.connection.timeout.ms</name><value>60000</value><source>core-default.xml</source></property>
<property><name>job.end.notification.url</name><value>http://sandbox-hdp.hortonworks.com:11000/oozie/callback?id=0000000-180823180623663-oozie-oozi-W@sqoop-node&amp;status=$jobStatus</value></property>
<property><name>yarn.app.mapreduce.am.job.task.listener.thread-count</name><value>30</value><source>mapred-default.xml</source></property>
<property><name>yarn.resourcemanager.store.class</name><value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value><source>programatically</source></property>
<property><name>dfs.client.retry.policy.enabled</name><value>false</value><source>programatically</source></property>
<property><name>s3native.replication</name><value>3</value><source>core-default.xml</source></property>
<property><name>mapreduce.tasktracker.reduce.tasks.maximum</name><value>2</value><source>mapred-default.xml</source></property>
<property><name>fs.permissions.umask-mode</name><value>022</value><source>programatically</source></property>
<property><name>mapreduce.cluster.local.dir</name><value>${hadoop.tmp.dir}/mapred/local</value><source>mapred-default.xml</source></property>
<property><name>mapreduce.client.output.filter</name><value>FAILED</value><source>mapred-default.xml</source></property>
<property><name>yarn.nodemanager.pmem-check-enabled</name><value>false</value><source>programatically</source></property>
<property><name>dfs.client.failover.connection.retries.on.timeouts</name><value>0</value><source>hdfs-default.xml</source></property>
<property><name>mapreduce.jobtracker.instrumentation</name><value>org.apache.hadoop.mapred.JobTrackerMetricsInst</value><source>mapred-default.xml</source></property>
<property><name>ftp.replication</name><value>3</value><source>core-default.xml</source></property>
<property><name>yarn.timeline-service.webapp.rest-csrf.methods-to-ignore</name><value>GET,OPTIONS,HEAD</value><source>yarn-default.xml</source></property>
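
One detail in this dump worth flagging: mapred.job.tracker and mapreduce.jobtracker.address both carry an http:// scheme (http://sandbox-hdp.hortonworks.com:8050). These are RPC endpoints, and the jobTracker value in an Oozie workflow is normally plain host:port; Hadoop's address parser often tolerates the scheme (the error below does reach the right host and port), so treat this as cleanup rather than the root cause. Corrected entries, using the hostname from this same dump, would look like:

<property><name>mapred.job.tracker</name><value>sandbox-hdp.hortonworks.com:8050</value></property>
<property><name>mapreduce.jobtracker.address</name><value>sandbox-hdp.hortonworks.com:8050</value></property>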

Re: Oozie job always in running state


Error from the Oozie web console:

2018-08-23 18:49:03,792  WARN ActionStartXCommand:523 - SERVER[sandbox-hdp.hortonworks.com] USER[maria_dev] GROUP[-] TOKEN[] APP[old-movies] JOB[0000000-180823180623663-oozie-oozi-W] ACTION[0000000-180823180623663-oozie-oozi-W@sqoop-node] Error starting action [sqoop-node]. ErrorType [TRANSIENT], ErrorCode [  JA006], Message [  JA006: Call From sandbox-hdp.hortonworks.com/172.18.0.2 to sandbox-hdp.hortonworks.com:8050 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused]
org.apache.oozie.action.ActionExecutorException:   JA006: Call From sandbox-hdp.hortonworks.com/172.18.0.2 to sandbox-hdp.hortonworks.com:8050 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
at org.apache.oozie.action.ActionExecutor.convertExceptionHelper(ActionExecutor.java:457)
at org.apache.oozie.action.ActionExecutor.convertException(ActionExecutor.java:437)
at org.apache.oozie.action.hadoop.JavaActionExecutor.submitLauncher(JavaActionExecutor.java:1258)
at org.apache.oozie.action.hadoop.JavaActionExecutor.start(JavaActionExecutor.java:1440)
at org.apache.oozie.command.wf.ActionStartXCommand.execute(ActionStartXCommand.java:234)
at org.apache.oozie.command.wf.ActionStartXCommand.execute(ActionStartXCommand.java:65)
at org.apache.oozie.command.XCommand.call(XCommand.java:287)
at org.apache.oozie.service.CallableQueueService$CompositeCallable.call(CallableQueueService.java:331)
at org.apache.oozie.service.CallableQueueService$CompositeCallable.call(CallableQueueService.java:260)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at org.apache.oozie.service.CallableQueueService$CallableWrapper.run(CallableQueueService.java:178)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
Caused by: java.net.ConnectException: Call From sandbox-hdp.hortonworks.com/172.18.0.2 to sandbox-hdp.hortonworks.com:8050 failed on connection exception: java.net.ConnectException: Connection refused; For more details see:  http://wiki.apache.org/hadoop/ConnectionRefused
at sun.reflect.GeneratedConstructorAccessor74.newInstance(Unknown Source)
at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
at java.lang.reflect.Constructor.newInstance(Constructor.java:423)
at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:801)
at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:732)
at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1558)
at org.apache.hadoop.ipc.Client.call(Client.java:1498)
at org.apache.hadoop.ipc.Client.call(Client.java:1398)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:233)
at com.sun.proxy.$Proxy33.getDelegationToken(Unknown Source)
at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getDelegationToken(ApplicationClientProtocolPBClientImpl.java:310)
at sun.reflect.GeneratedMethodAccessor30.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:290)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:202)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:184)
at com.sun.proxy.$Proxy34.getDelegationToken(Unknown Source)
at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getRMDelegationToken(YarnClientImpl.java:550)
at org.apache.hadoop.mapred.ResourceMgrDelegate.getDelegationToken(ResourceMgrDelegate.java:176)
at org.apache.hadoop.mapred.YARNRunner.getDelegationToken(YARNRunner.java:232)
at org.apache.hadoop.mapreduce.Cluster.getDelegationToken(Cluster.java:401)
at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1240)
at org.apache.hadoop.mapred.JobClient$16.run(JobClient.java:1237)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1869)
at org.apache.hadoop.mapred.JobClient.getDelegationToken(JobClient.java:1236)
at org.apache.oozie.service.HadoopAccessorService.addRMDelegationToken(HadoopAccessorService.java:525)
at org.apache.oozie.action.hadoop.JavaActionExecutor.submitLauncher(JavaActionExecutor.java:1217)
... 11 more
Caused by: java.net.ConnectException: Connection refused
at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:717)
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:650)
at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:745)
at org.apache.hadoop.ipc.Client$Connection.access$3200(Client.java:397)
at org.apache.hadoop.ipc.Client.getConnection(Client.java:1620)
at org.apache.hadoop.ipc.Client.call(Client.java:1451)
... 34 more
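
The decisive part of this trace is the last cause: java.net.ConnectException: Connection refused when Oozie asks sandbox-hdp.hortonworks.com:8050 (the ResourceManager RPC address) for a delegation token. Nothing was accepting connections on that port, so the launcher job can never be submitted; and because Oozie classifies JA006 as ErrorType [TRANSIENT], it keeps retrying instead of failing, which is why the workflow sits in RUNNING forever. On the HDP sandbox the usual fix is to start or restart YARN (the ResourceManager) from Ambari and re-run the workflow. Before that, a quick probe of the port confirms whether the ResourceManager is up; a minimal sketch, with host and port taken from the JA006 message above:

import socket

# Probe the ResourceManager RPC endpoint that Oozie failed to reach.
# Host and port come from the JA006 / ConnectException message above.
addr = ("sandbox-hdp.hortonworks.com", 8050)
try:
    with socket.create_connection(addr, timeout=5):
        print("ResourceManager is accepting connections on %s:%d" % addr)
except OSError as err:
    print("Connection to %s:%d failed (%s) -- ResourceManager is likely down" % (addr + (err,)))

If the probe still fails after YARN reports as started, check that yarn.resourcemanager.address in the running configuration really is sandbox-hdp.hortonworks.com:8050; the visible part of the dump above only shows the scheduler address (8030) and webapp address (8088) explicitly.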