SYMPTOM: Cluster was upgraded to 2.3.
After the upgrade, Oozie has configuration issues. The user has a workflow defined to create its job files under /tmp/hadoop-${user.name}/job_details, but the directories are instead created under the HDFS root (/) and a permission-denied error is thrown.
ERROR:
Sample workflow:
<workflow-app xmlns='uri:oozie:workflow:0.5' name='scisit_all_oozie_workflow'>
<global>
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<job-xml>${runtime}/runtime_params.xml</job-xml>
<job-xml>scisit_all_tables_config.xml</job-xml>
<job-xml>ColumnTransformationRules.xml</job-xml>
<job-xml>HeadersAndTrailers.xml</job-xml>
<configuration>
<property>
<name>oozie.use.system.libpath</name>
<value>true</value>
</property>
<property>
<name>oozie.action.sharelib.for.java</name>
<value>hive</value>
</property>
<property>
<name>mapreduce.map.maxattempts</name>
<value>1</value>
</property>
<property>
<name>mapreduce.reduce.maxattempts</name>
<value>1</value>
</property>
<property>
<name>mapred.job.queue.name</name>
<value>${queueName}</value>
</property>
<property>
<name>mapreduce.input.fileinputformat.split.maxsize</name>
<value>134217728</value>
</property>
<property>
<name>mapreduce.map.output.compress</name>
<value>true</value>
</property>
<property>
<name>mapreduce.map.output.compress.codec</name>
<value>org.apache.hadoop.io.compress.SnappyCodec</value>
</property>
<property>
<name>mapreduce.output.fileoutputformat.compress</name>
<value>true</value>
</property>
<property>
<name>mapreduce.output.fileoutputformat.compress.codec</name>
<value>org.apache.hadoop.io.compress.SnappyCodec</value>
</property>
<property>
<name>edmhdpif.hive.warehouse</name>
<value>${hiveWarehouseDataDir}</value>
</property>
<property>
<name>edmhdpif.individual.tableprefix</name>
<value>scisit_all_</value>
</property>
<property>
<name>edmhdpif.cdccolumns</name>
<value>${cdcColumns}</value>
</property>
<property>
<name>edmhdpif.rowcounts.database</name>
<value>${falcon_rowcounts_database}</value>
</property>
<property>
<name>edmhdpif.rowcounts.table</name>
<value>${falcon_rowcounts_table}</value>
</property>
<property>
<name>edmhdpif.rowcounts.partition</name>
<value>${falcon_rowcounts_partitions_java}</value>
</property>
<property>
<name>edmhdpif.rerun.table</name>
<value>${wf:conf('edmhdpif.rerun.table')}</value>
</property>
<property>
<name>edmhdpif.fixwidth</name>
<value>${fixWidth}</value>
</property>
<property>
<name>edmhdpif.delimiter.framework</name>
<value>${frmDelimiter}</value>
</property>
<property>
<name>edmhdpif.delimiter.data</name>
<value>${dataDelimiter}</value>
</property>
<property>
<name>edmhdpif.hive.outputformat</name>
<value>${fileType}</value>
</property>
</configuration>
</global>
<start to="decision-containervalidator" />
<decision name="decision-containervalidator">
<switch>
<case to="containervalidatorjava">${containerValidatorType=="java"}</case>
<case to="containervalidatorpig">${containerValidatorType=="pig"}</case>
<case to="containervalidatorhive">${containerValidatorType=="hive"}</case>
<default to="rowid" />
</switch>
</decision>
<action name="containervalidatorjava">
<java>
<configuration>
<property>
<name>edmhdpif.input.database</name>
<value>${falcon_input_database}</value>
</property>
<property>
<name>edmhdpif.input.table</name>
<value>${falcon_input_table}</value>
</property>
<property>
<name>edmhdpif.input.partition</name>
<value>${falcon_input_partition_filter_java}</value>
</property>
<property>
<name>edmhdpif.containervalidator.args</name>
<value>${containerValidatorArgs}</value>
</property>
<property>
<name>edmhdpif.output.path</name>
<value>${wf:conf('hadoop.tmp.dir')}/${falcon_containervalidation_table}/${falcon_containervalidation_dated_partition_value_fvds}</value>
</property>
</configuration>
<main-class>${containerValidatorCodeFile}</main-class>
</java>
<ok to="hive-add-partitions-after-containervalidator" />
<error to="fail" />
</action>
<action name="containervalidatorpig">
<pig>
<configuration>
<property>
<name>edmhdpif.input.database</name>
<value>${falcon_input_database}</value>
</property>
<property>
<name>edmhdpif.input.table</name>
<value>${falcon_input_table}</value>
</property>
<property>
<name>edmhdpif.input.partition</name>
<value>${falcon_input_partition_filter_java}</value>
</property>
<property>
<name>edmhdpif.containervalidator.args</name>
<value>${containerValidatorArgs}</value>
</property>
<property>
<name>edmhdpif.output.path</name>
<value>${wf:conf('hadoop.tmp.dir')}/${falcon_containervalidation_table}/${falcon_containervalidation_dated_partition_value_fvds}</value>
</property>
</configuration>
<script>${containerValidatorCodeFile}</script>
</pig>
<ok to="hive-add-partitions-after-containervalidator" />
<error to="fail" />
</action>
<action name="containervalidatorhive">
<hive xmlns="uri:oozie:hive-action:0.5">
<job-xml>${wf:appPath()}/conf/hive-site.xml</job-xml>
<job-xml>${wf:appPath()}/conf/tez-site.xml</job-xml>
<configuration>
<property>
<name>edmhdpif.input.database</name>
<value>${falcon_input_database}</value>
</property>
<property>
<name>edmhdpif.input.table</name>
<value>${falcon_input_table}</value>
</property>
<property>
<name>edmhdpif.input.partition</name>
<value>${falcon_input_partition_filter_java}</value>
</property>
<property>
<name>edmhdpif.containervalidator.args</name>
<value>${containerValidatorArgs}</value>
</property>
<property>
<name>edmhdpif.output.path</name>
<value>${wf:conf('hadoop.tmp.dir')}/${falcon_containervalidation_table}/${falcon_containervalidation_dated_partition_value_fvds}</value>
</property>
</configuration>
<script>${containerValidatorCodeFile}</script>
</hive>
<ok to="hive-add-partitions-after-containervalidator" />
<error to="fail" />
</action>
<action name="hive-add-partitions-after-containervalidator">
<hive xmlns="uri:oozie:hive-action:0.5">
<job-xml>${wf:appPath()}/conf/hive-site.xml</job-xml>
<job-xml>${wf:appPath()}/conf/tez-site.xml</job-xml>
<script>${wf:appPath()}/scisit_all_add_partitions_after_containervalidation.hql</script>
<param>param_dated_partition_value=${falcon_rowid_dated_partition_value_rds}</param>
</hive>
<ok to="rowid" />
<error to="fail" />
</action>
<action name="rowid">
<java>
<configuration>
<property>
<name>edmhdpif.input.database</name>
<value>${falcon_input_database}</value>
</property>
<property>
<name>edmhdpif.input.table</name>
<value>${falcon_input_table}</value>
</property>
<property>
<name>edmhdpif.input.partition</name>
<value>${falcon_input_partition_filter_java}</value>
</property>
<property>
<name>edmhdpif.rowid.database</name>
<value>${falcon_rowid_database}</value>
</property>
<property>
<name>edmhdpif.rowid.table</name>
<value>${falcon_rowid_table}</value>
</property>
<property>
<name>edmhdpif.rowid.partition</name>
<value>${falcon_rowid_partitions_java}</value>
</property>
<property>
<name>edmhdpif.rowhistory.database</name>
<value>${falcon_rowhistory_database}</value>
</property>
<property>
<name>edmhdpif.rowhistory.table</name>
<value>${falcon_rowhistory_table}</value>
</property>
<property>
<name>edmhdpif.rowhistory.partition</name>
<value>${falcon_rowhistory_partitions_java}</value>
</property>
<property>
<name>edmhdpif.output.path</name>
<value>${wf:conf('hadoop.tmp.dir')}/${falcon_input_table}/${falcon_rowid_dated_partition_value_rds}</value>
</property>
<property>
<name>edmhdpif.containervalidator.type</name>
<value>${containerValidatorType}</value>
</property>
</configuration>
<main-class>com.scb.edmhdpif.rowid.RowId</main-class>
</java>
<ok to="hive-add-partitions-after-rowid" />
<error to="fail" />
</action>
<action name="hive-add-partitions-after-rowid">
<hive xmlns="uri:oozie:hive-action:0.5">
<job-xml>${wf:appPath()}/conf/hive-site.xml</job-xml>
<job-xml>${wf:appPath()}/conf/tez-site.xml</job-xml>
<script>${wf:appPath()}/scisit_all_add_partitions_after_rowid.hql</script>
<param>param_dated_partition_value=${falcon_rowid_dated_partition_value_rds}</param>
</hive>
<ok to="decision-datatransform" />
<error to="fail" />
</action>
<decision name="decision-datatransform">
<switch>
<case to="datatransform">${dataTransform=="REQUIRED"}</case>
<default to="decision-typevalidator" />
</switch>
</decision>
<action name="datatransform">
<java>
<configuration>
<property>
<name>edmhdpif.input.database</name>
<value>${falcon_rowid_database}</value>
</property>
<property>
<name>edmhdpif.input.table</name>
<value>${falcon_rowid_table}</value>
</property>
<property>
<name>edmhdpif.input.partition</name>
<value>${falcon_rowid_partitions_java}</value>
</property>
<property>
<name>edmhdpif.datatransform.valid.database</name>
<value>${falcon_datatransformvalid_database}</value>
</property>
<property>
<name>edmhdpif.datatransform.valid.table</name>
<value>${falcon_datatransformvalid_table}</value>
</property>
<property>
<name>edmhdpif.datatransform.valid.partition</name>
<value>${falcon_datatransformvalid_partitions_java}</value>
</property>
<property>
<name>edmhdpif.datatransform.invalid.database</name>
<value>${falcon_datatransforminvalid_database}</value>
</property>
<property>
<name>edmhdpif.datatransform.invalid.table</name>
<value>${falcon_datatransforminvalid_table}</value>
</property>
<property>
<name>edmhdpif.datatransform.invalid.partition</name>
<value>${falcon_datatransforminvalid_partitions_java}</value>
</property>
<property>
<name>edmhdpif.output.path</name>
<value>${wf:conf('hadoop.tmp.dir')}/${falcon_rowid_table}/${falcon_rowid_dated_partition_value_rds}</value>
</property>
<property>
<name>oozie.action.sharelib.for.java</name>
<value>hive,libserver</value>
</property>
</configuration>
<main-class>com.scb.edmhdpif.datatransform.DataTransform</main-class>
</java>
<ok to="hive-add-partitions-after-datatransform" />
<error to="fail" />
</action>
<action name="hive-add-partitions-after-datatransform">
<hive xmlns="uri:oozie:hive-action:0.5">
<job-xml>${wf:appPath()}/conf/hive-site.xml</job-xml>
<job-xml>${wf:appPath()}/conf/tez-site.xml</job-xml>
<script>${wf:appPath()}/scisit_all_add_partitions_after_datatransform.hql</script>
<param>param_dated_partition_value=${falcon_rowid_dated_partition_value_rds}</param>
</hive>
<ok to="decision-typevalidator" />
<error to="fail" />
</action>
<decision name="decision-typevalidator">
<switch>
<case to="typevalidatorjava">${typeValidatorType=="java"}</case>
<case to="typevalidatorpig">${typeValidatorType=="pig"}</case>
<case to="typevalidatorhive">${typeValidatorType=="hive"}</case>
<default to="decision-sri" />
</switch>
</decision>
<action name="typevalidatorjava">
<java>
<configuration>
<property>
<name>edmhdpif.input.database</name>
<value>${falcon_datatransformvalid_database}</value>
</property>
<property>
<name>edmhdpif.input.table</name>
<value>${falcon_datatransformvalid_table}</value>
</property>
<property>
<name>edmhdpif.input.partition</name>
<value>${falcon_datatransformvalid_partitions_java}</value>
</property>
<property>
<name>edmhdpif.typevalidator.validtypes.database</name>
<value>${falcon_verify_database}</value>
</property>
<property>
<name>edmhdpif.typevalidator.validtypes.table</name>
<value>${falcon_verify_table}</value>
</property>
<property>
<name>edmhdpif.typevalidator.validtypes.partition</name>
<value>${falcon_verify_partitions_java}</value>
</property>
<property>
<name>edmhdpif.typevalidator.invalidtypes.database</name>
<value>${falcon_invalid_database}</value>
</property>
<property>
<name>edmhdpif.typevalidator.invalidtypes.table</name>
<value>${falcon_invalid_table}</value>
</property>
<property>
<name>edmhdpif.typevalidator.invalidtypes.partition</name>
<value>${falcon_invalid_partitions_java}</value>
</property>
<property>
<name>edmhdpif.typevalidator.warntypes.database</name>
<value>${falcon_warn_database}</value>
</property>
<property>
<name>edmhdpif.typevalidator.warntypes.table</name>
<value>${falcon_warn_table}</value>
</property>
<property>
<name>edmhdpif.typevalidator.warntypes.partition</name>
<value>${falcon_warn_partitions_java}</value>
</property>
<property>
<name>edmhdpif.output.path</name>
<value>${wf:conf('hadoop.tmp.dir')}/${falcon_rowid_table}/${falcon_rowid_dated_partition_value_rds}</value>
</property>
<property>
<name>edmhdpif.typevalidator.onetable</name>
<value>${wf:conf('SRIStep')}</value>
</property>
<property>
<name>edmhdpif.typevalidator.args</name>
<value>${typeValidatorArgs}</value>
</property>
</configuration>
<main-class>${typeValidatorCodeFile}</main-class>
</java>
<ok to="hive-add-partitions-after-typevalidator" />
<error to="fail" />
</action>
<action name="typevalidatorhive">
<hive xmlns="uri:oozie:hive-action:0.5">
<job-xml>${wf:appPath()}/conf/hive-site.xml</job-xml>
<job-xml>${wf:appPath()}/conf/tez-site.xml</job-xml>
<configuration>
<property>
<name>edmhdpif.input.database</name>
<value>${falcon_datatransformvalid_database}</value>
</property>
<property>
<name>edmhdpif.input.table</name>
<value>${falcon_datatransformvalid_table}</value>
</property>
<property>
<name>edmhdpif.input.partition</name>
<value>${falcon_datatransformvalid_partitions_java}</value>
</property>
<property>
<name>edmhdpif.typevalidator.validtypes.database</name>
<value>${falcon_verify_database}</value>
</property>
<property>
<name>edmhdpif.typevalidator.validtypes.table</name>
<value>${falcon_verify_table}</value>
</property>
<property>
<name>edmhdpif.typevalidator.validtypes.partition</name>
<value>${falcon_verify_partitions_java}</value>
</property>
<property>
<name>edmhdpif.typevalidator.invalidtypes.database</name>
<value>${falcon_invalid_database}</value>
</property>
<property>
<name>edmhdpif.typevalidator.invalidtypes.table</name>
<value>${falcon_invalid_table}</value>
</property>
<property>
<name>edmhdpif.typevalidator.invalidtypes.partition</name>
<value>${falcon_invalid_partitions_java}</value>
</property>
<property>
<name>edmhdpif.typevalidator.warntypes.database</name>
<value>${falcon_warn_database}</value>
</property>
<property>
<name>edmhdpif.typevalidator.warntypes.table</name>
<value>${falcon_warn_table}</value>
</property>
<property>
<name>edmhdpif.typevalidator.warntypes.partition</name>
<value>${falcon_warn_partitions_java}</value>
</property>
<property>
<name>edmhdpif.output.path</name>
<value>${wf:conf('hadoop.tmp.dir')}/${falcon_rowid_table}/${falcon_rowid_dated_partition_value_rds}</value>
</property>
<property>
<name>edmhdpif.typevalidator.onetable</name>
<value>${wf:conf('SRIStep')}</value>
</property>
<property>
<name>edmhdpif.typevalidator.args</name>
<value>${typeValidatorArgs}</value>
</property>
</configuration>
<script>${typeValidatorCodeFile}</script>
</hive>
<ok to="hive-add-partitions-after-typevalidator" />
<error to="fail" />
</action>
<action name="typevalidatorpig">
<pig>
<configuration>
<property>
<name>edmhdpif.input.database</name>
<value>${falcon_datatransformvalid_database}</value>
</property>
<property>
<name>edmhdpif.input.table</name>
<value>${falcon_datatransformvalid_table}</value>
</property>
<property>
<name>edmhdpif.input.partition</name>
<value>${falcon_datatransformvalid_partitions_java}</value>
</property>
<property>
<name>edmhdpif.typevalidator.validtypes.database</name>
<value>${falcon_verify_database}</value>
</property>
<property>
<name>edmhdpif.typevalidator.validtypes.table</name>
<value>${falcon_verify_table}</value>
</property>
<property>
<name>edmhdpif.typevalidator.validtypes.partition</name>
<value>${falcon_verify_partitions_java}</value>
</property>
<property>
<name>edmhdpif.typevalidator.invalidtypes.database</name>
<value>${falcon_invalid_database}</value>
</property>
<property>
<name>edmhdpif.typevalidator.invalidtypes.table</name>
<value>${falcon_invalid_table}</value>
</property>
<property>
<name>edmhdpif.typevalidator.invalidtypes.partition</name>
<value>${falcon_invalid_partitions_java}</value>
</property>
<property>
<name>edmhdpif.typevalidator.warntypes.database</name>
<value>${falcon_warn_database}</value>
</property>
<property>
<name>edmhdpif.typevalidator.warntypes.table</name>
<value>${falcon_warn_table}</value>
</property>
<property>
<name>edmhdpif.typevalidator.warntypes.partition</name>
<value>${falcon_warn_partitions_java}</value>
</property>
<property>
<name>edmhdpif.output.path</name>
<value>${wf:conf('hadoop.tmp.dir')}/${falcon_rowid_table}/${falcon_rowid_dated_partition_value_rds}</value>
</property>
<property>
<name>edmhdpif.typevalidator.onetable</name>
<value>${wf:conf('SRIStep')}</value>
</property>
<property>
<name>edmhdpif.typevalidator.args</name>
<value>${typeValidatorArgs}</value>
</property>
</configuration>
<script>${typeValidatorCodeFile}</script>
</pig>
<ok to="hive-add-partitions-after-typevalidator" />
<error to="fail" />
</action>
<action name="hive-add-partitions-after-typevalidator">
<hive xmlns="uri:oozie:hive-action:0.5">
<job-xml>${wf:appPath()}/conf/hive-site.xml</job-xml>
<job-xml>${wf:appPath()}/conf/tez-site.xml</job-xml>
<script>${wf:appPath()}/scisit_all_add_partitions_after_typevalidation.hql</script>
<param>param_dated_partition_value=${falcon_rowid_dated_partition_value_rds}</param>
</hive>
<ok to="decision-sri" />
<error to="fail" />
</action>
<decision name="decision-sri">
<switch>
<case to="sri">${wf:conf('SRIStep')}</case>
<default to="end" />
</switch>
</decision>
<action name="sri">
<java>
<configuration>
<property>
<name>edmhdpif.input.database</name>
<value>${falcon_verify_database}</value>
</property>
<property>
<name>edmhdpif.input.table</name>
<value>${falcon_verify_table}</value>
</property>
<property>
<name>edmhdpif.input.partition</name>
<value>${falcon_verify_partitions_java}</value>
</property>
<property>
<name>edmhdpif.input.partition.previous</name>
<value>${falcon_verifyprevious_partitions_java}</value>
</property>
<property>
<name>edmhdpif.output.path</name>
<value>${wf:conf('hadoop.tmp.dir')}/${falcon_verify_table}/${falcon_rowid_dated_partition_value_rds}</value>
</property>
<property>
<name>edmhdpif.open.database</name>
<value>sit_sri_open</value>
</property>
<property>
<name>edmhdpif.open.partition</name>
<value>'ods=${falcon_rowid_dated_partition_value_rds}'</value>
</property>
<property>
<name>edmhdpif.open.partition.previous</name>
<value>'ods=${falcon_verifyprevious_dated_partition_value_vds}'</value>
</property>
<property>
<name>edmhdpif.nonopen.database</name>
<value>sit_sri_nonopen</value>
</property>
<property>
<name>edmhdpif.nonopen.partition</name>
<value>'nds=${falcon_rowid_dated_partition_value_rds}'</value>
</property>
<property>
<name>edmhdpif.duplicatedrows.database</name>
<value>${falcon_duplicates_database}</value>
</property>
<property>
<name>edmhdpif.duplicatedrows.table</name>
<value>${falcon_duplicates_table}</value>
</property>
<property>
<name>edmhdpif.duplicatedrows.partition</name>
<value>${falcon_duplicates_partitions_java}</value>
</property>
</configuration>
<main-class>com.scb.edmhdpif.sri.SRI</main-class>
</java>
<ok to="hive-add-partitions-after-sri" />
<error to="fail" />
</action>
<action name="hive-add-partitions-after-sri">
<hive xmlns="uri:oozie:hive-action:0.5">
<job-xml>${wf:appPath()}/conf/hive-site.xml</job-xml>
<job-xml>${wf:appPath()}/conf/tez-site.xml</job-xml>
<script>${wf:appPath()}/scisit_all_add_partitions_after_sri.hql</script>
<param>param_dated_partition_value=${falcon_rowid_dated_partition_value_rds}</param>
</hive>
<ok to="decision-postprocessing" />
<error to="fail" />
</action>
<decision name="decision-postprocessing">
<switch>
<case to="postprocessing">${wf:conf('postProcessingType')=="ebbs"
}
</case>
<default to="end" />
</switch>
</decision>
<action name="postprocessing">
<java>
<main-class>${postProcessingCodeFile}</main-class>
</java>
<ok to="hive-add-partitions-after-postprocessing" />
<error to="fail" />
</action>
<action name="hive-add-partitions-after-postprocessing">
<hive xmlns="uri:oozie:hive-action:0.5">
<script>${wf:appPath()}/scisit_all_add_partitions_after_postprocessing.hql</script>
<param>param_dated_partition_value=${wf:conf('edmhdpif.sri.nextworkingdate')}</param>
</hive>
<ok to="end" />
<error to="fail" />
</action>
<kill name="fail">
<message>Java failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
</kill>
<end name="end" />
</workflow-app>
Diagnostics: Job setup failed : org.apache.hadoop.security.AccessControlException: Permission denied: user=sitsciapp, access=WRITE, inode="/scisit_all_verifytypes/2016_03_07/_temporary/1":hdfs:hdfs:drwxr-xr-x
    at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.check(FSPermissionChecker.java:319)
    at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.check(FSPermissionChecker.java:292)
    at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:213)
    at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:190)
    at org.apache.hadoop.hdfs.server.namenode.FSDirectory.checkPermission(FSDirectory.java:1771)
    at org.apache.hadoop.hdfs.server.namenode.FSDirectory.checkPermission(FSDirectory.java:1755)
    at org.apache.hadoop.hdfs.server.namenode.FSDirectory.checkAncestorAccess(FSDirectory.java:1738)
    at org.apache.hadoop.hdfs.server.namenode.FSDirMkdirOp.mkdirs(FSDirMkdirOp.java:71)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.mkdirs(FSNamesystem.java:3896)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.mkdirs(NameNodeRpcServer.java:984)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.mkdirs(ClientNamenodeProtocolServerSideTranslatorPB.java:622)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:616)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:969)
    at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2137)
    at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2133)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:422)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1657)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2131)
    at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
    at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
    at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
    at java.lang.reflect.Constructor.newInstance(Constructor.java:422)
    at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:106)
    at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:73)
    at org.apache.hadoop.hdfs.DFSClient.primitiveMkdir(DFSClient.java:3010)
    at org.apache.hadoop.hdfs.DFSClient.mkdirs(DFSClient.java:2978)
    at org.apache.hadoop.hdfs.DistributedFileSystem$21.doCall(DistributedFileSystem.java:1047)
    at org.apache.hadoop.hdfs.DistributedFileSystem$21.doCall(DistributedFileSystem.java:1043)
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
    at org.apache.hadoop.hdfs.DistributedFileSystem.mkdirsInternal(DistributedFileSystem.java:1043)
    at org.apache.hadoop.hdfs.DistributedFileSystem.mkdirs(DistributedFileSystem.java:1036)
    at org.apache.hadoop.fs.FileSystem.mkdirs(FileSystem.java:1877)
    at org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter.setupJob(FileOutputCommitter.java:305)
    at org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler$EventProcessor.handleJobSetup(CommitterEventHandler.java:254)
    at org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler$EventProcessor.run(CommitterEventHandler.java:234)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)
ROOT CAUSE: The file /etc/oozie/conf/action-conf/hive.xml was found to be empty (zero bytes), which prevented Oozie from picking up the hadoop.tmp.dir variable referenced in the workflow. With ${wf:conf('hadoop.tmp.dir')} resolving to an empty string, the workflow's output paths collapsed to the HDFS root (/), producing the AccessControlException above.
RESOLUTION: Copying hive.xml from a backup into /etc/oozie/conf/action-conf/ resolved the issue.
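For reference, below is a minimal sketch of what a restored /etc/oozie/conf/action-conf/hive.xml might contain. The exact contents are site-specific; the hadoop.tmp.dir value shown is only an assumption based on the /tmp/hadoop-${user.name} path the workflow expects (it matches Hadoop's stock default).

<?xml version="1.0" encoding="UTF-8"?>
<!-- Hypothetical minimal action-conf/hive.xml; the real file is site-specific. -->
<configuration>
  <!-- Oozie injects properties from action-conf/<action-type>.xml files into the
       configuration of matching actions, which is how the hadoop.tmp.dir used by
       this workflow is normally supplied. An empty file leaves the property
       unset, so paths built from ${wf:conf('hadoop.tmp.dir')} fall back to /. -->
  <property>
    <name>hadoop.tmp.dir</name>
    <!-- Assumed value: the directory the workflow expects its job files under. -->
    <value>/tmp/hadoop-${user.name}</value>
  </property>
</configuration>

After restoring the file (a restart of the Oozie server may be required for it to be re-read), re-running the failed workflow should create its staging directories under /tmp/hadoop-<user>/... rather than at the HDFS root.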