stderr:
Traceback (most recent call last):
  File "/usr/lib/ambari-agent/lib/resource_management/libraries/providers/hdfs_resource.py", line 287, in _run_command
    result_dict = json.loads(out)
  File "/usr/lib/ambari-agent/lib/ambari_simplejson/__init__.py", line 307, in loads
    return _default_decoder.decode(s)
  File "/usr/lib/ambari-agent/lib/ambari_simplejson/decoder.py", line 335, in decode
    obj, end = self.raw_decode(s, idx=_w(s, 0).end())
  File "/usr/lib/ambari-agent/lib/ambari_simplejson/decoder.py", line 353, in raw_decode
    raise ValueError("No JSON object could be decoded")
ValueError: No JSON object could be decoded

The above exception was the cause of the following exception:

Traceback (most recent call last):
  File "/var/lib/ambari-agent/cache/stacks/HDP/3.0/services/HDFS/package/scripts/namenode.py", line 408, in <module>
    NameNode().execute()
  File "/usr/lib/ambari-agent/lib/resource_management/libraries/script/script.py", line 352, in execute
    method(env)
  File "/var/lib/ambari-agent/cache/stacks/HDP/3.0/services/HDFS/package/scripts/namenode.py", line 138, in start
    upgrade_suspended=params.upgrade_suspended, env=env)
  File "/usr/lib/ambari-agent/lib/ambari_commons/os_family_impl.py", line 89, in thunk
    return fn(*args, **kwargs)
  File "/var/lib/ambari-agent/cache/stacks/HDP/3.0/services/HDFS/package/scripts/hdfs_namenode.py", line 264, in namenode
    create_hdfs_directories(name_service)
  File "/var/lib/ambari-agent/cache/stacks/HDP/3.0/services/HDFS/package/scripts/hdfs_namenode.py", line 336, in create_hdfs_directories
    nameservices=name_services
  File "/usr/lib/ambari-agent/lib/resource_management/core/base.py", line 166, in __init__
    self.env.run()
  File "/usr/lib/ambari-agent/lib/resource_management/core/environment.py", line 160, in run
    self.run_action(resource, action)
  File "/usr/lib/ambari-agent/lib/resource_management/core/environment.py", line 124, in run_action
    provider_action()
  File "/usr/lib/ambari-agent/lib/resource_management/libraries/providers/hdfs_resource.py", line 677, in action_create_on_execute
    self.action_delayed("create")
  File "/usr/lib/ambari-agent/lib/resource_management/libraries/providers/hdfs_resource.py", line 674, in action_delayed
    self.get_hdfs_resource_executor().action_delayed(action_name, self)
  File "/usr/lib/ambari-agent/lib/resource_management/libraries/providers/hdfs_resource.py", line 373, in action_delayed
    self.action_delayed_for_nameservice(None, action_name, main_resource)
  File "/usr/lib/ambari-agent/lib/resource_management/libraries/providers/hdfs_resource.py", line 395, in action_delayed_for_nameservice
    self._assert_valid()
  File "/usr/lib/ambari-agent/lib/resource_management/libraries/providers/hdfs_resource.py", line 334, in _assert_valid
    self.target_status = self._get_file_status(target)
  File "/usr/lib/ambari-agent/lib/resource_management/libraries/providers/hdfs_resource.py", line 497, in _get_file_status
    list_status = self.util.run_command(target, 'GETFILESTATUS', method='GET', ignore_status_codes=['404'], assertable_result=False)
  File "/usr/lib/ambari-agent/lib/resource_management/libraries/providers/hdfs_resource.py", line 214, in run_command
    return self._run_command(*args, **kwargs)
  File "/usr/lib/ambari-agent/lib/resource_management/libraries/providers/hdfs_resource.py", line 295, in _run_command
    raise WebHDFSCallException(err_msg, result_dict)
resource_management.libraries.providers.hdfs_resource.WebHDFSCallException: Execution of 'curl -sS -L -w '%{http_code}' -X GET -d '' -H 'Content-Length: 0' 'http://nodetwo:50070/webhdfs/v1/tmp?op=GETFILESTATUS&user.name=hdfs'' returned status_code=407.
Access Denied
Access Denied (authentication_failed)

Your credentials could not be authenticated: "General authentication failure due to bad user ID or authentication token.". You will not be permitted access until your credentials can be verified.
This is typically caused by an incorrect username and/or password, but could also be caused by network problems.
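Note that HTTP 407 means "Proxy Authentication Required": the curl call issued by the agent never reached the NameNode's WebHDFS endpoint on nodetwo:50070 and was answered by an intercepting HTTP proxy instead, and the "Access Denied (authentication_failed)" page above is the proxy's, not Hadoop's. A minimal check, run as the hdfs user on the agent host (hostname and expected output are assumptions taken from this log), is to look for proxy environment variables and rerun the failing request with the proxy bypassed:

# Proxy variables visible to the hdfs user (http_proxy, HTTP_PROXY, etc.)
sudo su - hdfs -c 'env | grep -i proxy'

# Re-run the exact request from the log; a 407 here reproduces the failure
sudo su - hdfs -c "curl -sS -L -w '%{http_code}' \
  'http://nodetwo:50070/webhdfs/v1/tmp?op=GETFILESTATUS&user.name=hdfs'"

# Same request with the proxy bypassed for this host; a JSON FileStatus body
# plus a 200 status code would confirm the proxy is the culprit
sudo su - hdfs -c "curl -sS -L -w '%{http_code}' --noproxy nodetwo \
  'http://nodetwo:50070/webhdfs/v1/tmp?op=GETFILESTATUS&user.name=hdfs'"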

stdout:
2019-06-28 14:58:30,604 - Stack Feature Version Info: Cluster Stack=3.1, Command Stack=None, Command Version=3.1.0.0-78 -> 3.1.0.0-78
2019-06-28 14:58:30,629 - Using hadoop conf dir: /usr/hdp/3.1.0.0-78/hadoop/conf
2019-06-28 14:58:30,801 - Stack Feature Version Info: Cluster Stack=3.1, Command Stack=None, Command Version=3.1.0.0-78 -> 3.1.0.0-78
2019-06-28 14:58:30,808 - Using hadoop conf dir: /usr/hdp/3.1.0.0-78/hadoop/conf
2019-06-28 14:58:30,809 - Group['hdfs'] {}
2019-06-28 14:58:30,843 - Group['hadoop'] {}
2019-06-28 14:58:30,843 - Group['users'] {}
2019-06-28 14:58:30,843 - User['yarn-ats'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': ['hadoop'], 'uid': None}
2019-06-28 14:58:30,844 - User['zookeeper'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': ['hadoop'], 'uid': None}
2019-06-28 14:58:30,845 - User['ams'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': ['hadoop'], 'uid': None}
2019-06-28 14:58:30,845 - User['ambari-qa'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': ['hadoop', 'users'], 'uid': None}
2019-06-28 14:58:30,846 - User['hdfs'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': ['hdfs', 'hadoop'], 'uid': None}
2019-06-28 14:58:30,847 - User['yarn'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': ['hadoop'], 'uid': None}
2019-06-28 14:58:30,847 - User['mapred'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': ['hadoop'], 'uid': None}
2019-06-28 14:58:30,848 - File['/var/lib/ambari-agent/tmp/changeUid.sh'] {'content': StaticFile('changeToSecureUid.sh'), 'mode': 0555}
2019-06-28 14:58:30,899 - Execute['/var/lib/ambari-agent/tmp/changeUid.sh ambari-qa /tmp/hadoop-ambari-qa,/tmp/hsperfdata_ambari-qa,/home/ambari-qa,/tmp/ambari-qa,/tmp/sqoop-ambari-qa 0'] {'not_if': '(test $(id -u ambari-qa) -gt 1000) || (false)'}
2019-06-28 14:58:30,904 - Skipping Execute['/var/lib/ambari-agent/tmp/changeUid.sh ambari-qa /tmp/hadoop-ambari-qa,/tmp/hsperfdata_ambari-qa,/home/ambari-qa,/tmp/ambari-qa,/tmp/sqoop-ambari-qa 0'] due to not_if
2019-06-28 14:58:30,904 - Group['hdfs'] {}
2019-06-28 14:58:30,905 - User['hdfs'] {'fetch_nonlocal_groups': True, 'groups': ['hdfs', 'hadoop', u'hdfs']}
2019-06-28 14:58:30,905 - FS Type: HDFS
2019-06-28 14:58:30,905 - Directory['/etc/hadoop'] {'mode': 0755}
2019-06-28 14:58:30,923 - File['/usr/hdp/3.1.0.0-78/hadoop/conf/hadoop-env.sh'] {'content': InlineTemplate(...), 'owner': 'hdfs', 'group': 'hadoop'}
2019-06-28 14:58:30,945 - Directory['/var/lib/ambari-agent/tmp/hadoop_java_io_tmpdir'] {'owner': 'hdfs', 'group': 'hadoop', 'mode': 01777}
2019-06-28 14:58:30,965 - Execute[('setenforce', '0')] {'not_if': '(! which getenforce ) || (which getenforce && getenforce | grep -q Disabled)', 'sudo': True, 'only_if': 'test -f /selinux/enforce'}
2019-06-28 14:58:30,969 - Skipping Execute[('setenforce', '0')] due to not_if
2019-06-28 14:58:30,969 - Directory['/var/log/hadoop'] {'owner': 'root', 'create_parents': True, 'group': 'hadoop', 'mode': 0775, 'cd_access': 'a'}
2019-06-28 14:58:30,970 - Directory['/var/run/hadoop'] {'owner': 'root', 'create_parents': True, 'group': 'root', 'cd_access': 'a'}
2019-06-28 14:58:30,971 - Creating directory Directory['/var/run/hadoop'] since it doesn't exist.
2019-06-28 14:58:30,971 - Directory['/var/run/hadoop/hdfs'] {'owner': 'hdfs', 'cd_access': 'a'}
2019-06-28 14:58:30,971 - Creating directory Directory['/var/run/hadoop/hdfs'] since it doesn't exist.
2019-06-28 14:58:30,971 - Changing owner for /var/run/hadoop/hdfs from 0 to hdfs
2019-06-28 14:58:30,972 - Directory['/tmp/hadoop-hdfs'] {'owner': 'hdfs', 'create_parents': True, 'cd_access': 'a'}
2019-06-28 14:58:30,972 - Creating directory Directory['/tmp/hadoop-hdfs'] since it doesn't exist.
2019-06-28 14:58:30,972 - Changing owner for /tmp/hadoop-hdfs from 0 to hdfs
2019-06-28 14:58:30,976 - File['/usr/hdp/3.1.0.0-78/hadoop/conf/commons-logging.properties'] {'content': Template('commons-logging.properties.j2'), 'owner': 'hdfs'}
2019-06-28 14:58:31,020 - File['/usr/hdp/3.1.0.0-78/hadoop/conf/health_check'] {'content': Template('health_check.j2'), 'owner': 'hdfs'}
2019-06-28 14:58:31,026 - File['/usr/hdp/3.1.0.0-78/hadoop/conf/log4j.properties'] {'content': InlineTemplate(...), 'owner': 'hdfs', 'group': 'hadoop', 'mode': 0644}
2019-06-28 14:58:31,062 - File['/usr/hdp/3.1.0.0-78/hadoop/conf/hadoop-metrics2.properties'] {'content': InlineTemplate(...), 'owner': 'hdfs', 'group': 'hadoop'}
2019-06-28 14:58:31,063 - File['/usr/hdp/3.1.0.0-78/hadoop/conf/task-log4j.properties'] {'content': StaticFile('task-log4j.properties'), 'mode': 0755}
2019-06-28 14:58:31,118 - File['/usr/hdp/3.1.0.0-78/hadoop/conf/configuration.xsl'] {'owner': 'hdfs', 'group': 'hadoop'}
2019-06-28 14:58:31,144 - File['/etc/hadoop/conf/topology_mappings.data'] {'owner': 'hdfs', 'content': Template('topology_mappings.data.j2'), 'only_if': 'test -d /etc/hadoop/conf', 'group': 'hadoop', 'mode': 0644}
2019-06-28 14:58:31,148 - File['/etc/hadoop/conf/topology_script.py'] {'content': StaticFile('topology_script.py'), 'only_if': 'test -d /etc/hadoop/conf', 'mode': 0755}
2019-06-28 14:58:31,173 - Skipping unlimited key JCE policy check and setup since it is not required
2019-06-28 14:58:31,442 - Using hadoop conf dir: /usr/hdp/3.1.0.0-78/hadoop/conf
2019-06-28 14:58:31,442 - Stack Feature Version Info: Cluster Stack=3.1, Command Stack=None, Command Version=3.1.0.0-78 -> 3.1.0.0-78
2019-06-28 14:58:31,470 - Using hadoop conf dir: /usr/hdp/3.1.0.0-78/hadoop/conf
2019-06-28 14:58:31,488 - Directory['/etc/security/limits.d'] {'owner': 'root', 'create_parents': True, 'group': 'root'}
2019-06-28 14:58:31,512 - File['/etc/security/limits.d/hdfs.conf'] {'content': Template('hdfs.conf.j2'), 'owner': 'root', 'group': 'root', 'mode': 0644}
2019-06-28 14:58:31,513 - XmlConfig['hadoop-policy.xml'] {'group': 'hadoop', 'conf_dir': '/usr/hdp/3.1.0.0-78/hadoop/conf', 'mode': 0644, 'configuration_attributes': {}, 'owner': 'hdfs', 'configurations': ...}
2019-06-28 14:58:31,553 - Generating config: /usr/hdp/3.1.0.0-78/hadoop/conf/hadoop-policy.xml
2019-06-28 14:58:31,553 - File['/usr/hdp/3.1.0.0-78/hadoop/conf/hadoop-policy.xml'] {'owner': 'hdfs', 'content': InlineTemplate(...), 'group': 'hadoop', 'mode': 0644, 'encoding': 'UTF-8'}
2019-06-28 14:58:31,588 - XmlConfig['ssl-client.xml'] {'group': 'hadoop', 'conf_dir': '/usr/hdp/3.1.0.0-78/hadoop/conf', 'mode': 0644, 'configuration_attributes': {}, 'owner': 'hdfs', 'configurations': ...}
2019-06-28 14:58:31,598 - Generating config: /usr/hdp/3.1.0.0-78/hadoop/conf/ssl-client.xml
2019-06-28 14:58:31,598 - File['/usr/hdp/3.1.0.0-78/hadoop/conf/ssl-client.xml'] {'owner': 'hdfs', 'content': InlineTemplate(...), 'group': 'hadoop', 'mode': 0644, 'encoding': 'UTF-8'}
2019-06-28 14:58:31,623 - Directory['/usr/hdp/3.1.0.0-78/hadoop/conf/secure'] {'owner': 'root', 'create_parents': True, 'group': 'hadoop', 'cd_access': 'a'}
2019-06-28 14:58:31,624 - XmlConfig['ssl-client.xml'] {'group': 'hadoop', 'conf_dir': '/usr/hdp/3.1.0.0-78/hadoop/conf/secure', 'mode': 0644, 'configuration_attributes': {}, 'owner': 'hdfs', 'configurations': ...}
2019-06-28 14:58:31,634 - Generating config: /usr/hdp/3.1.0.0-78/hadoop/conf/secure/ssl-client.xml
2019-06-28 14:58:31,635 - File['/usr/hdp/3.1.0.0-78/hadoop/conf/secure/ssl-client.xml'] {'owner': 'hdfs', 'content': InlineTemplate(...), 'group': 'hadoop', 'mode': 0644, 'encoding': 'UTF-8'}
2019-06-28 14:58:31,655 - XmlConfig['ssl-server.xml'] {'group': 'hadoop', 'conf_dir': '/usr/hdp/3.1.0.0-78/hadoop/conf', 'mode': 0644, 'configuration_attributes': {}, 'owner': 'hdfs', 'configurations': ...}
2019-06-28 14:58:31,664 - Generating config: /usr/hdp/3.1.0.0-78/hadoop/conf/ssl-server.xml
2019-06-28 14:58:31,664 - File['/usr/hdp/3.1.0.0-78/hadoop/conf/ssl-server.xml'] {'owner': 'hdfs', 'content': InlineTemplate(...), 'group': 'hadoop', 'mode': 0644, 'encoding': 'UTF-8'}
2019-06-28 14:58:31,679 - XmlConfig['hdfs-site.xml'] {'group': 'hadoop', 'conf_dir': '/usr/hdp/3.1.0.0-78/hadoop/conf', 'mode': 0644, 'configuration_attributes': {u'final': {u'dfs.datanode.failed.volumes.tolerated': u'true', u'dfs.datanode.data.dir': u'true', u'dfs.namenode.http-address': u'true', u'dfs.namenode.name.dir': u'true', u'dfs.webhdfs.enabled': u'true'}}, 'owner': 'hdfs', 'configurations': ...}
2019-06-28 14:58:31,688 - Generating config: /usr/hdp/3.1.0.0-78/hadoop/conf/hdfs-site.xml
2019-06-28 14:58:31,689 - File['/usr/hdp/3.1.0.0-78/hadoop/conf/hdfs-site.xml'] {'owner': 'hdfs', 'content': InlineTemplate(...), 'group': 'hadoop', 'mode': 0644, 'encoding': 'UTF-8'}
2019-06-28 14:58:31,730 - XmlConfig['core-site.xml'] {'group': 'hadoop', 'conf_dir': '/usr/hdp/3.1.0.0-78/hadoop/conf', 'xml_include_file': None, 'mode': 0644, 'configuration_attributes': {u'final': {u'fs.defaultFS': u'true'}}, 'owner': 'hdfs', 'configurations': ...}
2019-06-28 14:58:31,739 - Generating config: /usr/hdp/3.1.0.0-78/hadoop/conf/core-site.xml
2019-06-28 14:58:31,739 - File['/usr/hdp/3.1.0.0-78/hadoop/conf/core-site.xml'] {'owner': 'hdfs', 'content': InlineTemplate(...), 'group': 'hadoop', 'mode': 0644, 'encoding': 'UTF-8'}
2019-06-28 14:58:31,806 - File['/usr/hdp/3.1.0.0-78/hadoop/conf/slaves'] {'content': Template('slaves.j2'), 'owner': 'hdfs'}
2019-06-28 14:58:31,832 - Directory['/hadoop/hdfs/namenode'] {'owner': 'hdfs', 'group': 'hadoop', 'create_parents': True, 'mode': 0755, 'cd_access': 'a'}
2019-06-28 14:58:31,930 - Directory['/usr/lib/ambari-logsearch-logfeeder/conf'] {'create_parents': True, 'mode': 0755, 'cd_access': 'a'}
2019-06-28 14:58:31,931 - Generate Log Feeder config file: /usr/lib/ambari-logsearch-logfeeder/conf/input.config-hdfs.json
2019-06-28 14:58:31,931 - File['/usr/lib/ambari-logsearch-logfeeder/conf/input.config-hdfs.json'] {'content': Template('input.config-hdfs.json.j2'), 'mode': 0644}
2019-06-28 14:58:31,947 - Skipping setting up secure ZNode ACL for HFDS as it's supported only for NameNode HA mode.
2019-06-28 14:58:31,952 - Called service start with upgrade_type: None
2019-06-28 14:58:31,952 - Ranger Hdfs plugin is not enabled
2019-06-28 14:58:31,954 - File['/etc/hadoop/conf/dfs.exclude'] {'owner': 'hdfs', 'content': Template('exclude_hosts_list.j2'), 'group': 'hadoop'}
2019-06-28 14:58:31,978 - /hadoop/hdfs/namenode/namenode-formatted/ exists. Namenode DFS already formatted
2019-06-28 14:58:31,978 - Directory['/hadoop/hdfs/namenode/namenode-formatted/'] {'create_parents': True}
2019-06-28 14:58:31,978 - Options for start command are:
2019-06-28 14:58:31,978 - Directory['/var/run/hadoop'] {'owner': 'hdfs', 'group': 'hadoop', 'mode': 0755}
2019-06-28 14:58:31,979 - Changing owner for /var/run/hadoop from 0 to hdfs
2019-06-28 14:58:31,979 - Changing group for /var/run/hadoop from 0 to hadoop
2019-06-28 14:58:31,979 - Directory['/var/run/hadoop/hdfs'] {'owner': 'hdfs', 'group': 'hadoop', 'create_parents': True}
2019-06-28 14:58:31,979 - Changing group for /var/run/hadoop/hdfs from 0 to hadoop
2019-06-28 14:58:31,979 - Directory['/var/log/hadoop/hdfs'] {'owner': 'hdfs', 'group': 'hadoop', 'create_parents': True}
2019-06-28 14:58:31,980 - File['/var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid'] {'action': ['delete'], 'not_if': 'ambari-sudo.sh -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid && ambari-sudo.sh -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid'}
2019-06-28 14:58:31,985 - Execute['ambari-sudo.sh su hdfs -l -s /bin/bash -c 'ulimit -c unlimited ; /usr/hdp/3.1.0.0-78/hadoop/bin/hdfs --config /usr/hdp/3.1.0.0-78/hadoop/conf --daemon start namenode''] {'environment': {'HADOOP_LIBEXEC_DIR': '/usr/hdp/3.1.0.0-78/hadoop/libexec'}, 'not_if': 'ambari-sudo.sh -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid && ambari-sudo.sh -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid'}
2019-06-28 14:58:34,423 - Waiting for this NameNode to leave Safemode due to the following conditions: HA: False, isActive: True, upgradeType: None
2019-06-28 14:58:34,423 - Waiting up to 19 minutes for the NameNode to leave Safemode...
2019-06-28 14:58:34,423 - Execute['/usr/hdp/current/hadoop-hdfs-namenode/bin/hdfs dfsadmin -fs hdfs://nodetwo:8020 -safemode get | grep 'Safe mode is OFF''] {'logoutput': True, 'tries': 115, 'user': 'hdfs', 'try_sleep': 10}
safemode: Call From nodetwo/172.16.217.206 to nodetwo:8020 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused
2019-06-28 14:58:44,564 - Retrying after 10 seconds. Reason: Execution of '/usr/hdp/current/hadoop-hdfs-namenode/bin/hdfs dfsadmin -fs hdfs://nodetwo:8020 -safemode get | grep 'Safe mode is OFF'' returned 1.
safemode: Call From nodetwo/172.16.217.206 to nodetwo:8020 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused
Safe mode is OFF
2019-06-28 14:58:57,583 - HdfsResource['/tmp'] {'security_enabled': False, 'hadoop_bin_dir': '/usr/hdp/3.1.0.0-78/hadoop/bin', 'keytab': [EMPTY], 'dfs_type': 'HDFS', 'default_fs': 'hdfs://nodetwo:8020', 'hdfs_resource_ignore_file': '/var/lib/ambari-agent/data/.hdfs_resource_ignore', 'hdfs_site': ..., 'kinit_path_local': 'kinit', 'principal_name': None, 'user': 'hdfs', 'owner': 'hdfs', 'nameservices': None, 'hadoop_conf_dir': '/usr/hdp/3.1.0.0-78/hadoop/conf', 'type': 'directory', 'action': ['create_on_execute'], 'immutable_paths': [u'/mr-history/done', u'/app-logs', u'/tmp'], 'mode': 0777}
2019-06-28 14:58:57,589 - call['ambari-sudo.sh su hdfs -l -s /bin/bash -c 'curl -sS -L -w '"'"'%{http_code}'"'"' -X GET -d '"'"''"'"' -H '"'"'Content-Length: 0'"'"' '"'"'http://nodetwo:50070/webhdfs/v1/tmp?op=GETFILESTATUS&user.name=hdfs'"'"' 1>/tmp/tmpUKmxxe 2>/tmp/tmp7owsWM''] {'logoutput': None, 'quiet': False}
2019-06-28 14:59:03,059 - call returned (0, '')
2019-06-28 14:59:03,060 - get_user_call_output returned (0, u'Access Denied ... Access Denied (authentication_failed) ... Your credentials could not be authenticated: "General authentication failure due to bad user ID or authentication token.". You will not be permitted access until your credentials can be verified. ... This is typically caused by an incorrect username and/or password, but could also be caused by network problems. ... 407', u'')
Command failed after 1 tries
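If the proxy-bypass test above returns the expected JSON, a likely fix is to exempt the cluster hosts from the proxy in whatever environment the agent and the hdfs user inherit. A sketch, assuming a standard Ambari layout (the file path and domain below are placeholders to adjust for your setup):

# e.g. in /var/lib/ambari-agent/ambari-env.sh, or the shell profile the hdfs
# user inherits; no_proxy tells curl and most HTTP clients to connect to the
# listed hosts directly instead of going through the configured proxy
export no_proxy="localhost,127.0.0.1,nodetwo,.yourdomain.example"
export NO_PROXY="$no_proxy"

After changing the environment, restart the ambari-agent and retry the NameNode start from Ambari; the WebHDFS GETFILESTATUS call should then reach nodetwo:50070 directly instead of being answered by the proxy with a 407.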