<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>question datanode not starting only on 1 node by ambari installation in Support Questions</title>
    <link>https://community.cloudera.com/t5/Support-Questions/datanode-not-starting-only-on-1-node-by-ambari-installation/m-p/235205#M197025</link>
    <description>HDFS DataNode fails to start on one worker node (worker2.sip.com) of an Ambari-managed HDP 2.6.5 cluster; the DataNode .out logs repeat "Error: could not find libjava.so" and "Error: Could not find Java SE Runtime Environment." The full Ambari start-command log is in the first item below.</description>
    <pubDate>Wed, 10 Jul 2019 19:52:14 GMT</pubDate>
    <dc:creator>abrahamfikire</dc:creator>
    <dc:date>2019-07-10T19:52:14Z</dc:date>
    <item>
      <title>datanode not starting only on 1 node by ambari installation</title>
      <link>https://community.cloudera.com/t5/Support-Questions/datanode-not-starting-only-on-1-node-by-ambari-installation/m-p/235205#M197025</link>
      <description>&lt;PRE&gt;2019-07-10 05:48:38,266 - Stack Feature Version Info: Cluster Stack=2.6, Command Stack=None, Command Version=2.6.5.1175-1 -&amp;gt; 2.6.5.1175-1
2019-07-10 05:48:38,291 - Using hadoop conf dir: /usr/hdp/2.6.5.1175-1/hadoop/conf
2019-07-10 05:48:38,648 - Stack Feature Version Info: Cluster Stack=2.6, Command Stack=None, Command Version=2.6.5.1175-1 -&amp;gt; 2.6.5.1175-1
2019-07-10 05:48:38,660 - Using hadoop conf dir: /usr/hdp/2.6.5.1175-1/hadoop/conf
2019-07-10 05:48:38,662 - Group['hdfs'] {}
2019-07-10 05:48:38,665 - Group['hadoop'] {}
2019-07-10 05:48:38,666 - Group['users'] {}
2019-07-10 05:48:38,667 - User['zookeeper'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'hadoop'], 'uid': None}
2019-07-10 05:48:38,669 - User['ams'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'hadoop'], 'uid': None}
2019-07-10 05:48:38,671 - User['ambari-qa'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'users'], 'uid': None}
2019-07-10 05:48:38,673 - User['hdfs'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': ['hdfs'], 'uid': None}
2019-07-10 05:48:38,675 - File['/var/lib/ambari-agent/tmp/changeUid.sh'] {'content': StaticFile('changeToSecureUid.sh'), 'mode': 0555}
2019-07-10 05:48:38,678 - Execute['/var/lib/ambari-agent/tmp/changeUid.sh ambari-qa /tmp/hadoop-ambari-qa,/tmp/hsperfdata_ambari-qa,/home/ambari-qa,/tmp/ambari-qa,/tmp/sqoop-ambari-qa 0'] {'not_if': '(test $(id -u ambari-qa) -gt 1000) || (false)'}
2019-07-10 05:48:38,691 - Skipping Execute['/var/lib/ambari-agent/tmp/changeUid.sh ambari-qa /tmp/hadoop-ambari-qa,/tmp/hsperfdata_ambari-qa,/home/ambari-qa,/tmp/ambari-qa,/tmp/sqoop-ambari-qa 0'] due to not_if
2019-07-10 05:48:38,692 - Group['hdfs'] {}
2019-07-10 05:48:38,693 - User['hdfs'] {'fetch_nonlocal_groups': True, 'groups': ['hdfs', u'hdfs']}
2019-07-10 05:48:38,695 - FS Type:
2019-07-10 05:48:38,695 - Directory['/etc/hadoop'] {'mode': 0755}
2019-07-10 05:48:38,729 - File['/usr/hdp/2.6.5.1175-1/hadoop/conf/hadoop-env.sh'] {'content': InlineTemplate(...), 'owner': 'hdfs', 'group': 'hadoop'}
2019-07-10 05:48:38,731 - Directory['/var/lib/ambari-agent/tmp/hadoop_java_io_tmpdir'] {'owner': 'hdfs', 'group': 'hadoop', 'mode': 01777}
2019-07-10 05:48:38,764 - Execute[('setenforce', '0')] {'not_if': '(! which getenforce ) || (which getenforce &amp;amp;&amp;amp; getenforce | grep -q Disabled)', 'sudo': True, 'only_if': 'test -f /selinux/enforce'}
2019-07-10 05:48:38,779 - Skipping Execute[('setenforce', '0')] due to not_if
2019-07-10 05:48:38,780 - Directory['/var/log/hadoop'] {'owner': 'root', 'create_parents': True, 'group': 'hadoop', 'mode': 0775, 'cd_access': 'a'}
2019-07-10 05:48:38,785 - Directory['/var/run/hadoop'] {'owner': 'root', 'create_parents': True, 'group': 'root', 'cd_access': 'a'}
2019-07-10 05:48:38,787 - Directory['/tmp/hadoop-hdfs'] {'owner': 'hdfs', 'create_parents': True, 'cd_access': 'a'}
2019-07-10 05:48:38,795 - File['/usr/hdp/2.6.5.1175-1/hadoop/conf/commons-logging.properties'] {'content': Template('commons-logging.properties.j2'), 'owner': 'hdfs'}
2019-07-10 05:48:38,799 - File['/usr/hdp/2.6.5.1175-1/hadoop/conf/health_check'] {'content': Template('health_check.j2'), 'owner': 'hdfs'}
2019-07-10 05:48:38,809 - File['/usr/hdp/2.6.5.1175-1/hadoop/conf/log4j.properties'] {'content': InlineTemplate(...), 'owner': 'hdfs', 'group': 'hadoop', 'mode': 0644}
2019-07-10 05:48:38,832 - File['/usr/hdp/2.6.5.1175-1/hadoop/conf/hadoop-metrics2.properties'] {'content': InlineTemplate(...), 'owner': 'hdfs', 'group': 'hadoop'}
2019-07-10 05:48:38,833 - File['/usr/hdp/2.6.5.1175-1/hadoop/conf/task-log4j.properties'] {'content': StaticFile('task-log4j.properties'), 'mode': 0755}
2019-07-10 05:48:38,835 - File['/usr/hdp/2.6.5.1175-1/hadoop/conf/configuration.xsl'] {'owner': 'hdfs', 'group': 'hadoop'}
2019-07-10 05:48:38,845 - File['/etc/hadoop/conf/topology_mappings.data'] {'owner': 'hdfs', 'content': Template('topology_mappings.data.j2'), 'only_if': 'test -d /etc/hadoop/conf', 'group': 'hadoop', 'mode': 0644}
2019-07-10 05:48:38,853 - File['/etc/hadoop/conf/topology_script.py'] {'content': StaticFile('topology_script.py'), 'only_if': 'test -d /etc/hadoop/conf', 'mode': 0755}
2019-07-10 05:48:39,469 - Using hadoop conf dir: /usr/hdp/2.6.5.1175-1/hadoop/conf
2019-07-10 05:48:39,474 - Stack Feature Version Info: Cluster Stack=2.6, Command Stack=None, Command Version=2.6.5.1175-1 -&amp;gt; 2.6.5.1175-1
2019-07-10 05:48:39,521 - Using hadoop conf dir: /usr/hdp/2.6.5.1175-1/hadoop/conf
2019-07-10 05:48:39,554 - Directory['/etc/security/limits.d'] {'owner': 'root', 'create_parents': True, 'group': 'root'}
2019-07-10 05:48:39,564 - File['/etc/security/limits.d/hdfs.conf'] {'content': Template('hdfs.conf.j2'), 'owner': 'root', 'group': 'root', 'mode': 0644}
2019-07-10 05:48:39,566 - XmlConfig['hadoop-policy.xml'] {'owner': 'hdfs', 'group': 'hadoop', 'conf_dir': '/usr/hdp/2.6.5.1175-1/hadoop/conf', 'configuration_attributes': {}, 'configurations': ...}
2019-07-10 05:48:39,583 - Generating config: /usr/hdp/2.6.5.1175-1/hadoop/conf/hadoop-policy.xml
2019-07-10 05:48:39,583 - File['/usr/hdp/2.6.5.1175-1/hadoop/conf/hadoop-policy.xml'] {'owner': 'hdfs', 'content': InlineTemplate(...), 'group': 'hadoop', 'mode': None, 'encoding': 'UTF-8'}
2019-07-10 05:48:39,601 - XmlConfig['ssl-client.xml'] {'owner': 'hdfs', 'group': 'hadoop', 'conf_dir': '/usr/hdp/2.6.5.1175-1/hadoop/conf', 'configuration_attributes': {}, 'configurations': ...}
2019-07-10 05:48:39,615 - Generating config: /usr/hdp/2.6.5.1175-1/hadoop/conf/ssl-client.xml
2019-07-10 05:48:39,616 - File['/usr/hdp/2.6.5.1175-1/hadoop/conf/ssl-client.xml'] {'owner': 'hdfs', 'content': InlineTemplate(...), 'group': 'hadoop', 'mode': None, 'encoding': 'UTF-8'}
2019-07-10 05:48:39,627 - Directory['/usr/hdp/2.6.5.1175-1/hadoop/conf/secure'] {'owner': 'root', 'create_parents': True, 'group': 'hadoop', 'cd_access': 'a'}
2019-07-10 05:48:39,629 - XmlConfig['ssl-client.xml'] {'owner': 'hdfs', 'group': 'hadoop', 'conf_dir': '/usr/hdp/2.6.5.1175-1/hadoop/conf/secure', 'configuration_attributes': {}, 'configurations': ...}
2019-07-10 05:48:39,644 - Generating config: /usr/hdp/2.6.5.1175-1/hadoop/conf/secure/ssl-client.xml
2019-07-10 05:48:39,644 - File['/usr/hdp/2.6.5.1175-1/hadoop/conf/secure/ssl-client.xml'] {'owner': 'hdfs', 'content': InlineTemplate(...), 'group': 'hadoop', 'mode': None, 'encoding': 'UTF-8'}
2019-07-10 05:48:39,654 - XmlConfig['ssl-server.xml'] {'owner': 'hdfs', 'group': 'hadoop', 'conf_dir': '/usr/hdp/2.6.5.1175-1/hadoop/conf', 'configuration_attributes': {}, 'configurations': ...}
2019-07-10 05:48:39,667 - Generating config: /usr/hdp/2.6.5.1175-1/hadoop/conf/ssl-server.xml
2019-07-10 05:48:39,667 - File['/usr/hdp/2.6.5.1175-1/hadoop/conf/ssl-server.xml'] {'owner': 'hdfs', 'content': InlineTemplate(...), 'group': 'hadoop', 'mode': None, 'encoding': 'UTF-8'}
2019-07-10 05:48:39,679 - XmlConfig['hdfs-site.xml'] {'owner': 'hdfs', 'group': 'hadoop', 'conf_dir': '/usr/hdp/2.6.5.1175-1/hadoop/conf', 'configuration_attributes': {u'final': {u'dfs.support.append': u'true', u'dfs.datanode.data.dir': u'true', u'dfs.namenode.http-address': u'true', u'dfs.namenode.name.dir': u'true', u'dfs.webhdfs.enabled': u'true', u'dfs.datanode.failed.volumes.tolerated': u'true'}}, 'configurations': ...}
2019-07-10 05:48:39,691 - Generating config: /usr/hdp/2.6.5.1175-1/hadoop/conf/hdfs-site.xml
2019-07-10 05:48:39,691 - File['/usr/hdp/2.6.5.1175-1/hadoop/conf/hdfs-site.xml'] {'owner': 'hdfs', 'content': InlineTemplate(...), 'group': 'hadoop', 'mode': None, 'encoding': 'UTF-8'}
2019-07-10 05:48:39,763 - XmlConfig['core-site.xml'] {'group': 'hadoop', 'conf_dir': '/usr/hdp/2.6.5.1175-1/hadoop/conf', 'mode': 0644, 'configuration_attributes': {u'final': {u'fs.defaultFS': u'true'}}, 'owner': 'hdfs', 'configurations': ...}
2019-07-10 05:48:39,774 - Generating config: /usr/hdp/2.6.5.1175-1/hadoop/conf/core-site.xml
2019-07-10 05:48:39,774 - File['/usr/hdp/2.6.5.1175-1/hadoop/conf/core-site.xml'] {'owner': 'hdfs', 'content': InlineTemplate(...), 'group': 'hadoop', 'mode': 0644, 'encoding': 'UTF-8'}
2019-07-10 05:48:39,807 - File['/usr/hdp/2.6.5.1175-1/hadoop/conf/slaves'] {'content': Template('slaves.j2'), 'owner': 'hdfs'}
2019-07-10 05:48:39,808 - Stack Feature Version Info: Cluster Stack=2.6, Command Stack=None, Command Version=2.6.5.1175-1 -&amp;gt; 2.6.5.1175-1
2019-07-10 05:48:39,813 - Directory['/var/lib/hadoop-hdfs'] {'owner': 'hdfs', 'create_parents': True, 'group': 'hadoop', 'mode': 0751}
2019-07-10 05:48:39,814 - Directory['/var/lib/ambari-agent/data/datanode'] {'create_parents': True, 'mode': 0755}
2019-07-10 05:48:39,824 - Host contains mounts: ['/sys', '/proc', '/dev', '/sys/kernel/security', '/dev/shm', '/dev/pts', '/run', '/sys/fs/cgroup', '/sys/fs/cgroup/systemd', '/sys/fs/pstore', '/sys/fs/cgroup/hugetlb', '/sys/fs/cgroup/memory', '/sys/fs/cgroup/perf_event', '/sys/fs/cgroup/pids', '/sys/fs/cgroup/devices', '/sys/fs/cgroup/cpuset', '/sys/fs/cgroup/net_cls,net_prio', '/sys/fs/cgroup/cpu,cpuacct', '/sys/fs/cgroup/blkio', '/sys/fs/cgroup/freezer', '/sys/kernel/config', '/', '/proc/sys/fs/binfmt_misc', '/dev/hugepages', '/dev/mqueue', '/sys/kernel/debug', '/boot', '/var/lib/nfs/rpc_pipefs', '/run/user/0'].
2019-07-10 05:48:39,824 - Mount point for directory /hadoop/hdfs/data is /
2019-07-10 05:48:39,825 - Mount point for directory /hadoop/hdfs/data is /
2019-07-10 05:48:39,825 - Forcefully ensuring existence and permissions of the directory: /hadoop/hdfs/data
2019-07-10 05:48:39,826 - Directory['/hadoop/hdfs/data'] {'group': 'hadoop', 'cd_access': 'a', 'create_parents': True, 'ignore_failures': True, 'mode': 0750, 'owner': 'hdfs'}
2019-07-10 05:48:39,827 - Changing permission for /hadoop/hdfs/data from 755 to 750
2019-07-10 05:48:39,837 - Host contains mounts: ['/sys', '/proc', '/dev', '/sys/kernel/security', '/dev/shm', '/dev/pts', '/run', '/sys/fs/cgroup', '/sys/fs/cgroup/systemd', '/sys/fs/pstore', '/sys/fs/cgroup/hugetlb', '/sys/fs/cgroup/memory', '/sys/fs/cgroup/perf_event', '/sys/fs/cgroup/pids', '/sys/fs/cgroup/devices', '/sys/fs/cgroup/cpuset', '/sys/fs/cgroup/net_cls,net_prio', '/sys/fs/cgroup/cpu,cpuacct', '/sys/fs/cgroup/blkio', '/sys/fs/cgroup/freezer', '/sys/kernel/config', '/', '/proc/sys/fs/binfmt_misc', '/dev/hugepages', '/dev/mqueue', '/sys/kernel/debug', '/boot', '/var/lib/nfs/rpc_pipefs', '/run/user/0'].
2019-07-10 05:48:39,838 - Mount point for directory /hadoop/hdfs/data is /
2019-07-10 05:48:39,838 - File['/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist'] {'content': '\n# This file keeps track of the last known mount-point for each dir.\n# It is safe to delete, since it will get regenerated the next time that the component of the service starts.\n# However, it is not advised to delete this file since Ambari may\n# re-create a dir that used to be mounted on a drive but is now mounted on the root.\n# Comments begin with a hash (#) symbol\n# dir,mount_point\n/hadoop/hdfs/data,/\n', 'owner': 'hdfs', 'group': 'hadoop', 'mode': 0644}
2019-07-10 05:48:39,842 - Directory['/var/run/hadoop'] {'owner': 'hdfs', 'group': 'hadoop', 'mode': 0755}
2019-07-10 05:48:39,842 - Changing owner for /var/run/hadoop from 0 to hdfs
2019-07-10 05:48:39,843 - Changing group for /var/run/hadoop from 0 to hadoop
2019-07-10 05:48:39,843 - Directory['/var/run/hadoop/hdfs'] {'owner': 'hdfs', 'group': 'hadoop', 'create_parents': True}
2019-07-10 05:48:39,844 - Directory['/var/log/hadoop/hdfs'] {'owner': 'hdfs', 'group': 'hadoop', 'create_parents': True}
2019-07-10 05:48:39,845 - File['/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid'] {'action': ['delete'], 'not_if': 'ambari-sudo.sh  -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid &amp;amp;&amp;amp; ambari-sudo.sh  -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid'}
2019-07-10 05:48:39,878 - Deleting File['/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid']
2019-07-10 05:48:39,879 - Execute['ambari-sudo.sh su hdfs -l -s /bin/bash -c 'ulimit -c unlimited ;  /usr/hdp/2.6.5.1175-1/hadoop/sbin/hadoop-daemon.sh --config /usr/hdp/2.6.5.1175-1/hadoop/conf start datanode''] {'environment': {'HADOOP_LIBEXEC_DIR': '/usr/hdp/2.6.5.1175-1/hadoop/libexec'}, 'not_if': 'ambari-sudo.sh  -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid &amp;amp;&amp;amp; ambari-sudo.sh  -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid'}
2019-07-10 05:48:44,152 - Execute['find /var/log/hadoop/hdfs -maxdepth 1 -type f -name '*' -exec echo '==&amp;gt; {} &amp;lt;==' \; -exec tail -n 40 {} \;'] {'logoutput': True, 'ignore_failures': True, 'user': 'hdfs'}
==&amp;gt; /var/log/hadoop/hdfs/hadoop-hdfs-datanode-worker2.sip.com.out.5 &amp;lt;==
Error: could not find libjava.so
Error: Could not find Java SE Runtime Environment.
ulimit -a for user hdfs
core file size          (blocks, -c) unlimited
data seg size           (kbytes, -d) unlimited
scheduling priority             (-e) 0
file size               (blocks, -f) unlimited
pending signals                 (-i) 97256
max locked memory       (kbytes, -l) 64
max memory size         (kbytes, -m) unlimited
open files                      (-n) 128000
pipe size            (512 bytes, -p) 8
POSIX message queues     (bytes, -q) 819200
real-time priority              (-r) 0
stack size              (kbytes, -s) 8192
cpu time               (seconds, -t) unlimited
max user processes              (-u) 65536
virtual memory          (kbytes, -v) unlimited
file locks                      (-x) unlimited
==&amp;gt; /var/log/hadoop/hdfs/hadoop-hdfs-datanode-worker2.sip.com.out.4 &amp;lt;==
Error: could not find libjava.so
Error: Could not find Java SE Runtime Environment.
ulimit -a for user hdfs
core file size          (blocks, -c) unlimited
data seg size           (kbytes, -d) unlimited
scheduling priority             (-e) 0
file size               (blocks, -f) unlimited
pending signals                 (-i) 97256
max locked memory       (kbytes, -l) 64
max memory size         (kbytes, -m) unlimited
open files                      (-n) 128000
pipe size            (512 bytes, -p) 8
POSIX message queues     (bytes, -q) 819200
real-time priority              (-r) 0
stack size              (kbytes, -s) 8192
cpu time               (seconds, -t) unlimited
max user processes              (-u) 65536
virtual memory          (kbytes, -v) unlimited
file locks                      (-x) unlimited
==&amp;gt; /var/log/hadoop/hdfs/hadoop-hdfs-datanode-worker2.sip.com.out.3 &amp;lt;==
Error: could not find libjava.so
Error: Could not find Java SE Runtime Environment.
ulimit -a for user hdfs
core file size          (blocks, -c) unlimited
data seg size           (kbytes, -d) unlimited
scheduling priority             (-e) 0
file size               (blocks, -f) unlimited
pending signals                 (-i) 97256
max locked memory       (kbytes, -l) 64
max memory size         (kbytes, -m) unlimited
open files                      (-n) 128000
pipe size            (512 bytes, -p) 8
POSIX message queues     (bytes, -q) 819200
real-time priority              (-r) 0
stack size              (kbytes, -s) 8192
cpu time               (seconds, -t) unlimited
max user processes              (-u) 65536
virtual memory          (kbytes, -v) unlimited
file locks                      (-x) unlimited
==&amp;gt; /var/log/hadoop/hdfs/hadoop-hdfs-datanode-worker2.sip.com.out.2 &amp;lt;==
Error: could not find libjava.so
Error: Could not find Java SE Runtime Environment.
ulimit -a for user hdfs
core file size          (blocks, -c) unlimited
data seg size           (kbytes, -d) unlimited
scheduling priority             (-e) 0
file size               (blocks, -f) unlimited
pending signals                 (-i) 97256
max locked memory       (kbytes, -l) 64
max memory size         (kbytes, -m) unlimited
open files                      (-n) 32768
pipe size            (512 bytes, -p) 8
POSIX message queues     (bytes, -q) 819200
real-time priority              (-r) 0
stack size              (kbytes, -s) 8192
cpu time               (seconds, -t) unlimited
max user processes              (-u) 65536
virtual memory          (kbytes, -v) unlimited
file locks                      (-x) unlimited
==&amp;gt; /var/log/hadoop/hdfs/hadoop-hdfs-datanode-worker2.sip.com.out.1 &amp;lt;==
Error: could not find libjava.so
Error: Could not find Java SE Runtime Environment.
ulimit -a for user hdfs
core file size          (blocks, -c) 10000
data seg size           (kbytes, -d) unlimited
scheduling priority             (-e) 0
file size               (blocks, -f) unlimited
pending signals                 (-i) 97256
max locked memory       (kbytes, -l) 64
max memory size         (kbytes, -m) unlimited
open files                      (-n) 32768
pipe size            (512 bytes, -p) 8
POSIX message queues     (bytes, -q) 819200
real-time priority              (-r) 0
stack size              (kbytes, -s) 8192
cpu time               (seconds, -t) unlimited
max user processes              (-u) 65536
virtual memory          (kbytes, -v) unlimited
file locks                      (-x) unlimited
==&amp;gt; /var/log/hadoop/hdfs/hadoop-hdfs-datanode-worker2.sip.com.out &amp;lt;==
Error: could not find libjava.so
Error: Could not find Java SE Runtime Environment.
ulimit -a for user hdfs
core file size          (blocks, -c) 10000
data seg size           (kbytes, -d) unlimited
scheduling priority             (-e) 0
file size               (blocks, -f) unlimited
pending signals                 (-i) 97256
max locked memory       (kbytes, -l) 64
max memory size         (kbytes, -m) unlimited
open files                      (-n) 32768
pipe size            (512 bytes, -p) 8
POSIX message queues     (bytes, -q) 819200
real-time priority              (-r) 0
stack size              (kbytes, -s) 8192
cpu time               (seconds, -t) unlimited
max user processes              (-u) 65536
virtual memory          (kbytes, -v) unlimited
file locks                      (-x) unlimited
Command failed after 1 tries&lt;/PRE&gt;</description>
      <pubDate>Wed, 10 Jul 2019 19:52:14 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Support-Questions/datanode-not-starting-only-on-1-node-by-ambari-installation/m-p/235205#M197025</guid>
      <dc:creator>abrahamfikire</dc:creator>
      <dc:date>2019-07-10T19:52:14Z</dc:date>
    </item>
    <item>
      <title>Re: datanode not starting only on 1 node by ambari installation</title>
      <link>https://community.cloudera.com/t5/Support-Questions/datanode-not-starting-only-on-1-node-by-ambari-installation/m-p/235206#M197026</link>
      <description>&lt;P&gt;&lt;A rel="noopener noreferrer" href="https://community.hortonworks.com/users/3418/jsensharma.html" target="_blank"&gt;https://community.hortonworks.com/users/3418/jsensharma.html&lt;/A&gt; please help me bro&lt;/P&gt;</description>
      <pubDate>Thu, 11 Jul 2019 15:43:01 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Support-Questions/datanode-not-starting-only-on-1-node-by-ambari-installation/m-p/235206#M197026</guid>
      <dc:creator>abrahamfikire</dc:creator>
      <dc:date>2019-07-11T15:43:01Z</dc:date>
    </item>
    <item>
      <title>Re: datanode not starting only on 1 node by ambari installation</title>
      <link>https://community.cloudera.com/t5/Support-Questions/datanode-not-starting-only-on-1-node-by-ambari-installation/m-p/235207#M197027</link>
      <description>&lt;P&gt;&lt;A rel="user" href="https://community.cloudera.com/users/97229/abrahamfikire.html"&gt;@abraham fikire&lt;/A&gt;&lt;/P&gt;&lt;PRE&gt;/var/log/hadoop/hdfs/hadoop-hdfs-datanode-worker2.sip.com.out.2
Error: could not find libjava.so
Error: Could not find Java SE Runtime Environment&lt;/PRE&gt;&lt;P&gt;We see the above error in your DataNode startup logs. Please verify the Java installation both as root and as the user who runs the DataNode process:&lt;/P&gt;&lt;PRE&gt;# java -version

# su - hdfs
# java -version&lt;/PRE&gt;&lt;P&gt;This error indicates that you might not have a valid JDK installed on that machine.&lt;/P&gt;&lt;P&gt;So please try the following:&lt;/P&gt;&lt;P&gt;1. Install a valid JDK 1.8 on the machine. You can download one from here: &lt;A href="https://www.oracle.com/technetwork/java/javase/downloads/jdk8-downloads-2133151.html"&gt;https://www.oracle.com/technetwork/java/javase/downloads/jdk8-downloads-2133151.html&lt;/A&gt;&lt;/P&gt;&lt;P&gt;2. Set JAVA_HOME as follows inside "/etc/profile" or "~/.bash_profile":&lt;/P&gt;&lt;PRE&gt;export JAVA_HOME=/PATH/TO/jdk1.8.0-120
export PATH=$JAVA_HOME/bin:$PATH&lt;/PRE&gt;&lt;P&gt;The path "/PATH/TO/jdk1.8.0-120" is a dummy path; please use your own JDK path there.&lt;/P&gt;
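&lt;P&gt;After installing, a quick way to confirm the host can actually resolve Java is a check like the one below. This is a minimal sketch: "/usr/jdk64/jdk1.8.0_112" is only an example install location (Ambari-managed HDP nodes often keep the JDK under /usr/jdk64), so substitute whatever path exists on your node.&lt;/P&gt;&lt;PRE&gt;# Run on the affected worker as root.
# 1. Does the JDK directory exist, and does it ship libjava.so?
ls -ld /usr/jdk64/jdk1.8.0_112                   # example path, adjust
find /usr/jdk64/jdk1.8.0_112 -name libjava.so    # should print at least one match

# 2. Does the hdfs user resolve the same Java?
su - hdfs -c 'echo $JAVA_HOME; java -version'

# 3. If JAVA_HOME is empty or points at a removed JDK, export it system-wide
#    (again an example path; adjust to your installation):
echo 'export JAVA_HOME=/usr/jdk64/jdk1.8.0_112' &amp;gt;&amp;gt; /etc/profile.d/java.sh
echo 'export PATH=$JAVA_HOME/bin:$PATH' &amp;gt;&amp;gt; /etc/profile.d/java.sh&lt;/PRE&gt;&lt;P&gt;Then retry the DataNode start from Ambari.&lt;/P&gt;</description>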
      <pubDate>Thu, 11 Jul 2019 15:50:30 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Support-Questions/datanode-not-starting-only-on-1-node-by-ambari-installation/m-p/235207#M197027</guid>
      <dc:creator>jsensharma</dc:creator>
      <dc:date>2019-07-11T15:50:30Z</dc:date>
    </item>
    <item>
      <title>Re: datanode not starting only on 1 node by ambari installation</title>
      <link>https://community.cloudera.com/t5/Support-Questions/datanode-not-starting-only-on-1-node-by-ambari-installation/m-p/235208#M197028</link>
      <description>&lt;H3 id="toc-hId-1450989093"&gt;@&lt;A rel="noopener noreferrer nofollow" href="https://community.hortonworks.com/users/3418/jsensharma.html" target="_blank"&gt;https://community.hortonworks.com/users/3418/jsensharma.html&lt;/A&gt; I did what you said, but there is no change.&lt;span class="lia-inline-image-display-wrapper lia-image-align-inline" image-alt="109841-capture.jpg" style="width: 552px;"&gt;&lt;img src="https://community.cloudera.com/t5/image/serverpage/image-id/14575i0AAE1C4951DAB733/image-size/medium?v=v2&amp;amp;px=400" role="button" title="109841-capture.jpg" alt="109841-capture.jpg" /&gt;&lt;/span&gt;&lt;/H3&gt;</description>
      <pubDate>Sat, 17 Aug 2019 23:54:24 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Support-Questions/datanode-not-starting-only-on-1-node-by-ambari-installation/m-p/235208#M197028</guid>
      <dc:creator>abrahamfikire</dc:creator>
      <dc:date>2019-08-17T23:54:24Z</dc:date>
    </item>
    <item>
      <title>Re: datanode not starting only on 1 node by ambari installation</title>
      <link>https://community.cloudera.com/t5/Support-Questions/datanode-not-starting-only-on-1-node-by-ambari-installation/m-p/235209#M197029</link>
      <description>&lt;H4 id="toc-hId-1254475589"&gt;This is the error:&lt;span class="lia-inline-image-display-wrapper lia-image-align-inline" image-alt="109851-1562919575579.png" style="width: 711px;"&gt;&lt;img src="https://community.cloudera.com/t5/image/serverpage/image-id/14574i7D97100A4211B2A2/image-size/medium?v=v2&amp;amp;px=400" role="button" title="109851-1562919575579.png" alt="109851-1562919575579.png" /&gt;&lt;/span&gt;&lt;/H4&gt;</description>
      <pubDate>Sat, 17 Aug 2019 23:54:16 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Support-Questions/datanode-not-starting-only-on-1-node-by-ambari-installation/m-p/235209#M197029</guid>
      <dc:creator>abrahamfikire</dc:creator>
      <dc:date>2019-08-17T23:54:16Z</dc:date>
    </item>
  </channel>
</rss>

