<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>question Re: Unable to start datanode - Too many failed volumes. in Archives of Support Questions (Read Only)</title>
    <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Unable-to-start-datanode-Too-many-failed-volumes/m-p/360487#M30107</link>
<description>&lt;PRE&gt;&amp;lt;configuration&amp;gt;
  &amp;lt;property&amp;gt;
    &amp;lt;name&amp;gt;dfs.replication&amp;lt;/name&amp;gt;
    &amp;lt;value&amp;gt;1&amp;lt;/value&amp;gt;
  &amp;lt;/property&amp;gt;
  &amp;lt;property&amp;gt;
    &amp;lt;name&amp;gt;dfs.namenode.name.dir&amp;lt;/name&amp;gt;
    &amp;lt;value&amp;gt;/hadoop-3.3.4/data/namenode&amp;lt;/value&amp;gt;
  &amp;lt;/property&amp;gt;
  &amp;lt;property&amp;gt;
    &amp;lt;name&amp;gt;dfs.datanode.data.dir&amp;lt;/name&amp;gt;
    &amp;lt;value&amp;gt;/hadoop-3.3.4/data/datanode&amp;lt;/value&amp;gt;
  &amp;lt;/property&amp;gt;
&amp;lt;/configuration&amp;gt;&lt;/PRE&gt;&lt;P&gt;I have this, but I still have problems.&lt;/P&gt;</description>
    <pubDate>Tue, 03 Jan 2023 19:42:52 GMT</pubDate>
    <dc:creator>janopolis73</dc:creator>
    <dc:date>2023-01-03T19:42:52Z</dc:date>
    <item>
      <title>Unable to start datanode - Too many failed volumes.</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Unable-to-start-datanode-Too-many-failed-volumes/m-p/108504#M30103</link>
      <description>&lt;P&gt;Hi,&lt;/P&gt;&lt;P&gt;After reinstalling HDP 2.3, I am getting the following error when I try to restart the service.&lt;/P&gt;&lt;PRE&gt;org.apache.hadoop.util.DiskChecker$DiskErrorException: Too many failed volumes - current valid volumes: 3, volumes configured: 9, volumes failed: 6, volume failures tolerated: 0
at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.&amp;lt;init&amp;gt;(FsDatasetImpl.java:289)
at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetFactory.newInstance(FsDatasetFactory.java:34)
at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetFactory.newInstance(FsDatasetFactory.java:30)
at org.apache.hadoop.hdfs.server.datanode.DataNode.initStorage(DataNode.java:1412)
at org.apache.hadoop.hdfs.server.datanode.DataNode.initBlockPool(DataNode.java:1364)
at org.apache.hadoop.hdfs.server.datanode.BPOfferService.verifyAndSetNamespaceInfo(BPOfferService.java:317)
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor.connectToNNAndHandshake(BPServiceActor.java:224)
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:821)
at java.lang.Thread.run(Thread.java:745)&lt;/PRE&gt;&lt;P&gt;When I dug into the data directories, some of them contain directories from the prior installation. How can I fix this issue?&lt;/P&gt;&lt;P&gt;Thanks in advance.&lt;/P&gt;&lt;P&gt;Regards,&lt;/P&gt;&lt;P&gt;Subramanian S.&lt;/P&gt;</description>
      <pubDate>Fri, 16 Sep 2022 10:22:25 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Unable-to-start-datanode-Too-many-failed-volumes/m-p/108504#M30103</guid>
      <dc:creator>rush2subbu</dc:creator>
      <dc:date>2022-09-16T10:22:25Z</dc:date>
    </item>
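    <!-- The error above says 9 volumes are configured but only 3 are valid. A quick way to
         check each configured directory is sketched below; the getconf lookup is standard,
         but the example paths are placeholders, not values from this thread. Every entry in
         dfs.datanode.data.dir must exist and be writable by the user running the DataNode.

         hdfs getconf -confKey dfs.datanode.data.dir
         ls -ld /grid/0/hadoop/hdfs/data /grid/1/hadoop/hdfs/data
    -->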
    <item>
      <title>Re: Unable to start datanode - Too many failed volumes.</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Unable-to-start-datanode-Too-many-failed-volumes/m-p/108505#M30104</link>
      <description>&lt;P&gt;&lt;A href="https://community.hortonworks.com/users/10651/rush2subbu.html"&gt;Subramanian Santhanam&lt;/A&gt;&lt;/P&gt;&lt;P&gt;Check your hdfs-site.xml for dfs.data.dir.&lt;/P&gt;&lt;P&gt;This is a comma-delimited list of directories. Remove what you do not need.&lt;/P&gt;&lt;P&gt;If this is Ambari-managed, then change it from Ambari:&lt;/P&gt;&lt;P&gt;HDFS -&amp;gt; Config -&amp;gt; DataNode directories&lt;/P&gt;&lt;P&gt;Ensure that it is configured correctly.&lt;/P&gt;</description>
      <pubDate>Mon, 30 May 2016 21:23:45 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Unable-to-start-datanode-Too-many-failed-volumes/m-p/108505#M30104</guid>
      <dc:creator>rpathak</dc:creator>
      <dc:date>2016-05-30T21:23:45Z</dc:date>
    </item>
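    <!-- A minimal hdfs-site.xml sketch of the comma-delimited list mentioned above
         (dfs.data.dir is the older, deprecated name for dfs.datanode.data.dir). The
         /grid/... paths are placeholders, not values from this thread; keep only
         directories that actually exist and drop stale entries from the prior install.

         <property>
           <name>dfs.datanode.data.dir</name>
           <value>/grid/0/hadoop/hdfs/data,/grid/1/hadoop/hdfs/data,/grid/2/hadoop/hdfs/data</value>
         </property>
    -->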
    <item>
      <title>Re: Unable to start datanode - Too many failed volumes.</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Unable-to-start-datanode-Too-many-failed-volumes/m-p/108506#M30105</link>
      <description>&lt;A rel="user" href="https://community.cloudera.com/users/10651/rush2subbu.html" nodeid="10651"&gt;@Subramanian Santhanam&lt;/A&gt;&lt;P&gt;Please check why you have 6 disk failures. As a workaround, you can do what Rahul has suggested in his answer, or you can increase the value of the property below to allow the DataNode to tolerate a given number of disk failures.&lt;/P&gt;&lt;PRE&gt;dfs.datanode.failed.volumes.tolerated - By default this is set to 0 in hdfs-site.xml&lt;/PRE&gt;</description>
      <pubDate>Tue, 31 May 2016 01:27:28 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Unable-to-start-datanode-Too-many-failed-volumes/m-p/108506#M30105</guid>
      <dc:creator>KuldeepK</dc:creator>
      <dc:date>2016-05-31T01:27:28Z</dc:date>
    </item>
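    <!-- A minimal hdfs-site.xml sketch of the workaround above; the value 2 is an arbitrary
         example, not a recommendation from this thread. With this set, the DataNode starts
         as long as no more than that many data directories are unusable; at 0 (the default)
         any single failed volume stops it.

         <property>
           <name>dfs.datanode.failed.volumes.tolerated</name>
           <value>2</value>
         </property>
    -->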
    <item>
      <title>Re: Unable to start datanode - Too many failed volumes.</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Unable-to-start-datanode-Too-many-failed-volumes/m-p/108507#M30106</link>
      <description>&lt;P&gt;Hi,&lt;/P&gt;&lt;P&gt;The issue is fixed. When I ran the cleanup script, I removed the users, so the folder ownership was left pointing at deleted accounts.&lt;/P&gt;&lt;P&gt;I fixed them using chown. Now it's working fine. Thanks &lt;A rel="user" href="https://community.cloudera.com/users/872/rahulpathak109.html" nodeid="872"&gt;@Rahul Pathak&lt;/A&gt; and &lt;A rel="user" href="https://community.cloudera.com/users/504/kkulkarni.html" nodeid="504"&gt;@Kuldeep Kulkarni&lt;/A&gt;.&lt;/P&gt;&lt;P&gt;Regards,&lt;/P&gt;&lt;P&gt;Subramanian S.&lt;/P&gt;</description>
      <pubDate>Tue, 31 May 2016 02:08:12 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Unable-to-start-datanode-Too-many-failed-volumes/m-p/108507#M30106</guid>
      <dc:creator>rush2subbu</dc:creator>
      <dc:date>2016-05-31T02:08:12Z</dc:date>
    </item>
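    <!-- A sketch of the ownership fix described above; the hdfs:hadoop owner/group and the
         path are assumptions and should match the service account and data directories
         actually configured on the node.

         chown -R hdfs:hadoop /grid/0/hadoop/hdfs/data
    -->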
    <item>
      <title>Re: Unable to start datanode - Too many failed volumes.</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Unable-to-start-datanode-Too-many-failed-volumes/m-p/360487#M30107</link>
      <description>&lt;PRE&gt;&amp;lt;configuration&amp;gt;
  &amp;lt;property&amp;gt;
    &amp;lt;name&amp;gt;dfs.replication&amp;lt;/name&amp;gt;
    &amp;lt;value&amp;gt;1&amp;lt;/value&amp;gt;
  &amp;lt;/property&amp;gt;
  &amp;lt;property&amp;gt;
    &amp;lt;name&amp;gt;dfs.namenode.name.dir&amp;lt;/name&amp;gt;
    &amp;lt;value&amp;gt;/hadoop-3.3.4/data/namenode&amp;lt;/value&amp;gt;
  &amp;lt;/property&amp;gt;
  &amp;lt;property&amp;gt;
    &amp;lt;name&amp;gt;dfs.datanode.data.dir&amp;lt;/name&amp;gt;
    &amp;lt;value&amp;gt;/hadoop-3.3.4/data/datanode&amp;lt;/value&amp;gt;
  &amp;lt;/property&amp;gt;
&amp;lt;/configuration&amp;gt;&lt;/PRE&gt;&lt;P&gt;I have this, but I still have problems.&lt;/P&gt;</description>
      <pubDate>Tue, 03 Jan 2023 19:42:52 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Unable-to-start-datanode-Too-many-failed-volumes/m-p/360487#M30107</guid>
      <dc:creator>janopolis73</dc:creator>
      <dc:date>2023-01-03T19:42:52Z</dc:date>
    </item>
  </channel>
</rss>

