<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" version="2.0">
  <channel>
    <title>question Datanode failure in Archives of Support Questions (Read Only)</title>
    <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Datanode-failure/m-p/98640#M12026</link>
    <description>&lt;P&gt;Under what circumstances could we notice a map-reduce job getting failed/terminated when one of the datanodes goes down?&lt;/P&gt;</description>
    <pubDate>Fri, 11 Dec 2015 05:36:00 GMT</pubDate>
    <dc:creator>ahshanmd</dc:creator>
    <dc:date>2015-12-11T05:36:00Z</dc:date>
    <item>
      <title>Datanode failure</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Datanode-failure/m-p/98640#M12026</link>
      <description>&lt;P&gt;Under what circumstances could we notice a map-reduce job getting failed/terminated when one of the datanodes goes down?&lt;/P&gt;</description>
      <pubDate>Fri, 11 Dec 2015 05:36:00 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Datanode-failure/m-p/98640#M12026</guid>
      <dc:creator>ahshanmd</dc:creator>
      <dc:date>2015-12-11T05:36:00Z</dc:date>
    </item>
    <item>
      <title>Re: Datanode failure</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Datanode-failure/m-p/98641#M12027</link>
      <description>&lt;P&gt;MapReduce job would not fail on a typical HDP cluster unless there is only one Datanode+Node Manager in the cluster. MapReduce tasks would fail on the datanode if datanode goes down but the same failed tasks on failed node (Datanode+NM) would be allocated on other datanodes where other replica of Data is present and MR job would continue.&lt;/P&gt;</description>
      <pubDate>Fri, 11 Dec 2015 06:03:29 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Datanode-failure/m-p/98641#M12027</guid>
      <dc:creator>pardeep_kumar</dc:creator>
      <dc:date>2015-12-11T06:03:29Z</dc:date>
    </item>
    <item>
      <title>Re: Datanode failure</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Datanode-failure/m-p/98642#M12028</link>
      <description>&lt;P&gt;Golden rule: for MRv2, a Hadoop cluster should always have an odd number of data nodes (3, 5, 7, 9, etc.) because, with the distributed workload architecture, any failed job is automatically restarted on the surviving data nodes. Remember to configure the mapred-site.xml parameter mapreduce.jobtracker.restart.recover to TRUE, and don't forget to set the number of tries in the mapreduce.map.maxattempts parameter in mapred-default.xml.&lt;/P&gt;</description>
      <pubDate>Fri, 11 Dec 2015 06:20:46 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Datanode-failure/m-p/98642#M12028</guid>
      <dc:creator>Shelton</dc:creator>
      <dc:date>2015-12-11T06:20:46Z</dc:date>
    </item>
  </channel>
</rss>

