<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>question Datanode WRITE_BLOCK Error in Archives of Support Questions (Read Only)</title>
    <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Datanode-WRITE-BLOCK-Error/m-p/58278#M65836</link>
    <description>&lt;P&gt;Hi,&lt;/P&gt;&lt;P&gt;I have 3 node Cloudera 5.9 Cluster running on CentOS 6.7.&lt;/P&gt;&lt;P&gt;Recently during any write operation on Hadoop, I am witnessing these errors in Datanode logs. However the write happens but I am concerned why this is happening.&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;PFB the stack trace.&lt;/P&gt;&lt;PRE&gt;2017-07-29 10:33:04,109 ERROR org.apache.hadoop.hdfs.server.datanode.DataNode: &amp;lt;datanodename&amp;gt;:50010:DataXceiver error processing WRITE_BLOCK
operation  src: /Y.Y.Y.Y:43298 dst: /X.X.X.X:50010
java.io.IOException: Premature EOF from inputStream
        at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:201)
        at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:213)
        at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:134)
        at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:109)
        at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:500)
        at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:896)
        at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:802)
        at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:169)
        at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:106)
        at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:246)
        at java.lang.Thread.run(Thread.java:745)
2017-07-29 10:36:06,172 ERROR org.apache.hadoop.jmx.JMXJsonServlet: getting attribute DatanodeNetworkCounts of Hadoop:service=DataNode,name=DataNodeInfo threw an exception
javax.management.RuntimeMBeanException: java.lang.NullPointerException
        at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.rethrow(DefaultMBeanServerInterceptor.java:839)
        at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.rethrowMaybeMBeanException(DefaultMBeanServerInterceptor.java:852)
        at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getAttribute(DefaultMBeanServerInterceptor.java:651)
        at com.sun.jmx.mbeanserver.JmxMBeanServer.getAttribute(JmxMBeanServer.java:678)
        at org.apache.hadoop.jmx.JMXJsonServlet.writeAttribute(JMXJsonServlet.java:346)
        at org.apache.hadoop.jmx.JMXJsonServlet.listBeans(JMXJsonServlet.java:324)
        at org.apache.hadoop.jmx.JMXJsonServlet.doGet(JMXJsonServlet.java:217)
        at javax.servlet.http.HttpServlet.service(HttpServlet.java:707)
        at javax.servlet.http.HttpServlet.service(HttpServlet.java:820)
        at org.mortbay.jetty.servlet.ServletHolder.handle(ServletHolder.java:511)
        at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1221)
        at org.apache.hadoop.http.lib.StaticUserWebFilter$StaticUserFilter.doFilter(StaticUserWebFilter.java:109)
        at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1212)
        at org.apache.hadoop.http.HttpServer2$QuotingInputFilter.doFilter(HttpServer2.java:1296)
        at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1212)
        at org.apache.hadoop.http.NoCacheFilter.doFilter(NoCacheFilter.java:45)
        at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1212)
        at org.apache.hadoop.http.NoCacheFilter.doFilter(NoCacheFilter.java:45)
        at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1212)
        at org.mortbay.jetty.servlet.ServletHandler.handle(ServletHandler.java:399)
        at org.mortbay.jetty.security.SecurityHandler.handle(SecurityHandler.java:216)
        at org.mortbay.jetty.servlet.SessionHandler.handle(SessionHandler.java:182)
        at org.mortbay.jetty.handler.ContextHandler.handle(ContextHandler.java:767)
        at org.mortbay.jetty.webapp.WebAppContext.handle(WebAppContext.java:450)
        at org.mortbay.jetty.handler.HandlerWrapper.handle(HandlerWrapper.java:152)
        at org.mortbay.jetty.Server.handle(Server.java:326)
        at org.mortbay.jetty.HttpConnection.handleRequest(HttpConnection.java:542)
        at org.mortbay.jetty.HttpConnection$RequestHandler.headerComplete(HttpConnection.java:928)
        at org.mortbay.jetty.HttpParser.parseNext(HttpParser.java:549)
        at org.mortbay.jetty.HttpParser.parseAvailable(HttpParser.java:212)
        at org.mortbay.jetty.HttpConnection.handle(HttpConnection.java:404)
        at org.mortbay.io.nio.SelectChannelEndPoint.run(SelectChannelEndPoint.java:410)
        at org.mortbay.thread.QueuedThreadPool$PoolThread.run(QueuedThreadPool.java:582)
Caused by: java.lang.NullPointerException
        at org.apache.hadoop.hdfs.server.datanode.DataNode.getDatanodeNetworkCounts(DataNode.java:1956)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:606)
        at sun.reflect.misc.Trampoline.invoke(MethodUtil.java:75)
        at sun.reflect.GeneratedMethodAccessor2.invoke(Unknown Source)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:606)
        at sun.reflect.misc.MethodUtil.invoke(MethodUtil.java:279)
        at com.sun.jmx.mbeanserver.ConvertingMethod.invokeWithOpenReturn(ConvertingMethod.java:193)
        at com.sun.jmx.mbeanserver.ConvertingMethod.invokeWithOpenReturn(ConvertingMethod.java:175)
        at com.sun.jmx.mbeanserver.MXBeanIntrospector.invokeM2(MXBeanIntrospector.java:117)
        at com.sun.jmx.mbeanserver.MXBeanIntrospector.invokeM2(MXBeanIntrospector.java:54)
        at com.sun.jmx.mbeanserver.MBeanIntrospector.invokeM(MBeanIntrospector.java:237)
        at com.sun.jmx.mbeanserver.PerInterface.getAttribute(PerInterface.java:83)
        at com.sun.jmx.mbeanserver.MBeanSupport.getAttribute(MBeanSupport.java:206)
        at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getAttribute(DefaultMBeanServerInterceptor.java:647)
        ... 31 more
2017-07-29 10:36:06,231 ERROR org.apache.hadoop.jmx.JMXJsonServlet: getting attribute NamenodeAddresses of Hadoop:service=DataNode,name=DataNodeInfo threw an exception
javax.management.RuntimeMBeanException: java.lang.NullPointerException
2017-07-31 14:49:41,561 ERROR org.apache.hadoop.hdfs.server.datanode.DataNode: &amp;lt;datanodename&amp;gt;:50010:DataXceiver error processing WRITE_BLOCK operation src: /Y.Y.Y.Y:43298 dst: /X.X.X.X:50010 java.io.IOException: Not ready to serve the block pool, BP-939287337-X.X.X.X-1484085163925.
        at org.apache.hadoop.hdfs.server.datanode.DataXceiver.checkAndWaitForBP(DataXceiver.java:1284)
        at org.apache.hadoop.hdfs.server.datanode.DataXceiver.checkAccess(DataXceiver.java:1292)
        at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:624)
        at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:169)
        at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:106)
        at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:246)
        at java.lang.Thread.run(Thread.java:745)&lt;/PRE&gt;&lt;P&gt;Some Important configurations of my cluster:&lt;/P&gt;&lt;BLOCKQUOTE&gt;&lt;P&gt;yarn.nodemanager.resource.memory-mb - 12GB yarn.scheduler.maximum-allocation-mb - 16GB mapreduce.map.memory.mb - 4GB mapreduce.reduce.memory.mb - 4GB mapreduce.map.java.opts.max.heap - 3GB mapreduce.reduce.java.opts.max.heap - 3GB namenode_java_heapsize - 6GB secondarynamenode_java_heapsize - 6GB dfs_datanode_max_locked_memory - 3GB&lt;/P&gt;&lt;P&gt;dfs blocksize - 128 MB&lt;/P&gt;&lt;/BLOCKQUOTE&gt;&lt;P&gt;Can anyone please help me?&lt;/P&gt;&lt;P&gt;Thanks,&lt;/P&gt;&lt;P&gt;Shilpa&lt;/P&gt;</description>
    <pubDate>Fri, 16 Sep 2022 12:01:39 GMT</pubDate>
    <dc:creator>ShilpaSinha</dc:creator>
    <dc:date>2022-09-16T12:01:39Z</dc:date>
    <item>
      <title>Datanode WRITE_BLOCK Error</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Datanode-WRITE-BLOCK-Error/m-p/58278#M65836</link>
      <description>&lt;P&gt;Hi,&lt;/P&gt;&lt;P&gt;I have 3 node Cloudera 5.9 Cluster running on CentOS 6.7.&lt;/P&gt;&lt;P&gt;Recently during any write operation on Hadoop, I am witnessing these errors in Datanode logs. However the write happens but I am concerned why this is happening.&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;PFB the stack trace.&lt;/P&gt;&lt;PRE&gt;2017-07-29 10:33:04,109 ERROR org.apache.hadoop.hdfs.server.datanode.DataNode: &amp;lt;datanodename&amp;gt;:50010:DataXceiver error processing WRITE_BLOCK
operation  src: /Y.Y.Y.Y:43298 dst: /X.X.X.X:50010
java.io.IOException: Premature EOF from inputStream
        at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:201)
        at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:213)
        at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:134)
        at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:109)
        at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:500)
        at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:896)
        at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:802)
        at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:169)
        at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:106)
        at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:246)
        at java.lang.Thread.run(Thread.java:745)
2017-07-29 10:36:06,172 ERROR org.apache.hadoop.jmx.JMXJsonServlet: getting attribute DatanodeNetworkCounts of Hadoop:service=DataNode,name=DataNodeInfo threw an exception
javax.management.RuntimeMBeanException: java.lang.NullPointerException
        at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.rethrow(DefaultMBeanServerInterceptor.java:839)
        at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.rethrowMaybeMBeanException(DefaultMBeanServerInterceptor.java:852)
        at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getAttribute(DefaultMBeanServerInterceptor.java:651)
        at com.sun.jmx.mbeanserver.JmxMBeanServer.getAttribute(JmxMBeanServer.java:678)
        at org.apache.hadoop.jmx.JMXJsonServlet.writeAttribute(JMXJsonServlet.java:346)
        at org.apache.hadoop.jmx.JMXJsonServlet.listBeans(JMXJsonServlet.java:324)
        at org.apache.hadoop.jmx.JMXJsonServlet.doGet(JMXJsonServlet.java:217)
        at javax.servlet.http.HttpServlet.service(HttpServlet.java:707)
        at javax.servlet.http.HttpServlet.service(HttpServlet.java:820)
        at org.mortbay.jetty.servlet.ServletHolder.handle(ServletHolder.java:511)
        at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1221)
        at org.apache.hadoop.http.lib.StaticUserWebFilter$StaticUserFilter.doFilter(StaticUserWebFilter.java:109)
        at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1212)
        at org.apache.hadoop.http.HttpServer2$QuotingInputFilter.doFilter(HttpServer2.java:1296)
        at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1212)
        at org.apache.hadoop.http.NoCacheFilter.doFilter(NoCacheFilter.java:45)
        at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1212)
        at org.apache.hadoop.http.NoCacheFilter.doFilter(NoCacheFilter.java:45)
        at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1212)
        at org.mortbay.jetty.servlet.ServletHandler.handle(ServletHandler.java:399)
        at org.mortbay.jetty.security.SecurityHandler.handle(SecurityHandler.java:216)
        at org.mortbay.jetty.servlet.SessionHandler.handle(SessionHandler.java:182)
        at org.mortbay.jetty.handler.ContextHandler.handle(ContextHandler.java:767)
        at org.mortbay.jetty.webapp.WebAppContext.handle(WebAppContext.java:450)
        at org.mortbay.jetty.handler.HandlerWrapper.handle(HandlerWrapper.java:152)
        at org.mortbay.jetty.Server.handle(Server.java:326)
        at org.mortbay.jetty.HttpConnection.handleRequest(HttpConnection.java:542)
        at org.mortbay.jetty.HttpConnection$RequestHandler.headerComplete(HttpConnection.java:928)
        at org.mortbay.jetty.HttpParser.parseNext(HttpParser.java:549)
        at org.mortbay.jetty.HttpParser.parseAvailable(HttpParser.java:212)
        at org.mortbay.jetty.HttpConnection.handle(HttpConnection.java:404)
        at org.mortbay.io.nio.SelectChannelEndPoint.run(SelectChannelEndPoint.java:410)
        at org.mortbay.thread.QueuedThreadPool$PoolThread.run(QueuedThreadPool.java:582)
Caused by: java.lang.NullPointerException
        at org.apache.hadoop.hdfs.server.datanode.DataNode.getDatanodeNetworkCounts(DataNode.java:1956)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:606)
        at sun.reflect.misc.Trampoline.invoke(MethodUtil.java:75)
        at sun.reflect.GeneratedMethodAccessor2.invoke(Unknown Source)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:606)
        at sun.reflect.misc.MethodUtil.invoke(MethodUtil.java:279)
        at com.sun.jmx.mbeanserver.ConvertingMethod.invokeWithOpenReturn(ConvertingMethod.java:193)
        at com.sun.jmx.mbeanserver.ConvertingMethod.invokeWithOpenReturn(ConvertingMethod.java:175)
        at com.sun.jmx.mbeanserver.MXBeanIntrospector.invokeM2(MXBeanIntrospector.java:117)
        at com.sun.jmx.mbeanserver.MXBeanIntrospector.invokeM2(MXBeanIntrospector.java:54)
        at com.sun.jmx.mbeanserver.MBeanIntrospector.invokeM(MBeanIntrospector.java:237)
        at com.sun.jmx.mbeanserver.PerInterface.getAttribute(PerInterface.java:83)
        at com.sun.jmx.mbeanserver.MBeanSupport.getAttribute(MBeanSupport.java:206)
        at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.getAttribute(DefaultMBeanServerInterceptor.java:647)
        ... 31 more
2017-07-29 10:36:06,231 ERROR org.apache.hadoop.jmx.JMXJsonServlet: getting attribute NamenodeAddresses of Hadoop:service=DataNode,name=DataNodeInfo threw an exception
javax.management.RuntimeMBeanException: java.lang.NullPointerException
2017-07-31 14:49:41,561 ERROR org.apache.hadoop.hdfs.server.datanode.DataNode: &amp;lt;datanodename&amp;gt;:50010:DataXceiver error processing WRITE_BLOCK operation src: /Y.Y.Y.Y:43298 dst: /X.X.X.X:50010 java.io.IOException: Not ready to serve the block pool, BP-939287337-X.X.X.X-1484085163925.
        at org.apache.hadoop.hdfs.server.datanode.DataXceiver.checkAndWaitForBP(DataXceiver.java:1284)
        at org.apache.hadoop.hdfs.server.datanode.DataXceiver.checkAccess(DataXceiver.java:1292)
        at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:624)
        at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:169)
        at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:106)
        at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:246)
        at java.lang.Thread.run(Thread.java:745)&lt;/PRE&gt;&lt;P&gt;Some Important configurations of my cluster:&lt;/P&gt;&lt;BLOCKQUOTE&gt;&lt;P&gt;yarn.nodemanager.resource.memory-mb - 12GB yarn.scheduler.maximum-allocation-mb - 16GB mapreduce.map.memory.mb - 4GB mapreduce.reduce.memory.mb - 4GB mapreduce.map.java.opts.max.heap - 3GB mapreduce.reduce.java.opts.max.heap - 3GB namenode_java_heapsize - 6GB secondarynamenode_java_heapsize - 6GB dfs_datanode_max_locked_memory - 3GB&lt;/P&gt;&lt;P&gt;dfs blocksize - 128 MB&lt;/P&gt;&lt;/BLOCKQUOTE&gt;&lt;P&gt;Can anyone please help me?&lt;/P&gt;&lt;P&gt;Thanks,&lt;/P&gt;&lt;P&gt;Shilpa&lt;/P&gt;</description>
      <pubDate>Fri, 16 Sep 2022 12:01:39 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Datanode-WRITE-BLOCK-Error/m-p/58278#M65836</guid>
      <dc:creator>ShilpaSinha</dc:creator>
      <dc:date>2022-09-16T12:01:39Z</dc:date>
    </item>
    <item>
      <title>Re: Datanode WRITE_BLOCK Error</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Datanode-WRITE-BLOCK-Error/m-p/58282#M65837</link>
      <description>&lt;P&gt;It is related to a JMX counter within the Datanode process. &amp;nbsp;I am not sure what it is counting but something within it is throwing an NPE. &amp;nbsp;This is likely coming after the write stream has processed all data but since it hits this exception it throws and exits. &amp;nbsp;It should be safe to ignore this error.&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;PRE&gt;getDatanodeNetworkCounts&lt;/PRE&gt;&lt;P&gt;&amp;nbsp;A related JIRA although it doesn't seem to be part of CDH yet.&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;&lt;A href="https://issues.apache.org/jira/browse/HDFS-7331" target="_blank"&gt;https://issues.apache.org/jira/browse/HDFS-7331&lt;/A&gt;&lt;/P&gt;</description>
      <pubDate>Tue, 01 Aug 2017 06:32:28 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Datanode-WRITE-BLOCK-Error/m-p/58282#M65837</guid>
      <dc:creator>mbigelow</dc:creator>
      <dc:date>2017-08-01T06:32:28Z</dc:date>
    </item>
    <item>
      <title>Re: Datanode WRITE_BLOCK Error</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Datanode-WRITE-BLOCK-Error/m-p/58296#M65838</link>
      <description>&lt;P&gt;Ok&amp;nbsp;&lt;a href="https://community.cloudera.com/t5/user/viewprofilepage/user-id/18127"&gt;@mbigelow&lt;/a&gt;&amp;nbsp;thanks. I also researched more and found this hortonworks link,&amp;nbsp;&lt;A href="https://community.hortonworks.com/questions/45962/dataxceiver-error-processing-write-block-operation.html" target="_blank"&gt;https://community.hortonworks.com/questions/45962/dataxceiver-error-processing-write-block-operation.html&lt;/A&gt; where they say we can ignore this error.&amp;nbsp;&lt;BR /&gt;&lt;BR /&gt;&lt;SPAN&gt;This issue has been already fixed in 2.3 version of ambari&amp;nbsp;but of course I am using CDH.&lt;/SPAN&gt;&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;&lt;SPAN&gt;Thanks,&lt;/SPAN&gt;&lt;/P&gt;&lt;P&gt;&lt;SPAN&gt;Shilpa&lt;/SPAN&gt;&lt;/P&gt;</description>
      <pubDate>Tue, 01 Aug 2017 16:53:17 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Datanode-WRITE-BLOCK-Error/m-p/58296#M65838</guid>
      <dc:creator>ShilpaSinha</dc:creator>
      <dc:date>2017-08-01T16:53:17Z</dc:date>
    </item>
  </channel>
</rss>

