Spark logs don't show anything other than a "Filesystem closed" exception

Hi,

I see the message below for all of my executors, and nothing else. My app was stuck for 10 hours, and another app got stuck for the same reason. Please advise what to do next.
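
For context, the stack trace corresponds to a job of roughly the shape sketched below: a SequenceFile read through the old Hadoop RDD API, cached with a disk-backed storage level (hence the HadoopRDD, SequenceFileRecordReader, and DiskStore.putIterator frames). This is only a sketch; the path, key/value types, and app name are placeholders, not the actual code.

import org.apache.hadoop.io.{LongWritable, Text}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.storage.StorageLevel

object SequenceFileJob {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("seqfile-cache"))

    // sc.sequenceFile builds a HadoopRDD over SequenceFileRecordReader,
    // matching the HadoopRDD / SequenceFileRecordReader frames in the trace.
    // Path and key/value classes are placeholders.
    val rdd = sc.sequenceFile("hdfs:///path/to/input", classOf[LongWritable], classOf[Text])

    // A disk-backed storage level would explain the DiskStore.putIterator
    // frames and the "Putting block rdd_3_NNNN failed" warnings.
    val cached = rdd.map { case (_, v) => v.toString }
      .persist(StorageLevel.MEMORY_AND_DISK_SER)

    println(cached.count())
    sc.stop()
  }
}

As far as I can tell, DFSClient.checkOpen throws this when the underlying HDFS client has already been closed, but nothing else in the logs says what closed it.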


17/06/24 23:01:38 WARN BlockManager: Putting block rdd_3_3033 failed
17/06/24 23:01:38 ERROR Executor: Exception in task 3033.0 in stage 0.0 (TID 511)
java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:839)
	at org.apache.hadoop.hdfs.DFSInputStream.readWithStrategy(DFSInputStream.java:875)
	at org.apache.hadoop.hdfs.DFSInputStream.read(DFSInputStream.java:942)
	at org.apache.hadoop.hdfs.DFSInputStream.read(DFSInputStream.java:742)
	at java.io.DataInputStream.readInt(DataInputStream.java:388)
	at org.apache.hadoop.io.SequenceFile$Reader.readRecordLength(SequenceFile.java:2407)
	at org.apache.hadoop.io.SequenceFile$Reader.next(SequenceFile.java:2438)
	at org.apache.hadoop.io.SequenceFile$Reader.next(SequenceFile.java:2575)
	at org.apache.hadoop.mapred.SequenceFileRecordReader.next(SequenceFileRecordReader.java:82)
	at org.apache.spark.rdd.HadoopRDD$$anon$1.getNext(HadoopRDD.scala:246)
	at org.apache.spark.rdd.HadoopRDD$$anon$1.getNext(HadoopRDD.scala:208)
	at org.apache.spark.util.NextIterator.hasNext(NextIterator.scala:73)
	at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:39)
	at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:327)
	at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:327)
	at scala.collection.Iterator$$anon$13.hasNext(Iterator.scala:371)
	at org.apache.spark.serializer.SerializationStream.writeAll(Serializer.scala:152)
	at org.apache.spark.storage.BlockManager.dataSerializeStream(BlockManager.scala:1196)
	at org.apache.spark.storage.DiskStore$$anonfun$putIterator$1.apply$mcV$sp(DiskStore.scala:81)
	at org.apache.spark.storage.DiskStore$$anonfun$putIterator$1.apply(DiskStore.scala:81)
	at org.apache.spark.storage.DiskStore$$anonfun$putIterator$1.apply(DiskStore.scala:81)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1251)
	at org.apache.spark.storage.DiskStore.putIterator(DiskStore.scala:82)
	at org.apache.spark.storage.BlockManager.doPut(BlockManager.scala:798)
	at org.apache.spark.storage.BlockManager.putIterator(BlockManager.scala:645)
	at org.apache.spark.CacheManager.putInBlockManager(CacheManager.scala:153)
	at org.apache.spark.CacheManager.getOrCompute(CacheManager.scala:78)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:268)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:73)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)
	at org.apache.spark.scheduler.Task.run(Task.scala:89)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:214)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
	at java.lang.Thread.run(Thread.java:745)
17/06/24 23:01:38 WARN BlockManager: Putting block rdd_3_3032 failed
17/06/24 23:01:38 ERROR Executor: Exception in task 3032.0 in stage 0.0 (TID 404)
java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:839)
	at org.apache.hadoop.hdfs.DFSInputStream.readWithStrategy(DFSInputStream.java:875)
	at org.apache.hadoop.hdfs.DFSInputStream.read(DFSInputStream.java:942)
	at org.apache.hadoop.hdfs.DFSInputStream.read(DFSInputStream.java:742)
	at java.io.DataInputStream.readInt(DataInputStream.java:387)
	at org.apache.hadoop.io.SequenceFile$Reader.readRecordLength(SequenceFile.java:2407)
	at org.apache.hadoop.io.SequenceFile$Reader.next(SequenceFile.java:2438)
	at org.apache.hadoop.io.SequenceFile$Reader.next(SequenceFile.java:2575)
	at org.apache.hadoop.mapred.SequenceFileRecordReader.next(SequenceFileRecordReader.java:82)
	at org.apache.spark.rdd.HadoopRDD$$anon$1.getNext(HadoopRDD.scala:246)
	at org.apache.spark.rdd.HadoopRDD$$anon$1.getNext(HadoopRDD.scala:208)
	at org.apache.spark.util.NextIterator.hasNext(NextIterator.scala:73)
	at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:39)
	at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:327)
	at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:327)
	at scala.collection.Iterator$$anon$13.hasNext(Iterator.scala:371)
	at org.apache.spark.serializer.SerializationStream.writeAll(Serializer.scala:152)
	at org.apache.spark.storage.BlockManager.dataSerializeStream(BlockManager.scala:1196)
	at org.apache.spark.storage.DiskStore$$anonfun$putIterator$1.apply$mcV$sp(DiskStore.scala:81)
	at org.apache.spark.storage.DiskStore$$anonfun$putIterator$1.apply(DiskStore.scala:81)
	at org.apache.spark.storage.DiskStore$$anonfun$putIterator$1.apply(DiskStore.scala:81)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1251)
	at org.apache.spark.storage.DiskStore.putIterator(DiskStore.scala:82)
	at org.apache.spark.storage.BlockManager.doPut(BlockManager.scala:798)
	at org.apache.spark.storage.BlockManager.putIterator(BlockManager.scala:645)
	at org.apache.spark.CacheManager.putInBlockManager(CacheManager.scala:153)
	at org.apache.spark.CacheManager.getOrCompute(CacheManager.scala:78)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:268)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:73)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)
	at org.apache.spark.scheduler.Task.run(Task.scala:89)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:214)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
	at java.lang.Thread.run(Thread.java:745)
