Member since: 10-27-2015
Posts: 12
Kudos Received: 1
Solutions: 2
My Accepted Solutions
| Title | Views | Posted |
|---|---|---|
| | 5015 | 03-01-2016 06:23 AM |
| | 17448 | 03-01-2016 03:29 AM |
04-25-2016
07:35 AM
Hi Kishore, make sure that your Spark program can resolve and reach the IP address of the Hadoop cluster.
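As a concrete illustration of that advice, here is a minimal, hypothetical sketch (Spark 1.x Scala, not the original poster's code): point the driver's Hadoop configuration at the cluster explicitly and verify reachability. The hostname and port are placeholders for the real cluster.

```scala
import org.apache.spark.{SparkConf, SparkContext}

// Hypothetical sketch: make an externally running driver target the cluster explicitly.
object ConnectivityCheck {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(
      new SparkConf().setAppName("ConnectivityCheck").setMaster("local[*]"))

    // Point the HDFS client at the cluster's NameNode rather than whatever the
    // local defaults resolve to ("quickstart.cloudera" is a placeholder).
    sc.hadoopConfiguration.set("fs.defaultFS", "hdfs://quickstart.cloudera:8020")

    // Quick reachability probe: list the HDFS root.
    org.apache.hadoop.fs.FileSystem.get(sc.hadoopConfiguration)
      .listStatus(new org.apache.hadoop.fs.Path("/"))
      .foreach(s => println(s.getPath))

    sc.stop()
  }
}
```

The cluster hostname also has to resolve from the developer machine (for example via a hosts-file entry mapping it to a reachable address); otherwise the driver may fall back to addresses it cannot reach.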
03-01-2016
04:30 AM
I ran a select count query in parquet table using spark sql but it throws an error : 16/03/01 04:12:52 INFO parse.ParseDriver: Parsing command: SELECT COUNT(*) FROM iis_log_parquet 16/03/01 04:12:53 INFO parse.ParseDriver: Parse Completed Exception in thread "main" java.lang.NoSuchFieldError: doubleTypeInfo at parquet.hive.serde.ArrayWritableObjectInspector.getObjectInspector(ArrayWritableObjectInspector.java:79) at parquet.hive.serde.ArrayWritableObjectInspector.<init>(ArrayWritableObjectInspector.java:72) at parquet.hive.serde.ParquetHiveSerDe.initialize(ParquetHiveSerDe.java:117) at org.apache.hadoop.hive.serde2.SerDeUtils.initializeSerDe(SerDeUtils.java:527) at org.apache.hadoop.hive.metastore.MetaStoreUtils.getDeserializer(MetaStoreUtils.java:391) at org.apache.hadoop.hive.ql.metadata.Table.getDeserializerFromMetaStore(Table.java:276) at org.apache.hadoop.hive.ql.metadata.Table.getDeserializer(Table.java:258) at org.apache.hadoop.hive.ql.metadata.Table.getCols(Table.java:605) at org.apache.spark.sql.hive.client.ClientWrapper$$anonfun$getTableOption$1$$anonfun$3.apply(ClientWrapper.scala:331) at org.apache.spark.sql.hive.client.ClientWrapper$$anonfun$getTableOption$1$$anonfun$3.apply(ClientWrapper.scala:326) at scala.Option.map(Option.scala:146) at org.apache.spark.sql.hive.client.ClientWrapper$$anonfun$getTableOption$1.apply(ClientWrapper.scala:326) at org.apache.spark.sql.hive.client.ClientWrapper$$anonfun$getTableOption$1.apply(ClientWrapper.scala:321) at org.apache.spark.sql.hive.client.ClientWrapper$$anonfun$withHiveState$1.apply(ClientWrapper.scala:279) at org.apache.spark.sql.hive.client.ClientWrapper.liftedTree1$1(ClientWrapper.scala:226) at org.apache.spark.sql.hive.client.ClientWrapper.retryLocked(ClientWrapper.scala:225) at org.apache.spark.sql.hive.client.ClientWrapper.withHiveState(ClientWrapper.scala:268) at org.apache.spark.sql.hive.client.ClientWrapper.getTableOption(ClientWrapper.scala:321) at org.apache.spark.sql.hive.client.ClientInterface$class.getTable(ClientInterface.scala:122) at org.apache.spark.sql.hive.client.ClientWrapper.getTable(ClientWrapper.scala:60) at org.apache.spark.sql.hive.HiveMetastoreCatalog.lookupRelation(HiveMetastoreCatalog.scala:384) at org.apache.spark.sql.hive.HiveContext$$anon$2.org$apache$spark$sql$catalyst$analysis$OverrideCatalog$$super$lookupRelation(HiveContext.scala:457) at org.apache.spark.sql.catalyst.analysis.OverrideCatalog$class.lookupRelation(Catalog.scala:161) at org.apache.spark.sql.hive.HiveContext$$anon$2.lookupRelation(HiveContext.scala:457) at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.getTable(Analyzer.scala:303) at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$$anonfun$apply$8.applyOrElse(Analyzer.scala:315) at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$$anonfun$apply$8.applyOrElse(Analyzer.scala:310) at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan$$anonfun$resolveOperators$1.apply(LogicalPlan.scala:57) at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan$$anonfun$resolveOperators$1.apply(LogicalPlan.scala:57) at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:53) at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperators(LogicalPlan.scala:56) at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan$$anonfun$1.apply(LogicalPlan.scala:54) at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan$$anonfun$1.apply(LogicalPlan.scala:54) at 
org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$4.apply(TreeNode.scala:265) at scala.collection.Iterator$$anon$11.next(Iterator.scala:370) at scala.collection.Iterator$class.foreach(Iterator.scala:742) at scala.collection.AbstractIterator.foreach(Iterator.scala:1194) at scala.collection.generic.Growable$class.$plus$plus$eq(Growable.scala:59) at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:104) at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:48) at scala.collection.TraversableOnce$class.to(TraversableOnce.scala:308) at scala.collection.AbstractIterator.to(Iterator.scala:1194) at scala.collection.TraversableOnce$class.toBuffer(TraversableOnce.scala:300) at scala.collection.AbstractIterator.toBuffer(Iterator.scala:1194) at scala.collection.TraversableOnce$class.toArray(TraversableOnce.scala:287) at scala.collection.AbstractIterator.toArray(Iterator.scala:1194) at org.apache.spark.sql.catalyst.trees.TreeNode.transformChildren(TreeNode.scala:305) at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperators(LogicalPlan.scala:54) at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.apply(Analyzer.scala:310) at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.apply(Analyzer.scala:300) at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1$$anonfun$apply$1.apply(RuleExecutor.scala:83) at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1$$anonfun$apply$1.apply(RuleExecutor.scala:80) at scala.collection.LinearSeqOptimized$class.foldLeft(LinearSeqOptimized.scala:124) at scala.collection.immutable.List.foldLeft(List.scala:84) at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1.apply(RuleExecutor.scala:80) at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1.apply(RuleExecutor.scala:72) at scala.collection.immutable.List.foreach(List.scala:381) at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:72) at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:36) at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:36) at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:34) at org.apache.spark.sql.DataFrame.<init>(DataFrame.scala:133) at org.apache.spark.sql.DataFrame$.apply(DataFrame.scala:52) at org.apache.spark.sql.SQLContext.sql(SQLContext.scala:817) at SparkPi$.main(SparkPi.scala:27) at SparkPi.main(SparkPi.scala) 16/03/01 04:12:53 INFO spark.SparkContext: Invoking stop() from shutdown hook 16/03/01 04:12:53 INFO handler.ContextHandler: stopped o.s.j.s.ServletContextHandler{/static/sql,null} 16/03/01 04:12:53 INFO handler.ContextHandler: stopped o.s.j.s.ServletContextHandler{/SQL/execution/json,null} 16/03/01 04:12:53 INFO handler.ContextHandler: stopped o.s.j.s.ServletContextHandler{/SQL/execution,null} 16/03/01 04:12:53 INFO handler.ContextHandler: stopped o.s.j.s.ServletContextHandler{/SQL/json,null} 16/03/01 04:12:53 INFO handler.ContextHandler: stopped o.s.j.s.ServletContextHandler{/SQL,null} 16/03/01 04:12:53 INFO handler.ContextHandler: stopped o.s.j.s.ServletContextHandler{/metrics/json,null} 16/03/01 04:12:53 INFO handler.ContextHandler: stopped o.s.j.s.ServletContextHandler{/stages/stage/kill,null} 16/03/01 04:12:53 INFO handler.ContextHandler: stopped o.s.j.s.ServletContextHandler{/api,null} 16/03/01 04:12:53 INFO handler.ContextHandler: stopped o.s.j.s.ServletContextHandler{/,null} 16/03/01 04:12:53 INFO 
handler.ContextHandler: stopped o.s.j.s.ServletContextHandler{/static,null} 16/03/01 04:12:53 INFO handler.ContextHandler: stopped o.s.j.s.ServletContextHandler{/executors/threadDump/json,null} 16/03/01 04:12:53 INFO handler.ContextHandler: stopped o.s.j.s.ServletContextHandler{/executors/threadDump,null} 16/03/01 04:12:53 INFO handler.ContextHandler: stopped o.s.j.s.ServletContextHandler{/executors/json,null} 16/03/01 04:12:53 INFO handler.ContextHandler: stopped o.s.j.s.ServletContextHandler{/executors,null} 16/03/01 04:12:53 INFO handler.ContextHandler: stopped o.s.j.s.ServletContextHandler{/environment/json,null} 16/03/01 04:12:53 INFO handler.ContextHandler: stopped o.s.j.s.ServletContextHandler{/environment,null} 16/03/01 04:12:53 INFO handler.ContextHandler: stopped o.s.j.s.ServletContextHandler{/storage/rdd/json,null} 16/03/01 04:12:53 INFO handler.ContextHandler: stopped o.s.j.s.ServletContextHandler{/storage/rdd,null} 16/03/01 04:12:53 INFO handler.ContextHandler: stopped o.s.j.s.ServletContextHandler{/storage/json,null} 16/03/01 04:12:53 INFO handler.ContextHandler: stopped o.s.j.s.ServletContextHandler{/storage,null} 16/03/01 04:12:53 INFO handler.ContextHandler: stopped o.s.j.s.ServletContextHandler{/stages/pool/json,null} 16/03/01 04:12:53 INFO handler.ContextHandler: stopped o.s.j.s.ServletContextHandler{/stages/pool,null} 16/03/01 04:12:53 INFO handler.ContextHandler: stopped o.s.j.s.ServletContextHandler{/stages/stage/json,null} 16/03/01 04:12:53 INFO handler.ContextHandler: stopped o.s.j.s.ServletContextHandler{/stages/stage,null} 16/03/01 04:12:53 INFO handler.ContextHandler: stopped o.s.j.s.ServletContextHandler{/stages/json,null} 16/03/01 04:12:53 INFO handler.ContextHandler: stopped o.s.j.s.ServletContextHandler{/stages,null} 16/03/01 04:12:53 INFO handler.ContextHandler: stopped o.s.j.s.ServletContextHandler{/jobs/job/json,null} 16/03/01 04:12:53 INFO handler.ContextHandler: stopped o.s.j.s.ServletContextHandler{/jobs/job,null} 16/03/01 04:12:53 INFO handler.ContextHandler: stopped o.s.j.s.ServletContextHandler{/jobs/json,null} 16/03/01 04:12:53 INFO handler.ContextHandler: stopped o.s.j.s.ServletContextHandler{/jobs,null} 16/03/01 04:12:53 INFO ui.SparkUI: Stopped Spark web UI at http://172.31.1.118:4040 16/03/01 04:12:53 INFO spark.MapOutputTrackerMasterEndpoint: MapOutputTrackerMasterEndpoint stopped! 16/03/01 04:12:53 INFO storage.MemoryStore: MemoryStore cleared 16/03/01 04:12:53 INFO storage.BlockManager: BlockManager stopped 16/03/01 04:12:53 INFO storage.BlockManagerMaster: BlockManagerMaster stopped 16/03/01 04:12:53 INFO scheduler.OutputCommitCoordinator$OutputCommitCoordinatorEndpoint: OutputCommitCoordinator stopped! 16/03/01 04:12:53 INFO spark.SparkContext: Successfully stopped SparkContext 16/03/01 04:12:53 INFO util.ShutdownHookManager: Shutdown hook called 16/03/01 04:12:53 INFO util.ShutdownHookManager: Deleting directory /tmp/spark-df6908b3-58ff-4ee8-83d3-c4b94bc2b541 16/03/01 04:12:53 INFO util.ShutdownHookManager: Deleting directory /tmp/spark-997a1278-94aa-4214-8569-3f597996891f 16/03/01 04:12:53 INFO remote.RemoteActorRefProvider$RemotingTerminator: Shutting down remote daemon. 16/03/01 04:12:53 INFO remote.RemoteActorRefProvider$RemotingTerminator: Remote daemon shut down; proceeding with flushing remote transports. Am not sure what am missing. Please help. Thanks!
Labels:
- Apache Hadoop
- Apache Hive
- Apache Spark
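For context, the `parquet.hive.serde.*` frames in the stack trace above come from the legacy parquet-hive bundle, and `NoSuchFieldError: doubleTypeInfo` appears to be a version clash between that old SerDe and the Hive 1.2.1 classes Spark 1.6 bundles. One hedged workaround, shown below as a sketch with placeholder paths, is to sidestep the table's old SerDe and let Spark read the Parquet files directly.

```scala
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.hive.HiveContext

// Hypothetical workaround sketch (Spark 1.6): bypass the legacy parquet.hive SerDe
// recorded in the table definition and read the Parquet files directly.
val sc = new SparkContext(
  new SparkConf().setAppName("parquet-direct").setMaster("local[*]"))
val sqlContext = new HiveContext(sc)

// Placeholder path: use the table's actual LOCATION (see DESCRIBE FORMATTED iis_log_parquet).
val df = sqlContext.read.parquet(
  "hdfs://quickstart.cloudera:8020/user/hive/warehouse/iis_log_parquet")
df.registerTempTable("iis_log_parquet_direct")
sqlContext.sql("SELECT COUNT(*) FROM iis_log_parquet_direct").show()
```

Recreating the table with Hive's built-in Parquet SerDe (`STORED AS PARQUET`), or removing the old parquet-hive-bundle jar from the driver classpath, are other commonly suggested fixes.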
03-01-2016
03:29 AM
I moved my development environment so that it sits on the same network as the Hadoop cluster, which avoided the issue.
02-29-2016
06:16 AM
It appears that it uses the private IP of the Cloudera VM instead of the EC2 Elastic IP. Is there a way to reroute this? Thanks!
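One commonly suggested knob for this situation (offered as an assumption, not a verified fix for this exact setup) is to have the HDFS client dial DataNodes by hostname instead of the private IP the NameNode advertises, and then map that hostname to the Elastic IP on the client side:

```scala
// Hypothetical sketch; `sc` is the SparkContext (predefined in spark-shell, or
// constructed in the driver as usual). Hostnames and addresses are placeholders.

// Ask the HDFS client to connect to DataNodes by hostname instead of the private
// IP the NameNode reports (standard HDFS client property).
sc.hadoopConfiguration.set("dfs.client.use.datanode.hostname", "true")

// Then, on the developer machine, resolve that hostname to the EC2 Elastic IP,
// e.g. a hosts-file entry:
//   <elastic-ip>   quickstart.cloudera
```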
02-29-2016
05:11 AM
Hi All, I created a simple select query using HiveContext in spark but it seems I have a connectivity issues between my cluster and Eclipse IDE. Is there any configuration should I apply to resolve this? Logs : Using Spark's default log4j profile: org/apache/spark/log4j-defaults.properties 16/02/29 21:04:43 INFO SparkContext: Running Spark version 1.6.0 16/02/29 21:04:54 INFO SecurityManager: Changing view acls to: Orson 16/02/29 21:04:54 INFO SecurityManager: Changing modify acls to: Orson 16/02/29 21:04:54 INFO SecurityManager: SecurityManager: authentication disabled; ui acls disabled; users with view permissions: Set(Orson); users with modify permissions: Set(Orson) 16/02/29 21:04:55 INFO Utils: Successfully started service 'sparkDriver' on port 60476. 16/02/29 21:04:55 INFO Slf4jLogger: Slf4jLogger started 16/02/29 21:04:55 INFO Remoting: Starting remoting 16/02/29 21:04:55 INFO Remoting: Remoting started; listening on addresses :[akka.tcp://sparkDriverActorSystem@192.168.181.1:60489] 16/02/29 21:04:55 INFO Utils: Successfully started service 'sparkDriverActorSystem' on port 60489. 16/02/29 21:04:55 INFO SparkEnv: Registering MapOutputTracker 16/02/29 21:04:55 INFO SparkEnv: Registering BlockManagerMaster 16/02/29 21:04:55 INFO DiskBlockManager: Created local directory at C:\Users\Orson\AppData\Local\Temp\blockmgr-7fdfa330-9d04-4bdc-a933-30b63c7a1710 16/02/29 21:04:55 INFO MemoryStore: MemoryStore started with capacity 6.4 GB 16/02/29 21:04:55 INFO SparkEnv: Registering OutputCommitCoordinator 16/02/29 21:04:56 INFO Utils: Successfully started service 'SparkUI' on port 4040. 16/02/29 21:04:56 INFO SparkUI: Started SparkUI at http://192.168.181.1:4040 16/02/29 21:04:56 INFO Executor: Starting executor ID driver on host localhost 16/02/29 21:04:56 INFO Utils: Successfully started service 'org.apache.spark.network.netty.NettyBlockTransferService' on port 60496. 16/02/29 21:04:56 INFO NettyBlockTransferService: Server created on 60496 16/02/29 21:04:56 INFO BlockManagerMaster: Trying to register BlockManager 16/02/29 21:04:56 INFO BlockManagerMasterEndpoint: Registering block manager localhost:60496 with 6.4 GB RAM, BlockManagerId(driver, localhost, 60496) 16/02/29 21:04:56 INFO BlockManagerMaster: Registered BlockManager 16/02/29 21:04:57 INFO HiveContext: Initializing execution hive, version 1.2.1 16/02/29 21:04:57 INFO ClientWrapper: Inspected Hadoop version: 2.2.0 16/02/29 21:04:57 INFO ClientWrapper: Loaded org.apache.hadoop.hive.shims.Hadoop23Shims for Hadoop version 2.2.0 16/02/29 21:04:57 INFO deprecation: mapred.reduce.tasks is deprecated. Instead, use mapreduce.job.reduces 16/02/29 21:04:57 INFO deprecation: mapred.min.split.size is deprecated. Instead, use mapreduce.input.fileinputformat.split.minsize 16/02/29 21:04:57 INFO deprecation: mapred.reduce.tasks.speculative.execution is deprecated. Instead, use mapreduce.reduce.speculative 16/02/29 21:04:57 INFO deprecation: mapred.min.split.size.per.node is deprecated. Instead, use mapreduce.input.fileinputformat.split.minsize.per.node 16/02/29 21:04:57 INFO deprecation: mapred.input.dir.recursive is deprecated. Instead, use mapreduce.input.fileinputformat.input.dir.recursive 16/02/29 21:04:57 INFO deprecation: mapred.min.split.size.per.rack is deprecated. Instead, use mapreduce.input.fileinputformat.split.minsize.per.rack 16/02/29 21:04:57 INFO deprecation: mapred.max.split.size is deprecated. 
Instead, use mapreduce.input.fileinputformat.split.maxsize 16/02/29 21:04:57 INFO deprecation: mapred.committer.job.setup.cleanup.needed is deprecated. Instead, use mapreduce.job.committer.setup.cleanup.needed 16/02/29 21:04:58 WARN HiveConf: HiveConf of name hive.enable.spark.execution.engine does not exist 16/02/29 21:04:58 INFO HiveMetaStore: 0: Opening raw store with implemenation class:org.apache.hadoop.hive.metastore.ObjectStore 16/02/29 21:04:58 INFO ObjectStore: ObjectStore, initialize called 16/02/29 21:04:58 INFO Persistence: Property datanucleus.cache.level2 unknown - will be ignored 16/02/29 21:04:58 INFO Persistence: Property hive.metastore.integral.jdo.pushdown unknown - will be ignored 16/02/29 21:05:07 WARN HiveConf: HiveConf of name hive.enable.spark.execution.engine does not exist 16/02/29 21:05:07 INFO ObjectStore: Setting MetaStore object pin classes with hive.metastore.cache.pinobjtypes="Table,StorageDescriptor,SerDeInfo,Partition,Database,Type,FieldSchema,Order" 16/02/29 21:05:09 INFO Datastore: The class "org.apache.hadoop.hive.metastore.model.MFieldSchema" is tagged as "embedded-only" so does not have its own datastore table. 16/02/29 21:05:09 INFO Datastore: The class "org.apache.hadoop.hive.metastore.model.MOrder" is tagged as "embedded-only" so does not have its own datastore table. 16/02/29 21:05:16 INFO Datastore: The class "org.apache.hadoop.hive.metastore.model.MFieldSchema" is tagged as "embedded-only" so does not have its own datastore table. 16/02/29 21:05:16 INFO Datastore: The class "org.apache.hadoop.hive.metastore.model.MOrder" is tagged as "embedded-only" so does not have its own datastore table. 16/02/29 21:05:18 INFO MetaStoreDirectSql: Using direct SQL, underlying DB is DERBY 16/02/29 21:05:18 INFO ObjectStore: Initialized ObjectStore 16/02/29 21:05:18 WARN ObjectStore: Version information not found in metastore. hive.metastore.schema.verification is not enabled so recording the schema version 1.2.0 16/02/29 21:05:19 WARN ObjectStore: Failed to get database default, returning NoSuchObjectException 16/02/29 21:05:21 WARN : Your hostname, solvento-orson resolves to a loopback/non-reachable address: fe80:0:0:0:0:5efe:c0a8:4801%42, but we couldn't find any external IP address! 16/02/29 21:05:27 INFO HiveMetaStore: Added admin role in metastore 16/02/29 21:05:27 INFO HiveMetaStore: Added public role in metastore 16/02/29 21:05:28 INFO HiveMetaStore: No user is added in admin role, since config is empty 16/02/29 21:05:28 INFO HiveMetaStore: 0: get_all_databases 16/02/29 21:05:28 INFO audit: ugi=Orson ip=unknown-ip-addr cmd=get_all_databases 16/02/29 21:05:28 INFO HiveMetaStore: 0: get_functions: db=default pat=* 16/02/29 21:05:28 INFO audit: ugi=Orson ip=unknown-ip-addr cmd=get_functions: db=default pat=* 16/02/29 21:05:28 INFO Datastore: The class "org.apache.hadoop.hive.metastore.model.MResourceUri" is tagged as "embedded-only" so does not have its own datastore table. 16/02/29 21:05:30 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable 16/02/29 21:05:30 INFO SessionState: Created local directory: C:/Users/Orson/AppData/Local/Temp/5746f851-1a41-433f-9183-380cc74b23e9_resources 16/02/29 21:05:30 INFO SessionState: Created HDFS directory: /tmp/hive/Orson/5746f851-1a41-433f-9183-380cc74b23e9 16/02/29 21:05:30 INFO SessionState: Created local directory: C:/Users/Orson/AppData/Local/Temp/Orson/5746f851-1a41-433f-9183-380cc74b23e9 16/02/29 21:05:30 INFO SessionState: Created HDFS directory: /tmp/hive/Orson/5746f851-1a41-433f-9183-380cc74b23e9/_tmp_space.db 16/02/29 21:05:31 WARN HiveConf: HiveConf of name hive.enable.spark.execution.engine does not exist 16/02/29 21:05:31 INFO HiveContext: default warehouse location is /user/hive/warehouse 16/02/29 21:05:31 INFO HiveContext: Initializing HiveMetastoreConnection version 1.2.1 using Spark classes. 16/02/29 21:05:31 INFO ClientWrapper: Inspected Hadoop version: 2.2.0 16/02/29 21:05:31 INFO ClientWrapper: Loaded org.apache.hadoop.hive.shims.Hadoop23Shims for Hadoop version 2.2.0 16/02/29 21:05:31 INFO deprecation: mapred.reduce.tasks is deprecated. Instead, use mapreduce.job.reduces 16/02/29 21:05:31 INFO deprecation: mapred.min.split.size is deprecated. Instead, use mapreduce.input.fileinputformat.split.minsize 16/02/29 21:05:31 INFO deprecation: mapred.reduce.tasks.speculative.execution is deprecated. Instead, use mapreduce.reduce.speculative 16/02/29 21:05:31 INFO deprecation: mapred.min.split.size.per.node is deprecated. Instead, use mapreduce.input.fileinputformat.split.minsize.per.node 16/02/29 21:05:31 INFO deprecation: mapred.input.dir.recursive is deprecated. Instead, use mapreduce.input.fileinputformat.input.dir.recursive 16/02/29 21:05:31 INFO deprecation: mapred.min.split.size.per.rack is deprecated. Instead, use mapreduce.input.fileinputformat.split.minsize.per.rack 16/02/29 21:05:31 INFO deprecation: mapred.max.split.size is deprecated. Instead, use mapreduce.input.fileinputformat.split.maxsize 16/02/29 21:05:31 INFO deprecation: mapred.committer.job.setup.cleanup.needed is deprecated. Instead, use mapreduce.job.committer.setup.cleanup.needed 16/02/29 21:05:31 WARN HiveConf: HiveConf of name hive.enable.spark.execution.engine does not exist 16/02/29 21:05:31 INFO metastore: Trying to connect to metastore with URI thrift://quickstart.cloudera:9083 16/02/29 21:05:31 INFO metastore: Connected to metastore. 
16/02/29 21:05:32 INFO SessionState: Created local directory: C:/Users/Orson/AppData/Local/Temp/634cfd84-fe30-4c5b-bce3-629f998e4c07_resources 16/02/29 21:05:32 INFO SessionState: Created HDFS directory: /tmp/hive/Orson/634cfd84-fe30-4c5b-bce3-629f998e4c07 16/02/29 21:05:32 INFO SessionState: Created local directory: C:/Users/Orson/AppData/Local/Temp/Orson/634cfd84-fe30-4c5b-bce3-629f998e4c07 16/02/29 21:05:32 INFO SessionState: Created HDFS directory: /tmp/hive/Orson/634cfd84-fe30-4c5b-bce3-629f998e4c07/_tmp_space.db 16/02/29 21:05:32 INFO ParseDriver: Parsing command: select * from flume_data limit 1 16/02/29 21:05:33 INFO ParseDriver: Parse Completed 16/02/29 21:05:34 INFO MemoryStore: Block broadcast_0 stored as values in memory (estimated size 423.8 KB, free 423.8 KB) 16/02/29 21:05:34 INFO MemoryStore: Block broadcast_0_piece0 stored as bytes in memory (estimated size 37.4 KB, free 461.2 KB) 16/02/29 21:05:34 INFO BlockManagerInfo: Added broadcast_0_piece0 in memory on localhost:60496 (size: 37.4 KB, free: 6.4 GB) 16/02/29 21:05:34 INFO SparkContext: Created broadcast 0 from show at SparkPi.scala:23 16/02/29 21:05:35 INFO FileInputFormat: Total input paths to process : 5 16/02/29 21:05:35 INFO SparkContext: Starting job: show at SparkPi.scala:23 16/02/29 21:05:35 INFO DAGScheduler: Got job 0 (show at SparkPi.scala:23) with 1 output partitions 16/02/29 21:05:35 INFO DAGScheduler: Final stage: ResultStage 0 (show at SparkPi.scala:23) 16/02/29 21:05:35 INFO DAGScheduler: Parents of final stage: List() 16/02/29 21:05:35 INFO DAGScheduler: Missing parents: List() 16/02/29 21:05:35 INFO DAGScheduler: Submitting ResultStage 0 (MapPartitionsRDD[3] at show at SparkPi.scala:23), which has no missing parents 16/02/29 21:05:35 INFO MemoryStore: Block broadcast_1 stored as values in memory (estimated size 5.8 KB, free 467.0 KB) 16/02/29 21:05:35 INFO MemoryStore: Block broadcast_1_piece0 stored as bytes in memory (estimated size 3.3 KB, free 470.2 KB) 16/02/29 21:05:35 INFO BlockManagerInfo: Added broadcast_1_piece0 in memory on localhost:60496 (size: 3.3 KB, free: 6.4 GB) 16/02/29 21:05:35 INFO SparkContext: Created broadcast 1 from broadcast at DAGScheduler.scala:1006 16/02/29 21:05:35 INFO DAGScheduler: Submitting 1 missing tasks from ResultStage 0 (MapPartitionsRDD[3] at show at SparkPi.scala:23) 16/02/29 21:05:35 INFO TaskSchedulerImpl: Adding task set 0.0 with 1 tasks 16/02/29 21:05:35 INFO TaskSetManager: Starting task 0.0 in stage 0.0 (TID 0, localhost, partition 0,ANY, 2108 bytes) 16/02/29 21:05:35 INFO Executor: Running task 0.0 in stage 0.0 (TID 0) 16/02/29 21:05:35 INFO HadoopRDD: Input split: hdfs://quickstart.cloudera:8020/user/cloudera/flume/landing/FlumeData.1455241486989.log:0+28 16/02/29 21:05:35 INFO deprecation: mapred.tip.id is deprecated. Instead, use mapreduce.task.id 16/02/29 21:05:35 INFO deprecation: mapred.task.id is deprecated. Instead, use mapreduce.task.attempt.id 16/02/29 21:05:35 INFO deprecation: mapred.task.is.map is deprecated. Instead, use mapreduce.task.ismap 16/02/29 21:05:35 INFO deprecation: mapred.task.partition is deprecated. Instead, use mapreduce.task.partition 16/02/29 21:05:35 INFO deprecation: mapred.job.id is deprecated. Instead, use mapreduce.job.id 16/02/29 21:05:56 WARN DFSClient: Failed to connect to /172.31.1.118:50010 for block, add to deadNodes and continue. 
java.net.ConnectException: Connection timed out: no further information java.net.ConnectException: Connection timed out: no further information at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method) at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:735) at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206) at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:529) at org.apache.hadoop.hdfs.DFSInputStream.newTcpPeer(DFSInputStream.java:955) at org.apache.hadoop.hdfs.DFSInputStream.getBlockReader(DFSInputStream.java:1107) at org.apache.hadoop.hdfs.DFSInputStream.blockSeekTo(DFSInputStream.java:533) at org.apache.hadoop.hdfs.DFSInputStream.readWithStrategy(DFSInputStream.java:749) at org.apache.hadoop.hdfs.DFSInputStream.read(DFSInputStream.java:793) at java.io.DataInputStream.read(DataInputStream.java:100) at org.apache.hadoop.util.LineReader.readDefaultLine(LineReader.java:211) at org.apache.hadoop.util.LineReader.readLine(LineReader.java:174) at org.apache.hadoop.mapred.LineRecordReader.next(LineRecordReader.java:206) at org.apache.hadoop.mapred.LineRecordReader.next(LineRecordReader.java:45) at org.apache.spark.rdd.HadoopRDD$$anon$1.getNext(HadoopRDD.scala:246) at org.apache.spark.rdd.HadoopRDD$$anon$1.getNext(HadoopRDD.scala:208) at org.apache.spark.util.NextIterator.hasNext(NextIterator.scala:73) at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:39) at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:369) at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:369) at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:369) at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:350) at scala.collection.Iterator$class.foreach(Iterator.scala:750) at scala.collection.AbstractIterator.foreach(Iterator.scala:1202) at scala.collection.generic.Growable$class.$plus$plus$eq(Growable.scala:59) at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:104) at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:48) at scala.collection.TraversableOnce$class.to(TraversableOnce.scala:295) at scala.collection.AbstractIterator.to(Iterator.scala:1202) at scala.collection.TraversableOnce$class.toBuffer(TraversableOnce.scala:287) at scala.collection.AbstractIterator.toBuffer(Iterator.scala:1202) at scala.collection.TraversableOnce$class.toArray(TraversableOnce.scala:274) at scala.collection.AbstractIterator.toArray(Iterator.scala:1202) at org.apache.spark.sql.execution.SparkPlan$$anonfun$5.apply(SparkPlan.scala:212) at org.apache.spark.sql.execution.SparkPlan$$anonfun$5.apply(SparkPlan.scala:212) at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1858) at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1858) at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66) at org.apache.spark.scheduler.Task.run(Task.scala:89) at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:213) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) at java.lang.Thread.run(Thread.java:744) 16/02/29 21:05:56 INFO DFSClient: Could not obtain BP-1614789257-127.0.0.1-1447880472993:blk_1073744408_3606 from any node: java.io.IOException: No live nodes contain current block. Will get new block locations from namenode and retry... 
16/02/29 21:05:56 WARN DFSClient: DFS chooseDataNode: got # 1 IOException, will wait for 882.0029599386166 msec. Thanks!
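The `Failed to connect to /172.31.1.118:50010` lines above are the HDFS client trying to reach a DataNode on the VM's private address. A minimal, hypothetical Spark 1.6 driver of the kind described in the post might look like the sketch below; the hostname-based routing settings are assumptions, not a verified configuration, and the table name mirrors the log output.

```scala
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.hive.HiveContext

// Hypothetical sketch of an IDE-launched driver against a remote quickstart VM.
object FlumeQuery {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(
      new SparkConf().setAppName("FlumeQuery").setMaster("local[*]"))

    // Reach the cluster by hostname so the driver is not handed private IPs
    // it cannot route to (see the DFSClient timeouts in the log above).
    sc.hadoopConfiguration.set("fs.defaultFS", "hdfs://quickstart.cloudera:8020")
    sc.hadoopConfiguration.set("dfs.client.use.datanode.hostname", "true")

    val sqlContext = new HiveContext(sc)
    sqlContext.sql("select * from flume_data limit 1").show()

    sc.stop()
  }
}
```

The metastore URI (`thrift://quickstart.cloudera:9083` in the log) would still come from the hive-site.xml on the driver's classpath, as in the original run.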
02-29-2016
04:37 AM
Hi All, Am trying to create a simple spark program in eclipse. Unfortunately, im getting an Out of Memory Error (Exception in thread "main" java.lang.OutOfMemoryError: PermGen space) Here's the configuration of my ini file --launcher.XXMaxPermSize 256m --launcher.defaultAction openFile -vmargs -Xms512m -Xmx1024m -XX:+UseParallelGC -XX:PermSize=8g -XX:MaxPermSize=10g Run configurations > Arguments : -Xmx10g Scala code : ... val sqlContext = new HiveContext(spark) sqlContext.sql("SELECT * from sample_csv limit 1") ... Logs : Using Spark's default log4j profile: org/apache/spark/log4j-defaults.properties 16/02/29 20:11:46 INFO SparkContext: Running Spark version 1.6.0 16/02/29 20:11:56 INFO SecurityManager: Changing view acls to: Orson 16/02/29 20:11:56 INFO SecurityManager: Changing modify acls to: Orson 16/02/29 20:11:56 INFO SecurityManager: SecurityManager: authentication disabled; ui acls disabled; users with view permissions: Set(Orson); users with modify permissions: Set(Orson) 16/02/29 20:11:57 INFO Utils: Successfully started service 'sparkDriver' on port 57135. 16/02/29 20:11:57 INFO Slf4jLogger: Slf4jLogger started 16/02/29 20:11:58 INFO Remoting: Starting remoting 16/02/29 20:11:58 INFO Remoting: Remoting started; listening on addresses :[akka.tcp://sparkDriverActorSystem@192.168.181.1:57148] 16/02/29 20:11:58 INFO Utils: Successfully started service 'sparkDriverActorSystem' on port 57148. 16/02/29 20:11:58 INFO SparkEnv: Registering MapOutputTracker 16/02/29 20:11:58 INFO SparkEnv: Registering BlockManagerMaster 16/02/29 20:11:58 INFO DiskBlockManager: Created local directory at C:\Users\Orson\AppData\Local\Temp\blockmgr-be56133f-c657-4146-9e19-cfae46545b70 16/02/29 20:11:58 INFO MemoryStore: MemoryStore started with capacity 6.4 GB 16/02/29 20:11:58 INFO SparkEnv: Registering OutputCommitCoordinator 16/02/29 20:11:58 INFO Utils: Successfully started service 'SparkUI' on port 4040. 16/02/29 20:11:58 INFO SparkUI: Started SparkUI at http://192.168.181.1:4040 16/02/29 20:11:58 INFO Executor: Starting executor ID driver on host localhost 16/02/29 20:11:58 INFO Utils: Successfully started service 'org.apache.spark.network.netty.NettyBlockTransferService' on port 57155. 16/02/29 20:11:58 INFO NettyBlockTransferService: Server created on 57155 16/02/29 20:11:58 INFO BlockManagerMaster: Trying to register BlockManager 16/02/29 20:11:58 INFO BlockManagerMasterEndpoint: Registering block manager localhost:57155 with 6.4 GB RAM, BlockManagerId(driver, localhost, 57155) 16/02/29 20:11:58 INFO BlockManagerMaster: Registered BlockManager 16/02/29 20:12:00 INFO HiveContext: Initializing execution hive, version 1.2.1 16/02/29 20:12:00 INFO ClientWrapper: Inspected Hadoop version: 2.2.0 16/02/29 20:12:00 INFO ClientWrapper: Loaded org.apache.hadoop.hive.shims.Hadoop23Shims for Hadoop version 2.2.0 16/02/29 20:12:00 INFO deprecation: mapred.reduce.tasks is deprecated. Instead, use mapreduce.job.reduces 16/02/29 20:12:00 INFO deprecation: mapred.min.split.size is deprecated. Instead, use mapreduce.input.fileinputformat.split.minsize 16/02/29 20:12:00 INFO deprecation: mapred.reduce.tasks.speculative.execution is deprecated. Instead, use mapreduce.reduce.speculative 16/02/29 20:12:00 INFO deprecation: mapred.min.split.size.per.node is deprecated. Instead, use mapreduce.input.fileinputformat.split.minsize.per.node 16/02/29 20:12:00 INFO deprecation: mapred.input.dir.recursive is deprecated. 
Instead, use mapreduce.input.fileinputformat.input.dir.recursive 16/02/29 20:12:00 INFO deprecation: mapred.min.split.size.per.rack is deprecated. Instead, use mapreduce.input.fileinputformat.split.minsize.per.rack 16/02/29 20:12:00 INFO deprecation: mapred.max.split.size is deprecated. Instead, use mapreduce.input.fileinputformat.split.maxsize 16/02/29 20:12:00 INFO deprecation: mapred.committer.job.setup.cleanup.needed is deprecated. Instead, use mapreduce.job.committer.setup.cleanup.needed 16/02/29 20:12:00 WARN HiveConf: HiveConf of name hive.enable.spark.execution.engine does not exist 16/02/29 20:12:00 INFO HiveMetaStore: 0: Opening raw store with implemenation class:org.apache.hadoop.hive.metastore.ObjectStore 16/02/29 20:12:00 INFO ObjectStore: ObjectStore, initialize called 16/02/29 20:12:01 INFO Persistence: Property datanucleus.cache.level2 unknown - will be ignored 16/02/29 20:12:01 INFO Persistence: Property hive.metastore.integral.jdo.pushdown unknown - will be ignored 16/02/29 20:12:11 WARN HiveConf: HiveConf of name hive.enable.spark.execution.engine does not exist 16/02/29 20:12:11 INFO ObjectStore: Setting MetaStore object pin classes with hive.metastore.cache.pinobjtypes="Table,StorageDescriptor,SerDeInfo,Partition,Database,Type,FieldSchema,Order" 16/02/29 20:12:13 INFO Datastore: The class "org.apache.hadoop.hive.metastore.model.MFieldSchema" is tagged as "embedded-only" so does not have its own datastore table. 16/02/29 20:12:13 INFO Datastore: The class "org.apache.hadoop.hive.metastore.model.MOrder" is tagged as "embedded-only" so does not have its own datastore table. 16/02/29 20:12:19 INFO Datastore: The class "org.apache.hadoop.hive.metastore.model.MFieldSchema" is tagged as "embedded-only" so does not have its own datastore table. 16/02/29 20:12:19 INFO Datastore: The class "org.apache.hadoop.hive.metastore.model.MOrder" is tagged as "embedded-only" so does not have its own datastore table. 16/02/29 20:12:21 INFO MetaStoreDirectSql: Using direct SQL, underlying DB is DERBY 16/02/29 20:12:21 INFO ObjectStore: Initialized ObjectStore 16/02/29 20:12:21 WARN ObjectStore: Version information not found in metastore. hive.metastore.schema.verification is not enabled so recording the schema version 1.2.0 16/02/29 20:12:22 WARN ObjectStore: Failed to get database default, returning NoSuchObjectException 16/02/29 20:12:24 WARN : Your hostname, solvento-orson resolves to a loopback/non-reachable address: fe80:0:0:0:0:5efe:c0a8:4801%42, but we couldn't find any external IP address! 16/02/29 20:12:25 INFO HiveMetaStore: Added admin role in metastore 16/02/29 20:12:25 INFO HiveMetaStore: Added public role in metastore 16/02/29 20:12:26 INFO HiveMetaStore: No user is added in admin role, since config is empty 16/02/29 20:12:26 INFO HiveMetaStore: 0: get_all_databases 16/02/29 20:12:26 INFO audit: ugi=Orson ip=unknown-ip-addr cmd=get_all_databases 16/02/29 20:12:26 INFO HiveMetaStore: 0: get_functions: db=default pat=* 16/02/29 20:12:26 INFO audit: ugi=Orson ip=unknown-ip-addr cmd=get_functions: db=default pat=* 16/02/29 20:12:26 INFO Datastore: The class "org.apache.hadoop.hive.metastore.model.MResourceUri" is tagged as "embedded-only" so does not have its own datastore table. 16/02/29 20:12:28 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable 16/02/29 20:12:28 INFO SessionState: Created local directory: C:/Users/Orson/AppData/Local/Temp/0c1b1e0d-5e6c-47b8-a8d5-5398e262c874_resources 16/02/29 20:12:28 INFO SessionState: Created HDFS directory: /tmp/hive/Orson/0c1b1e0d-5e6c-47b8-a8d5-5398e262c874 16/02/29 20:12:28 INFO SessionState: Created local directory: C:/Users/Orson/AppData/Local/Temp/Orson/0c1b1e0d-5e6c-47b8-a8d5-5398e262c874 16/02/29 20:12:28 INFO SessionState: Created HDFS directory: /tmp/hive/Orson/0c1b1e0d-5e6c-47b8-a8d5-5398e262c874/_tmp_space.db 16/02/29 20:12:28 WARN HiveConf: HiveConf of name hive.enable.spark.execution.engine does not exist 16/02/29 20:12:28 INFO HiveContext: default warehouse location is /user/hive/warehouse 16/02/29 20:12:28 INFO HiveContext: Initializing HiveMetastoreConnection version 1.2.1 using Spark classes. 16/02/29 20:12:28 INFO ClientWrapper: Inspected Hadoop version: 2.2.0 16/02/29 20:12:28 INFO ClientWrapper: Loaded org.apache.hadoop.hive.shims.Hadoop23Shims for Hadoop version 2.2.0 16/02/29 20:12:29 INFO deprecation: mapred.reduce.tasks is deprecated. Instead, use mapreduce.job.reduces 16/02/29 20:12:29 INFO deprecation: mapred.min.split.size is deprecated. Instead, use mapreduce.input.fileinputformat.split.minsize 16/02/29 20:12:29 INFO deprecation: mapred.reduce.tasks.speculative.execution is deprecated. Instead, use mapreduce.reduce.speculative 16/02/29 20:12:29 INFO deprecation: mapred.min.split.size.per.node is deprecated. Instead, use mapreduce.input.fileinputformat.split.minsize.per.node 16/02/29 20:12:29 INFO deprecation: mapred.input.dir.recursive is deprecated. Instead, use mapreduce.input.fileinputformat.input.dir.recursive 16/02/29 20:12:29 INFO deprecation: mapred.min.split.size.per.rack is deprecated. Instead, use mapreduce.input.fileinputformat.split.minsize.per.rack 16/02/29 20:12:29 INFO deprecation: mapred.max.split.size is deprecated. Instead, use mapreduce.input.fileinputformat.split.maxsize 16/02/29 20:12:29 INFO deprecation: mapred.committer.job.setup.cleanup.needed is deprecated. Instead, use mapreduce.job.committer.setup.cleanup.needed 16/02/29 20:12:29 WARN HiveConf: HiveConf of name hive.enable.spark.execution.engine does not exist 16/02/29 20:12:29 INFO metastore: Trying to connect to metastore with URI thrift://quickstart.cloudera:9083 16/02/29 20:12:29 INFO metastore: Connected to metastore. 
16/02/29 20:12:29 INFO SessionState: Created local directory: C:/Users/Orson/AppData/Local/Temp/ccfc9462-2c5a-49ce-a811-503694353c1a_resources 16/02/29 20:12:30 INFO SessionState: Created HDFS directory: /tmp/hive/Orson/ccfc9462-2c5a-49ce-a811-503694353c1a 16/02/29 20:12:30 INFO SessionState: Created local directory: C:/Users/Orson/AppData/Local/Temp/Orson/ccfc9462-2c5a-49ce-a811-503694353c1a 16/02/29 20:12:30 INFO SessionState: Created HDFS directory: /tmp/hive/Orson/ccfc9462-2c5a-49ce-a811-503694353c1a/_tmp_space.db 16/02/29 20:12:30 INFO ParseDriver: Parsing command: SELECT * from sample_csv limit 1 Exception in thread "main" java.lang.OutOfMemoryError: PermGen space at java.lang.ClassLoader.defineClass1(Native Method) at java.lang.ClassLoader.defineClass(ClassLoader.java:800) at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142) at java.net.URLClassLoader.defineClass(URLClassLoader.java:449) at java.net.URLClassLoader.access$100(URLClassLoader.java:71) at java.net.URLClassLoader$1.run(URLClassLoader.java:361) at java.net.URLClassLoader$1.run(URLClassLoader.java:355) at java.security.AccessController.doPrivileged(Native Method) at java.net.URLClassLoader.findClass(URLClassLoader.java:354) at java.lang.ClassLoader.loadClass(ClassLoader.java:425) at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308) at java.lang.ClassLoader.loadClass(ClassLoader.java:358) at org.apache.hadoop.hive.ql.parse.HiveParser_IdentifiersParser.<init>(HiveParser_IdentifiersParser.java:12377) at org.apache.hadoop.hive.ql.parse.HiveParser.<init>(HiveParser.java:706) at org.apache.hadoop.hive.ql.parse.HiveParser.<init>(HiveParser.java:700) at org.apache.hadoop.hive.ql.parse.ParseDriver.parse(ParseDriver.java:195) at org.apache.hadoop.hive.ql.parse.ParseDriver.parse(ParseDriver.java:166) at org.apache.spark.sql.hive.HiveQl$.getAst(HiveQl.scala:276) at org.apache.spark.sql.hive.HiveQl$.createPlan(HiveQl.scala:303) at org.apache.spark.sql.hive.ExtendedHiveQlParser$$anonfun$hiveQl$1.apply(ExtendedHiveQlParser.scala:41) at org.apache.spark.sql.hive.ExtendedHiveQlParser$$anonfun$hiveQl$1.apply(ExtendedHiveQlParser.scala:40) at scala.util.parsing.combinator.Parsers$Success.map(Parsers.scala:137) at scala.util.parsing.combinator.Parsers$Success.map(Parsers.scala:136) at scala.util.parsing.combinator.Parsers$Parser$$anonfun$map$1.apply(Parsers.scala:237) at scala.util.parsing.combinator.Parsers$Parser$$anonfun$map$1.apply(Parsers.scala:237) at scala.util.parsing.combinator.Parsers$$anon$3.apply(Parsers.scala:217) at scala.util.parsing.combinator.Parsers$Parser$$anonfun$append$1$$anonfun$apply$2.apply(Parsers.scala:249) at scala.util.parsing.combinator.Parsers$Parser$$anonfun$append$1$$anonfun$apply$2.apply(Parsers.scala:249) at scala.util.parsing.combinator.Parsers$Failure.append(Parsers.scala:197) at scala.util.parsing.combinator.Parsers$Parser$$anonfun$append$1.apply(Parsers.scala:249) at scala.util.parsing.combinator.Parsers$Parser$$anonfun$append$1.apply(Parsers.scala:249) at scala.util.parsing.combinator.Parsers$$anon$3.apply(Parsers.scala:217) 16/02/29 20:12:32 INFO SparkContext: Invoking stop() from shutdown hook 16/02/29 20:12:32 INFO SparkUI: Stopped Spark web UI at http://192.168.181.1:4040 16/02/29 20:12:32 INFO MapOutputTrackerMasterEndpoint: MapOutputTrackerMasterEndpoint stopped! 
16/02/29 20:12:32 INFO MemoryStore: MemoryStore cleared 16/02/29 20:12:32 INFO BlockManager: BlockManager stopped 16/02/29 20:12:32 INFO BlockManagerMaster: BlockManagerMaster stopped 16/02/29 20:12:32 INFO OutputCommitCoordinator$OutputCommitCoordinatorEndpoint: OutputCommitCoordinator stopped! 16/02/29 20:12:32 INFO SparkContext: Successfully stopped SparkContext 16/02/29 20:12:32 INFO ShutdownHookManager: Shutdown hook called 16/02/29 20:12:32 INFO ShutdownHookManager: Deleting directory C:\Users\Orson\AppData\Local\Temp\spark-7efe7a3c-e47c-41a0-8e94-a1fd19ca7197 16/02/29 20:12:32 INFO ShutdownHookManager: Deleting directory C:\Users\Orson\AppData\Local\Temp\spark-b20f169a-0a3b-426e-985e-6641b3be3fd6 16/02/29 20:12:32 INFO RemoteActorRefProvider$RemotingTerminator: Shutting down remote daemon. 16/02/29 20:12:32 INFO RemoteActorRefProvider$RemotingTerminator: Remote daemon shut down; proceeding with flushing remote transports. 16/02/29 20:12:32 INFO RemoteActorRefProvider$RemotingTerminator: Remoting shut down. 16/02/29 20:12:32 ERROR ShutdownHookManager: Exception while deleting Spark temp dir: C:\Users\Orson\AppData\Local\Temp\spark-b20f169a-0a3b-426e-985e-6641b3be3fd6 java.io.IOException: Failed to delete: C:\Users\Orson\AppData\Local\Temp\spark-b20f169a-0a3b-426e-985e-6641b3be3fd6 at org.apache.spark.util.Utils$.deleteRecursively(Utils.scala:928) at org.apache.spark.util.ShutdownHookManager$$anonfun$1$$anonfun$apply$mcV$sp$3.apply(ShutdownHookManager.scala:65) at org.apache.spark.util.ShutdownHookManager$$anonfun$1$$anonfun$apply$mcV$sp$3.apply(ShutdownHookManager.scala:62) at scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) at org.apache.spark.util.ShutdownHookManager$$anonfun$1.apply$mcV$sp(ShutdownHookManager.scala:62) at org.apache.spark.util.SparkShutdownHook.run(ShutdownHookManager.scala:267) at org.apache.spark.util.SparkShutdownHookManager$$anonfun$runAll$1$$anonfun$apply$mcV$sp$1.apply$mcV$sp(ShutdownHookManager.scala:239) at org.apache.spark.util.SparkShutdownHookManager$$anonfun$runAll$1$$anonfun$apply$mcV$sp$1.apply(ShutdownHookManager.scala:239) at org.apache.spark.util.SparkShutdownHookManager$$anonfun$runAll$1$$anonfun$apply$mcV$sp$1.apply(ShutdownHookManager.scala:239) at org.apache.spark.util.Utils$.logUncaughtExceptions(Utils.scala:1741) at org.apache.spark.util.SparkShutdownHookManager$$anonfun$runAll$1.apply$mcV$sp(ShutdownHookManager.scala:239) at org.apache.spark.util.SparkShutdownHookManager$$anonfun$runAll$1.apply(ShutdownHookManager.scala:239) at org.apache.spark.util.SparkShutdownHookManager$$anonfun$runAll$1.apply(ShutdownHookManager.scala:239) at scala.util.Try$.apply(Try.scala:191) at org.apache.spark.util.SparkShutdownHookManager.runAll(ShutdownHookManager.scala:239) at org.apache.spark.util.SparkShutdownHookManager$$anon$2.run(ShutdownHookManager.scala:218) at org.apache.hadoop.util.ShutdownHookManager$1.run(ShutdownHookManager.java:54) Thanks!
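Two details in the configuration above are worth noting: the `-vmargs` section of `eclipse.ini` sizes the IDE's own JVM, not the JVM Eclipse launches for the application, and if `-Xmx10g` was entered in the Program arguments field it is passed to `main()` rather than to the JVM. A hedged sketch of Run Configuration VM arguments that would actually raise the driver's PermGen on Java 7 (the values are placeholders, not recommendations):

```
# Eclipse: Run Configurations > <your launch> > Arguments > "VM arguments" field
-Xmx2g
-XX:MaxPermSize=512m
```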