[testuser@fqdn01 ~]$ spark-submit --class org.apache.spark.examples.SparkPi --master yarn-client --num-executors 1 --driver-memory 512m --executor-memory 512m --conf spark.yarn.maxAppAttempts=1 --executor-cores 1 /usr/hdp/2.5.5.0-157/spark/lib/spark-examples-1.6.3.2.5.5.0-157-hadoop2.7.3.2.5.5.0-157.jar 10
17/12/21 08:07:39 INFO SparkContext: Running Spark version 1.6.3
17/12/21 08:07:40 INFO SecurityManager: Changing view acls to: testuser
17/12/21 08:07:40 INFO SecurityManager: Changing modify acls to: testuser
17/12/21 08:07:40 INFO SecurityManager: SecurityManager: authentication disabled; ui acls disabled; users with view permissions: Set(testuser); users with modify permissions: Set(testuser)
17/12/21 08:07:41 INFO Utils: Successfully started service 'sparkDriver' on port 40454.
17/12/21 08:07:41 INFO Slf4jLogger: Slf4jLogger started
17/12/21 08:07:41 INFO Remoting: Starting remoting
17/12/21 08:07:42 INFO Remoting: Remoting started; listening on addresses :[akka.tcp://sparkDriverActorSystem@192.168.0.58:44978]
17/12/21 08:07:42 INFO Utils: Successfully started service 'sparkDriverActorSystem' on port 44978.
17/12/21 08:07:42 INFO SparkEnv: Registering MapOutputTracker
17/12/21 08:07:42 INFO SparkEnv: Registering BlockManagerMaster
17/12/21 08:07:42 INFO DiskBlockManager: Created local directory at /tmp/blockmgr-10e21913-f518-480c-87e0-e83df9561983
17/12/21 08:07:42 INFO MemoryStore: MemoryStore started with capacity 143.3 MB
17/12/21 08:07:42 INFO SparkEnv: Registering OutputCommitCoordinator
17/12/21 08:07:42 INFO Server: jetty-8.y.z-SNAPSHOT
17/12/21 08:07:43 WARN AbstractLifeCycle: FAILED SelectChannelConnector@0.0.0.0:4040: java.net.BindException: Address already in use
java.net.BindException: Address already in use
        at sun.nio.ch.Net.bind0(Native Method)
        at sun.nio.ch.Net.bind(Net.java:433)
        at sun.nio.ch.Net.bind(Net.java:425)
        at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:223)
        at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
        at org.spark-project.jetty.server.nio.SelectChannelConnector.open(SelectChannelConnector.java:187)
        at org.spark-project.jetty.server.AbstractConnector.doStart(AbstractConnector.java:316)
        at org.spark-project.jetty.server.nio.SelectChannelConnector.doStart(SelectChannelConnector.java:265)
        at org.spark-project.jetty.util.component.AbstractLifeCycle.start(AbstractLifeCycle.java:64)
        at org.spark-project.jetty.server.Server.doStart(Server.java:293)
        at org.spark-project.jetty.util.component.AbstractLifeCycle.start(AbstractLifeCycle.java:64)
        at org.apache.spark.ui.JettyUtils$.org$apache$spark$ui$JettyUtils$$connect$1(JettyUtils.scala:252)
        at org.apache.spark.ui.JettyUtils$$anonfun$5.apply(JettyUtils.scala:262)
        at org.apache.spark.ui.JettyUtils$$anonfun$5.apply(JettyUtils.scala:262)
        at org.apache.spark.util.Utils$$anonfun$startServiceOnPort$1.apply$mcVI$sp(Utils.scala:2040)
        at scala.collection.immutable.Range.foreach$mVc$sp(Range.scala:141)
        at org.apache.spark.util.Utils$.startServiceOnPort(Utils.scala:2031)
        at org.apache.spark.ui.JettyUtils$.startJettyServer(JettyUtils.scala:262)
        at org.apache.spark.ui.WebUI.bind(WebUI.scala:137)
        at org.apache.spark.SparkContext$$anonfun$13.apply(SparkContext.scala:481)
        at org.apache.spark.SparkContext$$anonfun$13.apply(SparkContext.scala:481)
        at scala.Option.foreach(Option.scala:236)
        at org.apache.spark.SparkContext.<init>(SparkContext.scala:481)
        at org.apache.spark.examples.SparkPi$.main(SparkPi.scala:29)
        at org.apache.spark.examples.SparkPi.main(SparkPi.scala)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:738)
        at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:181)
        at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:206)
        at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:121)
        at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
17/12/21 08:07:43 WARN AbstractLifeCycle: FAILED org.spark-project.jetty.server.Server@14c053c6: java.net.BindException: Address already in use
java.net.BindException: Address already in use
        at sun.nio.ch.Net.bind0(Native Method)
        at sun.nio.ch.Net.bind(Net.java:433)
        at sun.nio.ch.Net.bind(Net.java:425)
        at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:223)
        at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
        at org.spark-project.jetty.server.nio.SelectChannelConnector.open(SelectChannelConnector.java:187)
        at org.spark-project.jetty.server.AbstractConnector.doStart(AbstractConnector.java:316)
        at org.spark-project.jetty.server.nio.SelectChannelConnector.doStart(SelectChannelConnector.java:265)
        at org.spark-project.jetty.util.component.AbstractLifeCycle.start(AbstractLifeCycle.java:64)
        at org.spark-project.jetty.server.Server.doStart(Server.java:293)
        at org.spark-project.jetty.util.component.AbstractLifeCycle.start(AbstractLifeCycle.java:64)
        at org.apache.spark.ui.JettyUtils$.org$apache$spark$ui$JettyUtils$$connect$1(JettyUtils.scala:252)
        at org.apache.spark.ui.JettyUtils$$anonfun$5.apply(JettyUtils.scala:262)
        at org.apache.spark.ui.JettyUtils$$anonfun$5.apply(JettyUtils.scala:262)
        at org.apache.spark.util.Utils$$anonfun$startServiceOnPort$1.apply$mcVI$sp(Utils.scala:2040)
        at scala.collection.immutable.Range.foreach$mVc$sp(Range.scala:141)
        at org.apache.spark.util.Utils$.startServiceOnPort(Utils.scala:2031)
        at org.apache.spark.ui.JettyUtils$.startJettyServer(JettyUtils.scala:262)
        at org.apache.spark.ui.WebUI.bind(WebUI.scala:137)
        at org.apache.spark.SparkContext$$anonfun$13.apply(SparkContext.scala:481)
        at org.apache.spark.SparkContext$$anonfun$13.apply(SparkContext.scala:481)
        at scala.Option.foreach(Option.scala:236)
        at org.apache.spark.SparkContext.<init>(SparkContext.scala:481)
        at org.apache.spark.examples.SparkPi$.main(SparkPi.scala:29)
        at org.apache.spark.examples.SparkPi.main(SparkPi.scala)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:738)
        at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:181)
        at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:206)
        at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:121)
        at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
17/12/21 08:07:43 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/stages/stage/kill,null}
17/12/21 08:07:43 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/api,null}
17/12/21 08:07:43 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/,null}
17/12/21 08:07:43 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/static,null}
17/12/21 08:07:43 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/executors/threadDump/json,null}
17/12/21 08:07:43 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/executors/threadDump,null}
17/12/21 08:07:43 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/executors/json,null}
17/12/21 08:07:43 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/executors,null}
17/12/21 08:07:43 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/environment/json,null}
17/12/21 08:07:43 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/environment,null}
17/12/21 08:07:43 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/storage/rdd/json,null}
17/12/21 08:07:43 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/storage/rdd,null}
17/12/21 08:07:43 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/storage/json,null}
17/12/21 08:07:43 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/storage,null}
17/12/21 08:07:43 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/stages/pool/json,null}
17/12/21 08:07:43 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/stages/pool,null}
17/12/21 08:07:43 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/stages/stage/json,null}
17/12/21 08:07:43 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/stages/stage,null}
17/12/21 08:07:43 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/stages/json,null}
17/12/21 08:07:43 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/stages,null}
17/12/21 08:07:43 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/jobs/job/json,null}
17/12/21 08:07:43 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/jobs/job,null}
17/12/21 08:07:43 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/jobs/json,null}
17/12/21 08:07:43 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/jobs,null}
17/12/21 08:07:43 WARN Utils: Service 'SparkUI' could not bind on port 4040. Attempting port 4041.
17/12/21 08:07:43 INFO Server: jetty-8.y.z-SNAPSHOT
17/12/21 08:07:43 INFO AbstractConnector: Started SelectChannelConnector@0.0.0.0:4041
17/12/21 08:07:43 INFO Utils: Successfully started service 'SparkUI' on port 4041.
17/12/21 08:07:43 INFO SparkUI: Bound SparkUI to 0.0.0.0, and started at http://192.168.0.58:4041
17/12/21 08:07:43 INFO HttpFileServer: HTTP File server directory is /tmp/spark-ca0baf4f-04e5-4cd8-80ff-6a29597665d2/httpd-75c1605a-46c1-4e5a-b628-5d03e4023784
17/12/21 08:07:43 INFO HttpServer: Starting HTTP Server
17/12/21 08:07:43 INFO Server: jetty-8.y.z-SNAPSHOT
17/12/21 08:07:43 INFO AbstractConnector: Started SocketConnector@0.0.0.0:41892
17/12/21 08:07:43 INFO Utils: Successfully started service 'HTTP file server' on port 41892.
17/12/21 08:07:43 INFO SparkContext: Added JAR file:/usr/hdp/2.5.5.0-157/spark/lib/spark-examples-1.6.3.2.5.5.0-157-hadoop2.7.3.2.5.5.0-157.jar at http://192.168.0.58:41892/jars/spark-examples-1.6.3.2.5.5.0-157-hadoop2.7.3.2.5.5.0-157.jar with timestamp 1513843663820
spark.yarn.driver.memoryOverhead is set but does not apply in client mode.
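Two notes on the startup section above. The BindException is harmless here: port 4040 was already held by another Spark driver on fqdn01, and Spark retried on 4041 (the "Attempting port 4041" line), so startup continued normally. If the retry noise is unwanted, a fixed free port can be passed explicitly; spark.ui.port is a standard Spark setting, and 4050 below is just an arbitrary example:

    $ spark-submit --conf spark.ui.port=4050 ...

The last line of the block, about spark.yarn.driver.memoryOverhead, only means that this key (presumably set in the cluster's spark-defaults.conf) is ignored in yarn-client mode, where the driver runs outside YARN.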
17/12/21 08:07:44 INFO TimelineClientImpl: Timeline service address: http://fqdn01:8188/ws/v1/timeline/
17/12/21 08:07:45 INFO RMProxy: Connecting to ResourceManager at fqdn01/192.168.0.58:8050
17/12/21 08:07:45 INFO AHSProxy: Connecting to Application History server at fqdn01/192.168.0.58:10200
17/12/21 08:07:47 INFO Client: Requesting a new application from cluster with 4 NodeManagers
17/12/21 08:07:47 INFO Client: Verifying our application has not requested more than the maximum memory capability of the cluster (30720 MB per container)
17/12/21 08:07:47 INFO Client: Will allocate AM container, with 896 MB memory including 384 MB overhead
17/12/21 08:07:47 INFO Client: Setting up container launch context for our AM
17/12/21 08:07:47 INFO Client: Setting up the launch environment for our AM container
17/12/21 08:07:47 INFO Client: Using the spark assembly jar on HDFS because you are using HDP, defaultSparkAssembly:hdfs://fqdn01:8020/hdp/apps/2.5.5.0-157/spark/spark-hdp-assembly.jar
17/12/21 08:07:47 INFO Client: Preparing resources for our AM container
17/12/21 08:07:47 INFO YarnSparkHadoopUtil: getting token for namenode: hdfs://fqdn01:8020/user/testuser/.sparkStaging/application_1513683381321_0015
17/12/21 08:07:47 INFO DFSClient: Created HDFS_DELEGATION_TOKEN token 851 for testuser on 192.168.0.58:8020
17/12/21 08:07:49 INFO metastore: Trying to connect to metastore with URI thrift://fqdn02:9083
17/12/21 08:07:49 INFO metastore: Connected to metastore.
17/12/21 08:07:49 INFO YarnSparkHadoopUtil: HBase class not found java.lang.ClassNotFoundException: org.apache.hadoop.hbase.HBaseConfiguration
17/12/21 08:07:49 INFO Client: Using the spark assembly jar on HDFS because you are using HDP, defaultSparkAssembly:hdfs://fqdn01:8020/hdp/apps/2.5.5.0-157/spark/spark-hdp-assembly.jar
17/12/21 08:07:49 INFO Client: Source and destination file systems are the same. Not copying hdfs://fqdn01:8020/hdp/apps/2.5.5.0-157/spark/spark-hdp-assembly.jar
17/12/21 08:07:49 INFO Client: Uploading resource file:/tmp/spark-ca0baf4f-04e5-4cd8-80ff-6a29597665d2/__spark_conf__2978110364098525002.zip -> hdfs://fqdn01:8020/user/testuser/.sparkStaging/application_1513683381321_0015/__spark_conf__2978110364098525002.zip
17/12/21 08:07:49 INFO SecurityManager: Changing view acls to: testuser
17/12/21 08:07:49 INFO SecurityManager: Changing modify acls to: testuser
17/12/21 08:07:49 INFO SecurityManager: SecurityManager: authentication disabled; ui acls disabled; users with view permissions: Set(testuser); users with modify permissions: Set(testuser)
17/12/21 08:07:49 INFO Client: Submitting application 15 to ResourceManager
17/12/21 08:07:50 INFO YarnClientImpl: Submitted application application_1513683381321_0015
17/12/21 08:07:50 INFO SchedulerExtensionServices: Starting Yarn extension services with app application_1513683381321_0015 and attemptId None
17/12/21 08:07:51 INFO Client: Application report for application_1513683381321_0015 (state: ACCEPTED)
17/12/21 08:07:51 INFO Client:
     client token: Token { kind: YARN_CLIENT_TOKEN, service: }
     diagnostics: AM container is launched, waiting for AM container to Register with RM
     ApplicationMaster host: N/A
     ApplicationMaster RPC port: -1
     queue: default
     start time: 1513843670217
     final status: UNDEFINED
     tracking URL: http://fqdn01:8088/proxy/application_1513683381321_0015/
     user: testuser
17/12/21 08:07:52 INFO Client: Application report for application_1513683381321_0015 (state: ACCEPTED)
17/12/21 08:07:53 INFO Client: Application report for application_1513683381321_0015 (state: ACCEPTED)
17/12/21 08:07:54 INFO Client: Application report for application_1513683381321_0015 (state: ACCEPTED)
17/12/21 08:07:55 INFO Client: Application report for application_1513683381321_0015 (state: ACCEPTED)
17/12/21 08:07:56 INFO Client: Application report for application_1513683381321_0015 (state: ACCEPTED)
17/12/21 08:07:57 INFO Client: Application report for application_1513683381321_0015 (state: ACCEPTED)
17/12/21 08:07:58 INFO Client: Application report for application_1513683381321_0015 (state: ACCEPTED)
17/12/21 08:07:58 INFO YarnSchedulerBackend$YarnSchedulerEndpoint: ApplicationMaster registered as NettyRpcEndpointRef(null)
17/12/21 08:07:58 INFO YarnClientSchedulerBackend: Add WebUI Filter. org.apache.hadoop.yarn.server.webproxy.amfilter.AmIpFilter, Map(PROXY_HOSTS -> fqdn01, PROXY_URI_BASES -> http://fqdn01:8088/proxy/application_1513683381321_0015), /proxy/application_1513683381321_0015
17/12/21 08:07:58 INFO JettyUtils: Adding filter: org.apache.hadoop.yarn.server.webproxy.amfilter.AmIpFilter
17/12/21 08:07:59 INFO Client: Application report for application_1513683381321_0015 (state: RUNNING)
17/12/21 08:07:59 INFO Client:
     client token: Token { kind: YARN_CLIENT_TOKEN, service: }
     diagnostics: N/A
     ApplicationMaster host: 192.168.0.45
     ApplicationMaster RPC port: 0
     queue: default
     start time: 1513843670217
     final status: UNDEFINED
     tracking URL: http://fqdn01:8088/proxy/application_1513683381321_0015/
     user: testuser
17/12/21 08:07:59 INFO YarnClientSchedulerBackend: Application application_1513683381321_0015 has started running.
17/12/21 08:07:59 INFO Utils: Successfully started service 'org.apache.spark.network.netty.NettyBlockTransferService' on port 37510.
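For reference, the "896 MB memory including 384 MB overhead" line is the ApplicationMaster container request in yarn-client mode: 512 MB of AM heap (the spark.yarn.am.memory default in Spark 1.6) plus max(0.10 × 512 MB, 384 MB) = 384 MB of overhead. If the AM ever needed more room, the client-mode knobs would look like this (values illustrative, not tuned for this cluster):

    $ spark-submit --conf spark.yarn.am.memory=1g --conf spark.yarn.am.memoryOverhead=512 ...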
17/12/21 08:07:59 INFO NettyBlockTransferService: Server created on 37510
17/12/21 08:07:59 INFO BlockManagerMaster: Trying to register BlockManager
17/12/21 08:07:59 INFO BlockManagerMasterEndpoint: Registering block manager 192.168.0.58:37510 with 143.3 MB RAM, BlockManagerId(driver, 192.168.0.58, 37510)
17/12/21 08:07:59 INFO BlockManagerMaster: Registered BlockManager
17/12/21 08:07:59 INFO EventLoggingListener: Logging events to hdfs:///spark-history/application_1513683381321_0015
17/12/21 08:08:09 WARN YarnSchedulerBackend$YarnSchedulerEndpoint: Container marked as failed: container_e20_1513683381321_0015_01_000002 on host: FQDN-DN01. Exit status: 1. Diagnostics: Exception from container-launch.
Container id: container_e20_1513683381321_0015_01_000002
Exit code: 1
Stack trace: org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException: Launch container failed
        at org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.DefaultLinuxContainerRuntime.launchContainer(DefaultLinuxContainerRuntime.java:109)
        at org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.DelegatingLinuxContainerRuntime.launchContainer(DelegatingLinuxContainerRuntime.java:89)
        at org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor.launchContainer(LinuxContainerExecutor.java:392)
        at org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:317)
        at org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:83)
        at java.util.concurrent.FutureTask.run(FutureTask.java:266)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
        at java.lang.Thread.run(Thread.java:745)

Shell output: main : command provided 1
main : run as user is testuser
main : requested yarn user is testuser
Getting exit code file...
Creating script paths...
Writing pid file...
Writing to tmp file /hadoop/yarn/local/nmPrivate/application_1513683381321_0015/container_e20_1513683381321_0015_01_000002/container_e20_1513683381321_0015_01_000002.pid.tmp
Writing to cgroup task files...
Creating local dirs...
Launching container...
Getting exit code file...
Creating script paths...

Container exited with a non-zero exit code 1

17/12/21 08:08:09 INFO BlockManagerMaster: Removal of executor 1 requested
17/12/21 08:08:09 INFO YarnClientSchedulerBackend: Asked to remove non-existent executor 1
17/12/21 08:08:09 INFO BlockManagerMasterEndpoint: Trying to remove executor 1 from BlockManagerMaster.
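The diagnostics above only establish that the container launch exited with code 1; the actual cause is recorded on the NodeManager side. Assuming log aggregation is enabled on this cluster, any container output can be pulled from the command line once the application finishes:

    $ yarn logs -applicationId application_1513683381321_0015

Failing that, the NodeManager log on FQDN-DN01 around 08:08:09 should say why container_e20_1513683381321_0015_01_000002 could not be launched.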
17/12/21 08:08:13 INFO YarnClientSchedulerBackend: SchedulerBackend is ready for scheduling beginning after waiting maxRegisteredResourcesWaitingTime: 30000(ms)
17/12/21 08:08:14 INFO SparkContext: Starting job: reduce at SparkPi.scala:36
17/12/21 08:08:14 INFO DAGScheduler: Got job 0 (reduce at SparkPi.scala:36) with 10 output partitions
17/12/21 08:08:14 INFO DAGScheduler: Final stage: ResultStage 0 (reduce at SparkPi.scala:36)
17/12/21 08:08:14 INFO DAGScheduler: Parents of final stage: List()
17/12/21 08:08:14 INFO DAGScheduler: Missing parents: List()
17/12/21 08:08:14 INFO DAGScheduler: Submitting ResultStage 0 (MapPartitionsRDD[1] at map at SparkPi.scala:32), which has no missing parents
17/12/21 08:08:14 INFO MemoryStore: Block broadcast_0 stored as values in memory (estimated size 1960.0 B, free 143.2 MB)
17/12/21 08:08:14 INFO MemoryStore: Block broadcast_0_piece0 stored as bytes in memory (estimated size 1238.0 B, free 143.2 MB)
17/12/21 08:08:14 INFO BlockManagerInfo: Added broadcast_0_piece0 in memory on 192.168.0.58:37510 (size: 1238.0 B, free: 143.2 MB)
17/12/21 08:08:14 INFO SparkContext: Created broadcast 0 from broadcast at DAGScheduler.scala:1008
17/12/21 08:08:14 INFO DAGScheduler: Submitting 10 missing tasks from ResultStage 0 (MapPartitionsRDD[1] at map at SparkPi.scala:32)
17/12/21 08:08:14 INFO YarnScheduler: Adding task set 0.0 with 10 tasks
17/12/21 08:08:25 WARN YarnSchedulerBackend$YarnSchedulerEndpoint: Container marked as failed: container_e20_1513683381321_0015_01_000003 on host: FQDN-DN02. Exit status: 1. Diagnostics: Exception from container-launch.
Container id: container_e20_1513683381321_0015_01_000003
Exit code: 1
Stack trace: org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException: Launch container failed
        at org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.DefaultLinuxContainerRuntime.launchContainer(DefaultLinuxContainerRuntime.java:109)
        at org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.DelegatingLinuxContainerRuntime.launchContainer(DelegatingLinuxContainerRuntime.java:89)
        at org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor.launchContainer(LinuxContainerExecutor.java:392)
        at org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:317)
        at org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:83)
        at java.util.concurrent.FutureTask.run(FutureTask.java:266)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
        at java.lang.Thread.run(Thread.java:745)

Shell output: main : command provided 1
main : run as user is testuser
main : requested yarn user is testuser
Getting exit code file...
Creating script paths...
Writing pid file...
Writing to tmp file /hadoop/yarn/local/nmPrivate/application_1513683381321_0015/container_e20_1513683381321_0015_01_000003/container_e20_1513683381321_0015_01_000003.pid.tmp
Writing to cgroup task files...
Creating local dirs...
Launching container...
Getting exit code file...
Creating script paths...

Container exited with a non-zero exit code 1

17/12/21 08:08:25 INFO BlockManagerMaster: Removal of executor 2 requested
17/12/21 08:08:25 INFO YarnClientSchedulerBackend: Asked to remove non-existent executor 2
17/12/21 08:08:25 INFO BlockManagerMasterEndpoint: Trying to remove executor 2 from BlockManagerMaster.
17/12/21 08:08:29 WARN YarnScheduler: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
17/12/21 08:08:36 WARN YarnSchedulerBackend$YarnSchedulerEndpoint: Container marked as failed: container_e20_1513683381321_0015_01_000004 on host: FQDN-DN03. Exit status: 1. Diagnostics: Exception from container-launch.
Container id: container_e20_1513683381321_0015_01_000004
Exit code: 1
Stack trace: org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException: Launch container failed
        at org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.DefaultLinuxContainerRuntime.launchContainer(DefaultLinuxContainerRuntime.java:109)
        at org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.DelegatingLinuxContainerRuntime.launchContainer(DelegatingLinuxContainerRuntime.java:89)
        at org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor.launchContainer(LinuxContainerExecutor.java:392)
        at org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:317)
        at org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:83)
        at java.util.concurrent.FutureTask.run(FutureTask.java:266)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
        at java.lang.Thread.run(Thread.java:745)

Shell output: main : command provided 1
main : run as user is testuser
main : requested yarn user is testuser
Getting exit code file...
Creating script paths...
Writing pid file...
Writing to tmp file /hadoop/yarn/local/nmPrivate/application_1513683381321_0015/container_e20_1513683381321_0015_01_000004/container_e20_1513683381321_0015_01_000004.pid.tmp
Writing to cgroup task files...
Creating local dirs...
Launching container...
Getting exit code file...
Creating script paths...

Container exited with a non-zero exit code 1

17/12/21 08:08:36 INFO BlockManagerMaster: Removal of executor 3 requested
17/12/21 08:08:36 INFO BlockManagerMasterEndpoint: Trying to remove executor 3 from BlockManagerMaster.
17/12/21 08:08:36 INFO YarnClientSchedulerBackend: Asked to remove non-existent executor 3
17/12/21 08:08:44 WARN YarnScheduler: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
17/12/21 08:08:46 ERROR YarnClientSchedulerBackend: Yarn application has already exited with state FINISHED!
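The sequence that ends in "state FINISHED" is worth spelling out: three executor containers (000002 through 000004) failed in a row, which hits the ApplicationMaster's default failure tolerance (in Spark 1.6, spark.yarn.max.executor.failures defaults to twice the requested executor count, with a floor of 3), so the AM gave up; and because the job was submitted with spark.yarn.maxAppAttempts=1, YARN made no second attempt, leaving the still-running driver with nothing to schedule on. Raising those limits would keep the application alive longer for debugging, though it only postpones the failure rather than fixing the container launch (values illustrative):

    $ spark-submit --conf spark.yarn.maxAppAttempts=2 --conf spark.yarn.max.executor.failures=10 ...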
17/12/21 08:08:46 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/metrics/json,null}
17/12/21 08:08:46 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/stages/stage/kill,null}
17/12/21 08:08:46 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/api,null}
17/12/21 08:08:46 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/,null}
17/12/21 08:08:46 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/static,null}
17/12/21 08:08:46 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/executors/threadDump/json,null}
17/12/21 08:08:46 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/executors/threadDump,null}
17/12/21 08:08:46 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/executors/json,null}
17/12/21 08:08:46 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/executors,null}
17/12/21 08:08:46 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/environment/json,null}
17/12/21 08:08:46 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/environment,null}
17/12/21 08:08:46 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/storage/rdd/json,null}
17/12/21 08:08:46 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/storage/rdd,null}
17/12/21 08:08:46 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/storage/json,null}
17/12/21 08:08:46 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/storage,null}
17/12/21 08:08:46 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/stages/pool/json,null}
17/12/21 08:08:46 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/stages/pool,null}
17/12/21 08:08:46 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/stages/stage/json,null}
17/12/21 08:08:46 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/stages/stage,null}
17/12/21 08:08:46 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/stages/json,null}
17/12/21 08:08:46 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/stages,null}
17/12/21 08:08:46 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/jobs/job/json,null}
17/12/21 08:08:46 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/jobs/job,null}
17/12/21 08:08:46 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/jobs/json,null}
17/12/21 08:08:46 INFO ContextHandler: stopped o.s.j.s.ServletContextHandler{/jobs,null}
17/12/21 08:08:46 INFO SparkUI: Stopped Spark web UI at http://192.168.0.58:4041
17/12/21 08:08:46 INFO DAGScheduler: Job 0 failed: reduce at SparkPi.scala:36, took 32.431779 s
Exception in thread "main" org.apache.spark.SparkException: Job 0 cancelled because SparkContext was shut down
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$cleanUpAfterSchedulerStop$1.apply(DAGScheduler.scala:808)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$cleanUpAfterSchedulerStop$1.apply(DAGScheduler.scala:806)
        at scala.collection.mutable.HashSet.foreach(HashSet.scala:79)
        at org.apache.spark.scheduler.DAGScheduler.cleanUpAfterSchedulerStop(DAGScheduler.scala:806)
        at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onStop(DAGScheduler.scala:1660)
        at org.apache.spark.util.EventLoop.stop(EventLoop.scala:84)
        at org.apache.spark.scheduler.DAGScheduler.stop(DAGScheduler.scala:1583)
        at org.apache.spark.SparkContext$$anonfun$stop$9.apply$mcV$sp(SparkContext.scala:1739)
        at org.apache.spark.util.Utils$.tryLogNonFatalError(Utils.scala:1219)
        at org.apache.spark.SparkContext.stop(SparkContext.scala:1738)
        at org.apache.spark.scheduler.cluster.YarnClientSchedulerBackend$MonitorThread.run(YarnClientSchedulerBackend.scala:145)
        at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:622)
        at org.apache.spark.SparkContext.runJob(SparkContext.scala:1831)
        at org.apache.spark.SparkContext.runJob(SparkContext.scala:1951)
        at org.apache.spark.rdd.RDD$$anonfun$reduce$1.apply(RDD.scala:1032)
        at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)
        at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:111)
        at org.apache.spark.rdd.RDD.withScope(RDD.scala:323)
        at org.apache.spark.rdd.RDD.reduce(RDD.scala:1014)
        at org.apache.spark.examples.SparkPi$.main(SparkPi.scala:36)
        at org.apache.spark.examples.SparkPi.main(SparkPi.scala)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:738)
        at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:181)
        at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:206)
        at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:121)
        at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
17/12/21 08:08:46 INFO DAGScheduler: ResultStage 0 (reduce at SparkPi.scala:36) failed in 32.097 s
17/12/21 08:08:46 ERROR LiveListenerBus: SparkListenerBus has already stopped! Dropping event SparkListenerStageCompleted(org.apache.spark.scheduler.StageInfo@4c7b2171)
17/12/21 08:08:46 ERROR LiveListenerBus: SparkListenerBus has already stopped! Dropping event SparkListenerJobEnd(0,1513843726729,JobFailed(org.apache.spark.SparkException: Job 0 cancelled because SparkContext was shut down))
17/12/21 08:08:46 ERROR TransportClient: Failed to send RPC 4673082556803368313 to FQDN-DN03/192.168.0.45:55752: java.nio.channels.ClosedChannelException
java.nio.channels.ClosedChannelException
17/12/21 08:08:46 WARN NettyRpcEndpointRef: Error sending message [message = RequestExecutors(0,0,Map())] in 1 attempts
java.io.IOException: Failed to send RPC 4673082556803368313 to FQDN-DN03/192.168.0.45:55752: java.nio.channels.ClosedChannelException
        at org.apache.spark.network.client.TransportClient$3.operationComplete(TransportClient.java:239)
        at org.apache.spark.network.client.TransportClient$3.operationComplete(TransportClient.java:226)
        at io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:680)
        at io.netty.util.concurrent.DefaultPromise$LateListeners.run(DefaultPromise.java:845)
        at io.netty.util.concurrent.DefaultPromise$LateListenerNotifier.run(DefaultPromise.java:873)
        at io.netty.util.concurrent.SingleThreadEventExecutor.runAllTasks(SingleThreadEventExecutor.java:357)
        at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:357)
        at io.netty.util.concurrent.SingleThreadEventExecutor$2.run(SingleThreadEventExecutor.java:111)
        at java.lang.Thread.run(Thread.java:745)
Caused by: java.nio.channels.ClosedChannelException
17/12/21 08:08:46 INFO DiskBlockManager: Shutdown hook called
17/12/21 08:08:46 INFO ShutdownHookManager: Shutdown hook called
17/12/21 08:08:46 INFO ShutdownHookManager: Deleting directory /tmp/spark-ca0baf4f-04e5-4cd8-80ff-6a29597665d2/userFiles-86980761-aa26-4422-a16c-ee141812627c
17/12/21 08:08:46 INFO ShutdownHookManager: Deleting directory /tmp/spark-ca0baf4f-04e5-4cd8-80ff-6a29597665d2
17/12/21 08:08:46 INFO ShutdownHookManager: Deleting directory /tmp/spark-ca0baf4f-04e5-4cd8-80ff-6a29597665d2/httpd-75c1605a-46c1-4e5a-b628-5d03e4023784
[testuser@fqdn01 ~]$
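Since every executor dies before producing any output, one practical next step (an assumption about how to proceed, not something shown in this log) is to make YARN keep the container working directories around and replay the generated launch script by hand on a failing node. That means setting yarn.nodemanager.delete.debug-delay-sec in yarn-site.xml (e.g. to 600 seconds), restarting the NodeManagers, resubmitting the job (the application id will differ on the new run), and then, on FQDN-DN01:

    $ ls /hadoop/yarn/local/usercache/testuser/appcache/application_1513683381321_0015/
    $ bash -x <container dir>/launch_container.sh

The /hadoop/yarn/local prefix is taken from the nmPrivate path in the diagnostics above; the usercache layout assumed here is the standard YARN one. Given the "Writing to cgroup task files..." lines, the LinuxContainerExecutor/cgroups configuration on the NodeManagers is also a plausible place to look.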