<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>question Re: Failed to connect to server  (port 8032) retries get failed due to exceeded maximum allowed retries number: 0 in Support Questions</title>
    <link>https://community.cloudera.com/t5/Support-Questions/Failed-to-connect-to-server-port-8032-retries-get-failed-due/m-p/148359#M110885</link>
    <description>&lt;P&gt;Your standby RM (rm1) must be the first RM in the configured list of RMs. So it's tried first and that results in exceptions.&lt;/P&gt;</description>
    <pubDate>Tue, 13 Dec 2016 06:10:13 GMT</pubDate>
    <dc:creator>bikas</dc:creator>
    <dc:date>2016-12-13T06:10:13Z</dc:date>
    <item>
      <title>Failed to connect to server  (port 8032) retries get failed due to exceeded maximum allowed retries number: 0</title>
      <link>https://community.cloudera.com/t5/Support-Questions/Failed-to-connect-to-server-port-8032-retries-get-failed-due/m-p/148356#M110882</link>
      <description>&lt;P&gt;Hi all,&lt;/P&gt;&lt;P&gt;I am using HDP 2.5. When I try to run a spark job or context (using a Jupyter notebook or pyspark shell), I always obtain the following error:&lt;/P&gt;&lt;PRE&gt;WARN Client: Failed to connect to server: mycluster.at/111.11.11.11:8032: retries get failed due to exceeded maximum allowed retries number: 0
java.net.ConnectException: Connection refused
        at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
        at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:717)
        at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
        at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
        at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
        at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:650)
        at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:745)
        at org.apache.hadoop.ipc.Client$Connection.access$3200(Client.java:397)
        at org.apache.hadoop.ipc.Client.getConnection(Client.java:1618)
        at org.apache.hadoop.ipc.Client.call(Client.java:1449)
        at org.apache.hadoop.ipc.Client.call(Client.java:1396)
        at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:233)
        at com.sun.proxy.$Proxy15.getNewApplication(Unknown Source)
        at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getNewApplication(ApplicationClientProtocolPBClientImpl.java:221)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:278)
        at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:194)
        at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:176)
        at com.sun.proxy.$Proxy16.getNewApplication(Unknown Source)
        at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getNewApplication(YarnClientImpl.java:225)
        at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.createApplication(YarnClientImpl.java:233)
        at org.apache.spark.deploy.yarn.Client.submitApplication(Client.scala:157)
        at org.apache.spark.scheduler.cluster.YarnClientSchedulerBackend.start(YarnClientSchedulerBackend.scala:56)
        at org.apache.spark.scheduler.TaskSchedulerImpl.start(TaskSchedulerImpl.scala:149)
        at org.apache.spark.SparkContext.&amp;lt;init&amp;gt;(SparkContext.scala:500)
        at org.apache.spark.api.java.JavaSparkContext.&amp;lt;init&amp;gt;(JavaSparkContext.scala:58)
        at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
        at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
        at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
        at java.lang.reflect.Constructor.newInstance(Constructor.java:423)
        at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:240)
        at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
        at py4j.Gateway.invoke(Gateway.java:236)
        at py4j.commands.ConstructorCommand.invokeConstructor(ConstructorCommand.java:80)
        at py4j.commands.ConstructorCommand.execute(ConstructorCommand.java:69)
        at py4j.GatewayConnection.run(GatewayConnection.java:211)
        at java.lang.Thread.run(Thread.java:745)
&lt;/PRE&gt;&lt;P&gt;Then the job is running fine, but that warning is always there. I have another cluster with HDP 2.4 and I don't see that warning.&lt;/P&gt;&lt;P&gt;Any ideas?&lt;/P&gt;&lt;P&gt;Thanks in advance,&lt;/P&gt;</description>
      <pubDate>Mon, 12 Dec 2016 18:28:37 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Support-Questions/Failed-to-connect-to-server-port-8032-retries-get-failed-due/m-p/148356#M110882</guid>
      <dc:creator>jmlero</dc:creator>
      <dc:date>2016-12-12T18:28:37Z</dc:date>
    </item>
    <item>
      <title>Re: Failed to connect to server  (port 8032) retries get failed due to exceeded maximum allowed retries number: 0</title>
      <link>https://community.cloudera.com/t5/Support-Questions/Failed-to-connect-to-server-port-8032-retries-get-failed-due/m-p/148357#M110883</link>
      <description>&lt;P&gt;&lt;A rel="user" href="https://community.cloudera.com/users/10706/jmlero.html" nodeid="10706"&gt;@Jose Molero&lt;/A&gt;
&lt;/P&gt;&lt;P&gt;Do you have resource manager HA configured? By looking at this error, it looks like your rm1 is standby and rm2 works fine.&lt;/P&gt;&lt;P&gt;Can you please check?&lt;/P&gt;</description>
      <pubDate>Mon, 12 Dec 2016 18:29:41 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Support-Questions/Failed-to-connect-to-server-port-8032-retries-get-failed-due/m-p/148357#M110883</guid>
      <dc:creator>KuldeepK</dc:creator>
      <dc:date>2016-12-12T18:29:41Z</dc:date>
    </item>
    <item>
      <title>Re: Failed to connect to server  (port 8032) retries get failed due to exceeded maximum allowed retries number: 0</title>
      <link>https://community.cloudera.com/t5/Support-Questions/Failed-to-connect-to-server-port-8032-retries-get-failed-due/m-p/148358#M110884</link>
      <description>&lt;P&gt;Hi &lt;A rel="user" href="https://community.cloudera.com/users/504/kkulkarni.html" nodeid="504"&gt;@Kuldeep Kulkarni&lt;/A&gt;&lt;/P&gt;&lt;P&gt;Yes, Resource manager HA is configured, but both are working fine, just rm1 is in standby mode and rm2 is active.&lt;/P&gt;</description>
      <pubDate>Mon, 12 Dec 2016 19:54:37 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Support-Questions/Failed-to-connect-to-server-port-8032-retries-get-failed-due/m-p/148358#M110884</guid>
      <dc:creator>jmlero</dc:creator>
      <dc:date>2016-12-12T19:54:37Z</dc:date>
    </item>
    <item>
      <title>Re: Failed to connect to server  (port 8032) retries get failed due to exceeded maximum allowed retries number: 0</title>
      <link>https://community.cloudera.com/t5/Support-Questions/Failed-to-connect-to-server-port-8032-retries-get-failed-due/m-p/148359#M110885</link>
      <description>&lt;P&gt;Your standby RM (rm1) must be the first RM in the configured list of RMs. So it's tried first and that results in exceptions.&lt;/P&gt;</description>
      <pubDate>Tue, 13 Dec 2016 06:10:13 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Support-Questions/Failed-to-connect-to-server-port-8032-retries-get-failed-due/m-p/148359#M110885</guid>
      <dc:creator>bikas</dc:creator>
      <dc:date>2016-12-13T06:10:13Z</dc:date>
    </item>
    <item>
      <title>Re: Failed to connect to server  (port 8032) retries get failed due to exceeded maximum allowed retries number: 0</title>
      <link>https://community.cloudera.com/t5/Support-Questions/Failed-to-connect-to-server-port-8032-retries-get-failed-due/m-p/148360#M110886</link>
      <description>&lt;P&gt;Hi &lt;A rel="user" href="https://community.cloudera.com/users/462/bikas.html" nodeid="462"&gt;@bikas&lt;/A&gt;, ok I understood. So I should not worry about it. Thanks&lt;/P&gt;</description>
      <pubDate>Tue, 13 Dec 2016 16:19:13 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Support-Questions/Failed-to-connect-to-server-port-8032-retries-get-failed-due/m-p/148360#M110886</guid>
      <dc:creator>jmlero</dc:creator>
      <dc:date>2016-12-13T16:19:13Z</dc:date>
    </item>
  </channel>
</rss>

