<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>question Re: Hbase: Failed to become active master in Archives of Support Questions (Read Only)</title>
    <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Hbase-Failed-to-become-active-master/m-p/226760#M67120</link>
    <description>&lt;P&gt;Can you please check the config "hbase.rootdir". Looks like this config is pointing to NameNode which is in StandBy Node.&lt;/P&gt;&lt;P&gt;Try changing this to point to Active NameNode or change it to value of your config fs.defaultFS in core-site.xml and then the hdfs path.&lt;/P&gt;</description>
    <pubDate>Wed, 23 Aug 2017 13:33:50 GMT</pubDate>
    <dc:creator>sgowda</dc:creator>
    <dc:date>2017-08-23T13:33:50Z</dc:date>
    <item>
      <title>Hbase: Failed to become active master</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Hbase-Failed-to-become-active-master/m-p/226759#M67119</link>
      <description>&lt;P&gt;Hi All,&lt;/P&gt;&lt;PRE&gt;I installed hpd 2.5.5 HA cluster using ambari. Hbase is not working. Problem with hbase is both when starts act as standby hbase master..then after some time both stops.A part of error log is here.&lt;/PRE&gt;
&lt;PRE&gt;[master/wdctestlab0730.systems.uk.hsbc/128.160.120.239:60000-SendThread(wdctestlab0733.systems.uk.hsbc:2181)] zookeeper.ClientCnxn: Session establishment complete on server wdctestlab0733.systems.uk.hsbc/128.160.120.242:2181, sessionid = 0x35e04eef946000b, negotiated timeout = 30000
2017-08-21 15:39:02,875 INFO  [master/wdctestlab0730.systems.uk.hsbc/128.160.120.239:60000] client.ZooKeeperRegistry: ClusterId read in ZooKeeper is null
2017-08-21 15:39:09,792 FATAL [wdctestlab0730:60000.activeMasterManager] master.HMaster: Failed to become active master
org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.ipc.StandbyException): Operation category READ is not supported in state standby
        at org.apache.hadoop.hdfs.server.namenode.ha.StandbyState.checkOperation(StandbyState.java:87)
        at org.apache.hadoop.hdfs.server.namenode.NameNode$NameNodeHAContext.checkOperation(NameNode.java:1978)
        at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkOperation(FSNamesystem.java:1368)
        at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getFileInfo(FSNamesystem.java:4096)
        at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.getFileInfo(NameNodeRpcServer.java:1130)
        at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.getFileInfo(ClientNamenodeProtocolServerSideTranslatorPB.java:851)
        at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
        at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:640)
        at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:982)
        at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2351)
        at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2347)
        at java.security.AccessController.doPrivileged(Native Method)
        at javax.security.auth.Subject.doAs(Subject.java:422)
        at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1865)
        at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2345)


        at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1554)
        at org.apache.hadoop.ipc.Client.call(Client.java:1498)
        at org.apache.hadoop.ipc.Client.call(Client.java:1398)
        at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:233)
        at com.sun.proxy.$Proxy16.getFileInfo(Unknown Source)
        at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.getFileInfo(ClientNamenodeProtocolTranslatorPB.java:816)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:291)
        at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:203)
        at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:185)
        at com.sun.proxy.$Proxy17.getFileInfo(Unknown Source)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:279)
        at com.sun.proxy.$Proxy22.getFileInfo(Unknown Source)
        at org.apache.hadoop.hdfs.DFSClient.getFileInfo(DFSClient.java:2158)
        at org.apache.hadoop.hdfs.DistributedFileSystem$25.doCall(DistributedFileSystem.java:1423)
        at org.apache.hadoop.hdfs.DistributedFileSystem$25.doCall(DistributedFileSystem.java:1419)
        at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
        at org.apache.hadoop.hdfs.DistributedFileSystem.getFileStatus(DistributedFileSystem.java:1419)
        at org.apache.hadoop.fs.FileSystem.exists(FileSystem.java:1447)
        at org.apache.hadoop.hbase.master.MasterFileSystem.checkRootDir(MasterFileSystem.java:431)
        at org.apache.hadoop.hbase.master.MasterFileSystem.createInitialFileSystemLayout(MasterFileSystem.java:148)
        at org.apache.hadoop.hbase.master.MasterFileSystem.&amp;lt;init&amp;gt;(MasterFileSystem.java:128)
        at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:714)
        at org.apache.hadoop.hbase.master.HMaster.access$500(HMaster.java:214)
        at org.apache.hadoop.hbase.master.HMaster$1.run(HMaster.java:1884)
        at java.lang.Thread.run(Thread.java:748)
2017-08-21 15:39:09,885 FATAL [wdctestlab0730:60000.activeMasterManager] master.HMaster: Unhandled exception. Starting shutdown.
org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.ipc.StandbyException): Operation category READ is not supported in state standby
        at org.apache.hadoop.hdfs.server.namenode.ha.StandbyState.checkOperation(StandbyState.java:87)
        at org.apache.hadoop.hdfs.server.namenode.NameNode$NameNodeHAContext.checkOperation(NameNode.java:1978)
        at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkOperation(FSNamesystem.java:1368)
        at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getFileInfo(FSNamesystem.java:4096)
        at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.getFileInfo(NameNodeRpcServer.java:1130)
        at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.getFileInfo(ClientNamenodeProtocolServerSideTranslatorPB.java:851)
        at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
        at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:640)
        at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:982)
        at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2351)
        at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2347)
        at java.security.AccessController.doPrivileged(Native Method)
        at javax.security.auth.Subject.doAs(Subject.java:422)
        at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1865)
        at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2345)


        at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1554)
        at org.apache.hadoop.ipc.Client.call(Client.java:1498)
        at org.apache.hadoop.ipc.Client.call(Client.java:1398)
        at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:233)
        at com.sun.proxy.$Proxy16.getFileInfo(Unknown Source)
        at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.getFileInfo(ClientNamenodeProtocolTranslatorPB.java:816)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:291)
        at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:203)
        at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:185)
        at com.sun.proxy.$Proxy17.getFileInfo(Unknown Source)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:279)
        at com.sun.proxy.$Proxy22.getFileInfo(Unknown Source)
        at org.apache.hadoop.hdfs.DFSClient.getFileInfo(DFSClient.java:2158)
        at org.apache.hadoop.hdfs.DistributedFileSystem$25.doCall(DistributedFileSystem.java:1423)
        at org.apache.hadoop.hdfs.DistributedFileSystem$25.doCall(DistributedFileSystem.java:1419)
        at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
        at org.apache.hadoop.hdfs.DistributedFileSystem.getFileStatus(DistributedFileSystem.java:1419)
        at org.apache.hadoop.fs.FileSystem.exists(FileSystem.java:1447)
        at org.apache.hadoop.hbase.master.MasterFileSystem.checkRootDir(MasterFileSystem.java:431)
        at org.apache.hadoop.hbase.master.MasterFileSystem.createInitialFileSystemLayout(MasterFileSystem.java:148)
        at org.apache.hadoop.hbase.master.MasterFileSystem.&amp;lt;init&amp;gt;(MasterFileSystem.java:128)
        at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:714)
        at org.apache.hadoop.hbase.master.HMaster.access$500(HMaster.java:214)
        at org.apache.hadoop.hbase.master.HMaster$1.run(HMaster.java:1884)
        at java.lang.Thread.run(Thread.java:748)
2017-08-21 15:39:09,886 INFO  [wdctestlab0730:60000.activeMasterManager] regionserver.HRegionServer: STOPPED: Unhandled exception. Starting shutdown.
&lt;/PRE&gt;&lt;P&gt;&lt;span class="lia-inline-image-display-wrapper lia-image-align-inline" image-alt="34407-hbase.png" style="width: 741px;"&gt;&lt;img src="https://community.cloudera.com/t5/image/serverpage/image-id/15340i08598F8281B0CF40/image-size/medium?v=v2&amp;amp;px=400" role="button" title="34407-hbase.png" alt="34407-hbase.png" /&gt;&lt;/span&gt;&lt;/P&gt;</description>
      <pubDate>Sun, 18 Aug 2019 01:25:01 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Hbase-Failed-to-become-active-master/m-p/226759#M67119</guid>
      <dc:creator>prabhakar_bharg</dc:creator>
      <dc:date>2019-08-18T01:25:01Z</dc:date>
    </item>
    <item>
      <title>Re: Hbase: Failed to become active master</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Hbase-Failed-to-become-active-master/m-p/226760#M67120</link>
      <description>&lt;P&gt;Can you please check the config "hbase.rootdir". Looks like this config is pointing to NameNode which is in StandBy Node.&lt;/P&gt;&lt;P&gt;Try changing this to point to Active NameNode or change it to value of your config fs.defaultFS in core-site.xml and then the hdfs path.&lt;/P&gt;</description>
      <pubDate>Wed, 23 Aug 2017 13:33:50 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Hbase-Failed-to-become-active-master/m-p/226760#M67120</guid>
      <dc:creator>sgowda</dc:creator>
      <dc:date>2017-08-23T13:33:50Z</dc:date>
    </item>
    <item>
      <title>Re: Hbase: Failed to become active master</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Hbase-Failed-to-become-active-master/m-p/226761#M67121</link>
      <description>&lt;P&gt;@Santosh&lt;/P&gt;&lt;P&gt;Thank you..the problem is solved..hbase is working fine&lt;/P&gt;</description>
      <pubDate>Wed, 23 Aug 2017 15:04:56 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Hbase-Failed-to-become-active-master/m-p/226761#M67121</guid>
      <dc:creator>prabhakar_bharg</dc:creator>
      <dc:date>2017-08-23T15:04:56Z</dc:date>
    </item>
  </channel>
</rss>