<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>question HBase blueprint configuration not working in Archives of Support Questions (Read Only)</title>
    <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/HBase-blueprint-configuration-not-working/m-p/218922#M72309</link>
    <description>&lt;P&gt;&lt;span class="lia-inline-image-display-wrapper lia-image-align-inline" image-alt="43824-hbase.jpg" style="width: 1190px;"&gt;&lt;img src="https://community.cloudera.com/t5/image/serverpage/image-id/16026i5F64B1464240A37B/image-size/medium?v=v2&amp;amp;px=400" role="button" title="43824-hbase.jpg" alt="43824-hbase.jpg" /&gt;&lt;/span&gt;&lt;/P&gt;&lt;P&gt;Hello I have the following blueprint:&lt;/P&gt;&lt;UL&gt;
&lt;LI&gt;cluster_configuration.json&lt;/LI&gt;&lt;/UL&gt;&lt;PRE&gt;{
  "Blueprints": {
    "stack_name": "HDP",
    "stack_version": "2.6"
  },
  "host_groups": [
    {
      "name": "namenode1",
      "cardinality" : "1",
      "components": [
        { "name" : "HST_AGENT" },
        { "name" : "HDFS_CLIENT" },
        { "name" : "ZKFC" },
        { "name" : "ZOOKEEPER_SERVER" },
        { "name" : "HST_SERVER" },
        { "name" : "HBASE_CLIENT"},
        { "name" : "METRICS_MONITOR" },
        { "name" : "JOURNALNODE" },
        { "name" : "HBASE_MASTER"},
        { "name" : "NAMENODE" },
        { "name" : "APP_TIMELINE_SERVER" },
        { "name" : "METRICS_GRAFANA" }
      ]
    },
    {
      "name": "namenode2",
      "cardinality" : "1",
      "components": [
        { "name" : "ACTIVITY_EXPLORER" },
        { "name" : "HST_AGENT" },
        { "name" : "HDFS_CLIENT" },
        { "name" : "ZKFC" },
        { "name" : "ZOOKEEPER_SERVER" },
        { "name" : "HBASE_CLIENT"},
        { "name" : "HISTORYSERVER" },
        { "name" : "METRICS_MONITOR" },
        { "name" : "JOURNALNODE" },
        { "name" : "HBASE_MASTER"},
        { "name" : "NAMENODE" },
        { "name" : "METRICS_COLLECTOR" }
      ]
    },
    {
      "name": "namenode3",
      "cardinality" : "1",
      "components": [
        { "name" : "ACTIVITY_ANALYZER" },
        { "name" : "HST_AGENT" },
        { "name" : "MAPREDUCE2_CLIENT" },
        { "name" : "YARN_CLIENT" },
        { "name" : "HDFS_CLIENT" },
        { "name" : "ZOOKEEPER_SERVER" },
        { "name" : "HBASE_CLIENT"},
        { "name" : "METRICS_MONITOR" },
        { "name" : "JOURNALNODE" },
        { "name" : "RESOURCEMANAGER" }
      ]
    },
    {
      "name": "hosts_group",
      "cardinality" : "3",
      "components": [
        { "name" : "NODEMANAGER" },
        { "name" : "HST_AGENT" },
        { "name" : "MAPREDUCE2_CLIENT" },
        { "name" : "YARN_CLIENT" },
        { "name" : "HDFS_CLIENT" },
        { "name" : "HBASE_REGIONSERVER"},
        { "name" : "DATANODE" },
        { "name" : "HBASE_CLIENT"},
        { "name" : "METRICS_MONITOR" },
        { "name" : "ZOOKEEPER_CLIENT" }
      ]
    }
  ],
  "configurations": [
    {
      "core-site": {
        "properties" : {
          "fs.defaultFS" : "hdfs://HACluster",
          "ha.zookeeper.quorum": "%HOSTGROUP::namenode1%:2181,%HOSTGROUP::namenode2%:2181,%HOSTGROUP::namenode3%:2181",
          "hadoop.proxyuser.yarn.hosts": "%HOSTGROUP::namenode2%,%HOSTGROUP::namenode3%"
        }
      }
    },
    { "hdfs-site": {
        "properties" : {
          "dfs.client.failover.proxy.provider.HACluster" : "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider",
          "dfs.ha.automatic-failover.enabled" : "true",
          "dfs.ha.fencing.methods" : "shell(/bin/true)",
          "dfs.ha.namenodes.HACluster" : "nn1,nn2",
          "dfs.namenode.http-address" : "%HOSTGROUP::namenode1%:50070",
          "dfs.namenode.http-address.HACluster.nn1" : "%HOSTGROUP::namenode1%:50070",
          "dfs.namenode.http-address.HACluster.nn2" : "%HOSTGROUP::namenode2%:50070",
          "dfs.namenode.https-address" : "%HOSTGROUP::namenode1%:50470",
          "dfs.namenode.https-address.HACluster.nn1" : "%HOSTGROUP::namenode1%:50470",
          "dfs.namenode.https-address.HACluster.nn2" : "%HOSTGROUP::namenode2%:50470",
          "dfs.namenode.rpc-address.HACluster.nn1" : "%HOSTGROUP::namenode1%:8020",
          "dfs.namenode.rpc-address.HACluster.nn2" : "%HOSTGROUP::namenode2%:8020",
          "dfs.namenode.shared.edits.dir" : "qjournal://%HOSTGROUP::namenode1%:8485;%HOSTGROUP::namenode2%:8485;%HOSTGROUP::namenode3%:8485/mycluster",
          "dfs.nameservices" : "HACluster"
        }
      }
    },
    { "yarn-site": {
        "properties": {
          "yarn.resourcemanager.ha.enabled": "true",
          "yarn.resourcemanager.ha.rm-ids": "rm1,rm2",
          "yarn.resourcemanager.hostname.rm1": "%HOSTGROUP::namenode2%",
          "yarn.resourcemanager.hostname.rm2": "%HOSTGROUP::namenode3%",
          "yarn.resourcemanager.webapp.address.rm1": "%HOSTGROUP::namenode2%:8088",
          "yarn.resourcemanager.webapp.address.rm2": "%HOSTGROUP::namenode3%:8088",
          "yarn.resourcemanager.webapp.https.address.rm1": "%HOSTGROUP::namenode2%:8090",
          "yarn.resourcemanager.webapp.https.address.rm2": "%HOSTGROUP::namenode3%:8090",
          "yarn.resourcemanager.recovery.enabled": "true",
          "yarn.resourcemanager.store.class": "org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore",
          "yarn.resourcemanager.zk-address": "%HOSTGROUP::namenode1%:2181,%HOSTGROUP::namenode2%:2181,%HOSTGROUP::namenode3%:2181",
          "yarn.client.failover-proxy-provider": "org.apache.hadoop.yarn.client.ConfiguredRMFailoverProxyProvider",
          "yarn.resourcemanager.cluster-id": "yarn-cluster",
          "yarn.resourcemanager.ha.automatic-failover.zk-base-path": "/yarn-leader-election"
        }
      }
    },
    {
      "hdfs-site" : {
        "properties_attributes" : { },
          "properties" : {
            "dfs.datanode.data.dir" : "/mnt/secondary1,/mnt/secondary2"
          }
      }
    },
    {
      "hadoop-env" : {
        "properties_attributes" : { },
          "properties" : {
            "namenode_heapsize" : "2048m"
          }
      }
    },
    {
      "activity-zeppelin-shiro": {
        "properties": {
          "users.admin": "admin"
        }
      }
    },
    {
      "hbase-site" : {
        "properties" : {
          "hbase.rootdir" : "hdfs://HACluster/apps/hbase/data"
        }
      }
   }
  ]
}
&lt;/PRE&gt;&lt;UL&gt;
&lt;LI&gt;hostmap.json&lt;/LI&gt;&lt;/UL&gt;&lt;PRE&gt;{
  "blueprint":"HACluster",
  "default_password":"admin",
  "host_groups": [
    {
      "name": "namenode1",
      "hosts":
      [
        { "fqdn": "namenode1" }
      ]
    },
    {
      "name": "namenode2",
      "hosts":
      [
        { "fqdn": "namenode2" }
      ]


    },
    {
      "name": "namenode3",
      "hosts":
      [
        { "fqdn": "namenode3" }
      ]
    },
    {
      "name": "hosts_group",
      "hosts":
      [
        { "fqdn": "datanode1" },
        { "fqdn": "datanode2" },
        { "fqdn": "datanode3" }
      ]
    }
  ]
}


&lt;/PRE&gt;&lt;P&gt;When I launch this configuration, HBase is the only service that doesn't work. I get the following errors (screenshot attached).&lt;/P&gt;&lt;P&gt;What am I missing? &lt;/P&gt;&lt;P&gt;Thank you.&lt;/P&gt;&lt;P&gt;&lt;span class="lia-inline-image-display-wrapper lia-image-align-inline" image-alt="43825-hbase.jpg" style="width: 1190px;"&gt;&lt;img src="https://community.cloudera.com/t5/image/serverpage/image-id/16027iDE5404B247C97433/image-size/medium?v=v2&amp;amp;px=400" role="button" title="43825-hbase.jpg" alt="43825-hbase.jpg" /&gt;&lt;/span&gt;&lt;/P&gt;</description>
    <pubDate>Sun, 18 Aug 2019 02:44:38 GMT</pubDate>
    <dc:creator>jon_udaondo</dc:creator>
    <dc:date>2019-08-18T02:44:38Z</dc:date>
    <item>
      <title>HBase blueprint configuration not working</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/HBase-blueprint-configuration-not-working/m-p/218922#M72309</link>
      <description>&lt;P&gt;&lt;span class="lia-inline-image-display-wrapper lia-image-align-inline" image-alt="43824-hbase.jpg" style="width: 1190px;"&gt;&lt;img src="https://community.cloudera.com/t5/image/serverpage/image-id/16026i5F64B1464240A37B/image-size/medium?v=v2&amp;amp;px=400" role="button" title="43824-hbase.jpg" alt="43824-hbase.jpg" /&gt;&lt;/span&gt;&lt;/P&gt;&lt;P&gt;Hello I have the following blueprint:&lt;/P&gt;&lt;UL&gt;
&lt;LI&gt;cluster_configuration.json&lt;/LI&gt;&lt;/UL&gt;&lt;PRE&gt;{
  "Blueprints": {
    "stack_name": "HDP",
    "stack_version": "2.6"
  },
  "host_groups": [
    {
      "name": "namenode1",
      "cardinality" : "1",
      "components": [
        { "name" : "HST_AGENT" },
        { "name" : "HDFS_CLIENT" },
        { "name" : "ZKFC" },
        { "name" : "ZOOKEEPER_SERVER" },
        { "name" : "HST_SERVER" },
        { "name" : "HBASE_CLIENT"},
        { "name" : "METRICS_MONITOR" },
        { "name" : "JOURNALNODE" },
        { "name" : "HBASE_MASTER"},
        { "name" : "NAMENODE" },
        { "name" : "APP_TIMELINE_SERVER" },
        { "name" : "METRICS_GRAFANA" }
      ]
    },
    {
      "name": "namenode2",
      "cardinality" : "1",
      "components": [
        { "name" : "ACTIVITY_EXPLORER" },
        { "name" : "HST_AGENT" },
        { "name" : "HDFS_CLIENT" },
        { "name" : "ZKFC" },
        { "name" : "ZOOKEEPER_SERVER" },
        { "name" : "HBASE_CLIENT"},
        { "name" : "HISTORYSERVER" },
        { "name" : "METRICS_MONITOR" },
        { "name" : "JOURNALNODE" },
        { "name" : "HBASE_MASTER"},
        { "name" : "NAMENODE" },
        { "name" : "METRICS_COLLECTOR" }
      ]
    },
    {
      "name": "namenode3",
      "cardinality" : "1",
      "components": [
        { "name" : "ACTIVITY_ANALYZER" },
        { "name" : "HST_AGENT" },
        { "name" : "MAPREDUCE2_CLIENT" },
        { "name" : "YARN_CLIENT" },
        { "name" : "HDFS_CLIENT" },
        { "name" : "ZOOKEEPER_SERVER" },
        { "name" : "HBASE_CLIENT"},
        { "name" : "METRICS_MONITOR" },
        { "name" : "JOURNALNODE" },
        { "name" : "RESOURCEMANAGER" }
      ]
    },
    {
      "name": "hosts_group",
      "cardinality" : "3",
      "components": [
        { "name" : "NODEMANAGER" },
        { "name" : "HST_AGENT" },
        { "name" : "MAPREDUCE2_CLIENT" },
        { "name" : "YARN_CLIENT" },
        { "name" : "HDFS_CLIENT" },
        { "name" : "HBASE_REGIONSERVER"},
        { "name" : "DATANODE" },
        { "name" : "HBASE_CLIENT"},
        { "name" : "METRICS_MONITOR" },
        { "name" : "ZOOKEEPER_CLIENT" }
      ]
    }
  ],
  "configurations": [
    {
      "core-site": {
        "properties" : {
          "fs.defaultFS" : "hdfs://HACluster",
          "ha.zookeeper.quorum": "%HOSTGROUP::namenode1%:2181,%HOSTGROUP::namenode2%:2181,%HOSTGROUP::namenode3%:2181",
          "hadoop.proxyuser.yarn.hosts": "%HOSTGROUP::namenode2%,%HOSTGROUP::namenode3%"
        }
      }
    },
    { "hdfs-site": {
        "properties" : {
          "dfs.client.failover.proxy.provider.HACluster" : "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider",
          "dfs.ha.automatic-failover.enabled" : "true",
          "dfs.ha.fencing.methods" : "shell(/bin/true)",
          "dfs.ha.namenodes.HACluster" : "nn1,nn2",
          "dfs.namenode.http-address" : "%HOSTGROUP::namenode1%:50070",
          "dfs.namenode.http-address.HACluster.nn1" : "%HOSTGROUP::namenode1%:50070",
          "dfs.namenode.http-address.HACluster.nn2" : "%HOSTGROUP::namenode2%:50070",
          "dfs.namenode.https-address" : "%HOSTGROUP::namenode1%:50470",
          "dfs.namenode.https-address.HACluster.nn1" : "%HOSTGROUP::namenode1%:50470",
          "dfs.namenode.https-address.HACluster.nn2" : "%HOSTGROUP::namenode2%:50470",
          "dfs.namenode.rpc-address.HACluster.nn1" : "%HOSTGROUP::namenode1%:8020",
          "dfs.namenode.rpc-address.HACluster.nn2" : "%HOSTGROUP::namenode2%:8020",
          "dfs.namenode.shared.edits.dir" : "qjournal://%HOSTGROUP::namenode1%:8485;%HOSTGROUP::namenode2%:8485;%HOSTGROUP::namenode3%:8485/mycluster",
          "dfs.nameservices" : "HACluster"
        }
      }
    },
    { "yarn-site": {
        "properties": {
          "yarn.resourcemanager.ha.enabled": "true",
          "yarn.resourcemanager.ha.rm-ids": "rm1,rm2",
          "yarn.resourcemanager.hostname.rm1": "%HOSTGROUP::namenode2%",
          "yarn.resourcemanager.hostname.rm2": "%HOSTGROUP::namenode3%",
          "yarn.resourcemanager.webapp.address.rm1": "%HOSTGROUP::namenode2%:8088",
          "yarn.resourcemanager.webapp.address.rm2": "%HOSTGROUP::namenode3%:8088",
          "yarn.resourcemanager.webapp.https.address.rm1": "%HOSTGROUP::namenode2%:8090",
          "yarn.resourcemanager.webapp.https.address.rm2": "%HOSTGROUP::namenode3%:8090",
          "yarn.resourcemanager.recovery.enabled": "true",
          "yarn.resourcemanager.store.class": "org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore",
          "yarn.resourcemanager.zk-address": "%HOSTGROUP::namenode1%:2181,%HOSTGROUP::namenode2%:2181,%HOSTGROUP::namenode3%:2181",
          "yarn.client.failover-proxy-provider": "org.apache.hadoop.yarn.client.ConfiguredRMFailoverProxyProvider",
          "yarn.resourcemanager.cluster-id": "yarn-cluster",
          "yarn.resourcemanager.ha.automatic-failover.zk-base-path": "/yarn-leader-election"
        }
      }
    },
    {
      "hdfs-site" : {
        "properties_attributes" : { },
          "properties" : {
            "dfs.datanode.data.dir" : "/mnt/secondary1,/mnt/secondary2"
          }
      }
    },
    {
      "hadoop-env" : {
        "properties_attributes" : { },
          "properties" : {
            "namenode_heapsize" : "2048m"
          }
      }
    },
    {
      "activity-zeppelin-shiro": {
        "properties": {
          "users.admin": "admin"
        }
      }
    },
    {
      "hbase-site" : {
        "properties" : {
          "hbase.rootdir" : "hdfs://HACluster/apps/hbase/data"
        }
      }
   }
  ]
}
&lt;/PRE&gt;&lt;UL&gt;
&lt;LI&gt;hostmap.json&lt;/LI&gt;&lt;/UL&gt;&lt;PRE&gt;{
  "blueprint":"HACluster",
  "default_password":"admin",
  "host_groups": [
    {
      "name": "namenode1",
      "hosts":
      [
        { "fqdn": "namenode1" }
      ]
    },
    {
      "name": "namenode2",
      "hosts":
      [
        { "fqdn": "namenode2" }
      ]


    },
    {
      "name": "namenode3",
      "hosts":
      [
        { "fqdn": "namenode3" }
      ]
    },
    {
      "name": "hosts_group",
      "hosts":
      [
        { "fqdn": "datanode1" },
        { "fqdn": "datanode2" },
        { "fqdn": "datanode3" }
      ]
    }
  ]
}


&lt;/PRE&gt;&lt;P&gt;When I launch this configuration, HBase is the only service that doesn't work. I get the following errors (screenshot attached).&lt;/P&gt;&lt;P&gt;What am I missing? &lt;/P&gt;&lt;P&gt;Thank you.&lt;/P&gt;&lt;P&gt;&lt;span class="lia-inline-image-display-wrapper lia-image-align-inline" image-alt="43825-hbase.jpg" style="width: 1190px;"&gt;&lt;img src="https://community.cloudera.com/t5/image/serverpage/image-id/16027iDE5404B247C97433/image-size/medium?v=v2&amp;amp;px=400" role="button" title="43825-hbase.jpg" alt="43825-hbase.jpg" /&gt;&lt;/span&gt;&lt;/P&gt;</description>
      <pubDate>Sun, 18 Aug 2019 02:44:38 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/HBase-blueprint-configuration-not-working/m-p/218922#M72309</guid>
      <dc:creator>jon_udaondo</dc:creator>
      <dc:date>2019-08-18T02:44:38Z</dc:date>
    </item>
    <item>
      <title>Re: HBase blueprint configuration not working</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/HBase-blueprint-configuration-not-working/m-p/218923#M72310</link>
      <description>&lt;P&gt; &lt;A rel="user" href="https://community.cloudera.com/users/47506/jonudaondo.html" nodeid="47506"&gt;@Jon Udaondo&lt;/A&gt;,&lt;/P&gt;&lt;P&gt;Can you please attach the region server logs located under (/var/log/hbase/hbase-hbase-regionserver-{hostname}.log)&lt;/P&gt;&lt;P&gt;Thanks,&lt;/P&gt;&lt;P&gt;Aditya&lt;/P&gt;</description>
      <pubDate>Tue, 05 Dec 2017 19:54:53 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/HBase-blueprint-configuration-not-working/m-p/218923#M72310</guid>
      <dc:creator>asirna</dc:creator>
      <dc:date>2017-12-05T19:54:53Z</dc:date>
    </item>
    <item>
      <title>Re: HBase blueprint configuration not working</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/HBase-blueprint-configuration-not-working/m-p/218924#M72311</link>
      <description>&lt;P&gt;&lt;A rel="user" href="https://community.cloudera.com/users/14200/asirna.html" nodeid="14200"&gt;@Aditya Sirna&lt;/A&gt;&lt;/P&gt;&lt;P&gt;This is the output:&lt;/P&gt;&lt;P&gt;$ cat /var/log/hbase/hbase-hbase-regionserver-namenode1.log&lt;/P&gt;&lt;PRE&gt;2017-12-05 12:11:29,525 INFO  [timeline] availability.MetricSinkWriteShardHostnameHashingStrategy: Calculated collector shard namenode2 based on hostname: namenode1
2017-12-05 12:15:09,962 INFO  [LruBlockCacheStatsExecutor] hfile.LruBlockCache: totalSize=1.67 MB, freeSize=1.59 GB, max=1.59 GB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0
2017-12-05 12:20:09,961 INFO  [LruBlockCacheStatsExecutor] hfile.LruBlockCache: totalSize=1.67 MB, freeSize=1.59 GB, max=1.59 GB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=59, evicted=0, evictedPerRun=0.0
2017-12-05 12:25:09,961 INFO  [LruBlockCacheStatsExecutor] hfile.LruBlockCache: totalSize=1.67 MB, freeSize=1.59 GB, max=1.59 GB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=89, evicted=0, evictedPerRun=0.0
2017-12-05 12:30:09,961 INFO  [LruBlockCacheStatsExecutor] hfile.LruBlockCache: totalSize=1.67 MB, freeSize=1.59 GB, max=1.59 GB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=119, evicted=0, evictedPerRun=0.0
2017-12-05 12:35:09,961 INFO  [LruBlockCacheStatsExecutor] hfile.LruBlockCache: totalSize=1.67 MB, freeSize=1.59 GB, max=1.59 GB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=149, evicted=0, evictedPerRun=0.0
2017-12-05 12:40:09,961 INFO  [LruBlockCacheStatsExecutor] hfile.LruBlockCache: totalSize=1.67 MB, freeSize=1.59 GB, max=1.59 GB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=179, evicted=0, evictedPerRun=0.0
2017-12-05 12:45:09,961 INFO  [LruBlockCacheStatsExecutor] hfile.LruBlockCache: totalSize=1.67 MB, freeSize=1.59 GB, max=1.59 GB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=209, evicted=0, evictedPerRun=0.0
2017-12-05 12:50:09,961 INFO  [LruBlockCacheStatsExecutor] hfile.LruBlockCache: totalSize=1.67 MB, freeSize=1.59 GB, max=1.59 GB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=239, evicted=0, evictedPerRun=0.0
2017-12-05 12:55:09,961 INFO  [LruBlockCacheStatsExecutor] hfile.LruBlockCache: totalSize=1.67 MB, freeSize=1.59 GB, max=1.59 GB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=269, evicted=0, evictedPerRun=0.0
&lt;/PRE&gt;&lt;P&gt;$ cat /var/log/hbase/hbase-hbase-regionserver-datanode1.log&lt;/P&gt;&lt;PRE&gt;Tue Dec  5 12:08:28 CET 2017 Starting regionserver on datanode1
core file size          (blocks, -c) 0
data seg size           (kbytes, -d) unlimited
scheduling priority             (-e) 0
file size               (blocks, -f) unlimited
pending signals                 (-i) 13671
max locked memory       (kbytes, -l) 64
max memory size         (kbytes, -m) unlimited
open files                      (-n) 32000
pipe size            (512 bytes, -p) 8
POSIX message queues     (bytes, -q) 819200
real-time priority              (-r) 0
stack size              (kbytes, -s) 8192
cpu time               (seconds, -t) unlimited
max user processes              (-u) 16000
virtual memory          (kbytes, -v) unlimited
file locks                      (-x) unlimited
Tue Dec  5 12:18:37 CET 2017 Starting regionserver on datanode1
core file size          (blocks, -c) 0
data seg size           (kbytes, -d) unlimited
scheduling priority             (-e) 0
file size               (blocks, -f) unlimited
pending signals                 (-i) 13671
max locked memory       (kbytes, -l) 64
max memory size         (kbytes, -m) unlimited
open files                      (-n) 32000
pipe size            (512 bytes, -p) 8
POSIX message queues     (bytes, -q) 819200
real-time priority              (-r) 0
stack size              (kbytes, -s) 8192
cpu time               (seconds, -t) unlimited
max user processes              (-u) 16000
virtual memory          (kbytes, -v) unlimited
file locks                      (-x) unlimited
Tue Dec  5 12:21:20 CET 2017 Starting regionserver on datanode1
core file size          (blocks, -c) 0
data seg size           (kbytes, -d) unlimited
scheduling priority             (-e) 0
file size               (blocks, -f) unlimited
pending signals                 (-i) 13671
max locked memory       (kbytes, -l) 64
max memory size         (kbytes, -m) unlimited
open files                      (-n) 32000
pipe size            (512 bytes, -p) 8
POSIX message queues     (bytes, -q) 819200
real-time priority              (-r) 0
stack size              (kbytes, -s) 8192
cpu time               (seconds, -t) unlimited
max user processes              (-u) 16000
virtual memory          (kbytes, -v) unlimited
file locks                      (-x) unlimited
Tue Dec  5 12:46:37 CET 2017 Starting regionserver on datanode1
core file size          (blocks, -c) 0
data seg size           (kbytes, -d) unlimited
scheduling priority             (-e) 0
file size               (blocks, -f) unlimited
pending signals                 (-i) 13671
max locked memory       (kbytes, -l) 64
max memory size         (kbytes, -m) unlimited
open files                      (-n) 32000
pipe size            (512 bytes, -p) 8
POSIX message queues     (bytes, -q) 819200
real-time priority              (-r) 0
stack size              (kbytes, -s) 8192
cpu time               (seconds, -t) unlimited
max user processes              (-u) 16000
virtual memory          (kbytes, -v) unlimited
file locks                      (-x) unlimited
&lt;/PRE&gt;</description>
      <pubDate>Tue, 05 Dec 2017 20:00:14 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/HBase-blueprint-configuration-not-working/m-p/218924#M72311</guid>
      <dc:creator>jon_udaondo</dc:creator>
      <dc:date>2017-12-05T20:00:14Z</dc:date>
    </item>
    <item>
      <title>Re: HBase blueprint configuration not working</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/HBase-blueprint-configuration-not-working/m-p/218925#M72312</link>
      <description>&lt;P&gt;&lt;A rel="user" href="https://community.cloudera.com/users/47506/jonudaondo.html" nodeid="47506"&gt;@Jon Udaondo&lt;/A&gt;,&lt;/P&gt;&lt;P&gt;I do not see any errors in the above logs. Can you do a tail on these logs and restart the region servers to see if there are any ERROR logs printed. That would be helpful for debugging.&lt;/P&gt;</description>
      <pubDate>Tue, 05 Dec 2017 20:04:01 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/HBase-blueprint-configuration-not-working/m-p/218925#M72312</guid>
      <dc:creator>asirna</dc:creator>
      <dc:date>2017-12-05T20:04:01Z</dc:date>
    </item>
    <item>
      <title>Re: HBase blueprint configuration not working</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/HBase-blueprint-configuration-not-working/m-p/218926#M72313</link>
      <description>&lt;A rel="user" href="https://community.cloudera.com/users/14200/asirna.html" nodeid="14200"&gt;@Aditya Sirna&lt;/A&gt;&lt;P&gt;When region servers are restarted, just the following is displayed when tailing (same as above) :(:&lt;/P&gt;&lt;PRE&gt;Tue Dec  5 13:11:53 CET 2017 Starting regionserver on datanode2
core file size          (blocks, -c) 0
data seg size           (kbytes, -d) unlimited
scheduling priority             (-e) 0
file size               (blocks, -f) unlimited
pending signals                 (-i) 13671
max locked memory       (kbytes, -l) 64
max memory size         (kbytes, -m) unlimited
open files                      (-n) 32000
pipe size            (512 bytes, -p) 8
POSIX message queues     (bytes, -q) 819200
real-time priority              (-r) 0
stack size              (kbytes, -s) 8192
cpu time               (seconds, -t) unlimited
max user processes              (-u) 16000
virtual memory          (kbytes, -v) unlimited
file locks                      (-x) unlimited


&lt;/PRE&gt;</description>
      <pubDate>Tue, 05 Dec 2017 20:16:51 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/HBase-blueprint-configuration-not-working/m-p/218926#M72313</guid>
      <dc:creator>jon_udaondo</dc:creator>
      <dc:date>2017-12-05T20:16:51Z</dc:date>
    </item>
    <item>
      <title>Re: HBase blueprint configuration not working</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/HBase-blueprint-configuration-not-working/m-p/218927#M72314</link>
      <description>&lt;A rel="user" href="https://community.cloudera.com/users/14200/asirna.html" nodeid="14200"&gt;@Aditya Sirna&lt;/A&gt;&lt;P&gt;I found the error... By default "hbase_regionserver_heapsize" was set to 4096m, greater than my server, therefore, regionservers were not able to start.&lt;/P&gt;&lt;P&gt;I Changed that value to 1024 and everything went ok!&lt;/P&gt;&lt;PRE&gt;"hbase_regionserver_heapsize" : "4096m",

"hbase_regionserver_heapsize" : "1024",&lt;/PRE&gt;</description>
      <pubDate>Tue, 05 Dec 2017 22:32:22 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/HBase-blueprint-configuration-not-working/m-p/218927#M72314</guid>
      <dc:creator>jon_udaondo</dc:creator>
      <dc:date>2017-12-05T22:32:22Z</dc:date>
    </item>
  </channel>
</rss>

