Cloudbreak: How to enable High Availability for NameNode in Blueprint

Expert Contributor

I want to enable NameNode high availability in my cluster, but I can't find the right way to enable it in the blueprint I'm supplying to Cloudbreak.

1 ACCEPTED SOLUTION

Super Collaborator

Hi,

Here's an example blueprint below. In the cluster creation wizard you must disable blueprint validation (it's under the advanced options). Don't worry about the localhost references in the configuration; they will be resolved by Ambari.

{
  "configurations": [
    {
      "hdfs-site": {
        "properties": {
          "dfs.namenode.http-address": "localhost:50070",
          "dfs.namenode.https-address": "localhost:50470",
          "dfs.namenode.rpc-address": "localhost:8020",
          "dfs.namenode.secondary.http-address": "localhost:50090",
          "dfs.nameservices": "mycluster",
          "dfs.internal.nameservices": "mycluster",
          "dfs.ha.namenodes.mycluster": "nn1,nn2",
          "dfs.client.failover.proxy.provider.mycluster": "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider",
          "dfs.ha.fencing.methods": "shell(/bin/true)",
          "dfs.ha.automatic-failover.enabled": "true",
          "dfs.namenode.shared.edits.dir": "qjournal://localhost:8485/mycluster",
          "dfs.namenode.https-address.mycluster.nn1": "%HOSTGROUP::master1%:50470",
          "dfs.namenode.http-address.mycluster.nn1": "%HOSTGROUP::master1%:50070",
          "dfs.namenode.rpc-address.mycluster.nn1": "%HOSTGROUP::master1%:8020",
          "dfs.namenode.https-address.mycluster.nn2": "%HOSTGROUP::master2%:50470",
          "dfs.namenode.http-address.mycluster.nn2": "%HOSTGROUP::master2%:50070",
          "dfs.namenode.rpc-address.mycluster.nn2": "%HOSTGROUP::master2%:8020"
        }
      }
    },
    {
      "core-site": {
        "properties": {
          "fs.defaultFS": "hdfs://mycluster",
          "ha.zookeeper.quorum": "localhost:2181"
        }
      }
    }
  ],
  "host_groups": [
    {
      "name": "master1",
      "configurations": [],
      "components": [
        {
          "name": "ZOOKEEPER_SERVER"
        },
        {
          "name": "ZOOKEEPER_CLIENT"
        },
        {
          "name": "NAMENODE"
        },
        {
          "name": "DATANODE"
        },
        {
          "name": "JOURNALNODE"
        },
        {
          "name": "ZKFC"
        },
        {
          "name": "HDFS_CLIENT"
        },
        {
          "name": "YARN_CLIENT"
        },
        {
          "name": "MAPREDUCE2_CLIENT"
        }
      ],
      "cardinality": "1"
    },
    {
      "name": "master2",
      "configurations": [],
      "components": [
        {
          "name": "ZOOKEEPER_SERVER"
        },
        {
          "name": "ZOOKEEPER_CLIENT"
        },
        {
          "name": "NAMENODE"
        },
        {
          "name": "DATANODE"
        },
        {
          "name": "ZKFC"
        },
        {
          "name": "JOURNALNODE"
        },
        {
          "name": "RESOURCEMANAGER"
        },
        {
          "name": "HISTORYSERVER"
        },
        {
          "name": "APP_TIMELINE_SERVER"
        },
        {
          "name": "HDFS_CLIENT"
        },
        {
          "name": "YARN_CLIENT"
        }
      ],
      "cardinality": "1"
    },
    {
      "name": "master3",
      "configurations": [],
      "components": [
        {
          "name": "ZOOKEEPER_SERVER"
        },
        {
          "name": "ZOOKEEPER_CLIENT"
        },
        {
          "name": "DATANODE"
        },
        {
          "name": "JOURNALNODE"
        },
        {
          "name": "HDFS_CLIENT"
        },
        {
          "name": "NODEMANAGER"
        },
        {
          "name": "YARN_CLIENT"
        }
      ],
      "cardinality": "1"
    },
    {
      "name": "worker",
      "configurations": [],
      "components": [
        {
          "name": "DATANODE"
        },
        {
          "name": "HDFS_CLIENT"
        },
        {
          "name": "NODEMANAGER"
        },
        {
          "name": "YARN_CLIENT"
        },
        {
          "name": "MAPREDUCE2_CLIENT"
        }
      ],
      "cardinality": "1"
    }
  ],
  "Blueprints": {
    "blueprint_name": "nn-ha",
    "stack_name": "HDP",
    "stack_version": "2.5"
  }
}
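
Since the wizard's blueprint validation is turned off, it can help to sanity-check the blueprint yourself before uploading it. Below is a minimal Python sketch (the filename nn-ha.json is just an assumption) that checks the usual NameNode HA component placement: NAMENODE in exactly two host groups, a ZKFC next to each NameNode, and odd-sized JournalNode and ZooKeeper quorums. It only looks at component placement; the hdfs-site and core-site properties still have to match the host group names, as in the example above.

#!/usr/bin/env python3
# Sanity-check an Ambari blueprint for the usual NameNode HA prerequisites.
# "nn-ha.json" is an assumed filename for a blueprint like the one above.
import json
from collections import Counter

with open("nn-ha.json") as f:
    blueprint = json.load(f)

# Component names per host group, e.g. {"master1": {"NAMENODE", "ZKFC", ...}, ...}
groups = {
    g["name"]: {c["name"] for c in g["components"]}
    for g in blueprint["host_groups"]
}

# How many host groups carry each component.
counts = Counter(name for members in groups.values() for name in members)

problems = []
if counts["NAMENODE"] != 2:
    problems.append("expected NAMENODE in exactly 2 host groups, found %d" % counts["NAMENODE"])
# Each NameNode host group also needs a ZKFC for automatic failover.
for name, members in groups.items():
    if "NAMENODE" in members and "ZKFC" not in members:
        problems.append("host group %s has NAMENODE but no ZKFC" % name)
# JournalNodes and ZooKeeper servers should form odd-sized quorums (>= 3).
for quorum in ("JOURNALNODE", "ZOOKEEPER_SERVER"):
    if counts[quorum] < 3 or counts[quorum] % 2 == 0:
        problems.append("%s count is %d, expected an odd number >= 3" % (quorum, counts[quorum]))

print("\n".join(problems) if problems else "Blueprint looks consistent for NameNode HA.")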


3 REPLIES


Expert Contributor

Thanks for the fast answer. If I use the Cloudbreak shell, is there a way to tell it to ignore the validation?

Super Collaborator