04-10-2019
06:19 PM
Dear Jay, When I am trying to enable HA in Ambari, I am getting the error below.
Traceback (most recent call last):
File "/var/lib/ambari-agent/cache/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py", line 348, in <module>
NameNode().execute()
File "/usr/lib/ambari-agent/lib/resource_management/libraries/script/script.py", line 375, in execute
method(env)
File "/var/lib/ambari-agent/cache/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py", line 90, in start
upgrade_suspended=params.upgrade_suspended, env=env)
File "/usr/lib/ambari-agent/lib/ambari_commons/os_family_impl.py", line 89, in thunk
return fn(*args, **kwargs)
File "/var/lib/ambari-agent/cache/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py", line 175, in namenode
create_log_dir=True
File "/var/lib/ambari-agent/cache/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py", line 276, in service
Execute(daemon_cmd, not_if=process_id_exists_command, environment=hadoop_env_exports)
File "/usr/lib/ambari-agent/lib/resource_management/core/base.py", line 166, in __init__
self.env.run()
File "/usr/lib/ambari-agent/lib/resource_management/core/environment.py", line 160, in run
self.run_action(resource, action)
File "/usr/lib/ambari-agent/lib/resource_management/core/environment.py", line 124, in run_action
provider_action()
File "/usr/lib/ambari-agent/lib/rexample.comesource_management/core/providers/system.py", line 262, in action_run
tries=self.resource.tries, try_sleep=self.resource.try_sleep)
File "/usr/lib/ambari-agent/lib/resource_management/core/shell.py", line 72, in inner
result = function(command, **kwargs)
File "/usr/lib/ambari-agent/lib/resource_management/core/shell.py", line 102, in checked_call
tries=tries, try_sleep=try_sleep, timeout_kill_strategy=timeout_kill_strategy)
File "/usr/lib/ambari-agent/lib/resource_management/core/shell.py", line 150, in _call_wrapper
result = _call(command, **kwargs_copy)
File "/usr/lib/ambari-agent/lib/resource_management/core/shell.py", line 303, in _call
raise ExecutionFailed(err_msg, code, out, err)
resource_management.core.exceptions.ExecutionFailed: Execution of 'ambari-sudo.sh su hdfs -l -s /bin/bash -c 'ulimit -c unlimited ; /usr/hdp/2.6.5.0-292/hadoop/sbin/hadoop-daemon.sh --config /usr/hdp/2.6.5.0-292/hadoop/conf start namenode'' returned 1. starting namenode, logging to /var/log/hadoop/hdfs/hadoop-hdfs-namenode-HBDCAUTDBN14.cidr.gov.in.out
SLF4J: Failed to load class "org.slf4j.impl.StaticLoggerBinder".
SLF4J: Defaulting to no-operation (NOP) logger implementation
SLF4J: See http://www.slf4j.org/codes.html#StaticLoggerBinder for further details.
Thanks & Regards, Prashant Gupta
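Note: the SLF4J messages in the .out file above are only logging noise; the actual reason the NameNode failed to start is normally recorded in the matching .log file in the same directory. A minimal check, with the log path inferred from the .out path above:
tail -n 200 /var/log/hadoop/hdfs/hadoop-hdfs-namenode-HBDCAUTDBN14.cidr.gov.in.log
# show only the most recent errors
grep -iE 'ERROR|FATAL' /var/log/hadoop/hdfs/hadoop-hdfs-namenode-HBDCAUTDBN14.cidr.gov.in.log | tail -n 20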
03-04-2019
07:35 PM
Dear Team, I am getting an error while installing the App Timeline Server. Please find the error below.
stderr:
Traceback (most recent call last):
File "/var/lib/ambari-agent/cache/common-services/YARN/2.1.0.2.0/package/scripts/application_timeline_server.py", line 89, in <module>
ApplicationTimelineServer().execute()
File "/usr/lib/ambari-agent/lib/resource_management/libraries/script/script.py", line 375, in execute
method(env)
File "/var/lib/ambari-agent/cache/common-services/YARN/2.1.0.2.0/package/scripts/application_timeline_server.py", line 38, in install
self.install_packages(env)
File "/usr/lib/ambari-agent/lib/resource_management/libraries/script/script.py", line 811, in install_packages
name = self.format_package_name(package['name'])
File "/usr/lib/ambari-agent/lib/resource_management/libraries/script/script.py", line 546, in format_package_name
raise Fail("Cannot match package for regexp name {0}. Available packages: {1}".format(name, self.available_packages_in_repos))
resource_management.core.exceptions.Fail: Cannot match package for regexp name hadoop_${stack_version}-yarn. Available packages: ['accumulo', 'accumulo-conf-standalone', 'accumulo-source', 'accumulo_2_6_5_0_292', 'accumulo_2_6_5_0_292-conf-standalone', 'accumulo_2_6_5_0_292-source', 'atlas-metadata', 'atlas-metadata-falcon-plugin', 'atlas-metadata-hive-plugin', 'atlas-metadata-sqoop-plugin', 'atlas-metadata-storm-plugin', 'atlas-metadata_2_6_5_0_292', 'atlas-metadata_2_6_5_0_292-falcon-plugin', 'atlas-metadata_2_6_5_0_292-hive-plugin', 'atlas-metadata_2_6_5_0_292-sqoop-plugin', 'atlas-metadata_2_6_5_0_292-storm-plugin', 'bigtop-tomcat', 'datafu', 'datafu_2_6_5_0_292', 'druid', 'druid_2_6_5_0_292', 'falcon', 'falcon-doc', 'falcon_2_6_5_0_292', 'falcon_2_6_5_0_292-doc', 'flume', 'flume-agent', 'flume_2_6_5_0_292', 'flume_2_6_5_0_292-agent', 'hadoop', 'hadoop-client', 'hadoop-conf-pseudo', 'hadoop-doc', 'hadoop-hdfs', 'hadoop-hdfs-datanode', 'hadoop-hdfs-fuse', 'hadoop-hdfs-journalnode', 'hadoop-hdfs-namenode', 'hadoop-hdfs-secondarynamenode', 'hadoop-hdfs-zkfc', 'hadoop-httpfs', 'hadoop-httpfs-server', 'hadoop-libhdfs', 'hadoop-mapreduce', 'hadoop-mapreduce-historyserver', 'hadoop-source', 'hadoop-yarn', 'hadoop-yarn-nodemanager', 'hadoop-yarn-proxyserver', 'hadoop-yarn-resourcemanager', 'hadoop-yarn-timelineserver', 'hadoop_2_6_5_0_292-client', 'hadoop_2_6_5_0_292-conf-pseudo', 'hadoop_2_6_5_0_292-doc', 'hadoop_2_6_5_0_292-hdfs-datanode', 'hadoop_2_6_5_0_292-hdfs-fuse', 'hadoop_2_6_5_0_292-hdfs-journalnode', 'hadoop_2_6_5_0_292-hdfs-namenode', 'hadoop_2_6_5_0_292-hdfs-secondarynamenode', 'hadoop_2_6_5_0_292-hdfs-zkfc', 'hadoop_2_6_5_0_292-httpfs', 'hadoop_2_6_5_0_292-httpfs-server', 'hadoop_2_6_5_0_292-libhdfs', 'hadoop_2_6_5_0_292-mapreduce-historyserver', 'hadoop_2_6_5_0_292-source', 'hadoop_2_6_5_0_292-yarn-nodemanager', 'hadoop_2_6_5_0_292-yarn-proxyserver', 'hadoop_2_6_5_0_292-yarn-resourcemanager', 'hadoop_2_6_5_0_292-yarn-timelineserver', 'hbase', 'hbase-doc', 'hbase-master', 'hbase-regionserver', 'hbase-rest', 'hbase-thrift', 'hbase-thrift2', 'hbase_2_6_5_0_292', 'hbase_2_6_5_0_292-doc', 'hbase_2_6_5_0_292-master', 'hbase_2_6_5_0_292-regionserver', 'hbase_2_6_5_0_292-rest', 'hbase_2_6_5_0_292-thrift', 'hbase_2_6_5_0_292-thrift2', 'hive', 'hive-hcatalog', 'hive-hcatalog-server', 'hive-jdbc', 'hive-metastore', 'hive-server', 'hive-server2', 'hive-webhcat', 'hive-webhcat-server', 'hive2', 'hive2-jdbc', 'hive2_2_6_5_0_292', 'hive2_2_6_5_0_292-jdbc', 'hive_2_6_5_0_292', 'hive_2_6_5_0_292-hcatalog',
'hive_2_6_5_0_292-hcatalog-server', 'hive_2_6_5_0_292-jdbc', 'hive_2_6_5_0_292-metastore', 'hive_2_6_5_0_292-server', 'hive_2_6_5_0_292-server2', 'hive_2_6_5_0_292-webhcat', 'hive_2_6_5_0_292-webhcat-server', 'hue', 'hue-beeswax', 'hue-common', 'hue-hcatalog', 'hue-oozie', 'hue-pig', 'hue-server', 'kafka', 'kafka_2_6_5_0_292', 'knox', 'knox_2_6_5_0_292', 'livy', 'livy2', 'livy2_2_6_5_0_292', 'livy_2_6_5_0_292', 'mahout', 'mahout-doc', 'mahout_2_6_5_0_292', 'mahout_2_6_5_0_292-doc', 'oozie', 'oozie-client', 'oozie-common', 'oozie-sharelib', 'oozie-sharelib-distcp', 'oozie-sharelib-hcatalog', 'oozie-sharelib-hive', 'oozie-sharelib-hive2', 'oozie-sharelib-mapreduce-streaming', 'oozie-sharelib-pig', 'oozie-sharelib-spark', 'oozie-sharelib-sqoop', 'oozie-webapp', 'oozie_2_6_5_0_292', 'oozie_2_6_5_0_292-client', 'oozie_2_6_5_0_292-common', 'oozie_2_6_5_0_292-sharelib', 'oozie_2_6_5_0_292-sharelib-distcp', 'oozie_2_6_5_0_292-sharelib-hcatalog', 'oozie_2_6_5_0_292-sharelib-hive', 'oozie_2_6_5_0_292-sharelib-hive2', 'oozie_2_6_5_0_292-sharelib-mapreduce-streaming', 'oozie_2_6_5_0_292-sharelib-pig', 'oozie_2_6_5_0_292-sharelib-spark', 'oozie_2_6_5_0_292-sharelib-sqoop', 'oozie_2_6_5_0_292-webapp', 'phoenix', 'phoenix-queryserver', 'phoenix_2_6_5_0_292', 'phoenix_2_6_5_0_292-queryserver', 'pig', 'pig_2_6_5_0_292', 'ranger-admin', 'ranger-atlas-plugin', 'ranger-hbase-plugin', 'ranger-hdfs-plugin', 'ranger-hive-plugin', 'ranger-kafka-plugin', 'ranger-kms', 'ranger-knox-plugin', 'ranger-solr-plugin', 'ranger-storm-plugin', 'ranger-tagsync', 'ranger-usersync', 'ranger-yarn-plugin', 'ranger_2_6_5_0_292-admin', 'ranger_2_6_5_0_292-atlas-plugin', 'ranger_2_6_5_0_292-hbase-plugin', 'ranger_2_6_5_0_292-hive-plugin', 'ranger_2_6_5_0_292-kafka-plugin', 'ranger_2_6_5_0_292-kms', 'ranger_2_6_5_0_292-knox-plugin', 'ranger_2_6_5_0_292-solr-plugin', 'ranger_2_6_5_0_292-storm-plugin', 'ranger_2_6_5_0_292-tagsync', 'ranger_2_6_5_0_292-usersync', 'shc', 'shc_2_6_5_0_292', 'slider', 'slider_2_6_5_0_292', 'spark', 'spark-history-server', 'spark-master', 'spark-python', 'spark-worker', 'spark-yarn-shuffle', 'spark2', 'spark2-history-server', 'spark2-master', 'spark2-python', 'spark2-worker', 'spark2-yarn-shuffle', 'spark2_2_6_5_0_292', 'spark2_2_6_5_0_292-history-server', 'spark2_2_6_5_0_292-master', 'spark2_2_6_5_0_292-python', 'spark2_2_6_5_0_292-worker', 'spark_2_6_5_0_292', 'spark_2_6_5_0_292-history-server', 'spark_2_6_5_0_292-master', 'spark_2_6_5_0_292-python', 'spark_2_6_5_0_292-worker', 'spark_llap', 'spark_llap_2_6_5_0_292', 'sqoop', 'sqoop-metastore', 'sqoop_2_6_5_0_292', 'sqoop_2_6_5_0_292-metastore', 'storm', 'storm-slider-client', 'storm_2_6_5_0_292', 'storm_2_6_5_0_292-slider-client', 'superset', 'superset_2_6_5_0_292', 'tez', 'tez_2_6_5_0_292', 'tez_hive2', 'tez_hive2_2_6_5_0_292', 'zeppelin', 'zeppelin_2_6_5_0_292', 'zookeeper', 'zookeeper-server', 'zookeeper_2_6_5_0_292-server', 'openblas', 'openblas-Rblas', 'openblas-devel', 'openblas-openmp', 'openblas-openmp64', 'openblas-openmp64_', 'openblas-serial64', 'openblas-serial64_', 'openblas-static', 'openblas-threads', 'openblas-threads64', 'openblas-threads64_', 'snappy', 'snappy-devel', 'openblas', 'openblas-Rblas', 'openblas-devel', 'openblas-openmp', 'openblas-openmp64', 'openblas-openmp64_', 'openblas-serial64', 'openblas-serial64_', 'openblas-static', 'openblas-threads', 'openblas-threads64', 'openblas-threads64_', 'snappy', 'snappy-devel', 'accumulo', 'accumulo-conf-standalone', 'accumulo-source', 'accumulo_2_6_5_0_292', 
'accumulo_2_6_5_0_292-conf-standalone', 'accumulo_2_6_5_0_292-source', 'atlas-metadata', 'atlas-metadata-falcon-plugin', 'atlas-metadata-hive-plugin', 'atlas-metadata-sqoop-plugin', 'atlas-metadata-storm-plugin', 'atlas-metadata_2_6_5_0_292', 'atlas-metadata_2_6_5_0_292-falcon-plugin', 'atlas-metadata_2_6_5_0_292-hive-plugin', 'atlas-metadata_2_6_5_0_292-sqoop-plugin', 'atlas-metadata_2_6_5_0_292-storm-plugin', 'bigtop-tomcat', 'datafu', 'datafu_2_6_5_0_292', 'druid', 'druid_2_6_5_0_292', 'falcon', 'falcon-doc', 'falcon_2_6_5_0_292', 'falcon_2_6_5_0_292-doc', 'flume', 'flume-agent', 'flume_2_6_5_0_292', 'flume_2_6_5_0_292-agent', 'hadoop', 'hadoop-client', 'hadoop-conf-pseudo', 'hadoop-doc', 'hadoop-hdfs', 'hadoop-hdfs-datanode', 'hadoop-hdfs-fuse', 'hadoop-hdfs-journalnode', 'hadoop-hdfs-namenode', 'hadoop-hdfs-secondarynamenode', 'hadoop-hdfs-zkfc', 'hadoop-httpfs', 'hadoop-httpfs-server', 'hadoop-libhdfs', 'hadoop-mapreduce', 'hadoop-mapreduce-historyserver', 'hadoop-source', 'hadoop-yarn', 'hadoop-yarn-nodemanager', 'hadoop-yarn-proxyserver', 'hadoop-yarn-resourcemanager', 'hadoop-yarn-timelineserver', 'hadoop_2_6_5_0_292-client', 'hadoop_2_6_5_0_292-conf-pseudo', 'hadoop_2_6_5_0_292-doc', 'hadoop_2_6_5_0_292-hdfs-datanode', 'hadoop_2_6_5_0_292-hdfs-fuse', 'hadoop_2_6_5_0_292-hdfs-journalnode', 'hadoop_2_6_5_0_292-hdfs-namenode', 'hadoop_2_6_5_0_292-hdfs-secondarynamenode', 'hadoop_2_6_5_0_292-hdfs-zkfc', 'hadoop_2_6_5_0_292-httpfs', 'hadoop_2_6_5_0_292-httpfs-server', 'hadoop_2_6_5_0_292-libhdfs', 'hadoop_2_6_5_0_292-mapreduce-historyserver', 'hadoop_2_6_5_0_292-source', 'hadoop_2_6_5_0_292-yarn-nodemanager', 'hadoop_2_6_5_0_292-yarn-proxyserver', 'hadoop_2_6_5_0_292-yarn-resourcemanager', 'hadoop_2_6_5_0_292-yarn-timelineserver', 'hbase', 'hbase-doc', 'hbase-master', 'hbase-regionserver', 'hbase-rest', 'hbase-thrift', 'hbase-thrift2', 'hbase_2_6_5_0_292', 'hbase_2_6_5_0_292-doc', 'hbase_2_6_5_0_292-master', 'hbase_2_6_5_0_292-regionserver', 'hbase_2_6_5_0_292-rest', 'hbase_2_6_5_0_292-thrift', 'hbase_2_6_5_0_292-thrift2', 'hive', 'hive-hcatalog', 'hive-hcatalog-server', 'hive-jdbc', 'hive-metastore', 'hive-server', 'hive-server2', 'hive-webhcat', 'hive-webhcat-server', 'hive2', 'hive2-jdbc', 'hive2_2_6_5_0_292', 'hive2_2_6_5_0_292-jdbc', 'hive_2_6_5_0_292', 'hive_2_6_5_0_292-hcatalog', 'hive_2_6_5_0_292-hcatalog-server', 'hive_2_6_5_0_292-jdbc', 'hive_2_6_5_0_292-metastore', 'hive_2_6_5_0_292-server', 'hive_2_6_5_0_292-server2', 'hive_2_6_5_0_292-webhcat', 'hive_2_6_5_0_292-webhcat-server', 'hue', 'hue-beeswax', 'hue-common', 'hue-hcatalog', 'hue-oozie', 'hue-pig', 'hue-server', 'kafka', 'kafka_2_6_5_0_292', 'knox', 'knox_2_6_5_0_292', 'livy', 'livy2', 'livy2_2_6_5_0_292', 'livy_2_6_5_0_292', 'mahout', 'mahout-doc', 'mahout_2_6_5_0_292', 'mahout_2_6_5_0_292-doc', 'oozie', 'oozie-client', 'oozie-common', 'oozie-sharelib', 'oozie-sharelib-distcp', 'oozie-sharelib-hcatalog', 'oozie-sharelib-hive', 'oozie-sharelib-hive2', 'oozie-sharelib-mapreduce-streaming', 'oozie-sharelib-pig', 'oozie-sharelib-spark', 'oozie-sharelib-sqoop', 'oozie-webapp', 'oozie_2_6_5_0_292', 'oozie_2_6_5_0_292-client', 'oozie_2_6_5_0_292-common', 'oozie_2_6_5_0_292-sharelib', 'oozie_2_6_5_0_292-sharelib-distcp', 'oozie_2_6_5_0_292-sharelib-hcatalog', 'oozie_2_6_5_0_292-sharelib-hive', 'oozie_2_6_5_0_292-sharelib-hive2', 'oozie_2_6_5_0_292-sharelib-mapreduce-streaming', 'oozie_2_6_5_0_292-sharelib-pig', 'oozie_2_6_5_0_292-sharelib-spark', 'oozie_2_6_5_0_292-sharelib-sqoop', 'oozie_2_6_5_0_292-webapp', 'phoenix', 
'phoenix-queryserver', 'phoenix_2_6_5_0_292', 'phoenix_2_6_5_0_292-queryserver', 'pig', 'pig_2_6_5_0_292', 'ranger-admin', 'ranger-atlas-plugin', 'ranger-hbase-plugin', 'ranger-hdfs-plugin', 'ranger-hive-plugin', 'ranger-kafka-plugin', 'ranger-kms', 'ranger-knox-plugin', 'ranger-solr-plugin', 'ranger-storm-plugin', 'ranger-tagsync', 'ranger-usersync', 'ranger-yarn-plugin', 'ranger_2_6_5_0_292-admin', 'ranger_2_6_5_0_292-atlas-plugin', 'ranger_2_6_5_0_292-hbase-plugin', 'ranger_2_6_5_0_292-hive-plugin', 'ranger_2_6_5_0_292-kafka-plugin', 'ranger_2_6_5_0_292-kms', 'ranger_2_6_5_0_292-knox-plugin', 'ranger_2_6_5_0_292-solr-plugin', 'ranger_2_6_5_0_292-storm-plugin', 'ranger_2_6_5_0_292-tagsync', 'ranger_2_6_5_0_292-usersync', 'shc', 'shc_2_6_5_0_292', 'slider', 'slider_2_6_5_0_292', 'spark', 'spark-history-server', 'spark-master', 'spark-python', 'spark-worker', 'spark-yarn-shuffle', 'spark2', 'spark2-history-server', 'spark2-master', 'spark2-python', 'spark2-worker', 'spark2-yarn-shuffle', 'spark2_2_6_5_0_292', 'spark2_2_6_5_0_292-history-server', 'spark2_2_6_5_0_292-master', 'spark2_2_6_5_0_292-python', 'spark2_2_6_5_0_292-worker', 'spark_2_6_5_0_292', 'spark_2_6_5_0_292-history-server', 'spark_2_6_5_0_292-master', 'spark_2_6_5_0_292-python', 'spark_2_6_5_0_292-worker', 'spark_llap', 'spark_llap_2_6_5_0_292', 'sqoop', 'sqoop-metastore', 'sqoop_2_6_5_0_292', 'sqoop_2_6_5_0_292-metastore', 'storm', 'storm-slider-client', 'storm_2_6_5_0_292', 'storm_2_6_5_0_292-slider-client', 'superset', 'superset_2_6_5_0_292', 'tez', 'tez_2_6_5_0_292', 'tez_hive2', 'tez_hive2_2_6_5_0_292', 'zeppelin', 'zeppelin_2_6_5_0_292', 'zookeeper', 'zookeeper-server', 'zookeeper_2_6_5_0_292-server']
stdout:
2019-02-28 19:11:15,211 - Stack Feature Version Info: Cluster Stack=2.6, Command Stack=None, Command Version=None -> 2.6
2019-02-28 19:11:15,216 - Using hadoop conf dir: /usr/hdp/current/hadoop-client/conf
2019-02-28 19:11:15,217 - Group['hdfs'] {}
2019-02-28 19:11:15,219 - Group['hadoop'] {}
2019-02-28 19:11:15,219 - Group['users'] {}
2019-02-28 19:11:15,219 - User['zookeeper'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'hadoop'], 'uid': None}
2019-02-28 19:11:15,358 - User['ams'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'hadoop'], 'uid': None}
2019-02-28 19:11:15,368 - User['ambari-qa'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'users'], 'uid': None}
2019-02-28 19:11:15,379 - User['hdfs'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': ['hdfs'], 'uid': None}
2019-02-28 19:11:15,391 - User['yarn'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'hadoop'], 'uid': None}
2019-02-28 19:11:15,402 - User['mapred'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'hadoop'], 'uid': None}
2019-02-28 19:11:15,413 - File['/var/lib/ambari-agent/tmp/changeUid.sh'] {'content': StaticFile('changeToSecureUid.sh'), 'mode': 0555}
2019-02-28 19:11:15,415 - Execute['/var/lib/ambari-agent/tmp/changeUid.sh ambari-qa /tmp/hadoop-ambari-qa,/tmp/hsperfdata_ambari-qa,/home/ambari-qa,/tmp/ambari-qa,/tmp/sqoop-ambari-qa 0'] {'not_if': '(test $(id -u ambari-qa) -gt 1000) || (false)'}
2019-02-28 19:11:15,430 - Skipping Execute['/var/lib/ambari-agent/tmp/changeUid.sh ambari-qa /tmp/hadoop-ambari-qa,/tmp/hsperfdata_ambari-qa,/home/ambari-qa,/tmp/ambari-qa,/tmp/sqoop-ambari-qa 0'] due to not_if
2019-02-28 19:11:15,431 - Group['hdfs'] {}
2019-02-28 19:11:15,431 - User['hdfs'] {'fetch_nonlocal_groups': True, 'groups': ['hdfs', u'hdfs']}
2019-02-28 19:11:15,442 - FS Type:
2019-02-28 19:11:15,442 - Directory['/etc/hadoop'] {'mode': 0755}
2019-02-28 19:11:15,456 - File['/usr/hdp/current/hadoop-client/conf/hadoop-env.sh'] {'content': InlineTemplate(...), 'owner': 'hdfs', 'group': 'hadoop'}
2019-02-28 19:11:15,457 - Directory['/var/lib/ambari-agent/tmp/hadoop_java_io_tmpdir'] {'owner': 'hdfs', 'group': 'hadoop', 'mode': 01777}
2019-02-28 19:11:15,474 - Repository['HDP-2.6-repo-51'] {'append_to_file': False, 'base_url': 'http://10.66.72.201/HDP/centos7/2.6.5.0-292', 'action': ['create'], 'components': [u'HDP', 'main'], 'repo_template': '[{{repo_id}}]\nname={{repo_id}}\n{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}\n\npath=/\nenabled=1\ngpgcheck=0', 'repo_file_name': 'ambari-hdp-51', 'mirror_list': None}
2019-02-28 19:11:15,482 - File['/etc/yum.repos.d/ambari-hdp-51.repo'] {'content': '[HDP-2.6-repo-51]\nname=HDP-2.6-repo-51\nbaseurl=http://10.66.72.201/HDP/centos7/2.6.5.0-292\n\npath=/\nenabled=1\ngpgcheck=0'}
2019-02-28 19:11:15,483 - Writing File['/etc/yum.repos.d/ambari-hdp-51.repo'] because contents don't match
2019-02-28 19:11:15,483 - Repository['HDP-UTILS-1.1.0.21-repo-51'] {'append_to_file': True, 'base_url': 'http://10.66.72.201/HDP-UTILS', 'action': ['create'], 'components': [u'HDP-UTILS', 'main'], 'repo_template': '[{{repo_id}}]\nname={{repo_id}}\n{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}\n\npath=/\nenabled=1\ngpgcheck=0', 'repo_file_name': 'ambari-hdp-51', 'mirror_list': None}
2019-02-28 19:11:15,487 - File['/etc/yum.repos.d/ambari-hdp-51.repo'] {'content': '[HDP-2.6-repo-51]\nname=HDP-2.6-repo-51\nbaseurl=http://10.66.72.201/HDP/centos7/2.6.5.0-292\n\npath=/\nenabled=1\ngpgcheck=0\n[HDP-UTILS-1.1.0.21-repo-51]\nname=HDP-UTILS-1.1.0.21-repo-51\nbaseurl=http://10.66.72.201/HDP-UTILS\n\npath=/\nenabled=1\ngpgcheck=0'}
2019-02-28 19:11:15,487 - Writing File['/etc/yum.repos.d/ambari-hdp-51.repo'] because contents don't match
2019-02-28 19:11:15,491 - Package['unzip'] {'retry_on_repo_unavailability': False, 'retry_count': 5}
2019-02-28 19:11:15,840 - Skipping installation of existing package unzip
2019-02-28 19:11:15,841 - Package['curl'] {'retry_on_repo_unavailability': False, 'retry_count': 5}
2019-02-28 19:11:15,860 - Skipping installation of existing package curl
2019-02-28 19:11:15,860 - Package['hdp-select'] {'retry_on_repo_unavailability': False, 'retry_count': 5}
2019-02-28 19:11:15,877 - Skipping installation of existing package hdp-select
2019-02-28 19:11:16,194 - Command repositories: HDP-2.6-repo-51, HDP-UTILS-1.1.0.21-repo-51
2019-02-28 19:11:16,194 - Applicable repositories: HDP-2.6-repo-51, HDP-UTILS-1.1.0.21-repo-51
2019-02-28 19:11:16,196 - Looking for matching packages in the following repositories: HDP-2.6-repo-51, HDP-UTILS-1.1.0.21-repo-51
2019-02-28 19:11:19,536 - Adding fallback repositories: HDP-UTILS-1.1.0.21-repo-5, HDP-2.6-repo-5
2019-02-28 19:11:22,829 - No package found for hadoop_${stack_version}-yarn(hadoop_(\d|_)+-yarn$)
Command failed after 1 tries
Thanks & Regards, Prashant Gupta
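Note: the available-packages list above contains hadoop_2_6_5_0_292-yarn-nodemanager, -resourcemanager and -timelineserver, but no bare hadoop_2_6_5_0_292-yarn, which is why the regexp hadoop_(\d|_)+-yarn$ matches nothing. A minimal repo-side check, assuming yum and the repo id from the log above:
yum clean all
# list what the HDP repo actually provides for the versioned hadoop packages
yum --disablerepo='*' --enablerepo='HDP-2.6-repo-51' list available 'hadoop_2_6_5_0_292*'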
02-27-2019
05:50 PM
Dear Team,
When we are trying to launch the instance in Ambari 2.6.2, we are getting an error. Please find the steps and error below.
steps :-
========
Get Started > Select Version > install options > Confirm Hosts
error:-
======
Registering with the server...
Registration with the server failed.
Ambari-server log file :-
27 Feb 2019 14:40:42,613 INFO [pool-18-thread-1] AmbariMetricSinkImpl:95 - No clusters configured.
27 Feb 2019 14:45:42,613 INFO [pool-18-thread-1] MetricsServiceImpl:65 - Attempting to initialize metrics sink
27 Feb 2019 14:45:42,613 INFO [pool-18-thread-1] MetricsServiceImpl:81 - ********* Configuring Metric Sink **********
27 Feb 2019 14:45:42,613 INFO [pool-18-thread-1] AmbariMetricSinkImpl:95 - No clusters configured.
27 Feb 2019 14:50:42,614 INFO [pool-18-thread-1] MetricsServiceImpl:65 - Attempting to initialize metrics sink
27 Feb 2019 14:50:42,614 INFO [pool-18-thread-1] MetricsServiceImpl:81 - ********* Configuring Metric Sink **********
27 Feb 2019 14:50:42,614 INFO [pool-18-thread-1] AmbariMetricSinkImpl:95 - No clusters configured.
27 Feb 2019 14:55:42,614 INFO [pool-18-thread-1] MetricsServiceImpl:65 - Attempting to initialize metrics sink
27 Feb 2019 14:55:42,615 INFO [pool-18-thread-1] MetricsServiceImpl:81 - ********* Configuring Metric Sink **********
27 Feb 2019 14:55:42,615 INFO [pool-18-thread-1] AmbariMetricSinkImpl:95 - No clusters configured.
Ambari-agent log file :-
Refer to: https://bugzilla.redhat.com/show_bug.cgi?id=1022468 for more details.
WARNING 2019-02-27 15:00:31,741 NetUtil.py:124 - Server at https://xxx.yyyy.example.com:8440 is not reachable, sleeping for 10 seconds...
INFO 2019-02-27 15:00:41,741 NetUtil.py:70 - Connecting to https://xxx.yyyy.example.com:8440/ca
ERROR 2019-02-27 15:00:41,744 NetUtil.py:96 - [Errno 8] _ssl.c:492: EOF occurred in violation of protocol
ERROR 2019-02-27 15:00:41,744 NetUtil.py:97 - SSLError: Failed to connect. Please check openssl library versions. Refer to: https://bugzilla.redhat.com/show_bug.cgi?id=1022468 for more details.
WARNING 2019-02-27 15:00:41,744 NetUtil.py:124 - Server at https://xxx.yyyy.example.com:8440 is not reachable, sleeping for 10 seconds...
INFO 2019-02-27 15:00:51,745 NetUtil.py:70 - Connecting to https://xxx.yyyy.example.com:8440/ca
ERROR 2019-02-27 15:00:51,747 NetUtil.py:96 - [Errno 8] _ssl.c:492: EOF occurred in violation of protocol
ERROR 2019-02-27 15:00:51,747 NetUtil.py:97 - SSLError: Failed to connect. Please check openssl library versions. Refer to: https://bugzilla.redhat.com/show_bug.cgi?id=1022468 for more details.
WARNING 2019-02-27 15:00:51,748 NetUtil.py:124 - Server at https://xxx.yyyy.example.com:8440 is not reachable, sleeping for 10 seconds...
INFO 2019-02-27 15:01:01,748 NetUtil.py:70 - Connecting to https://xxx.yyyy.example.com:8440/ca
ERROR 2019-02-27 15:01:01,751 NetUtil.py:96 - [Errno 8] _ssl.c:492: EOF occurred in violation of protocol
ERROR 2019-02-27 15:01:01,751 NetUtil.py:97 - SSLError: Failed to connect. Please check openssl library versions. Refer to: https://bugzilla.redhat.com/show_bug.cgi?id=1022468 for more details.
WARNING 2019-02-27 15:01:01,751 NetUtil.py:124 - Server at https://xxx.yyyy.example.com:8440 is not reachable, sleeping for 10 seconds...
INFO 2019-02-27 15:01:11,752 main.py:439 - Connecting to Ambari server at https://xxx.yyyy.example.com:8440 (10.66.52.93)
INFO 2019-02-27 15:01:11,752 NetUtil.py:70 - Connecting to https://xxx.yyyy.example.com:8440/ca
ERROR 2019-02-27 15:01:11,755 NetUtil.py:96 - [Errno 8] _ssl.c:492: EOF occurred in violation of protocol
ERROR 2019-02-27 15:01:11,755 NetUtil.py:97 - SSLError: Failed to connect. Please check openssl library versions. Refer to: https://bugzilla.redhat.com/show_bug.cgi?id=1022468 for more details.
WARNING 2019-02-27 15:01:11,755 NetUtil.py:124 - Server at https://xxx.yyyy.example.com:8440 is not reachable, sleeping for 10 seconds...
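The repeating '[Errno 8] _ssl.c:492: EOF occurred in violation of protocol' usually means the agent's Python/openssl pair cannot complete the TLS handshake with the server on port 8440. A commonly suggested workaround, sketched here under the assumption of a stock agent config at /etc/ambari-agent/conf/ambari-agent.ini, is to force TLSv1.2 and restart the agent:
# run on each agent host; adds the setting under [security] if it is not already there
grep -q '^force_https_protocol' /etc/ambari-agent/conf/ambari-agent.ini || \
  sed -i '/^\[security\]/a force_https_protocol=PROTOCOL_TLSv1_2' /etc/ambari-agent/conf/ambari-agent.ini
ambari-agent restart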
Thanks & Regards,
Prashant Gupta
01-09-2019
01:36 PM
Dear @Akhil S Naik, We have followed the same prerequisite steps as mentioned in the reference link above, but we are still getting the same error. Please find the details below.
>> tail -f /var/log/ambari-server/ambari-server.log
03 Jan 2019 18:24:27,338 INFO [pool-18-thread-1] AmbariMetricSinkImpl:95 - No clusters configured.
03 Jan 2019 18:29:27,339 INFO [pool-18-thread-1] MetricsServiceImpl:65 - Attempting to initialize metrics sink
03 Jan 2019 18:29:27,339 INFO [pool-18-thread-1] MetricsServiceImpl:81 - ********* Configuring Metric Sink **********
03 Jan 2019 18:29:27,339 INFO [pool-18-thread-1] AmbariMetricSinkImpl:95 - No clusters configured.
03 Jan 2019 18:34:27,339 INFO [pool-18-thread-1] MetricsServiceImpl:65 - Attempting to initialize metrics sink
03 Jan 2019 18:34:27,339 INFO [pool-18-thread-1] MetricsServiceImpl:81 - ********* Configuring Metric Sink **********
03 Jan 2019 18:34:27,340 INFO [pool-18-thread-1] AmbariMetricSinkImpl:95 - No clusters configured.
03 Jan 2019 18:39:27,340 INFO [pool-18-thread-1] MetricsServiceImpl:65 - Attempting to initialize metrics sink
03 Jan 2019 18:39:27,340 INFO [pool-18-thread-1] MetricsServiceImpl:81 - ********* Configuring Metric Sink **********
03 Jan 2019 18:39:27,340 INFO [pool-18-thread-1] AmbariMetricSinkImpl:95 - No clusters configured.
03 Jan 2019 18:44:27,340 INFO [pool-18-thread-1] MetricsServiceImpl:65 - Attempting to initialize metrics sink
03 Jan 2019 18:44:27,341 INFO [pool-18-thread-1] MetricsServiceImpl:81 - ********* Configuring Metric Sink **********
03 Jan 2019 18:44:27,341 INFO [pool-18-thread-1] AmbariMetricSinkImpl:95 - No clusters configured.
03 Jan 2019 18:49:27,341 INFO [pool-18-thread-1] MetricsServiceImpl:65 - Attempting to initialize metrics sink
03 Jan 2019 18:49:27,341 INFO [pool-18-thread-1] MetricsServiceImpl:81 - ********* Configuring Metric Sink **********
03 Jan 2019 18:49:27,341 INFO [pool-18-thread-1] AmbariMetricSinkImpl:95 - No clusters configured.
03 Jan 2019 18:54:27,342 INFO [pool-18-thread-1] MetricsServiceImpl:65 - Attempting to initialize metrics sink
03 Jan 2019 18:54:27,342 INFO [pool-18-thread-1] MetricsServiceImpl:81 - ********* Configuring Metric Sink **********
03 Jan 2019 18:54:27,342 INFO [pool-18-thread-1] AmbariMetricSinkImpl:95 - No clusters configured.
Regards, Prashant Gupta
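Note: the server log above only shows the periodic metrics-sink heartbeat, so it will not explain the registration failure; that detail usually lands in the agent log. A quick look, assuming the default agent log location:
tail -n 50 /var/log/ambari-agent/ambari-agent.log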
01-09-2019
11:01 AM
Dear @Akhil S Naik, Any update on the above query? Thanks & Regards, Prashant Gupta
01-03-2019
02:22 PM
Dear Akhil, We have followed the same steps as per the mentioned reference link, but I am getting the same error.
>> tail -f /var/log/ambari-server/ambari-server.log
03 Jan 2019 18:24:27,338 INFO [pool-18-thread-1] AmbariMetricSinkImpl:95 - No clusters configured.
03 Jan 2019 18:29:27,339 INFO [pool-18-thread-1] MetricsServiceImpl:65 - Attempting to initialize metrics sink
03 Jan 2019 18:29:27,339 INFO [pool-18-thread-1] MetricsServiceImpl:81 - ********* Configuring Metric Sink **********
03 Jan 2019 18:29:27,339 INFO [pool-18-thread-1] AmbariMetricSinkImpl:95 - No clusters configured.
03 Jan 2019 18:34:27,339 INFO [pool-18-thread-1] MetricsServiceImpl:65 - Attempting to initialize metrics sink
03 Jan 2019 18:34:27,339 INFO [pool-18-thread-1] MetricsServiceImpl:81 - ********* Configuring Metric Sink **********
03 Jan 2019 18:34:27,340 INFO [pool-18-thread-1] AmbariMetricSinkImpl:95 - No clusters configured.
03 Jan 2019 18:39:27,340 INFO [pool-18-thread-1] MetricsServiceImpl:65 - Attempting to initialize metrics sink
03 Jan 2019 18:39:27,340 INFO [pool-18-thread-1] MetricsServiceImpl:81 - ********* Configuring Metric Sink **********
03 Jan 2019 18:39:27,340 INFO [pool-18-thread-1] AmbariMetricSinkImpl:95 - No clusters configured.
>> hostname -f
MNDCAUTDBN16.cidr.gov.in
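A quick way to test whether this host can complete a TLS handshake with the Ambari server on the registration port (a sketch; substitute the Ambari server FQDN):
echo | openssl s_client -connect <ambari-server-fqdn>:8440
openssl version   # agents on newer openssl builds often need force_https_protocol=PROTOCOL_TLSv1_2 in ambari-agent.ini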
01-02-2019
02:28 PM
Dear Team, When we are trying to launch the instance in Ambari, we are getting an error. Please find the steps and error below.
steps :-
========
Get Started > Select Version > install options > Confirm Hosts
error:-
======
Registering with the server...
Registration with the server failed.
Thanks & Regards, Prashant Gupta
gupta67222@gmail.com
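A first sanity check for a failure at the Confirm Hosts step (a sketch): confirm the server is listening on the registration port, and that each agent host can reach the /ca endpoint the agent polls during registration:
# on the Ambari server
ss -tlnp | grep 8440
# on each agent host (substitute the server FQDN); -k skips certificate verification
curl -k https://<ambari-server-fqdn>:8440/ca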
10-24-2018
10:45 AM
Dear Jay, After adding the above property, it is working fine now. Thanks for the support. Regards, Prashant Gupta
10-24-2018
07:30 AM
Dear Jay, Any update on the above query? Thanks & Regards, Prashant Gupta
10-22-2018
12:29 PM
Dear Jay, Please find the ResourceManager log file, captured after executing the above command. We have changed the server name to localhost in the attached log, but we are using the FQDN. Thanks & Regards, Prashant Gupta
resourcemanager-logfile.txt
10-18-2018
05:22 PM
Traceback (most recent call last):
File "/var/lib/ambari-agent/cache/stacks/HDP/3.0/services/YARN/package/scripts/resourcemanager.py", line 275, in <module>
Resourcemanager().execute()
File "/usr/lib/ambari-agent/lib/resource_management/libraries/script/script.py", line 353, in execute
method(env)
File "/var/lib/ambari-agent/cache/stacks/HDP/3.0/services/YARN/package/scripts/resourcemanager.py", line 158, in start
service('resourcemanager', action='start')
File "/usr/lib/ambari-agent/lib/ambari_commons/os_family_impl.py", line 89, in thunk
return fn(*args, **kwargs)
File "/var/lib/ambari-agent/cache/stacks/HDP/3.0/services/YARN/package/scripts/service.py", line 92, in service
Execute(daemon_cmd, user = usr, not_if = check_process)
File "/usr/lib/ambari-agent/lib/resource_management/core/base.py", line 166, in __init__
self.env.run()
File "/usr/lib/ambari-agent/lib/resource_management/core/environment.py", line 160, in run
self.run_action(resource, action)
File "/usr/lib/ambari-agent/lib/resource_management/core/environment.py", line 124, in run_action
provider_action()
File "/usr/lib/ambari-agent/lib/resource_management/core/providers/system.py", line 263, in action_run
returns=self.resource.returns)
File "/usr/lib/ambari-agent/lib/resource_management/core/shell.py", line 72, in inner
result = function(command, **kwargs)
File "/usr/lib/ambari-agent/lib/resource_management/core/shell.py", line 102, in checked_call
tries=tries, try_sleep=try_sleep, timeout_kill_strategy=timeout_kill_strategy, returns=returns)
File "/usr/lib/ambari-agent/lib/resource_management/core/shell.py", line 150, in _call_wrapper
result = _call(command, **kwargs_copy)
File "/usr/lib/ambari-agent/lib/resource_management/core/shell.py", line 314, in _call
raise ExecutionFailed(err_msg, code, out, err)
resource_management.core.exceptions.ExecutionFailed: Execution of 'ulimit -c unlimited; export HADOOP_LIBEXEC_DIR=/usr/hdp/3.0.0.0-1634/hadoop/libexec && /usr/hdp/3.0.0.0-1634/hadoop-yarn/bin/yarn --config /usr/hdp/3.0.0.0-1634/hadoop/conf --daemon start resourcemanager' returned 1.
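Note: the wrapper only reports that the yarn daemon command returned 1; the underlying exception is normally in the ResourceManager log. A minimal sketch, reusing the paths from the failing command above (the log directory is assumed from HDP defaults):
# run the ResourceManager in the foreground as the yarn user so the real error prints to the console
su - yarn -c 'export HADOOP_LIBEXEC_DIR=/usr/hdp/3.0.0.0-1634/hadoop/libexec && /usr/hdp/3.0.0.0-1634/hadoop-yarn/bin/yarn --config /usr/hdp/3.0.0.0-1634/hadoop/conf resourcemanager'
tail -n 200 /var/log/hadoop-yarn/yarn/hadoop-yarn-resourcemanager-*.log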