Member since: 05-04-2017
Posts: 11
Kudos Received: 0
Solutions: 0
07-21-2017 01:10 PM
@Jay SenSharma I have shared the output for it.
07-21-2017 12:41 PM
@Jay SenSharma Could you please help?
07-20-2017 06:21 AM
@Jay SenSharma Output for free -m:
              total        used        free      shared  buff/cache   available
Mem:          15870        3850        2752         648        9268       10931
Also, I guess the issue is with the PID value. Following is the output that I got:
[root@colo-hdop-prd01 hdfs]# cat hadoop-hdfs-datanode.pid
44745
[root@colo-hdop-prd01 hdfs]# ps -ef | grep DataNode
root 45961 45856 0 02:19 pts/0 00:00:00 grep --color=auto DataNode
Please guide me on whether I should delete the stale PID file, or what I should do next.
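A minimal sketch for clearing a stale PID file, assuming the path used in the Ambari start log below (the file contains 44745, but ps shows no live DataNode, so nothing is actually running):

# Delete the PID file only when no live process matches it
PIDFILE=/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid
pgrep -F "$PIDFILE" || rm -f "$PIDFILE"
# Then retry the DataNode start from Ambari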
07-20-2017 06:15 AM
@Jay SenSharma This is the log from /var/log/hadoop/hdfs/hadoop-hdfs-datanode-xxxx.out:
ulimit -a for user hdfs
core file size (blocks, -c) unlimited
data seg size (kbytes, -d) unlimited
scheduling priority (-e) 0
file size (blocks, -f) unlimited
pending signals (-i) 63398
max locked memory (kbytes, -l) 64
max memory size (kbytes, -m) unlimited
open files (-n) 128000
pipe size (512 bytes, -p) 8
POSIX message queues (bytes, -q) 819200
real-time priority (-r) 0
stack size (kbytes, -s) 8192
cpu time (seconds, -t) unlimited
max user processes (-u) 65536
virtual memory (kbytes, -v) unlimited
file locks (-x) unlimited
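The .out file mostly carries this ulimit banner; the real failure reason usually lands in the matching .log file. A quick check, keeping the xxxx placeholder from the path above:

# Look for the actual error in the DataNode .log, not the .out banner
tail -n 200 /var/log/hadoop/hdfs/hadoop-hdfs-datanode-xxxx.log
grep -iE 'error|exception|fatal' /var/log/hadoop/hdfs/hadoop-hdfs-datanode-xxxx.log | tail -n 20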
07-19-2017 01:34 PM
I am unable to start my DataNode from Ambari. The NameNode, NodeManager, and ResourceManager are working fine, and the DataNodes on the other 5 hosts are working fine. This is the log that I am getting:
stderr:
Traceback (most recent call last):
File "/var/lib/ambari-agent/cache/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py", line 167, in <module>
DataNode().execute()
File "/usr/lib/python2.6/site-packages/resource_management/libraries/script/script.py", line 219, in execute
method(env)
File "/var/lib/ambari-agent/cache/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py", line 62, in start
datanode(action="start")
File "/usr/lib/python2.6/site-packages/ambari_commons/os_family_impl.py", line 89, in thunk
return fn(*args, **kwargs)
File "/var/lib/ambari-agent/cache/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_datanode.py", line 72, in datanode
create_log_dir=True
File "/var/lib/ambari-agent/cache/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py", line 267, in service
Execute(daemon_cmd, not_if=process_id_exists_command, environment=hadoop_env_exports)
File "/usr/lib/python2.6/site-packages/resource_management/core/base.py", line 154, in __init__
self.env.run()
File "/usr/lib/python2.6/site-packages/resource_management/core/environment.py", line 160, in run
self.run_action(resource, action)
File "/usr/lib/python2.6/site-packages/resource_management/core/environment.py", line 124, in run_action
provider_action()
File "/usr/lib/python2.6/site-packages/resource_management/core/providers/system.py", line 238, in action_run
tries=self.resource.tries, try_sleep=self.resource.try_sleep)
File "/usr/lib/python2.6/site-packages/resource_management/core/shell.py", line 70, in inner
result = function(command, **kwargs)
File "/usr/lib/python2.6/site-packages/resource_management/core/shell.py", line 92, in checked_call
tries=tries, try_sleep=try_sleep)
File "/usr/lib/python2.6/site-packages/resource_management/core/shell.py", line 140, in _call_wrapper
result = _call(command, **kwargs_copy)
File "/usr/lib/python2.6/site-packages/resource_management/core/shell.py", line 291, in _call
raise Fail(err_msg)
resource_management.core.exceptions.Fail: Execution of 'ambari-sudo.sh su hdfs -l -s /bin/bash -c 'ulimit -c unlimited ; /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /usr/hdp/current/hadoop-client/conf start datanode'' returned 1. starting datanode, logging to /var/log/hadoop/hdfs/hadoop-hdfs-datanode-colo-hdop-prd01.thepartshouse.com.out
stdout:
2017-07-19 09:27:26,104 - The hadoop conf dir /usr/hdp/current/hadoop-client/conf exists, will call conf-select on it for version 2.4.2.0-258
2017-07-19 09:27:26,104 - Checking if need to create versioned conf dir /etc/hadoop/2.4.2.0-258/0
2017-07-19 09:27:26,105 - call['conf-select create-conf-dir --package hadoop --stack-version 2.4.2.0-258 --conf-version 0'] {'logoutput': False, 'sudo': True, 'quiet': False, 'stderr': -1}
2017-07-19 09:27:26,134 - call returned (1, '/etc/hadoop/2.4.2.0-258/0 exist already', '')
2017-07-19 09:27:26,135 - checked_call['conf-select set-conf-dir --package hadoop --stack-version 2.4.2.0-258 --conf-version 0'] {'logoutput': False, 'sudo': True, 'quiet': False}
2017-07-19 09:27:26,163 - checked_call returned (0, '')
2017-07-19 09:27:26,163 - Ensuring that hadoop has the correct symlink structure
2017-07-19 09:27:26,163 - Using hadoop conf dir: /usr/hdp/current/hadoop-client/conf
2017-07-19 09:27:26,291 - The hadoop conf dir /usr/hdp/current/hadoop-client/conf exists, will call conf-select on it for version 2.4.2.0-258
2017-07-19 09:27:26,291 - Checking if need to create versioned conf dir /etc/hadoop/2.4.2.0-258/0
2017-07-19 09:27:26,291 - call['conf-select create-conf-dir --package hadoop --stack-version 2.4.2.0-258 --conf-version 0'] {'logoutput': False, 'sudo': True, 'quiet': False, 'stderr': -1}
2017-07-19 09:27:26,321 - call returned (1, '/etc/hadoop/2.4.2.0-258/0 exist already', '')
2017-07-19 09:27:26,321 - checked_call['conf-select set-conf-dir --package hadoop --stack-version 2.4.2.0-258 --conf-version 0'] {'logoutput': False, 'sudo': True, 'quiet': False}
2017-07-19 09:27:26,346 - checked_call returned (0, '')
2017-07-19 09:27:26,347 - Ensuring that hadoop has the correct symlink structure
2017-07-19 09:27:26,347 - Using hadoop conf dir: /usr/hdp/current/hadoop-client/conf
2017-07-19 09:27:26,348 - Group['spark'] {}
2017-07-19 09:27:26,349 - Group['hadoop'] {}
2017-07-19 09:27:26,349 - Group['users'] {}
2017-07-19 09:27:26,350 - User['hive'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'hadoop']}
2017-07-19 09:27:26,350 - User['zookeeper'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'hadoop']}
2017-07-19 09:27:26,351 - User['oozie'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'users']}
2017-07-19 09:27:26,352 - User['ams'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'hadoop']}
2017-07-19 09:27:26,352 - User['falcon'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'users']}
2017-07-19 09:27:26,353 - User['tez'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'users']}
2017-07-19 09:27:26,354 - User['mahout'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'hadoop']}
2017-07-19 09:27:26,354 - User['spark'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'hadoop']}
2017-07-19 09:27:26,355 - User['ambari-qa'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'users']}
2017-07-19 09:27:26,355 - User['flume'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'hadoop']}
2017-07-19 09:27:26,356 - User['hdfs'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'hadoop']}
2017-07-19 09:27:26,357 - User['sqoop'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'hadoop']}
2017-07-19 09:27:26,357 - User['yarn'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'hadoop']}
2017-07-19 09:27:26,358 - User['mapred'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'hadoop']}
2017-07-19 09:27:26,358 - User['hcat'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'hadoop']}
2017-07-19 09:27:26,359 - File['/var/lib/ambari-agent/tmp/changeUid.sh'] {'content': StaticFile('changeToSecureUid.sh'), 'mode': 0555}
2017-07-19 09:27:26,361 - Execute['/var/lib/ambari-agent/tmp/changeUid.sh ambari-qa /tmp/hadoop-ambari-qa,/tmp/hsperfdata_ambari-qa,/home/ambari-qa,/tmp/ambari-qa,/tmp/sqoop-ambari-qa'] {'not_if': '(test $(id -u ambari-qa) -gt 1000) || (false)'}
2017-07-19 09:27:26,367 - Skipping Execute['/var/lib/ambari-agent/tmp/changeUid.sh ambari-qa /tmp/hadoop-ambari-qa,/tmp/hsperfdata_ambari-qa,/home/ambari-qa,/tmp/ambari-qa,/tmp/sqoop-ambari-qa'] due to not_if
2017-07-19 09:27:26,368 - Group['hdfs'] {}
2017-07-19 09:27:26,368 - User['hdfs'] {'fetch_nonlocal_groups': True, 'groups': [u'hadoop', u'hdfs']}
2017-07-19 09:27:26,369 - FS Type:
2017-07-19 09:27:26,369 - Directory['/etc/hadoop'] {'mode': 0755}
2017-07-19 09:27:26,381 - File['/usr/hdp/current/hadoop-client/conf/hadoop-env.sh'] {'content': InlineTemplate(...), 'owner': 'hdfs', 'group': 'hadoop'}
2017-07-19 09:27:26,382 - Directory['/var/lib/ambari-agent/tmp/hadoop_java_io_tmpdir'] {'owner': 'hdfs', 'group': 'hadoop', 'mode': 0777}
2017-07-19 09:27:26,394 - Execute[('setenforce', '0')] {'not_if': '(! which getenforce ) || (which getenforce && getenforce | grep -q Disabled)', 'sudo': True, 'only_if': 'test -f /selinux/enforce'}
2017-07-19 09:27:26,404 - Skipping Execute[('setenforce', '0')] due to only_if
2017-07-19 09:27:26,405 - Directory['/var/log/hadoop'] {'owner': 'root', 'mode': 0775, 'group': 'hadoop', 'recursive': True, 'cd_access': 'a'}
2017-07-19 09:27:26,409 - Directory['/var/run/hadoop'] {'owner': 'root', 'group': 'root', 'recursive': True, 'cd_access': 'a'}
2017-07-19 09:27:26,410 - Changing owner for /var/run/hadoop from 1029 to root
2017-07-19 09:27:26,410 - Changing group for /var/run/hadoop from 1019 to root
2017-07-19 09:27:26,410 - Directory['/tmp/hadoop-hdfs'] {'owner': 'hdfs', 'recursive': True, 'cd_access': 'a'}
2017-07-19 09:27:26,417 - File['/usr/hdp/current/hadoop-client/conf/commons-logging.properties'] {'content': Template('commons-logging.properties.j2'), 'owner': 'hdfs'}
2017-07-19 09:27:26,419 - File['/usr/hdp/current/hadoop-client/conf/health_check'] {'content': Template('health_check.j2'), 'owner': 'hdfs'}
2017-07-19 09:27:26,420 - File['/usr/hdp/current/hadoop-client/conf/log4j.properties'] {'content': ..., 'owner': 'hdfs', 'group': 'hadoop', 'mode': 0644}
2017-07-19 09:27:26,433 - File['/usr/hdp/current/hadoop-client/conf/hadoop-metrics2.properties'] {'content': Template('hadoop-metrics2.properties.j2'), 'owner': 'hdfs', 'group': 'hadoop'}
2017-07-19 09:27:26,433 - File['/usr/hdp/current/hadoop-client/conf/task-log4j.properties'] {'content': StaticFile('task-log4j.properties'), 'mode': 0755}
2017-07-19 09:27:26,434 - File['/usr/hdp/current/hadoop-client/conf/configuration.xsl'] {'owner': 'hdfs', 'group': 'hadoop'}
2017-07-19 09:27:26,438 - File['/etc/hadoop/conf/topology_mappings.data'] {'owner': 'hdfs', 'content': Template('topology_mappings.data.j2'), 'only_if': 'test -d /etc/hadoop/conf', 'group': 'hadoop'}
2017-07-19 09:27:26,445 - File['/etc/hadoop/conf/topology_script.py'] {'content': StaticFile('topology_script.py'), 'only_if': 'test -d /etc/hadoop/conf', 'mode': 0755}
2017-07-19 09:27:26,624 - The hadoop conf dir /usr/hdp/current/hadoop-client/conf exists, will call conf-select on it for version 2.4.2.0-258
2017-07-19 09:27:26,624 - Checking if need to create versioned conf dir /etc/hadoop/2.4.2.0-258/0
2017-07-19 09:27:26,625 - call['conf-select create-conf-dir --package hadoop --stack-version 2.4.2.0-258 --conf-version 0'] {'logoutput': False, 'sudo': True, 'quiet': False, 'stderr': -1}
2017-07-19 09:27:26,649 - call returned (1, '/etc/hadoop/2.4.2.0-258/0 exist already', '')
2017-07-19 09:27:26,650 - checked_call['conf-select set-conf-dir --package hadoop --stack-version 2.4.2.0-258 --conf-version 0'] {'logoutput': False, 'sudo': True, 'quiet': False}
2017-07-19 09:27:26,674 - checked_call returned (0, '')
2017-07-19 09:27:26,674 - Ensuring that hadoop has the correct symlink structure
2017-07-19 09:27:26,674 - Using hadoop conf dir: /usr/hdp/current/hadoop-client/conf
2017-07-19 09:27:26,676 - The hadoop conf dir /usr/hdp/current/hadoop-client/conf exists, will call conf-select on it for version 2.4.2.0-258
2017-07-19 09:27:26,676 - Checking if need to create versioned conf dir /etc/hadoop/2.4.2.0-258/0
2017-07-19 09:27:26,677 - call['conf-select create-conf-dir --package hadoop --stack-version 2.4.2.0-258 --conf-version 0'] {'logoutput': False, 'sudo': True, 'quiet': False, 'stderr': -1}
2017-07-19 09:27:26,720 - call returned (1, '/etc/hadoop/2.4.2.0-258/0 exist already', '')
2017-07-19 09:27:26,721 - checked_call['conf-select set-conf-dir --package hadoop --stack-version 2.4.2.0-258 --conf-version 0'] {'logoutput': False, 'sudo': True, 'quiet': False}
2017-07-19 09:27:26,744 - checked_call returned (0, '')
2017-07-19 09:27:26,745 - Ensuring that hadoop has the correct symlink structure
2017-07-19 09:27:26,745 - Using hadoop conf dir: /usr/hdp/current/hadoop-client/conf
2017-07-19 09:27:26,751 - Directory['/etc/security/limits.d'] {'owner': 'root', 'group': 'root', 'recursive': True}
2017-07-19 09:27:26,757 - File['/etc/security/limits.d/hdfs.conf'] {'content': Template('hdfs.conf.j2'), 'owner': 'root', 'group': 'root', 'mode': 0644}
2017-07-19 09:27:26,758 - XmlConfig['hadoop-policy.xml'] {'owner': 'hdfs', 'group': 'hadoop', 'conf_dir': '/usr/hdp/current/hadoop-client/conf', 'configuration_attributes': {}, 'configurations': ...}
2017-07-19 09:27:26,767 - Generating config: /usr/hdp/current/hadoop-client/conf/hadoop-policy.xml
2017-07-19 09:27:26,767 - File['/usr/hdp/current/hadoop-client/conf/hadoop-policy.xml'] {'owner': 'hdfs', 'content': InlineTemplate(...), 'group': 'hadoop', 'mode': None, 'encoding': 'UTF-8'}
2017-07-19 09:27:26,776 - XmlConfig['ssl-client.xml'] {'owner': 'hdfs', 'group': 'hadoop', 'conf_dir': '/usr/hdp/current/hadoop-client/conf', 'configuration_attributes': {}, 'configurations': ...}
2017-07-19 09:27:26,784 - Generating config: /usr/hdp/current/hadoop-client/conf/ssl-client.xml
2017-07-19 09:27:26,784 - File['/usr/hdp/current/hadoop-client/conf/ssl-client.xml'] {'owner': 'hdfs', 'content': InlineTemplate(...), 'group': 'hadoop', 'mode': None, 'encoding': 'UTF-8'}
2017-07-19 09:27:26,790 - Directory['/usr/hdp/current/hadoop-client/conf/secure'] {'owner': 'root', 'group': 'hadoop', 'recursive': True, 'cd_access': 'a'}
2017-07-19 09:27:26,791 - XmlConfig['ssl-client.xml'] {'owner': 'hdfs', 'group': 'hadoop', 'conf_dir': '/usr/hdp/current/hadoop-client/conf/secure', 'configuration_attributes': {}, 'configurations': ...}
2017-07-19 09:27:26,798 - Generating config: /usr/hdp/current/hadoop-client/conf/secure/ssl-client.xml
2017-07-19 09:27:26,799 - File['/usr/hdp/current/hadoop-client/conf/secure/ssl-client.xml'] {'owner': 'hdfs', 'content': InlineTemplate(...), 'group': 'hadoop', 'mode': None, 'encoding': 'UTF-8'}
2017-07-19 09:27:26,805 - XmlConfig['ssl-server.xml'] {'owner': 'hdfs', 'group': 'hadoop', 'conf_dir': '/usr/hdp/current/hadoop-client/conf', 'configuration_attributes': {}, 'configurations': ...}
2017-07-19 09:27:26,812 - Generating config: /usr/hdp/current/hadoop-client/conf/ssl-server.xml
2017-07-19 09:27:26,813 - File['/usr/hdp/current/hadoop-client/conf/ssl-server.xml'] {'owner': 'hdfs', 'content': InlineTemplate(...), 'group': 'hadoop', 'mode': None, 'encoding': 'UTF-8'}
2017-07-19 09:27:26,820 - XmlConfig['hdfs-site.xml'] {'owner': 'hdfs', 'group': 'hadoop', 'conf_dir': '/usr/hdp/current/hadoop-client/conf', 'configuration_attributes': {}, 'configurations': ...}
2017-07-19 09:27:26,828 - Generating config: /usr/hdp/current/hadoop-client/conf/hdfs-site.xml
2017-07-19 09:27:26,828 - File['/usr/hdp/current/hadoop-client/conf/hdfs-site.xml'] {'owner': 'hdfs', 'content': InlineTemplate(...), 'group': 'hadoop', 'mode': None, 'encoding': 'UTF-8'}
2017-07-19 09:27:26,872 - XmlConfig['core-site.xml'] {'group': 'hadoop', 'conf_dir': '/usr/hdp/current/hadoop-client/conf', 'mode': 0644, 'configuration_attributes': {}, 'owner': 'hdfs', 'configurations': ...}
2017-07-19 09:27:26,879 - Generating config: /usr/hdp/current/hadoop-client/conf/core-site.xml
2017-07-19 09:27:26,879 - File['/usr/hdp/current/hadoop-client/conf/core-site.xml'] {'owner': 'hdfs', 'content': InlineTemplate(...), 'group': 'hadoop', 'mode': 0644, 'encoding': 'UTF-8'}
2017-07-19 09:27:26,901 - File['/usr/hdp/current/hadoop-client/conf/slaves'] {'content': Template('slaves.j2'), 'owner': 'hdfs'}
2017-07-19 09:27:26,902 - Directory['/var/lib/hadoop-hdfs'] {'owner': 'hdfs', 'group': 'hadoop', 'mode': 0751, 'recursive': True}
2017-07-19 09:27:26,908 - Host contains mounts: ['/sys', '/proc', '/dev', '/sys/kernel/security', '/dev/shm', '/dev/pts', '/run', '/sys/fs/cgroup', '/sys/fs/cgroup/systemd', '/sys/fs/pstore', '/sys/fs/cgroup/cpu,cpuacct', '/sys/fs/cgroup/memory', '/sys/fs/cgroup/pids', '/sys/fs/cgroup/freezer', '/sys/fs/cgroup/blkio', '/sys/fs/cgroup/hugetlb', '/sys/fs/cgroup/net_cls,net_prio', '/sys/fs/cgroup/devices', '/sys/fs/cgroup/perf_event', '/sys/fs/cgroup/cpuset', '/sys/kernel/config', '/', '/sys/fs/selinux', '/proc/sys/fs/binfmt_misc', '/sys/kernel/debug', '/dev/hugepages', '/dev/mqueue', '/boot', '/hadoop-data', '/run/user/1000', '/run/user/1003', '/proc/sys/fs/binfmt_misc'].
2017-07-19 09:27:26,908 - Mount point for directory /hadoop-data/hadoop/hdfs/data is /hadoop-data
2017-07-19 09:27:26,909 - File['/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist'] {'content': '\n# This file keeps track of the last known mount-point for each DFS data dir.\n# It is safe to delete, since it will get regenerated the next time that the DataNode starts.\n# However, it is not advised to delete this file since Ambari may\n# re-create a DFS data dir that used to be mounted on a drive but is now mounted on the root.\n# Comments begin with a hash (#) symbol\n# data_dir,mount_point\n/hadoop-data/hadoop/hdfs/data,/hadoop-data\n', 'owner': 'hdfs', 'group': 'hadoop', 'mode': 0644}
2017-07-19 09:27:26,910 - Directory['/var/run/hadoop'] {'owner': 'hdfs', 'group': 'hadoop', 'mode': 0755}
2017-07-19 09:27:26,910 - Changing owner for /var/run/hadoop from 0 to hdfs
2017-07-19 09:27:26,910 - Changing group for /var/run/hadoop from 0 to hadoop
2017-07-19 09:27:26,910 - Directory['/var/run/hadoop/hdfs'] {'owner': 'hdfs', 'recursive': True}
2017-07-19 09:27:26,911 - Directory['/var/log/hadoop/hdfs'] {'owner': 'hdfs', 'recursive': True}
2017-07-19 09:27:26,911 - File['/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid'] {'action': ['delete'], 'not_if': 'ambari-sudo.sh -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid && ambari-sudo.sh -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid'}
2017-07-19 09:27:26,929 - Deleting File['/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid']
2017-07-19 09:27:26,930 - Execute['ambari-sudo.sh su hdfs -l -s /bin/bash -c 'ulimit -c unlimited ; /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /usr/hdp/current/hadoop-client/conf start datanode''] {'environment': {'HADOOP_LIBEXEC_DIR': '/usr/hdp/current/hadoop-client/libexec'}, 'not_if': 'ambari-sudo.sh -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid && ambari-sudo.sh -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid'}
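To capture why the daemon exits, one option is to rerun by hand the exact command from the Fail message above and then read the log it writes; a sketch:

# Same start command Ambari runs, executed manually as root:
su - hdfs -s /bin/bash -c 'ulimit -c unlimited ; /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /usr/hdp/current/hadoop-client/conf start datanode'
# Then inspect the matching .log (not just the .out) for the real error:
tail -n 100 /var/log/hadoop/hdfs/hadoop-hdfs-datanode-colo-hdop-prd01.thepartshouse.com.log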
Labels:
- Apache Hadoop
07-17-2017 09:29 AM
@Jay SenSharma I tried enabling HA mode as a resolution for this issue, but with no success. The Ambari version is 2.2.2.0.
07-17-2017 07:48 AM
@Jay SenSharma This is the error that I am getting while trying to start it.
07-17-2017 07:47 AM
stderr:
Traceback (most recent call last):
File "/var/lib/ambari-agent/cache/common-services/YARN/2.1.0.2.0/package/scripts/application_timeline_server.py", line 147, in <module>
ApplicationTimelineServer().execute()
File "/usr/lib/python2.6/site-packages/resource_management/libraries/script/script.py", line 219, in execute
method(env)
File "/var/lib/ambari-agent/cache/common-services/YARN/2.1.0.2.0/package/scripts/application_timeline_server.py", line 43, in start
self.configure(env) # FOR SECURITY
File "/var/lib/ambari-agent/cache/common-services/YARN/2.1.0.2.0/package/scripts/application_timeline_server.py", line 54, in configure
yarn(name='apptimelineserver')
File "/usr/lib/python2.6/site-packages/ambari_commons/os_family_impl.py", line 89, in thunk
return fn(*args, **kwargs)
File "/var/lib/ambari-agent/cache/common-services/YARN/2.1.0.2.0/package/scripts/yarn.py", line 276, in yarn
mode=0755
File "/usr/lib/python2.6/site-packages/resource_management/core/base.py", line 154, in __init__
self.env.run()
File "/usr/lib/python2.6/site-packages/resource_management/core/environment.py", line 160, in run
self.run_action(resource, action)
File "/usr/lib/python2.6/site-packages/resource_management/core/environment.py", line 124, in run_action
provider_action()
File "/usr/lib/python2.6/site-packages/resource_management/libraries/providers/hdfs_resource.py", line 463, in action_create_on_execute
self.action_delayed("create")
File "/usr/lib/python2.6/site-packages/resource_management/libraries/providers/hdfs_resource.py", line 460, in action_delayed
self.get_hdfs_resource_executor().action_delayed(action_name, self)
File "/usr/lib/python2.6/site-packages/resource_management/libraries/providers/hdfs_resource.py", line 259, in action_delayed
self._set_mode(self.target_status)
File "/usr/lib/python2.6/site-packages/resource_management/libraries/providers/hdfs_resource.py", line 366, in _set_mode
self.util.run_command(self.main_resource.resource.target, 'SETPERMISSION', method='PUT', permission=self.mode, assertable_result=False)
File "/usr/lib/python2.6/site-packages/resource_management/libraries/providers/hdfs_resource.py", line 195, in run_command
raise Fail(err_msg)
resource_management.core.exceptions.Fail: Execution of 'curl -sS -L -w '%{http_code}' -X PUT 'http://colo-hdop-prd01.thepartshouse.com:50070/webhdfs/v1/ats/done?op=SETPERMISSION&user.name=hdfs&permission=755'' returned status_code=403.
{
"RemoteException": {
"exception": "SafeModeException",
"javaClassName": "org.apache.hadoop.hdfs.server.namenode.SafeModeException",
"message": "Cannot set permission for /ats/done. Name node is in safe mode.\nThe reported blocks 47250 needs additional 3 blocks to reach the threshold 1.0000 of total blocks 47252.\nThe number of live datanodes 3 has reached the minimum number 0. Safe mode will be turned off automatically once the thresholds have been reached."
}
}
stdout:
2017-07-17 03:42:28,806 - The hadoop conf dir /usr/hdp/current/hadoop-client/conf exists, will call conf-select on it for version 2.4.2.0-258
2017-07-17 03:42:28,806 - Checking if need to create versioned conf dir /etc/hadoop/2.4.2.0-258/0
2017-07-17 03:42:28,806 - call['conf-select create-conf-dir --package hadoop --stack-version 2.4.2.0-258 --conf-version 0'] {'logoutput': False, 'sudo': True, 'quiet': False, 'stderr': -1}
2017-07-17 03:42:28,829 - call returned (1, '/etc/hadoop/2.4.2.0-258/0 exist already', '')
2017-07-17 03:42:28,830 - checked_call['conf-select set-conf-dir --package hadoop --stack-version 2.4.2.0-258 --conf-version 0'] {'logoutput': False, 'sudo': True, 'quiet': False}
2017-07-17 03:42:28,854 - checked_call returned (0, '')
2017-07-17 03:42:28,854 - Ensuring that hadoop has the correct symlink structure
2017-07-17 03:42:28,854 - Using hadoop conf dir: /usr/hdp/current/hadoop-client/conf
2017-07-17 03:42:28,972 - The hadoop conf dir /usr/hdp/current/hadoop-client/conf exists, will call conf-select on it for version 2.4.2.0-258
2017-07-17 03:42:28,972 - Checking if need to create versioned conf dir /etc/hadoop/2.4.2.0-258/0
2017-07-17 03:42:28,973 - call['conf-select create-conf-dir --package hadoop --stack-version 2.4.2.0-258 --conf-version 0'] {'logoutput': False, 'sudo': True, 'quiet': False, 'stderr': -1}
2017-07-17 03:42:28,995 - call returned (1, '/etc/hadoop/2.4.2.0-258/0 exist already', '')
2017-07-17 03:42:28,995 - checked_call['conf-select set-conf-dir --package hadoop --stack-version 2.4.2.0-258 --conf-version 0'] {'logoutput': False, 'sudo': True, 'quiet': False}
2017-07-17 03:42:29,020 - checked_call returned (0, '')
2017-07-17 03:42:29,020 - Ensuring that hadoop has the correct symlink structure
2017-07-17 03:42:29,020 - Using hadoop conf dir: /usr/hdp/current/hadoop-client/conf
2017-07-17 03:42:29,022 - Group['spark'] {}
2017-07-17 03:42:29,023 - Group['hadoop'] {}
2017-07-17 03:42:29,023 - Group['users'] {}
2017-07-17 03:42:29,023 - User['hive'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'hadoop']}
2017-07-17 03:42:29,024 - User['zookeeper'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'hadoop']}
2017-07-17 03:42:29,024 - User['oozie'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'users']}
2017-07-17 03:42:29,025 - User['ams'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'hadoop']}
2017-07-17 03:42:29,025 - User['falcon'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'users']}
2017-07-17 03:42:29,026 - User['tez'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'users']}
2017-07-17 03:42:29,027 - User['mahout'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'hadoop']}
2017-07-17 03:42:29,027 - User['spark'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'hadoop']}
2017-07-17 03:42:29,028 - User['ambari-qa'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'users']}
2017-07-17 03:42:29,028 - User['flume'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'hadoop']}
2017-07-17 03:42:29,029 - User['hdfs'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'hadoop']}
2017-07-17 03:42:29,029 - User['sqoop'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'hadoop']}
2017-07-17 03:42:29,030 - User['yarn'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'hadoop']}
2017-07-17 03:42:29,030 - User['mapred'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'hadoop']}
2017-07-17 03:42:29,031 - User['hcat'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'hadoop']}
2017-07-17 03:42:29,032 - File['/var/lib/ambari-agent/tmp/changeUid.sh'] {'content': StaticFile('changeToSecureUid.sh'), 'mode': 0555}
2017-07-17 03:42:29,033 - Execute['/var/lib/ambari-agent/tmp/changeUid.sh ambari-qa /tmp/hadoop-ambari-qa,/tmp/hsperfdata_ambari-qa,/home/ambari-qa,/tmp/ambari-qa,/tmp/sqoop-ambari-qa'] {'not_if': '(test $(id -u ambari-qa) -gt 1000) || (false)'}
2017-07-17 03:42:29,039 - Skipping Execute['/var/lib/ambari-agent/tmp/changeUid.sh ambari-qa /tmp/hadoop-ambari-qa,/tmp/hsperfdata_ambari-qa,/home/ambari-qa,/tmp/ambari-qa,/tmp/sqoop-ambari-qa'] due to not_if
2017-07-17 03:42:29,039 - Group['hdfs'] {}
2017-07-17 03:42:29,040 - User['hdfs'] {'fetch_nonlocal_groups': True, 'groups': [u'hadoop', u'hdfs']}
2017-07-17 03:42:29,040 - FS Type:
2017-07-17 03:42:29,040 - Directory['/etc/hadoop'] {'mode': 0755}
2017-07-17 03:42:29,053 - File['/usr/hdp/current/hadoop-client/conf/hadoop-env.sh'] {'content': InlineTemplate(...), 'owner': 'hdfs', 'group': 'hadoop'}
2017-07-17 03:42:29,053 - Directory['/var/lib/ambari-agent/tmp/hadoop_java_io_tmpdir'] {'owner': 'hdfs', 'group': 'hadoop', 'mode': 0777}
2017-07-17 03:42:29,064 - Execute[('setenforce', '0')] {'not_if': '(! which getenforce ) || (which getenforce && getenforce | grep -q Disabled)', 'sudo': True, 'only_if': 'test -f /selinux/enforce'}
2017-07-17 03:42:29,071 - Skipping Execute[('setenforce', '0')] due to only_if
2017-07-17 03:42:29,072 - Directory['/var/log/hadoop'] {'owner': 'root', 'mode': 0775, 'group': 'hadoop', 'recursive': True, 'cd_access': 'a'}
2017-07-17 03:42:29,074 - Directory['/var/run/hadoop'] {'owner': 'root', 'group': 'root', 'recursive': True, 'cd_access': 'a'}
2017-07-17 03:42:29,074 - Directory['/tmp/hadoop-hdfs'] {'owner': 'hdfs', 'recursive': True, 'cd_access': 'a'}
2017-07-17 03:42:29,078 - File['/usr/hdp/current/hadoop-client/conf/commons-logging.properties'] {'content': Template('commons-logging.properties.j2'), 'owner': 'hdfs'}
2017-07-17 03:42:29,079 - File['/usr/hdp/current/hadoop-client/conf/health_check'] {'content': Template('health_check.j2'), 'owner': 'hdfs'}
2017-07-17 03:42:29,080 - File['/usr/hdp/current/hadoop-client/conf/log4j.properties'] {'content': ..., 'owner': 'hdfs', 'group': 'hadoop', 'mode': 0644}
2017-07-17 03:42:29,091 - File['/usr/hdp/current/hadoop-client/conf/hadoop-metrics2.properties'] {'content': Template('hadoop-metrics2.properties.j2'), 'owner': 'hdfs', 'group': 'hadoop'}
2017-07-17 03:42:29,092 - File['/usr/hdp/current/hadoop-client/conf/task-log4j.properties'] {'content': StaticFile('task-log4j.properties'), 'mode': 0755}
2017-07-17 03:42:29,093 - File['/usr/hdp/current/hadoop-client/conf/configuration.xsl'] {'owner': 'hdfs', 'group': 'hadoop'}
2017-07-17 03:42:29,097 - File['/etc/hadoop/conf/topology_mappings.data'] {'owner': 'hdfs', 'content': Template('topology_mappings.data.j2'), 'only_if': 'test -d /etc/hadoop/conf', 'group': 'hadoop'}
2017-07-17 03:42:29,100 - File['/etc/hadoop/conf/topology_script.py'] {'content': StaticFile('topology_script.py'), 'only_if': 'test -d /etc/hadoop/conf', 'mode': 0755}
2017-07-17 03:42:29,271 - The hadoop conf dir /usr/hdp/current/hadoop-client/conf exists, will call conf-select on it for version 2.4.2.0-258
2017-07-17 03:42:29,271 - Checking if need to create versioned conf dir /etc/hadoop/2.4.2.0-258/0
2017-07-17 03:42:29,271 - call['conf-select create-conf-dir --package hadoop --stack-version 2.4.2.0-258 --conf-version 0'] {'logoutput': False, 'sudo': True, 'quiet': False, 'stderr': -1}
2017-07-17 03:42:29,294 - call returned (1, '/etc/hadoop/2.4.2.0-258/0 exist already', '')
2017-07-17 03:42:29,295 - checked_call['conf-select set-conf-dir --package hadoop --stack-version 2.4.2.0-258 --conf-version 0'] {'logoutput': False, 'sudo': True, 'quiet': False}
2017-07-17 03:42:29,317 - checked_call returned (0, '')
2017-07-17 03:42:29,317 - Ensuring that hadoop has the correct symlink structure
2017-07-17 03:42:29,317 - Using hadoop conf dir: /usr/hdp/current/hadoop-client/conf
2017-07-17 03:42:29,343 - The hadoop conf dir /usr/hdp/current/hadoop-client/conf exists, will call conf-select on it for version 2.4.2.0-258
2017-07-17 03:42:29,343 - Checking if need to create versioned conf dir /etc/hadoop/2.4.2.0-258/0
2017-07-17 03:42:29,343 - call['conf-select create-conf-dir --package hadoop --stack-version 2.4.2.0-258 --conf-version 0'] {'logoutput': False, 'sudo': True, 'quiet': False, 'stderr': -1}
2017-07-17 03:42:29,369 - call returned (1, '/etc/hadoop/2.4.2.0-258/0 exist already', '')
2017-07-17 03:42:29,370 - checked_call['conf-select set-conf-dir --package hadoop --stack-version 2.4.2.0-258 --conf-version 0'] {'logoutput': False, 'sudo': True, 'quiet': False}
2017-07-17 03:42:29,392 - checked_call returned (0, '')
2017-07-17 03:42:29,392 - Ensuring that hadoop has the correct symlink structure
2017-07-17 03:42:29,392 - Using hadoop conf dir: /usr/hdp/current/hadoop-client/conf
2017-07-17 03:42:29,400 - Directory['/var/log/hadoop-yarn/nodemanager/recovery-state'] {'owner': 'yarn', 'mode': 0755, 'group': 'hadoop', 'recursive': True, 'cd_access': 'a'}
2017-07-17 03:42:29,402 - Directory['/var/run/hadoop-yarn'] {'owner': 'yarn', 'group': 'hadoop', 'recursive': True, 'cd_access': 'a'}
2017-07-17 03:42:29,402 - Directory['/var/run/hadoop-yarn/yarn'] {'owner': 'yarn', 'group': 'hadoop', 'recursive': True, 'cd_access': 'a'}
2017-07-17 03:42:29,403 - Directory['/var/log/hadoop-yarn/yarn'] {'owner': 'yarn', 'group': 'hadoop', 'recursive': True, 'cd_access': 'a'}
2017-07-17 03:42:29,403 - Directory['/var/run/hadoop-mapreduce'] {'owner': 'mapred', 'group': 'hadoop', 'recursive': True, 'cd_access': 'a'}
2017-07-17 03:42:29,404 - Directory['/var/run/hadoop-mapreduce/mapred'] {'owner': 'mapred', 'group': 'hadoop', 'recursive': True, 'cd_access': 'a'}
2017-07-17 03:42:29,404 - Directory['/var/log/hadoop-mapreduce'] {'owner': 'mapred', 'group': 'hadoop', 'recursive': True, 'cd_access': 'a'}
2017-07-17 03:42:29,404 - Directory['/var/log/hadoop-mapreduce/mapred'] {'owner': 'mapred', 'group': 'hadoop', 'recursive': True, 'cd_access': 'a'}
2017-07-17 03:42:29,405 - Directory['/var/log/hadoop-yarn'] {'owner': 'yarn', 'ignore_failures': True, 'recursive': True, 'cd_access': 'a'}
2017-07-17 03:42:29,405 - XmlConfig['core-site.xml'] {'group': 'hadoop', 'conf_dir': '/usr/hdp/current/hadoop-client/conf', 'mode': 0644, 'configuration_attributes': {}, 'owner': 'hdfs', 'configurations': ...}
2017-07-17 03:42:29,414 - Generating config: /usr/hdp/current/hadoop-client/conf/core-site.xml
2017-07-17 03:42:29,414 - File['/usr/hdp/current/hadoop-client/conf/core-site.xml'] {'owner': 'hdfs', 'content': InlineTemplate(...), 'group': 'hadoop', 'mode': 0644, 'encoding': 'UTF-8'}
2017-07-17 03:42:29,435 - XmlConfig['hdfs-site.xml'] {'group': 'hadoop', 'conf_dir': '/usr/hdp/current/hadoop-client/conf', 'mode': 0644, 'configuration_attributes': {}, 'owner': 'hdfs', 'configurations': ...}
2017-07-17 03:42:29,442 - Generating config: /usr/hdp/current/hadoop-client/conf/hdfs-site.xml
2017-07-17 03:42:29,442 - File['/usr/hdp/current/hadoop-client/conf/hdfs-site.xml'] {'owner': 'hdfs', 'content': InlineTemplate(...), 'group': 'hadoop', 'mode': 0644, 'encoding': 'UTF-8'}
2017-07-17 03:42:29,482 - XmlConfig['mapred-site.xml'] {'group': 'hadoop', 'conf_dir': '/usr/hdp/current/hadoop-client/conf', 'mode': 0644, 'configuration_attributes': {}, 'owner': 'yarn', 'configurations': ...}
2017-07-17 03:42:29,490 - Generating config: /usr/hdp/current/hadoop-client/conf/mapred-site.xml
2017-07-17 03:42:29,490 - File['/usr/hdp/current/hadoop-client/conf/mapred-site.xml'] {'owner': 'yarn', 'content': InlineTemplate(...), 'group': 'hadoop', 'mode': 0644, 'encoding': 'UTF-8'}
2017-07-17 03:42:29,522 - Changing owner for /usr/hdp/current/hadoop-client/conf/mapred-site.xml from 1032 to yarn
2017-07-17 03:42:29,523 - XmlConfig['yarn-site.xml'] {'group': 'hadoop', 'conf_dir': '/usr/hdp/current/hadoop-client/conf', 'mode': 0644, 'configuration_attributes': {}, 'owner': 'yarn', 'configurations': ...}
2017-07-17 03:42:29,530 - Generating config: /usr/hdp/current/hadoop-client/conf/yarn-site.xml
2017-07-17 03:42:29,530 - File['/usr/hdp/current/hadoop-client/conf/yarn-site.xml'] {'owner': 'yarn', 'content': InlineTemplate(...), 'group': 'hadoop', 'mode': 0644, 'encoding': 'UTF-8'}
2017-07-17 03:42:29,608 - XmlConfig['capacity-scheduler.xml'] {'group': 'hadoop', 'conf_dir': '/usr/hdp/current/hadoop-client/conf', 'mode': 0644, 'configuration_attributes': {}, 'owner': 'yarn', 'configurations': ...}
2017-07-17 03:42:29,616 - Generating config: /usr/hdp/current/hadoop-client/conf/capacity-scheduler.xml
2017-07-17 03:42:29,616 - File['/usr/hdp/current/hadoop-client/conf/capacity-scheduler.xml'] {'owner': 'yarn', 'content': InlineTemplate(...), 'group': 'hadoop', 'mode': 0644, 'encoding': 'UTF-8'}
2017-07-17 03:42:29,627 - Changing owner for /usr/hdp/current/hadoop-client/conf/capacity-scheduler.xml from 1029 to yarn
2017-07-17 03:42:29,627 - Directory['/hadoop-data/hadoop/yarn/timeline'] {'owner': 'yarn', 'group': 'hadoop', 'recursive': True, 'cd_access': 'a'}
2017-07-17 03:42:29,628 - Directory['/hadoop-data/hadoop/yarn/timeline'] {'owner': 'yarn', 'group': 'hadoop', 'recursive': True, 'cd_access': 'a'}
2017-07-17 03:42:29,628 - HdfsResource['/ats/done'] {'security_enabled': False, 'hadoop_bin_dir': '/usr/hdp/current/hadoop-client/bin', 'keytab': [EMPTY], 'dfs_type': '', 'default_fs': 'hdfs://colo-hdop-prd01.thepartshouse.com:8020', 'hdfs_resource_ignore_file': '/var/lib/ambari-agent/data/.hdfs_resource_ignore', 'hdfs_site': ..., 'kinit_path_local': 'kinit', 'principal_name': [EMPTY], 'user': 'hdfs', 'change_permissions_for_parents': True, 'owner': 'yarn', 'group': 'hadoop', 'hadoop_conf_dir': '/usr/hdp/current/hadoop-client/conf', 'type': 'directory', 'action': ['create_on_execute'], 'immutable_paths': [u'/apps/hive/warehouse', u'/apps/falcon', u'/mr-history/done', u'/app-logs', u'/tmp'], 'mode': 0755}
2017-07-17 03:42:29,630 - call['ambari-sudo.sh su hdfs -l -s /bin/bash -c 'curl -sS -L -w '"'"'%{http_code}'"'"' -X GET '"'"'http://colo-hdop-prd01.thepartshouse.com:50070/webhdfs/v1/ats/done?op=GETFILESTATUS&user.name=hdfs'"'"' 1>/tmp/tmpHuT6LP 2>/tmp/tmpA4uNcf''] {'logoutput': None, 'quiet': False}
2017-07-17 03:42:29,695 - call returned (0, '')
2017-07-17 03:42:29,697 - call['ambari-sudo.sh su hdfs -l -s /bin/bash -c 'curl -sS -L -w '"'"'%{http_code}'"'"' -X PUT '"'"'http://colo-hdop-prd01.thepartshouse.com:50070/webhdfs/v1/ats/done?op=SETPERMISSION&user.name=hdfs&permission=755'"'"' 1>/tmp/tmpulChqQ 2>/tmp/tmpE1LtCD''] {'logoutput': None, 'quiet': False}
2017-07-17 03:42:29,754 - call returned (0, '')
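The 403 above is the NameNode rejecting the SETPERMISSION call while it sits in safe mode (47250 of 47252 blocks reported). A minimal sketch for checking and, if acceptable, leaving safe mode by hand; only force 'leave' if the 3 unreported blocks can be accounted for (they likely live on the DataNode that is down):

sudo -u hdfs hdfs dfsadmin -safemode get     # report the current safe mode state
sudo -u hdfs hdfs dfsadmin -safemode leave   # force the NameNode out of safe mode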
07-17-2017 07:14 AM
My App Timeline Server is not starting. It was working fine earlier, but now I am unable to start it from Ambari. Is there any option or way to reinstall the Timeline Server without losing my data on the server?
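One hedged option, not verified on this cluster: remove and re-add only the APP_TIMELINE_SERVER component through the Ambari REST API. AMBARI_HOST, CLUSTER, and ATS_HOST are placeholders; the timeline store under /hadoop-data/hadoop/yarn/timeline is a separate directory on disk and is not deleted by a component reinstall:

# Delete the component from the host, re-add it, then install it again
curl -u admin -H 'X-Requested-By: ambari' -X DELETE http://AMBARI_HOST:8080/api/v1/clusters/CLUSTER/hosts/ATS_HOST/host_components/APP_TIMELINE_SERVER
curl -u admin -H 'X-Requested-By: ambari' -X POST http://AMBARI_HOST:8080/api/v1/clusters/CLUSTER/hosts/ATS_HOST/host_components/APP_TIMELINE_SERVER
curl -u admin -H 'X-Requested-By: ambari' -X PUT -d '{"HostRoles":{"state":"INSTALLED"}}' http://AMBARI_HOST:8080/api/v1/clusters/CLUSTER/hosts/ATS_HOST/host_components/APP_TIMELINE_SERVER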
Labels:
- Apache Hadoop
- Apache YARN
05-04-2017 11:54 AM
I want to take a backup of my Hive database (metadata + data). Please guide me on how to do it. Is it possible to do it using distcp? Also, is it possible to take the backup on a local system?
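A minimal sketch of one common approach, assuming a MySQL-backed metastore database named hive and the default warehouse path /apps/hive/warehouse seen elsewhere on this cluster; BACKUP_NN and the backup paths are placeholders:

# 1) Metadata: dump the metastore database
mysqldump -u hive -p hive > /backup/hive_metastore_$(date +%F).sql
# 2) Data: distcp the warehouse directory to a backup cluster
hadoop distcp hdfs://colo-hdop-prd01.thepartshouse.com:8020/apps/hive/warehouse hdfs://BACKUP_NN:8020/backup/hive/warehouse
# 3) Or copy a smaller dataset to the local filesystem
hdfs dfs -copyToLocal /apps/hive/warehouse /local/backup/hive

distcp only moves the data; the metastore dump is what preserves the table definitions, so both pieces are needed for a full restore.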
Labels:
- Apache Hive