Member since: 08-08-2017
Posts: 11
Kudos Received: 0
Solutions: 0
09-07-2017
03:19 PM
Hi community, I need to run HBaseBulkLoad, but I get an exception in main: "Mkdirs failed to create". This is my code:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class HBaseBulkLoad {

    public static class BulkLoadMap extends Mapper<LongWritable, Text, ImmutableBytesWritable, Put> {
        @Override
        public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            String line = value.toString();
            String[] parts = line.split(",");
            String rowKey = parts[0];
            ImmutableBytesWritable HKey = new ImmutableBytesWritable(Bytes.toBytes(rowKey));
            Put HPut = new Put(Bytes.toBytes(rowKey));
            HPut.addColumn(Bytes.toBytes("id"), Bytes.toBytes("name"), Bytes.toBytes(parts[1]));
            HPut.addColumn(Bytes.toBytes("id"), Bytes.toBytes("mail_id"), Bytes.toBytes(parts[2]));
            HPut.addColumn(Bytes.toBytes("id"), Bytes.toBytes("sal"), Bytes.toBytes(parts[3]));
            context.write(HKey, HPut);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        String inputPath = args[0];
        // Define and set the host and the port
        conf.set("hbase.master", "tcb-inspiron-5559:16000");
        // Set the configuration: force the configuration
        conf.set("zookeeper.znode.parent", "/hbase-unsecure");
        // create a connection using createConnection()
        Connection connection = ConnectionFactory.createConnection(conf);
        Configuration config = new Configuration();
        // configure hdfs
        config.set("fs.defaultFS", "hdfs://tcb-inspiron-5559:8020");
        // hdfs permissions
        config.set("dfs.permissions.enabled", "true");
        String[] files = new GenericOptionsParser(config, args).getRemainingArgs();
        Table table = connection.getTable(TableName.valueOf("hbaseexample"));
        conf.set("hbase.mapred.outputtable", "hbaseexample");
        Job job = Job.getInstance(conf, "HBASE_BULK_LOAD");
        job.setMapOutputKeyClass(ImmutableBytesWritable.class);
        job.setMapOutputValueClass(Put.class);
        job.setSpeculativeExecution(false);
        job.setReduceSpeculativeExecution(false);
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(HFileOutputFormat2.class);
        job.setJarByClass(HBaseBulkLoad.class);
        job.setMapperClass(HBaseBulkLoad.BulkLoadMap.class);
        FileInputFormat.setInputPaths(job, inputPath);
        TextOutputFormat.setOutputPath(job, new Path(args[1]));
        RegionLocator regionLocator = connection.getRegionLocator(TableName.valueOf("hbaseexample"));
        try {
            HFileOutputFormat2.configureIncrementalLoad(job, table, regionLocator);
            System.exit(job.waitForCompletion(true) ? 0 : 1);
        } finally {
            table.close();
            connection.close();
        }
    }
}

And this is the exception:

Exception in thread "main" java.io.IOException: Mkdirs failed to create /user/tcb/hbase-staging (exists=false, cwd=file:/home/tcb/Documents/workspace/HbaseExampleOne)
    at org.apache.hadoop.fs.ChecksumFileSystem.create(ChecksumFileSystem.java:455)
    at org.apache.hadoop.fs.ChecksumFileSystem.create(ChecksumFileSystem.java:440)
    at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:911)
    at org.apache.hadoop.io.SequenceFile$Writer.<init>(SequenceFile.java:1135)
    at org.apache.hadoop.io.SequenceFile$RecordCompressWriter.<init>(SequenceFile.java:1441)
    at org.apache.hadoop.io.SequenceFile.createWriter(SequenceFile.java:275)
    at org.apache.hadoop.io.SequenceFile.createWriter(SequenceFile.java:297)
    at org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2.writePartitions(HFileOutputFormat2.java:335)
    at org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2.configurePartitioner(HFileOutputFormat2.java:593)
    at org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2.configureIncrementalLoad(HFileOutputFormat2.java:440)
    at org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2.configureIncrementalLoad(HFileOutputFormat2.java:405)
    at org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2.configureIncrementalLoad(HFileOutputFormat2.java:386)
    at com.hbase.example.HBaseBulkLoad.main(HBaseBulkLoad.java:77)

Can anyone help me, please? Thanks.
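One possible reading of the trace, not confirmed in the thread: HFileOutputFormat2.configurePartitioner() writes its partitions file under /user/tcb/hbase-staging on the job's default filesystem, and cwd=file:/home/tcb/... shows that the job still sees the local filesystem, likely because fs.defaultFS was set on the second Configuration object (config) instead of the conf the Job was built from. The following is a minimal sketch of that idea only; the class name is illustrative, and the host, port, and staging path are copied from the code above and may differ on your cluster.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.mapreduce.Job;

public class HBaseBulkLoadConfSketch {
    public static void main(String[] args) throws Exception {
        // Sketch: put both the HBase and the HDFS settings on ONE Configuration,
        // and create the Job from that same object. Hostname, port, and staging
        // path below are assumptions taken from the question.
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.master", "tcb-inspiron-5559:16000");
        conf.set("zookeeper.znode.parent", "/hbase-unsecure");
        conf.set("fs.defaultFS", "hdfs://tcb-inspiron-5559:8020"); // so /user/tcb/hbase-staging resolves to HDFS, not file:
        conf.set("hbase.fs.tmp.dir", "/user/tcb/hbase-staging");   // staging dir used for the bulk-load partitions file
        conf.set("hbase.mapred.outputtable", "hbaseexample");

        Job job = Job.getInstance(conf, "HBASE_BULK_LOAD");
        // ...remaining job setup (mapper, input/output formats and paths,
        // HFileOutputFormat2.configureIncrementalLoad) as in the code above...

        // Quick check of which filesystem the job will actually use.
        System.out.println("Default FS seen by the job: " + job.getConfiguration().get("fs.defaultFS"));
    }
}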
Labels: Apache Hadoop, Apache HBase
08-10-2017
02:42 PM
Yes, the problem was the directory. I went back to Customize Services and changed the directory. Thank you!
08-10-2017
01:43 PM
Hi, yes, ambari-server and ambari-agent are running as the root user, and the umask is 0022.
@Jay SenSharma
08-10-2017
01:30 PM
Hi all, I have installed all my required Hadoop services using Ambari, and they are visible in the Ambari UI, but I am not able to start any of them. I cannot identify why this error occurs or what the resolution should be. I would be glad if anyone could help me resolve it.

stderr: /var/lib/ambari-agent/data/errors-992.txt
Traceback (most recent call last):
File "/var/lib/ambari-agent/cache/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py", line 367, in <module>
NameNode().execute()
File "/usr/lib/python2.6/site-packages/resource_management/libraries/script/script.py", line 329, in execute
method(env)
File "/var/lib/ambari-agent/cache/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py", line 85, in install
self.configure(env)
File "/usr/lib/python2.6/site-packages/resource_management/libraries/script/script.py", line 119, in locking_configure
original_configure(obj, *args, **kw)
File "/var/lib/ambari-agent/cache/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py", line 92, in configure
namenode(action="configure", hdfs_binary=hdfs_binary, env=env)
File "/usr/lib/python2.6/site-packages/ambari_commons/os_family_impl.py", line 89, in thunk
return fn(*args, **kwargs)
File "/var/lib/ambari-agent/cache/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py", line 98, in namenode
create_name_dirs(params.dfs_name_dir)
File "/var/lib/ambari-agent/cache/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py", line 282, in create_name_dirs
cd_access="a",
File "/usr/lib/python2.6/site-packages/resource_management/core/base.py", line 155, in __init__
self.env.run()
File "/usr/lib/python2.6/site-packages/resource_management/core/environment.py", line 160, in run
self.run_action(resource, action)
File "/usr/lib/python2.6/site-packages/resource_management/core/environment.py", line 124, in run_action
provider_action()
File "/usr/lib/python2.6/site-packages/resource_management/core/providers/system.py", line 199, in action_create
recursion_follow_links=self.resource.recursion_follow_links, safemode_folders=self.resource.safemode_folders)
File "/usr/lib/python2.6/site-packages/resource_management/core/providers/system.py", line 75, in _ensure_metadata
sudo.chown(path, user_entity, group_entity)
File "/usr/lib/python2.6/site-packages/resource_management/core/sudo.py", line 40, in chown
return os.chown(path, uid, gid)
OSError: [Errno 1] Operation not permitted: '/boot/efi/hadoop/hdfs/namenode'

stdout: /var/lib/ambari-agent/data/output-992.txt
2017-08-10 14:21:53,602 - Stack Feature Version Info: stack_version=2.6, version=None, current_cluster_version=None -> 2.6
2017-08-10 14:21:53,608 - Using hadoop conf dir: /usr/hdp/current/hadoop-client/conf
User Group mapping (user_group) is missing in the hostLevelParams
2017-08-10 14:21:53,609 - Group['hadoop'] {}
2017-08-10 14:21:53,610 - Group['users'] {}
2017-08-10 14:21:53,610 - User['zookeeper'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'hadoop']}
2017-08-10 14:21:53,611 - User['ams'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'hadoop']}
2017-08-10 14:21:53,611 - User['ambari-qa'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'users']}
2017-08-10 14:21:53,612 - User['hdfs'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'hadoop']}
2017-08-10 14:21:53,612 - User['yarn'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'hadoop']}
2017-08-10 14:21:53,613 - User['mapred'] {'gid': 'hadoop', 'fetch_nonlocal_groups': True, 'groups': [u'hadoop']}
2017-08-10 14:21:53,613 - File['/var/lib/ambari-agent/tmp/changeUid.sh'] {'content': StaticFile('changeToSecureUid.sh'), 'mode': 0555}
2017-08-10 14:21:53,614 - Execute['/var/lib/ambari-agent/tmp/changeUid.sh ambari-qa /tmp/hadoop-ambari-qa,/tmp/hsperfdata_ambari-qa,/home/ambari-qa,/tmp/ambari-qa,/tmp/sqoop-ambari-qa'] {'not_if': '(test $(id -u ambari-qa) -gt 1000) || (false)'}
2017-08-10 14:21:53,619 - Skipping Execute['/var/lib/ambari-agent/tmp/changeUid.sh ambari-qa /tmp/hadoop-ambari-qa,/tmp/hsperfdata_ambari-qa,/home/ambari-qa,/tmp/ambari-qa,/tmp/sqoop-ambari-qa'] due to not_if
2017-08-10 14:21:53,619 - Group['hdfs'] {}
2017-08-10 14:21:53,620 - User['hdfs'] {'fetch_nonlocal_groups': True, 'groups': [u'hadoop', u'hdfs']}
2017-08-10 14:21:53,620 - FS Type:
2017-08-10 14:21:53,620 - Directory['/etc/hadoop'] {'mode': 0755}
2017-08-10 14:21:53,631 - File['/usr/hdp/current/hadoop-client/conf/hadoop-env.sh'] {'content': InlineTemplate(...), 'owner': 'hdfs', 'group': 'hadoop'}
2017-08-10 14:21:53,632 - Directory['/var/lib/ambari-agent/tmp/hadoop_java_io_tmpdir'] {'owner': 'hdfs', 'group': 'hadoop', 'mode': 01777}
2017-08-10 14:21:53,646 - Initializing 2 repositories
2017-08-10 14:21:53,646 - Repository['HDP-2.6'] {'base_url': 'http://public-repo-1.hortonworks.com/HDP/ubuntu16/2.x/updates/2.6.1.0', 'action': ['create'], 'components': [u'HDP', 'main'], 'repo_template': '{{package_type}} {{base_url}} {{components}}', 'repo_file_name': 'HDP', 'mirror_list': None}
2017-08-10 14:21:53,653 - File['/tmp/tmpJF7raC'] {'content': 'deb http://public-repo-1.hortonworks.com/HDP/ubuntu16/2.x/updates/2.6.1.0 HDP main'}
2017-08-10 14:21:53,654 - Writing File['/tmp/tmpJF7raC'] because contents don't match
2017-08-10 14:21:53,654 - File['/tmp/tmpYDxCae'] {'content': StaticFile('/etc/apt/sources.list.d/HDP.list')}
2017-08-10 14:21:53,654 - Writing File['/tmp/tmpYDxCae'] because contents don't match
2017-08-10 14:21:53,654 - Repository['HDP-UTILS-1.1.0.21'] {'base_url': 'http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/ubuntu16', 'action': ['create'], 'components': [u'HDP-UTILS', 'main'], 'repo_template': '{{package_type}} {{base_url}} {{components}}', 'repo_file_name': 'HDP-UTILS', 'mirror_list': None}
2017-08-10 14:21:53,656 - File['/tmp/tmpetOFxF'] {'content': 'deb http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/ubuntu16 HDP-UTILS main'}
2017-08-10 14:21:53,656 - Writing File['/tmp/tmpetOFxF'] because contents don't match
2017-08-10 14:21:53,656 - File['/tmp/tmphnjpHy'] {'content': StaticFile('/etc/apt/sources.list.d/HDP-UTILS.list')}
2017-08-10 14:21:53,656 - Writing File['/tmp/tmphnjpHy'] because contents don't match
2017-08-10 14:21:53,657 - Package['unzip'] {'retry_on_repo_unavailability': False, 'retry_count': 5}
2017-08-10 14:21:53,688 - Skipping installation of existing package unzip
2017-08-10 14:21:53,689 - Package['curl'] {'retry_on_repo_unavailability': False, 'retry_count': 5}
2017-08-10 14:21:53,721 - Skipping installation of existing package curl
2017-08-10 14:21:53,722 - Package['hdp-select'] {'retry_on_repo_unavailability': False, 'retry_count': 5}
2017-08-10 14:21:53,756 - Skipping installation of existing package hdp-select
2017-08-10 14:21:53,892 - Using hadoop conf dir: /usr/hdp/current/hadoop-client/conf
2017-08-10 14:21:53,892 - Stack Feature Version Info: stack_version=2.6, version=None, current_cluster_version=None -> 2.6
2017-08-10 14:21:53,911 - Using hadoop conf dir: /usr/hdp/current/hadoop-client/conf
2017-08-10 14:21:53,923 - checked_call['dpkg -s hdp-select | grep Version | awk '{print $2}''] {'stderr': -1}
2017-08-10 14:21:53,955 - checked_call returned (0, '2.6.1.0-129', '')
2017-08-10 14:21:53,960 - Package['hadoop-2-6-1-0-129-client'] {'retry_on_repo_unavailability': False, 'retry_count': 5}
2017-08-10 14:21:53,995 - Skipping installation of existing package hadoop-2-6-1-0-129-client
2017-08-10 14:21:53,996 - Package['hadoop-2-6-1-0-129-hdfs-datanode'] {'retry_on_repo_unavailability': False, 'retry_count': 5}
2017-08-10 14:21:54,030 - Skipping installation of existing package hadoop-2-6-1-0-129-hdfs-datanode
2017-08-10 14:21:54,031 - Package['hadoop-2-6-1-0-129-hdfs-journalnode'] {'retry_on_repo_unavailability': False, 'retry_count': 5}
2017-08-10 14:21:54,063 - Skipping installation of existing package hadoop-2-6-1-0-129-hdfs-journalnode
2017-08-10 14:21:54,064 - Package['hadoop-2-6-1-0-129-hdfs-namenode'] {'retry_on_repo_unavailability': False, 'retry_count': 5}
2017-08-10 14:21:54,098 - Skipping installation of existing package hadoop-2-6-1-0-129-hdfs-namenode
2017-08-10 14:21:54,099 - Package['hadoop-2-6-1-0-129-hdfs-secondarynamenode'] {'retry_on_repo_unavailability': False, 'retry_count': 5}
2017-08-10 14:21:54,134 - Skipping installation of existing package hadoop-2-6-1-0-129-hdfs-secondarynamenode
2017-08-10 14:21:54,135 - Package['hadoop-2-6-1-0-129-hdfs-zkfc'] {'retry_on_repo_unavailability': False, 'retry_count': 5}
2017-08-10 14:21:54,167 - Skipping installation of existing package hadoop-2-6-1-0-129-hdfs-zkfc
2017-08-10 14:21:54,167 - Package['libsnappy1'] {'retry_on_repo_unavailability': False, 'retry_count': 5}
2017-08-10 14:21:54,202 - Installing package libsnappy1 ('/usr/bin/apt-get -q -o Dpkg::Options::=--force-confdef --allow-unauthenticated --assume-yes install libsnappy1')
2017-08-10 14:22:00,518 - Package['libsnappy-dev'] {'retry_on_repo_unavailability': False, 'retry_count': 5}
2017-08-10 14:22:00,553 - Installing package libsnappy-dev ('/usr/bin/apt-get -q -o Dpkg::Options::=--force-confdef --allow-unauthenticated --assume-yes install libsnappy-dev')
2017-08-10 14:22:09,936 - Package['libhdfs0-2-6-1-0-129'] {'retry_on_repo_unavailability': False, 'retry_count': 5}
2017-08-10 14:22:09,998 - Skipping installation of existing package libhdfs0-2-6-1-0-129
2017-08-10 14:22:10,000 - Directory['/etc/security/limits.d'] {'owner': 'root', 'create_parents': True, 'group': 'root'}
2017-08-10 14:22:10,007 - File['/etc/security/limits.d/hdfs.conf'] {'content': Template('hdfs.conf.j2'), 'owner': 'root', 'group': 'root', 'mode': 0644}
2017-08-10 14:22:10,008 - XmlConfig['hadoop-policy.xml'] {'owner': 'hdfs', 'group': 'hadoop', 'conf_dir': '/usr/hdp/current/hadoop-client/conf', 'configuration_attributes': {}, 'configurations': ...}
2017-08-10 14:22:10,019 - Generating config: /usr/hdp/current/hadoop-client/conf/hadoop-policy.xml
2017-08-10 14:22:10,019 - File['/usr/hdp/current/hadoop-client/conf/hadoop-policy.xml'] {'owner': 'hdfs', 'content': InlineTemplate(...), 'group': 'hadoop', 'mode': None, 'encoding': 'UTF-8'}
2017-08-10 14:22:10,026 - XmlConfig['ssl-client.xml'] {'owner': 'hdfs', 'group': 'hadoop', 'conf_dir': '/usr/hdp/current/hadoop-client/conf', 'configuration_attributes': {}, 'configurations': ...}
2017-08-10 14:22:10,035 - Generating config: /usr/hdp/current/hadoop-client/conf/ssl-client.xml
2017-08-10 14:22:10,035 - File['/usr/hdp/current/hadoop-client/conf/ssl-client.xml'] {'owner': 'hdfs', 'content': InlineTemplate(...), 'group': 'hadoop', 'mode': None, 'encoding': 'UTF-8'}
2017-08-10 14:22:10,041 - Directory['/usr/hdp/current/hadoop-client/conf/secure'] {'owner': 'root', 'create_parents': True, 'group': 'hadoop', 'cd_access': 'a'}
2017-08-10 14:22:10,041 - XmlConfig['ssl-client.xml'] {'owner': 'hdfs', 'group': 'hadoop', 'conf_dir': '/usr/hdp/current/hadoop-client/conf/secure', 'configuration_attributes': {}, 'configurations': ...}
2017-08-10 14:22:10,049 - Generating config: /usr/hdp/current/hadoop-client/conf/secure/ssl-client.xml
2017-08-10 14:22:10,050 - File['/usr/hdp/current/hadoop-client/conf/secure/ssl-client.xml'] {'owner': 'hdfs', 'content': InlineTemplate(...), 'group': 'hadoop', 'mode': None, 'encoding': 'UTF-8'}
2017-08-10 14:22:10,055 - XmlConfig['ssl-server.xml'] {'owner': 'hdfs', 'group': 'hadoop', 'conf_dir': '/usr/hdp/current/hadoop-client/conf', 'configuration_attributes': {}, 'configurations': ...}
2017-08-10 14:22:10,061 - Generating config: /usr/hdp/current/hadoop-client/conf/ssl-server.xml
2017-08-10 14:22:10,061 - File['/usr/hdp/current/hadoop-client/conf/ssl-server.xml'] {'owner': 'hdfs', 'content': InlineTemplate(...), 'group': 'hadoop', 'mode': None, 'encoding': 'UTF-8'}
2017-08-10 14:22:10,070 - XmlConfig['hdfs-site.xml'] {'owner': 'hdfs', 'group': 'hadoop', 'conf_dir': '/usr/hdp/current/hadoop-client/conf', 'configuration_attributes': {u'final': {u'dfs.support.append': u'true', u'dfs.datanode.data.dir': u'true', u'dfs.namenode.http-address': u'true', u'dfs.namenode.name.dir': u'true', u'dfs.webhdfs.enabled': u'true', u'dfs.datanode.failed.volumes.tolerated': u'true'}}, 'configurations': ...}
2017-08-10 14:22:10,076 - Generating config: /usr/hdp/current/hadoop-client/conf/hdfs-site.xml
2017-08-10 14:22:10,076 - File['/usr/hdp/current/hadoop-client/conf/hdfs-site.xml'] {'owner': 'hdfs', 'content': InlineTemplate(...), 'group': 'hadoop', 'mode': None, 'encoding': 'UTF-8'}
2017-08-10 14:22:10,126 - XmlConfig['core-site.xml'] {'group': 'hadoop', 'conf_dir': '/usr/hdp/current/hadoop-client/conf', 'mode': 0644, 'configuration_attributes': {u'final': {u'fs.defaultFS': u'true'}}, 'owner': 'hdfs', 'configurations': ...}
2017-08-10 14:22:10,137 - Generating config: /usr/hdp/current/hadoop-client/conf/core-site.xml
2017-08-10 14:22:10,138 - File['/usr/hdp/current/hadoop-client/conf/core-site.xml'] {'owner': 'hdfs', 'content': InlineTemplate(...), 'group': 'hadoop', 'mode': 0644, 'encoding': 'UTF-8'}
2017-08-10 14:22:10,161 - File['/usr/hdp/current/hadoop-client/conf/slaves'] {'content': Template('slaves.j2'), 'owner': 'hdfs'}
2017-08-10 14:22:10,166 - Directory['/hadoop/hdfs/namenode'] {'owner': 'hdfs', 'create_parents': True, 'group': 'hadoop', 'mode': 0755, 'cd_access': 'a'}
2017-08-10 14:22:10,166 - Directory['/boot/efi/hadoop/hdfs/namenode'] {'owner': 'hdfs', 'group': 'hadoop', 'create_parents': True, 'mode': 0755, 'cd_access': 'a'}
2017-08-10 14:22:10,167 - Changing owner for /boot/efi/hadoop/hdfs/namenode from 0 to hdfs
2017-08-10 14:22:10,167 - Changing group for /boot/efi/hadoop/hdfs/namenode from 0 to hadoop
Command failed after 1 tries
Labels: Apache Ambari, Apache Hadoop