Member since 05-25-2016
      
- 31 Posts
- 1 Kudos Received
- 1 Solution
My Accepted Solutions

| Title | Views | Posted |
|---|---|---|
|  | 3324 | 09-08-2016 06:38 AM |
			
    
	
		
		
09-08-2016 06:38 AM (1 Kudo)

I solved this problem. It was caused by the supervisor. You can shut down the supervisor or restart it.
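For reference, a minimal sketch of restarting the supervisor through the Ambari REST API, not taken from the original answer: the Ambari host (hdp-m1:8080), credentials (admin:admin), cluster name (mycluster), and supervisor host (hdp-s3, from the storm.yaml quoted further down) are all assumptions to adjust for your environment.

# Stop the SUPERVISOR component on its host...
curl -u admin:admin -H 'X-Requested-By: ambari' -X PUT \
  -d '{"RequestInfo":{"context":"Stop Supervisor"},"Body":{"HostRoles":{"state":"INSTALLED"}}}' \
  http://hdp-m1:8080/api/v1/clusters/mycluster/hosts/hdp-s3/host_components/SUPERVISOR
# ...then start it again.
curl -u admin:admin -H 'X-Requested-By: ambari' -X PUT \
  -d '{"RequestInfo":{"context":"Start Supervisor"},"Body":{"HostRoles":{"state":"STARTED"}}}' \
  http://hdp-m1:8080/api/v1/clusters/mycluster/hosts/hdp-s3/host_components/SUPERVISOR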
			
    
	
		
		
09-07-2016 08:57 AM

While installing HDP, I ran into this issue. Can somebody help me?

Traceback (most recent call last):
  File "/var/lib/ambari-agent/cache/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_collector.py", line 133, in <module>
    AmsCollector().execute()
  File "/usr/lib/python2.6/site-packages/resource_management/libraries/script/script.py", line 219, in execute
    method(env)
  File "/var/lib/ambari-agent/cache/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_collector.py", line 34, in install
    self.install_packages(env)
  File "/usr/lib/python2.6/site-packages/resource_management/libraries/script/script.py", line 404, in install_packages
    Package(name)
  File "/usr/lib/python2.6/site-packages/resource_management/core/base.py", line 154, in __init__
    self.env.run()
  File "/usr/lib/python2.6/site-packages/resource_management/core/environment.py", line 158, in run
    self.run_action(resource, action)
  File "/usr/lib/python2.6/site-packages/resource_management/core/environment.py", line 121, in run_action
    provider_action()
  File "/usr/lib/python2.6/site-packages/resource_management/core/providers/package/__init__.py", line 49, in action_install
    self.install_package(package_name, self.resource.use_repos, self.resource.skip_repos)
  File "/usr/lib/python2.6/site-packages/resource_management/core/providers/package/yumrpm.py", line 49, in install_package
    shell.checked_call(cmd, sudo=True, logoutput=self.get_logoutput())
  File "/usr/lib/python2.6/site-packages/resource_management/core/shell.py", line 70, in inner
    result = function(command, **kwargs)
  File "/usr/lib/python2.6/site-packages/resource_management/core/shell.py", line 92, in checked_call
    tries=tries, try_sleep=try_sleep)
  File "/usr/lib/python2.6/site-packages/resource_management/core/shell.py", line 140, in _call_wrapper
    result = _call(command, **kwargs_copy)
  File "/usr/lib/python2.6/site-packages/resource_management/core/shell.py", line 291, in _call
    raise Fail(err_msg)
resource_management.core.exceptions.Fail: Execution of '/usr/bin/yum -d 0 -e 0 -y install ambari-metrics-collector' returned 1. Error: Nothing to do 
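The failure reduces to the last line: yum answered "Nothing to do", which normally means no enabled repository provides ambari-metrics-collector. A hedged diagnostic sketch, assuming standard HDP 2.x repository and package naming:

yum clean all                             # drop stale repo metadata
yum repolist enabled                      # is an HDP/Ambari repo present and enabled?
yum list available 'ambari-metrics*'      # does any repo actually provide the collector?
yum -y install ambari-metrics-collector   # retry once the repo is visible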
						
					
- Labels:
 - Apache Ambari
			
    
	
		
		
09-01-2016 07:23 AM

DRPC was configured on both the nimbus node and the supervisor node.
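For context, a hedged sketch of the storm.yaml entries that typically point topologies and clients at the DRPC daemons; the hostname is carried over from the configuration quoted in the question below and is only an assumption for any other cluster:

# storm.yaml (sketch): topologies locate DRPC daemons via drpc.servers
drpc.servers:
  - "hdp-m1"
drpc.port: 3772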
			
    
	
		
		
09-01-2016 07:16 AM

When I run the Storm topology from localhost like this:

import backtype.storm.utils.DRPCClient;

public class Client {
 public static void main(String[] args) throws Exception {
  DRPCClient client = new DRPCClient("10.10.12.XX", 3772);
  String[] words = { "hello", "storm", "drpc" };
  for (String word : words) {
   String result = client.execute("exclamation", word);
   System.out.println("Result for \"" + word + "\": " + result);
  }
 }
}
I got an error. My storm.yaml is:

dev.zookeeper.path : '/tmp/dev-storm-zookeeper'
drpc.childopts : '-Xmx768m '
drpc.invocations.port : 3773
drpc.port : 3772
drpc.queue.size : 128
drpc.request.timeout.secs : 600
drpc.worker.threads : 64
drpc_server_host : [hdp-m1]
java.library.path : '/usr/local/lib:/opt/local/lib:/usr/lib:/usr/hdp/current/storm-client/lib'
logviewer.appender.name : 'A1'
logviewer.childopts : '-Xmx128m '
logviewer.port : 8000
metrics.reporter.register : 'org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter'
nimbus.childopts : '-Xmx1024m  -javaagent:/usr/hdp/current/storm-nimbus/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8649,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-nimbus/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Nimbus_JVM'
nimbus.cleanup.inbox.freq.secs : 600
nimbus.file.copy.expiration.secs : 600
nimbus.inbox.jar.expiration.secs : 3600
nimbus.monitor.freq.secs : 120
nimbus.reassign : true
nimbus.seeds : ['hdp-m1']
nimbus.supervisor.timeout.secs : 60
nimbus.task.launch.secs : 120
nimbus.task.timeout.secs : 30
nimbus.thrift.max_buffer_size : 1048576
nimbus.thrift.port : 6627
nimbus.topology.validator : 'backtype.storm.nimbus.DefaultTopologyValidator'
nimbus_hosts : [hdp-m1]
storm.cluster.mode : 'distributed'
storm.local.dir : '/hadoop/storm'
storm.local.mode.zmq : false
storm.log.dir : '/var/log/storm'
storm.messaging.netty.buffer_size : 5242880
storm.messaging.netty.client_worker_threads : 1
storm.messaging.netty.max_retries : 30
storm.messaging.netty.max_wait_ms : 1000
storm.messaging.netty.min_wait_ms : 100
storm.messaging.netty.server_worker_threads : 1
storm.messaging.transport : 'backtype.storm.messaging.netty.Context'
storm.thrift.transport : 'backtype.storm.security.auth.SimpleTransportPlugin'
storm.zookeeper.connection.timeout : 15000
storm.zookeeper.port : 2181
storm.zookeeper.retry.interval : 1000
storm.zookeeper.retry.intervalceiling.millis : 30000
storm.zookeeper.retry.times : 5
storm.zookeeper.root : '/storm'
storm.zookeeper.servers : ['hdp-s2','hdp-m1','hdp-s1','hdp-m2']
storm.zookeeper.session.timeout : 20000
storm_ui_server_host : [hdp-m1]
supervisor.childopts : '-Xmx256m  -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port=56431 -javaagent:/usr/hdp/current/storm-supervisor/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-supervisor/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Supervisor_JVM'
supervisor.heartbeat.frequency.secs : 5
supervisor.monitor.frequency.secs : 3
supervisor.slots.ports : [6700, 6701]
supervisor.worker.start.timeout.secs : 120
supervisor.worker.timeout.secs : 30
supervisor_hosts : [hdp-s3]
task.heartbeat.frequency.secs : 3
task.refresh.poll.secs : 10
topology.acker.executors : null
topology.builtin.metrics.bucket.size.secs : 60
topology.debug : false
topology.disruptor.wait.strategy : 'com.lmax.disruptor.BlockingWaitStrategy'
topology.enable.message.timeouts : true
topology.error.throttle.interval.secs : 10
topology.executor.receive.buffer.size : 1024
topology.executor.send.buffer.size : 1024
topology.fall.back.on.java.serialization : true
topology.kryo.factory : 'backtype.storm.serialization.DefaultKryoFactory'
topology.max.error.report.per.interval : 5
topology.max.replication.wait.time.sec : 60
topology.max.spout.pending : 1000
topology.max.task.parallelism : null
topology.message.timeout.secs : 30
topology.min.replication.count : 1
topology.optimize : true
topology.receiver.buffer.size : 8
topology.skip.missing.kryo.registrations : false
topology.sleep.spout.wait.strategy.time.ms : 1
topology.spout.wait.strategy : 'backtype.storm.spout.SleepSpoutWaitStrategy'
topology.state.synchronization.timeout.secs : 60
topology.stats.sample.rate : 0.05
topology.tick.tuple.freq.secs : null
topology.transfer.buffer.size : 1024
topology.trident.batch.emit.interval.millis : 500
topology.tuple.serializer : 'backtype.storm.serialization.types.ListDelegateSerializer'
topology.worker.childopts : null
topology.worker.shared.thread.pool.size : 4
topology.workers : 1
transactional.zookeeper.port : null
transactional.zookeeper.root : '/transactional'
transactional.zookeeper.servers : null
ui.childopts : '-Xmx768m '
ui.filter : null
ui.port : 8744
worker.childopts : '-Xmx768m  -javaagent:/usr/hdp/current/storm-client/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-client/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Worker_%ID%_JVM'
worker.heartbeat.frequency.secs : 1
zmq.hwm : 0
zmq.linger.millis : 5000
zmq.threads : 1
And my topology is:

import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.LocalDRPC;
import backtype.storm.StormSubmitter;
import backtype.storm.drpc.LinearDRPCTopologyBuilder;
import backtype.storm.topology.BasicOutputCollector;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseBasicBolt;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;

public class BasicDRPCTopology {
 public static class ExclaimBolt extends BaseBasicBolt {
  @Override
  public void execute(Tuple tuple, BasicOutputCollector collector) {
   String input = tuple.getString(1);
   collector.emit(new Values(tuple.getValue(0), input + "!"));
  }
  @Override
  public void declareOutputFields(OutputFieldsDeclarer declarer) {
   declarer.declare(new Fields("id", "result"));
  }
 }
 public static void main(String[] args) throws Exception {
  // Topology
  LinearDRPCTopologyBuilder builder = new LinearDRPCTopologyBuilder("exclamation");
  // spout、bolt
  builder.addBolt(new ExclaimBolt(), 3);
  // Topology
  Config conf = new Config();
  if (args == null || args.length == 0) {
   LocalDRPC drpc = new LocalDRPC();
   // cluster
   LocalCluster cluster = new LocalCluster();
   // submit Topology
   cluster.submitTopology("drpc-demo", conf, builder.createLocalTopology(drpc));
   for (String word : new String[] { "hello", "goodbye" }) {
    System.out.println("Result for \"" + word + "\": " + drpc.execute("exclamation", word));
   }
   // stop storm
   cluster.shutdown();
   drpc.shutdown();
  } else {
   conf.setNumWorkers(3);
   StormSubmitter.submitTopology(args[0], conf, builder.createRemoteTopology());
  }
 }
}

My project works fine on storm-0.9.5. What is wrong with my project? Can somebody help me?
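One way to narrow this down, sketched under the assumption that hdp-m1 is the DRPC host as configured above: confirm that a DRPC server is actually listening on the port the client connects to.

# On hdp-m1: is anything listening on the DRPC port?
netstat -tlnp | grep 3772
# If not, start the DRPC daemon (or its Ambari-managed equivalent):
storm drpc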
		
			
				
						
- Labels:
 - Apache Storm
			
    
	
		
		
08-25-2016 02:56 AM

When I run the job, I get this error:

2016-08-25 10:34:31,351  WARN ActionStartXCommand:523 - SERVER[hdp-m2] USER[oozie] GROUP[-] TOKEN[] APP[sqoop-wf] JOB[0000002-160825102604568-oozie-root-W] ACTION[0000002-160825102604568-oozie-root-W@sqoop-node] Error starting action [sqoop-node]. ErrorType [FAILED], ErrorCode [EJ001], Message [Could not locate Oozie sharelib]
org.apache.oozie.action.ActionExecutorException: Could not locate Oozie sharelib
	at org.apache.oozie.action.hadoop.JavaActionExecutor.addSystemShareLibForAction(JavaActionExecutor.java:730)
	at org.apache.oozie.action.hadoop.JavaActionExecutor.addAllShareLibs(JavaActionExecutor.java:825)
	at org.apache.oozie.action.hadoop.JavaActionExecutor.setLibFilesArchives(JavaActionExecutor.java:816)
	at org.apache.oozie.action.hadoop.JavaActionExecutor.submitLauncher(JavaActionExecutor.java:1044)
	at org.apache.oozie.action.hadoop.JavaActionExecutor.start(JavaActionExecutor.java:1293)
	at org.apache.oozie.command.wf.ActionStartXCommand.execute(ActionStartXCommand.java:250)
	at org.apache.oozie.command.wf.ActionStartXCommand.execute(ActionStartXCommand.java:64)
	at org.apache.oozie.command.XCommand.call(XCommand.java:286)
	at org.apache.oozie.service.CallableQueueService$CompositeCallable.call(CallableQueueService.java:321)
	at org.apache.oozie.service.CallableQueueService$CompositeCallable.call(CallableQueueService.java:250)
	at org.apache.oozie.service.CallableQueueService$CallableWrapper.run(CallableQueueService.java:175)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
	at java.lang.Thread.run(Thread.java:745)
Then I checked the sharelib like this:

[root@hdp-m2 bin]# oozie admin -oozie http://hdp-m2:11000/oozie -shareliblist
[Available ShareLib]

So I created the sharelib like this:

./oozie-setup.sh sharelib create -fs http://hdp-m2:8020 -locallib /usr/hdp/2.4.0.0-169/oozie/lib/*

Then I got another error:

log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.
Error: tar: This does not look like a tar archive
tar: Skipping to next header
tar: Exiting with failure status due to previous errors
Stack trace for the error was (for debug purposes):
--------------------------------------
ExitCodeException exitCode=2: tar: This does not look like a tar archive
tar: Skipping to next header
tar: Exiting with failure status due to previous errors
 at org.apache.hadoop.util.Shell.runCommand(Shell.java:576)
 at org.apache.hadoop.util.Shell.run(Shell.java:487)
 at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:753)
 at org.apache.hadoop.fs.FileUtil.unTarUsingTar(FileUtil.java:675)
 at org.apache.hadoop.fs.FileUtil.unTar(FileUtil.java:651)
 at org.apache.oozie.tools.OozieSharelibCLI.run(OozieSharelibCLI.java:131)
 at org.apache.oozie.tools.OozieSharelibCLI.main(OozieSharelibCLI.java:57)
--------------------------------------

I don't know why. Can somebody help me, please? Thank you very much!
						
					
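For comparison, a hedged sketch of how the sharelib is usually created on HDP: -fs takes an hdfs:// URI rather than http://, and -locallib expects the sharelib tarball shipped with the Oozie package. The tarball path below assumes the standard HDP 2.4 layout and may differ on your cluster.

cd /usr/hdp/2.4.0.0-169/oozie/bin
./oozie-setup.sh sharelib create \
    -fs hdfs://hdp-m2:8020 \
    -locallib /usr/hdp/2.4.0.0-169/oozie/oozie-sharelib.tar.gz   # assumed tarball path
# Tell the running server to pick up the new sharelib, then list it again:
oozie admin -oozie http://hdp-m2:11000/oozie -sharelibupdate
oozie admin -oozie http://hdp-m2:11000/oozie -shareliblist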
		
			
				
						
- Labels:
 - Apache Oozie
    
	
		
		
08-24-2016 09:55 AM

I want to load my data from MySQL into HDFS. Here are my files.

My workflow.xml:

<workflow-app xmlns="uri:oozie:workflow:0.2" name="sqoop-wf">
    <start to="sqoop-node"/>
    <action name="sqoop-node">
        <sqoop xmlns="uri:oozie:sqoop-action:0.2">
            <job-tracker>${jobTracker}</job-tracker>
            <name-node>${nameNode}</name-node>
            <prepare>
                <delete path="${nameNode}/user/${wf:user()}/${examplesRoot}/output-data/sqoop"/>
                <mkdir path="${nameNode}/user/${wf:user()}/${examplesRoot}/output-data"/>
            </prepare>
            <configuration>
                <property>
                    <name>mapred.job.queue.name</name>
                    <value>${queueName}</value>
                </property>
            </configuration>
            <!-- <command>import --connect jdbc:mysql://XXX:3306/ph51_dcp --table ph51dcp_visit_log  --username root --password lida123321 --target-dir /user/${wf:user()}/${examplesRoot}/output-data/sqoop -m 1</command> -->
            <command>import --connect jdbc:mysql://XXX:3306/ph51_dcp --table ph51dcp_visit_log  --username root --password lida123321 --target-dir /user/sqoop-1 -m 1</command>
            <file>db.hsqldb.properties#db.hsqldb.properties</file>
            <file>db.hsqldb.script#db.hsqldb.script</file>
        </sqoop>
        <ok to="end"/>
        <error to="fail"/>
    </action>
    <kill name="fail">
        <message>Sqoop failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
    </kill>
    <end name="end"/>
</workflow-app>
My job.properties:

# limitations under the License.
#
nameNode=hdfs://hdp-m1:8020
jobTracker=hdp-m1:8021
queueName=default
examplesRoot=examples
oozie.use.system.libpath=true
oozie.wf.application.path=${nameNode}/user/${user.name}/${examplesRoot}/apps/sqoop
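For completeness, a minimal sketch of staging and submitting this workflow, assuming the application path above resolves to /user/root/examples/apps/sqoop and the Oozie server address used in the other posts:

# Copy the workflow definition to the HDFS application path
hdfs dfs -mkdir -p /user/root/examples/apps/sqoop
hdfs dfs -put -f workflow.xml /user/root/examples/apps/sqoop/
# Submit and run with the local job.properties
oozie job -oozie http://hdp-m2:11000/oozie -config job.properties -run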
 
						
					
    
	
		
		
08-24-2016 09:50 AM

I have verified the hdfs-site.xml like this, and restarted HDP via Ambari. Then I ran:

[root@hdp-m2 testData]# oozie job -oozie http://hdp-m2:11000/oozie -config job.properties -run
Error: E0501 : E0501: Could not perform authorization operation, User: root is not allowed to impersonate root

But it doesn't work. Why? Do you have some other solution? Thank you very much.
						
					
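For context, a hedged sketch of the Hadoop impersonation settings that usually govern E0501: they live in core-site.xml rather than hdfs-site.xml, the proxied user name in the property key is taken from the error message (root), and the wildcard values are illustrative assumptions that should be narrowed in production.

<!-- core-site.xml (sketch): let the user the Oozie server runs as impersonate end users -->
<property>
  <name>hadoop.proxyuser.root.hosts</name>
  <value>*</value>   <!-- illustrative; restrict to the Oozie server host -->
</property>
<property>
  <name>hadoop.proxyuser.root.groups</name>
  <value>*</value>   <!-- illustrative; restrict to the relevant groups -->
</property>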
    
	
		
		
08-24-2016 09:11 AM

[root@hdp-m2 testData]# oozie job -auth SIMPLE -oozie http://hdp-m2:11000/oozie -config job.properties -run
Error: E0501 : E0501: Could not perform authorization operation, User: root is not allowed to impersonate root
  Can somebody help me? 
						
					
		
			
				
						
- Labels:
 - Apache Sqoop