Member since 
    
	
		
		
		01-03-2018
	
	
	
	
	
	
	
	
	
	
	
	
	
	
			
      
                11
            
            
                Posts
            
        
                0
            
            
                Kudos Received
            
        
                0
            
            
                Solutions
            
        
			
    
	
		
		
		04-18-2018
	
		
		04:10 AM
	
	
	
	
	
	
	
	
	
	
	
	
	
	
		
	
				
		
			
					
				
		
	
		
					
							 Hi @Harald Berghoff Thanks for the information.  
						
					
					... View more
				
			
			
			
			
			
			
			
			
			
		
			
    
	
		
		
		12-08-2017
	
		
		01:14 AM
	
	
	
	
	
	
	
	
	
	
	
	
	
	
		
	
				
		
			
					
				
		
	
		
					
							 Try these configs.  hdfs-site.xml   <configuration>    <property>      <name>dfs.replication</name>  
   <value>1</value>    </property>    <property>      <name>dfs.namenode.name.dir</name>  
   -- <value>file:/home/sameer/mydata/hdfs/namenode</value>     -> Wrong      <value>/data/hdfs/namenode</value>  // Do not use a "/home" directory path for the hdfs service. That's because the /home/sameer directory permission is 700, and it will cause permission issues.
                                       // And normally, many services deployed on Linux or Ubuntu use the local-file option with "file:///SOME_PATH"    </property>  
 <property>      <name>dfs.datanode.data.dir</name>  
   -- <value>file:/home/sameer/mydata/hdfs/datanode</value>     -> Wrong      <value>/data/hdfs/datanode</value>    </property>   </configuration>   core-site.xml   <configuration>      <property>         <name>fs.default.name </name>  
      -- <value> hdfs://localhost:9000 </value>     -> Wrong  
      <value> hdfs://NAMENODE_SERVER_FQDN:9000 </value>    // fs.default.name has been deprecated since Hadoop v2, as far as I know. 
                                                            // And I don't recommend using localhost as the hostname value. Just use the FQDN even if it's a single node.  </property>   </configuration>   yarn-site.xml   <configuration>   <!-- Site specific YARN configuration properties -->      <property>         <name>yarn.nodemanager.aux-services</name>         <value>mapreduce_shuffle</value>   
   </property>      <property>  
      <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>  
      <value>org.apache.hadoop.mapred.ShuffleHandler</value>      </property>  
   <property>     <-  Add         <name>yarn.web-proxy.address</name>         <value>YARN_SERVER_FQDN:8089</value>  // The FQDN of the server where the ResourceManager is installed.      </property>      <property>     <-  Add
      <name>yarn.resourcemanager.address</name>         <value>YARN_SERVER_FQDN:8032</value>  // The FQDN of the server where the ResourceManager is installed.  
   </property>     
</configuration>   mapred-site.xml   <configuration>      <property>   
      <name>mapreduce.framework.name</name>         <value>yarn</value>      </property>      <property>   
      <name>mapreduce.jobhistory.address</name>     <- Add this config for jobhistoryserver  
      <value>NAMENODE_SERVER_FQDN:10020</value>      </property>      <property>   
      <name>mapreduce.jobhistory.webapp.address</name>     <- Add this config for jobhistoryserver  
      <value>NAMENODE_SERVER_FQDN:19888</value>      </property>      <property>   
      <name>mapreduce.jobhistory.intermediate-done-dir</name>     <- Add this config for jobhistoryserver         <value>/mr-history/tmp</value>   // It is a temporary directory in HDFS for MR jobs.      </property>      <property>   
      <name>mapreduce.jobhistory.done-dir</name>     <- Add this config for jobhistoryserver         <value>/mr-history/done</value>   // It is the directory in HDFS for finished MR jobs.      </property>   </configuration> 
						
					
					... View more