Member since 12-18-2018
7 Posts
0 Kudos Received
2 Solutions

My Accepted Solutions

| Title | Views | Posted |
|---|---|---|
|  | 32262 | 01-03-2019 05:46 AM |
|  | 5642 | 01-03-2019 05:44 AM |

01-03-2019 05:46 AM

Stopping and removing the Docker container and rerunning the docker run script worked for me.
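
A minimal sketch of that recovery, assuming the container and script names from the HDP 3.0.1 deploy scripts that appear later in this thread (verify against your own setup):

```sh
# Stop and remove the sandbox containers, then rerun the deploy script.
# Container names and the script name are assumptions based on the
# standard HDP 3.0.1 docker-deploy package, not confirmed by the post.
docker stop sandbox-hdp sandbox-proxy
docker rm sandbox-hdp sandbox-proxy
sh docker-deploy-hdp30.sh
```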

01-03-2019 05:44 AM

Same error.

01-03-2019 05:44 AM

I needed to open a TCP port, not an HTTP port. That resolved the issue.
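
A hedged illustration of the distinction: the datanode transfer port (50010 in the logs below) speaks a raw binary protocol, so it must be forwarded as plain TCP rather than proxied as HTTP. A quick probe from the host (the hostname is the sandbox default; nc is assumed to be available):

```sh
# Verify the datanode port accepts a raw TCP connection from the host.
# In the sandbox-proxy nginx config this port would belong in a
# stream {} (TCP) block rather than an http {} block -- an assumption
# about the proxy layout, consistent with the fix described above.
nc -vz sandbox-hdp.hortonworks.com 50010
```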

12-20-2018 01:36 PM

The above issue is intermittent. However, I am now facing a different issue. I have a Docker sandbox setup of HDP, and I am trying to do a copyFromLocal (HDFS copy) from the host machine, but it gives this error:

amedhi:~ amedhi$ hadoop fs -copyFromLocal trial.txt hdfs://sandbox-hdp.hortonworks.com:8020/tmp/
2018-12-19 14:27:33,103 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2018-12-19 14:28:34,062 INFO hdfs.DataStreamer: Exception in createBlockOutputStream blk_1073743875_3061
org.apache.hadoop.net.ConnectTimeoutException: 60000 millis timeout while waiting for channel to be ready for connect. ch : java.nio.channels.SocketChannel[connection-pending remote=/172.18.0.2:50010]
                at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:534)
                at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:253)
                at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1725)
                at org.apache.hadoop.hdfs.DataStreamer.nextBlockOutputStream(DataStreamer.java:1679)
                at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:716)
2018-12-19 14:28:34,063 WARN hdfs.DataStreamer: Abandoning BP-1419118625-172.17.0.2-1543512323726:blk_1073743875_3061
2018-12-19 14:28:34,078 WARN hdfs.DataStreamer: Excluding datanode DatanodeInfoWithStorage[172.18.0.2:50010,DS-6c34ba72-0587-4927-88a1-781ba7d444d9,DISK]
2018-12-19 14:28:34,105 WARN hdfs.DataStreamer: DataStreamer Exception
org.apache.hadoop.ipc.RemoteException(java.io.IOException): File /tmp/trial.txt._COPYING_ could only be written to 0 of the 1 minReplication nodes. There are 1 datanode(s) running and 1 node(s) are excluded in this operation.
                at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:2121)
                at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.chooseTargetForNewBlock(FSDirWriteFileOp.java:286)
                at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:2706)
                at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:875)
                at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:561)
                at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
                at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:524)
                at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1025)
                at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:876)
                at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:822)
                at java.security.AccessController.doPrivileged(Native Method)
                at javax.security.auth.Subject.doAs(Subject.java:422)
                at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
                at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2682)
 
                at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1497)
                at org.apache.hadoop.ipc.Client.call(Client.java:1443)
                at org.apache.hadoop.ipc.Client.call(Client.java:1353)
                at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:228)
                at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:116)
                at com.sun.proxy.$Proxy11.addBlock(Unknown Source)
                at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.addBlock(ClientNamenodeProtocolTranslatorPB.java:510)
                at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
                at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
                at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
                at java.lang.reflect.Method.invoke(Method.java:498)
                at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:422)
                at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:165)
                at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:157)
                at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:95)
                at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:359)
                at com.sun.proxy.$Proxy12.addBlock(Unknown Source)
                at org.apache.hadoop.hdfs.DFSOutputStream.addBlock(DFSOutputStream.java:1078)
                at org.apache.hadoop.hdfs.DataStreamer.locateFollowingBlock(DataStreamer.java:1865)
                at org.apache.hadoop.hdfs.DataStreamer.nextBlockOutputStream(DataStreamer.java:1668)
                at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:716)
copyFromLocal: File /tmp/trial.txt._COPYING_ could only be written to 0 of the 1 minReplication nodes. There are 1 datanode(s) running and 1 node(s) are excluded in this operation.

I made the following fixes to resolve the above issue:
1. Opened HTTP port 50010 on the Docker container.
2. Set the property dfs.client.use.datanode.hostname to true in my host machine's Hadoop conf and also in the Ambari conf (see the config sketch after this post).

Now I am facing the following issue. Please note that the Hadoop version is the same on my host machine and in Docker:

amedhi:hadoop amedhi$ hadoop fs -copyFromLocal hdfs-site.xml hdfs://sandbox-hdp.hortonworks.com:8020/tmp
2018-12-20 09:55:04,203 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2018-12-20 09:55:05,045 INFO hdfs.DataStreamer: Exception in createBlockOutputStream blk_1073743815_2999
com.google.protobuf.InvalidProtocolBufferException: Protocol message end-group tag did not match expected tag.
	at com.google.protobuf.InvalidProtocolBufferException.invalidEndTag(InvalidProtocolBufferException.java:94)
	at com.google.protobuf.CodedInputStream.checkLastTagWas(CodedInputStream.java:124)
	at com.google.protobuf.AbstractParser.parsePartialFrom(AbstractParser.java:202)
	at com.google.protobuf.AbstractParser.parseFrom(AbstractParser.java:217)
	at com.google.protobuf.AbstractParser.parseFrom(AbstractParser.java:223)
	at com.google.protobuf.AbstractParser.parseFrom(AbstractParser.java:49)
	at org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos$BlockOpResponseProto.parseFrom(DataTransferProtos.java:23592)
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1761)
	at org.apache.hadoop.hdfs.DataStreamer.nextBlockOutputStream(DataStreamer.java:1679)
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:716)
2018-12-20 09:55:05,048 WARN hdfs.DataStreamer: Abandoning BP-1419118625-172.17.0.2-1543512323726:blk_1073743815_2999
2018-12-20 09:55:05,055 WARN hdfs.DataStreamer: Excluding datanode DatanodeInfoWithStorage[172.18.0.2:50010,DS-6c34ba72-0587-4927-88a1-781ba7d444d9,DISK]
2018-12-20 09:55:05,075 WARN hdfs.DataStreamer: DataStreamer Exception
org.apache.hadoop.ipc.RemoteException(java.io.IOException): File /tmp/hdfs-site.xml._COPYING_ could only be written to 0 of the 1 minReplication nodes. There are 1 datanode(s) running and 1 node(s) are excluded in this operation.
	at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:2121)
	at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.chooseTargetForNewBlock(FSDirWriteFileOp.java:286)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:2706)
	at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:875)
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:561)
	at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:524)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1025)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:876)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:822)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:422)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2682)
	at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1497)
	at org.apache.hadoop.ipc.Client.call(Client.java:1443)
	at org.apache.hadoop.ipc.Client.call(Client.java:1353)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:228)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:116)
	at com.sun.proxy.$Proxy11.addBlock(Unknown Source)
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.addBlock(ClientNamenodeProtocolTranslatorPB.java:510)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:498)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:422)
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:165)
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:157)
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:95)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:359)
	at com.sun.proxy.$Proxy12.addBlock(Unknown Source)
	at org.apache.hadoop.hdfs.DFSOutputStream.addBlock(DFSOutputStream.java:1078)
	at org.apache.hadoop.hdfs.DataStreamer.locateFollowingBlock(DataStreamer.java:1865)
	at org.apache.hadoop.hdfs.DataStreamer.nextBlockOutputStream(DataStreamer.java:1668)
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:716)
copyFromLocal: File /tmp/hdfs-site.xml._COPYING_ could only be written to 0 of the 1 minReplication nodes. There are 1 datanode(s) running and 1 node(s) are excluded in this operation.
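
A minimal sketch of fix 2 from the post above, assuming the stock client-side layout ($HADOOP_CONF_DIR and hdfs getconf are standard Hadoop; the exact paths for this install are not shown in the post):

```sh
# Fix 2, client side: add this property to $HADOOP_CONF_DIR/hdfs-site.xml
# on the host, so the HDFS client connects to datanodes by hostname
# instead of their container-internal IP (e.g. 172.18.0.2 in the logs):
#
#   <property>
#     <name>dfs.client.use.datanode.hostname</name>
#     <value>true</value>
#   </property>
#
# Then confirm the client actually picks it up:
hdfs getconf -confKey dfs.client.use.datanode.hostname   # should print: true
```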

12-20-2018 01:36 PM

I have set up the Docker sandbox of HDP as explained in the docs. The containers are up and running, and Ambari is also running. I am trying to do a copyFromLocal (HDFS copy) from the host machine, but it gives this error:

amedhi:~ amedhi$ hadoop fs -copyFromLocal trial.txt hdfs://sandbox-hdp.hortonworks.com:8020/tmp/
2018-12-19 14:27:33,103 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2018-12-19 14:28:34,062 INFO hdfs.DataStreamer: Exception in createBlockOutputStream blk_1073743875_3061
org.apache.hadoop.net.ConnectTimeoutException: 60000 millis timeout while waiting for channel to be ready for connect. ch : java.nio.channels.SocketChannel[connection-pending remote=/172.18.0.2:50010]
                at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:534)
                at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:253)
                at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1725)
                at org.apache.hadoop.hdfs.DataStreamer.nextBlockOutputStream(DataStreamer.java:1679)
                at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:716)
2018-12-19 14:28:34,063 WARN hdfs.DataStreamer: Abandoning BP-1419118625-172.17.0.2-1543512323726:blk_1073743875_3061
2018-12-19 14:28:34,078 WARN hdfs.DataStreamer: Excluding datanode DatanodeInfoWithStorage[172.18.0.2:50010,DS-6c34ba72-0587-4927-88a1-781ba7d444d9,DISK]
2018-12-19 14:28:34,105 WARN hdfs.DataStreamer: DataStreamer Exception
org.apache.hadoop.ipc.RemoteException(java.io.IOException): File /tmp/trial.txt._COPYING_ could only be written to 0 of the 1 minReplication nodes. There are 1 datanode(s) running and 1 node(s) are excluded in this operation.
                at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:2121)
                at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.chooseTargetForNewBlock(FSDirWriteFileOp.java:286)
                at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:2706)
                at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:875)
                at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:561)
                at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
                at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:524)
                at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1025)
                at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:876)
                at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:822)
                at java.security.AccessController.doPrivileged(Native Method)
                at javax.security.auth.Subject.doAs(Subject.java:422)
                at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
                at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2682)
 
                at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1497)
                at org.apache.hadoop.ipc.Client.call(Client.java:1443)
                at org.apache.hadoop.ipc.Client.call(Client.java:1353)
                at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:228)
                at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:116)
                at com.sun.proxy.$Proxy11.addBlock(Unknown Source)
                at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.addBlock(ClientNamenodeProtocolTranslatorPB.java:510)
                at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
                at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
                at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
                at java.lang.reflect.Method.invoke(Method.java:498)
                at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:422)
                at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:165)
                at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:157)
                at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:95)
                at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:359)
                at com.sun.proxy.$Proxy12.addBlock(Unknown Source)
                at org.apache.hadoop.hdfs.DFSOutputStream.addBlock(DFSOutputStream.java:1078)
                at org.apache.hadoop.hdfs.DataStreamer.locateFollowingBlock(DataStreamer.java:1865)
                at org.apache.hadoop.hdfs.DataStreamer.nextBlockOutputStream(DataStreamer.java:1668)
                at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:716)
copyFromLocal: File /tmp/trial.txt._COPYING_ could only be written to 0 of the 1 minReplication nodes. There are 1 datanode(s) running and 1 node(s) are excluded in this operation.

I made the following fixes to resolve the above issue:
1. Opened HTTP port 50010 on the Docker container.
2. Set the property dfs.client.use.datanode.hostname to true in my host machine's Hadoop conf and also in the Ambari conf.

Now I am facing the following issue. Please note that the Hadoop version is the same on my host machine and in Docker:

2018-12-20 09:55:05,045 INFO hdfs.DataStreamer: Exception in createBlockOutputStream blk_1073743815_2999
com.google.protobuf.InvalidProtocolBufferException: Protocol message end-group tag did not match expected tag.
	at com.google.protobuf.InvalidProtocolBufferException.invalidEndTag(InvalidProtocolBufferException.java:94)
	at com.google.protobuf.CodedInputStream.checkLastTagWas(CodedInputStream.java:124)
	at com.google.protobuf.AbstractParser.parsePartialFrom(AbstractParser.java:202)
	at com.google.protobuf.AbstractParser.parseFrom(AbstractParser.java:217)
	at com.google.protobuf.AbstractParser.parseFrom(AbstractParser.java:223)
	at com.google.protobuf.AbstractParser.parseFrom(AbstractParser.java:49)
	at org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos$BlockOpResponseProto.parseFrom(DataTransferProtos.java:23592)
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1761)
	at org.apache.hadoop.hdfs.DataStreamer.nextBlockOutputStream(DataStreamer.java:1679)
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:716)
2018-12-20 09:55:05,048 WARN hdfs.DataStreamer: Abandoning BP-1419118625-172.17.0.2-1543512323726:blk_1073743815_2999
2018-12-20 09:55:05,055 WARN hdfs.DataStreamer: Excluding datanode DatanodeInfoWithStorage[172.18.0.2:50010,DS-6c34ba72-0587-4927-88a1-781ba7d444d9,DISK]
2018-12-20 09:55:05,075 WARN hdfs.DataStreamer: DataStreamer Exception
org.apache.hadoop.ipc.RemoteException(java.io.IOException): File /tmp/hdfs-site.xml._COPYING_ could only be written to 0 of the 1 minReplication nodes. There are 1 datanode(s) running and 1 node(s) are excluded in this operation.
	at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:2121)
	at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.chooseTargetForNewBlock(FSDirWriteFileOp.java:286)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:2706)
	at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:875)
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:561)
	at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:524)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1025)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:876)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:822)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:422)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2682)
	at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1497)
	at org.apache.hadoop.ipc.Client.call(Client.java:1443)
	at org.apache.hadoop.ipc.Client.call(Client.java:1353)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:228)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:116)
	at com.sun.proxy.$Proxy11.addBlock(Unknown Source)
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.addBlock(ClientNamenodeProtocolTranslatorPB.java:510)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:498)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:422)
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:165)
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:157)
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:95)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:359)
	at com.sun.proxy.$Proxy12.addBlock(Unknown Source)
	at org.apache.hadoop.hdfs.DFSOutputStream.addBlock(DFSOutputStream.java:1078)
	at org.apache.hadoop.hdfs.DataStreamer.locateFollowingBlock(DataStreamer.java:1865)
	at org.apache.hadoop.hdfs.DataStreamer.nextBlockOutputStream(DataStreamer.java:1668)
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:716)
copyFromLocal: File /tmp/hdfs-site.xml._COPYING_ could only be written to 0 of the 1 minReplication nodes. There are 1 datanode(s) running and 1 node(s) are excluded in this operation.

Labels: Apache Hadoop
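
One hedged observation on these logs: the InvalidProtocolBufferException ("Protocol message end-group tag did not match expected tag") is the classic symptom of the client's binary data-transfer protocol reaching an endpoint that answers with something else, which is consistent with the accepted fix above of exposing port 50010 as TCP rather than HTTP. A rough check from the host (curl is assumed; the URL is illustrative):

```sh
# If this returns an HTTP response, the proxy is answering 50010 as HTTP,
# and the HDFS client's protobuf handshake will fail to parse the reply --
# matching the "end-group tag did not match" error in the logs above.
curl -sv http://sandbox-hdp.hortonworks.com:50010/ 2>&1 | head -n 20
```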

12-20-2018 04:27 AM

No error. It just hangs. Same with ssh.

12-19-2018 06:11 AM

docker ps gives the following output: both sandbox-hdp and sandbox-proxy are running. However, Ambari is not accessible, and I am not able to ssh root@localhost -p 2222 either; it just hangs.

amedhi:HDP_3.0.1_docker-deploy-scripts_18120587fc7fb amedhi$ docker ps
CONTAINER ID        IMAGE                           COMMAND                  CREATED              STATUS              PORTS             NAMES
fac36a5d52c4        hortonworks/sandbox-proxy:1.0   "nginx -g 'daemon of…"   About a minute ago   Up 59 seconds       0.0.0.0:1080->1080/tcp, 0.0.0.0:1100->1100/tcp, 0.0.0.0:1111->1111/tcp, 0.0.0.0:1988->1988/tcp, 0.0.0.0:2100->2100/tcp, 0.0.0.0:2181-2182->2181-2182/tcp, 0.0.0.0:2201-2202->2201-2202/tcp, 0.0.0.0:2222->2222/tcp, 0.0.0.0:3000->3000/tcp, 0.0.0.0:4040->4040/tcp, 0.0.0.0:4200->4200/tcp, 0.0.0.0:4242->4242/tcp, 0.0.0.0:4557->4557/tcp, 0.0.0.0:5007->5007/tcp, 0.0.0.0:5011->5011/tcp, 0.0.0.0:6001->6001/tcp, 0.0.0.0:6003->6003/tcp, 0.0.0.0:6008->6008/tcp, 0.0.0.0:6080->6080/tcp, 0.0.0.0:6188->6188/tcp, 0.0.0.0:6627->6627/tcp, 0.0.0.0:6667-6668->6667-6668/tcp, 0.0.0.0:7777->7777/tcp, 0.0.0.0:7788->7788/tcp, 0.0.0.0:8000->8000/tcp, 0.0.0.0:8005->8005/tcp, 0.0.0.0:8020->8020/tcp, 0.0.0.0:8032->8032/tcp, 0.0.0.0:8040->8040/tcp, 0.0.0.0:8042->8042/tcp, 0.0.0.0:8080-8082->8080-8082/tcp, 0.0.0.0:8086->8086/tcp, 0.0.0.0:8088->8088/tcp, 0.0.0.0:8090-8091->8090-8091/tcp, 0.0.0.0:8188->8188/tcp, 0.0.0.0:8198->8198/tcp, 0.0.0.0:8443->8443/tcp, 0.0.0.0:8585->8585/tcp, 0.0.0.0:8744->8744/tcp, 0.0.0.0:8765->8765/tcp, 0.0.0.0:8886->8886/tcp, 0.0.0.0:8888-8889->8888-8889/tcp, 0.0.0.0:8983->8983/tcp, 0.0.0.0:8993->8993/tcp, 0.0.0.0:9000->9000/tcp, 0.0.0.0:9088-9091->9088-9091/tcp, 0.0.0.0:9995-9996->9995-9996/tcp, 0.0.0.0:10000-10002->10000-10002/tcp, 0.0.0.0:10015-10016->10015-10016/tcp, 0.0.0.0:10500->10500/tcp, 0.0.0.0:10502->10502/tcp, 0.0.0.0:11000->11000/tcp, 0.0.0.0:12049->12049/tcp, 0.0.0.0:12200->12200/tcp, 0.0.0.0:15000->15000/tcp, 0.0.0.0:15002->15002/tcp, 0.0.0.0:15500->15500/tcp, 0.0.0.0:16000->16000/tcp, 0.0.0.0:16010->16010/tcp, 0.0.0.0:16020->16020/tcp, 0.0.0.0:16030->16030/tcp, 0.0.0.0:18080-18081->18080-18081/tcp, 0.0.0.0:19888->19888/tcp, 0.0.0.0:21000->21000/tcp, 0.0.0.0:30800->30800/tcp, 0.0.0.0:33553->33553/tcp, 0.0.0.0:39419->39419/tcp, 0.0.0.0:42111->42111/tcp, 0.0.0.0:50070->50070/tcp, 0.0.0.0:50075->50075/tcp, 0.0.0.0:50079->50079/tcp, 0.0.0.0:50095->50095/tcp, 0.0.0.0:50111->50111/tcp, 0.0.0.0:60000->60000/tcp, 0.0.0.0:60080->60080/tcp, 0.0.0.0:61080->61080/tcp, 80/tcp, 0.0.0.0:61888->61888/tcp   sandbox-proxy
5d6bbab1d61c        hortonworks/sandbox-hdp:3.0.1   "/usr/sbin/init"         About a minute ago   Up About a minute   22/tcp, 4200/tcp, 8080/tcp       sandbox-hdp   
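
A hedged triage sketch for the hang (the docker commands are standard; systemd inside sandbox-hdp is inferred from the /usr/sbin/init entrypoint above):

```sh
# Is sshd actually running inside the sandbox container?
docker exec sandbox-hdp systemctl is-active sshd
# Is the nginx proxy logging anything when a connection hangs?
docker logs sandbox-proxy | tail -n 20
# Does anything accept the forwarded SSH port at all?
nc -vz localhost 2222
```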