Member since 04-25-2016

579 Posts | 609 Kudos Received | 111 Solutions
        My Accepted Solutions
| Title | Views | Posted | 
|---|---|---|
| | 2926 | 02-12-2020 03:17 PM |
| | 2136 | 08-10-2017 09:42 AM |
| | 12474 | 07-28-2017 03:57 AM |
| | 3411 | 07-19-2017 02:43 AM |
| | 2522 | 07-13-2017 11:42 AM |
			
    
	
		
		
12-21-2016 04:17 PM (2 Kudos)
SYMPTOM: HiveServer2 remains in a hung state; jstack reveals the following trace.

"HiveServer2-Handler-Pool: Thread-139105" prio=10 tid=0x00007ff34e080800 nid=0x3d43e in Object.wait() [0x00007ff30974e000]
   java.lang.Thread.State: WAITING (on object monitor)
	at java.lang.Object.wait(Native Method)
	at java.lang.Object.wait(Object.java:503)
	at org.apache.hadoop.ipc.Client.call(Client.java:1417)
	- locked <0x00000003e1c5f298> (a org.apache.hadoop.ipc.Client$Call)
	at org.apache.hadoop.ipc.Client.call(Client.java:1363)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:229)
	at com.sun.proxy.$Proxy23.checkAccess(Unknown Source)
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.checkAccess(ClientNamenodeProtocolTranslatorPB.java:1469)
	at sun.reflect.GeneratedMethodAccessor93.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:256)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:104)
	at com.sun.proxy.$Proxy24.checkAccess(Unknown Source)
	at org.apache.hadoop.hdfs.DFSClient.checkAccess(DFSClient.java:3472)
	at org.apache.hadoop.hdfs.DistributedFileSystem$53.doCall(DistributedFileSystem.java:2270)
	at org.apache.hadoop.hdfs.DistributedFileSystem$53.doCall(DistributedFileSystem.java:2267)
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
	at org.apache.hadoop.hdfs.DistributedFileSystem.access(DistributedFileSystem.java:2267)
	at sun.reflect.GeneratedMethodAccessor92.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.hive.shims.Hadoop23Shims.checkFileAccess(Hadoop23Shims.java:1006)
	at org.apache.hadoop.hive.common.FileUtils.checkFileAccessWithImpersonation(FileUtils.java:378)
	at org.apache.hadoop.hive.common.FileUtils.isActionPermittedForFileHierarchy(FileUtils.java:417)
	at org.apache.hadoop.hive.common.FileUtils.isActionPermittedForFileHierarchy(FileUtils.java:431)
	at org.apache.hadoop.hive.common.FileUtils.isActionPermittedForFileHierarchy(FileUtils.java:431)
	at org.apache.hadoop.hive.common.FileUtils.isActionPermittedForFileHierarchy(FileUtils.java:431)
	at org.apache.hadoop.hive.common.FileUtils.isActionPermittedForFileHierarchy(FileUtils.java:431)
	at org.apache.hadoop.hive.common.FileUtils.isActionPermittedForFileHierarchy(FileUtils.java:431)
	at org.apache.ranger.authorization.hive.authorizer.RangerHiveAuthorizer.isURIAccessAllowed(RangerHiveAuthorizer.java:752)
	at org.apache.ranger.authorization.hive.authorizer.RangerHiveAuthorizer.checkPrivileges(RangerHiveAuthorizer.java:252)
	at org.apache.hadoop.hive.ql.Driver.doAuthorizationV2(Driver.java:837)
	at org.apache.hadoop.hive.ql.Driver.doAuthorization(Driver.java:628)
	at org.apache.hadoop.hive.ql.Driver.compile(Driver.java:504)
	at org.apache.hadoop.hive.ql.Driver.compile(Driver.java:316)
	at org.apache.hadoop.hive.ql.Driver.compileInternal(Driver.java:1189)
	- locked <0x0000000433e8e4b8> (a java.lang.Object)
	at org.apache.hadoop.hive.ql.Driver.compileAndRespond(Driver.java:1183)
	at org.apache.hive.service.cli.operation.SQLOperation.prepare(SQLOperation.java:110)
	at org.apache.hive.service.cli.operation.SQLOperation.runInternal(SQLOperation.java:181)
	at org.apache.hive.service.cli.operation.Operation.run(Operation.java:257)
	at org.apache.hive.service.cli.session.HiveSessionImpl.executeStatementInternal(HiveSessionImpl.java:419)
	at org.apache.hive.service.cli.session.HiveSessionImpl.executeStatement(HiveSessionImpl.java:400)
	at sun.reflect.GeneratedMethodAccessor148.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hive.service.cli.session.HiveSessionProxy.invoke(HiveSessionProxy.java:78)
	at org.apache.hive.service.cli.session.HiveSessionProxy.access$000(HiveSessionProxy.java:36)
	at org.apache.hive.service.cli.session.HiveSessionProxy$1.run(HiveSessionProxy.java:63)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:415)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1709)
	at org.apache.hive.service.cli.session.HiveSessionProxy.invoke(HiveSessionProxy.java:59)
	at com.sun.proxy.$Proxy37.executeStatement(Unknown Source)
	at org.apache.hive.service.cli.CLIService.executeStatement(CLIService.java:263)
	at org.apache.hive.service.cli.thrift.ThriftCLIService.ExecuteStatement(ThriftCLIService.java:486)
	at org.apache.hive.service.cli.thrift.TCLIService$Processor$ExecuteStatement.getResult(TCLIService.java:1317)
	at org.apache.hive.service.cli.thrift.TCLIService$Processor$ExecuteStatement.getResult(TCLIService.java:1302)
	at org.apache.thrift.ProcessFunction.process(ProcessFunction.java:39)
	at org.apache.thrift.TBaseProcessor.process(TBaseProcessor.java:39)
	at org.apache.hive.service.auth.TSetIpAddressProcessor.process(TSetIpAddressProcessor.java:56)
	at org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:285)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
	at java.lang.Thread.run(Thread.java:745)
ROOT CAUSE: hive.security.authorization.enabled is set to true and the user is running a CREATE EXTERNAL TABLE command with a non-existent directory, so the authorizer checks the parent directory and all the files/directories inside it recursively. The issue is reported in https://issues.apache.org/jira/browse/HIVE-10022.

WORKAROUND: Restart HiveServer2 and create the table in a directory that has only a few files under it.

RESOLUTION: The fix for this is available as HOTFIX-332; if you are using Ranger-based authorization, please also get the fix for RANGER-1126.
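For illustration, here is a minimal JDBC sketch of the kind of statement that triggers the recursive permission walk. This is only a hypothetical reproduction under assumptions: the HiveServer2 host, table name, and LOCATION path are made-up placeholders, not values from the original report, and the hive-jdbc driver is assumed to be on the classpath.

// Hypothetical repro sketch: a CREATE EXTERNAL TABLE whose LOCATION does not exist.
// With hive.security.authorization.enabled=true the authorizer checks the existing parent
// directory and everything under it file by file (see HIVE-10022), which is what keeps the
// HiveServer2 handler thread in the trace above stuck in the recursive access checks.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class CreateExternalTableRepro {
    public static void main(String[] args) throws Exception {
        // Placeholder HiveServer2 JDBC URL; adjust host, port, and authentication for your cluster.
        try (Connection conn = DriverManager.getConnection("jdbc:hive2://hs2-host:10000/default", "hive", "");
             Statement stmt = conn.createStatement()) {
            // '/data/landing/does_not_exist' is a non-existent directory under a large parent.
            stmt.execute("CREATE EXTERNAL TABLE repro_ext (id INT) "
                    + "LOCATION '/data/landing/does_not_exist'");
        }
    }
}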
						
					
    
	
		
		
12-21-2016 02:21 PM (3 Kudos)
ENV: HDP-2.5

Java: openjdk version "1.8.0_111"

The following Storm topology consists of a KafkaSpout and a SinkTypeBolt (a minimal sketch of the topology wiring is included after the build/run steps).

Step 1: Create pom.xml with the following dependencies

<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
   xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <groupId>hadoop</groupId>
   <artifactId>KafkaSpoutStorm</artifactId>
   <version>0.0.1-SNAPSHOT</version>
   <packaging>jar</packaging>
   <name>stormkafka</name>
   <url>http://maven.apache.org</url>
   <properties>
      <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
   </properties>
   <repositories>
      <repository>
         <id>HDPReleases</id>
         <name>HDP Releases</name>
         <url>http://repo.hortonworks.com/content/repositories/public</url>
         <layout>default</layout>
      </repository>
      <repository>
         <id>HDPJetty</id>
         <name>Hadoop Jetty</name>
         <url>http://repo.hortonworks.com/content/repositories/jetty-hadoop/</url>
         <layout>default</layout>
      </repository>
   </repositories>
   <dependencies>
      <dependency>
         <groupId>junit</groupId>
         <artifactId>junit</artifactId>
         <version>4.11</version>
         <scope>test</scope>
      </dependency>
      <dependency>
         <groupId>org.apache.storm</groupId>
         <artifactId>storm-core</artifactId>
         <version>1.0.1.2.5.3.0-37</version>
         <scope>provided</scope>
         <exclusions>
            <exclusion>
               <groupId>org.slf4j</groupId>
               <artifactId>slf4j-log4j12</artifactId>
            </exclusion>
         </exclusions>
      </dependency>
      <dependency>
         <groupId>org.apache.kafka</groupId>
         <artifactId>kafka_2.10</artifactId>
         <version>0.10.0.2.5.3.0-37</version>
         <exclusions>
            <exclusion>
               <groupId>org.slf4j</groupId>
               <artifactId>slf4j-log4j12</artifactId>
            </exclusion>
         </exclusions>
      </dependency>
      <dependency>
         <groupId>org.apache.storm</groupId>
         <artifactId>storm-kafka</artifactId>
         <version>1.0.1.2.5.3.0-37</version>
      </dependency>
      <dependency>
         <groupId>org.apache.storm</groupId>
         <artifactId>storm-hdfs</artifactId>
         <version>1.0.1.2.5.3.0-37</version>
      </dependency>
      <dependency>
         <groupId>com.googlecode.json-simple</groupId>
         <artifactId>json-simple</artifactId>
         <version>1.1</version>
      </dependency>
   </dependencies>
   <build>
      <plugins>
         <plugin>
            <groupId>org.apache.maven.plugins</groupId>
            <artifactId>maven-shade-plugin</artifactId>
            <version>1.4</version>
            <configuration>
               <createDependencyReducedPom>true</createDependencyReducedPom>
            </configuration>
            <executions>
               <execution>
                  <phase>package</phase>
                  <goals>
                     <goal>shade</goal>
                  </goals>
                  <configuration>
                     <transformers>
                        <transformer
                           implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer" />
                        <transformer
                           implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
                           <mainClass>com.rajkrrsingh.storm.Topology</mainClass>
                        </transformer>
                     </transformers>
                     <filters>
                        <filter>
                           <artifact>*:*</artifact>
                           <excludes>
                              <exclude>META-INF/*.SF</exclude>
                              <exclude>META-INF/*.DSA</exclude>
                              <exclude>META-INF/*.RSA</exclude>
                           </excludes>
                        </filter>
                     </filters>
                  </configuration>
               </execution>
            </executions>
         </plugin>
         <plugin>
            <groupId>org.apache.maven.plugins</groupId>
            <artifactId>maven-resources-plugin</artifactId>
            <version>2.4</version>
         </plugin>
         <plugin>
            <groupId>org.apache.maven.plugins</groupId>
            <artifactId>maven-source-plugin</artifactId>
            <executions>
               <execution>
                  <id>attach-sources</id>
                  <goals>
                     <goal>jar</goal>
                  </goals>
               </execution>
            </executions>
         </plugin>
      </plugins>
      <resources>
         <resource>
            <directory>src/main/java</directory>
            <includes>
               <include>**/*.properties</include>
            </includes>
         </resource>
      </resources>
   </build>
</project>
Step 2: Clone the git repo to get the complete code

git clone https://github.com/rajkrrsingh/KafkaSpoutStorm.git

Step 3: Modify default_config.properties according to your cluster

Step 4: Build using Maven; this will create a fat jar in the target folder

mvn clean package
Step 5: Now run it on the Storm cluster

storm jar KafkaSpoutStorm-0.0.1-SNAPSHOT.jar com.rajkrrsingh.storm.Topology
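As a rough orientation before cloning the repo, here is a minimal sketch of how such a topology is usually wired together. This is an assumption-based sketch, not the code from the repo: the real KafkaSpout configuration and the SinkTypeBolt implementation live in the cloned sources, the bolt below is only a simplified stand-in that prints each message, and the ZooKeeper quorum, topic name, and ids are placeholders (in the repo they come from default_config.properties).

import org.apache.storm.Config;
import org.apache.storm.StormSubmitter;
import org.apache.storm.kafka.KafkaSpout;
import org.apache.storm.kafka.SpoutConfig;
import org.apache.storm.kafka.StringScheme;
import org.apache.storm.kafka.ZkHosts;
import org.apache.storm.spout.SchemeAsMultiScheme;
import org.apache.storm.topology.BasicOutputCollector;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.topology.base.BaseBasicBolt;
import org.apache.storm.tuple.Tuple;

public class TopologySketch {

    // Simplified stand-in for the SinkTypeBolt shipped in the repo; it only prints each message.
    public static class PrintBolt extends BaseBasicBolt {
        @Override
        public void execute(Tuple tuple, BasicOutputCollector collector) {
            System.out.println("received: " + tuple.getString(0));
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            // terminal bolt, nothing emitted downstream
        }
    }

    public static void main(String[] args) throws Exception {
        // Placeholder ZooKeeper quorum, topic, offset root, and spout id.
        SpoutConfig spoutConfig = new SpoutConfig(new ZkHosts("localhost:2181"),
                "test-topic", "/kafka-spout-offsets", "kafka-spout-id");
        spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());

        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("kafka-spout", new KafkaSpout(spoutConfig), 1);
        builder.setBolt("sink-type-bolt", new PrintBolt(), 1).shuffleGrouping("kafka-spout");

        StormSubmitter.submitTopology("KafkaSpoutStorm", new Config(), builder.createTopology());
    }
}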
 
						
					
			
    
	
		
		
12-25-2016 10:52 AM
@Huahua Wei The lsof output suggests that your ZooKeeper log location is /var/log/zookeeper/zookeeper-zookeeper-server-insightcluster132.out
						
					
			
    
	
		
		
12-21-2016 06:56 AM
@Hoang Le This is to get the maximum AM resource percent per queue's configured capacity. yarn.nodemanager.resource.memory-mb is the amount of memory that a NodeManager announces to the ResourceManager.
						
					
			
    
	
		
		
12-20-2016 02:19 PM (2 Kudos)
Running the Sqoop command from a Java program with the -verbose option can result in a race condition while obtaining a lock on the console appender. We can work around this with the help of the SSHXCUTE framework, which keeps the Java program and the Sqoop command in separate execution contexts.

ENV: HDP 2.4

Java Version: JDK 8

Step 1: Download the sshxcute jar from https://sourceforge.net/projects/sshxcute/

Step 2: Create RunSqoopCommand.java

import net.neoremind.sshxcute.core.SSHExec;
import net.neoremind.sshxcute.core.ConnBean;
import net.neoremind.sshxcute.task.CustomTask;
import net.neoremind.sshxcute.task.impl.ExecCommand;
public class RunSqoopCommand {
public static void main(String args[]) throws Exception{
    ConnBean cb = new ConnBean("localhost", "root","hadoop");
    SSHExec ssh = SSHExec.getInstance(cb);          
    ssh.connect();
    CustomTask sqoopCommand = new ExecCommand("sqoop import -Dorg.apache.sqoop.splitter.allow_text_splitter=true"
            + " -Dmapred.job.name=test --connect jdbc:oracle:thin:@10.0.2.12:1521:XE"
            + " --table TEST_INCREMENTAL -m 1 --username system"
            + " --password oracle --target-dir /tmp/test26"
            + " --verbose");
    ssh.exec(sqoopCommand);
    ssh.disconnect();   
}
}
Step 3: Compile the program

javac -cp sshxcute-1.0.jar RunSqoopCommand.java

Step 4: Run the program

java -cp .:sshxcute-1.0.jar RunSqoopCommand
						
					
    
	
		
		
12-20-2016 01:59 PM (3 Kudos)
These are the steps to build and run a Spark Streaming application; it was built and tested on HDP-2.5.

Setup:

ENV: HDP 2.5
scala: 2.10.4
sbt: 0.13.11

mkdir spark-streaming-example
cd spark-streaming-example/ 
mkdir -p src/main/scala 
cd src/main/scala

Sample code:

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{SQLContext, SaveMode}
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Seconds, StreamingContext, Time}
object SqlNetworkWordCount {
        def main(args: Array[String]) {
                val sparkConf = new SparkConf().setAppName("SqlNetworkWordCount")
                val ssc = new StreamingContext(sparkConf, Seconds(2))
                val lines = ssc.socketTextStream(args(0), args(1).toInt, StorageLevel.MEMORY_AND_DISK_SER)
                val words = lines.flatMap(_.split(" "))
                // Convert RDDs of the words DStream to DataFrame and run SQL query
                words.foreachRDD((rdd: RDD[String], time: Time) => {
                        val sqlContext = SQLContextSingleton.getInstance(rdd.sparkContext)
                        import sqlContext.implicits._
                        val wordsDataFrame = rdd.map(w => Record(w)).toDF()
                        wordsDataFrame.write.mode(SaveMode.Append).parquet("/tmp/parquet");
                        })
                ssc.start()
                ssc.awaitTermination()
        }
}
case class Record(word: String)
object SQLContextSingleton {
        @transient  private var instance: SQLContext = _
        def getInstance(sparkContext: SparkContext): SQLContext = {
                if (instance == null) {
                        instance = new SQLContext(sparkContext)
                }
                instance
        }
}

cd -

vim build.sbt

name := "Spark Streaming Example"

version := "1.0"

scalaVersion := "2.10.4"

libraryDependencies ++= Seq("org.apache.spark" %% "spark-core" % "1.4.1", "org.apache.spark" %% "spark-streaming" % "1.4.1")

Now run sbt package from the project home; it will build a jar at target/scala-2.10/spark-streaming-example_2.10-1.0.jar.

Run this jar using spark-submit:

bin/spark-submit --class SqlNetworkWordCount target/scala-2.10/spark-streaming-example_2.10-1.0.jar <hostname> 6002

To test this program, open a different terminal, run nc -lk `hostname` 6002, hit enter, and type anything on that console; it will be processed and displayed on the Spark console.
						
					
    
	
		
		
12-19-2016 12:40 PM (4 Kudos)
While debugging a problem with delete topic, I dug into the Kafka code to learn how the delete command works. This is the sequence of events that occurs during command execution:

1. TopicCommand issues the topic deletion:

/usr/hdp/current/kafka-broker/bin/kafka-run-class.sh kafka.admin.TopicCommand --zookeeper rkk3.hdp.local:2181 --delete --topic sample

2. This creates a new admin path /admin/delete_topics/<topic>.

3. The controller listens for child changes on /admin/delete_topics and starts topic deletion for the respective topics.

4. The controller has a background thread that handles topic deletion. A topic's deletion can be started only by the onPartitionDeletion callback on the controller.

5. Once the DeleteTopicsThread is invoked, it looks for topicsToBeDeleted and, for each topic, deregisters the partition change listener on the deleted topic. This is to prevent the partition change listener firing before the new topic listener when a deleted topic gets auto-created.

6. The controller removes this replica from the state machine as well as from its partition assignment cache.

7. It deletes all topic state from the controllerContext as well as from ZooKeeper and finally deletes the /brokers/topics/<topic> path.

8. onTopicDeletion is called back by the DeleteTopicsThread. This lets each broker know that this topic is being deleted and can be removed from their caches.
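To watch this sequence from the outside, a small ZooKeeper client check can help. This is only a sketch under assumptions: the quorum address and topic name are taken from the example command above, the znode paths are the ones described in the steps, and the ZooKeeper client jar is assumed to be on the classpath.

import java.util.List;
import org.apache.zookeeper.ZooKeeper;

public class CheckTopicDeletion {
    public static void main(String[] args) throws Exception {
        // 30s session timeout; no watcher logic is needed for a one-off read.
        ZooKeeper zk = new ZooKeeper("rkk3.hdp.local:2181", 30000, event -> { });
        // Topics queued for deletion show up as children of /admin/delete_topics (step 2).
        List<String> pending = zk.getChildren("/admin/delete_topics", false);
        System.out.println("Topics pending deletion: " + pending);
        // After step 7 the topic path itself should be gone.
        boolean stillThere = zk.exists("/brokers/topics/sample", false) != null;
        System.out.println("/brokers/topics/sample still exists: " + stillThere);
        zk.close();
    }
}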
						
					
    
	
		
		
12-21-2016 07:40 AM
add jar /usr/hdp/current/hive-client/lib/commons-httpclient-3.0.1.jar

When I add the above jar it works fine, but it does not work when I try it for a new table. After I restart the Hive session, the jar works fine again. How do I permanently add that jar in Hive?
						
					
    
	
		
		
12-18-2016 06:03 PM (2 Kudos)
ENV: HDP 2.4.2

STEP 1: Setting up MySQL SSL

# Create clean environment
shell> rm -rf newcerts
shell> mkdir newcerts && cd newcerts
# Create CA certificate
shell> openssl genrsa 2048 > ca-key.pem
shell> openssl req -new -x509 -nodes -days 3600 \
         -key ca-key.pem -out ca.pem
# Create server certificate, remove passphrase, and sign it
# server-cert.pem = public key, server-key.pem = private key
shell> openssl req -newkey rsa:2048 -days 3600 \
         -nodes -keyout server-key.pem -out server-req.pem
shell> openssl rsa -in server-key.pem -out server-key.pem
shell> openssl x509 -req -in server-req.pem -days 3600 \
         -CA ca.pem -CAkey ca-key.pem -set_serial 01 -out server-cert.pem
# Create client certificate, remove passphrase, and sign it
# client-cert.pem = public key, client-key.pem = private key
shell> openssl req -newkey rsa:2048 -days 3600 \
         -nodes -keyout client-key.pem -out client-req.pem
shell> openssl rsa -in client-key.pem -out client-key.pem
shell> openssl x509 -req -in client-req.pem -days 3600 \
         -CA ca.pem -CAkey ca-key.pem -set_serial 01 -out client-cert.pem

STEP 2: Update my.cnf as follows and restart MySQL

[mysqld]
ssl-ca=/home/hive/ca-cert.pem
ssl-cert=/home/hive/server-cert.pem
ssl-key=/home/hive/server-key.pem

STEP 3: Grant privileges to the hive user

mysql> GRANT ALL PRIVILEGES ON *.* TO 'hive'@'%' IDENTIFIED BY 'hive' REQUIRE SSL;
mysql> FLUSH PRIVILEGES;

Import the client cert and key into a keystore. As there is no direct way to do it, I took help from this guide: http://www.agentbob.info/agentbob/79-AB.html. Convert the cert and PEM key into DER format and import them using the Java program provided at the link.

STEP 4: Edit hive-env.sh

# specify the truststore location and password with the hive client opts
if [ "$SERVICE" = "hiveserver2" ]; then
 export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS -Djavax.net.ssl.trustStore=/home/hive/keystore.ImportKey -Djavax.net.ssl.trustStorePassword=importkey"
fi

STEP 5: Update hive-site.xml

<property>
  <name>javax.jdo.option.ConnectionURL</name>
  <value>jdbc:mysql://sandbox.hortonworks.com/hive?createDatabaseIfNotExist=true&amp;useSSL=true&amp;verifyServerCertificate=false</value>
</property>

STEP 6: Restart HS2, which is now able to connect to MySQL over SSL.
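To double-check that the metastore database connection really negotiates SSL, a small standalone JDBC test can be run with the same truststore settings. This is a sketch under assumptions: it reuses the host, database, and hive/hive credentials from the steps above, and the MySQL Connector/J jar is assumed to be on the classpath.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class MySqlSslCheck {
    public static void main(String[] args) throws Exception {
        // Same options as the hive-site.xml connection URL above.
        String url = "jdbc:mysql://sandbox.hortonworks.com/hive?useSSL=true&verifyServerCertificate=false";
        // Run with: java -Djavax.net.ssl.trustStore=/home/hive/keystore.ImportKey \
        //                -Djavax.net.ssl.trustStorePassword=importkey MySqlSslCheck
        try (Connection conn = DriverManager.getConnection(url, "hive", "hive");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SHOW SESSION STATUS LIKE 'Ssl_cipher'")) {
            while (rs.next()) {
                // A non-empty cipher value means the session is encrypted.
                System.out.println(rs.getString(1) + " = " + rs.getString(2));
            }
        }
    }
}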
						
					
    
	
		
		
04-23-2018 12:36 PM
@Rajkumar Singh I did the same thing, but I'm getting either an HTTP Error 401 or 404, or a certificate error. The cluster I'm testing this on is also Kerberized.
						
					