<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>question Re: Spark Hbase connector for Spark 1.6.1 version in Archives of Support Questions (Read Only)</title>
    <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-Hbase-connector-for-Spark-1-6-1-version/m-p/135396#M43745</link>
    <description>&lt;P&gt;Latest reply in this thread: after following the steps above with zhzhan/shc:0.0.11-1.6.1-s_2.10, the code fails with java.io.IOException caused by java.lang.NoSuchMethodError: org.apache.hadoop.hbase.client.RpcRetryingCallerFactory.instantiate. The full post, including the stack trace, appears in the matching item below.&lt;/P&gt;</description>
    <pubDate>Mon, 24 Oct 2016 17:47:54 GMT</pubDate>
    <dc:creator>senthilkumarP</dc:creator>
    <dc:date>2016-10-24T17:47:54Z</dc:date>
    <item>
      <title>Spark Hbase connector for Spark 1.6.1 version</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-Hbase-connector-for-Spark-1-6-1-version/m-p/135392#M43741</link>
      <description>&lt;P&gt;We are planning to use the Spark HBase connector from Hortonworks for a new project: [https://github.com/hortonworks-spark/shc]&lt;/P&gt;&lt;P&gt;Since we are using HDP 2.4.2, the supported Spark version is 1.6.1.&lt;/P&gt;&lt;P style="margin-left: 20px;"&gt;Can we use this Spark-HBase connector JAR with Spark 1.6.1?&lt;/P&gt;</description>
      <pubDate>Mon, 17 Oct 2016 22:56:57 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-Hbase-connector-for-Spark-1-6-1-version/m-p/135392#M43741</guid>
      <dc:creator>sancar_sn</dc:creator>
      <dc:date>2016-10-17T22:56:57Z</dc:date>
    </item>
    <item>
      <title>Re: Spark Hbase connector for Spark 1.6.1 version</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-Hbase-connector-for-Spark-1-6-1-version/m-p/135393#M43742</link>
      <description>&lt;P&gt;@&lt;A href="https://community.hortonworks.com/users/13783/sancarsn.html"&gt;Sankaraiah Narayanasamy&lt;/A&gt;&lt;/P&gt;&lt;P&gt;Yes, that is supported. I am sure you have already researched this connector. This article points out that Spark 1.6.1 is supported, and in practice the connector works with any Spark version since 1.2: &lt;A href="http://hortonworks.com/blog/spark-hbase-dataframe-based-hbase-connector/" target="_blank"&gt;http://hortonworks.com/blog/spark-hbase-dataframe-based-hbase-connector/&lt;/A&gt;. The GitHub repository confirms the same; see the properties section of the pom.xml: &lt;A href="https://github.com/hortonworks-spark/shc/blob/master/pom.xml" target="_blank"&gt;https://github.com/hortonworks-spark/shc/blob/master/pom.xml&lt;/A&gt;:&lt;/P&gt;&lt;TABLE&gt;&lt;TBODY&gt;&lt;TR&gt;&lt;TD&gt;&amp;lt;properties&amp;gt;&lt;/TD&gt;&lt;/TR&gt;&lt;TR&gt;&lt;TD&gt;    &amp;lt;spark.version&amp;gt;1.6.1&amp;lt;/spark.version&amp;gt;&lt;/TD&gt;&lt;/TR&gt;&lt;TR&gt;&lt;TD&gt;    &amp;lt;hbase.version&amp;gt;1.1.2&amp;lt;/hbase.version&amp;gt;&lt;/TD&gt;&lt;/TR&gt;&lt;/TBODY&gt;&lt;/TABLE&gt;&lt;P&gt;Use the Spark-on-HBase connector as a standard Spark package.&lt;/P&gt;&lt;P&gt;+++&lt;/P&gt;&lt;P&gt;If the response was helpful, please vote and accept the best answer.&lt;/P&gt;</description>
      <pubDate>Tue, 18 Oct 2016 10:06:38 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-Hbase-connector-for-Spark-1-6-1-version/m-p/135393#M43742</guid>
      <dc:creator>cstanca</dc:creator>
      <dc:date>2016-10-18T10:06:38Z</dc:date>
    </item>
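    <!-- Editor's note: a minimal sketch, not from the original thread, of how one
         might confirm the cluster's Spark and Scala versions before picking the
         matching shc artifact. The expected values shown are assumptions based on
         the versions discussed above.

         // Run inside spark-shell on the cluster:
         println(sc.version)                           // should print "1.6.1" here
         println(scala.util.Properties.versionString)  // should report Scala 2.10.x,
                                                       // matching the _2.10 suffix
                                                       // of the connector artifact
    -->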
    <item>
      <title>Re: Spark Hbase connector for Spark 1.6.1 version</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-Hbase-connector-for-Spark-1-6-1-version/m-p/135394#M43743</link>
      <description>&lt;A rel="user" href="https://community.cloudera.com/users/3486/cstanca.html" nodeid="3486"&gt;@Constantin Stanca&lt;/A&gt;&lt;P&gt;: Can I use this as a Maven dependency, or should I use it as a standard Spark package? What is the difference? I have never used a standard Spark package.&lt;/P&gt;</description>
      <pubDate>Tue, 18 Oct 2016 12:33:48 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-Hbase-connector-for-Spark-1-6-1-version/m-p/135394#M43743</guid>
      <dc:creator>sancar_sn</dc:creator>
      <dc:date>2016-10-18T12:33:48Z</dc:date>
    </item>
    <item>
      <title>Re: Spark Hbase connector for Spark 1.6.1 version</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-Hbase-connector-for-Spark-1-6-1-version/m-p/135395#M43744</link>
      <description>&lt;P&gt;@&lt;A href="https://community.hortonworks.com/users/13783/sancarsn.html"&gt;Sankaraiah Narayanasamy&lt;/A&gt;&lt;/P&gt;&lt;P&gt;To include the Spark-on-HBase connector as a standard Spark package, pass it to spark-shell, pyspark, or spark-submit when launching your Spark application:&lt;/P&gt;&lt;P&gt;&amp;gt; $SPARK_HOME/bin/spark-shell --packages zhzhan:shc:0.0.11-1.6.1-s_2.10&lt;/P&gt;&lt;P&gt;You can also include the package as a dependency in your SBT file. The format is spark-package-name:version:&lt;/P&gt;&lt;P&gt;spDependencies += "zhzhan/shc:0.0.11-1.6.1-s_2.10"&lt;/P&gt;&lt;P&gt;You can also use it as a Maven dependency (see the sketch following this item).&lt;/P&gt;&lt;P&gt;All options are possible.&lt;/P&gt;</description>
      <pubDate>Tue, 18 Oct 2016 21:15:58 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-Hbase-connector-for-Spark-1-6-1-version/m-p/135395#M43744</guid>
      <dc:creator>cstanca</dc:creator>
      <dc:date>2016-10-18T21:15:58Z</dc:date>
    </item>
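    <!-- Editor's note: a hedged sketch of the Maven option mentioned above. The
         coordinates follow the spark-packages convention groupId:artifactId:version,
         taken from the zhzhan:shc:0.0.11-1.6.1-s_2.10 coordinates in the post; the
         repository URL is an assumption (the spark-packages Maven repository), not
         something stated in the thread.

         <repositories>
           <repository>
             <id>spark-packages</id>
             <url>https://repos.spark-packages.org/</url>
           </repository>
         </repositories>
         <dependencies>
           <dependency>
             <groupId>zhzhan</groupId>
             <artifactId>shc</artifactId>
             <version>0.0.11-1.6.1-s_2.10</version>
           </dependency>
         </dependencies>
    -->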
    <item>
      <title>Re: Spark Hbase connector for Spark 1.6.1 version</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-Hbase-connector-for-Spark-1-6-1-version/m-p/135396#M43745</link>
      <description>&lt;P&gt;I have followed the above steps and am using the JAR "zhzhan/shc:0.0.11-1.6.1-s_2.10".&lt;/P&gt;&lt;P&gt;On executing the code, I am getting the following exception:&lt;/P&gt;&lt;P&gt;Exception in thread "main" java.io.IOException: java.lang.reflect.InvocationTargetException
at org.apache.hadoop.hbase.client.ConnectionFactory.createConnection(ConnectionFactory.java:240)
at org.apache.hadoop.hbase.client.ConnectionFactory.createConnection(ConnectionFactory.java:218)
at org.apache.hadoop.hbase.client.ConnectionFactory.createConnection(ConnectionFactory.java:119)
at org.apache.spark.sql.execution.datasources.hbase.RegionResource.init(HBaseResources.scala:93)
at org.apache.spark.sql.execution.datasources.hbase.ReferencedResource$class.liftedTree1$1(HBaseResources.scala:57)
at org.apache.spark.sql.execution.datasources.hbase.ReferencedResource$class.acquire(HBaseResources.scala:54)
at org.apache.spark.sql.execution.datasources.hbase.RegionResource.acquire(HBaseResources.scala:88)
at org.apache.spark.sql.execution.datasources.hbase.ReferencedResource$class.releaseOnException(HBaseResources.scala:74)
at org.apache.spark.sql.execution.datasources.hbase.RegionResource.releaseOnException(HBaseResources.scala:88)
at org.apache.spark.sql.execution.datasources.hbase.RegionResource.&amp;lt;init&amp;gt;(HBaseResources.scala:108)
at org.apache.spark.sql.execution.datasources.hbase.HBaseTableScanRDD.getPartitions(HBaseTableScan.scala:60)
at org.apache.spark.rdd.RDD$anonfun$partitions$2.apply(RDD.scala:239)
at org.apache.spark.rdd.RDD$anonfun$partitions$2.apply(RDD.scala:237)
at scala.Option.getOrElse(Option.scala:120)
at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
at org.apache.spark.rdd.RDD$anonfun$partitions$2.apply(RDD.scala:239)
at org.apache.spark.rdd.RDD$anonfun$partitions$2.apply(RDD.scala:237)
at scala.Option.getOrElse(Option.scala:120)
at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
at org.apache.spark.rdd.RDD$anonfun$partitions$2.apply(RDD.scala:239)
at org.apache.spark.rdd.RDD$anonfun$partitions$2.apply(RDD.scala:237)
at scala.Option.getOrElse(Option.scala:120)
at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:190)
at org.apache.spark.sql.execution.Limit.executeCollect(basicOperators.scala:165)
at org.apache.spark.sql.execution.SparkPlan.executeCollectPublic(SparkPlan.scala:174)
at org.apache.spark.sql.DataFrame$anonfun$org$apache$spark$sql$DataFrame$execute$1$1.apply(DataFrame.scala:1538)
at org.apache.spark.sql.DataFrame$anonfun$org$apache$spark$sql$DataFrame$execute$1$1.apply(DataFrame.scala:1538)
at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:56)
at org.apache.spark.sql.DataFrame.withNewExecutionId(DataFrame.scala:2125)
at org.apache.spark.sql.DataFrame.org$apache$spark$sql$DataFrame$execute$1(DataFrame.scala:1537)
at org.apache.spark.sql.DataFrame.org$apache$spark$sql$DataFrame$collect(DataFrame.scala:1544)
at org.apache.spark.sql.DataFrame$anonfun$head$1.apply(DataFrame.scala:1414)
at org.apache.spark.sql.DataFrame$anonfun$head$1.apply(DataFrame.scala:1413)
at org.apache.spark.sql.DataFrame.withCallback(DataFrame.scala:2138)
at org.apache.spark.sql.DataFrame.head(DataFrame.scala:1413)
at org.apache.spark.sql.DataFrame.take(DataFrame.scala:1495)
at org.apache.spark.sql.DataFrame.showString(DataFrame.scala:171)
at org.apache.spark.sql.DataFrame.show(DataFrame.scala:394)
at org.apache.spark.sql.DataFrame.show(DataFrame.scala:355)
at org.apache.spark.sql.DataFrame.show(DataFrame.scala:363)
at com.sparhbaseintg.trnsfm.HBasesrc$.main(Hbasesrc.scala:83)
at com.sparhbaseintg.trnsfm.HBasesrc.main(Hbasesrc.scala)
Caused by: java.lang.reflect.InvocationTargetException
at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
at java.lang.reflect.Constructor.newInstance(Constructor.java:423)
at org.apache.hadoop.hbase.client.ConnectionFactory.createConnection(ConnectionFactory.java:238)
... 44 more
Caused by: java.lang.NoSuchMethodError: org.apache.hadoop.hbase.client.RpcRetryingCallerFactory.instantiate(Lorg/apache/hadoop/conf/Configuration;Lorg/apache/hadoop/hbase/client/ServerStatisticTracker;)Lorg/apache/hadoop/hbase/client/RpcRetryingCallerFactory;
at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.createAsyncProcess(ConnectionManager.java:2242)
at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.&amp;lt;init&amp;gt;(ConnectionManager.java:690)
at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.&amp;lt;init&amp;gt;(ConnectionManager.java:630)&lt;/P&gt;&lt;P&gt;I can see the &lt;STRONG&gt;org.apache.hadoop.hbase.client.RpcRetryingCallerFactory.instantiate&lt;/STRONG&gt; method in the hbase-client JAR. I am not sure why it is not being resolved.&lt;/P&gt;&lt;P&gt;Please help.&lt;/P&gt;&lt;P&gt;Thanks!&lt;/P&gt;</description>
      <pubDate>Mon, 24 Oct 2016 17:47:54 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-Hbase-connector-for-Spark-1-6-1-version/m-p/135396#M43745</guid>
      <dc:creator>senthilkumarP</dc:creator>
      <dc:date>2016-10-24T17:47:54Z</dc:date>
    </item>
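    <!-- Editor's note: the stack trace above ends in DataFrame.show, so the failing
         code was an SHC read triggering its first HBase connection. A minimal Scala
         sketch of that pattern, following the shc README; the table name and column
         mappings here are hypothetical, not from the original post.

         import org.apache.spark.sql.execution.datasources.hbase.HBaseTableCatalog

         // Catalog mapping an HBase table to DataFrame columns (illustrative names)
         val catalog = s"""{
           |"table":{"namespace":"default", "name":"mytable"},
           |"rowkey":"key",
           |"columns":{
             |"col0":{"cf":"rowkey", "col":"key", "type":"string"},
             |"col1":{"cf":"cf1", "col":"c1", "type":"string"}
           |}
         }""".stripMargin

         val df = sqlContext.read
           .options(Map(HBaseTableCatalog.tableCatalog -> catalog))
           .format("org.apache.spark.sql.execution.datasources.hbase")
           .load()

         df.show()  // first action; opens the HBase connection, which is where
                    // the NoSuchMethodError above surfaced
    -->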
    <item>
      <title>Re: Spark Hbase connector for Spark 1.6.1 version</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-Hbase-connector-for-Spark-1-6-1-version/m-p/135397#M43746</link>
      <description>&lt;P&gt;Is HBase running? Do you have a firewall blocking it?&lt;/P&gt;&lt;P&gt;What JDK are you using? Perhaps an incompatible version? Are there any other logs or details you can share?&lt;/P&gt;</description>
      <pubDate>Mon, 24 Oct 2016 18:35:10 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-Hbase-connector-for-Spark-1-6-1-version/m-p/135397#M43746</guid>
      <dc:creator>TimothySpann</dc:creator>
      <dc:date>2016-10-24T18:35:10Z</dc:date>
    </item>
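    <!-- Editor's note: one additional diagnostic, not from the thread, that helps
         with NoSuchMethodError: print which JAR the JVM actually loaded the class
         from, since an older hbase-client on the classpath can shadow the one that
         contains the expected method.

         // Scala sketch; run in the same JVM/classpath as the failing job:
         println(classOf[org.apache.hadoop.hbase.client.RpcRetryingCallerFactory]
           .getProtectionDomain.getCodeSource.getLocation)
    -->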
    <item>
      <title>Re: Spark Hbase connector for Spark 1.6.1 version</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-Hbase-connector-for-Spark-1-6-1-version/m-p/135398#M43747</link>
      <description>&lt;P&gt;Adding the hbase-client JAR fixed the issue.&lt;/P&gt;&lt;P&gt;Thanks, Timothy!&lt;/P&gt;</description>
      <pubDate>Tue, 25 Oct 2016 22:09:01 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-Hbase-connector-for-Spark-1-6-1-version/m-p/135398#M43747</guid>
      <dc:creator>senthilkumarP</dc:creator>
      <dc:date>2016-10-25T22:09:01Z</dc:date>
    </item>
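    <!-- Editor's note: the fix above was adding the hbase-client JAR. A hedged
         build.sbt sketch of one way to do that; version 1.1.2 is taken from the
         shc pom.xml cited earlier in this thread, so verify it against the HBase
         version your cluster actually runs.

         libraryDependencies += "org.apache.hbase" % "hbase-client" % "1.1.2"
    -->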
  </channel>
</rss>

