<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>question Re: Spark to Phoenix in Archives of Support Questions (Read Only)</title>
    <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-to-Phoenix/m-p/95913#M9246</link>
    <description>Archived Cloudera Community thread on connecting Spark to Apache Phoenix, covering both the Phoenix JDBC driver and the phoenix-spark integration.</description>
    <pubDate>Tue, 17 Nov 2015 01:23:11 GMT</pubDate>
    <dc:creator>gbraccialli3</dc:creator>
    <dc:date>2015-11-17T01:23:11Z</dc:date>
    <item>
      <title>Spark to Phoenix</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-to-Phoenix/m-p/95906#M9239</link>
      <description>&lt;P&gt;Trying to connect Spark with Phoenix using JDBC. I appended the location of phoenix-client.jar to SPARK_CLASSPATH in spark-env.sh.&lt;/P&gt;&lt;P&gt;When I launch the Spark shell, I get the following errors:&lt;/P&gt;&lt;PRE&gt;&amp;lt;console&amp;gt;:10: error: not found: value sqlContext
       import sqlContext.implicits._
              ^
&amp;lt;console&amp;gt;:10: error: not found: value sqlContext
       import sqlContext.sql&lt;/PRE&gt;</description>
      <pubDate>Fri, 23 Oct 2015 06:17:42 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-to-Phoenix/m-p/95906#M9239</guid>
      <dc:creator>vjain</dc:creator>
      <dc:date>2015-10-23T06:17:42Z</dc:date>
    </item>
    <item>
      <title>Re: Spark to Phoenix</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-to-Phoenix/m-p/95907#M9240</link>
      <description>&lt;P&gt;Is this the full stack trace, or is there more?&lt;/P&gt;</description>
      <pubDate>Fri, 23 Oct 2015 06:56:10 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-to-Phoenix/m-p/95907#M9240</guid>
      <dc:creator>deepesh1</dc:creator>
      <dc:date>2015-10-23T06:56:10Z</dc:date>
    </item>
    <item>
      <title>Re: Spark to Phoenix</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-to-Phoenix/m-p/95908#M9241</link>
      <description>&lt;P&gt;Can you please share the code and full error log?&lt;/P&gt;</description>
      <pubDate>Fri, 23 Oct 2015 06:59:31 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-to-Phoenix/m-p/95908#M9241</guid>
      <dc:creator>ofermend</dc:creator>
      <dc:date>2015-10-23T06:59:31Z</dc:date>
    </item>
    <item>
      <title>Re: Spark to Phoenix</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-to-Phoenix/m-p/95909#M9242</link>
      <description>&lt;P&gt;See the attached stack trace: &lt;A href="https://community.cloudera.com/legacyfs/online/attachments/309-spark-phoenix-stack-trace.txt"&gt;spark-phoenix-stack-trace.txt&lt;/A&gt;. I ran:&lt;/P&gt;&lt;P&gt;spark-submit --master yarn&lt;/P&gt;</description>
      <pubDate>Fri, 23 Oct 2015 21:33:13 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-to-Phoenix/m-p/95909#M9242</guid>
      <dc:creator>vjain</dc:creator>
      <dc:date>2015-10-23T21:33:13Z</dc:date>
    </item>
    <item>
      <title>Re: Spark to Phoenix</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-to-Phoenix/m-p/95910#M9243</link>
      <description>&lt;P&gt;It works fine ... if I don't modify the Spark classpath&lt;/P&gt;</description>
      <pubDate>Fri, 23 Oct 2015 21:33:56 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-to-Phoenix/m-p/95910#M9243</guid>
      <dc:creator>vjain</dc:creator>
      <dc:date>2015-10-23T21:33:56Z</dc:date>
    </item>
    <item>
      <title>Re: Spark to Phoenix</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-to-Phoenix/m-p/95911#M9244</link>
      <description>&lt;P&gt;This looks like a classpath issue. I suspect your phoenix-client.jar is packaged with classes compiled against a different Jackson jar than what it finds on the classpath.&lt;/P&gt;</description>
      <pubDate>Fri, 23 Oct 2015 21:47:01 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-to-Phoenix/m-p/95911#M9244</guid>
      <dc:creator>deepesh1</dc:creator>
      <dc:date>2015-10-23T21:47:01Z</dc:date>
    </item>
    <item>
      <title>Re: Spark to Phoenix</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-to-Phoenix/m-p/95912#M9245</link>
      <description>&lt;P&gt;Looks like some conflict between the Spark and Phoenix jars. No?&lt;/P&gt;&lt;P&gt;Googling the data in the stack trace, it looks related to Jackson. I'm not familiar with Phoenix - does it use its own version?&lt;/P&gt;</description>
      <pubDate>Fri, 23 Oct 2015 22:50:52 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-to-Phoenix/m-p/95912#M9245</guid>
      <dc:creator>ofermend</dc:creator>
      <dc:date>2015-10-23T22:50:52Z</dc:date>
    </item>
    <item>
      <title>Re: Spark to Phoenix</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-to-Phoenix/m-p/95913#M9246</link>
      <description>&lt;P&gt;&lt;A rel="user" href="https://community.cloudera.com/users/420/vjain.html" nodeid="420"&gt;@Vedant Jain&lt;/A&gt;&lt;/P&gt;&lt;P&gt;The example below works with Sandbox 2.3.2:&lt;/P&gt;&lt;P&gt;PS: Note I haven't changed the classpath; I only used the --jars option.&lt;/P&gt;&lt;P&gt;From the shell:&lt;/P&gt;&lt;PRE&gt;spark-shell --master yarn-client --jars /usr/hdp/current/phoenix-client/phoenix-client.jar
&lt;/PRE&gt;&lt;P&gt;Inside spark-shell:&lt;/P&gt;&lt;PRE&gt;// Option 1: read a whole table through the Phoenix JDBC driver
val jdbcDF = sqlContext.read.format("jdbc").options( 
  Map(
  "driver" -&amp;gt; "org.apache.phoenix.jdbc.PhoenixDriver",
  "url" -&amp;gt; "jdbc:phoenix:sandbox.hortonworks.com:2181:/hbase-unsecure",
  "dbtable" -&amp;gt; "TABLE1")).load()
  
jdbcDF.show
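
// If you want to query the table with Spark SQL as well, the plain Spark 1.x API works
// here (nothing Phoenix-specific; the temp table name "t1" is arbitrary):
jdbcDF.registerTempTable("t1")
sqlContext.sql("select count(*) from t1").show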


// Option 2: run a custom query with JdbcRDD
import java.sql.{Connection, DriverManager, DatabaseMetaData, ResultSet}
import org.apache.spark.rdd.JdbcRDD

def getConn(driverClass: =&amp;gt; String, connStr: =&amp;gt; String, user: =&amp;gt; String, pass: =&amp;gt; String): Connection = {
  var conn:Connection = null
  try{
    Class.forName(driverClass)
     conn = DriverManager.getConnection(connStr, user, pass)
  }catch{ case e: Exception =&amp;gt; e.printStackTrace }
  conn
}

// JdbcRDD arguments after the query: lowerBound = 1, upperBound = 10, numPartitions = 2;
// the bounds are substituted into the two "?" placeholders of the query
val myRDD = new JdbcRDD( sc, () =&amp;gt; getConn("org.apache.phoenix.jdbc.PhoenixDriver", "jdbc:phoenix:localhost:2181:/hbase-unsecure", "", "") ,
"select sum(10) from TABLE1 where ? &amp;lt;= id and id &amp;lt;= ?",
1, 10, 2)
myRDD.take(10)

val myRDD = new JdbcRDD( sc, () =&amp;gt; getConn("org.apache.phoenix.jdbc.PhoenixDriver", "jdbc:phoenix:localhost:2181:/hbase-unsecure", "", "") ,
"select col1 from TABLE1 where ? &amp;lt;= id and id &amp;lt;= ?",
1, 10, 2)

myRDD.take(10)
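
// Note: with the default mapRow, JdbcRDD returns each row as an Array[Object].
// A custom mapRow function (ResultSet =&amp;gt; T) gives typed values; a sketch, assuming
// col1 can be read as a String:
val typedRDD = new JdbcRDD( sc, () =&amp;gt; getConn("org.apache.phoenix.jdbc.PhoenixDriver", "jdbc:phoenix:localhost:2181:/hbase-unsecure", "", "") ,
"select col1 from TABLE1 where ? &amp;lt;= id and id &amp;lt;= ?",
1, 10, 2,
(rs: ResultSet) =&amp;gt; rs.getString(1))
typedRDD.take(10)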

&lt;/PRE&gt;&lt;P&gt;Also note that the Phoenix team recommends using the phoenix-spark integration instead of JDBC directly: &lt;A target="_blank" href="http://phoenix.apache.org/phoenix_spark.html"&gt;http://phoenix.apache.org/phoenix_spark.html&lt;/A&gt;&lt;/P&gt;&lt;P&gt;Here is an example with the phoenix-spark package:&lt;/P&gt;&lt;P&gt;From the shell:&lt;/P&gt;&lt;PRE&gt;spark-shell --master yarn-client --jars /usr/hdp/current/phoenix-client/phoenix-client.jar,/usr/hdp/current/phoenix-client/lib/phoenix-spark-4.4.0.2.3.2.0-2950.jar --conf "spark.executor.extraClassPath=/usr/hdp/current/phoenix-client/phoenix-client.jar"&lt;/PRE&gt;&lt;P&gt;Inside spark-shell:&lt;/P&gt;&lt;PRE&gt;import org.apache.phoenix.spark._

val df = sqlContext.load(
  "org.apache.phoenix.spark",
  Map("table" -&amp;gt; "TABLE1", "zkUrl" -&amp;gt; "localhost:2181:/hbase-unsecure")
)
df.show
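
// The phoenix_spark page linked above also shows loading a table directly into an RDD;
// a sketch, assuming TABLE1 has columns ID and COL1 and the same zkUrl as above:
val rdd = sc.phoenixTableAsRDD(
  "TABLE1",
  Seq("ID", "COL1"),
  zkUrl = Some("localhost:2181:/hbase-unsecure")
)
rdd.count()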
&lt;/PRE&gt;&lt;P&gt;And here is a sample project that can be built and run through spark-submit:&lt;/P&gt;&lt;P&gt;&lt;A target="_blank" href="https://github.com/gbraccialli/SparkUtils"&gt;https://github.com/gbraccialli/SparkUtils&lt;/A&gt;&lt;/P&gt;&lt;PRE&gt;git clone &lt;A href="https://github.com/gbraccialli/SparkUtils" target="_blank"&gt;https://github.com/gbraccialli/SparkUtils&lt;/A&gt;
cd SparkUtils/
mvn clean package
spark-submit --class com.github.gbraccialli.spark.PhoenixSparkSample target/SparkUtils-1.0.0-SNAPSHOT.jar&lt;/PRE&gt;&lt;P&gt;Also check &lt;A rel="user" href="https://community.cloudera.com/users/157/rgelhausen.html" nodeid="157"&gt;@Randy Gelhausen&lt;/A&gt;'s project, which uses phoenix-spark to automatically load data from Hive to Phoenix:&lt;/P&gt;&lt;P&gt;&lt;A target="_blank" href="https://github.com/randerzander/HiveToPhoenix"&gt;https://github.com/randerzander/HiveToPhoenix&lt;/A&gt; (I copied my pom.xml from Randy's project)&lt;/P&gt;</description>
      <pubDate>Tue, 17 Nov 2015 01:23:11 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-to-Phoenix/m-p/95913#M9246</guid>
      <dc:creator>gbraccialli3</dc:creator>
      <dc:date>2015-11-17T01:23:11Z</dc:date>
    </item>
    <item>
      <title>Re: Spark to Phoenix</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-to-Phoenix/m-p/95914#M9247</link>
      <description>&lt;P&gt;Got this working in Zeppelin. Similar answer to @&lt;A href="https://community.hortonworks.com/users/238/gbraccialli.html"&gt;Guilherme Braccialli&lt;/A&gt;, but translated for Zeppelin users.&lt;/P&gt;&lt;P&gt;1. Run this block first:&lt;/P&gt;&lt;PRE&gt;%dep 
z.reset 
z.load("/usr/hdp/current/phoenix-client/phoenix-4.4.0.2.3.2.0-2950-client.jar") 
z.load("/usr/hdp/current/phoenix-client/lib/phoenix-spark-4.4.0.2.3.2.0-2950.jar")&lt;/PRE&gt;&lt;P&gt;2. Then in a separate snippet run this&lt;/P&gt;&lt;PRE&gt;import org.apache.phoenix.spark._
val df = sqlc.load(
  "org.apache.phoenix.spark",
  Map("table" -&amp;gt; "TRANSACTIONHISTORY", 
      "zkUrl" -&amp;gt; "sandbox.hortonworks.com:2181:/hbase-unsecure")
)
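// (Optional, assuming the load above succeeded: register a temp table so the DataFrame
// can also be explored from a %sql paragraph; the name "transactionhistory" is arbitrary.)
df.registerTempTable("transactionhistory")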
df.show&lt;/PRE&gt;</description>
      <pubDate>Thu, 17 Mar 2016 10:14:33 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-to-Phoenix/m-p/95914#M9247</guid>
      <dc:creator>khaslbeck</dc:creator>
      <dc:date>2016-03-17T10:14:33Z</dc:date>
    </item>
    <item>
      <title>Re: Spark to Phoenix</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-to-Phoenix/m-p/95915#M9248</link>
      <description>&lt;P&gt;The solution doesn't work for HDP 2.4.0; running it, I get this error when calling df.show:&lt;/P&gt;&lt;PRE&gt;16/05/12 00:53:21 WARN TaskSetManager: Lost task 0.0 in stage 2.0 (TID 8, usfit-hdpdev-n02.global.internal): java.lang.ClassCastException: org.apache.spark.sql.catalyst.expressions.GenericMutableRow cannot be cast to org.apache.spark.sql.Row&lt;/PRE&gt;&lt;P&gt;If I use pyspark, it also dies with a different stack trace.&lt;/P&gt;&lt;P&gt;I attempted to follow the directions at &lt;A href="http://phoenix.apache.org/phoenix_spark.html"&gt;http://phoenix.apache.org/phoenix_spark.html&lt;/A&gt; and updated the Spark config to include the spark-client.jar for both ‘&lt;EM&gt;spark.executor.extraClassPath&lt;/EM&gt;’ and ‘&lt;EM&gt;spark.driver.extraClassPath&lt;/EM&gt;’; spark-shell then dies with a &lt;A href="http://pastebin.com/vJ5w1N9a"&gt;pretty long stack trace&lt;/A&gt; when attempting to set up the SQLContext. Same thing for pyspark.&lt;/P&gt;&lt;P&gt;It appears there are some incompatibilities between the Phoenix jars and the Spark jars as built in HDP. Does anyone have a workaround?&lt;/P&gt;&lt;P&gt;Thanks!&lt;/P&gt;</description>
      <pubDate>Thu, 12 May 2016 13:13:42 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-to-Phoenix/m-p/95915#M9248</guid>
      <dc:creator>cft</dc:creator>
      <dc:date>2016-05-12T13:13:42Z</dc:date>
    </item>
    <item>
      <title>Re: Spark to Phoenix</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-to-Phoenix/m-p/95916#M9249</link>
      <description>&lt;P&gt;Tried the above lines in the HDP 2.4 sandbox (Zeppelin); it's not working.&lt;/P&gt;&lt;P&gt;&lt;A href="https://community.cloudera.com/legacyfs/online/attachments/4660-error.png"&gt;error.png&lt;/A&gt;&lt;/P&gt;</description>
      <pubDate>Mon, 30 May 2016 23:32:33 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-to-Phoenix/m-p/95916#M9249</guid>
      <dc:creator>venkatHari</dc:creator>
      <dc:date>2016-05-30T23:32:33Z</dc:date>
    </item>
    <item>
      <title>Re: Spark to Phoenix</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-to-Phoenix/m-p/95917#M9250</link>
      <description>&lt;P&gt;Hi &lt;A rel="user" href="https://community.cloudera.com/users/420/vjain.html" nodeid="420"&gt;@Vedant Jain&lt;/A&gt;,&lt;/P&gt;&lt;P&gt;How did you fix this issue? I have HDP 2.4.0 installed and I used the following command to invoke spark-shell:&lt;/P&gt;&lt;P&gt;spark-shell --master yarn-client --jars /usr/hdp/current/phoenix-client/phoenix-client.jar,/usr/hdp/current/phoenix-client/lib/phoenix-spark-4.4.0.2.4.0.0-169.jar&lt;/P&gt;&lt;P&gt;I didn't change anything in the default configurations; I just installed Spark, tried it, and got the exception below:&lt;/P&gt;&lt;P&gt;Caused by: java.lang.ClassCastException: org.apache.spark.sql.catalyst.expressions.GenericMutableRow cannot be cast to org.apache.spark.sql.Row&lt;/P&gt;&lt;P&gt;Any thoughts would be great. Also, I tried what &lt;A rel="user" href="https://community.cloudera.com/users/238/gbraccialli.html" nodeid="238"&gt;@Guilherme Braccialli&lt;/A&gt; said; it didn't work either. Is it a bug in HDP 2.4.0?&lt;/P&gt;</description>
      <pubDate>Mon, 11 Jul 2016 16:25:39 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-to-Phoenix/m-p/95917#M9250</guid>
      <dc:creator>arunpoy</dc:creator>
      <dc:date>2016-07-11T16:25:39Z</dc:date>
    </item>
    <item>
      <title>Re: Spark to Phoenix</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-to-Phoenix/m-p/95918#M9251</link>
      <description>&lt;P&gt;&lt;A rel="user" href="https://community.cloudera.com/users/10412/cft.html" nodeid="10412"&gt;@Chris Tarnas&lt;/A&gt;&lt;/P&gt;&lt;P&gt;You are right, on HDP 2.4 this is broken. But it still works as long as you don't go for the DF option; that one fails. It needs Phoenix 4.7.0.0 to work, which will be available in HDP 2.5.&lt;/P&gt;&lt;P&gt;The phoenix-spark connector for getting HBase data into an RDD (not a DF!) works for me.&lt;/P&gt;&lt;P&gt;An update on &lt;A rel="user" href="https://community.cloudera.com/users/238/gbraccialli.html" nodeid="238"&gt;@Guilherme Braccialli&lt;/A&gt;'s excellent guidance to make phoenix-spark work (the RDD option), but for the 2.4 sandbox:&lt;/P&gt;&lt;PRE&gt;spark-shell --master yarn-client --jars /usr/hdp/2.4.0.0-169/phoenix/phoenix-4.4.0.2.4.0.0-169-client.jar,/usr/hdp/2.4.0.0-169/phoenix/lib/phoenix-spark-4.4.0.2.4.0.0-169.jar --conf "spark.executor.extraClassPath=/usr/hdp/2.4.0.0-169/phoenix/lib/phoenix-spark-4.4.0.2.4.0.0-169.jar:/usr/hdp/2.4.0.0-169/phoenix/phoenix-4.4.0.2.4.0.0-169-client.jar"
&lt;/PRE&gt;</description>
      <pubDate>Tue, 20 Sep 2016 22:05:04 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/Spark-to-Phoenix/m-p/95918#M9251</guid>
      <dc:creator>jknulst</dc:creator>
      <dc:date>2016-09-20T22:05:04Z</dc:date>
    </item>
  </channel>
</rss>

