<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>question Re: Connecting to Hive using local spark jdbc in Support Questions</title>
    <link>https://community.cloudera.com/t5/Support-Questions/Connecting-to-Hive-using-local-spark-jdbc/m-p/373975#M241835</link>
    <description>&lt;P&gt;Yes, I can try this... but the error I am having is on the line right before it where I am creating a spark variable.&lt;BR /&gt;&lt;BR /&gt;Error is here -&lt;/P&gt;&lt;LI-CODE lang="python"&gt;# Imports
from pyspark.sql import SparkSession

# Create SparkSession
spark = SparkSession.builder \
.appName('SparkByExamples.com') \
.config("spark.jars", "/Users/u530241/spark3/spark-3.3.2-bin-hadoop3/jars/hive-jdbc-4.0.0-alpha-2-standalone.jar").enableHiveSupport().getOrCreate()&lt;/LI-CODE&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;I will make a change here once it is solved -&amp;nbsp;&lt;/P&gt;&lt;LI-CODE lang="python"&gt;df = spark.read \
.jdbc("jdbc:hive2://p-cdl-knox-prd-svc.hcscint.net:8443/default;transportMode=http;httpPath=gateway/default/hive;ssl=true;sslTrustStore=/Users/u530241/Downloads/gateway_prod_.jks;trustStorePassword=knoxprod;user=&amp;lt;username&amp;gt;;password=&amp;lt;password&amp;gt;",
"member_connect",
properties={"user": "u530241", "password": "..", "driver":"com.mysql.jdbc.Driver"})&lt;/LI-CODE&gt;</description>
    <pubDate>Mon, 17 Jul 2023 15:00:44 GMT</pubDate>
    <dc:creator>pranav007</dc:creator>
    <dc:date>2023-07-17T15:00:44Z</dc:date>
    <item>
      <title>Connecting to Hive using local spark jdbc</title>
      <link>https://community.cloudera.com/t5/Support-Questions/Connecting-to-Hive-using-local-spark-jdbc/m-p/373910#M241807</link>
      <description>&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;Hello,&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;I am trying to connect to a remote hive cluster which requires kerberos, using spark mysql connector&amp;nbsp;&lt;BR /&gt;&lt;BR /&gt;```&lt;/P&gt;&lt;P&gt;# Imports&lt;BR /&gt;from pyspark.sql import SparkSession&lt;/P&gt;&lt;P&gt;# Create SparkSession&lt;BR /&gt;spark = SparkSession.builder \&lt;BR /&gt;.appName('SparkByExamples.com') \&lt;BR /&gt;.config("spark.jars", "/Users/u530241/spark3/spark-3.3.2-bin-hadoop3/jars/mysql-connector-java-5.1.45-bin.jar").enableHiveSupport().getOrCreate()&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;df = spark.read \&lt;BR /&gt;.jdbc("jdbc:hive2://p-cdl-knox-prd-svc.hcscint.net:8443/default;transportMode=http;httpPath=gateway/default/hive;ssl=true;sslTrustStore=/Users/u530241/Downloads/gateway_prod_.jks;trustStorePassword=knoxprod",&lt;BR /&gt;"member_connect",&lt;BR /&gt;properties={"user": "u530241", "password": "..", "driver":"com.mysql.jdbc.Driver"})&lt;/P&gt;&lt;P&gt;```&lt;BR /&gt;&lt;BR /&gt;&lt;BR /&gt;I keep getting this error -&amp;nbsp;&lt;BR /&gt;```&lt;BR /&gt;IllegalArgumentException: requirement failed: The driver could not open a JDBC connection. Check the URL: jdbc:hive2://p-cdl-knox-prd-svc.hcscint.net:8443/default;transportMode=http;httpPath=gateway/default/hive;ssl=true;sslTrustStore=/Users/u530241/Downloads/gateway_prod_.jks;trustStorePassword=knoxprod&lt;BR /&gt;```&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;Could you please give me some suggestions?&lt;/P&gt;</description>
      <pubDate>Fri, 14 Jul 2023 15:28:27 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Support-Questions/Connecting-to-Hive-using-local-spark-jdbc/m-p/373910#M241807</guid>
      <dc:creator>pranav007</dc:creator>
      <dc:date>2023-07-14T15:28:27Z</dc:date>
    </item>
    <item>
      <title>Re: Connecting to Hive using local spark jdbc</title>
      <link>https://community.cloudera.com/t5/Support-Questions/Connecting-to-Hive-using-local-spark-jdbc/m-p/373915#M241809</link>
      <description>&lt;P&gt;&lt;a href="https://community.cloudera.com/t5/user/viewprofilepage/user-id/105966"&gt;@pranav007&lt;/a&gt;&amp;nbsp;Welcome to the Cloudera Community!&lt;BR /&gt;&lt;BR /&gt;To help you get the best possible solution, I have tagged our Hive expert&amp;nbsp;&lt;a href="https://community.cloudera.com/t5/user/viewprofilepage/user-id/38161"&gt;@cravani&lt;/a&gt;&amp;nbsp; who may be able to assist you further.&lt;BR /&gt;&lt;BR /&gt;Please keep us updated on your post, and we hope you find a satisfactory solution to your query.&lt;/P&gt;</description>
      <pubDate>Fri, 14 Jul 2023 16:32:49 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Support-Questions/Connecting-to-Hive-using-local-spark-jdbc/m-p/373915#M241809</guid>
      <dc:creator>DianaTorres</dc:creator>
      <dc:date>2023-07-14T16:32:49Z</dc:date>
    </item>
    <item>
      <title>Re: Connecting to Hive using local spark jdbc</title>
      <link>https://community.cloudera.com/t5/Support-Questions/Connecting-to-Hive-using-local-spark-jdbc/m-p/373916#M241810</link>
      <description>&lt;P&gt;You seem to be using wrong driver to connect to HiveServer2. MySQL JDBC driver does not work to connect to HS2.&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;You would need Apache Hive JDBC Driver or Cloudera Hive JDBC Driver to connect to Hive. You can download latest Apache Hive JDBC Driver from:&amp;nbsp;&lt;A href="https://repo1.maven.org/maven2/org/apache/hive/hive-jdbc/4.0.0-alpha-2/" target="_blank"&gt;https://repo1.maven.org/maven2/org/apache/hive/hive-jdbc/4.0.0-alpha-2/&lt;/A&gt;&lt;/P&gt;&lt;P&gt;File:&amp;nbsp;hive-jdbc-4.0.0-alpha-2-standalone.jar&amp;nbsp;&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;Add the same jar to your spark.jars.&lt;/P&gt;</description>
      <pubDate>Fri, 14 Jul 2023 16:55:08 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Support-Questions/Connecting-to-Hive-using-local-spark-jdbc/m-p/373916#M241810</guid>
      <dc:creator>cravani</dc:creator>
      <dc:date>2023-07-14T16:55:08Z</dc:date>
    </item>
    <item>
      <title>Re: Connecting to Hive using local spark jdbc</title>
      <link>https://community.cloudera.com/t5/Support-Questions/Connecting-to-Hive-using-local-spark-jdbc/m-p/373918#M241812</link>
      <description>&lt;P&gt;Hi, I changed the file and added it to the jars.&lt;BR /&gt;&lt;BR /&gt;&lt;/P&gt;&lt;LI-CODE lang="python"&gt;# Imports
from pyspark.sql import SparkSession

# Create SparkSession
spark = SparkSession.builder \
.appName('SparkByExamples.com') \
.config("spark.jars", "/Users/u530241/spark3/spark-3.3.2-bin-hadoop3/jars/hive-jdbc-4.0.0-alpha-2-standalone.jar").enableHiveSupport().getOrCreate()
&lt;/LI-CODE&gt;&lt;P&gt;&lt;BR /&gt;&lt;BR /&gt;I am now getting the error -&amp;nbsp;&lt;/P&gt;&lt;P&gt;&lt;BR /&gt;```&lt;/P&gt;&lt;PRE&gt;Exception in thread "main" java.lang.IllegalArgumentException: Can't get Kerberos realm
	at org.apache.hadoop.security.HadoopKerberosName.setConfiguration(HadoopKerberosName.java:71)
	at org.apache.hadoop.security.UserGroupInformation.initialize(UserGroupInformation.java:315)
	at org.apache.hadoop.security.UserGroupInformation.ensureInitialized(UserGroupInformation.java:300)
	at org.apache.hadoop.security.UserGroupInformation.getCurrentUser(UserGroupInformation.java:575)
	at org.apache.hadoop.fs.FileSystem$Cache$Key.&amp;lt;init&amp;gt;(FileSystem.java:3746)
	at org.apache.hadoop.fs.FileSystem$Cache$Key.&amp;lt;init&amp;gt;(FileSystem.java:3736)
	at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:3520)
	at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:540)
	at org.apache.spark.util.DependencyUtils$.resolveGlobPath(DependencyUtils.scala:317)
	at org.apache.spark.util.DependencyUtils$.$anonfun$resolveGlobPaths$2(DependencyUtils.scala:273)
	at org.apache.spark.util.DependencyUtils$.$anonfun$resolveGlobPaths$2$adapted(DependencyUtils.scala:271)
	at scala.collection.TraversableLike.$anonfun$flatMap$1(TraversableLike.scala:293)
	at scala.collection.IndexedSeqOptimized.foreach(IndexedSeqOptimized.scala:36)
	at scala.collection.IndexedSeqOptimized.foreach$(IndexedSeqOptimized.scala:33)
	at scala.collection.mutable.WrappedArray.foreach(WrappedArray.scala:38)
	at scala.collection.TraversableLike.flatMap(TraversableLike.scala:293)
	at scala.collection.TraversableLike.flatMap$(TraversableLike.scala:290)
	at scala.collection.AbstractTraversable.flatMap(Traversable.scala:108)
	at org.apache.spark.util.DependencyUtils$.resolveGlobPaths(DependencyUtils.scala:271)
	at org.apache.spark.deploy.SparkSubmit.$anonfun$prepareSubmitEnvironment$4(SparkSubmit.scala:364)
	at scala.Option.map(Option.scala:230)
	at org.apache.spark.deploy.SparkSubmit.prepareSubmitEnvironment(SparkSubmit.scala:364)
	at org.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:901)
	at org.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)
	at org.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)
	at org.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)
	at org.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1046)
	at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1055)
	at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
Caused by: java.lang.IllegalArgumentException: KrbException: krb5.conf loading failed
	at java.security.jgss/javax.security.auth.kerberos.KerberosPrincipal.&amp;lt;init&amp;gt;(KerberosPrincipal.java:179)
	at org.apache.hadoop.security.authentication.util.KerberosUtil.getDefaultRealm(KerberosUtil.java:120)
	at org.apache.hadoop.security.HadoopKerberosName.setConfiguration(HadoopKerberosName.java:69)
	... 28 more&lt;BR /&gt;&lt;BR /&gt;&lt;/PRE&gt;&lt;PRE&gt;&lt;SPAN class="ansi-red-fg"&gt;RuntimeError&lt;/SPAN&gt;: Java gateway process exited before sending its port number&lt;/PRE&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;&lt;BR /&gt;```&lt;/P&gt;</description>
      <pubDate>Fri, 14 Jul 2023 20:38:38 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Support-Questions/Connecting-to-Hive-using-local-spark-jdbc/m-p/373918#M241812</guid>
      <dc:creator>pranav007</dc:creator>
      <dc:date>2023-07-14T20:38:38Z</dc:date>
    </item>
    <item>
      <title>Re: Connecting to Hive using local spark jdbc</title>
      <link>https://community.cloudera.com/t5/Support-Questions/Connecting-to-Hive-using-local-spark-jdbc/m-p/373936#M241821</link>
      <description>&lt;P&gt;Are you sure that you are trying to connect using Kerberos to Hive? It appears to me that you are trying to connect to Hive over Knox which may have LDAP configured instead of Kerberos?&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;Can you try below? by replacing your username and password&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;LI-CODE lang="java"&gt;df = spark.read \
.jdbc("jdbc:hive2://p-cdl-knox-prd-svc.hcscint.net:8443/default;transportMode=http;httpPath=gateway/default/hive;ssl=true;sslTrustStore=/Users/u530241/Downloads/gateway_prod_.jks;trustStorePassword=knoxprod;user=&amp;lt;username&amp;gt;;password=&amp;lt;password&amp;gt;")&lt;/LI-CODE&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;</description>
      <pubDate>Sat, 15 Jul 2023 19:09:50 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Support-Questions/Connecting-to-Hive-using-local-spark-jdbc/m-p/373936#M241821</guid>
      <dc:creator>cravani</dc:creator>
      <dc:date>2023-07-15T19:09:50Z</dc:date>
    </item>
    <item>
      <title>Re: Connecting to Hive using local spark jdbc</title>
      <link>https://community.cloudera.com/t5/Support-Questions/Connecting-to-Hive-using-local-spark-jdbc/m-p/373967#M241834</link>
      <description>&lt;P&gt;I have the same issue. Any tips for solution?&lt;/P&gt;</description>
      <pubDate>Mon, 17 Jul 2023 12:53:42 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Support-Questions/Connecting-to-Hive-using-local-spark-jdbc/m-p/373967#M241834</guid>
      <dc:creator>vrooktreat</dc:creator>
      <dc:date>2023-07-17T12:53:42Z</dc:date>
    </item>
    <item>
      <title>Re: Connecting to Hive using local spark jdbc</title>
      <link>https://community.cloudera.com/t5/Support-Questions/Connecting-to-Hive-using-local-spark-jdbc/m-p/373975#M241835</link>
      <description>&lt;P&gt;Yes, I can try this... but the error I am having is on the line right before it where I am creating a spark variable.&lt;BR /&gt;&lt;BR /&gt;Error is here -&lt;/P&gt;&lt;LI-CODE lang="python"&gt;# Imports
from pyspark.sql import SparkSession

# Create SparkSession
spark = SparkSession.builder \
.appName('SparkByExamples.com') \
.config("spark.jars", "/Users/u530241/spark3/spark-3.3.2-bin-hadoop3/jars/hive-jdbc-4.0.0-alpha-2-standalone.jar").enableHiveSupport().getOrCreate()&lt;/LI-CODE&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;I will make a change here once it is solved -&amp;nbsp;&lt;/P&gt;&lt;LI-CODE lang="python"&gt;df = spark.read \
.jdbc("jdbc:hive2://p-cdl-knox-prd-svc.hcscint.net:8443/default;transportMode=http;httpPath=gateway/default/hive;ssl=true;sslTrustStore=/Users/u530241/Downloads/gateway_prod_.jks;trustStorePassword=knoxprod;user=&amp;lt;username&amp;gt;;password=&amp;lt;password&amp;gt;",
"member_connect",
properties={"user": "u530241", "password": "..", "driver":"com.mysql.jdbc.Driver"})&lt;/LI-CODE&gt;</description>
      <pubDate>Mon, 17 Jul 2023 15:00:44 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Support-Questions/Connecting-to-Hive-using-local-spark-jdbc/m-p/373975#M241835</guid>
      <dc:creator>pranav007</dc:creator>
      <dc:date>2023-07-17T15:00:44Z</dc:date>
    </item>
    <item>
      <title>Re: Connecting to Hive using local spark jdbc</title>
      <link>https://community.cloudera.com/t5/Support-Questions/Connecting-to-Hive-using-local-spark-jdbc/m-p/374291#M241942</link>
      <description>&lt;P&gt;&lt;a href="https://community.cloudera.com/t5/user/viewprofilepage/user-id/38161"&gt;@cravani&lt;/a&gt;&amp;nbsp;&lt;a href="https://community.cloudera.com/t5/user/viewprofilepage/user-id/93628"&gt;@DianaTorres&lt;/a&gt;&amp;nbsp;Are you there? I am still facing issues while connecting&amp;nbsp;&lt;/P&gt;</description>
      <pubDate>Fri, 21 Jul 2023 15:40:22 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Support-Questions/Connecting-to-Hive-using-local-spark-jdbc/m-p/374291#M241942</guid>
      <dc:creator>pranav007</dc:creator>
      <dc:date>2023-07-21T15:40:22Z</dc:date>
    </item>
    <item>
      <title>Re: Connecting to Hive using local spark jdbc</title>
      <link>https://community.cloudera.com/t5/Support-Questions/Connecting-to-Hive-using-local-spark-jdbc/m-p/374467#M241989</link>
      <description>&lt;P&gt;&lt;a href="https://community.cloudera.com/t5/user/viewprofilepage/user-id/22324"&gt;@Bharati&lt;/a&gt;&amp;nbsp;&lt;a href="https://community.cloudera.com/t5/user/viewprofilepage/user-id/70785"&gt;@Shmoo&lt;/a&gt;&amp;nbsp;Do you have any insights here? Thanks!&lt;/P&gt;</description>
      <pubDate>Tue, 25 Jul 2023 19:47:49 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Support-Questions/Connecting-to-Hive-using-local-spark-jdbc/m-p/374467#M241989</guid>
      <dc:creator>DianaTorres</dc:creator>
      <dc:date>2023-07-25T19:47:49Z</dc:date>
    </item>
    <item>
      <title>Re: Connecting to Hive using local spark jdbc</title>
      <link>https://community.cloudera.com/t5/Support-Questions/Connecting-to-Hive-using-local-spark-jdbc/m-p/385298#M245678</link>
      <description>&lt;P&gt;&lt;SPAN&gt;I have followed the below steps to read the hive table from spark side with credential store:&lt;BR /&gt;&lt;BR /&gt;&lt;/SPAN&gt;&lt;STRONG&gt;FROM MYSQL&amp;nbsp;&lt;/STRONG&gt;&lt;BR /&gt;&lt;BR /&gt;&lt;/P&gt;&lt;LI-CODE lang="markup"&gt;CREATE USER IF NOT EXISTS 'gopi'@'%' IDENTIFIED BY 'gopi';
GRANT ALL PRIVILEGES ON * . * TO 'gopi'@'%';
FLUSH PRIVILEGES;

create database if not exists test;
use test;

CREATE TABLE test.EMPLOYEE(
        id INT,
        name varchar(255),
        salary DECIMAL,
        dob DATE NOT NULL DEFAULT '2021-05-01',
        doj TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP(),
        PRIMARY KEY (id)
);

INSERT INTO test.EMPLOYEE (id, name, salary, dob, doj) VALUES (1, "gopi", 10000.00, '1988-06-01', '2020-03-16 09:00:01.000000');
INSERT INTO test.EMPLOYEE (id, name, salary,dob) VALUES (2, "Nishanth", 50000.00, '2018-05-29');
INSERT INTO test.EMPLOYEE (id, name, salary) VALUES (3, "Raja", 30000.00);&lt;/LI-CODE&gt;&lt;P&gt;&lt;STRONG&gt;Create credential store:&lt;BR /&gt;&lt;BR /&gt;&lt;/STRONG&gt;&lt;/P&gt;&lt;LI-CODE lang="markup"&gt;sudo -u hive hadoop credential create gopi_user.password -v gopi -provider jceks://hdfs/user/hive/gopi_user.jceks
hadoop credential list -provider jceks://hdfs/user/hive/gopi_user.jceks
sudo -u hive hdfs dfs -chmod 400 /user/hive/gopi_user.jceks&lt;/LI-CODE&gt;&lt;P&gt;&lt;STRONG&gt;FROM HIVE&lt;/STRONG&gt;&lt;BR /&gt;&lt;BR /&gt;&lt;/P&gt;&lt;LI-CODE lang="markup"&gt;USE db_test;
drop table if exists db_test.employee2;

CREATE EXTERNAL TABLE db_test.employee2(
  id INT,
  name STRING,
  salary DOUBLE,
  dob DATE,
  doj TIMESTAMP
)
STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
TBLPROPERTIES (
  "hive.sql.database.type" = "MYSQL",
  "hive.sql.jdbc.driver" = "com.mysql.cj.jdbc.Driver",
  "hive.sql.jdbc.url" = "jdbc:mysql://ccycloud-1.nightly7x-us-gr.root.comops.site:3306/test",
  "hive.sql.dbcp.username" = "gopi",
  "hive.sql.dbcp.password.keystore"  ="jceks://hdfs/user/hive/gopi_user.jceks",
  "hive.sql.dbcp.password.key" = "gopi_user.password",
  "hive.sql.query" = "select * from test.EMPLOYEE"
);&lt;/LI-CODE&gt;&lt;P&gt;&lt;STRONG&gt;FROM SPARK&lt;BR /&gt;&lt;BR /&gt;&lt;/STRONG&gt;&lt;/P&gt;&lt;LI-CODE lang="markup"&gt;sudo -u hive spark-shell \
  --jars /opt/cloudera/parcels/CDH/jars/hive-jdbc-handler-3.1.3000.7.2.18.0-622.jar,/usr/share/java/mysql-connector-java.jar

scala&amp;gt; spark.sql("SELECT * FROM db_test.employee1").show()

+---+--------+-------+----------+-------------------+                           
| id|    name| salary|       dob|                doj|
+---+--------+-------+----------+-------------------+
|  1|   gopi|10000.0|1988-06-01|2020-03-16 09:00:01|
|  2|Nishanth|50000.0|2018-05-29|2024-02-27 10:39:22|
|  3|    Raja|30000.0|2021-05-01|2024-02-27 10:39:30|
+---+--------+-------+----------+-------------------+


scala&amp;gt; spark.sql("SELECT * FROM db_test.employee2").show()
+---+--------+-------+----------+-------------------+                           
| id|    name| salary|       dob|                doj|
+---+--------+-------+----------+-------------------+
|  1|   gopi|10000.0|1988-06-01|2020-03-16 09:00:01|
|  2|Nishanth|50000.0|2018-05-29|2024-02-27 10:39:22|
|  3|    Raja|30000.0|2021-05-01|2024-02-27 10:39:30|
+---+--------+-------+----------+-------------------+&lt;/LI-CODE&gt;&lt;P&gt;&lt;STRONG&gt;&amp;nbsp;&lt;/STRONG&gt;&lt;/P&gt;&lt;P&gt;&lt;STRONG&gt;&amp;nbsp;&lt;/STRONG&gt;&lt;/P&gt;&lt;P&gt;&lt;BR /&gt;&lt;BR /&gt;&lt;/P&gt;</description>
      <pubDate>Thu, 21 Mar 2024 06:40:37 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Support-Questions/Connecting-to-Hive-using-local-spark-jdbc/m-p/385298#M245678</guid>
      <dc:creator>ggangadharan</dc:creator>
      <dc:date>2024-03-21T06:40:37Z</dc:date>
    </item>
    <item>
      <title>Re: Connecting to Hive using local spark jdbc</title>
      <link>https://community.cloudera.com/t5/Support-Questions/Connecting-to-Hive-using-local-spark-jdbc/m-p/387898#M246477</link>
      <description>&lt;P&gt;&lt;SPAN&gt;Any tips for solution?&lt;/SPAN&gt;&lt;/P&gt;</description>
      <pubDate>Wed, 15 May 2024 12:18:51 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Support-Questions/Connecting-to-Hive-using-local-spark-jdbc/m-p/387898#M246477</guid>
      <dc:creator>craxedunt</dc:creator>
      <dc:date>2024-05-15T12:18:51Z</dc:date>
    </item>
  </channel>
</rss>

