<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:dc="http://purl.org/dc/elements/1.1/" version="2.0">
  <channel>
    <title>question Re: Hive query on spark as execution engine in HDP 2.6.5 in Support Questions</title>
    <link>https://community.cloudera.com/t5/Support-Questions/Hive-query-on-spark-as-execution-engine-in-HDP-2-6-5/m-p/282506#M210016</link>
    <description>&lt;P&gt;Can you please assist on this. Thanks&lt;/P&gt;</description>
    <pubDate>Fri, 08 Nov 2019 13:58:12 GMT</pubDate>
    <dc:creator>sampathkumar_ma</dc:creator>
    <dc:date>2019-11-08T13:58:12Z</dc:date>
    <item>
      <title>Hive query on spark as execution engine in HDP 2.6.5</title>
      <link>https://community.cloudera.com/t5/Support-Questions/Hive-query-on-spark-as-execution-engine-in-HDP-2-6-5/m-p/282195#M209826</link>
      <description>&lt;P&gt;Hi,&lt;/P&gt;
&lt;P&gt;We are getting the error while executing&amp;nbsp;Hive query on spark as execution engine.&lt;/P&gt;
&lt;P&gt;Hive version: 1.2.1, Spark version : 1.6&lt;/P&gt;
&lt;P&gt;&amp;nbsp;&lt;/P&gt;
&lt;P&gt;set hive.execution.engine=spark;&lt;BR /&gt;set spark.home=/usr/hdp/current/spark-client;&lt;BR /&gt;set hive.execution.engine=spark;&lt;BR /&gt;set spark.master=yarn-client;&lt;BR /&gt;set spark.eventLog.enabled=true;&lt;BR /&gt;set spark.executor.memory=512m;&lt;BR /&gt;set spark.executor.cores=2;&lt;BR /&gt;set spark.driver.extraClassPath=/usr/hdp/current/hive-client/lib/hive-exec.jar;&lt;/P&gt;
&lt;P&gt;&amp;nbsp;&lt;/P&gt;
&lt;P&gt;Query ID = svchdpir2d_20191106105445_a9ebc8a2-9c28-4a3d-ac5e-0a8609e56fd5&lt;BR /&gt;Total jobs = 1&lt;BR /&gt;Launching Job 1 out of 1&lt;BR /&gt;In order to change the average load for a reducer (in bytes):&lt;BR /&gt;set hive.exec.reducers.bytes.per.reducer=&amp;lt;number&amp;gt;&lt;BR /&gt;In order to limit the maximum number of reducers:&lt;BR /&gt;set hive.exec.reducers.max=&amp;lt;number&amp;gt;&lt;BR /&gt;In order to set a constant number of reducers:&lt;BR /&gt;set mapreduce.job.reduces=&amp;lt;number&amp;gt;&lt;BR /&gt;Starting Spark Job = c6cc1641-20ad-4073-ab62-4f621ae595c8&lt;BR /&gt;Status: SENT&lt;BR /&gt;&lt;STRONG&gt;Failed to execute spark task, with exception 'java.lang.IllegalStateException(RPC channel is closed.)'&lt;/STRONG&gt;&lt;BR /&gt;FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.spark.SparkTask&lt;BR /&gt;WARN: The method class org.apache.commons.logging.impl.SLF4JLogFactory#release() was invoked.&lt;BR /&gt;WARN: Please see &lt;A href="http://www.slf4j.org/codes.html#release" target="_blank" rel="noopener"&gt;http://www.slf4j.org/codes.html#release&lt;/A&gt; for an explanation.&lt;/P&gt;
&lt;P&gt;&amp;nbsp;&lt;/P&gt;
&lt;P&gt;Could you please help on this.&lt;/P&gt;
&lt;P&gt;Thank you&lt;/P&gt;</description>
      <pubDate>Wed, 06 Nov 2019 13:45:49 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Support-Questions/Hive-query-on-spark-as-execution-engine-in-HDP-2-6-5/m-p/282195#M209826</guid>
      <dc:creator>sampathkumar_ma</dc:creator>
      <dc:date>2019-11-06T13:45:49Z</dc:date>
    </item>
    <item>
      <title>Re: Hive query on spark as execution engine in HDP 2.6.5</title>
      <link>https://community.cloudera.com/t5/Support-Questions/Hive-query-on-spark-as-execution-engine-in-HDP-2-6-5/m-p/282506#M210016</link>
      <description>&lt;P&gt;Can you please assist on this. Thanks&lt;/P&gt;</description>
      <pubDate>Fri, 08 Nov 2019 13:58:12 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Support-Questions/Hive-query-on-spark-as-execution-engine-in-HDP-2-6-5/m-p/282506#M210016</guid>
      <dc:creator>sampathkumar_ma</dc:creator>
      <dc:date>2019-11-08T13:58:12Z</dc:date>
    </item>
    <item>
      <title>Re: Hive query on spark as execution engine in HDP 2.6.5</title>
      <link>https://community.cloudera.com/t5/Support-Questions/Hive-query-on-spark-as-execution-engine-in-HDP-2-6-5/m-p/282537#M210040</link>
      <description>&lt;P&gt;&lt;a href="https://community.cloudera.com/t5/user/viewprofilepage/user-id/18550"&gt;@sampathkumar_ma&lt;/a&gt;&amp;nbsp;- In HDP, Hive's execution engine only supports MapReduce &amp;amp; Tez. Running with Spark is not supported in HDP at this current moment in time.&lt;/P&gt;</description>
      <pubDate>Sat, 09 Nov 2019 06:23:23 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Support-Questions/Hive-query-on-spark-as-execution-engine-in-HDP-2-6-5/m-p/282537#M210040</guid>
      <dc:creator>ngarg</dc:creator>
      <dc:date>2019-11-09T06:23:23Z</dc:date>
    </item>
  </channel>
</rss>

