<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>question Re: Spark SQL Job stuck indefinitely at last task of a stage -- Shows INFO: BlockManagerInfo: Removed broadcast in memory in Support Questions</title>
    <link>https://community.cloudera.com/t5/Support-Questions/Spark-SQL-Job-stcuk-indefinitely-at-last-task-of-a-stage/m-p/132546#M95216</link>
    <description>&lt;P&gt;Why I asked this question: because I am running my job in client mode and I am not sure if the below settings work with client mode&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;PRE&gt;ContextService.&lt;EM&gt;getHiveContext&lt;/EM&gt;.sql("SET spark.yarn.executor.memoryOverhead = 3000 ");
ContextService.&lt;EM&gt;getHiveContext&lt;/EM&gt;.sql("SET spark.yarn.am.memoryOverhead = 3000");&lt;/PRE&gt;&lt;P&gt;spark.yarn.executor.memoryOverhead works in cluster mode...&lt;/P&gt;&lt;P&gt;spark.yarn.am.memoryOverhead is the same as &lt;CODE&gt;spark.yarn.driver.memoryOverhead&lt;/CODE&gt;, but for the YARN Application Master in client mode. &lt;/P&gt;</description>
    <pubDate>Tue, 19 Jul 2016 17:00:15 GMT</pubDate>
    <dc:creator>pkhare</dc:creator>
    <dc:date>2016-07-19T17:00:15Z</dc:date>
  </channel>
</rss>

