<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:dc="http://purl.org/dc/elements/1.1/" version="2.0">
  <channel>
    <title>question Re: Spark job in YARN queue depends on jobs in another queue in Support Questions</title>
    <link>https://community.cloudera.com/t5/Support-Questions/Spark-job-in-YARN-queue-depends-on-jobs-in-another-queue/m-p/168200#M130530</link>
    <description>&lt;P&gt;&lt;A href="https://community.cloudera.com/users/11713/grabowski14.html"&gt;Mateusz Grabowski&lt;/A&gt;, queue distribution ensures the capacity distribution. However it is possible that containers from different queues can run on same node manager host. In this case, execution time of a container may be affected. &lt;/P&gt;&lt;P&gt;So, isolating queues is not sufficient. You will also need to configure CGroup for cpu isolation. &lt;/P&gt;&lt;P&gt;Find some good links on CGroup as below. &lt;/P&gt;&lt;P&gt;&lt;A href="https://hortonworks.com/blog/managing-cpu-resources-in-your-hadoop-yarn-clusters/"&gt;https://hortonworks.com/blog/managing-cpu-resources-in-your-hadoop-yarn-clusters/&lt;/A&gt;&lt;/P&gt;&lt;P&gt;&lt;A href="https://hortonworks.com/blog/apache-hadoop-yarn-in-hdp-2-2-isolation-of-cpu-resources-in-your-hadoop-yarn-clusters/"&gt;https://hortonworks.com/blog/apache-hadoop-yarn-in-hdp-2-2-isolation-of-cpu-resources-in-your-hadoop-yarn-clusters/&lt;/A&gt;&lt;/P&gt;</description>
    <pubDate>Thu, 23 Mar 2017 06:40:40 GMT</pubDate>
    <dc:creator>yvora</dc:creator>
    <dc:date>2017-03-23T06:40:40Z</dc:date>
  </channel>
</rss>

