<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>question Re: spark issue after ran the job in Archives of Support Questions (Read Only)</title>
    <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/spark-issue-after-ran-the-job/m-p/131288#M34834</link>
    <description>&lt;P&gt;Can anyone help me to tune Spark to run the same job on a 32 GB system? My cluster was 3 nodes with 32 GB each; I think 32 GB per node is enough, and free memory was always 20 GB on every node.&lt;/P&gt;</description>
    <pubDate>Mon, 18 Jul 2016 20:13:30 GMT</pubDate>
    <dc:creator>ashneesharma88</dc:creator>
    <dc:date>2016-07-18T20:13:30Z</dc:date>
    <item>
      <title>spark issue after ran the job</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/spark-issue-after-ran-the-job/m-p/131283#M34829</link>
      <description>&lt;P&gt;We have a 3-node cluster.&lt;/P&gt;&lt;P&gt;Each node has 32 GB of RAM.&lt;/P&gt;&lt;P&gt;But the system still goes into a hung state after running the job.&lt;/P&gt;&lt;P&gt;The job converts a dataframe to CSV using com.databricks.csv.&lt;/P&gt;</description>
      <pubDate>Fri, 15 Jul 2016 20:16:50 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/spark-issue-after-ran-the-job/m-p/131283#M34829</guid>
      <dc:creator>ashneesharma88</dc:creator>
      <dc:date>2016-07-15T20:16:50Z</dc:date>
    </item>
    <item>
      <title>Re: spark issue after ran the job</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/spark-issue-after-ran-the-job/m-p/131284#M34830</link>
      <description>&lt;P&gt;Could you please post a little more information on the job, the submit command, etc.? What is your data source? &lt;/P&gt;</description>
      <pubDate>Sat, 16 Jul 2016 01:10:00 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/spark-issue-after-ran-the-job/m-p/131284#M34830</guid>
      <dc:creator>arunak</dc:creator>
      <dc:date>2016-07-16T01:10:00Z</dc:date>
    </item>
    <item>
      <title>Re: spark issue after ran the job</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/spark-issue-after-ran-the-job/m-p/131285#M34831</link>
      <description>&lt;P&gt;&lt;A rel="user" href="https://community.cloudera.com/users/10529/akeezhadath.html" nodeid="10529"&gt;@Arun A K&lt;/A&gt; &lt;/P&gt;&lt;P&gt;This is the command :-&lt;/P&gt;&lt;P&gt;We are reading csv files.&lt;/P&gt;&lt;P&gt; java -cp .:spark-assembly-1.6.1.2.4.2.0-258-hadoop2.7.1.2.4.2.0-258.jar:commons-csv-1.1.jar:spark-csv_2.10-1.4.0.jar SparkMainPlain xyz&lt;/P&gt;</description>
      <pubDate>Mon, 18 Jul 2016 15:20:47 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/spark-issue-after-ran-the-job/m-p/131285#M34831</guid>
      <dc:creator>ashneesharma88</dc:creator>
      <dc:date>2016-07-18T15:20:47Z</dc:date>
    </item>
    <item>
      <title>Re: spark issue after ran the job</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/spark-issue-after-ran-the-job/m-p/131286#M34832</link>
      <description>&lt;P&gt;The issue is resolved after increasing the physical RAM of the machine. Now it is working fine. I was running the job on a 32 GB RAM node; I increased it to 64 GB and ran the same code 3-4 times. &lt;/P&gt;</description>
      <pubDate>Mon, 18 Jul 2016 17:15:55 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/spark-issue-after-ran-the-job/m-p/131286#M34832</guid>
      <dc:creator>ashneesharma88</dc:creator>
      <dc:date>2016-07-18T17:15:55Z</dc:date>
    </item>
    <item>
      <title>Re: spark issue after ran the job</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/spark-issue-after-ran-the-job/m-p/131287#M34833</link>
      <description>&lt;P&gt;Please suggest whether I can tune my cluster for Spark.&lt;/P&gt;</description>
      <pubDate>Mon, 18 Jul 2016 18:02:08 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/spark-issue-after-ran-the-job/m-p/131287#M34833</guid>
      <dc:creator>ashneesharma88</dc:creator>
      <dc:date>2016-07-18T18:02:08Z</dc:date>
    </item>
    <item>
      <title>Re: spark issue after ran the job</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/spark-issue-after-ran-the-job/m-p/131288#M34834</link>
      <description>&lt;P&gt;Can anyone help me to tune Spark to run the same job on a 32 GB system? My cluster was 3 nodes with 32 GB each; I think 32 GB per node is enough, and free memory was always 20 GB on every node.&lt;/P&gt;</description>
      <pubDate>Mon, 18 Jul 2016 20:13:30 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/spark-issue-after-ran-the-job/m-p/131288#M34834</guid>
      <dc:creator>ashneesharma88</dc:creator>
      <dc:date>2016-07-18T20:13:30Z</dc:date>
    </item>
    <item>
      <title>Re: spark issue after ran the job</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/spark-issue-after-ran-the-job/m-p/131289#M34835</link>
      <description>&lt;P&gt;If spark-csv_2.10-1.4.0.jar is your application, please submit it using spark-submit rather than running it as a Java application. Could you explain a little more about what the application is doing? What is the data source? How do you turn your data into a data frame, etc.? &lt;/P&gt;</description>
      <pubDate>Mon, 18 Jul 2016 20:45:28 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/spark-issue-after-ran-the-job/m-p/131289#M34835</guid>
      <dc:creator>arunak</dc:creator>
      <dc:date>2016-07-18T20:45:28Z</dc:date>
    </item>
  </channel>
</rss>

