<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>question SPARK Commands don't "work" in Archives of Support Questions (Read Only)</title>
    <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/SPARK-Comands-don-t-quot-work-quot/m-p/229314#M72708</link>
    <description>Zeppelin/Spark question: a DataFrame loaded over JDBC ("equipment") cannot be queried with sqlContext.sql("select * from equipment"); the call fails with org.apache.spark.sql.AnalysisException: Table not found: equipment. The full post and stack trace are in the first item.</description>
    <pubDate>Mon, 18 Dec 2017 16:56:57 GMT</pubDate>
    <dc:creator>mario_borys</dc:creator>
    <dc:date>2017-12-18T16:56:57Z</dc:date>
    <item>
      <title>SPARK Commands don't "work"</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/SPARK-Comands-don-t-quot-work-quot/m-p/229314#M72708</link>
      <description>&lt;P&gt;Hi,&lt;/P&gt;&lt;P&gt;I'm working in Zeppelin with the Spark interpreter. Every time I try to show a DataFrame, it either hangs or doesn't recognize the table. Here's my code:&lt;/P&gt;&lt;P&gt;%spark&lt;/P&gt;&lt;P&gt;import sqlContext.implicits._&lt;BR /&gt;import org.apache.spark.sql.hive.HiveContext&lt;BR /&gt;import org.apache.spark.sql.SQLContext&lt;BR /&gt;import org.apache.spark.{SparkContext, SparkConf}&lt;/P&gt;&lt;P&gt;val sqlContext = new org.apache.spark.sql.hive.HiveContext(sc)&lt;/P&gt;&lt;P&gt;&lt;BR /&gt;val jdbcTable1 = "V_EQUIPMENT"&lt;BR /&gt;val equipment = sqlContext.read.format("jdbc").options(Map("url" -&amp;gt; jdbcUrl,"dbtable" -&amp;gt; jdbcTable1, "driver" -&amp;gt; jdbcDriver)).load()&lt;BR /&gt;&lt;BR /&gt;val df = sqlContext.sql("select * from equipment")&lt;BR /&gt;df.show()&lt;BR /&gt;&lt;BR /&gt;equipment.show()&lt;/P&gt;&lt;P&gt;org.apache.spark.sql.AnalysisException: Table not found: equipment; line 1 pos 14
	at org.apache.spark.sql.catalyst.analysis.package$AnalysisErrorAt.failAnalysis(package.scala:42)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.getTable(Analyzer.scala:305)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$$anonfun$apply$9.applyOrElse(Analyzer.scala:314)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$$anonfun$apply$9.applyOrElse(Analyzer.scala:309)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan$$anonfun$resolveOperators$1.apply(LogicalPlan.scala:57)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan$$anonfun$resolveOperators$1.apply(LogicalPlan.scala:57)
	at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:69)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperators(LogicalPlan.scala:56)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan$$anonfun$1.apply(LogicalPlan.scala:54)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan$$anonfun$1.apply(LogicalPlan.scala:54)
	at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$4.apply(TreeNode.scala:281)
	at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
	at scala.collection.Iterator$class.foreach(Iterator.scala:727)
	at scala.collection.AbstractIterator.foreach(Iterator.scala:1157)
	at scala.collection.generic.Growable$class.$plus$plus$eq(Growable.scala:48)
	at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:103)
	at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:47)
	at scala.collection.TraversableOnce$class.to(TraversableOnce.scala:273)
	at scala.collection.AbstractIterator.to(Iterator.scala:1157)
	at scala.collection.TraversableOnce$class.toBuffer(TraversableOnce.scala:265)
	at scala.collection.AbstractIterator.toBuffer(Iterator.scala:1157)
	at scala.collection.TraversableOnce$class.toArray(TraversableOnce.scala:252)
	at scala.collection.AbstractIterator.toArray(Iterator.scala:1157)
	at org.apache.spark.sql.catalyst.trees.TreeNode.transformChildren(TreeNode.scala:321)
	at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperators(LogicalPlan.scala:54)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.apply(Analyzer.scala:309)
	at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.apply(Analyzer.scala:299)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1$$anonfun$apply$1.apply(RuleExecutor.scala:83)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1$$anonfun$apply$1.apply(RuleExecutor.scala:80)
	at scala.collection.LinearSeqOptimized$class.foldLeft(LinearSeqOptimized.scala:111)
	at scala.collection.immutable.List.foldLeft(List.scala:84)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1.apply(RuleExecutor.scala:80)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1.apply(RuleExecutor.scala:72)
	at scala.collection.immutable.List.foreach(List.scala:318)
	at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:72)
	at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:36)
	at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:36)
	at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:34)
	at org.apache.spark.sql.DataFrame.&amp;lt;init&amp;gt;(DataFrame.scala:133)
	at org.apache.spark.sql.DataFrame$.apply(DataFrame.scala:52)
	at org.apache.spark.sql.SQLContext.sql(SQLContext.scala:817)
	at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.&amp;lt;init&amp;gt;(&amp;lt;console&amp;gt;:67)
	at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.&amp;lt;init&amp;gt;(&amp;lt;console&amp;gt;:72)
	at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.&amp;lt;init&amp;gt;(&amp;lt;console&amp;gt;:74)
	at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.&amp;lt;init&amp;gt;(&amp;lt;console&amp;gt;:76)
	at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.&amp;lt;init&amp;gt;(&amp;lt;console&amp;gt;:78)
	at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.&amp;lt;init&amp;gt;(&amp;lt;console&amp;gt;:80)
	at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.&amp;lt;init&amp;gt;(&amp;lt;console&amp;gt;:82)
	at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.&amp;lt;init&amp;gt;(&amp;lt;console&amp;gt;:84)
	at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.&amp;lt;init&amp;gt;(&amp;lt;console&amp;gt;:86)
	at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.&amp;lt;init&amp;gt;(&amp;lt;console&amp;gt;:88)
	at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.&amp;lt;init&amp;gt;(&amp;lt;console&amp;gt;:90)
	at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.&amp;lt;init&amp;gt;(&amp;lt;console&amp;gt;:92)
	at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.&amp;lt;init&amp;gt;(&amp;lt;console&amp;gt;:94)
	at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.&amp;lt;init&amp;gt;(&amp;lt;console&amp;gt;:96)
	at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.&amp;lt;init&amp;gt;(&amp;lt;console&amp;gt;:98)
	at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.&amp;lt;init&amp;gt;(&amp;lt;console&amp;gt;:100)
	at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC.&amp;lt;init&amp;gt;(&amp;lt;console&amp;gt;:102)
	at $iwC$$iwC$$iwC$$iwC$$iwC.&amp;lt;init&amp;gt;(&amp;lt;console&amp;gt;:104)
	at $iwC$$iwC$$iwC$$iwC.&amp;lt;init&amp;gt;(&amp;lt;console&amp;gt;:106)
	at $iwC$$iwC$$iwC.&amp;lt;init&amp;gt;(&amp;lt;console&amp;gt;:108)
	at $iwC$$iwC.&amp;lt;init&amp;gt;(&amp;lt;console&amp;gt;:110)
	at $iwC.&amp;lt;init&amp;gt;(&amp;lt;console&amp;gt;:112)
	at &amp;lt;init&amp;gt;(&amp;lt;console&amp;gt;:114)
	at .&amp;lt;init&amp;gt;(&amp;lt;console&amp;gt;:118)
	at .&amp;lt;clinit&amp;gt;(&amp;lt;console&amp;gt;)
	at .&amp;lt;init&amp;gt;(&amp;lt;console&amp;gt;:7)
	at .&amp;lt;clinit&amp;gt;(&amp;lt;console&amp;gt;)
	at $print(&amp;lt;console&amp;gt;)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:498)
	at org.apache.spark.repl.SparkIMain$ReadEvalPrint.call(SparkIMain.scala:1065)
	at org.apache.spark.repl.SparkIMain$Request.loadAndRun(SparkIMain.scala:1346)
	at org.apache.spark.repl.SparkIMain.loadAndRunReq$1(SparkIMain.scala:840)
	at org.apache.spark.repl.SparkIMain.interpret(SparkIMain.scala:871)
	at org.apache.spark.repl.SparkIMain.interpret(SparkIMain.scala:819)
	at sun.reflect.GeneratedMethodAccessor28.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:498)
	at org.apache.zeppelin.spark.Utils.invokeMethod(Utils.java:38)
	at org.apache.zeppelin.spark.SparkInterpreter.interpret(SparkInterpreter.java:984)
	at org.apache.zeppelin.spark.SparkInterpreter.interpretInput(SparkInterpreter.java:1189)
	at org.apache.zeppelin.spark.SparkInterpreter.interpret(SparkInterpreter.java:1156)
	at org.apache.zeppelin.spark.SparkInterpreter.interpret(SparkInterpreter.java:1149)
	at org.apache.zeppelin.interpreter.LazyOpenInterpreter.interpret(LazyOpenInterpreter.java:97)
	at org.apache.zeppelin.interpreter.remote.RemoteInterpreterServer$InterpretJob.jobRun(RemoteInterpreterServer.java:490)
	at org.apache.zeppelin.scheduler.Job.run(Job.java:175)
	at org.apache.zeppelin.scheduler.FIFOScheduler$1.run(FIFOScheduler.java:139)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$201(ScheduledThreadPoolExecutor.java:180)
	at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:293)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)&lt;/P&gt;&lt;P&gt;I don't get why Spark can't recognize the table.&lt;/P&gt;&lt;P&gt;Greetings,&lt;/P&gt;&lt;P&gt;Mario&lt;/P&gt;</description>
      <pubDate>Mon, 18 Dec 2017 16:56:57 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/SPARK-Comands-don-t-quot-work-quot/m-p/229314#M72708</guid>
      <dc:creator>mario_borys</dc:creator>
      <dc:date>2017-12-18T16:56:57Z</dc:date>
    </item>
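    <!--
      Why the AnalysisException occurs: sqlContext.sql() resolves table names
      against the SQL catalog, but the JDBC-loaded DataFrame "equipment" is only
      a Scala val and was never registered under any table name. A minimal Spark
      1.6 sketch of the working pattern, assuming jdbcUrl and jdbcDriver are
      defined earlier in the notebook (they are referenced but not shown in the
      post):

        // Load the JDBC view into a DataFrame, as in the post
        val equipment = sqlContext.read.format("jdbc")
          .options(Map("url" -> jdbcUrl, "dbtable" -> "V_EQUIPMENT", "driver" -> jdbcDriver))
          .load()

        // Register it so the SQL parser can resolve the name
        equipment.registerTempTable("equipment")

        // Now "equipment" exists in the catalog and the query analyzes cleanly
        sqlContext.sql("select * from equipment").show()
    -->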
    <item>
      <title>Re: SPARK Commands don't "work"</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/SPARK-Comands-don-t-quot-work-quot/m-p/229315#M72709</link>
      <description>&lt;P&gt;&lt;A rel="user" href="https://community.cloudera.com/users/49152/marioborys.html" nodeid="49152"&gt;@Mario Borys&lt;/A&gt;: You should register the dataframe as table and then run a select query on that registered able:&lt;/P&gt;&lt;PRE&gt;equipment.registerTempTable("equipment_table")
sqlContext.sql("select * from equipment_table")&lt;/PRE&gt;</description>
      <pubDate>Tue, 19 Dec 2017 02:49:44 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/SPARK-Comands-don-t-quot-work-quot/m-p/229315#M72709</guid>
      <dc:creator>sandyy006</dc:creator>
      <dc:date>2017-12-19T02:49:44Z</dc:date>
    </item>
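    <!--
      A caveat on this answer: the original code builds its own HiveContext
      (val sqlContext = new HiveContext(sc)), which shadows the sqlContext that
      Zeppelin injects. Temp tables are registered per context, so a table
      registered on a user-created context is invisible to the %spark.sql
      interpreter, which queries Zeppelin's shared context. A minimal sketch,
      assuming the stock Zeppelin Spark interpreter where sc and sqlContext are
      predefined:

        // %spark paragraph: reuse Zeppelin's injected sqlContext instead of creating one
        val equipment = sqlContext.read.format("jdbc")
          .options(Map("url" -> jdbcUrl, "dbtable" -> "V_EQUIPMENT", "driver" -> jdbcDriver))
          .load()
        equipment.registerTempTable("equipment_table")

        // %spark.sql paragraph (same interpreter group, shared context):
        //   select * from equipment_table
    -->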
    <item>
      <title>Re: SPARK Commands don't "work"</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/SPARK-Comands-don-t-quot-work-quot/m-p/229316#M72710</link>
      <description>&lt;P&gt;Like you said, it's working, but I still can't work with this table in %spark.sql.&lt;/P&gt;&lt;P&gt;Do I have to import something, or work with another version?&lt;/P&gt;&lt;P&gt;I'm working with Spark 1.6.1; should I move to Spark 2.x? Sorry for the questions, I'm pretty new to Hadoop.&lt;/P&gt;</description>
      <pubDate>Tue, 19 Dec 2017 20:55:56 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/SPARK-Comands-don-t-quot-work-quot/m-p/229316#M72710</guid>
      <dc:creator>mario_borys</dc:creator>
      <dc:date>2017-12-19T20:55:56Z</dc:date>
    </item>
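    <!--
      On the Spark version question: the pattern above works on Spark 1.6.1 once
      the table is registered on the shared context, so upgrading is not required
      to fix this. For reference, a rough Spark 2.x equivalent, assuming a
      Zeppelin setup that exposes a SparkSession named "spark" (the 2.x default):

        // %spark paragraph under Spark 2.x
        val equipment = spark.read.format("jdbc")
          .options(Map("url" -> jdbcUrl, "dbtable" -> "V_EQUIPMENT", "driver" -> jdbcDriver))
          .load()
        equipment.createOrReplaceTempView("equipment_table")
        spark.sql("select * from equipment_table").show()
    -->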
    <item>
      <title>Re: SPARK Commands don't "work"</title>
      <link>https://community.cloudera.com/t5/Archives-of-Support-Questions/SPARK-Comands-don-t-quot-work-quot/m-p/229317#M72711</link>
      <description>&lt;P&gt;&lt;A rel="user" href="https://community.cloudera.com/users/49152/marioborys.html" nodeid="49152"&gt;@Mario Borys&lt;/A&gt; This tutorial and zeppelin notebook should help you: &lt;/P&gt;&lt;P&gt;&lt;A href="https://hortonworks.com/tutorial/hands-on-tour-of-apache-spark-in-5-minutes/" target="_blank"&gt;https://hortonworks.com/tutorial/hands-on-tour-of-apache-spark-in-5-minutes/&lt;/A&gt;&lt;/P&gt;&lt;P&gt;&lt;A href="https://raw.githubusercontent.com/hortonworks-gallery/zeppelin-notebooks/hdp-2.6/2CBTZPY14/note.json" target="_blank"&gt;https://raw.githubusercontent.com/hortonworks-gallery/zeppelin-notebooks/hdp-2.6/2CBTZPY14/note.json&lt;/A&gt;&lt;/P&gt;&lt;P&gt;Feel free to accept the answer if this helps you.&lt;/P&gt;</description>
      <pubDate>Tue, 19 Dec 2017 23:38:55 GMT</pubDate>
      <guid>https://community.cloudera.com/t5/Archives-of-Support-Questions/SPARK-Comands-don-t-quot-work-quot/m-p/229317#M72711</guid>
      <dc:creator>sandyy006</dc:creator>
      <dc:date>2017-12-19T23:38:55Z</dc:date>
    </item>
  </channel>
</rss>