<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>question Re: org.apache.spark.sql.AnalysisException: Table not found: in Support Questions</title>
    <link>https://community.cloudera.com/t5/Support-Questions/org-apache-spark-sql-AnalysisException-Table-not-found/m-p/168216#M130546</link>
    <description>&lt;P&gt;&lt;A rel="user" href="https://community.cloudera.com/users/472/jwiden.html" nodeid="472"&gt;@Joe Widen&lt;/A&gt; &lt;/P&gt;&lt;P&gt;I am unable to run a select query on my input_file_tmp table; at the same time I can do it on gsam_temp, which is the DF I made from the SQL table. If I could query both DFs, it would be much easier for me to finish it off.&lt;/P&gt;&lt;P&gt;Here is the complete code:&lt;/P&gt;&lt;PRE&gt;import sqlContext.implicits._
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.{SparkContext, SparkConf}
import org.apache.spark.sql.functions.broadcast
import org.apache.spark.sql.types._
import org.apache.spark.sql._
import org.apache.spark.sql.functions._


val hiveContext = new HiveContext(sc);
val sqlContext = new org.apache.spark.sql.SQLContext(sc);


// Loading DB table in to dataframe
val gsam = hiveContext.read.format("jdbc").option("driver","oracle.jdbc.driver.OracleDriver").option("url","jdbc:oracle:thin:NPIDWDEV/sysdwnpi@scan-nsgnp.ebiz.verizon.com:1521/nsgdev").option("dbtable", "GSAM_REF").load();


gsam.registerTempTable("gsam_temp")


// Create case class to load input file from local or hdfs


case class f1(
  ckt_id:String,
  location:String,
  usage:String,
  port:String,
  machine:String
)


val input_file = sc.textFile("file:///data04/dev/v994292/spark/input_file.txt").map(_.split("\\|")).map(x =&amp;gt; f1(x(0).toString,x(1).toString,x(2).toString,x(3).toString,x(4).toString)).toDF
input_file.registerTempTable("input_file_tmp")

&lt;/PRE&gt;</description>
    <pubDate>Fri, 10 Feb 2017 23:46:59 GMT</pubDate>
    <dc:creator>das_dineshk</dc:creator>
    <dc:date>2017-02-10T23:46:59Z</dc:date>
  </channel>
</rss>

