Member since 04-17-2018
3 Posts · 0 Kudos Received · 0 Solutions

04-18-2018 01:13 AM

Thank you all. I added

PARTITIONED BY (`date` STRING, hour INT)

to the CREATE EXTERNAL TABLE statement, and then:

ALTER TABLE processed_data_p ADD PARTITION (`date`="2015-08-19", hour=20) LOCATION '/user/myuser/spark/proyecto1/date=2015-08-19/hour=20';

One option to avoid adding all partitions manually is:

ALTER TABLE processed_data_p RECOVER PARTITIONS;
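
For reference, a minimal sketch of the combined statement, assuming the new table reuses the same Parquet file and directory layout as in the question below (processed_data_p is the table name used in this reply; adjust the paths to your own data):

CREATE EXTERNAL TABLE processed_data_p
LIKE PARQUET '/user/myuser/spark/proyecto1/date=2015-08-17/hour=21/part-00087-7a6eac40-0db4-4368-894c-72588f80dfd6.c000.snappy.parquet'
PARTITIONED BY (`date` STRING, hour INT)
STORED AS PARQUET
LOCATION '/user/myuser/spark/proyecto1';

-- Register one partition explicitly...
ALTER TABLE processed_data_p ADD PARTITION (`date`="2015-08-19", hour=20)
LOCATION '/user/myuser/spark/proyecto1/date=2015-08-19/hour=20';

-- ...or let Impala discover the existing date=/hour= directories under LOCATION:
ALTER TABLE processed_data_p RECOVER PARTITIONS;

-- If new files land in existing partitions later, reload the file metadata:
REFRESH processed_data_p;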
    
	
		
		
04-17-2018 01:30 AM

Hi, I did search for similar issues but didn't find a perfect match for my case:

CREATE EXTERNAL TABLE processed_data LIKE PARQUET '/user/myuser/spark/proyecto1/date=2015-08-17/hour=21/part-00087-7a6eac40-0db4-4368-894c-72588f80dfd6.c000.snappy.parquet'
STORED AS PARQUET
LOCATION '/user/myuser/spark/proyecto1';

The table creation works, but any query to the table returns 0 rows.

Query: SHOW CREATE TABLE processed_data
+---------------------------------------------------------------------------------------------------------------------------+
| result                                                                                                                    |
+---------------------------------------------------------------------------------------------------------------------------+
| CREATE EXTERNAL TABLE josemidb.processed_data (                                                                           |
|   window STRUCT<start:TIMESTAMP,end:TIMESTAMP> COMMENT 'Inferred from Parquet file.',                                     |
|   terminating_cellid BIGINT COMMENT 'Inferred from Parquet file.',                                                        |
|   termination_id BIGINT COMMENT 'Inferred from Parquet file.',                                                            |
|   termination_reason BIGINT COMMENT 'Inferred from Parquet file.',                                                        |
|   numcalls BIGINT COMMENT 'Inferred from Parquet file.'                                                                   |
| )                                                                                                                         |
| STORED AS PARQUET                                                                                                         |
| LOCATION 'hdfs://myhost:8020/user/myuser/spark/proyecto1'                                                                 |
| TBLPROPERTIES ('COLUMN_STATS_ACCURATE'='false', 'numFiles'='18', 'numRows'='-1', 'rawDataSize'='-1', 'totalSize'='67251') |
+---------------------------------------------------------------------------------------------------------------------------+

This shows numFiles = 18, which is right, but numRows = -1 😞

I think my problem is that I'm not handling partitions the right way. Can you help me with this?

Thank you.
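
In case it helps with diagnosing the same symptom: the data files here sit in date=.../hour=... subdirectories written by Spark, which a non-partitioned external table pointing at the top-level directory typically does not pick up. A quick sanity check is to look at the files Impala sees and to point a scratch table at a single leaf directory (processed_data_onepart is just a hypothetical name; the path is the one from the CREATE statement above):

-- List the files Impala currently associates with the table.
SHOW FILES IN processed_data;

-- Point a scratch table at one leaf directory; if this returns rows,
-- the Parquet files are readable and only the partition handling is missing.
CREATE EXTERNAL TABLE processed_data_onepart
LIKE processed_data
LOCATION '/user/myuser/spark/proyecto1/date=2015-08-17/hour=21';

SELECT COUNT(*) FROM processed_data_onepart;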
						
					
		
			
				
						
Labels: Apache Impala