Member since: 07-11-2017

- 31 Posts
- 2 Kudos Received
- 2 Solutions
My Accepted Solutions

| Title | Views | Posted |
|---|---|---|
|  | 26650 | 07-17-2017 09:49 PM |
|  | 23876 | 07-17-2017 11:41 AM |

07-17-2017 10:47 AM

My mistake. Below is the output of /etc/hive/conf/hive-site.xml. Is anything in it configured incorrectly?

```
<?xml version="1.0" encoding="UTF-8"?>
<!--Autogenerated by Cloudera Manager-->
<configuration>
  <property><name>hive.metastore.uris</name><value>thrift://compute-15:9083</value></property>
  <property><name>hive.metastore.client.socket.timeout</name><value>300</value></property>
  <property><name>hive.metastore.warehouse.dir</name><value>/user/hive/warehouse</value></property>
  <property><name>hive.warehouse.subdir.inherit.perms</name><value>true</value></property>
  <property><name>hive.auto.convert.join</name><value>true</value></property>
  <property><name>hive.auto.convert.join.noconditionaltask.size</name><value>20971520</value></property>
  <property><name>hive.optimize.bucketmapjoin.sortedmerge</name><value>false</value></property>
  <property><name>hive.smbjoin.cache.rows</name><value>10000</value></property>
  <property><name>hive.server2.logging.operation.enabled</name><value>true</value></property>
  <property><name>hive.server2.logging.operation.log.location</name><value>/var/log/hive/operation_logs</value></property>
  <property><name>mapred.reduce.tasks</name><value>-1</value></property>
  <property><name>hive.exec.reducers.bytes.per.reducer</name><value>67108864</value></property>
  <property><name>hive.exec.copyfile.maxsize</name><value>33554432</value></property>
  <property><name>hive.exec.reducers.max</name><value>1099</value></property>
  <property><name>hive.vectorized.groupby.checkinterval</name><value>4096</value></property>
  <property><name>hive.vectorized.groupby.flush.percent</name><value>0.1</value></property>
  <property><name>hive.compute.query.using.stats</name><value>false</value></property>
  <property><name>hive.vectorized.execution.enabled</name><value>true</value></property>
  <property><name>hive.vectorized.execution.reduce.enabled</name><value>false</value></property>
  <property><name>hive.merge.mapfiles</name><value>true</value></property>
  <property><name>hive.merge.mapredfiles</name><value>false</value></property>
  <property><name>hive.cbo.enable</name><value>false</value></property>
  <property><name>hive.fetch.task.conversion</name><value>minimal</value></property>
  <property><name>hive.fetch.task.conversion.threshold</name><value>268435456</value></property>
  <property><name>hive.limit.pushdown.memory.usage</name><value>0.1</value></property>
  <property><name>hive.merge.sparkfiles</name><value>true</value></property>
  <property><name>hive.merge.smallfiles.avgsize</name><value>16777216</value></property>
  <property><name>hive.merge.size.per.task</name><value>268435456</value></property>
  <property><name>hive.optimize.reducededuplication</name><value>true</value></property>
  <property><name>hive.optimize.reducededuplication.min.reducer</name><value>4</value></property>
  <property><name>hive.map.aggr</name><value>true</value></property>
  <property><name>hive.map.aggr.hash.percentmemory</name><value>0.5</value></property>
  <property><name>hive.optimize.sort.dynamic.partition</name><value>false</value></property>
  <property><name>hive.execution.engine</name><value>mr</value></property>
  <property><name>spark.executor.memory</name><value>228170137</value></property>
  <property><name>spark.driver.memory</name><value>966367641</value></property>
  <property><name>spark.executor.cores</name><value>4</value></property>
  <property><name>spark.yarn.driver.memoryOverhead</name><value>102</value></property>
  <property><name>spark.yarn.executor.memoryOverhead</name><value>38</value></property>
  <property><name>spark.dynamicAllocation.enabled</name><value>true</value></property>
  <property><name>spark.dynamicAllocation.initialExecutors</name><value>1</value></property>
  <property><name>spark.dynamicAllocation.minExecutors</name><value>1</value></property>
  <property><name>spark.dynamicAllocation.maxExecutors</name><value>2147483647</value></property>
  <property><name>hive.metastore.execute.setugi</name><value>true</value></property>
  <property><name>hive.support.concurrency</name><value>true</value></property>
  <property><name>hive.zookeeper.quorum</name><value>compute-15</value></property>
  <property><name>hive.zookeeper.client.port</name><value>2181</value></property>
  <property><name>hive.zookeeper.namespace</name><value>hive_zookeeper_namespace_hive</value></property>
  <property><name>hive.cluster.delegation.token.store.class</name><value>org.apache.hadoop.hive.thrift.MemoryTokenStore</value></property>
  <property><name>hive.server2.enable.doAs</name><value>true</value></property>
  <property><name>hive.server2.use.SSL</name><value>false</value></property>
  <property><name>spark.shuffle.service.enabled</name><value>true</value></property>
</configuration>
```
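[Editor's note] If a syntax problem in this file is ever suspected, a quick well-formedness check is possible with `xmllint` from libxml2 (a minimal sketch, assuming the tool is installed):

```
# Prints nothing when the XML is well-formed; reports the offending line otherwise
xmllint --noout /etc/hive/conf/hive-site.xml
```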
						
					

07-17-2017 07:35 AM

The Hive Metastore fails to start with the error posted below.

Here is the content of /etc/cloudera-scm-server/db.properties:

```
com.cloudera.cmf.db.type=postgresql
com.cloudera.cmf.db.host=localhost:7432
com.cloudera.cmf.db.name=scm
com.cloudera.cmf.db.user=scm
com.cloudera.cmf.db.password=7xnqnVcO3U
com.cloudera.cmf.db.setupType=EMBEDDED
```

Below is the Hive Metastore server log:

```
[main]: Metastore Thrift Server threw an exception...
javax.jdo.JDOFatalDataStoreException: Unable to open a test connection to the given database. JDBC url = jdbc:postgresql://compute-15:7432/hive, username = hive. Terminating connection pool (set lazyInit to true if you expect to start your database after your app). Original Exception: ------
org.postgresql.util.PSQLException: FATAL: password authentication failed for user "hive"
	at org.postgresql.core.v3.ConnectionFactoryImpl.doAuthentication(ConnectionFactoryImpl.java:291)
	at org.postgresql.core.v3.ConnectionFactoryImpl.openConnectionImpl(ConnectionFactoryImpl.java:108)
	at org.postgresql.core.ConnectionFactory.openConnection(ConnectionFactory.java:66)
	at org.postgresql.jdbc2.AbstractJdbc2Connection.<init>(AbstractJdbc2Connection.java:125)
	at org.postgresql.jdbc3.AbstractJdbc3Connection.<init>(AbstractJdbc3Connection.java:30)
	at org.postgresql.jdbc3g.AbstractJdbc3gConnection.<init>(AbstractJdbc3gConnection.java:22)
	at org.postgresql.jdbc4.AbstractJdbc4Connection.<init>(AbstractJdbc4Connection.java:30)
	at org.postgresql.jdbc4.Jdbc4Connection.<init>(Jdbc4Connection.java:24)
	at org.postgresql.Driver.makeConnection(Driver.java:393)
	at org.postgresql.Driver.connect(Driver.java:267)
	at java.sql.DriverManager.getConnection(DriverManager.java:571)
	at java.sql.DriverManager.getConnection(DriverManager.java:187)
	at com.jolbox.bonecp.BoneCP.obtainRawInternalConnection(BoneCP.java:361)
	at com.jolbox.bonecp.BoneCP.<init>(BoneCP.java:416)
	at com.jolbox.bonecp.BoneCPDataSource.getConnection(BoneCPDataSource.java:120)
	at org.datanucleus.store.rdbms.ConnectionFactoryImpl$ManagedConnectionImpl.getConnection(ConnectionFactoryImpl.java:501)
	at org.datanucleus.store.rdbms.RDBMSStoreManager.<init>(RDBMSStoreManager.java:298)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
	at org.datanucleus.plugin.NonManagedPluginRegistry.createExecutableExtension(NonManagedPluginRegistry.java:631)
	at org.datanucleus.plugin.PluginManager.createExecutableExtension(PluginManager.java:301)
	at org.datanucleus.NucleusContext.createStoreManagerForProperties(NucleusContext.java:1187)
	at org.datanucleus.NucleusContext.initialise(NucleusContext.java:356)
	at org.datanucleus.api.jdo.JDOPersistenceManagerFactory.freezeConfiguration(JDOPersistenceManagerFactory.java:775)
	at org.datanucleus.api.jdo.JDOPersistenceManagerFactory.createPersistenceManagerFactory(JDOPersistenceManagerFactory.java:333)
	at org.datanucleus.api.jdo.JDOPersistenceManagerFactory.getPersistenceManagerFactory(JDOPersistenceManagerFactory.java:202)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at javax.jdo.JDOHelper$16.run(JDOHelper.java:1965)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.jdo.JDOHelper.invoke(JDOHelper.java:1960)
	at javax.jdo.JDOHelper.invokeGetPersistenceManagerFactoryOnImplementation(JDOHelper.java:1166)
	at javax.jdo.JDOHelper.getPersistenceManagerFactory(JDOHelper.java:808)
	at javax.jdo.JDOHelper.getPersistenceManagerFactory(JDOHelper.java:701)
	at org.apache.hadoop.hive.metastore.ObjectStore.getPMF(ObjectStore.java:418)
	at org.apache.hadoop.hive.metastore.ObjectStore.getPersistenceManager(ObjectStore.java:447)
	at org.apache.hadoop.hive.metastore.ObjectStore.initialize(ObjectStore.java:342)
	at org.apache.hadoop.hive.metastore.ObjectStore.setConf(ObjectStore.java:298)
	at org.apache.hadoop.util.ReflectionUtils.setConf(ReflectionUtils.java:73)
	at org.apache.hadoop.util.ReflectionUtils.newInstance(ReflectionUtils.java:133)
	at org.apache.hadoop.hive.metastore.RawStoreProxy.<init>(RawStoreProxy.java:60)
	at org.apache.hadoop.hive.metastore.RawStoreProxy.getProxy(RawStoreProxy.java:69)
	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.newRawStore(HiveMetaStore.java:682)
	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.getMS(HiveMetaStore.java:660)
	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.createDefaultDB(HiveMetaStore.java:713)
	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.init(HiveMetaStore.java:508)
	at org.apache.hadoop.hive.metastore.RetryingHMSHandler.<init>(RetryingHMSHandler.java:78)
	at org.apache.hadoop.hive.metastore.RetryingHMSHandler.getProxy(RetryingHMSHandler.java:84)
	at org.apache.hadoop.hive.metastore.HiveMetaStore.newRetryingHMSHandler(HiveMetaStore.java:6313)
	at org.apache.hadoop.hive.metastore.HiveMetaStore.newRetryingHMSHandler(HiveMetaStore.java:6308)
	at org.apache.hadoop.hive.metastore.HiveMetaStore.startMetaStore(HiveMetaStore.java:6558)
	at org.apache.hadoop.hive.metastore.HiveMetaStore.main(HiveMetaStore.java:6485)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.util.RunJar.run(RunJar.java:221)
	at org.apache.hadoop.util.RunJar.main(RunJar.java:136)
------
	at org.datanucleus.api.jdo.NucleusJDOHelper.getJDOExceptionForNucleusException(NucleusJDOHelper.java:436)
	at org.datanucleus.api.jdo.JDOPersistenceManagerFactory.freezeConfiguration(JDOPersistenceManagerFactory.java:788)
	at org.datanucleus.api.jdo.JDOPersistenceManagerFactory.createPersistenceManagerFactory(JDOPersistenceManagerFactory.java:333)
	at org.datanucleus.api.jdo.JDOPersistenceManagerFactory.getPersistenceManagerFactory(JDOPersistenceManagerFactory.java:202)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at javax.jdo.JDOHelper$16.run(JDOHelper.java:1965)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.jdo.JDOHelper.invoke(JDOHelper.java:1960)
	at javax.jdo.JDOHelper.invokeGetPersistenceManagerFactoryOnImplementation(JDOHelper.java:1166)
	at javax.jdo.JDOHelper.getPersistenceManagerFactory(JDOHelper.java:808)
	at javax.jdo.JDOHelper.getPersistenceManagerFactory(JDOHelper.java:701)
	at org.apache.hadoop.hive.metastore.ObjectStore.getPMF(ObjectStore.java:418)
	at org.apache.hadoop.hive.metastore.ObjectStore.getPersistenceManager(ObjectStore.java:447)
	at org.apache.hadoop.hive.metastore.ObjectStore.initialize(ObjectStore.java:342)
	at org.apache.hadoop.hive.metastore.ObjectStore.setConf(ObjectStore.java:298)
	at org.apache.hadoop.util.ReflectionUtils.setConf(ReflectionUtils.java:73)
	at org.apache.hadoop.util.ReflectionUtils.newInstance(ReflectionUtils.java:133)
	at org.apache.hadoop.hive.metastore.RawStoreProxy.<init>(RawStoreProxy.java:60)
	at org.apache.hadoop.hive.metastore.RawStoreProxy.getProxy(RawStoreProxy.java:69)
	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.newRawStore(HiveMetaStore.java:682)
	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.getMS(HiveMetaStore.java:660)
	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.createDefaultDB(HiveMetaStore.java:713)
	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.init(HiveMetaStore.java:508)
	at org.apache.hadoop.hive.metastore.RetryingHMSHandler.<init>(RetryingHMSHandler.java:78)
	at org.apache.hadoop.hive.metastore.RetryingHMSHandler.getProxy(RetryingHMSHandler.java:84)
	at org.apache.hadoop.hive.metastore.HiveMetaStore.newRetryingHMSHandler(HiveMetaStore.java:6313)
	at org.apache.hadoop.hive.metastore.HiveMetaStore.newRetryingHMSHandler(HiveMetaStore.java:6308)
	at org.apache.hadoop.hive.metastore.HiveMetaStore.startMetaStore(HiveMetaStore.java:6558)
	at org.apache.hadoop.hive.metastore.HiveMetaStore.main(HiveMetaStore.java:6485)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.util.RunJar.run(RunJar.java:221)
	at org.apache.hadoop.util.RunJar.main(RunJar.java:136)
NestedThrowablesStackTrace:
java.sql.SQLException: Unable to open a test connection to the given database. JDBC url = jdbc:postgresql://compute-15:7432/hive, username = hive. Terminating connection pool (set lazyInit to true if you expect to start your database after your app). Original Exception: ------
org.postgresql.util.PSQLException: FATAL: password authentication failed for user "hive"
	at org.postgresql.core.v3.ConnectionFactoryImpl.doAuthentication(ConnectionFactoryImpl.java:291)
	at org.postgresql.core.v3.ConnectionFactoryImpl.openConnectionImpl(ConnectionFactoryImpl.java:108)
	at org.postgresql.core.ConnectionFactory.openConnection(ConnectionFactory.java:66)
	at org.postgresql.jdbc2.AbstractJdbc2Connection.<init>(AbstractJdbc2Connection.java:125)
	at org.postgresql.jdbc3.AbstractJdbc3Connection.<init>(AbstractJdbc3Connection.java:30)
	at org.postgresql.jdbc3g.AbstractJdbc3gConnection.<init>(AbstractJdbc3gConnection.java:22)
	at org.postgresql.jdbc4.AbstractJdbc4Connection.<init>(AbstractJdbc4Connection.java:30)
	at org.postgresql.jdbc4.Jdbc4Connection.<init>(Jdbc4Connection.java:24)
	at org.postgresql.Driver.makeConnection(Driver.java:393)
	at org.postgresql.Driver.connect(Driver.java:267)
	at java.sql.DriverManager.getConnection(DriverManager.java:571)
	at java.sql.DriverManager.getConnection(DriverManager.java:187)
	at com.jolbox.bonecp.BoneCP.obtainRawInternalConnection(BoneCP.java:361)
	at com.jolbox.bonecp.BoneCP.<init>(BoneCP.java:416)
	at com.jolbox.bonecp.BoneCPDataSource.getConnection(BoneCPDataSource.java:120)
	at org.datanucleus.store.rdbms.ConnectionFactoryImpl$ManagedConnectionImpl.getConnection(ConnectionFactoryImpl.java:501)
	at org.datanucleus.store.rdbms.RDBMSStoreManager.<init>(RDBMSStoreManager.java:298)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
	at org.datanucleus.plugin.NonManagedPluginRegistry.createExecutableExtension(NonManagedPluginRegistry.java:631)
	at org.datanucleus.plugin.PluginManager.createExecutableExtension(PluginManager.java:301)
	at org.datanucleus.NucleusContext.createStoreManagerForProperties(NucleusContext.java:1187)
	at org.datanucleus.NucleusContext.initialise(NucleusContext.java:356)
	at org.datanucleus.api.jdo.JDOPersistenceManagerFactory.freezeConfiguration(JDOPersistenceManagerFactory.java:775)
	at org.datanucleus.api.jdo.JDOPersistenceManagerFactory.createPersistenceManagerFactory(JDOPersistenceManagerFactory.java:333)
	at org.datanucleus.api.jdo.JDOPersistenceManagerFactory.getPersistenceManagerFactory(JDOPersistenceManagerFactory.java:202)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at javax.jdo.JDOHelper$16.run(JDOHelper.java:1965)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.jdo.JDOHelper.invoke(JDOHelper.java:1960)
	at javax.jdo.JDOHelper.invokeGetPersistenceManagerFactoryOnImplementation(JDOHelper.java:1166)
	at javax.jdo.JDOHelper.getPersistenceManagerFactory(JDOHelper.java:808)
	at javax.jdo.JDOHelper.getPersistenceManagerFactory(JDOHelper.java:701)
	at org.apache.hadoop.hive.metastore.ObjectStore.getPMF(ObjectStore.java:418)
	at org.apache.hadoop.hive.metastore.ObjectStore.getPersistenceManager(ObjectStore.java:447)
	at org.apache.hadoop.hive.metastore.ObjectStore.initialize(ObjectStore.java:342)
	at org.apache.hadoop.hive.metastore.ObjectStore.setConf(ObjectStore.java:298)
	at org.apache.hadoop.util.ReflectionUtils.setConf(ReflectionUtils.java:73)
	at org.apache.hadoop.util.ReflectionUtils.newInstance(ReflectionUtils.java:133)
	at org.apache.hadoop.hive.metastore.RawStoreProxy.<init>(RawStoreProxy.java:60)
	at org.apache.hadoop.hive.metastore.RawStoreProxy.getProxy(RawStoreProxy.java:69)
	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.newRawStore(HiveMetaStore.java:682)
	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.getMS(HiveMetaStore.java:660)
	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.createDefaultDB(HiveMetaStore.java:713)
	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.init(HiveMetaStore.java:508)
	at org.apache.hadoop.hive.metastore.RetryingHMSHandler.<init>(RetryingHMSHandler.java:78)
	at org.apache.hadoop.hive.metastore.RetryingHMSHandler.getProxy(RetryingHMSHandler.java:84)
	at org.apache.hadoop.hive.metastore.HiveMetaStore.newRetryingHMSHandler(HiveMetaStore.java:6313)
	at org.apache.hadoop.hive.metastore.HiveMetaStore.newRetryingHMSHandler(HiveMetaStore.java:6308)
	at org.apache.hadoop.hive.metastore.HiveMetaStore.startMetaStore(HiveMetaStore.java:6558)
	at org.apache.hadoop.hive.metastore.HiveMetaStore.main(HiveMetaStore.java:6485)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.util.RunJar.run(RunJar.java:221)
	at org.apache.hadoop.util.RunJar.main(RunJar.java:136)
------
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
	at com.jolbox.bonecp.PoolUtil.generateSQLException(PoolUtil.java:192)
	at com.jolbox.bonecp.BoneCP.<init>(BoneCP.java:422)
	at com.jolbox.bonecp.BoneCPDataSource.getConnection(BoneCPDataSource.java:120)
	at org.datanucleus.store.rdbms.ConnectionFactoryImpl$ManagedConnectionImpl.getConnection(ConnectionFactoryImpl.java:501)
	at org.datanucleus.store.rdbms.RDBMSStoreManager.<init>(RDBMSStoreManager.java:298)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
	at org.datanucleus.plugin.NonManagedPluginRegistry.createExecutableExtension(NonManagedPluginRegistry.java:631)
	at org.datanucleus.plugin.PluginManager.createExecutableExtension(PluginManager.java:301)
	at org.datanucleus.NucleusContext.createStoreManagerForProperties(NucleusContext.java:1187)
	at org.datanucleus.NucleusContext.initialise(NucleusContext.java:356)
	at org.datanucleus.api.jdo.JDOPersistenceManagerFactory.freezeConfiguration(JDOPersistenceManagerFactory.java:775)
	at org.datanucleus.api.jdo.JDOPersistenceManagerFactory.createPersistenceManagerFactory(JDOPersistenceManagerFactory.java:333)
	at org.datanucleus.api.jdo.JDOPersistenceManagerFactory.getPersistenceManagerFactory(JDOPersistenceManagerFactory.java:202)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at javax.jdo.JDOHelper$16.run(JDOHelper.java:1965)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.jdo.JDOHelper.invoke(JDOHelper.java:1960)
	at javax.jdo.JDOHelper.invokeGetPersistenceManagerFactoryOnImplementation(JDOHelper.java:1166)
	at javax.jdo.JDOHelper.getPersistenceManagerFactory(JDOHelper.java:808)
	at javax.jdo.JDOHelper.getPersistenceManagerFactory(JDOHelper.java:701)
	at org.apache.hadoop.hive.metastore.ObjectStore.getPMF(ObjectStore.java:418)
	at org.apache.hadoop.hive.metastore.ObjectStore.getPersistenceManager(ObjectStore.java:447)
	at org.apache.hadoop.hive.metastore.ObjectStore.initialize(ObjectStore.java:342)
	at org.apache.hadoop.hive.metastore.ObjectStore.setConf(ObjectStore.java:298)
	at org.apache.hadoop.util.ReflectionUtils.setConf(ReflectionUtils.java:73)
	at org.apache.hadoop.util.ReflectionUtils.newInstance(ReflectionUtils.java:133)
	at org.apache.hadoop.hive.metastore.RawStoreProxy.<init>(RawStoreProxy.java:60)
	at org.apache.hadoop.hive.metastore.RawStoreProxy.getProxy(RawStoreProxy.java:69)
	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.newRawStore(HiveMetaStore.java:682)
	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.getMS(HiveMetaStore.java:660)
	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.createDefaultDB(HiveMetaStore.java:713)
	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.init(HiveMetaStore.java:508)
	at org.apache.hadoop.hive.metastore.RetryingHMSHandler.<init>(RetryingHMSHandler.java:78)
	at org.apache.hadoop.hive.metastore.RetryingHMSHandler.getProxy(RetryingHMSHandler.java:84)
	at org.apache.hadoop.hive.metastore.HiveMetaStore.newRetryingHMSHandler(HiveMetaStore.java:6313)
	at org.apache.hadoop.hive.metastore.HiveMetaStore.newRetryingHMSHandler(HiveMetaStore.java:6308)
	at org.apache.hadoop.hive.metastore.HiveMetaStore.startMetaStore(HiveMetaStore.java:6558)
	at org.apache.hadoop.hive.metastore.HiveMetaStore.main(HiveMetaStore.java:6485)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.util.RunJar.run(RunJar.java:221)
	at org.apache.hadoop.util.RunJar.main(RunJar.java:136)
Caused by: org.postgresql.util.PSQLException: FATAL: password authentication failed for user "hive"
	at org.postgresql.core.v3.ConnectionFactoryImpl.doAuthentication(ConnectionFactoryImpl.java:291)
	at org.postgresql.core.v3.ConnectionFactoryImpl.openConnectionImpl(ConnectionFactoryImpl.java:108)
	at org.postgresql.core.ConnectionFactory.openConnection(ConnectionFactory.java:66)
	at org.postgresql.jdbc2.AbstractJdbc2Connection.<init>(AbstractJdbc2Connection.java:125)
	at org.postgresql.jdbc3.AbstractJdbc3Connection.<init>(AbstractJdbc3Connection.java:30)
	at org.postgresql.jdbc3g.AbstractJdbc3gConnection.<init>(AbstractJdbc3gConnection.java:22)
	at org.postgresql.jdbc4.AbstractJdbc4Connection.<init>(AbstractJdbc4Connection.java:30)
	at org.postgresql.jdbc4.Jdbc4Connection.<init>(Jdbc4Connection.java:24)
	at org.postgresql.Driver.makeConnection(Driver.java:393)
	at org.postgresql.Driver.connect(Driver.java:267)
	at java.sql.DriverManager.getConnection(DriverManager.java:571)
	at java.sql.DriverManager.getConnection(DriverManager.java:187)
	at com.jolbox.bonecp.BoneCP.obtainRawInternalConnection(BoneCP.java:361)
	at com.jolbox.bonecp.BoneCP.<init>(BoneCP.java:416)
	... 48 more
```
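[Editor's note] The repeated `FATAL: password authentication failed for user "hive"` points at a mismatch between the password the metastore is using and what PostgreSQL expects. A minimal way to confirm this by hand, assuming the embedded PostgreSQL on localhost:7432:

```
# Opens an interactive session against the metastore DB; psql prompts for the
# password. Getting the same FATAL error here confirms the credentials are stale.
psql -h localhost -p 7432 -U hive -d hive
```

If the login fails, one common fix is to reset the role's password in PostgreSQL (`ALTER USER hive WITH PASSWORD '...';` as the postgres superuser) and update the Hive service's metastore database password in Cloudera Manager to match.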
   
						
					
Labels:
- Apache Hive
- Cloudera Manager
			
    
	
		
		

07-16-2017 10:09 AM

@saranvisa The user is cloudera-scm, and all the other services are running fine. Are you suggesting that, apart from the passwordless sudoers entry, I should set some other permission? Please let me know what permission needs to be set, and where. Thanks.
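[Editor's note] For context, the passwordless sudo grant referred to above is typically a single sudoers entry; a minimal sketch, assuming the cloudera-scm user and a drop-in file (the file name here is illustrative):

```
# /etc/sudoers.d/cloudera-scm -- create/edit with `visudo -f` so syntax is validated
cloudera-scm ALL=(ALL) NOPASSWD: ALL
```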
						
					

07-16-2017 08:32 AM

Hi,

I am using CDH 5.12, installed via Path A. The Spark History Server is not starting; it just emits the log lines below continuously.

```
2017-07-15 13:52:47,000 INFO org.apache.spark.SecurityManager: Changing view acls to: cloudera-scm
2017-07-15 13:52:47,001 INFO org.apache.spark.SecurityManager: Changing modify acls to: cloudera-scm
2017-07-15 13:52:47,001 INFO org.apache.spark.SecurityManager: SecurityManager: authentication disabled; ui acls disabled; users with view permissions: Set(cloudera-scm); users with modify permissions: Set(cloudera-scm)
2017-07-15 13:52:47,054 INFO org.apache.spark.deploy.history.FsHistoryProvider: History server ui acls disabled; users with admin permissions:
2017-07-15 13:52:47,060 WARN org.apache.spark.SparkConf: The configuration key 'spark.history.fs.update.interval.seconds' has been deprecated as of Spark 1.4 and may be removed in the future. Please use the new key 'spark.history.fs.update.interval' instead.
2017-07-15 13:52:58,260 INFO org.apache.spark.deploy.history.HistoryServer: Started daemon with process name: 2561@compute-15
2017-07-15 13:52:58,273 INFO org.apache.spark.deploy.history.HistoryServer: Registered signal handlers for [TERM, HUP, INT]
2017-07-15 13:52:58,312 WARN org.apache.spark.SparkConf: The configuration key 'spark.history.fs.update.interval.seconds' has been deprecated as of Spark 1.4 and may be removed in the future. Please use the new key 'spark.history.fs.update.interval' instead.
```

Is there something wrong in the configuration? How can this be solved?

Thanks.
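[Editor's note] On the WARN lines: the key was renamed in Spark 1.4, so if `spark.history.fs.update.interval.seconds` appears in the History Server configuration, swapping it for the new name silences the warning. A sketch of the rename, assuming a 10-second poll interval; this alone may not explain the startup failure:

```
# deprecated since Spark 1.4:
spark.history.fs.update.interval.seconds=10
# replacement key (value takes a time suffix):
spark.history.fs.update.interval=10s
```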
						
					
Labels:
- Apache Spark
- Cloudera Manager
			
    
	
		
		

07-15-2017 07:21 AM

Thanks mbigelow. After I disabled IPv6, the distribution issue was resolved. But I am still getting another issue; I will post it in a separate thread.
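[Editor's note] For anyone landing here with the same parcel-distribution problem: one common way to disable IPv6 on an el7-family host (an assumption, based on the el7 parcel in this thread) is a sysctl drop-in:

```
# Disable IPv6 on all interfaces and make the change persist across reboots
cat <<'EOF' | sudo tee /etc/sysctl.d/99-disable-ipv6.conf
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
EOF
sudo sysctl --system   # apply the new settings immediately
```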
						
					

07-14-2017 10:55 AM

```
curl -v http://compute-15:7180/cmf/parcel/download/CDH-5.12.0-1.cdh5.12.0.p0.29-el7.parcel > /dev/null
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
* About to connect() to compute-15 port 7180 (#0)
*   Trying fe80::230:48ff:fedf:ae2c...
* Connection refused
*   Trying fe80::230:48ff:fedf:ae2d...
* Connection refused
*   Trying 192.168.200.15...
* Connected to compute-15 (192.168.200.15) port 7180 (#0)
> GET /cmf/parcel/download/CDH-5.12.0-1.cdh5.12.0.p0.29-el7.parcel HTTP/1.1
> User-Agent: curl/7.29.0
> Host: compute-15:7180
> Accept: */*
>
< HTTP/1.1 200 OK
< Expires: Thu, 01-Jan-1970 00:00:00 GMT
< Set-Cookie: CLOUDERA_MANAGER_SESSIONID=1ha3awbsj6mp2t23bq7725qsp;Path=/;HttpOnly
< Content-Type: application/x-download; charset=UTF-8
< Content-Length: 1702423659
< Content-disposition: attachment; filename="CDH-5.12.0-1.cdh5.12.0.p0.29-el7.parcel"
< CM-Parcel-Checksum: fa704f42b8da8916409c3f52f189629152ba2839
< Server: Jetty(6.1.26.cloudera.4)
<
{ [data not shown]
100 1623M  100 1623M    0     0  51.5M      0  0:00:31  0:00:31 --:--:-- 50.0M
* Connection #0 to host compute-15 left intact

wget http://compute-15:7180/cmf/parcel/download/CDH-5.12.0-1.cdh5.12.0.p0.29-el7.parcel
--2017-07-14 13:48:01--  http://compute-15:7180/cmf/parcel/download/CDH-5.12.0-1.cdh5.12.0.p0.29-el7.parcel
Resolving compute-15 (compute-15)... fe80::230:48ff:fedf:ae2c, fe80::230:48ff:fedf:ae2d, 192.168.200.15, ...
Connecting to compute-15 (compute-15)|fe80::230:48ff:fedf:ae2c|:7180... failed: Connection refused.
Connecting to compute-15 (compute-15)|fe80::230:48ff:fedf:ae2d|:7180... failed: Connection refused.
Connecting to compute-15 (compute-15)|192.168.200.15|:7180... connected.
HTTP request sent, awaiting response... 200 OK
Length: 1702423659 (1.6G) [application/x-download]
Saving to: ‘CDH-5.12.0-1.cdh5.12.0.p0.29-el7.parcel’

100%[==========================================>] 1,70,24,23,659  50.0MB/s   in 31s

2017-07-14 13:48:32 (51.6 MB/s) - ‘CDH-5.12.0-1.cdh5.12.0.p0.29-el7.parcel’ saved [1702423659/1702423659]
```
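[Editor's note] Both transcripts above show the client trying the link-local IPv6 addresses first (connection refused) before succeeding over IPv4; for quick testing, curl and wget can be pinned to IPv4 with their `-4` flags:

```
# Force IPv4 so the refused IPv6 attempts are skipped entirely
curl -4 -v http://compute-15:7180/cmf/parcel/download/CDH-5.12.0-1.cdh5.12.0.p0.29-el7.parcel > /dev/null
wget -4 http://compute-15:7180/cmf/parcel/download/CDH-5.12.0-1.cdh5.12.0.p0.29-el7.parcel
```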
						
					

07-14-2017 10:39 AM

Hi,

Thanks for the quick reply. I am using the Cloudera automated installation. How can I fix this manually? Or is there any other way?

07-14-2017 08:57 AM

With the CDH 5.12.0 installation, manifest.json is not getting downloaded. Here is the error log:

```
/var/log/cloudera-scm-server/cloudera-scm-server.log:1191:2017-07-14 11:49:48,425 ERROR ParcelUpdateService:com.cloudera.parcel.components.ParcelDownloaderImpl: Failed to download manifest. Status code: 404 URI: https://www.cloudera.com/downloads/manifest.json
```

I tried the URI to check; it looks like the resource is not there:

```
wget https://www.cloudera.com/downloads/manifest.json
--2017-07-14 11:56:01--  https://www.cloudera.com/downloads/manifest.json
Resolving www.cloudera.com (www.cloudera.com)... 52.52.206.175, 52.52.88.106
Connecting to www.cloudera.com (www.cloudera.com)|52.52.206.175|:443... connected.
HTTP request sent, awaiting response... 404 Not Found
2017-07-14 11:56:07 ERROR 404: Not Found.
```
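[Editor's note] A 404 from www.cloudera.com/downloads/manifest.json suggests the Remote Parcel Repository URL points at the website rather than the parcel archive; the versioned repositories on archive.cloudera.com served a manifest.json alongside the parcels. An illustrative URL of the kind set under Parcel Settings (verify the exact path for your release):

```
# Hypothetical Remote Parcel Repository URL for CDH 5.12.0 (confirm before use):
https://archive.cloudera.com/cdh5/parcels/5.12.0/
```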
						
					
Labels:
- Cloudera Manager