Member since: 09-12-2017
Posts: 5
Kudos Received: 0
Solutions: 1

My Accepted Solutions
Title | Views | Posted |
---|---|---|
| 7106 | 03-05-2018 01:04 AM |
04-26-2018 07:21 AM
I'm trying to create a Hive table with Snappy compression via Spark2 on CDH 5.14 (SLES 12). The commands are simple:
spark.sqlContext.setConf("spark.sql.parquet.compression.codec", "snappy")
sql("CREATE TABLE parquet_table_name (x INT, y STRING) STORED AS PARQUET")
sql("INSERT INTO parquet_table_name VALUES(1, 'test')")
The INSERT then fails with the error below:
[Stage 0:> (0 + 1) / 1]18/04/26 21:03:44 WARN scheduler.TaskSetManager: Lost task 0.0 in stage 0.0 (TID 0, master3-1.hadoop2.moph.go.th, executor 1): org.apache.spark.SparkException: Task failed while writing rows.
at org.apache.spark.sql.execution.datasources.FileFormatWriter$.org$apache$spark$sql$execution$datasources$FileFormatWriter$$executeTask(FileFormatWriter.scala:285)
at org.apache.spark.sql.execution.datasources.FileFormatWriter$$anonfun$write$1.apply(FileFormatWriter.scala:197)
at org.apache.spark.sql.execution.datasources.FileFormatWriter$$anonfun$write$1.apply(FileFormatWriter.scala:196)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
at org.apache.spark.scheduler.Task.run(Task.scala:109)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.UnsatisfiedLinkError: org.xerial.snappy.SnappyNative.maxCompressedLength(I)I
at org.xerial.snappy.SnappyNative.maxCompressedLength(Native Method)
at org.xerial.snappy.Snappy.maxCompressedLength(Snappy.java:316)
at parquet.hadoop.codec.SnappyCompressor.compress(SnappyCompressor.java:67)
at org.apache.hadoop.io.compress.CompressorStream.compress(CompressorStream.java:81)
at org.apache.hadoop.io.compress.CompressorStream.finish(CompressorStream.java:92)
at parquet.hadoop.CodecFactory$BytesCompressor.compress(CodecFactory.java:112)
at parquet.hadoop.ColumnChunkPageWriteStore$ColumnChunkPageWriter.writePage(ColumnChunkPageWriteStore.java:89)
at parquet.column.impl.ColumnWriterV1.writePage(ColumnWriterV1.java:153)
at parquet.column.impl.ColumnWriterV1.flush(ColumnWriterV1.java:241)
at parquet.column.impl.ColumnWriteStoreV1.flush(ColumnWriteStoreV1.java:126)
at parquet.hadoop.InternalParquetRecordWriter.flushRowGroupToStore(InternalParquetRecordWriter.java:159)
at parquet.hadoop.InternalParquetRecordWriter.close(InternalParquetRecordWriter.java:111)
at parquet.hadoop.ParquetRecordWriter.close(ParquetRecordWriter.java:112)
at org.apache.spark.sql.execution.datasources.parquet.ParquetOutputWriter.close(ParquetOutputWriter.scala:42)
at org.apache.spark.sql.execution.datasources.FileFormatWriter$SingleDirectoryWriteTask.releaseResources(FileFormatWriter.scala:405)
at org.apache.spark.sql.execution.datasources.FileFormatWriter$SingleDirectoryWriteTask.execute(FileFormatWriter.scala:396)
at org.apache.spark.sql.execution.datasources.FileFormatWriter$$anonfun$org$apache$spark$sql$execution$datasources$FileFormatWriter$$executeTask$3.apply(FileFormatWriter.scala:269)
at org.apache.spark.sql.execution.datasources.FileFormatWriter$$anonfun$org$apache$spark$sql$execution$datasources$FileFormatWriter$$executeTask$3.apply(FileFormatWriter.scala:267)
at org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1411)
at org.apache.spark.sql.execution.datasources.FileFormatWriter$.org$apache$spark$sql$execution$datasources$FileFormatWriter$$executeTask(FileFormatWriter.scala:272)
... 8 more
18/04/26 21:03:44 ERROR scheduler.TaskSetManager: Task 0 in stage 0.0 failed 4 times; aborting job
18/04/26 21:03:44 ERROR datasources.FileFormatWriter: Aborting job null.
org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 0.0 failed 4 times, most recent failure: Lost task 0.3 in stage 0.0 (TID 3, master3-1.hadoop2.moph.go.th, executor 1): org.apache.spark.SparkException: Task failed while writing rows.
at org.apache.spark.sql.execution.datasources.FileFormatWriter$.org$apache$spark$sql$execution$datasources$FileFormatWriter$$executeTask(FileFormatWriter.scala:285)
at org.apache.spark.sql.execution.datasources.FileFormatWriter$$anonfun$write$1.apply(FileFormatWriter.scala:197)
at org.apache.spark.sql.execution.datasources.FileFormatWriter$$anonfun$write$1.apply(FileFormatWriter.scala:196)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
at org.apache.spark.scheduler.Task.run(Task.scala:109)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.UnsatisfiedLinkError: org.xerial.snappy.SnappyNative.maxCompressedLength(I)I
at org.xerial.snappy.SnappyNative.maxCompressedLength(Native Method)
at org.xerial.snappy.Snappy.maxCompressedLength(Snappy.java:316)
at parquet.hadoop.codec.SnappyCompressor.compress(SnappyCompressor.java:67)
at org.apache.hadoop.io.compress.CompressorStream.compress(CompressorStream.java:81)
at org.apache.hadoop.io.compress.CompressorStream.finish(CompressorStream.java:92)
at parquet.hadoop.CodecFactory$BytesCompressor.compress(CodecFactory.java:112)
at parquet.hadoop.ColumnChunkPageWriteStore$ColumnChunkPageWriter.writePage(ColumnChunkPageWriteStore.java:89)
at parquet.column.impl.ColumnWriterV1.writePage(ColumnWriterV1.java:153)
at parquet.column.impl.ColumnWriterV1.flush(ColumnWriterV1.java:241)
at parquet.column.impl.ColumnWriteStoreV1.flush(ColumnWriteStoreV1.java:126)
at parquet.hadoop.InternalParquetRecordWriter.flushRowGroupToStore(InternalParquetRecordWriter.java:159)
at parquet.hadoop.InternalParquetRecordWriter.close(InternalParquetRecordWriter.java:111)
at parquet.hadoop.ParquetRecordWriter.close(ParquetRecordWriter.java:112)
at org.apache.spark.sql.execution.datasources.parquet.ParquetOutputWriter.close(ParquetOutputWriter.scala:42)
at org.apache.spark.sql.execution.datasources.FileFormatWriter$SingleDirectoryWriteTask.releaseResources(FileFormatWriter.scala:405)
at org.apache.spark.sql.execution.datasources.FileFormatWriter$SingleDirectoryWriteTask.execute(FileFormatWriter.scala:396)
at org.apache.spark.sql.execution.datasources.FileFormatWriter$$anonfun$org$apache$spark$sql$execution$datasources$FileFormatWriter$$executeTask$3.apply(FileFormatWriter.scala:269)
at org.apache.spark.sql.execution.datasources.FileFormatWriter$$anonfun$org$apache$spark$sql$execution$datasources$FileFormatWriter$$executeTask$3.apply(FileFormatWriter.scala:267)
at org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1411)
at org.apache.spark.sql.execution.datasources.FileFormatWriter$.org$apache$spark$sql$execution$datasources$FileFormatWriter$$executeTask(FileFormatWriter.scala:272)
... 8 more
Driver stacktrace:
at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1599)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1587)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1586)
at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1586)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:831)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:831)
at scala.Option.foreach(Option.scala:257)
at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:831)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1820)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1769)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1758)
at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:642)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2027)
at org.apache.spark.sql.execution.datasources.FileFormatWriter$.write(FileFormatWriter.scala:194)
at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelationCommand.run(InsertIntoHadoopFsRelationCommand.scala:154)
at org.apache.spark.sql.execution.command.DataWritingCommandExec.sideEffectResult$lzycompute(commands.scala:104)
at org.apache.spark.sql.execution.command.DataWritingCommandExec.sideEffectResult(commands.scala:102)
at org.apache.spark.sql.execution.command.DataWritingCommandExec.executeCollect(commands.scala:115)
at org.apache.spark.sql.Dataset$$anonfun$6.apply(Dataset.scala:190)
at org.apache.spark.sql.Dataset$$anonfun$6.apply(Dataset.scala:190)
at org.apache.spark.sql.Dataset$$anonfun$52.apply(Dataset.scala:3253)
at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:77)
at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3252)
at org.apache.spark.sql.Dataset.<init>(Dataset.scala:190)
at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:75)
at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:638)
at $line23.$read$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw.<init>(<console>:24)
at $line23.$read$$iw$$iw$$iw$$iw$$iw$$iw$$iw.<init>(<console>:29)
at $line23.$read$$iw$$iw$$iw$$iw$$iw$$iw.<init>(<console>:31)
at $line23.$read$$iw$$iw$$iw$$iw$$iw.<init>(<console>:33)
at $line23.$read$$iw$$iw$$iw$$iw.<init>(<console>:35)
at $line23.$read$$iw$$iw$$iw.<init>(<console>:37)
at $line23.$read$$iw$$iw.<init>(<console>:39)
at $line23.$read$$iw.<init>(<console>:41)
at $line23.$read.<init>(<console>:43)
at $line23.$read$.<init>(<console>:47)
at $line23.$read$.<clinit>(<console>)
at $line23.$eval$.$print$lzycompute(<console>:7)
at $line23.$eval$.$print(<console>:6)
at $line23.$eval.$print(<console>)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at scala.tools.nsc.interpreter.IMain$ReadEvalPrint.call(IMain.scala:786)
at scala.tools.nsc.interpreter.IMain$Request.loadAndRun(IMain.scala:1047)
at scala.tools.nsc.interpreter.IMain$WrappedRequest$$anonfun$loadAndRunReq$1.apply(IMain.scala:638)
at scala.tools.nsc.interpreter.IMain$WrappedRequest$$anonfun$loadAndRunReq$1.apply(IMain.scala:637)
at scala.reflect.internal.util.ScalaClassLoader$class.asContext(ScalaClassLoader.scala:31)
at scala.reflect.internal.util.AbstractFileClassLoader.asContext(AbstractFileClassLoader.scala:19)
at scala.tools.nsc.interpreter.IMain$WrappedRequest.loadAndRunReq(IMain.scala:637)
at scala.tools.nsc.interpreter.IMain.interpret(IMain.scala:569)
at scala.tools.nsc.interpreter.IMain.interpret(IMain.scala:565)
at scala.tools.nsc.interpreter.ILoop.interpretStartingWith(ILoop.scala:807)
at scala.tools.nsc.interpreter.ILoop.command(ILoop.scala:681)
at scala.tools.nsc.interpreter.ILoop.processLine(ILoop.scala:395)
at scala.tools.nsc.interpreter.ILoop.loop(ILoop.scala:415)
at scala.tools.nsc.interpreter.ILoop$$anonfun$process$1.apply$mcZ$sp(ILoop.scala:923)
at scala.tools.nsc.interpreter.ILoop$$anonfun$process$1.apply(ILoop.scala:909)
at scala.tools.nsc.interpreter.ILoop$$anonfun$process$1.apply(ILoop.scala:909)
at scala.reflect.internal.util.ScalaClassLoader$.savingContextLoader(ScalaClassLoader.scala:97)
at scala.tools.nsc.interpreter.ILoop.process(ILoop.scala:909)
at org.apache.spark.repl.Main$.doMain(Main.scala:76)
at org.apache.spark.repl.Main$.main(Main.scala:56)
at org.apache.spark.repl.Main.main(Main.scala)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)
at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:892)
at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:197)
at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:227)
at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:136)
at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
Caused by: org.apache.spark.SparkException: Task failed while writing rows.
at org.apache.spark.sql.execution.datasources.FileFormatWriter$.org$apache$spark$sql$execution$datasources$FileFormatWriter$$executeTask(FileFormatWriter.scala:285)
at org.apache.spark.sql.execution.datasources.FileFormatWriter$$anonfun$write$1.apply(FileFormatWriter.scala:197)
at org.apache.spark.sql.execution.datasources.FileFormatWriter$$anonfun$write$1.apply(FileFormatWriter.scala:196)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
at org.apache.spark.scheduler.Task.run(Task.scala:109)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.UnsatisfiedLinkError: org.xerial.snappy.SnappyNative.maxCompressedLength(I)I
at org.xerial.snappy.SnappyNative.maxCompressedLength(Native Method)
at org.xerial.snappy.Snappy.maxCompressedLength(Snappy.java:316)
at parquet.hadoop.codec.SnappyCompressor.compress(SnappyCompressor.java:67)
at org.apache.hadoop.io.compress.CompressorStream.compress(CompressorStream.java:81)
at org.apache.hadoop.io.compress.CompressorStream.finish(CompressorStream.java:92)
at parquet.hadoop.CodecFactory$BytesCompressor.compress(CodecFactory.java:112)
at parquet.hadoop.ColumnChunkPageWriteStore$ColumnChunkPageWriter.writePage(ColumnChunkPageWriteStore.java:89)
at parquet.column.impl.ColumnWriterV1.writePage(ColumnWriterV1.java:153)
at parquet.column.impl.ColumnWriterV1.flush(ColumnWriterV1.java:241)
at parquet.column.impl.ColumnWriteStoreV1.flush(ColumnWriteStoreV1.java:126)
at parquet.hadoop.InternalParquetRecordWriter.flushRowGroupToStore(InternalParquetRecordWriter.java:159)
at parquet.hadoop.InternalParquetRecordWriter.close(InternalParquetRecordWriter.java:111)
at parquet.hadoop.ParquetRecordWriter.close(ParquetRecordWriter.java:112)
at org.apache.spark.sql.execution.datasources.parquet.ParquetOutputWriter.close(ParquetOutputWriter.scala:42)
at org.apache.spark.sql.execution.datasources.FileFormatWriter$SingleDirectoryWriteTask.releaseResources(FileFormatWriter.scala:405)
at org.apache.spark.sql.execution.datasources.FileFormatWriter$SingleDirectoryWriteTask.execute(FileFormatWriter.scala:396)
at org.apache.spark.sql.execution.datasources.FileFormatWriter$$anonfun$org$apache$spark$sql$execution$datasources$FileFormatWriter$$executeTask$3.apply(FileFormatWriter.scala:269)
at org.apache.spark.sql.execution.datasources.FileFormatWriter$$anonfun$org$apache$spark$sql$execution$datasources$FileFormatWriter$$executeTask$3.apply(FileFormatWriter.scala:267)
at org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1411)
at org.apache.spark.sql.execution.datasources.FileFormatWriter$.org$apache$spark$sql$execution$datasources$FileFormatWriter$$executeTask(FileFormatWriter.scala:272)
... 8 more
The same statements work if I set the compression codec to gzip instead.
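One way to narrow this down is a small spark-shell check (a sketch of my own; the partition count, test string, and suggested causes are assumptions, not taken from the log above) that forces the executors to load snappy-java's native bindings outside of the Parquet writer. If an executor cannot load the bundled native library (for example because its temp directory is mounted noexec, or a stray snappy-java jar shadows the one shipped with CDH), Snappy.compress fails with the same UnsatisfiedLinkError:

import org.xerial.snappy.Snappy

// Run one small task per partition and try to exercise native Snappy on
// whichever executor picks it up; report hostname plus pass/fail.
val report = spark.sparkContext
  .parallelize(1 to 20, 20)
  .map { _ =>
    val host = java.net.InetAddress.getLocalHost.getHostName
    try {
      Snappy.compress("snappy smoke test".getBytes("UTF-8"))
      (host, "native snappy OK")
    } catch {
      case t: Throwable => (host, s"FAILED: $t")
    }
  }
  .collect()
  .distinct

report.foreach(println)

Any host that prints FAILED is worth inspecting for a mismatched or duplicate snappy-java jar on the executor classpath.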
Labels:
- Apache Hive
- Apache Spark
03-05-2018 01:04 AM
Yes. I ended up using user impersonation, following this article: http://gethue.com/ldap-or-pam-pass-through-authentication-with-hive-or-impala/ plus adding
<property>
  <name>hadoop.proxyuser.hue_hive.groups</name>
  <value>*</value>
</property>
to core-site.xml, per this answer: http://community.cloudera.com/t5/Web-UI-Hue-Beeswax/Failed-to-validate-proxy-privilege-of-hue-hive-for-administrator/m-p/49742
02-25-2018 07:09 AM
Hi, I'm using CDH 5.12 with 16 nodes. Our setup uses LDAP authentication for Hue, with authorization handled by Sentry, and everything works as expected: we can create a user in LDAP and grant permissions with Sentry. However, I also want to apply the same login to beeline. When I turn on "Enable LDAP Authentication" in the Hive configuration, Hue fails with the message "Bad status: 3 (Error validating the login)" and the database list no longer loads. Thank you very much for any clue or help.
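For reference, a hedged Scala sketch of what beeline does once "Enable LDAP Authentication" is on: it passes an LDAP username and password through the Hive JDBC driver (the hostname, port, and credentials below are placeholders, not values from this cluster). If this connects with an LDAP account while Hue shows "Bad status: 3", the remaining gap is usually the Hue-to-HiveServer2 hop, which needs LDAP pass-through or impersonation configured as in the accepted solution above:

import java.sql.DriverManager

// Placeholder connection details -- substitute a real HiveServer2 host and an LDAP account.
val url  = "jdbc:hive2://hiveserver2.example.com:10000/default"
val user = "ldap_user"
val pass = "ldap_password"

Class.forName("org.apache.hive.jdbc.HiveDriver")
val conn = DriverManager.getConnection(url, user, pass)   // same username/password path that beeline -n/-p uses
val rs = conn.createStatement().executeQuery("SHOW DATABASES")
while (rs.next()) println(rs.getString(1))
conn.close()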
Labels:
- Apache Hive
- Apache Sentry
- Cloudera Hue
09-20-2017 03:12 AM
Thank you very much. You're right. This turned out to mean I had to rebuild the whole cluster from scratch.
09-12-2017 11:38 PM
After installing Kerberos and enabling it on the cluster, restarting namenode1 and namenode2 gives this error:
17/09/13 11:39:39 INFO namenode.FSNamesystem: fsOwner = hdfs/namenode1.xx@MOPH.COM (auth:KERBEROS)
17/09/13 11:39:39 INFO namenode.FSNamesystem: supergroup = root
17/09/13 11:39:39 INFO namenode.FSNamesystem: isPermissionEnabled = true
17/09/13 11:39:39 INFO namenode.FSNamesystem: Determined nameservice ID: nameservice1
17/09/13 11:39:39 INFO namenode.FSNamesystem: HA Enabled: true
17/09/13 11:39:39 INFO namenode.FSNamesystem: Append Enabled: true
17/09/13 11:39:39 INFO util.GSet: Computing capacity for map INodeMap
17/09/13 11:39:39 INFO util.GSet: VM type = 64-bit
17/09/13 11:39:39 INFO util.GSet: 1.0% max memory 3.9 GB = 39.6 MB
17/09/13 11:39:39 INFO util.GSet: capacity = 2^22 = 4194304 entries
17/09/13 11:39:39 INFO namenode.FSDirectory: POSIX ACL inheritance enabled? false
17/09/13 11:39:39 INFO namenode.NameNode: Caching file names occuring more than 10 times
17/09/13 11:39:39 INFO util.GSet: Computing capacity for map cachedBlocks
17/09/13 11:39:39 INFO util.GSet: VM type = 64-bit
17/09/13 11:39:39 INFO util.GSet: 0.25% max memory 3.9 GB = 9.9 MB
17/09/13 11:39:39 INFO util.GSet: capacity = 2^20 = 1048576 entries
17/09/13 11:39:39 INFO namenode.FSNamesystem: dfs.namenode.safemode.threshold-pct = 0.9990000128746033
17/09/13 11:39:39 INFO namenode.FSNamesystem: dfs.namenode.safemode.min.datanodes = 1
17/09/13 11:39:39 INFO namenode.FSNamesystem: dfs.namenode.safemode.extension = 30000
17/09/13 11:39:39 INFO metrics.TopMetrics: NNTop conf: dfs.namenode.top.window.num.buckets = 10
17/09/13 11:39:39 INFO metrics.TopMetrics: NNTop conf: dfs.namenode.top.num.users = 10
17/09/13 11:39:39 INFO metrics.TopMetrics: NNTop conf: dfs.namenode.top.windows.minutes = 1,5,25
17/09/13 11:39:39 INFO namenode.FSNamesystem: Retry cache on namenode is enabled
17/09/13 11:39:39 INFO namenode.FSNamesystem: Retry cache will use 0.03 of total heap and retry cache entry expiry time is 600000 millis
17/09/13 11:39:39 INFO util.GSet: Computing capacity for map NameNodeRetryCache
17/09/13 11:39:39 INFO util.GSet: VM type = 64-bit
17/09/13 11:39:39 INFO util.GSet: 0.029999999329447746% max memory 3.9 GB = 1.2 MB
17/09/13 11:39:39 INFO util.GSet: capacity = 2^17 = 131072 entries
17/09/13 11:39:39 INFO namenode.FSNamesystem: ACLs enabled? true
17/09/13 11:39:39 INFO namenode.FSNamesystem: XAttrs enabled? true
17/09/13 11:39:39 INFO namenode.FSNamesystem: Maximum size of an xattr: 16384
Running in non-interactive mode, and data appears to exist in QJM to [172.16.120.31:8485, 172.16.120.32:8485, 172.16.120.46:8485]. Not formatting.
17/09/13 11:39:39 INFO util.ExitUtil: Exiting with status 1
17/09/13 11:39:39 INFO namenode.NameNode: SHUTDOWN_MSG:
When I try to run it manually with the command exec /usr/lib64/cmf/service/hdfs/hdfs.sh format-namenode cluster14 it errors as:
Wed Sep 13 13:20:59 ICT 2017
Wed Sep 13 13:20:59 ICT 2017
+ source_parcel_environment
+ '[' '!' -z '' ']'
+ locate_cdh_java_home
+ '[' -z /usr/java/jdk1.7.0_67-cloudera ']'
+ verify_java_home
+ '[' -z /usr/java/jdk1.7.0_67-cloudera ']'
+ echo JAVA_HOME=/usr/java/jdk1.7.0_67-cloudera
JAVA_HOME=/usr/java/jdk1.7.0_67-cloudera
+ . /usr/lib64/cmf/service/common/cdh-default-hadoop
++ [[ -z 5 ]]
++ '[' 5 = 3 ']'
++ '[' 5 = -3 ']'
++ '[' 5 -ge 4 ']'
++ export HADOOP_HOME_WARN_SUPPRESS=true
++ HADOOP_HOME_WARN_SUPPRESS=true
++ export HADOOP_PREFIX=
++ HADOOP_PREFIX=
++ export HADOOP_LIBEXEC_DIR=/libexec
++ HADOOP_LIBEXEC_DIR=/libexec
++ export HADOOP_CONF_DIR=/var/run/cloudera-scm-agent/process/5020-hdfs-NAMENODE-format
++ HADOOP_CONF_DIR=/var/run/cloudera-scm-agent/process/5020-hdfs-NAMENODE-format
++ export HADOOP_COMMON_HOME=
++ HADOOP_COMMON_HOME=
++ export HADOOP_HDFS_HOME=
++ HADOOP_HDFS_HOME=
++ export HADOOP_MAPRED_HOME=
++ HADOOP_MAPRED_HOME=
++ '[' 5 = 4 ']'
++ '[' 5 = 5 ']'
++ export HADOOP_YARN_HOME=
++ HADOOP_YARN_HOME=
++ replace_pid
++ echo
++ sed 's#{{PID}}#35248#g'
+ export HADOOP_NAMENODE_OPTS=
+ HADOOP_NAMENODE_OPTS=
++ replace_pid
++ echo
++ sed 's#{{PID}}#35248#g'
+ export HADOOP_DATANODE_OPTS=
+ HADOOP_DATANODE_OPTS=
++ replace_pid
++ echo
++ sed 's#{{PID}}#35248#g'
+ export HADOOP_SECONDARYNAMENODE_OPTS=
+ HADOOP_SECONDARYNAMENODE_OPTS=
++ replace_pid
++ echo
++ sed 's#{{PID}}#35248#g'
+ export HADOOP_NFS3_OPTS=
+ HADOOP_NFS3_OPTS=
++ replace_pid
++ echo
++ sed 's#{{PID}}#35248#g'
+ export HADOOP_JOURNALNODE_OPTS=
+ HADOOP_JOURNALNODE_OPTS=
+ '[' 5 -ge 4 ']'
+ HDFS_BIN=/bin/hdfs
+ export 'HADOOP_OPTS=-Djava.net.preferIPv4Stack=true '
+ HADOOP_OPTS='-Djava.net.preferIPv4Stack=true '
+ echo 'using /usr/java/jdk1.7.0_67-cloudera as JAVA_HOME'
using /usr/java/jdk1.7.0_67-cloudera as JAVA_HOME
+ echo 'using 5 as CDH_VERSION'
using 5 as CDH_VERSION
+ echo 'using /var/run/cloudera-scm-agent/process/5020-hdfs-NAMENODE-format as CONF_DIR'
using /var/run/cloudera-scm-agent/process/5020-hdfs-NAMENODE-format as CONF_DIR
+ echo 'using hdfs as SECURE_USER'
using hdfs as SECURE_USER
+ echo 'using hadoop as SECURE_GROUP'
using hadoop as SECURE_GROUP
+ set_hadoop_classpath
+ set_classpath_in_var HADOOP_CLASSPATH
+ '[' -z HADOOP_CLASSPATH ']'
+ [[ -n /usr/share/cmf ]]
++ find /usr/share/cmf/lib/plugins -maxdepth 1 -name '*.jar'
++ tr '\n' :
+ ADD_TO_CP=/usr/share/cmf/lib/plugins/event-publish-5.10.0-shaded.jar:/usr/share/cmf/lib/plugins/tt-instrumentation-5.10.0.jar:
+ [[ -n '' ]]
+ eval 'OLD_VALUE=$HADOOP_CLASSPATH'
++ OLD_VALUE=
+ NEW_VALUE=/usr/share/cmf/lib/plugins/event-publish-5.10.0-shaded.jar:/usr/share/cmf/lib/plugins/tt-instrumentation-5.10.0.jar:
+ export HADOOP_CLASSPATH=/usr/share/cmf/lib/plugins/event-publish-5.10.0-shaded.jar:/usr/share/cmf/lib/plugins/tt-instrumentation-5.10.0.jar
+ HADOOP_CLASSPATH=/usr/share/cmf/lib/plugins/event-publish-5.10.0-shaded.jar:/usr/share/cmf/lib/plugins/tt-instrumentation-5.10.0.jar
+ set -x
+ replace_conf_dir
+ echo CONF_DIR=/var/run/cloudera-scm-agent/process/5020-hdfs-NAMENODE-format
CONF_DIR=/var/run/cloudera-scm-agent/process/5020-hdfs-NAMENODE-format
+ echo CMF_CONF_DIR=/etc/cloudera-scm-agent
CMF_CONF_DIR=/etc/cloudera-scm-agent
+ EXCLUDE_CMF_FILES=('cloudera-config.sh' 'httpfs.sh' 'hue.sh' 'impala.sh' 'sqoop.sh' 'supervisor.conf' '*.log' '*.keytab' '*jceks')
++ printf '! -name %s ' cloudera-config.sh httpfs.sh hue.sh impala.sh sqoop.sh supervisor.conf '*.log' hdfs.keytab '*jceks'
+ find /var/run/cloudera-scm-agent/process/5020-hdfs-NAMENODE-format -type f '!' -path '/var/run/cloudera-scm-agent/process/5020-hdfs-NAMENODE-format/logs/*' '!' -name cloudera-config.sh '!' -name httpfs.sh '!' -name hue.sh '!' -name impala.sh '!' -name sqoop.sh '!' -name supervisor.conf '!' -name '*.log' '!' -name hdfs.keytab '!' -name '*jceks' -exec perl -pi -e 's#{{CMF_CONF_DIR}}#/var/run/cloudera-scm-agent/process/5020-hdfs-NAMENODE-format#g' '{}' ';'
+ make_scripts_executable
+ find /var/run/cloudera-scm-agent/process/5020-hdfs-NAMENODE-format -regex '.*\.\(py\|sh\)$' -exec chmod u+x '{}' ';'
+ '[' DATANODE_MAX_LOCKED_MEMORY '!=' '' ']'
+ ulimit -l
64
+ export HADOOP_IDENT_STRING=hdfs
+ HADOOP_IDENT_STRING=hdfs
+ '[' -n '' ']'
+ '[' mkdir '!=' format-namenode ']'
+ acquire_kerberos_tgt hdfs.keytab
+ '[' -z hdfs.keytab ']'
+ '[' -n '' ']'
+ '[' validate-writable-empty-dirs = format-namenode ']'
+ '[' file-operation = format-namenode ']'
+ '[' bootstrap = format-namenode ']'
+ '[' failover = format-namenode ']'
+ '[' transition-to-active = format-namenode ']'
+ '[' initializeSharedEdits = format-namenode ']'
+ '[' initialize-znode = format-namenode ']'
+ '[' format-namenode = format-namenode ']'
+ '[' -z '' ']'
+ echo 'No storage dirs specified.'
No storage dirs specified.
The hdfs-site.xml configuration looks like this:
<property>
<name>dfs.ha.namenodes.nameservice1</name>
<value>namenode108,namenode123</value>
</property>
<property>
<name>dfs.namenode.name.dir.nameservice1.namenode108</name>
<value>file:///mnt/disk1/dfs/nn,file:///mnt/disk2/dfs/nn</value>
</property>
<property>
<name>dfs.namenode.shared.edits.dir.nameservice1.namenode108</name>
<value>qjournal://namenode1.xx:8485;namenode2.xx:8485;service.moph.com:8485/nameservice1</value>
</property>
<property>
<name>dfs.namenode.rpc-address.nameservice1.namenode108</name>
<value>namenode1.xx:8020</value>
</property>
<property>
<name>dfs.namenode.servicerpc-address.nameservice1.namenode108</name>
<value>namenode1.xx:8022</value>
</property>
<property>
<name>dfs.namenode.http-address.nameservice1.namenode108</name>
<value>namenode1.xx:50070</value>
</property>
<property>
<name>dfs.namenode.https-address.nameservice1.namenode108</name>
<value>namenode1.xx:50470</value>
</property>
<property>
<name>dfs.namenode.name.dir.nameservice1.namenode123</name>
<value>file:///mnt/disk1/dfs/nn,file:///mnt/disk2/dfs/nn</value>
</property>
Labels:
- Cloudera Manager