Member since: 01-15-2016
Posts: 37
Kudos Received: 13
Solutions: 3
My Accepted Solutions
| Title | Views | Posted |
|---|---|---|
| | 4250 | 04-24-2017 01:48 PM |
| | 1210 | 06-21-2016 12:05 PM |
| | 1467 | 03-07-2016 10:43 PM |
06-18-2019
02:13 PM
Excellent post, very well explained and extremely helpful.
... View more
06-06-2019
07:57 PM
We are attempting to integrate the AWS Single Sign-On application with KnoxSSO.
KnoxSSO.xml:
<topology>
<gateway>
<provider>
<role>federation</role>
<name>pac4j</name>
<enabled>true</enabled>
<param>
<name>pac4j.callbackUrl</name>
<value>https://<REDACTED>.us-east-2.elb.amazonaws.com/cr-cluster/knoxsso/api/v1/websso</value>
</param>
<param>
<name>clientName</name>
<value>SAML2Client</value>
</param>
<param>
<name>saml.identityProviderMetadataPath</name>
<value>https://portal.sso.us-east-1.amazonaws.com/saml/metadata/<REDACTED></value>
</param>
<param>
<name>saml.serviceProviderMetadataPath</name>
<value>/tmp/sp-metadata.xml</value>
</param>
<param>
<name>saml.serviceProviderEntityId</name>
<value>https://<REDACTED>.us-east-2.elb.amazonaws.com/cr-cluster/knoxsso/api/v1/websso?pac4jCallback=true&client_name=SAML2Client</value>
</param>
</provider>
<provider>
<role>identity-assertion</role>
<name>Default</name>
<enabled>true</enabled>
</provider>
</gateway>
<service>
<role>KNOXSSO</role>
<param>
<name>knoxsso.cookie.secure.only</name>
<value>true</value>
</param>
<param>
<name>knoxsso.token.ttl</name>
<value>3000000</value>
</param>
<param>
<name>knoxsso.redirect.whitelist.regex</name>
<value>.*</value>
</param>
<param>
<name>knoxsso.cookie.max.age</name>
<value>session</value>
</param>
</service>
</topology>
Gateway.xml:
<topology>
<gateway>
<provider>
<role>federation</role>
<name>SSOCookieProvider</name>
<enabled>true</enabled>
<param>
<name>sso.authentication.provider.url</name>
<value>https://<REDACTED>.us-east-2.elb.amazonaws.com/cr-cluster/knoxsso/api/v1/websso</value>
</param>
</provider>
<provider>
<role>identity-assertion</role>
<name>Default</name>
<enabled>true</enabled>
</provider>
</gateway>
<service>
<role>AMBARI</role>
<url>http://ip-10-10-32-143.us-east-2.compute.internal:8080</url>
</service>
<service>
<role>AMBARIUI</role>
<url>http://ip-10-10-32-143.us-east-2.compute.internal:8080</url>
</service>
<service>
<role>AMBARIWS</role>
<url>http://ip-10-10-32-143.us-east-2.compute.internal:8080</url>
</service>
<service>
<role>NAMENODE</role>
<url>hdfs://mycluster</url>
</service>
<service>
<role>JOBTRACKER</role>
<url>rpc://ip-10-10-32-181.us-east-2.compute.internal:8050</url>
</service>
<service>
<role>WEBHDFS</role>
<url>http://ip-10-10-32-169.us-east-2.compute.internal:50070/webhdfs</url>
<url>http://ip-10-10-32-181.us-east-2.compute.internal:50070/webhdfs</url>
</service>
<service>
<role>HIVE</role>
<url>http://ip-10-10-32-181.us-east-2.compute.internal:10001/cliservice</url>
</service>
<service>
<role>RESOURCEMANAGER</role>
<url>http://ip-10-10-32-181.us-east-2.compute.internal:8088/ws</url>
</service>
<service>
<role>JOBHISTORYUI</role>
<url>http://ip-10-10-32-169.us-east-2.compute.internal:19888</url>
<url>http://ip-10-10-32-181.us-east-2.compute.internal:19888</url>
<url>http://ip-10-10-32-181.us-east-2.compute.internal:19888</url>
<url>http://ip-10-10-32-181.us-east-2.compute.internal:19888</url>
</service>
<service>
<role>SPARKHISTORYUI</role>
<url>http://ip-10-10-32-169.us-east-2.compute.internal:18081/</url>
<url>http://ip-10-10-32-181.us-east-2.compute.internal:18081/</url>
<url>http://ip-10-10-32-181.us-east-2.compute.internal:18081/</url>
<url>http://ip-10-10-32-181.us-east-2.compute.internal:18081/</url>
</service>
<service>
<role>ZEPPELINUI</role>
<url>http://ip-10-10-32-143.us-east-2.compute.internal:9995</url>
</service>
<service>
<role>ZEPPELINWS</role>
<url>ws://ip-10-10-32-143.us-east-2.compute.internal:9995/ws</url>
</service>
<service>
<role>LIVYSERVER</role>
<url>http://ip-10-10-32-181.us-east-2.compute.internal:8999</url>
</service>
</topology>
The SAML2Request:
<?xml version="1.0" encoding="UTF-8"?>
<saml2p:AuthnRequest AssertionConsumerServiceURL="https://<REDACTED>.us-east-2.elb.amazonaws.com/cr-cluster/knoxsso/api/v1/websso?pac4jCallback=true&client_name=SAML2Client"
Destination="https://portal.sso.us-east-1.amazonaws.com/saml/assertion/<REDACTED>"
ForceAuthn="false"
ID="_hkqsnsohez6jghntmirrdoiadknevetpemxqrwd"
IsPassive="false"
IssueInstant="2019-06-06T19:39:27.927Z"
ProtocolBinding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST"
ProviderName="pac4j-saml"
Version="2.0"
xmlns:saml2p="urn:oasis:names:tc:SAML:2.0:protocol">
<saml2:Issuer xmlns:saml2="urn:oasis:names:tc:SAML:2.0:assertion">https://<REDACTED>.us-east-2.elb.amazonaws.com/cr-cluster/knoxsso/api/v1/websso?pac4jCallback=true&client_name=SAML2Client</saml2:Issuer>
<ds:Signature xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
<ds:SignedInfo>
<ds:CanonicalizationMethod Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#" />
<ds:SignatureMethod Algorithm="http://www.w3.org/2001/04/xmldsig-more#rsa-sha256" />
<ds:Reference URI="#_hkqsnsohez6jghntmirrdoiadknevetpemxqrwd">
<ds:Transforms>
<ds:Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature" />
<ds:Transform Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#" />
</ds:Transforms>
<ds:DigestMethod Algorithm="http://www.w3.org/2001/04/xmlenc#sha512" />
<ds:DigestValue>7lbvfnpnYJJho3ebYnkzb+mbsJrUKJmUle/eYObkqMroSFLwKFfUnRIstSRaOvSRhzfu7P7gTv3U mWLk52iTfg==
</ds:DigestValue>
</ds:Reference>
</ds:SignedInfo>
<ds:SignatureValue>
<REDACTED>
</ds:SignatureValue>
<ds:KeyInfo>
<ds:X509Data>
<ds:X509Certificate>
<REDACTED>
</ds:X509Certificate>
</ds:X509Data>
</ds:KeyInfo>
</ds:Signature>
</saml2p:AuthnRequest>
The SAML2Response:
<?xml version="1.0" encoding="UTF-8"?>
<saml2p:Response xmlns:saml2p="urn:oasis:names:tc:SAML:2.0:protocol"
xmlns:ds="http://www.w3.org/2000/09/xmldsig#"
xmlns:enc="http://www.w3.org/2001/04/xmlenc#"
xmlns:saml2="urn:oasis:names:tc:SAML:2.0:assertion"
xmlns:xsd="http://www.w3.org/2001/XMLSchema"
Destination="https://<REDACTED>.us-east-2.elb.amazonaws.com/cr-cluster/knoxsso/api/v1/websso?pac4jCallback=true&amp;client_name=SAML2Client"
ID="_da4abe2b-a005-4f5c-950b-3e9e2646c0a5"
InResponseTo="_hkqsnsohez6jghntmirrdoiadknevetpemxqrwd"
IssueInstant="2019-06-06T19:39:28.148Z"
Version="2.0">
<saml2:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">https://portal.sso.us-east-1.amazonaws.com/saml/assertion/<REDACTED></saml2:Issuer>
<ds:Signature xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
<ds:SignedInfo>
<ds:CanonicalizationMethod Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#" />
<ds:SignatureMethod Algorithm="http://www.w3.org/2001/04/xmldsig-more#rsa-sha256" />
<ds:Reference URI="#_da4abe2b-a005-4f5c-950b-3e9e2646c0a5">
<ds:Transforms>
<ds:Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature" />
<ds:Transform Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#">
<ec:InclusiveNamespaces xmlns:ec="http://www.w3.org/2001/10/xml-exc-c14n#"
PrefixList="xsd" />
</ds:Transform>
</ds:Transforms>
<ds:DigestMethod Algorithm="http://www.w3.org/2001/04/xmlenc#sha256" />
<ds:DigestValue>mq219kl40ETgkW9K1WNoqi3R/KsxVYtTyXNis7Vpsy4=</ds:DigestValue>
</ds:Reference>
</ds:SignedInfo>
<ds:SignatureValue>
oNkvQntjMJ7NS3lLSM9XYxjSmHfWLw6C8fh/dFRcM4hVxNUpiplYN8dYQt/xAniAsAI6UEsFY5wz NHHH5R0hSkKhMp4KlBDY9ASJ5ySeCWM0CIZhV1w9V0pO935rV+hdhUMgS2Fb86ggN4LjBlKNHSFp D4sqUKvsTXptWYcu48Y8tOPedtgpwHUdc+ziV5ufAeJsbLtumk5oN3kGpDBX7qqOUc8T7GhKopXS wc+1kCEY1tjOZX0dN/T6K5A4wL/+DhzycxTGY0b0KB2eKK4ULEfDFxeIVdicaFH5yMyhWor7urL7
t6A5rEpyFlPKM23KDUZq/eDkhBp6LV+aMMuVUA==
</ds:SignatureValue>
<ds:KeyInfo>
<ds:X509Data>
<ds:X509Certificate>
MIIDAzCCAeugAwIBAgIBATANBgkqhkiG9w0BAQsFADBFMRYwFAYDVQQDDA1hbWF6b25hd3MuY29tMQ0wCwYDVQQLDARJREFTMQ8wDQYDVQQKDAZBbWF6b24xCzAJBgNVBAYTAlVTMB4XDTE5MDYwNjE4NTQ0OVoXDTI0MDYwNjE4NTQ0OVowRTEWMBQGA1UEAwwNYW1hem9uYXdzLmNvbTENMAsGA1UECwwESURBUzEPMA0GA1UECgwGQW1hem9uMQswCQYDVQQGEwJVUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL8E+P9tAWZuze/PC94va6epzs1p6KXiVaCqNgGltkZ2a7ffukNuV2ll0PXkK3SPDguiSR+FHdD3hmqhCmBnGZRge/TP+HFNBS8cxtThCV72dUfIYvJIa+dVIXf5aBOqMmUzjHPrGyaDjMaHQge1AYfoGWzSt6Fon0ji+UXJyYs57IGe8OA5DgeATfykWWpHDF1EAH3U5nF0WIOSl31kW6nTYhDNkZkBc2MgrO0fzNGyrj/dSfaU+LFsh52IsXNtHxnZTy1+rmEvW5iJb/lYQG+JFvu6o45zvLLjGpz7L1T7b5l9pq0AgrJEOvTS0W2NKdgAfTlI2Xdv7bVNIME3K18CAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAkAh5SWXZZXr7HxBbSG7UCf9vX+9aF3mX6KrPrikWskf/g99C3U2DOlStz17KZ7+dTwvhSZLkIwuWRa+t+HyJy4R+3c/aW4jlTeibQGxfTDeHBHozjSeub1g8oKW7ui7eMH3maXwe87WGTkPvRN3zQgfBSlBTKhU/DRUTdeIlC+en2hoFTchW0OTGd/O2t/CxXSqRHhDT6n8RHq5jDL8q5LvjNHAnEOD/1rjd9FIo3i+47HpUGp7RJnzN3SD9W1piUc2Ai5SI7AOzwpBGgg6XioFt9Itt5eYPgBuuS/pLhXjrg4VC7ZYIBL9TAIHmgmG2am5/AVTsD+Y+gZ0tKRzLiQ==
</ds:X509Certificate>
</ds:X509Data>
</ds:KeyInfo>
</ds:Signature>
<saml2p:Status>
<saml2p:StatusCode Value="urn:oasis:names:tc:SAML:2.0:status:Success" /></saml2p:Status>
<saml2:Assertion ID="_b818a50e-1221-46df-9bca-9f29f7613532"
IssueInstant="2019-06-06T19:39:28.149Z"
Version="2.0"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<saml2:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">https://portal.sso.us-east-1.amazonaws.com/saml/assertion/<REDACTED></saml2:Issuer>
<ds:Signature xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
<ds:SignedInfo>
<ds:CanonicalizationMethod Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#" />
<ds:SignatureMethod Algorithm="http://www.w3.org/2001/04/xmldsig-more#rsa-sha256" />
<ds:Reference URI="#_b818a50e-1221-46df-9bca-9f29f7613532">
<ds:Transforms>
<ds:Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature" />
<ds:Transform Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#">
<ec:InclusiveNamespaces xmlns:ec="http://www.w3.org/2001/10/xml-exc-c14n#"
PrefixList="xsd" />
</ds:Transform>
</ds:Transforms>
<ds:DigestMethod Algorithm="http://www.w3.org/2001/04/xmlenc#sha256" />
<ds:DigestValue>k+f/xrSENKFdbbvRs4WeeDiN+cm6eigvJwkcKxRFiBk=</ds:DigestValue>
</ds:Reference>
</ds:SignedInfo>
<ds:SignatureValue>
<REDACTED>
</ds:SignatureValue>
<ds:KeyInfo>
<ds:X509Data>
<ds:X509Certificate>
<REDACTED>
</ds:X509Certificate>
</ds:X509Data>
</ds:KeyInfo>
</ds:Signature>
<saml2:Subject>
<saml2:NameID Format="urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified"
SPNameQualifier="https://<REDACTED>.us-east-2.elb.amazonaws.com/cr-cluster/knoxsso/api/v1/websso?pac4jCallback=true&amp;client_name=SAML2Client">CRodgers@guideone.com</saml2:NameID>
<saml2:SubjectConfirmation Method="urn:oasis:names:tc:SAML:2.0:cm:bearer">
<saml2:SubjectConfirmationData InResponseTo="_hkqsnsohez6jghntmirrdoiadknevetpemxqrwd"
NotOnOrAfter="2019-06-06T20:39:28.149Z"
Recipient="https://<REDACTED>.us-east-2.elb.amazonaws.com/cr-cluster/knoxsso/api/v1/websso?pac4jCallback=true&amp;client_name=SAML2Client" /></saml2:SubjectConfirmation>
</saml2:Subject>
<saml2:Conditions NotBefore="2019-06-06T19:34:28.149Z"
NotOnOrAfter="2019-06-06T20:39:28.149Z">
<saml2:AudienceRestriction>
<saml2:Audience>https://<REDACTED>.us-east-2.elb.amazonaws.com/cr-cluster/knoxsso/api/v1/websso?pac4jCallback=true&amp;client_name=SAML2Client</saml2:Audience>
</saml2:AudienceRestriction>
</saml2:Conditions>
<saml2:AuthnStatement AuthnInstant="2019-06-06T19:39:28.149Z"
SessionIndex="_836223ec-5dcc-43ca-9029-3b1697389835"
SessionNotOnOrAfter="2019-06-06T20:39:28.149Z">
<saml2:AuthnContext>
<saml2:AuthnContextClassRef>urn:oasis:names:tc:SAML:2.0:ac:classes:PasswordProtectedTransport</saml2:AuthnContextClassRef>
</saml2:AuthnContext>
</saml2:AuthnStatement>
<saml2:AttributeStatement>
<saml2:Attribute Name="Username"
NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:unspecified">
<saml2:AttributeValue xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:type="xsd:string">i103246@guidehome.com</saml2:AttributeValue>
</saml2:Attribute>
</saml2:AttributeStatement>
</saml2:Assertion>
</saml2p:Response>
Gateway.log errors:
2019-06-06 14:38:54,540 ERROR knox.gateway (AbstractGatewayFilter.java:doFilter(69)) - Failed to execute filter: org.pac4j.core.exception.TechnicalException: name cannot be blank
2019-06-06 14:38:54,540 ERROR knox.gateway (GatewayFilter.java:doFilter(173)) - Gateway processing failed: javax.servlet.ServletException: org.pac4j.core.exception.TechnicalException: name cannot be blank
javax.servlet.ServletException: org.pac4j.core.exception.TechnicalException: name cannot be blank
Caused by: org.pac4j.core.exception.TechnicalException: name cannot be blank
AWS ACS URL: https://<REDACTED>.us-east-2.elb.amazonaws.com/cr-cluster/knoxsso/api/v1/websso?pac4jCallback=true&client_name=SAML2Client
AWS Audience: https://<REDACTED>.us-east-2.elb.amazonaws.com/cr-cluster/knoxsso/api/v1/websso?pac4jCallback=true&client_name=SAML2Client
AWS Start URL: https://<REDACTED>.us-east-2.elb.amazonaws.com/cr-cluster/knoxsso/api/v1/websso?pac4jCallback=true&client_name=SAML2Client
Is there anything glaringly wrong with this setup? We believe our issue comes from the encoding/decoding of the ampersand (& -> &amp;). We've tried to remove the ampersand altogether, but each time a request is made, the & shows up in the request. Any and all help is appreciated.
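One way to narrow down the ampersand theory (a hedged diagnostic sketch, not from the original post; the URLs are the redacted ones above) is to hit the websso endpoint with the ampersand sent literally and then percent-encoded, and compare what gateway.log records for the query string in each case:
# send client_name as a separate query parameter (literal ampersand)
curl -ks -o /dev/null -w '%{http_code}\n' \
  "https://<REDACTED>.us-east-2.elb.amazonaws.com/cr-cluster/knoxsso/api/v1/websso?pac4jCallback=true&client_name=SAML2Client"
# send the ampersand percent-encoded; this folds client_name into the pac4jCallback value,
# so pac4j sees no client name and should reproduce the "name cannot be blank" error
curl -ks -o /dev/null -w '%{http_code}\n' \
  "https://<REDACTED>.us-east-2.elb.amazonaws.com/cr-cluster/knoxsso/api/v1/websso?pac4jCallback=true%26client_name=SAML2Client"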
... View more
Labels:
- Apache Knox
06-20-2017
04:59 AM
While the workaround did not work exactly, it pointed us in the right direction of working with the impersonation settings and making sure those were all set correctly. Thank you.
... View more
06-20-2017
04:59 AM
The Livy logs show that the HiveContext is starting fine, but for sanity's sake I tried your method of configuration; after the changes and restarts, it is still outputting the same error.
... View more
06-20-2017
04:59 AM
I do not have Ranger KMS installed, but thank you for the article!
... View more
06-20-2017
04:59 AM
1 Kudo
I have set up a Livy interpreter through Zeppelin and am trying to run a simple
%livy.pyspark
sc.version
but to no avail: it fails with "Cannot Start Spark". The %spark interpreter, however, returns the version just fine:
%spark
sc.version
res10: String = 1.6.2
The Livy interpreter configs look like this:
livy.spark.master yarn-cluster
zeppelin.interpreter.localRepo /usr/hdp/current/zeppelin-server/local-repo/....
zeppelin.livy.concurrentSQL false
zeppelin.livy.create.session.retries 120
zeppelin.livy.keytab /<location_of_keytab>/zsk.keytab
zeppelin.livy.principal <zeppelin_principal>
zeppelin.livy.url http://<hostname>:8998
I have followed the instructions provided here https://community.hortonworks.com/articles/80059/how-to-configure-zeppelin-livy-interpreter-for-sec.html in their entirety. The cluster is Kerberized, and Zeppelin is synced to Active Directory. Also, the Resource Managers are in HA and I am seeing a few errors in the Livy log regarding refused connections to port 8032 (the default ResourceManager RPC port). See below for the stack trace:
WARN Client: Failed to connect to server: <Hostname>/<IP>:8032: retries get failed due to exceeded maximum allowed retries number: 0
java.net.ConnectException: Connection refused
at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:717)
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:650)
at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:745)
at org.apache.hadoop.ipc.Client$Connection.access$3200(Client.java:397)
at org.apache.hadoop.ipc.Client.getConnection(Client.java:1618)
at org.apache.hadoop.ipc.Client.call(Client.java:1449)
at org.apache.hadoop.ipc.Client.call(Client.java:1396)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:233)
at com.sun.proxy.$Proxy16.getApplicationReport(Unknown Source)
at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getApplicationReport(ApplicationClientProtocolPBClientImpl.java:191)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:278)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:194)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:176)
at com.sun.proxy.$Proxy17.getApplicationReport(Unknown Source)
at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getApplicationReport(YarnClientImpl.java:436)
at com.cloudera.livy.sessions.SessionManager$$anonfun$2.apply(SessionManager.scala:108)
at com.cloudera.livy.sessions.SessionManager$$anonfun$2.apply(SessionManager.scala:105)
at scala.collection.immutable.List.foreach(List.scala:318)
at com.cloudera.livy.sessions.SessionManager.checkAppState(SessionManager.scala:105)
at com.cloudera.livy.sessions.SessionManager$SessionAppStateMonitor.run(SessionManager.scala:142)
17/03/21 15:53:51 INFO ConfiguredRMFailoverProxyProvider: Failing over to rm2
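Since the Resource Managers are in HA and the client keeps failing over, one quick check (a hedged sketch, not part of the original post; rm1/rm2 are the typical HA ids from yarn-site.xml and may differ) is to confirm which ResourceManager is currently active:
# report the HA state of each ResourceManager
yarn rmadmin -getServiceState rm1
yarn rmadmin -getServiceState rm2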
Any help would be appreciated! Thank you very much!
Edit:
Including some more of the logs from livy-livy-server.out:
INFO: 17/03/22 08:17:44 INFO Client:
INFO: client token: Token { kind: YARN_CLIENT_TOKEN, service: }
INFO: diagnostics: AM container is launched, waiting for AM container to Register with RM
INFO: ApplicationMaster host: N/A
INFO: ApplicationMaster RPC port: -1
INFO: queue: default
INFO: start time: 10188663176
INFO: final status: UNDEFINED
INFO: tracking URL: http://<hostname>:8088/proxy/application_10134091314_0007/
INFO: user: crodgers@DOMAIN.ORG
INFO Client: Application report for application_10134091314_0007 (state: ACCEPTED)
INFO Client: Application report for application_10134091314_0007 (state: ACCEPTED)
INFO Client: Application report for application_10134091314_0007 (state: ACCEPTED)
INFO RSCAppListener: Disconnect with app application_10134091314_0007
WARN RSCClient: Client RPC channel closed unexpectedly.
INFO RSCClient: Failing pending job 12b64fd8-62ac-4dcb-9a05-6c68b81b8420 due to shutdown.
2nd Edit:
Including Resource Manager logs:
For more detailed output, check the application tracking page: http://<hostname>:8088/cluster/app/application_1490134091314_0008 Then click on links to logs of each attempt.
Diagnostics: Exception from container-launch.
Container id: container_e18_1490134091314_0008_01_000001
Exit code: 15
Stack trace: org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException: Launch container failed
at org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.DefaultLinuxContainerRuntime.launchContainer
(DefaultLinuxContainerRuntime.java:109)
at org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.DelegatingLinuxContainerRuntime.launchContai
ner(DelegatingLinuxContainerRuntime.java:89)
at org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor.launchContainer(LinuxContainerExecutor.java:392)
at org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:317)
at org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:83)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
Shell output: main : command provided 1
main : run as user is rodgersc@CORPORATE.ACT.ORG
main : requested yarn user is rodgersc@CORPORATE.ACT.ORG
Getting exit code file...
Creating script paths...
Writing pid file...
Writing to tmp file /DATA1/hadoop/yarn/local/nmPrivate/application_1490134091314_0008/container_e18_1490134091314_0008_01_000001/container_e18_1490134091314_0008_01_000001.pid.tmp
Writing to cgroup task files...
Creating local dirs...
Launching container...
Getting exit code file...
Creating script paths...
... View more
Labels:
- Apache Spark
- Apache Zeppelin
04-24-2017
07:16 PM
Wonderful! If you could close the other question you had posted and accept my answer, it would be greatly appreciated! It is a bit funky how it works, but as long as you make sure you have the correct hive-site.xml in the Spark conf, you should be okay, since all of your other configs looked correct. For some reason the hive-site.xml in Spark doesn't have the same template as Hive's. Ambari will overwrite the hive-site.xml in the Spark directory whenever Spark is restarted, hence the need to copy it over again. I have a cronjob set up to cp the hive-site.xml over every 5 minutes so I don't have to worry about that, which is something you might consider doing as well.
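As a rough illustration of that cronjob (a hedged sketch; the /etc/hive/conf and /etc/spark/conf paths assume a standard HDP layout and may differ on your cluster):
# crontab entry on the Spark host(s): re-copy Hive's hive-site.xml into Spark's conf every 5 minutes
*/5 * * * * cp /etc/hive/conf/hive-site.xml /etc/spark/conf/hive-site.xml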
... View more
04-24-2017
01:54 PM
I ran into this same issue a few weeks ago using Zeppelin to run Livy. Make sure that you copy the Hive hive-site.xml into the spark/conf directory on every node in the cluster; more often than not, this resolves the inability to connect to the Hive Metastore. Please let me know the status after you try this, or if you have already done that, so we can continue troubleshooting.
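A minimal sketch of pushing the file out to every node (the hostnames are placeholders and the paths assume a standard HDP layout):
# copy Hive's hive-site.xml over Spark's copy on each node in the cluster
for h in node1.example.com node2.example.com node3.example.com; do
  scp /etc/hive/conf/hive-site.xml "$h":/etc/spark/conf/hive-site.xml
done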
... View more
04-24-2017
01:49 PM
They are both technically managed by Ambari, so if you restart Spark, you will need to copy the hive-site.xml back over to overwrite Spark's hive-site.xml, as sometimes they are not the same.
... View more
04-24-2017
01:48 PM
1 Kudo
Spark uses the hive-site.xml in its own conf directory to connect to Hive when initializing a HiveContext. You can overwrite Spark's hive-site.xml with Hive's hive-site.xml, and it is recommended you do that in order to connect to Hive from Spark. This is what I did to run Livy.Spark within Zeppelin, and I was able to connect to Hive via this method.
... View more
04-24-2017
01:03 PM
Hello All,
Does anyone know of a way to configure Zeppelin so that it will automatically set notebook permissions when creating a new note? We are using Shiro to authenticate via AD and are fine-tuning the group permissions in that area, but I am curious whether there is a way to have a note default to permissions other than (*) for owner, read, and write.
Environment: We are currently on HDP-2.5.3.0-37, which ships Zeppelin release 0.6.2; we plan to upgrade to HDP 2.6+ (Zeppelin release 0.7.0) in the coming months.
Target Outcome: When `user1` creates a note, the note permissions (available through the padlock symbol on the note page) will automatically populate to Owner: `user1`, Write: `none`, Read: `none`.
Many thanks in advance for any help,
Colton
... View more
Labels:
- Apache Zeppelin
01-12-2017
06:59 PM
Changed the FQDN to IP and everything is flowing well now, thank you again for your help!
... View more
01-12-2017
05:12 PM
Now I am getting an error regarding a timeout waiting for metadata. Any idea here, or would you rather I open a new question?
... View more
01-12-2017
05:05 PM
For all searching: Make sure the user that NiFi is running as has permission to access the keytab file. If after this you are still receiving a metadata timeout error, log in to zkCli.sh with a Kafka ticket in the cache:
kinit kafka/_HOST@REALM.EXAMPLE -kt /etc/security/keytabs/kafka.service.keytab
Then, inside zkCli.sh, run rmr /brokers.
Restart Kafka and everything should be good to go at that point.
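A hedged sketch of that sequence end to end (the ZooKeeper host and the HDP client path are assumptions, not from the post):
# obtain a Kafka ticket from the service keytab
kinit -kt /etc/security/keytabs/kafka.service.keytab kafka/_HOST@REALM.EXAMPLE
ZK_HOST=zk1.example.com   # placeholder ZooKeeper host
# remove the stale broker registrations from ZooKeeper, then restart Kafka
echo "rmr /brokers" | /usr/hdp/current/zookeeper-client/bin/zkCli.sh -server "$ZK_HOST:2181"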
... View more
01-12-2017
05:04 PM
So far looking good. It turns out I had NiFi running as the admin user, which did not have permissions on that keytab. Will update with "solved" once tests confirm. Thank you!
For all searching:
Make sure the user that NiFi is running as has permission to access the keytab file... Whoops.
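A minimal sketch of verifying this (the run-as user and keytab path are taken from the post; adjust as needed):
# confirm ownership and permissions on the keytab
ls -l /etc/security/keytabs/nifi.service.keytab
# confirm the nifi user can actually read the keytab entries
sudo -u nifi klist -kt /etc/security/keytabs/nifi.service.keytab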
... View more
01-12-2017
04:33 PM
Typo, let me fix that up.
... View more
01-12-2017
04:27 PM
@kkawamura @Matt @Ali Bajwa @Bryan Bende I am continually running into the following error and would be extremely grateful for any help. Please let me know if there are any more files or configurations you would like to see.
javax.security.auth.login.LoginException: Could not login: the client is being asked for a password, but the Kafka client code does not currently support obtaining a password from the user. not available to garner authentication information from the user
Both HDF and HDP are Kerberized and running off of the same KDC. HDF and HDP both have Kafka installed, but only the HDP host with Kafka is used in the configurations and the keytabs. I have followed the steps shown in just about every post I could find in order to resolve this issue, but still to no avail. Other pertinent config files are listed below:
On HDF:
kafka-jaas.conf
Client {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
keyTab="/etc/security/keytabs/nifi.service.keytab"
useTicketCache=false
principal="nifi/{_HOST}@ZTEST.LOCAL"
};
KafkaClient {
com.sun.security.auth.module.Krb5LoginModule required
useTicketCache=false
renewTicket=true
serviceName="kafka"
useKeyTab=true
keyTab="/etc/security/keytabs/kafka1.service.keytab"
principal="kafka/{_HOST}@ZTEST.LOCAL";
};
bootstrap.conf
# Java command to use when running NiFi
java=java
# Username to use when running NiFi. This value will be ignored on Windows.
run.as=nifi
##run.as=root
# Configure where NiFi's lib and conf directories live
lib.dir=/usr/hdf/2.0.2.0-17/nifi/lib
conf.dir=/usr/hdf/2.0.2.0-17/nifi/conf
# How long to wait after telling NiFi to shutdown before explicitly killing the Process
graceful.shutdown.seconds=20
# Disable JSR 199 so that we can use JSP's without running a JDK
java.arg.1=-Dorg.apache.jasper.compiler.disablejsr199=true
# JVM memory settings
java.arg.2=-Xms512m
java.arg.3=-Xmx512m
# Enable Remote Debugging
#java.arg.debug=-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=8000
java.arg.4=-Djava.net.preferIPv4Stack=true
# allowRestrictedHeaders is required for Cluster/Node communications to work properly
java.arg.5=-Dsun.net.http.allowRestrictedHeaders=true
java.arg.6=-Djava.protocol.handler.pkgs=sun.net.www.protocol
# The G1GC is still considered experimental but has proven to be very advantageous in providing great
# performance without significant "stop-the-world" delays.
java.arg.13=-XX:+UseG1GC
#Set headless mode by default
java.arg.14=-Djava.awt.headless=true
#Ambari Metrics Collector URL - passed in to flow.xml for AmbariReportingTask
java.arg.15=-Dambari.metrics.collector.url=http://{_HOST}:6188/ws/v1/timeline/metrics
#Application ID - used in flow.xml - passed into flow.xml for AmbariReportingTask
java.arg.16=-Dambari.application.id=nifi
java.arg.17=-Djava.security.auth.login.config=/etc/nifi/kafka-jaas.conf
PublishKafka_0_10 processor settings:
Kafka Brokers: {_HOST}:6667
Security Protocol: SASL_PLAINTEXT
Kerberos Service Name: kafka
On HDP:
kafka_jaas.conf KafkaServer {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
keyTab="/etc/security/keytabs/kafka.service.keytab"
storeKey=true
useTicketCache=false
serviceName="kafka"
principal="kafka/{_HOST}@ZTEST.LOCAL";
};
KafkaClient {
com.sun.security.auth.module.Krb5LoginModule required
useTicketCache=true
renewTicket=true
serviceName="kafka";
};
Client {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
keyTab="/etc/security/keytabs/kafka.service.keytab"
storeKey=true
useTicketCache=false
serviceName="zookeeper"
principal="kafka/{_HOST}@ZTEST.LOCAL";
};
Thank you in advance for any help!
**Please note all hostnames were replaced with {_HOST}
Full stack trace from nifi-app.log:
2017-01-12 11:18:42,157 WARN [Timer-Driven Process Thread-10] o.a.n.c.t.ContinuallyRunProcessorTask
org.apache.kafka.common.KafkaException: Failed to construct kafka producer
at org.apache.kafka.clients.producer.KafkaProducer.<init>(KafkaProducer.java:335) ~[na:na]
at org.apache.kafka.clients.producer.KafkaProducer.<init>(KafkaProducer.java:163) ~[na:na]
at org.apache.nifi.processors.kafka.pubsub.PublisherPool.createLease(PublisherPool.java:61) ~[na:na]
at org.apache.nifi.processors.kafka.pubsub.PublisherPool.obtainPublisher(PublisherPool.java:56) ~[na:na]
at org.apache.nifi.processors.kafka.pubsub.PublishKafka_0_10.onTrigger(PublishKafka_0_10.java:312) ~[na:na]
at org.apache.nifi.processor.AbstractProcessor.onTrigger(AbstractProcessor.java:27) ~[nifi-api-1.0.0.2.0.2.0-17.jar:1.0.0.2.0.2.0-17]
at org.apache.nifi.controller.StandardProcessorNode.onTrigger(StandardProcessorNode.java:1064) ~[nifi-framework-core-1.0.0.2.0.2.0-17.jar:1.0.0.2.0.2.0-17]
at org.apache.nifi.controller.tasks.ContinuallyRunProcessorTask.call(ContinuallyRunProcessorTask.java:136) [nifi-framework-core-1.0.0.2.0.2.0-17.jar:1.0.0.2.0.2.0-17]
at org.apache.nifi.controller.tasks.ContinuallyRunProcessorTask.call(ContinuallyRunProcessorTask.java:47) [nifi-framework-core-1.0.0.2.0.2.0-17.jar:1.0.0.2.0.2.0-17]
at org.apache.nifi.controller.scheduling.TimerDrivenSchedulingAgent$1.run(TimerDrivenSchedulingAgent.java:132) [nifi-framework-core-1.0.0.2.0.2.0-17.jar:1.0.0.2.0.2.0-17
]
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) [na:1.8.0_77]
at java.util.concurrent.FutureTask.runAndReset(FutureTask.java:308) [na:1.8.0_77]
at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$301(ScheduledThreadPoolExecutor.java:180) [na:1.8.0_77]
at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:294) [na:1.8.0_77]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) [na:1.8.0_77]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) [na:1.8.0_77]
at java.lang.Thread.run(Thread.java:745) [na:1.8.0_77]
Caused by: org.apache.kafka.common.KafkaException: javax.security.auth.login.LoginException: Could not login: the client is being asked for a password, but the Kafka client code does not currently support obtaining a password from the user. not available to garner authentication information from the user
at org.apache.kafka.common.network.SaslChannelBuilder.configure(SaslChannelBuilder.java:86) ~[na:na]
at org.apache.kafka.common.network.ChannelBuilders.create(ChannelBuilders.java:71) ~[na:na]
at org.apache.kafka.clients.ClientUtils.createChannelBuilder(ClientUtils.java:83) ~[na:na]
at org.apache.kafka.clients.producer.KafkaProducer.<init>(KafkaProducer.java:277) ~[na:na]
... 16 common frames omitted
Caused by: javax.security.auth.login.LoginException: Could not login: the client is being asked for a password, but the Kafka client code does not currently support obtaining a password from the user. not available to garner authentication information from the user
at com.sun.security.auth.module.Krb5LoginModule.promptForPass(Krb5LoginModule.java:940) ~[na:1.8.0_77]
at com.sun.security.auth.module.Krb5LoginModule.attemptAuthentication(Krb5LoginModule.java:760) ~[na:1.8.0_77]
at com.sun.security.auth.module.Krb5LoginModule.login(Krb5LoginModule.java:617) ~[na:1.8.0_77]
at sun.reflect.GeneratedMethodAccessor368.invoke(Unknown Source) ~[na:na]
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[na:1.8.0_77]
at java.lang.reflect.Method.invoke(Method.java:498) ~[na:1.8.0_77]
at javax.security.auth.login.LoginContext.invoke(LoginContext.java:755) ~[na:1.8.0_77]
at javax.security.auth.login.LoginContext.access$000(LoginContext.java:195) ~[na:1.8.0_77]
at javax.security.auth.login.LoginContext$4.run(LoginContext.java:682) ~[na:1.8.0_77]
at javax.security.auth.login.LoginContext$4.run(LoginContext.java:680) ~[na:1.8.0_77]
at java.security.AccessController.doPrivileged(Native Method) ~[na:1.8.0_77]
at javax.security.auth.login.LoginContext.invokePriv(LoginContext.java:680) ~[na:1.8.0_77]
at javax.security.auth.login.LoginContext.login(LoginContext.java:587) ~[na:1.8.0_77]
at org.apache.kafka.common.security.authenticator.AbstractLogin.login(AbstractLogin.java:69) ~[na:na]
at org.apache.kafka.common.security.kerberos.KerberosLogin.login(KerberosLogin.java:110) ~[na:na]
at org.apache.kafka.common.security.authenticator.LoginManager.<init>(LoginManager.java:46) ~[na:na]
at org.apache.kafka.common.security.authenticator.LoginManager.acquireLoginManager(LoginManager.java:68) ~[na:na]
at org.apache.kafka.common.network.SaslChannelBuilder.configure(SaslChannelBuilder.java:78) ~[na:na]
... 19 common frames omitted
... View more
07-08-2016
02:21 PM
If you could, kill all of those processes (if your cluster will allow it) and try running again. It looks like a lot of Tez resources are busy, which could be why you are experiencing the hanging. If not, you may need to look into the YARN Queue Manager.
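A hedged sketch of killing one of those hung applications (the application id is a placeholder; take the real id from yarn application -list):
APP_ID=application_XXXXXXXXXXXXX_XXXX   # placeholder application id
# kill a specific hung YARN application
yarn application -kill "$APP_ID"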
... View more
07-08-2016
12:35 AM
I was originally using the packaged version of Atlas (0.5), but I have removed that and am installing 0.8 now; I will try again with that version installed. The cluster is currently not Kerberized, as it is just an internal testing cluster.
... View more
07-07-2016
06:35 PM
So I am jumping the gun on HDP 2.5 and trying to integrate Ranger 0.6.0-SNAPSHOT into my current 2.4.2 cluster, without much luck... Has anyone else been able to do this successfully? I have Ranger up and running and the policies created; I am just having some trouble getting the Hive plugin recognized and the Ranger TagSync function working with Atlas, even though I am getting green lights across the board.
... View more
07-05-2016
06:18 PM
The problem could lie in there not being enough resources available for a Tez session to start. Could you run yarn application -list on a YARN client host and see if there are any processes hogging resources? You could also check the configs and logs to make sure that Tez isn't just hanging while waiting for resources to become available.
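For instance, a small sketch of that check (the -appStates filter is optional and just narrows the listing):
# show the applications currently holding or requesting resources
yarn application -list -appStates RUNNING,ACCEPTED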
... View more
06-22-2016
05:42 PM
@rmolina The WebHCat response time did end up appearing to be the root of the issue. I do believe that being able to block off a certain amount of memory for WebHCat specifically would help (I think this may already be available with templeton.mapper.memory.mb, but that is just the mapper memory and I haven't looked much further into it). When there are no other users on the cluster, the Pig GUI view runs fine, but as that will not be the case for most prod clusters that we deploy, I think being able to set a reserve specifically in webhcat-env or webhcat-site could prove useful in making sure resources are properly allocated.
... View more
06-21-2016
12:05 PM
From what I can tell, this ended up being an available-resources issue. I logged back in at midnight when all users had left, and everything seems to be working correctly.
Some of the time the Pig job will say that it failed to start, but the stderr/stdout will show the results of the DUMP that I am trying to perform. Since it was working fine in the grunt CLI, this was a very tricky problem to uncover.
... View more
06-20-2016
06:10 PM
@Rahul Pathak This is what the current listing of hadoop.proxyuser is
... View more
06-20-2016
04:49 PM
@Artem Ervits Do you have any ideas?
... View more
06-20-2016
04:25 PM
Hello All,
I will preface by saying that I have seen multiple questions of a similar nature and have tried each of their solutions, but to no avail on my end, and I feel that a more in-depth explanation may help others as well if they ever arrive at a similar issue. The Pig View fails, but the Grunt> CLI runs fine, so I am thinking that it may be a Pig View configuration error. I started by researching the JIRA located at https://issues.apache.org/jira/browse/AMBARI-12738
I am trying to use the Pig View in Ambari 2.2.1 on HDP 2.4.2 and am running into a multitude of errors. The script that I am running is:
logs = LOAD 'server_logs.error_logs' USING org.apache.hive.hcatalog.pig.HCatLoader();
DUMP logs;
The job fails with a "Job failed to start" error whose stack trace contains only:
java.net.SocketTimeoutException: Read timed out
In the history logs within the view I receive only the following error: File /user/admin/pig/jobs/errlogs_20-06-2016-15-11-39/stderr not found. I have tried this as both the hdfs and admin users and the same problem remains. I have also tried to simply load a file with PigStorage('|'), but that returned the same issue. Using both the Tez and MR ExecTypes, I receive the same error. The NameNode and ResourceManager are both in High Availability mode. I have added the appropriate proxyuser configs to both the core-site and hcat-site in the HDFS and Hive configurations.
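The "stderr not found" error above suggests inspecting the view's job staging directory directly; a hedged sketch using the paths from the error message:
# list the Pig View job directory and the stderr file's owner and permissions
hdfs dfs -ls /user/admin/pig/jobs
hdfs dfs -ls /user/admin/pig/jobs/errlogs_20-06-2016-15-11-39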
I have restarted all services and the Ambari Server. The stderr file is created within the /user/admin/pig/jobs/errlogs_20-06-2016-15-11-39/ directory, but nothing is written to it. The admin/pig/ directory has full go+w 777 -R permissions, but when the stderr file is created it only shows 644 permissions. Against my better judgement I issued an hdfs dfs -chmod -R 777 /user command to see if it was an underlying permissions issue on a file unbeknownst to me, but that also left me with the same outcome. The Resource Manager logs show that the application is submitted and continues to hang in the RUNNING state even after the job has been noted as "Failed to Start" through Ambari, yet yarn application -list shows no running applications. Has anyone figured out a solution to this problem? The stack traces are not helpful, given they do not output more than 1-2 lines of information. My Pig View cluster configuration is as follows:
... View more
Labels:
- Apache Ambari
- Apache Pig
03-07-2016
10:43 PM
1 Kudo
The problem ended up being the Ambari NiFi service instance. I used the Ambari API to delete the service from Ambari, reinstalled the packages, and everything worked as planned. Thank you for all your help.
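For anyone looking for the API calls involved, a hedged sketch (the Ambari host, credentials, and cluster name are placeholders, and the service must be stopped before Ambari will let you delete it):
AMBARI="http://ambari.example.com:8080"   # placeholder Ambari server URL
CLUSTER="mycluster"                       # placeholder cluster name
# stop the NIFI service first (put it into the INSTALLED state)
curl -u admin:admin -H 'X-Requested-By: ambari' -X PUT \
  -d '{"RequestInfo":{"context":"Stop NIFI"},"Body":{"ServiceInfo":{"state":"INSTALLED"}}}' \
  "$AMBARI/api/v1/clusters/$CLUSTER/services/NIFI"
# then delete the service definition from Ambari
curl -u admin:admin -H 'X-Requested-By: ambari' -X DELETE \
  "$AMBARI/api/v1/clusters/$CLUSTER/services/NIFI"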
... View more
03-07-2016
02:41 PM
Including StdOut as per request: ...
2016-03-07 08:06:30,467 - Installing package snappy ('/usr/bin/yum -d 0 -e 0 -y install '--disablerepo=HDP-*' --enablerepo=HDP-2.4.0.0-169,HDP-UTILS-2.4.0.0-169 snappy')
2016-03-07 08:06:32,147 - Package['snappy-devel'] {'use_repos': ['HDP-2.4.0.0-169', 'HDP-UTILS-2.4.0.0-169'], 'skip_repos': ['HDP-*']}
2016-03-07 08:06:32,147 - Installing package snappy-devel ('/usr/bin/yum -d 0 -e 0 -y install '--disablerepo=HDP-*' --enablerepo=HDP-2.4.0.0-169,HDP-UTILS-2.4.0.0-169 snappy-devel')
2016-03-07 08:06:33,333 - Package['hadoop_2_4_*-libhdfs'] {'use_repos': ['HDP-2.4.0.0-169', 'HDP-UTILS-2.4.0.0-169'], 'skip_repos': ['HDP-*']}
2016-03-07 08:06:33,334 - Installing package hadoop_2_4_*-libhdfs ('/usr/bin/yum -d 0 -e 0 -y install '--disablerepo=HDP-*' --enablerepo=HDP-2.4.0.0-169,HDP-UTILS-2.4.0.0-169 'hadoop_2_4_*-libhdfs'')
2016-03-07 08:06:34,689 - Package['zip'] {'use_repos': ['HDP-2.4.0.0-169', 'HDP-UTILS-2.4.0.0-169'], 'skip_repos': ['HDP-*']}
2016-03-07 08:06:34,690 - Installing package zip ('/usr/bin/yum -d 0 -e 0 -y install '--disablerepo=HDP-*' --enablerepo=HDP-2.4.0.0-169,HDP-UTILS-2.4.0.0-169 zip')
2016-03-07 08:06:35,745 - Package['extjs'] {'use_repos': ['HDP-2.4.0.0-169', 'HDP-UTILS-2.4.0.0-169'], 'skip_repos': ['HDP-*']}
2016-03-07 08:06:35,746 - Installing package extjs ('/usr/bin/yum -d 0 -e 0 -y install '--disablerepo=HDP-*' --enablerepo=HDP-2.4.0.0-169,HDP-UTILS-2.4.0.0-169 extjs')
2016-03-07 08:06:36,864 - Package['oozie_2_4_*'] {'use_repos': ['HDP-2.4.0.0-169', 'HDP-UTILS-2.4.0.0-169'], 'skip_repos': ['HDP-*']}
2016-03-07 08:06:36,865 - Installing package oozie_2_4_* ('/usr/bin/yum -d 0 -e 0 -y install '--disablerepo=HDP-*' --enablerepo=HDP-2.4.0.0-169,HDP-UTILS-2.4.0.0-169 'oozie_2_4_*'')
2016-03-07 08:06:38,149 - Package['falcon_2_4_*'] {'use_repos': ['HDP-2.4.0.0-169', 'HDP-UTILS-2.4.0.0-169'], 'skip_repos': ['HDP-*']}
2016-03-07 08:06:38,149 - Installing package falcon_2_4_* ('/usr/bin/yum -d 0 -e 0 -y install '--disablerepo=HDP-*' --enablerepo=HDP-2.4.0.0-169,HDP-UTILS-2.4.0.0-169 'falcon_2_4_*'')
2016-03-07 08:06:39,233 - Package['tez_2_4_*'] {'use_repos': ['HDP-2.4.0.0-169', 'HDP-UTILS-2.4.0.0-169'], 'skip_repos': ['HDP-*']}
2016-03-07 08:06:39,234 - Installing package tez_2_4_* ('/usr/bin/yum -d 0 -e 0 -y install '--disablerepo=HDP-*' --enablerepo=HDP-2.4.0.0-169,HDP-UTILS-2.4.0.0-169 'tez_2_4_*'')
2016-03-07 08:06:40,344 - Package['flume_2_4_*'] {'use_repos': ['HDP-2.4.0.0-169', 'HDP-UTILS-2.4.0.0-169'], 'skip_repos': ['HDP-*']}
2016-03-07 08:06:40,345 - Installing package flume_2_4_* ('/usr/bin/yum -d 0 -e 0 -y install '--disablerepo=HDP-*' --enablerepo=HDP-2.4.0.0-169,HDP-UTILS-2.4.0.0-169 'flume_2_4_*'')
2016-03-07 08:06:41,570 - Package['git'] {'use_repos': ['HDP-2.4.0.0-169', 'HDP-UTILS-2.4.0.0-169'], 'skip_repos': ['HDP-*']}
2016-03-07 08:06:41,571 - Installing package git ('/usr/bin/yum -d 0 -e 0 -y install '--disablerepo=HDP-*' --enablerepo=HDP-2.4.0.0-169,HDP-UTILS-2.4.0.0-169 git')
2016-03-07 08:06:42,551 - Package['java-1.7.0-openjdk-devel'] {'use_repos': ['HDP-2.4.0.0-169', 'HDP-UTILS-2.4.0.0-169'], 'skip_repos': ['HDP-*']}
2016-03-07 08:06:42,551 - Installing package java-1.7.0-openjdk-devel ('/usr/bin/yum -d 0 -e 0 -y install '--disablerepo=HDP-*' --enablerepo=HDP-2.4.0.0-169,HDP-UTILS-2.4.0.0-169 java-1.7.0-openjdk-devel')
2016-03-07 08:06:43,608 - Package['apache-maven-3.2*'] {'use_repos': ['HDP-2.4.0.0-169', 'HDP-UTILS-2.4.0.0-169'], 'skip_repos': ['HDP-*']}
2016-03-07 08:06:43,608 - Installing package apache-maven-3.2* ('/usr/bin/yum -d 0 -e 0 -y install '--disablerepo=HDP-*' --enablerepo=HDP-2.4.0.0-169,HDP-UTILS-2.4.0.0-169 'apache-maven-3.2*'')
2016-03-07 08:06:44,706 - Package Manager failed to install packages. Error: Execution of '/usr/bin/yum -d 0 -e 0 -y install '--disablerepo=HDP-*' --enablerepo=HDP-2.4.0.0-169,HDP-UTILS-2.4.0.0-169 'apache-maven-3.2*'' returned 1. Error: Nothing to do
Traceback (most recent call last):
File "/var/lib/ambari-agent/cache/custom_actions/scripts/install_packages.py", line 376, in install_packages
skip_repos=[self.REPO_FILE_NAME_PREFIX + "*"] if OSCheck.is_redhat_family() else [])
File "/usr/lib/python2.6/site-packages/resource_management/core/base.py", line 154, in __init__
self.env.run()
File "/usr/lib/python2.6/site-packages/resource_management/core/environment.py", line 158, in run
self.run_action(resource, action)
File "/usr/lib/python2.6/site-packages/resource_management/core/environment.py", line 121, in run_action
provider_action()
File "/usr/lib/python2.6/site-packages/resource_management/core/providers/package/__init__.py", line 49, in action_install
self.install_package(package_name, self.resource.use_repos, self.resource.skip_repos)
File "/usr/lib/python2.6/site-packages/resource_management/core/providers/package/yumrpm.py", line 49, in install_package
shell.checked_call(cmd, sudo=True, logoutput=self.get_logoutput())
File "/usr/lib/python2.6/site-packages/resource_management/core/shell.py", line 70, in inner
result = function(command, **kwargs)
File "/usr/lib/python2.6/site-packages/resource_management/core/shell.py", line 92, in checked_call
tries=tries, try_sleep=try_sleep)
File "/usr/lib/python2.6/site-packages/resource_management/core/shell.py", line 140, in _call_wrapper
result = _call(command, **kwargs_copy)
File "/usr/lib/python2.6/site-packages/resource_management/core/shell.py", line 291, in _call
raise Fail(err_msg)
Fail: Execution of '/usr/bin/yum -d 0 -e 0 -y install '--disablerepo=HDP-*' --enablerepo=HDP-2.4.0.0-169,HDP-UTILS-2.4.0.0-169 'apache-maven-3.2*'' returned 1. Error: Nothing to do
2016-03-07 08:06:45,247 - Installation of packages failed. Checking if installation was partially complete
2016-03-07 08:06:45,247 - Old versions: ['2.3.4.0-3485', '2.4.0.0-169']
2016-03-07 08:06:45,270 - New versions: ['2.3.4.0-3485', '2.4.0.0-169']
2016-03-07 08:06:45,434 - Deltas: set([])
... View more
03-07-2016
02:38 PM
I am really just confused as to why Ambari is trying to install an Apache Maven package. I have Apache Maven 3.2.5 installed, and "mvn -v" confirms that.
... View more
03-07-2016
01:47 PM
I used the file given for my OS and tried yum install ambari-server and yum upgrade ambari-server, but both responded with "Package ambari-server-2.2.1.0-161.x86_64 already installed and latest version".
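To compare what is installed with what the enabled repos actually offer (a hedged sketch; repo configuration may differ on your host):
# show the installed ambari-server build
yum list installed ambari-server
# show every ambari-server version the enabled repos provide
yum --showduplicates list available ambari-server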
... View more