<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>question Could Not able to Launch Apache Atlas ( External hbase-solr) in Support Questions</title>
    <link>https://community.cloudera.com/t5/Support-Questions/Could-Not-able-to-Launch-Apache-Atlas-External-hbase-solr/m-p/209062#M171019</link>
    <description>&lt;P&gt;I have followed &lt;A href="http://atlas.apache.org/InstallationSteps.html" target="_blank"&gt;http://atlas.apache.org/InstallationSteps.html&lt;/A&gt; to set up Atlas (with external hbase-solr). &lt;/P&gt;&lt;P&gt;After starting the &lt;STRONG&gt;./atlas_start.py&lt;/STRONG&gt;, there were no more logs generated after &lt;STRONG&gt;"AuditFilter Initialization Started".&lt;/STRONG&gt;&lt;/P&gt;&lt;P&gt;&lt;STRONG&gt;application log:&lt;/STRONG&gt;&lt;/P&gt;&lt;PRE&gt;2017-11-24 18:09:41,153 INFO  - [main:] ~ GraphTransaction intercept for org.apache.atlas.discovery.EntityDiscoveryService.searchUsingDslQuery (GraphTransactionAdvisor$1:41)
2017-11-24 18:09:41,153 INFO  - [main:] ~ GraphTransaction intercept for org.apache.atlas.discovery.EntityDiscoveryService.searchUsingFullTextQuery (GraphTransactionAdvisor$1:41)
2017-11-24 18:09:41,153 INFO  - [main:] ~ GraphTransaction intercept for org.apache.atlas.discovery.EntityDiscoveryService.searchUsingBasicQuery (GraphTransactionAdvisor$1:41)
2017-11-24 18:09:41,176 INFO  - [main:] ~ GraphTransaction intercept for org.apache.atlas.discovery.EntityLineageService.getSchemaForHiveTableByGuid (GraphTransactionAdvisor$1:41)
2017-11-24 18:09:41,227 INFO  - [main:] ~ Starting service org.apache.atlas.web.service.ActiveInstanceElectorService (Services:53)
2017-11-24 18:09:41,227 INFO  - [main:] ~ HA is not enabled, no need to start leader election service (ActiveInstanceElectorService:96)
2017-11-24 18:09:41,228 INFO  - [main:] ~ Starting service org.apache.atlas.kafka.KafkaNotification (Services:53)
2017-11-24 18:09:41,228 INFO  - [main:] ~ Starting service org.apache.atlas.notification.NotificationHookConsumer (Services:53)
2017-11-24 18:09:41,228 INFO  - [main:] ~ HA is disabled, starting consumers inline. (NotificationHookConsumer:143)
2017-11-24 18:09:41,228 INFO  - [main:] ~ Consumer property: atlas.kafka.enable.auto.commit: null (KafkaNotification:275)
2017-11-24 18:09:41,346 WARN  - [main:] ~ The configuration hook.group.id = atlas was supplied but isn't a known config. (AbstractConfig:186)
2017-11-24 18:09:41,346 WARN  - [main:] ~ The configuration data = /home/ec2-user/sankar/atlas/distro/target/apache-atlas-1.0.0-SNAPSHOT-bin/apache-atlas-1.0.0-SNAPSHOT/data/kafka was supplied but isn't a known config. (AbstractConfig:186)
2017-11-24 18:09:41,347 WARN  - [main:] ~ The configuration zookeeper.connection.timeout.ms = 200 was supplied but isn't a known config. (AbstractConfig:186)
2017-11-24 18:09:41,347 WARN  - [main:] ~ The configuration key.serializer = org.apache.kafka.common.serialization.StringSerializer was supplied but isn't a known config. (AbstractConfig:186)
2017-11-24 18:09:41,347 WARN  - [main:] ~ The configuration zookeeper.session.timeout.ms = 400 was supplied but isn't a known config. (AbstractConfig:186)
2017-11-24 18:09:41,347 WARN  - [main:] ~ The configuration value.serializer = org.apache.kafka.common.serialization.StringSerializer was supplied but isn't a known config. (AbstractConfig:186)
2017-11-24 18:09:41,347 WARN  - [main:] ~ The configuration zookeeper.connect = 10.115.80.165:2181,10.115.80.168:2181,10.115.80.97:2181 was supplied but isn't a known config. (AbstractConfig:186)
2017-11-24 18:09:41,347 WARN  - [main:] ~ The configuration zookeeper.sync.time.ms = 20 was supplied but isn't a known config. (AbstractConfig:186)
2017-11-24 18:09:41,348 WARN  - [main:] ~ The configuration poll.timeout.ms = 1000 was supplied but isn't a known config. (AbstractConfig:186)
2017-11-24 18:09:41,416 INFO  - [main:] ~ Starting service org.apache.atlas.repository.audit.HBaseBasedAuditRepository (Services:53)
2017-11-24 18:09:41,417 INFO  - [NotificationHookConsumer thread-0:] ~ [atlas-hook-consumer-thread], Starting  (Logging$class:68)
2017-11-24 18:09:41,418 INFO  - [NotificationHookConsumer thread-0:] ~ ==&amp;gt; HookConsumer doWork() (NotificationHookConsumer$HookConsumer:305)
2017-11-24 18:09:41,418 INFO  - [NotificationHookConsumer thread-0:] ~ Atlas Server is ready, can start reading Kafka events. (NotificationHookConsumer$HookConsumer:508)
2017-11-24 18:09:41,437 INFO  - [main:] ~ HA is disabled. Hence creating table on startup. (HBaseBasedAuditRepository:384)
2017-11-24 18:09:41,438 INFO  - [main:] ~ Checking if table apache_atlas_entity_audit exists (HBaseBasedAuditRepository:343)
2017-11-24 18:09:41,447 INFO  - [main:] ~ Table apache_atlas_entity_audit exists (HBaseBasedAuditRepository:355)
2017-11-24 18:09:41,835 INFO  - [main:] ~ AuditFilter initialization started (AuditFilter:57)&lt;/PRE&gt;&lt;P&gt;config File:&lt;/P&gt;&lt;PRE&gt;# Graph Database
#Configures the graph database to use.  Defaults to JanusGraph 0.1.1
#atlas.graphdb.backend=org.apache.atlas.repository.graphdb.janus.AtlasJanusGraphDatabase
# Graph Storage
atlas.graph.storage.backend=hbase
atlas.graph.storage.hbase.table=apache_atlas_titan
#Hbase
#For standalone mode , specify localhost
#for distributed mode, specify zookeeper quorum here - For more information refer &lt;A href="http://s3.thinkaurelius.com/docs/titan/current/hbase.html#_remote_server_mode_2" target="_blank"&gt;http://s3.thinkaurelius.com/docs/titan/current/hbase.html#_remote_server_mode_2&lt;/A&gt;
atlas.graph.storage.hostname=10.115.80.165,10.115.80.168,10.115.80.97
atlas.graph.storage.hbase.regions-per-server=1
atlas.graph.storage.lock.wait-time=10000
# Gremlin Query Optimizer
#
# Enables rewriting gremlin queries to maximize performance. This flag is provided as
# a possible way to work around any defects that are found in the optimizer until they
# are resolved.
#atlas.query.gremlinOptimizerEnabled=true
# Delete handler
#
# This allows the default behavior of doing "soft" deletes to be changed.
#
# Allowed Values:
# org.apache.atlas.repository.graph.SoftDeleteHandler - all deletes are "soft" deletes
# org.apache.atlas.repository.graph.HardDeleteHandler - all deletes are "hard" deletes
#
#atlas.DeleteHandler.impl=org.apache.atlas.repository.graph.SoftDeleteHandler
# Entity audit repository
#
# This allows the default behavior of logging entity changes to hbase to be changed.
#
# Allowed Values:
# org.apache.atlas.repository.audit.HBaseBasedAuditRepository - log entity changes to hbase
# org.apache.atlas.repository.audit.NoopEntityAuditRepository - disable the audit repository
#
#atlas.EntityAuditRepository.impl=org.apache.atlas.repository.audit.NoopEntityAuditRepository
#org.apache.atlas.repository.audit.HBaseBasedAuditRepository
# Graph Search Index
atlas.graph.index.search.backend=solr
#Solr
#Solr cloud mode properties
atlas.graph.index.search.solr.mode=cloud
atlas.graph.index.search.solr.zookeeper-url=10.115.80.165:2181,10.115.80.168:2181,10.115.80.97:2181
atlas.graph.index.search.solr.zookeeper-connect-timeout=60000
atlas.graph.index.search.solr.zookeeper-session-timeout=60000
#Solr http mode properties
atlas.graph.index.search.solr.mode=http
atlas.graph.index.search.solr.http-urls=http://localhost:8983/solr
# Solr-specific configuration property
atlas.graph.index.search.max-result-set-size=150
#########  Notification Configs  #########
atlas.notification.embedded=false
atlas.kafka.data=${sys:atlas.home}/data/kafka
atlas.kafka.zookeeper.connect=10.115.80.165:2181,10.115.80.168:2181,10.115.80.97:2181
#localhost:9026
atlas.kafka.bootstrap.servers=10.115.80.165:9092
atlas.kafka.zookeeper.session.timeout.ms=400
atlas.kafka.zookeeper.connection.timeout.ms=200
atlas.kafka.zookeeper.sync.time.ms=20
atlas.kafka.auto.commit.interval.ms=1000
atlas.kafka.hook.group.id=atlas
atlas.kafka.enable.auto.commit=false
atlas.kafka.auto.offset.reset=earliest
atlas.kafka.session.timeout.ms=30000
atlas.kafka.poll.timeout.ms=1000
atlas.notification.create.topics=true
atlas.notification.replicas=1
atlas.notification.topics=ATLAS_HOOK,ATLAS_ENTITIES
atlas.notification.log.failed.messages=true
atlas.notification.consumer.retry.interval=500
atlas.notification.hook.retry.interval=1000
# Enable for Kerberized Kafka clusters
#atlas.notification.kafka.service.principal=kafka/_HOST@EXAMPLE.COM
#atlas.notification.kafka.keytab.location=/etc/security/keytabs/kafka.service.keytab
#########  Hive Lineage Configs  #########
## Schema
atlas.lineage.schema.query.hive_table=hive_table where __guid='%s'\, columns
atlas.lineage.schema.query.Table=Table where __guid='%s'\, columns
## Server port configuration
#atlas.server.http.port=21000
#atlas.server.https.port=21443
#########  Security Properties  #########
# SSL config
atlas.enableTLS=false
#truststore.file=/path/to/truststore.jks
#cert.stores.credential.provider.path=jceks://file/path/to/credentialstore.jceks
#########  Server Properties  #########
atlas.rest.address=http://10.115.80.165:21000
# If enabled and set to true, this will run setup steps when the server starts
#atlas.server.run.setup.on.start=false
#########  Entity Audit Configs  #########
atlas.audit.hbase.tablename=apache_atlas_entity_audit
atlas.audit.zookeeper.session.timeout.ms=1000
atlas.audit.hbase.zookeeper.quorum=10.115.80.165:2181,10.115.80.168:2181,10.115.80.97:2181&lt;/PRE&gt;&lt;P&gt;I tried to hit the application URL (say, &lt;A href="http://localhost:21000)" target="_blank"&gt;http://localhost:21000)&lt;/A&gt;. It's throwing an error.&lt;/P&gt;&lt;PRE&gt;wget &lt;A href="http://10.115.80.165:21000" target="_blank"&gt;http://10.115.80.165:21000&lt;/A&gt; --no-proxy 
--2017-11-24 17:54:05--  &lt;A href="http://10.x.x.x:21000/" target="_blank"&gt;http://10.x.x.x:21000/&lt;/A&gt; Connecting to 10.x.x.x:21000... connected. 
HTTP request sent, awaiting response... No data received.
Retrying.
&lt;/PRE&gt;&lt;P&gt;I have verified the port being used: &lt;/P&gt;&lt;PRE&gt;netstat -tunlp | grep 21000 
tcp        0      0 0.0.0.0:21000           0.0.0.0:*               LISTEN      35415/java&lt;/PRE&gt;&lt;P&gt;I have no idea how to proceed.&lt;/P&gt;</description>
    <pubDate>Sat, 25 Nov 2017 04:04:51 GMT</pubDate>
    <dc:creator>ssankarau</dc:creator>
    <dc:date>2017-11-25T04:04:51Z</dc:date>
  </channel>
</rss>

