###########################################################################
# Settings to configure your Hadoop cluster.
###########################################################################

[hadoop]

  # Configuration for HDFS NameNode
  # ------------------------------------------------------------------------
  [[hdfs_clusters]]

    [[[default]]]
      # Enter the filesystem uri
      fs_defaultfs=hdfs://namenode.asotc.com:8020

      # Use WebHdfs/HttpFs as the communication mechanism. To fall back to
      # using the Thrift plugin (used in Hue 1.x), this must be uncommented
      # and explicitly set to the empty value.
      webhdfs_url=http://namenode.asotc.com:50070/webhdfs/v1/

      ## security_enabled=true

      # Default umask for file and directory creation, specified as an octal value.
      ## umask=022

  [[yarn_clusters]]

    [[[default]]]
      # Whether to submit jobs to this cluster
      submit_to=true

      ## security_enabled=false

      # Resource Manager logical name (required for HA)
      ## logical_name=

      # URL of the ResourceManager webapp address (yarn.resourcemanager.webapp.address)
      resourcemanager_api_url=http://namenode.asotc.com:8088

      # URL of the YARN RPC address (yarn.resourcemanager.address)
      resourcemanager_rpc_url=http://namenode.asotc.com:8050

      # URL of the ProxyServer API
      proxy_api_url=http://namenode.asotc.com:8088

      # URL of the HistoryServer API
      history_server_api_url=http://ambari.asotc.com:19888

      # URL of the AppTimelineServer API
      app_timeline_server_api_url=http://snode.asotc.com:8188

      # URL of the NodeManager API
      node_manager_api_url=http://d01.asotc.com:8042

      # HA support by specifying multiple clusters, e.g.:
      # [[[ha]]]
      #   # Enter the host on which you are running the failover Resource Manager
      #   resourcemanager_api_url=http://failover-host:8088
      #   logical_name=failover
      #   submit_to=True

###########################################################################
# Settings to configure liboozie
###########################################################################

[liboozie]
  # The URL where the Oozie service runs. This is required in order for
  # users to submit jobs.
  oozie_url=http://snode.asotc.com:11000/oozie

  ## security_enabled=true

  # Location on HDFS where the workflows/coordinators are deployed when submitted.

###########################################################################
# Settings to configure the Oozie app
###########################################################################

[oozie]
  # Location on local FS where the examples are stored.
  ## local_data_dir=..../examples

  # Location on local FS where the data for the examples is stored.
  ## sample_data_dir=...thirdparty/sample_data

  # Location on HDFS where the Oozie examples and workflows are stored.
  ## remote_data_dir=/user/hue/oozie/workspaces

  # Share workflows and coordinators information with all users. If set to false,
  # they will be visible only to the owner and administrators.
  ## share_jobs=true

  # Maximum number of Oozie workflows or coordinators to retrieve in one API call.
  ## oozie_jobs_count=100

  # Comma-separated list of parameters which should be obfuscated in Oozie job configuration.
  ## oozie_obfuscate_params=password,pwd

  # Maximum count of actions of Oozie coordinators to be shown on one page.
  ## oozie_job_actions_count=50
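# The endpoints configured above can be sanity-checked before restarting Hue.
# The curl calls below are only an illustrative sketch (they assume the
# services are up and reachable from the Hue host and that curl is installed);
# they are not read by Hue. They hit the standard WebHDFS, ResourceManager and
# Oozie REST status paths using the hosts and ports set in this file.
#
#   curl -i "http://namenode.asotc.com:50070/webhdfs/v1/?op=LISTSTATUS"   # WebHDFS
#   curl "http://namenode.asotc.com:8088/ws/v1/cluster/info"              # ResourceManager
#   curl "http://snode.asotc.com:11000/oozie/v1/admin/status"             # Oozie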
###########################################################################
# Settings to configure Beeswax
###########################################################################

[beeswax]

  # Host where Hive server Thrift daemon is running.
  # If Kerberos security is enabled, use fully-qualified domain name (FQDN).
  hive_server_host=ambari.asotc.com

  # Port where HiveServer2 Thrift server runs on.
  hive_server_port=10000

  # Hive configuration directory, where hive-site.xml is located
  ## hive_conf_dir=/etc/hive/conf

  # Timeout in seconds for thrift calls to Hive service
  ## server_conn_timeout=120

  # Set a LIMIT clause when browsing a partitioned table.
  # A positive value will be set as the LIMIT. If 0 or negative, do not set any limit.
  ## browse_partitioned_table_limit=250

  # A limit to the number of rows that can be downloaded from a query.
  # A value of -1 means there will be no limit.
  # A maximum of 65,000 is applied to XLS downloads.
  ## download_row_limit=1000000

  # Hue will try to close the Hive query when the user leaves the editor page.
  # This will free all the query resources in HiveServer2, but also make its results inaccessible.
  ## close_queries=false

  # Option to show execution engine choice.
  ## show_execution_engine=False

  # Go to column pop-up on query result page. Set to false to disable.
  ## go_to_column=true

  [[ssl]]
    # SSL communication enabled for this server.
    ## enabled=false

    # Path to Certificate Authority certificates.
    ## cacerts=/etc/hue/cacerts.pem

    # Path to the private key file.
    ## key=/etc/hue/key.pem

    # Path to the public certificate file.
    ## cert=/etc/hue/cert.pem

    # Choose whether Hue should validate certificates received from the server.
    ## validate=true

###########################################################################
# Settings to configure Job Designer
###########################################################################

[jobsub]
  # Location on HDFS where the jobsub examples and templates are stored.
  ## remote_data_dir=/user/hue/jobsub

  # Location on local FS where examples and templates are stored.
  ## local_data_dir=..../data

  # Location on local FS where sample data is stored.
  ## sample_data_dir=...thirdparty/sample_data

###########################################################################
# Settings to configure Job Browser
###########################################################################

[jobbrowser]
  # Share submitted jobs information with all users. If set to false,
  # submitted jobs are visible only to the owner and administrators.
  ## share_jobs=true

###########################################################################
# Settings for the User Admin application
###########################################################################

[useradmin]
  # The name of the default user group that users will be a member of
  default_user_group=hadoop
  default_username=hue
  default_user_password=1111

[hcatalog]
  templeton_url=http://ambari.asotc.com:50111/templeton/v1/
  security_enabled=false

[about]
  tutorials_installed=false

[pig]
  udf_path="/tmp/udfs"
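# As with the Hadoop endpoints above, the HiveServer2 and WebHCat (Templeton)
# services referenced in this file can be sanity-checked from the Hue host.
# These commands are only an illustrative sketch (they assume beeline and curl
# are installed and that a "hue" user is allowed to connect); Hue does not
# read them.
#
#   beeline -u "jdbc:hive2://ambari.asotc.com:10000/default" -n hue -e "show databases;"   # HiveServer2
#   curl "http://ambari.asotc.com:50111/templeton/v1/status"                               # WebHCat (Templeton)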