# Version 8.2.3
# DO NOT EDIT THIS FILE!
# Changes to default files will be lost on update and are difficult to
# manage and support.
#
# Please make any changes to system defaults by overriding them in
# apps or $SPLUNK_HOME/etc/system/local
# (See "Configuration file precedence" in the web documentation).
#
# To override a specific setting, copy the name of the stanza and
# setting to the file where you wish to override it.
#
# This file contains possible attributes and values to configure SSL
# and HTTP server options.
#
[general]
serverName=$HOSTNAME
sessionTimeout=1h
pass4SymmKey = changeme
pass4SymmKey_minLength = 12
# The following 'allowRemoteLogin' setting controls remote management of your splunk instance.
# - If set to 'always', all remote logins are allowed.
# - If set to 'never', only local logins to splunkd will be allowed. Note that this will still allow
# remote management through splunkweb if splunkweb is on the same server.
# - If set to 'requireSetPassword' (default behavior):
# 1. In the free license, remote login is disabled.
# 2. In the pro license, remote login is only disabled for the admin user that has not changed their default password
allowRemoteLogin=requireSetPassword
tar_format=gnutar
access_logging_for_phonehome=true
hangup_after_phonehome=false
listenOnIPv6 = no
connectUsingIpVersion = auto
useHTTPServerCompression = true
useHTTPClientCompression = true
defaultHTTPServerCompressionLevel = 6
skipHTTPCompressionAcl = 127.0.0.1 ::1
parallelIngestionPipelines = 1
pipelineSetSelectionPolicy = round_robin
pipelineSetWeightsUpdatePeriod = 30
pipelineSetNumTrackingPeriods = 5
pipelineSetChannelSetCacheSize = 12
instanceType = download
numThreadsForIndexInitExecutor = 16
cleanRemoteStorageByDefault = false
legacyCiphers = decryptOnly
decommission_search_jobs_wait_secs = 0
python.version = python3
regex_cache_hiwater = 2500
[cascading_replication]
max_replication_threads = auto
max_replication_jobs = 5
cascade_replication_plan_reap_interval = 1h
cascade_replication_plan_age = 8h
cascade_replication_plan_fanout = auto
cascade_replication_plan_topology = size_balanced
cascade_replication_plan_select_policy = random
pass4SymmKey_minLength = 12
[sslConfig]
enableSplunkdSSL = true
useClientSSLCompression = false
useSplunkdClientSSLCompression = true
# enableSplunkSearchSSL has been moved to web.conf/[settings]/enableSplunkWebSSL
# SSL settings
# The following provides modern TLS configuration. This configuration drops support
# for old Splunk versions (Splunk 5.x and earlier).
# To add support for Splunk 5.x:
# - set sslVersions & sslVersionsForClient to tls
# - and add AES256-SHA to the cipherSuite
# The following non-forward-secrecy ciphers were added to support the kv store:
# AES256-GCM-SHA384:AES128-GCM-SHA256:AES128-SHA256.
sslVersions = tls1.2
sslVersionsForClient = tls1.2
cipherSuite = ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDH-ECDSA-AES256-GCM-SHA384:ECDH-ECDSA-AES128-GCM-SHA256:ECDH-ECDSA-AES128-SHA256:AES256-GCM-SHA384:AES128-GCM-SHA256:AES128-SHA256
ecdhCurves = prime256v1, secp384r1, secp521r1
sendStrictTransportSecurityHeader = false
allowSslCompression = true
allowSslRenegotiation = true
serverCert = $SPLUNK_HOME/etc/auth/server.pem
sslPassword = password
caCertFile = $SPLUNK_HOME/etc/auth/cacert.pem
certCreateScript = $SPLUNK_HOME/bin/splunk, createssl, server-cert
# DEPRECATED
caPath = $SPLUNK_HOME/etc/auth
[httpServer]
# defines the stylesheet relative URL to apply to default Atom feeds;
# set to 'none' to not write out xsl-stylesheet directive
atomFeedStylesheet = /static/atom.xsl
max-age = 3600
follow-symlinks = false
# reject web accesses over 2GB in length
max_content_length = 2147483648
# When HTTP client streams data to HTTP server, server will timeout write operation after
# streamInWriteTimeout seconds if it cannot make write progress.
streamInWriteTimeout = 5
acceptFrom = *
# Automatically tune these limits:
maxThreads = 0
maxSockets = 0
forceHttp10 = auto
crossOriginSharingPolicy =
crossOriginSharingHeaders =
x_frame_options_sameorigin = true
allowBasicAuth = true
basicAuthRealm = /splunk
allowCookieAuth = true
cookieAuthHttpOnly = true
cookieAuthSecure = true
cookieSameSiteSecure = false
allowEmbedTokenAuth = true
dedicatedIoThreads = auto
keepAliveIdleTimeout = 7200
busyKeepAliveIdleTimeout = 12
[mimetype-extension-map]
gif = image/gif
html = text/html
htm = text/html
jpg = image/jpg
png = image/png
txt = text/plain
xml = text/xml
xsl = text/xml
[applicationsManagement]
allowInternetAccess = true
url = https://apps.splunk.com/api/apps
loginUrl = https://apps.splunk.com/api/account:login/
detailsUrl = https://apps.splunk.com/apps/id
updateHost = https://apps.splunk.com
updatePath = /api/apps:resolve/checkforupgrade
updateTimeout = 24h
caCertFile = $SPLUNK_HOME/etc/auth/appsCA.pem
sslVerifyServerCert = true
sslCommonNameToCheck = apps.splunk.com, cdn.apps.splunk.com
sslAltNameToCheck = splunkbase.splunk.com, apps.splunk.com, cdn.apps.splunk.com
# The following provides modern TLS configuration that guarantees forward-
# secrecy and efficiency. This configuration drops support for old Splunk
# versions (e.g. Splunk 5.x).
# To add support for Splunk 5.x set sslVersions to tls and add this to the
# end of cipherSuite:
# DHE-RSA-AES256-SHA:AES256-SHA:DHE-RSA-AES128-SHA:AES128-SHA
# and this, in case Diffie Hellman is not configured:
# AES256-SHA:AES128-SHA
sslVersions = tls1.2
cipherSuite = ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256
ecdhCurves = prime256v1, secp384r1, secp521r1
# disk usage processor settings
[diskUsage]
minFreeSpace = 5000
pollingFrequency = 100000
pollingTimerFrequency = 10
[diag]
# don't capture local auth information in troubleshooting files
EXCLUDE-auth = */etc/auth/*
# don't capture the index files for lookups either (big! unlikely to help)
EXCLUDE-lookup-indexes = */etc/*/lookups/*.tsidx
# don't capture ops.json for now, until we add password hash redaction.
EXCLUDE-opsjson = */etc/system/replication/ops.json
upload_proto_host_port = https://api.splunk.com
#######
# Search string redaction. These defaults are an unavoidably incomplete
# (best-effort) Splunk diag attempt to avoid capturing sensitive information
# present in search queries. This applies to situations where people enter
# field values or search terms interactively, or where they drill down into a
# table, dataset, pivot entry etc. to filter on specific values.
# To ensure sensitive data in your environment that can occur in search queries
# will not be present in Splunk diag output, you can add pattern-based
# filtering for those terms or values.
# Note that Splunk diag tries hard not to capture event text in general, by
# avoiding capture of search results, lookup files, and certain types of
# diagnostics of index files by default.
# If you find yourself wanting to add an additional pattern, be sure to match
# only the bytes relevant to your data, not any additional characters. Each
# match "consumes" a portion of the search string, so additional matched bytes
# could prevent other matches from operating.
# Rough catchall for larger number strings with separators which are
# 1: More likely to be an identifier than a simple larger number
# eg. no : 32424234242342342423424234233
# yes: 2334-243-24234-43-234-423-342
# 2: Unlikely to be numbers that are needed for troubleshooting, like limit=5000000
# 3: Probably not IP addresses, or similar pretty useful information that isn't
# typically PII (personally identifying information)
SEARCHFILTERSIMPLE-pii = \b[-_\d]{2,}\d{3,}[-_]\d{3,}[-_\d]{2,}\b
# US social security numbers fit a well-known format and predate common
# practices for automatic validation/verification
SEARCHFILTERSIMPLE-socsec = \b\d{3}[-. ]\d{2}[-. ]\d{4}\b
# Payment card numbers as displayed for human readability may contain embedded
# dashes or spaces in them, though have many different clusterings of numbers
# across the separators internationally. Probably most payment card data does
# not arrive in Splunk indexes at all, but when it does, it is usually a single
# number and will be caught by bignum, following.
SEARCHFILTERLUHN-paycard = \b(?:\d{4}[- ]){3}\d{3,4}\b
# Any significantly large string of only numbers which satisfies the Luhn
# algorithm is *probably* a financial number, though unfortunately the
# false-positive rate will be 10%. This may lead to requests for unredacted
# snippets in some cases.
SEARCHFILTERLUHN-bignum = \b(?:\d{13,})\b
[applicense]
appLicenseHostPort = apps-api.splunk.com:443
appLicenseServerPath = /splunklicensevalidation/api/licenses/validations
caCertFile = $SPLUNK_HOME/etc/auth/appsLicenseCA.pem
cipherSuite = TLSv1.2+HIGH:@STRENGTH
sslVersions = tls1.2
sslVerifyServerCert = true
sslCommonNameToCheck = apps-api.splunk.com
sslAltNameToCheck = apps-api.splunk.com
disabled = true
#
# default license configuration
# by default, this node is a master that has a single
# slave (itself) and a single pool based on the single
# free stack that allots 100% to itself
#
[license]
master_uri = self
# these timeouts only matter if you have a master_uri set to remote master
connection_timeout = 30
send_timeout = 30
receive_timeout = 30
squash_threshold = 2000
report_interval = 1m
strict_pool_quota = true
[queue]
maxSize = 500KB
# look back time in minutes
cntr_1_lookback_time = 60s
cntr_2_lookback_time = 600s
cntr_3_lookback_time = 900s
# sampling interval is the same for all the counters of a particular queue
# and defaults to 1 sec
sampling_interval = 1s
[queue=fschangemanager_queue]
maxSize = 5MB
cntr_1_lookback_time = 60s
cntr_2_lookback_time = 600s
cntr_3_lookback_time = 900s
# sampling frequency is the same for all the counters of a particular queue
# and defaults to 1 sec
sampling_interval = 1s
[queue=AQ]
maxSize = 10MB
# look back time in minutes
cntr_1_lookback_time = 60s
cntr_2_lookback_time = 600s
cntr_3_lookback_time = 900s
# sampling frequency is the same for all the counters of a particular queue
# and defaults to 1 sec
sampling_interval = 1s
[queue=WEVT]
maxSize = 5MB
# look back time in minutes
cntr_1_lookback_time = 60s
cntr_2_lookback_time = 600s
cntr_3_lookback_time = 900s
# sampling frequency is the same for all the counters of a particular queue
# and defaults to 1 sec
sampling_interval = 1s
[queue=aggQueue]
maxSize = 1MB
# look back time in minutes
cntr_1_lookback_time = 60s
cntr_2_lookback_time = 600s
cntr_3_lookback_time = 900s
# sampling frequency is the same for all the counters of a particular queue
# and defaults to 1 sec
sampling_interval = 1s
[queue=parsingQueue]
maxSize = 6MB
# look back time in minutes
cntr_1_lookback_time = 60s
cntr_2_lookback_time = 600s
cntr_3_lookback_time = 900s
# sampling frequency is the same for all the counters of a particular queue
# and defaults to 1 sec
sampling_interval = 1s
[queue=remoteOutputQueue]
maxSize = 10MB
[queue=vixQueue]
maxSize = 8MB
[clustering]
mode = disabled
pass4SymmKey =
register_replication_address =
register_forwarder_address =
register_search_address =
executor_workers = 10
manual_detention = off
summary_replication = false
allowed_hbmiss_count = 3
pass4SymmKey_minLength = 12
# lowlevel timeouts for intra-cluster communication
cxn_timeout = 60
send_timeout = 60
rcv_timeout = 60
# replication channel timeouts
rep_cxn_timeout = 60
rep_send_timeout = 60
rep_rcv_timeout = 60
rep_max_send_timeout = 180
rep_max_rcv_timeout = 180
# only valid for mode=manager
service_interval = 0
max_fixup_time_ms = 1000
replication_factor = 3
search_factor = 2
heartbeat_timeout = 60
restart_timeout = 60
streaming_replication_wait_secs = 60
quiet_period = 60
reporting_delay_period = 30
max_peer_build_load = 2
max_peer_rep_load = 5
max_peer_sum_rep_load = 5
searchable_targets = true
searchable_target_sync_timeout = 60
target_wait_time = 150
summary_wait_time = 660
commit_retry_time = 300
percent_peers_to_restart = 10
percent_peers_to_reload = 100
max_peers_to_download_bundle = 5
precompress_cluster_bundle = true
multisite = false
site_replication_factor = origin:2, total:3
site_search_factor = origin:1, total:2
available_sites =
site_mappings =
constrain_singlesite_buckets=true
access_logging_for_heartbeats=false
auto_rebalance_primaries = true
rebalance_primaries_execution_limit_ms = 0
commit_generation_execution_limit_ms = 0
idle_connections_pool_size = -1
use_batch_mask_changes = true
service_jobs_msec = 100
rebalance_threshold = 0.90
max_auto_service_interval = 1
service_execution_threshold_ms = 1500
buckets_to_summarize = primaries
maintenance_mode = false
backup_and_restore_primaries_in_maintenance = false
max_primary_backups_per_service = 10
searchable_rolling_peer_state_delay_interval = 60
searchable_rolling_site_down_policy = half
allow_default_empty_p4symmkey = true
decommission_force_finish_idle_time = 0
rolling_restart = restart
searchable_rebalance = false
rebalance_pipeline_batch_size = 60
rebalance_primary_failover_timeout = 75
rebalance_newgen_propagation_timeout = 60
rebalance_search_completion_timeout = 180
deferred_cluster_status_update = true
assign_primaries_to_all_sites = false
log_bucket_during_addpeer = false
enable_primary_fixup_during_maintenance = true
freeze_during_maintenance = false
bucketsize_mismatch_strategy = largest
max_concurrent_peers_joining = 10
rolling_restart_condition = batch_adding
enable_parallel_add_peer = true
primary_src_persist_secs = 604800
#only valid for mode=manager or mode=searchhead
generation_poll_interval = 5
# only needed for mode=peer or mode=searchhead
manager_uri =
# only needed for mode=peer
heartbeat_period = 1
notify_scan_period = 10
notify_buckets_period = 10
enableS2SHeartbeat = true
s2sHeartbeatTimeout = 600
throwOnBucketBuildReadError = false
max_replication_errors = 3
search_files_retry_timeout = 600
re_add_on_bucket_request_error = false
decommission_search_jobs_wait_secs = 180
notify_scan_min_period = 10
summary_update_batch_size = 10
summary_registration_batch_size = 1000
decommission_node_force_timeout = 300
buckets_per_addpeer = 1000
max_nonhot_rep_kBps = 0
warm_bucket_replication_pre_upload = false
recreate_bucket_max_per_service = 20000
bucketsize_upload_preference = largest
upload_rectifier_timeout_secs = 2
[introspection:generator:disk_objects]
disabled = true
[introspection:generator:disk_objects__summaries]
collectionPeriodInSecs = 1800
[introspection:generator:disk_objects__fishbucket]
disabled = false
[introspection:generator:disk_objects__bundle_replication]
disabled = false
[introspection:generator:resource_usage]
disabled = true
[introspection:generator:resource_usage__iostats]
disabled = true
[introspection:generator:resource_usage__iowait]
disabled = true
[introspection:generator:kvstore]
disabled = true
[introspection:distributed-indexes]
disabled = true
collectionPeriodInSecs = 3600
[shclustering]
disabled = true
register_replication_address =
executor_workers = 10
adhoc_searchhead = false
no_artifact_replications = false
precompress_artifacts = true
captain_is_adhoc_searchhead = false
async_replicate_on_proxy = true
preferred_captain = true
prevent_out_of_sync_captain = true
pass4SymmKey_minLength = 12
manual_detention = off
master_dump_service_periods = 500
scheduling_heuristic = scheduler_load_based
long_running_jobs_poll_period = 600
election_timeout_ms = 60000
election_timeout_2_hb_ratio = 12
raft_rpc_backoff_time_ms = 5000
# lowlevel timeouts for intra-cluster communication
cxn_timeout = 60
send_timeout = 60
rcv_timeout = 60
# lowlevel timeouts for intra-cluster communication for the raft protocol
cxn_timeout_raft = 2
send_timeout_raft = 5
rcv_timeout_raft = 5
log_heartbeat_append_entries = false
# replication channel timeouts
rep_cxn_timeout = 60
rep_send_timeout = 60
rep_rcv_timeout = 60
rep_max_send_timeout = 600
rep_max_rcv_timeout = 600
# only valid for mode=manager
replication_factor = 3
heartbeat_timeout = 60
restart_timeout = 600
quiet_period = 60
max_peer_rep_load = 5
target_wait_time = 150
percent_peers_to_restart = 10
rolling_restart_with_captaincy_exchange = true
access_logging_for_heartbeats=false
rolling_restart = restart
decommission_search_jobs_wait_secs = 180
# only needed for mode=peer
heartbeat_period = 5
enableS2SHeartbeat = true
s2sHeartbeatTimeout = 600
#proxying related
sid_proxying = true
ss_proxying = true
ra_proxying = true
alert_proxying = true
csv_journal_rows_per_hb = 10000
#
# Replicate changes to UI- and search-related configurations.
#
conf_replication_period = 5
conf_replication_max_pull_count = 1000
conf_replication_max_push_count = 100
conf_replication_max_json_value_size = 15MB
conf_replication_include.alert_actions = true
conf_replication_include.authentication = true
conf_replication_include.authorize = true
conf_replication_include.collections = true
conf_replication_include.commands = true
conf_replication_include.datamodels = true
conf_replication_include.event_renderers = true
conf_replication_include.eventtypes = true
conf_replication_include.fields = true
conf_replication_include.global-banner = true
conf_replication_include.health = true
conf_replication_include.history = false
conf_replication_include.html = true
conf_replication_include.limits = true
conf_replication_include.literals = true
conf_replication_include.lookups = true
conf_replication_include.macros = true
conf_replication_include.manager = true
conf_replication_include.models = true
conf_replication_include.multikv = true
conf_replication_include.nav = true
conf_replication_include.panels = true
conf_replication_include.passwd = true
conf_replication_include.passwords = true
conf_replication_include.props = true
conf_replication_include.savedsearches = true
conf_replication_include.searchbnf = true
conf_replication_include.searchscripts = true
conf_replication_include.segmenters = true
conf_replication_include.tags = true
conf_replication_include.telemetry = true
conf_replication_include.tos = true
conf_replication_include.times = true
conf_replication_include.transforms = true
conf_replication_include.transactiontypes = true
conf_replication_include.ui-prefs = true
conf_replication_include.ui-tour = true
conf_replication_include.user-prefs = true
conf_replication_include.views = true
conf_replication_include.viewstates = true
conf_replication_include.workflow_actions = true
conf_replication_include.workload_pools = true
conf_replication_include.workload_rules = true
conf_replication_include.workload_policy = true
conf_replication_include.metric_rollups = true
conf_replication_include.metric_alerts = true
# Whitelists and blacklists for configuration replication summaries.
conf_replication_summary.whitelist.refine.local = (system|(apps/*)|users(/_reserved)?/*/*)/(local/...|metadata/local.meta)
conf_replication_summary.whitelist.passwd = passwd
conf_replication_summary.whitelist.lookups = (system|(apps/*)|users(/_reserved)?/*/*)/lookups/*
conf_replication_summary.whitelist.repo = system/replication/*.json
conf_replication_summary.blacklist.lookup_index = (system|(apps/*)|users(/_reserved)?/*/*)/lookups/*.(tmp$|index($|/...))
conf_replication_summary.concerning_file_size = 50
conf_replication_summary.period = 1m
conf_replication_purge.eligibile_count = 20000
conf_replication_purge.eligibile_age = 1d
conf_replication_purge.period = 1h
conf_replication_find_baseline.use_bloomfilter_only = false
#
# Deploy configurations to search head cluster members.
#
conf_deploy_repository = $SPLUNK_HOME/etc/shcluster
conf_deploy_staging = $SPLUNK_HOME/var/run/splunk/deploy
conf_deploy_concerning_file_size = 50
conf_deploy_precompress_bundles = true
conf_deploy_fetch_url =
conf_deploy_fetch_mode = replace
artifact_status_fields = user, eai:acl.app , label
encrypt_fields = "server: :sslKeysfilePassword", "server: :sslPassword", "server: :pass4SymmKey", "server: :password", "outputs:tcpout:sslPassword", "outputs:tcpout:socksPassword","outputs:indexer_discovery:pass4SymmKey", "outputs:tcpout:token", "inputs:SSL:password", "inputs:SSL:sslPassword", "inputs:http:sslPassword", "inputs:http:sslKeysfilePassword", "inputs:splunktcptoken:token", "alert_actions:email:auth_password", "app:credential:password", "app:credential:sslPassword", "passwords:credential:password", "passwords:credential:sslPassword", "authentication: :bindDNpassword", "authentication: :sslKeysfilePassword", "authentication: :attributeQuerySoapPassword", "authentication: :scriptSecureArguments", "authentication: :sslPassword", "authentication: :accessKey", "web:settings:privKeyPassword", "web:settings:sslPassword", "server:indexer_discovery:pass4SymmKey", "server:clustermanager:pass4SymmKey", "server:dmc:pass4SymmKey", "server:kvstore:sslKeysPassword", "indexes: :remote.s3.access_key", "indexes: :remote.s3.secret_key", "indexes: :remote.s3.kms.key_id", "server:scs:kvservice.principal.client.secret", "federated: :password"
enable_jobs_data_lite = true
retry_autosummarize_or_data_model_acceleration_jobs = true
deployerPushThreads = 1
[kvstore]
disabled = false
port = 8191
replicaset = splunkrs
storageEngine=mmapv1
storageEngineMigration = false
shutdownTimeout = 100
initAttempts = 300
initialSyncMaxFetcherRestarts = 0
delayShutdownOnBackupRestoreInProgress = false
oplogSize = 1000
dbPath = $SPLUNK_DB/kvstore
replicationWriteTimeout = 1800
clientConnectionTimeout = 10
clientSocketTimeout = 300
percRAMForCache = 15
clientConnectionPoolSize = 500
[cachemanager]
eviction_policy = lru
eviction_padding = 5120
max_cache_size = 0
hotlist_recency_secs = 86400
hotlist_bloom_filter_recency_hours = 360
evict_on_stable = false
batch_registration = true
[imds]
imds_version = v1
#
# Raft statemachine stanza
#
[raft_statemachine]
disabled = true
replicate_search_peers = false
[stderr_log_rotation]
# 10 million bytes, or "short" megabytes
maxFileSize = 10000000
BackupIndex = 2
checkFrequency = 10
[stdout_log_rotation]
# 10 million bytes, or "short" megabytes
maxFileSize = 10000000
BackupIndex = 2
checkFrequency = 10
[prometheus]
disabled = true
# Watchdog configuration
[watchdog]
disabled = false
responseTimeout = 8
actions =
actionsInterval = 1
pstacksEndpoint = true
usePreloadedPstacks = true
[watchdog:timeouts]
reaperThread = 30
[watchdogaction:pstacks]
dumpAllThreads = true
stacksBufferSizeOrder = 14
maxStacksPerBlock = 60
batchStacksThreshold = auto
[watchdogaction:script]
path = ""
useShell = false
forceStop = false
forceStopOnShutdown = true
[dfs]
disabled = true
dfc_ip_address =
port = 9000
extra_kryo_registered_classes =
spark_home =
spark_master_host = 127.0.0.1
spark_master_webui_port = 8080
spark_master_connect_timeout = 10
connection_timeout = 180
connection_retries = 5
[node_auth]
signatureVersion = v1,v2
[federated_search]
disabled=false
[app_backup]
backup_path = $SPLUNK_HOME/var/backup
[config_change_audit]
disabled = true
mode=auto
[distributed_leases]
sslVerifyServerCert = false
disabled = true