# limits.conf -- per-command result limits and search concurrency settings.
# Version 8.2.3
#
# CAUTION: Do not alter the settings in limits.conf unless you know what you are doing.
# Improperly configured limits may result in splunkd crashes and/or memory overuse.
[searchresults]
# maximum number of rows a search can return
maxresultrows = 50000

# maximum number of times to try in the atomic write operation (1 = no retries)
tocsv_maxretry = 5

# retry period is 1/2 second (500 milliseconds)
tocsv_retryperiod_ms = 500
[subsearch]
# maximum number of results to return from a subsearch
maxout = 100

# maximum number of seconds to run a subsearch before finalizing
maxtime = 10

# time to cache a given subsearch's results (seconds)
ttl = 300
[anomalousvalue]
maxresultrows = 50000

# maximum number of distinct values for a field
maxvalues = 100000

# maximum size in bytes of any single value (truncated to this size if larger)
maxvaluesize = 1000
[associate]
# maximum number of fields to analyze
maxfields = 10000
# maximum number of values per field
maxvalues = 10000
# maximum size in bytes of any single value
maxvaluesize = 1000
# for the contingency, ctable, and counttable commands
[ctable]
maxvalues = 1000
[correlate]
maxfields = 1000
# for bin/bucket/discretize
[discretize]
# if maxbins not specified or = 0, defaults to searchresults::maxresultrows
maxbins = 50000
[inputcsv]
# maximum number of retries for creating a tmp directory (with random name in
# SPLUNK_HOME/var/run/splunk)
mkdir_max_retries = 100
[kmeans]
# maximum number of data points processed by the kmeans command
maxdatapoints = 100000000
[kv]
# when non-zero, the point at which kv should stop creating new columns
maxcols = 512
[rare]
maxresultrows = 50000

# maximum distinct value vectors to keep track of
maxvalues = 100000

# maximum size in bytes of any single value
maxvaluesize = 1000
[restapi]
# maximum result rows to be returned by /events or /results getters from REST
# API
maxresultrows = 50000
[search]
# how long searches should be stored on disk once completed (in seconds)
ttl = 86400

# the approximate maximum number of timeline buckets to maintain
status_buckets = 300

# the last accessible event in a call that takes a base and bounds
max_count = 10000

# the minimum length of a prefix before a * to ask the index about
min_prefix_len = 1

# the length of time to persist search cache entries (in seconds)
cache_ttl = 300

# By default, we will not retry searches in the event of indexer
# failures with indexer clustering enabled.
# Hence, the default value for search_retry here is false.
search_retry = false

# Timeout value for checking search marker files like hotbucketmarker or backfill
# marker (seconds).
check_search_marker_done_interval = 60

# Time interval of sleeping between subsequent search marker files checks (seconds).
check_search_marker_sleep_interval = 1

# If number of cpu's in your machine is 14 then total system wide number of
# concurrent searches this machine can handle is 20,
# which is base_max_searches + max_searches_per_cpu x num_cpus = 6 + 14 x 1 = 20
base_max_searches = 6
max_searches_per_cpu = 1
[scheduler]
# Percent of total concurrent searches that will be used by scheduler is
# total concurrency x max_searches_perc = 20 x 60% = 12 scheduled searches.
# User default value (needed only if different from system/default value) when
# no max_searches_perc.<n>.when (if any) below matches.
max_searches_perc = 60

# Increase the value between midnight-5AM.
max_searches_perc.0 = 75
max_searches_perc.0.when = * 0-5 * * *

# More specifically, increase it even more on weekends.
max_searches_perc.1 = 85
max_searches_perc.1.when = * 0-5 * * 0,6

# Maximum number of concurrent searches is enforced cluster-wide by the
# captain for scheduled searches. For a 3 node SHC total concurrent
# searches = 3 x 20 = 60. The total searches (adhoc + scheduled) = 60, then
# no more scheduled searches can start until some slots are free.
shc_syswide_quota_enforcement = true
[slc]
# maximum number of clusters to create
maxclusters = 10000
[findkeywords]
# events to use in findkeywords command (and patterns UI)
maxevents = 50000
[stats]
# maximum number of rows in the output
maxresultrows = 50000
# maximum number of values per field
maxvalues = 10000
# maximum size in bytes of any single value
maxvaluesize = 1000
[top]
maxresultrows = 50000

# maximum distinct value vectors to keep track of
maxvalues = 100000

# maximum size in bytes of any single value
maxvaluesize = 1000
# Master switch plus per-optimizer toggles for the search optimizer.
[search_optimization]
enabled = true

[search_optimization::predicate_split]
enabled = true

[search_optimization::predicate_push]
enabled = true

[search_optimization::predicate_merge]
enabled = true
inputlookup_merge = true
merge_to_base_search = true

[search_optimization::projection_elimination]
enabled = true
# comma-separated list of commands excluded from projection elimination
cmds_black_list = eval, rename

[search_optimization::search_flip_normalization]
enabled = true

[search_optimization::reverse_calculated_fields]
enabled = true

[search_optimization::search_sort_normalization]
enabled = true

[search_optimization::replace_table_with_fields]
enabled = true

[search_optimization::replace_stats_cmds_with_tstats]
enabled = false
detect_search_time_field_collisions = true

[search_optimization::replace_datamodel_stats_cmds_with_tstats]
enabled = true

[search_optimization::dfs_job_extractor]
enabled = true
# Data Fabric Search (DFS) controller/worker settings.
# NOTE(review): the original section listed dfs_max_num_keepalives twice with
# the same value (10); duplicate keys are parser-dependent, so the redundant
# entry has been removed.
[dfs]
dfc_control_port = 17000
dfc_num_slots = 4
dfs_max_num_keepalives = 10
dfs_max_reduce_partition_size = 150000
dfs_max_search_result_size = 1000000
dfw_num_slots = 10
dfw_num_slots_enabled = true
dfw_receiving_data_port = 17500
dfw_receiving_data_port_count = 0