# Imported from commit a35a64fb57 (parent 2c95c98ca7).
# Splunk app configuration file (app.conf)
# Version 10.0.2

[install]
state = enabled

[ui]
# Hidden app: it only ships props/transforms, there is no UI to show.
is_visible = false
label = Clones Internal Metrics into Metrics Index

[launcher]
description = Clones, converts and stores various internal splunk logs into _metrics index
###
# Clones all metrics.log events into the splunk_metrics_log sourcetype.
# That sourcetype does the necessary conversion from log event to metric
# event and redirects the events to the _metrics index. See the
# accompanying transforms.conf in this app for details on the
# transformation.
###

[source::.../var/log/splunk/metrics.log(.\d+)?]
TRANSFORMS-metricslogclone = metrics_log_clone
# Clone introspection disk_objects.log events into the
# splunk_intro_disk_objects sourcetype (see transforms.conf).
[source::.../var/log/introspection/disk_objects.log(.\d+)?]
TRANSFORMS-diskobjectsclone = introspection_disk_objects_log_clone
# Clone introspection resource_usage.log events into the
# splunk_intro_resource_usage sourcetype (see transforms.conf).
[source::.../var/log/introspection/resource_usage.log(.\d+)?]
TRANSFORMS-resourceusageclone = introspection_resource_usage_log_clone
# Cloned metrics.log events land here: redirect to _metrics, extract
# key=value fields, build the metric_name prefix, then run the metric
# schema conversion.
[splunk_metrics_log]
TRANSFORMS-metricslog = metrics_index_redirect,metrics_field_extraction,metrics_log_metric_name
METRIC-SCHEMA-TRANSFORMS = metric-schema:metrics_dot_log
# Cloned disk_objects.log events: redirect to _metrics and build the
# metric_name prefix before schema conversion.
[splunk_intro_disk_objects]
# NOTE(review): "blah" is a placeholder transform-class name. Renaming it
# is safe only if no other conf layer overrides this class — verify
# before changing the key.
TRANSFORMS-blah = metrics_index_redirect,introspection_disk_objects_metric_name
METRIC-SCHEMA-TRANSFORMS = metric-schema:introspection_disk_objects
# Cloned resource_usage.log events: redirect to _metrics and build the
# metric_name prefix before schema conversion.
[splunk_intro_resource_usage]
# NOTE(review): "bloo" is a placeholder transform-class name. Renaming it
# is safe only if no other conf layer overrides this class — verify
# before changing the key.
TRANSFORMS-bloo = metrics_index_redirect,introspection_resource_usage_metric_name
METRIC-SCHEMA-TRANSFORMS = metric-schema:introspection_resource_usage
# Log-to-metrics source types for CSV, JSON and key=value data.

[log2metrics_csv]
DATETIME_CONFIG =
INDEXED_EXTRACTIONS = csv
LINE_BREAKER = ([\r\n]+)
METRIC-SCHEMA-TRANSFORMS = metric-schema:log2metrics_default_csv
NO_BINARY_CHECK = true
category = Log to Metrics
pulldown_type = 1
description = Comma-separated value format. Log-to-metrics processing converts the numeric values in csv events into metric data points.
[log2metrics_json]
DATETIME_CONFIG =
INDEXED_EXTRACTIONS = json
LINE_BREAKER = ([\r\n]+)
METRIC-SCHEMA-TRANSFORMS = metric-schema:log2metrics_default_json
NO_BINARY_CHECK = true
category = Log to Metrics
pulldown_type = 1
description = JSON-formatted data. Log-to-metrics processing converts the numeric values in json keys into metric data points.
[log2metrics_keyvalue]
DATETIME_CONFIG =
LINE_BREAKER = ([\r\n]+)
METRIC-SCHEMA-TRANSFORMS = metric-schema:log2metrics_default_keyvalue
NO_BINARY_CHECK = true
# key=value data has no indexed-extractions mode; use the regex-based
# field extraction from transforms.conf instead.
TRANSFORMS-EXTRACT = metrics_field_extraction
category = Log to Metrics
pulldown_type = 1
description = '<key>=<value>' formatted data. Log-to-metrics processing converts the keys with numeric values into metric data points.
###
# Redirects data into the _metrics index.
###
[metrics_index_redirect]
# Match every event and write the destination index into its metadata.
REGEX = .
DEST_KEY = _MetaData:Index
FORMAT = _metrics
###
# Sets the metric_name 'prefix' for this event. The metric schema
# processor will append the actual measure names to this prefix to form
# the final metric_name for the event.
#
# case() takes the FIRST matching condition; the `1 == 1` arm is the
# catch-all for events with no group field.
###
[metrics_log_metric_name]
INGEST_EVAL = metric_name="spl.mlog.".case(\
    group IN ( "subtask_seconds",\
               "subtask_counts",\
               "search_concurrency",\
               "deploy-server",\
               "thruput",\
               "jobs",\
               "search_health_metrics",\
               "tailingprocessor" ) AND isnotnull(name),\
        group.".".name,\
    group == "pipeline" AND name == "indexerpipe" AND processor == "indexer",\
        group.".".processor,\
    group == "pipeline" AND processor == "regexreplacement",\
        group.".".processor.".".name,\
    group == "dutycycle" AND name == "misc",\
        group.".".name,\
    group == "search_concurrency" AND isnotnull(user),\
        group.".per_user",\
    group == "bundles_downloads" AND isnotnull(baseline_count),\
        group.".baseline",\
    group == "bundles_downloads" AND isnotnull(delta_count),\
        group.".delta",\
    isnotnull(group), group,\
    1 == 1, "nullgroup"\
)
###
# Clones the event as-is and updates its sourcetype to
# splunk_metrics_log. The clone then undergoes the transforms associated
# with that sourcetype.
###
[metrics_log_clone]
SOURCE_KEY = MetaData:Source
REGEX = (.)
CLONE_SOURCETYPE = splunk_metrics_log
###
# Metric schema that converts a cloned metrics.log event into a metric
# data point. All numeric fields become measures except destPort and
# sourcePort (presumably kept as dimensions because they are
# identifiers, not measurements — verify with the schema owner).
###
[metric-schema:metrics_dot_log]
METRIC-SCHEMA-MEASURES = _NUMS_EXCEPT_ destPort,sourcePort
###
# Start of transforms for introspection logs.
#
# Follows a similar pattern to the metrics.log case above: clone the
# events, create a metric_name prefix, and send them through the metric
# schema processor to form valid metric store events.
###
[introspection_disk_objects_log_clone]
SOURCE_KEY = MetaData:Source
REGEX = (.)
CLONE_SOURCETYPE = splunk_intro_disk_objects
# Clone resource_usage.log events into the splunk_intro_resource_usage
# sourcetype, mirroring the disk_objects clone above.
[introspection_resource_usage_log_clone]
SOURCE_KEY = MetaData:Source
REGEX = (.)
CLONE_SOURCETYPE = splunk_intro_resource_usage
###
# Sets the metric_name prefix for introspection disk-objects events;
# appends ".<component>" when a component field is present.
###
[introspection_disk_objects_metric_name]
INGEST_EVAL = metric_name = "spl.intr.disk_objects"\
    .case(\
        isnotnull( component ), ".".component,\
        1==1, ""\
    )
###
# Sets the metric_name prefix for introspection resource-usage events;
# appends ".<component>" when a component field is present.
###
[introspection_resource_usage_metric_name]
INGEST_EVAL = metric_name = "spl.intr.resource_usage"\
    .case(\
        isnotnull( component ), ".".component,\
        1==1, ""\
    )
###
# NOTE: we choose to exclude max_size, which is reported as part of the
# Volumes component, from the measures (keeping it a dimension) since it
# can take on the value "Infinite" and it is wasteful to store it as a
# measure as it won't change a lot anyway.
#
# NOTE(review): max_size does not actually appear in the exclusion lists
# below — confirm whether a pattern is meant to cover it or the comment
# above is stale.
###
[metric-schema:introspection_disk_objects]
METRIC-SCHEMA-MEASURES = _NUMS_EXCEPT_ *data.top_*
METRIC-SCHEMA-BLACKLIST-DIMS = datetime, *data.top_*, *data.args
###
# NOTE: we choose to exclude 'interval' from the measures since it will
# mostly be a fixed value — no need to take up space for it as a
# measure.
#
# 'datetime' is blacklisted as a dimension since it is high-cardinality
# and redundant with the _time field.
###
[metric-schema:introspection_resource_usage]
METRIC-SCHEMA-MEASURES = _NUMS_EXCEPT_ *data.splunk_version, *data.pid, *data.ppid, *data.interval
METRIC-SCHEMA-BLACKLIST-DIMS = datetime
# Default log-to-metrics schemas: every numeric field becomes a measure.

[metric-schema:log2metrics_default_csv]
METRIC-SCHEMA-MEASURES = _ALLNUMS_

[metric-schema:log2metrics_default_keyvalue]
METRIC-SCHEMA-MEASURES = _ALLNUMS_

[metric-schema:log2metrics_default_json]
METRIC-SCHEMA-MEASURES = _ALLNUMS_
###
# Extracts key=value pairs as indexed fields.
###
[metrics_field_extraction]
# Ensure that the "-" character is last in the value character class
# so it is not incorrectly interpreted as a range.
REGEX = ([a-zA-Z0-9_\.]+)=\"?([a-zA-Z0-9_\.:-]+)
FORMAT = $1::$2
REPEAT_MATCH = true
WRITE_META = true
# Application-level permissions (default.meta): everyone can read,
# only admin can write.
[]
access = read : [ * ], write : [ admin ]