# Copyright (C) 2005-2024 Splunk Inc. All Rights Reserved.
###############################################
# ITOA Alert Search Macros
###############################################
# Used when Fill Data Gaps option is set to Last Available Value, for Aggregate KPI.
[fill_aggregate_gaps(1)]
args = kpi_key
definition = `get_service_aggregate_cached_alert_value($kpi_key$)` | eval alert_value=coalesce(alert_value, cached_alert_value), kpi_id="$kpi_key$", entity_title="service_aggregate" | `write_latest_results_to_kpi_cache` | eval is_service_aggregate="1", is_entity_defined="0", entity_key="service_aggregate" | `gettime`
# Used to convert raw data into the service aggregate (no entity breakdown)
[aggregate_raw_into_service(2)]
args = aggregate_statop, threshold_field
definition = stats $aggregate_statop$($threshold_field$) AS alert_value | eval is_service_aggregate="1", is_entity_defined="0", entity_key="service_aggregate", entity_title="service_aggregate" | `gettime`
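# Example (illustrative only; "cpu_load_percent" is a hypothetical threshold field):
#   ... | `aggregate_raw_into_service(avg, cpu_load_percent)`
# expands to roughly:
#   ... | stats avg(cpu_load_percent) AS alert_value | eval is_service_aggregate="1",
#       is_entity_defined="0", entity_key="service_aggregate", entity_title="service_aggregate" | `gettime`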
# Used to add related fields in metric based KPI alert search (no entity breakdown)
[decorate_aggregate_fields]
definition = eval is_service_aggregate="1", is_entity_defined="0", entity_key="service_aggregate", entity_title="service_aggregate" | `gettime`
# Used to convert raw data into the service aggregate.
# Uses KPI Summary Cache to override service aggregate data gap for KPI, with last reported KPI alert_value.
# Updates KPI Summary Cache with latest reported service aggregate result, if it is not a data gap.
[aggregate_raw_into_service_using_kpi_cache(3)]
args = aggregate_statop, threshold_field, kpi_key
definition = stats $aggregate_statop$($threshold_field$) AS alert_value | `fill_aggregate_gaps($kpi_key$)`
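# Example (illustrative; the threshold field and KPI key shown are hypothetical):
#   ... | `aggregate_raw_into_service_using_kpi_cache(avg, cpu_load_percent, 0b1a2c3d_kpi)`
# computes the service aggregate, then substitutes the cached alert_value from
# itsi_kpi_alert_value_cache for this KPI when the current window produced no data.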
# Get last reported service aggregate result from KPI Summary Cache.
[get_service_aggregate_cached_alert_value(1)]
args = kpi_key
definition = inputlookup append=true itsi_kpi_alert_value_cache where (kpi_id="$kpi_key$" AND entity_title="service_aggregate") | eval mod_time=if(isnull(mod_time), now(), mod_time) | stats first(alert_value) as alert_value first(entity_title) as entity_title first(kpi_id) as kpi_id first(cached_alert_value) as cached_alert_value max(mod_time) as mod_time
# Write latest non-gap KPI results to KPI Summary Cache
[write_latest_results_to_kpi_cache]
args =
definition = eval cached_alert_value=if((isnull(alert_value) AND isnull(cached_alert_value)), "N/A", coalesce(alert_value, cached_alert_value)) | fields - alert_value | eval kpi_lookup_key=kpi_id."_".entity_title | outputlookup itsi_kpi_alert_value_cache append=true key_field=kpi_lookup_key | fields - kpi_id, kpi_lookup_key, mod_time | rename cached_alert_value AS alert_value
# Used when Fill Data Gaps option is set to Last Available Value, for Entity Level KPI.
[fill_entity_gaps(2)]
args = entity_field, kpi_key
definition = `get_entities_cached_alert_value("$entity_field$", $kpi_key$)` | eval alert_value=coalesce(alert_value, cached_alert_value), entity_split_field="$entity_field$", kpi_id="$kpi_key$" | rename $entity_field$ as entity_title | `write_latest_results_to_kpi_cache` | rename entity_title as $entity_field$ | fields - entity_split_field | `gettime`
# Extracted out basic search since it's used in multiple places. Inserted and used
# in most of the KPI aggregation searches
[aggregate_raw_data_into_entity(3)]
args = entity_statop, threshold_field, entity_field
definition = stats $entity_statop$($threshold_field$) AS alert_value by $entity_field$
# Used to convert raw data into the per entity alert_values
# As of ITSI-16491, entity_field can be multiple comma-separated fields
# e.g. entity_field == host,moid,vcenter
[aggregate_raw_into_entity(3)]
args = entity_statop, threshold_field, entity_field
definition = `aggregate_raw_data_into_entity($entity_statop$,$threshold_field$, "$entity_field$")` | `gettime`
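# Example (illustrative; field names are hypothetical). A comma-separated entity_field must be
# quoted so the whole list is passed as a single macro argument:
#   ... | `aggregate_raw_into_entity(max, response_time, "host,moid,vcenter")`
# expands to roughly:
#   ... | stats max(response_time) AS alert_value by host,moid,vcenter | `gettime`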
# Used to convert raw data into the per entity alert_values
# Uses KPI Summary Cache to override entity data gap, with last reported entity alert_value for KPI.
# Updates KPI Summary Cache with latest reported entity results, if entity results are not data gaps.
[aggregate_raw_into_entity_using_kpi_cache(4)]
args = entity_statop, threshold_field, entity_field, kpi_key
definition = `aggregate_raw_data_into_entity($entity_statop$,$threshold_field$, "$entity_field$")` | `fill_entity_gaps("$entity_field$", $kpi_key$)`
# Get last reported entity results from KPI Summary Cache.
[get_entities_cached_alert_value(2)]
args = entity_field, kpi_key
definition = inputlookup append=t itsi_kpi_alert_value_cache where (kpi_id="$kpi_key$" AND entity_split_field="$entity_field$" AND entity_title!="service_aggregate") | eval $entity_field$=coalesce($entity_field$, entity_title), mod_time=if(isnull(mod_time), now(), mod_time) | fields - entity_title, kpi_id | stats first(entity_split_field) as entity_split_field first(alert_value) as alert_value first(cached_alert_value) as cached_alert_value max(mod_time) as mod_time by $entity_field$
# Gets last reported entity results from KPI Summary Cache. Only to be used with compound entity aliases
# Assumption 1: Before this macro is called, a field "compound_alias" that is a concatenation
# of the compound alias fields with a : separating the fields will be created
# Assumption 2: The entity breakdown fields will be sorted alphabetically (aField, bField, cField)
[get_compound_alias_entities_cached_alert_values(2)]
args = entity_fields, kpi_key
definition = inputlookup append=t itsi_kpi_alert_value_cache where (kpi_id="$kpi_key$" AND entity_split_field="$entity_fields$" AND entity_title!="service_aggregate") | eval mod_time=if(isnull(mod_time), now(), mod_time)
# Aggregates the data from KPI summary cache into the KPI raw data
# Assumption 1: Before this macro is called, a field "compound_alias" that is a concatenation
# of the compound alias fields with a : separating the fields will be created
# Assumption 2: Before this macro is called, the various compound entity alias fields are coalesced
# and assigned a value -- there will be no entry in the table that contains a blank cell
[aggregate_cached_compound_alias_entity_alert_values(1)]
args = entity_fields
definition = fields - kpi_id | stats first(entity_split_field) as entity_split_field first(alert_value) as alert_value first(cached_alert_value) as cached_alert_value max(mod_time) as mod_time first(compound_alias) as compound_alias by $entity_fields$
# Used when Fill Data Gaps option is set to Last Available Value, for Entity Level KPI.
[fill_compound_alias_entity_gaps(2)]
args = entity_fields, kpi_key
definition = eval alert_value=coalesce(alert_value, cached_alert_value), entity_split_field="$entity_fields$", kpi_id="$kpi_key$" | fields - $entity_fields$ | `write_latest_results_to_kpi_cache` | fields - entity_split_field | `gettime`
# Used to check if object is in maintenance
[is_object_in_maintenance(2)]
args = object_type, object_key_field
definition = eval maintenance_object_type = "$object_type$", maintenance_object_key = $object_key_field$ | lookup operative_maintenance_log maintenance_object_type, maintenance_object_key OUTPUT _key as maintenance_log_key | eval in_maintenance = if(IsNull(maintenance_log_key), 0, 1) | fields - maintenance_object_key, maintenance_object_type, maintenance_log_key
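# Example (illustrative): flag search results whose serviceid is currently in maintenance:
#   ... | `is_object_in_maintenance("service", serviceid)`
# sets in_maintenance=1 when the serviceid has an entry in operative_maintenance_log, otherwise 0.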
# process searched entities, primarily for internal use only
# assumes lookup itsi_entities was invoked prior to this macro
[normalize_matched_entities(1)]
args = entity_field
definition = `is_object_in_maintenance("entity", entity_key)` | eval is_entity_defined=if(isnull(entity_key), "0", "1"), entity_key=if(isnull(entity_key), "N/A", entity_key), entity_title=coalesce(entity_title, '$entity_field$'), is_service_aggregate="0", is_entity_in_maintenance = in_maintenance | fields - $entity_field$, in_maintenance, entity_sec_grp
# process searched entities, primarily for internal use only
# assumes lookup itsi_entities was invoked prior to this macro
# Also assumes that compound_pseudo_entity was correctly evaluated before this macro
# was invoked
[normalize_matched_compound_entities(1)]
args = entity_field
definition = `is_object_in_maintenance("entity", entity_key)` | eval is_entity_defined=if(isnull(entity_key), "0", "1"), entity_key=if(isnull(entity_key), "N/A", entity_key), entity_title=coalesce(entity_title, compound_pseudo_entity), is_service_aggregate="0", is_entity_in_maintenance = in_maintenance | fields - $entity_field$, in_maintenance, entity_sec_grp, compound_pseudo_entity
# Used to augment search results for entities
[itsi_match_entities(2)]
args = entity_field, sec_grp_field
definition = eval itsi_identifier_lookup="$entity_field$=" + '$entity_field$' | lookup itsi_entities _itsi_identifier_lookups as itsi_identifier_lookup OUTPUT _key as entity_key, title as entity_title, services._key as serviceid, sec_grp as $sec_grp_field$, retired as retired | where not retired=1 | fields - itsi_identifier_lookup
# Used to augment search results for entities. Used specifically when user splits by compound alias fields
[itsi_match_multiple_entity_fields(3)]
args = entity_fields, sec_grp_field, entity_identifiers
definition = foreach $entity_fields$ [ eval itsi_identifier_<<FIELD>>="<<FIELD>>=" + <<FIELD>>] | lookup itsi_entities $entity_identifiers$ OUTPUT _key as entity_key, title as entity_title, services._key as serviceid, sec_grp as $sec_grp_field$, retired as retired | where not retired=1 | fields - itsi_identifier_*
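# Example (illustrative; "host" and "moid" are hypothetical breakdown fields): with
# entity_fields "host,moid", the foreach clause above generates
#   eval itsi_identifier_host="host=" + host
#   eval itsi_identifier_moid="moid=" + moid
# before the itsi_entities lookup is applied using the caller-supplied entity_identifiers mapping.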
# Used to augment search results for entities (through a compound alias) by security group that entities belong to
[match_compound_entities(3)]
args = entity_fields, sec_grp_field, entity_identifiers
definition = `itsi_match_multiple_entity_fields("$entity_fields$", $sec_grp_field$, "$entity_identifiers$")` | `normalize_matched_compound_entities("$entity_fields$")`
# Used to augment search results for entities
[match_entities(1)]
args = entity_field
definition = `itsi_match_entities($entity_field$, entity_sec_grp)` | `normalize_matched_entities($entity_field$)`
# Used to augment search results for entities by security group that entities belong to
[match_entities(2)]
args = entity_field, sec_grp_field
definition = `itsi_match_entities($entity_field$, $sec_grp_field$)` | `normalize_matched_entities("$entity_field$")`
# Used to augment search results for filter entities by security group that entities belong to
# Specifically, used to add serviceid for each entity result
# Used by shared base search when entity filter and entity breakdown fields are different
[match_filter_entites(2)]
args = entity_filter_field, sec_grp_field
definition = eval itsi_identifier_lookup="$entity_filter_field$=" + '$entity_filter_field$' | lookup itsi_entities _itsi_identifier_lookups as itsi_identifier_lookup OUTPUT services._key as serviceid | fields - itsi_identifier_lookup
# Used to augment search results for breakdown entities by security group that entities belong to
# Used by shared base search when entity filter and entity breakdown fields are different
[match_breakdown_entities(2)]
args = entity_breakdown_field, sec_grp_field
definition = eval itsi_identifier_lookup="$entity_breakdown_field$=" + '$entity_breakdown_field$' | lookup itsi_entities _itsi_identifier_lookups as itsi_identifier_lookup OUTPUT title as entity_title, _key as entity_key, sec_grp as entity_sec_grp | `normalize_matched_entities($entity_breakdown_field$)`
# Used to augment search results for breakdown entities by security group that entities belong to
# Used by shared base search when entity filter and entity breakdown fields are different
# and the shared base search is split by a compound entity alias
[match_breakdown_compound_alias_entities(3)]
args = entity_breakdown_field, sec_grp_field, entity_identifiers
definition = foreach $entity_breakdown_field$ [ eval itsi_identifier_<<FIELD>>="<<FIELD>>=" + <<FIELD>>] | lookup itsi_entities $entity_identifiers$ OUTPUT title as entity_title, _key as entity_key, sec_grp as entity_sec_grp | fields - itsi_identifier_* | `normalize_matched_compound_entities("$entity_breakdown_field$")`
# Used to filter out entities when they are in maintenance
# Primarily consumed by notable events to filter out correlation search events that have maintenance entities associated
# This macro assumes that match_entities macro was used earlier which populated is_entity_in_maintenance field
[filter_maintenance_entities]
args =
definition = where IsNull(is_entity_in_maintenance) OR (is_entity_in_maintenance != 1)
# Given a bunch of service ids filter out events that have all the services in maintenance
# This is primarily used by notable events to not generate events when services associated with a correlation search
# are all in maintenance
[filter_maintenance_services(1)]
args = service_ids
definition = eval service_ids = "$service_ids$" | makemv delim="," service_ids | mvexpand service_ids | `is_object_in_maintenance("service", service_ids)` | where IsNull(in_maintenance) OR (in_maintenance != 1) | fields - in_maintenance | mvcombine service_ids | fields - service_ids
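# Example (illustrative; the service GUIDs are hypothetical):
#   ... | `filter_maintenance_services("svc-guid-1,svc-guid-2")`
# keeps an event only if at least one of the listed services is not currently in maintenance.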
# Used to create an aggregate event from the entity breakdown events
[aggregate_entity_into_service(1)]
args = service_statop
definition = appendpipe [stats $service_statop$(alert_value) AS alert_value by serviceid, is_entity_in_maintenance | sort 0 serviceid is_entity_in_maintenance | dedup consecutive=t serviceid | eval is_all_entities_in_maintenance=is_entity_in_maintenance, is_service_aggregate="1", is_entity_defined="0", entity_key="service_aggregate", entity_title="service_aggregate"] | `gettime`
# Used to assess the severity for any KPI base search
# The gettime macro is added at the end of this macro to handle the time range issue for shared base searches.
[assess_severity(1)]
args = kpibasesearch
definition = setseverityfields kpibasesearch="$kpibasesearch$", output_secgrp="True" | fields - is_all_entities_in_maintenance, entity_keys | `gettime`
# Used to assess severity for any KPI alert search
[assess_severity(2)]
args = serviceid, kpiid
definition = eval maintenance_service_id = "$serviceid$" | `is_object_in_maintenance("service", maintenance_service_id)` | eval is_service_in_maintenance = in_maintenance | fields - in_maintenance, maintenance_service_id | setseverityfields serviceid="$serviceid$", kpiid="$kpiid$" | fields - is_all_entities_in_maintenance
# Used to assess severity for any KPI alert search, with no-data and max-severity event handling
# Note:
# - If no events are passed to the search command, it generates a no-data event, which may not be suitable for certain cases such as previewing charts
# - It also generates a max severity event, which is used by the health compute search
# - If the is_fill_data_gaps parameter is true, N/A alert_values (data gaps) are filled with non-N/A values, per the
#   "Fill Data Gaps" option selected by the user while configuring the KPI.
# is_handle_no_data, is_gen_max_severity_event and is_fill_data_gaps are boolean flags
[assess_severity(5)]
args = serviceid, kpiid, is_handle_no_data, is_gen_max_severity_event, is_fill_data_gaps
definition = eval maintenance_service_id = "$serviceid$" | `is_object_in_maintenance("service", maintenance_service_id)` | eval is_service_in_maintenance = in_maintenance | fields - in_maintenance, maintenance_service_id | setseverityfields serviceid="$serviceid$", kpiid="$kpiid$" handle_no_data="$is_handle_no_data$" generate_max_severity_event="$is_gen_max_severity_event$" fill_data_gaps="$is_fill_data_gaps$" output_secgrp="True" | fields - is_all_entities_in_maintenance
# Used to assess severity for any KPI alert search, with no-data and max-severity event handling
# Note:
# - If no events are passed to the search command, it generates a no-data event, which may not be suitable for certain cases such as previewing charts
# - It also generates a max severity event, which is used by the health compute search
# - If the is_fill_data_gaps parameter is true, N/A alert_values (data gaps) are filled with non-N/A values, per the
#   "Fill Data Gaps" option selected by the user while configuring the KPI.
# - If the is_time_series parameter is true, the events being processed are assumed to be in time-series form,
#   so a max severity event (if is_gen_max_severity_event is true) is generated for each timestamp.
#   Events must also be sorted by time for max severity generation to work correctly
# is_handle_no_data, is_gen_max_severity_event, is_fill_data_gaps and is_time_series are boolean flags
[assess_severity(6)]
args = serviceid, kpiid, is_handle_no_data, is_gen_max_severity_event, is_fill_data_gaps, is_time_series
definition = eval maintenance_service_id = "$serviceid$" | `is_object_in_maintenance("service", maintenance_service_id)` | eval is_service_in_maintenance = in_maintenance | fields - in_maintenance, maintenance_service_id | setseverityfields serviceid="$serviceid$", kpiid="$kpiid$" handle_no_data="$is_handle_no_data$" generate_max_severity_event="$is_gen_max_severity_event$" fill_data_gaps="$is_fill_data_gaps$" is_time_series="$is_time_series$" | fields - is_all_entities_in_maintenance
[assess_urgency]
definition = eval urgency = if (is_service_in_maintenance == 1, 0, urgency)
# For entities, we need to combine entity title and alias together for pseudo entities before we filldown values.
# Otherwise, the filldown just uses the last known value for results. We also sort by time to make sure that recent
# null values are not overridden by the filldown, which could give the impression that an entity was recently active
[interpolate_entity_data(2)]
args = start_time, end_time
definition = eval key_by_title=entity_key."==@@==".entity_title | xyseries _time key_by_title alert_value\
| append [ | makeresults | eval _time=$end_time$ | timechart start=$start_time$ end=$end_time$ span=1m count | fields _time ] \
| sort -_time | filldown * | untable _time key_by_title alert_value\
| mvexpand key_by_title | rex field=key_by_title "(?<entity_key>.+)==@@==(?<entity_title>.+)" | fields - key_by_title
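# Example (illustrative; the epoch times are hypothetical and normally supplied by the caller):
#   ... | `interpolate_entity_data(1700000000, 1700003600)`
# The makeresults/timechart subsearch guarantees one row per minute in the window, so the
# filldown has a bucket to fill even for minutes in which an entity reported no data.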
# Performs a filldown on KPI results to use the last data point that appeared (for KPIs that don't have data every 1m)
# Appends a one-row-per-minute time series (from makeresults/timechart) so every minute has a bucket for the filldown
[interpolate_kpi_data(2)]
args = start_time, end_time
definition = xyseries _time itsi_kpi_id alert_value\
| append [ | makeresults | eval _time=$end_time$ | timechart start=$start_time$ end=$end_time$ span=1m count | fields _time ]\
| sort -_time | filldown * | untable _time itsi_kpi_id alert_value
###############################################
# ITOA Backfill Search Macros
###############################################
# Used to convert raw data into per entity alert_values over backfill buckets
# but with possibly overlapping buckets.
# This macro assumes that events have been tagged with appropriate bucket IDs given by
# the values of (possibly multivalued) $bucket_field$.
# - the _time of each bucket is the time of its "latest" end
# - buckets are "snapped" to monitoring frequency-spaced time intervals
# - due to the fact that buckets overlap, the "latest" end will be common for all overlapping
# buckets at the end of the search time range, necessitating stats first(*) by _time
# - the bucket_field string starts with -1 for times corresponding to bucket gaps; these will be dropped
[aggregate_raw_into_entity_backfill(5)]
args = entity_statop, threshold_field, entity_field, alert_period, bucket_field
definition = stats max(_time) AS maxtime, $entity_statop$($threshold_field$) AS alert_value BY $bucket_field$, $entity_field$ | eval _time=maxtime+$alert_period$*60 | bucket _time span=$alert_period$m | stats first(*) as * by _time, $entity_field$ | eval _drop=substr($bucket_field$, 0, 2) | search _drop!=-1 | fields - $bucket_field$, maxtime
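# Example (illustrative; assumes events were already tagged with a multivalued bucket ID field
# and that the KPI alert period is 5 minutes; field names are hypothetical):
#   ... | `aggregate_raw_into_entity_backfill(avg, cpu_load_percent, host, 5, itsi_backfill_bucket_string)`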
[aggregate_raw_into_entity_fill_gaps_backfill(5)]
args = entity_statop, threshold_field, entity_field, alert_period, bucket_field
definition = stats max(_time) AS maxtime, $entity_statop$($threshold_field$) AS alert_value BY $bucket_field$, $entity_field$ | eval _time=maxtime+$alert_period$*60 | bucket _time span=$alert_period$m | stats first(*) as * by _time, $entity_field$ | eval _drop=substr($bucket_field$, 0, 2) | search _drop!=-1 | fields - $bucket_field$, maxtime | `fill_gaps_entity_backfill("$entity_field$", $alert_period$)`
[aggregate_raw_into_service_backfill(4)]
args = service_statop, threshold_field, alert_period, bucket_field
definition = stats max(_time) AS maxtime, $service_statop$($threshold_field$) AS alert_value BY $bucket_field$ | eval _time=maxtime+$alert_period$*60 | bucket _time span=$alert_period$m | stats first(*) as * by _time | eval _drop=substr($bucket_field$, 0, 2) | search _drop!=-1 | fields - $bucket_field$, maxtime | eval is_service_aggregate="1", is_entity_defined="0", entity_key="service_aggregate", entity_title="service_aggregate"
[aggregate_raw_into_service_fill_gaps_backfill(4)]
args = service_statop, threshold_field, alert_period, bucket_field
definition = stats max(_time) AS maxtime, $service_statop$($threshold_field$) AS alert_value BY $bucket_field$ | eval _time=maxtime+$alert_period$*60 | bucket _time span=$alert_period$m | stats first(*) as * by _time | eval _drop=substr($bucket_field$, 0, 2) | search _drop!=-1 | fields - $bucket_field$, maxtime | `fill_gaps_aggregate_backfill($alert_period$)` | eval is_service_aggregate="1", is_entity_defined="0", entity_key="service_aggregate", entity_title="service_aggregate"
[aggregate_entity_into_service_backfill(1)]
args = service_statop
definition = appendpipe [stats $service_statop$(alert_value) AS alert_value, count(entity_key) AS entities_count, count(eval(is_entity_in_maintenance=1)) AS maintenance_entities_count by _time | eval is_service_aggregate="1", is_entity_defined="0", entity_key="service_aggregate", entity_title="service_aggregate", is_all_entities_in_maintenance=if ((entities_count == maintenance_entities_count) AND (entities_count > 0), 1, 0) | fields - entities_count maintenance_entities_count]
[fill_gaps_entity_backfill(2)]
args = entity_split_field, alert_period
definition = fillgapsbackfill entity_split_field="$entity_split_field$", alert_period="$alert_period$"
[fill_gaps_aggregate_backfill(1)]
args = alert_period
definition = fillgapsbackfill kpi_type="service_aggregate", alert_period="$alert_period$"
# used to backfill health score for a service. currently, buckets are created with
# a span of 1 minute and health score values are calculated every 1 min.
# if different variations of calculation period and monitoring frequency are needed,
# then add more arguments to the macro accordingly.
[service_health_score_backfill(1)]
args = service_id
definition = bucket _time span=1m | eval itsi_backfill_bucket_string=_time | stats max(_time) AS maxtime, latest(urgency) AS urgency latest(alert_level) AS alert_level latest(alert_severity) as alert_name latest(health_score) as service_health_score latest(service) AS service latest(is_service_in_maintenance) AS is_service_in_maintenance latest(kpi) AS kpi BY itsi_backfill_bucket_string, kpiid, serviceid | eval _time=maxtime+1*60 | fields - itsi_backfill_bucket_string, maxtime | gethealth service_id="$service_id$" | eval is_backfilled_event="1"
###############################################
# ITOA Time Series Search Macros
###############################################
# Used to convert raw data into the per entity alert_values over bucketed time (note this is not the same as timechart output)
[aggregate_raw_into_entity_time_series(4)]
args = entity_statop, threshold_field, entity_field, search_alert_earliest
definition = bucket _time span=$search_alert_earliest$m | stats $entity_statop$($threshold_field$) AS alert_value by _time, $entity_field$
# Used to convert raw data into a timechart format of a limited set of entities
[aggregate_raw_into_limited_entity_time_series(2)]
args = entity_field, search_alert_earliest
definition = timechart bins=500 minspan=$search_alert_earliest$m avg(alert_value) AS alert_value by $entity_field$
# Used to convert raw data into a timechart format of a limited set of entities
[aggregate_raw_into_limited_entity_time_series(4)]
args = entity_statop, threshold_field, entity_field, search_alert_earliest
definition = bucket _time span=$search_alert_earliest$m | stats $entity_statop$($threshold_field$) AS alert_value by _time, $entity_field$ | timechart bins=500 minspan=$search_alert_earliest$m avg(alert_value) AS alert_value by $entity_field$
# Used to convert raw data into a timechart format of an entity
[aggregate_raw_into_limited_entity_time_series(5)]
args = entity_name, entity_statop, threshold_field, entity_field, search_alert_earliest
definition = bucket _time span=$search_alert_earliest$m | where $entity_field$="$entity_name$" | stats $entity_statop$($threshold_field$) AS alert_value by _time, $entity_field$ | timechart bins=500 minspan=$search_alert_earliest$m avg(alert_value) AS alert_value by $entity_field$
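# Example (illustrative; the entity name and field names are hypothetical):
#   ... | `aggregate_raw_into_limited_entity_time_series(web-01, avg, response_time, host, 5)`
# charts avg(response_time) for the single entity host="web-01" in 5 minute buckets.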
# Used to convert raw data into a timechart format of a limited set of entities, but if a compound entity was used
[aggregate_raw_compound_entities_into_limited_entity_time_series(4)]
args = entity_statop, threshold_field, entity_fields, search_alert_earliest
definition = bucket _time span=$search_alert_earliest$m | stats $entity_statop$($threshold_field$) AS alert_value by _time, $entity_fields$ | eval compound_alias=mvzip($entity_fields$) | timechart bins=500 minspan=$search_alert_earliest$m avg(alert_value) AS alert_value by compound_alias
# Used to convert raw data into a timechart format of an entity, but if a compound entity was used
[aggregate_raw_compound_entity_into_limited_entity_time_series(5)]
args = entity_name, entity_statop, threshold_field, entity_fields, search_alert_earliest
definition = bucket _time span=$search_alert_earliest$m | eval compound_alias=mvzip($entity_fields$) | where compound_alias="$entity_name$" | stats $entity_statop$($threshold_field$) AS alert_value by _time, compound_alias | timechart bins=500 minspan=$search_alert_earliest$m avg(alert_value) AS alert_value by compound_alias
# Used to aggregate all entity time series data points into a single service time series (in timechart output format)
[aggregate_entity_into_service_time_series(2)]
args = service_statop, search_alert_earliest
definition = stats $service_statop$(alert_value) AS alert_value, count(entity_key) AS entities_count, count(eval(is_entity_in_maintenance=1)) AS maintenance_entities_count by _time | eval is_all_entities_in_maintenance=if ((entities_count == maintenance_entities_count) AND (entities_count > 0), 1, 0) | fields - entities_count maintenance_entities_count | timechart bins=500 minspan=$search_alert_earliest$m avg(alert_value) AS alert_value
# Used to aggregate all entity time series data points into a single service time series (in timechart output format)
[aggregate_entity_into_service_time_series(3)]
args = service_statop, search_alert_earliest, deep_dive_kpi_statsop
definition = stats $service_statop$(alert_value) AS alert_value, count(entity_key) AS entities_count, count(eval(is_entity_in_maintenance=1)) AS maintenance_entities_count by _time | eval is_all_entities_in_maintenance=if ((entities_count == maintenance_entities_count) AND (entities_count > 0), 1, 0) | fields - entities_count maintenance_entities_count | timechart bins=500 minspan=$search_alert_earliest$m $deep_dive_kpi_statsop$(alert_value) AS alert_value
# Used to convert raw metric data into the service aggregate time series (in timechart output format)
[aggregate_raw_into_service_time_series(1)]
args = search_alert_earliest
definition = timechart bins=500 minspan=$search_alert_earliest$m avg(alert_value) as alert_value
# Used to convert raw metric data with entity filter into the service aggregate time series (in timechart output format)
[aggregate_raw_into_service_time_series(2)]
args = aggregate_statop, search_alert_earliest
definition = stats $aggregate_statop$(_value) AS alert_value by _time | timechart bins=500 minspan=$search_alert_earliest$m avg(alert_value) as alert_value
# Used to convert raw data into the service aggregate time series (in timechart output format)
[aggregate_raw_into_service_time_series(3)]
args = aggregate_statop, threshold_field, search_alert_earliest
definition = bucket _time span=$search_alert_earliest$m | stats $aggregate_statop$($threshold_field$) AS alert_value by _time | timechart bins=500 minspan=$search_alert_earliest$m avg(alert_value) as alert_value
# Used to convert raw data into the service aggregate time series with the custom stats op (in timechart output format)
[aggregate_raw_into_service_time_series(4)]
args = aggregate_statop, threshold_field, search_alert_earliest, deep_dive_kpi_statsop
definition = bucket _time span=$search_alert_earliest$m | stats $aggregate_statop$($threshold_field$) AS alert_value by _time | timechart bins=500 minspan=$search_alert_earliest$m $deep_dive_kpi_statsop$(alert_value) as alert_value
###############################################
# ITOA Compare Search Macros
# cuts data down to just two alert periods' worth and compares two windows, each of length search_alert_earliest
###############################################
# Entity Level compare search distinguished by needing more args, entity_statop and entity_field
[aggregate_raw_and_compare(5)]
args = entity_statop, service_statop, threshold_field, entity_field, search_alert_earliest
definition = `HandleInfoMaxTime` | head _time>(info_max_time - $search_alert_earliest$*120) | eval alert_value_window=if(_time<(info_max_time-$search_alert_earliest$*60),"last_window", "current_window") | stats $entity_statop$($threshold_field$) AS alert_value by alert_value_window, $entity_field$ | stats $service_statop$(alert_value) AS alert_value by alert_value_window | reverse | delta alert_value AS window_delta | search alert_value_window="current_window" | eval window_direction=if(window_delta >0, "increase", if(window_delta < 0, "decrease", "none")) | `gettime`
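# Example (illustrative): with search_alert_earliest=5, events from the last 10 minutes
# (5*120 seconds before info_max_time) are kept; the older half is labeled "last_window",
# the newer half "current_window", and window_delta/window_direction describe the change
# between the two window aggregates.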
# Service Level compare search distinguished by only needing 3 args
[aggregate_raw_and_compare(3)]
args = service_statop, threshold_field, search_alert_earliest
definition = `HandleInfoMaxTime` | head _time>(info_max_time - $search_alert_earliest$*120) | eval alert_value_window=if(_time<(info_max_time-$search_alert_earliest$*60),"last_window", "current_window") | stats $service_statop$($threshold_field$) AS alert_value by alert_value_window | reverse | delta alert_value AS window_delta | search alert_value_window="current_window" | eval window_direction=if(window_delta >0, "increase", if(window_delta < 0, "decrease", "none")) | `gettime`
# Service Level compare search for metric based KPIs, distinguished by needing 3 args including threshold_field
[aggregate_raw_and_compare_metric(3)]
args = service_statop, threshold_field, search_alert_earliest
definition = `HandleInfoMaxTime` | sort -_time | head _time>(info_max_time - $search_alert_earliest$*120) | eval alert_value_window=if(_time<(info_max_time-$search_alert_earliest$*60),"last_window", "current_window") | stats $service_statop$($threshold_field$) AS alert_value by alert_value_window | reverse | delta alert_value AS window_delta | search alert_value_window="current_window" | eval window_direction=if(window_delta >0, "increase", if(window_delta < 0, "decrease", "none")) | `gettime`
# Service Level compare search for metric based KPIs, distinguished by only needing 1 arg
[aggregate_raw_and_compare_metric(1)]
args = search_alert_earliest
definition = `HandleInfoMaxTime` | sort -_time | head _time>(info_max_time - $search_alert_earliest$*120) | eval alert_value_window=if(_time<(info_max_time-$search_alert_earliest$*60),"last_window", "current_window") | reverse | delta alert_value AS window_delta | search alert_value_window="current_window" | eval window_direction=if(window_delta >0, "increase", if(window_delta < 0, "decrease", "none")) | `gettime`
###############################################
# ITOA Single Value Search Macros
# cuts data down to just one alert period's worth and provides a single value
###############################################
# Entity Level single value search distinguished by needing more args, entity_statop and entity_field
[aggregate_raw_into_single_value(5)]
args = entity_statop, service_statop, threshold_field, entity_field, search_alert_earliest
definition = `HandleInfoMaxTime` | head _time>(info_max_time - $search_alert_earliest$*60) | stats $entity_statop$($threshold_field$) AS alert_value by $entity_field$ | stats $service_statop$(alert_value) AS alert_value | `gettime`
# Service Level single value search distinguished by only needing 3 args
[aggregate_raw_into_single_value(3)]
args = service_statop, threshold_field, search_alert_earliest
definition = `HandleInfoMaxTime` | head _time>(info_max_time - $search_alert_earliest$*60) | stats $service_statop$($threshold_field$) AS alert_value | `gettime`
# Service Level single value search used by metric based KPIs
[aggregate_raw_into_single_value_metric(2)]
args = service_statop, threshold_field
definition = stats $service_statop$($threshold_field$) AS alert_value | `gettime`
##################################################
# Time related Macros
##################################################
[HandleInfoMaxTime]
args =
definition = addinfo | eval info_max_time=if(info_max_time="+Infinity",now() + 315569260,info_max_time)
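# Example (illustrative): for an all-time or real-time search, addinfo reports
# info_max_time="+Infinity"; this macro replaces it with now() plus 315569260 seconds
# (roughly ten years) so that downstream arithmetic on info_max_time stays numeric.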
[getSearchTimeDiff]
args =
definition = `HandleInfoMaxTime` | eval timeDiff = info_max_time-info_min_time | fields - info_min_time info_max_time info_search_time info_sid
[gettime]
args =
definition = `HandleInfoMaxTime` | eval _time=info_max_time | fields - info_min_time info_max_time info_search_time info_sid
[get_info_time_without_sid]
args =
definition = `HandleInfoMaxTime` | eval _time=info_max_time | fields - info_sid
[no_entities_matched]
args =
definition = _time!=*
#####################################################
# Event Management Related macros
#####################################################
[itsi_get_event_id]
args=
definition = `itsi_map_notable_fields` | `gettime` | `itsi_get_sid_id` | eval event_id= orig_sid."@@notable@@".orig_rid
[itsi_get_sid_id]
args=
definition = addinfo | fields - info_*_time | streamstats count as rid | eval rid=rid-1 | rename info_sid as orig_sid,rid as orig_rid
[itsi_map_notable_fields]
args =
definition = rename _time as orig_time | rename _raw as orig_raw | eval orig_raw=sha256(orig_raw) | `itsi_rename_if_tgt_null(event_id, "orig", "_")` | `itsi_rename_if_tgt_null(tag, "orig", "_")` | rename splunk_server as orig_splunk_server, linecount as orig_linecount, eventtype as orig_eventtype, timestartpos as orig_timestartpos, timeendpos as orig_timeendpos, status as orig_status, owner as orig_owner, owner as orig_owner, tag::* as orig_tag::* | fields - date_*, punct
[itsi_rename_if_tgt_null(3)]
args = field, prefix, sep
definition = eval "$prefix$$sep$$field$"=if(isnull('$prefix$$sep$$field$'), '$field$', '$prefix$$sep$$field$') | fields - "$field$"
# Lookup state (i.e. status, severity, owner) for notable events from the KV store
# DEPRECATED AS OF 4.0.0
[get_notable_event_state]
args =
definition = lookup itsi_notable_event_state_lookup _key AS event_id OUTPUT severity AS lookup_severity, owner AS lookup_owner, status AS lookup_status | eval severity=if(isnull(lookup_severity), severity, lookup_severity), status=if(isnull(lookup_status), status, lookup_status), owner=if(isnull(lookup_owner), owner, lookup_owner) | fields - lookup_*
# spath to extract specific event management fields that we know we need, in case a notable event is extremely long
# DEPRECATED AS OF 4.0.0 BECAUSE WE NOW USE INDEXED_EXTRACTION=JSON
[itsi_extract_notable_event_required_fields]
args =
definition = spath severity | spath status | spath owner | spath title | spath description | spath event_id
# the auditing index for notable event management objects
[itsi_notable_audit_index]
args =
definition = index="itsi_notable_audit"
# the archive index for notable event management objects
[itsi_notable_archive_index]
args =
definition = index="itsi_notable_archive"
[itsi_grouped_alerts_index]
args =
definition = index="itsi_grouped_alerts"
[itsi_tracked_alerts_index]
args =
definition = index="itsi_tracked_alerts"
# Get notable events with a filter applied
# event_filter - search clause to be run against raw notable event index
# Note: to get all results aka no event_filter, call the macro with empty string like this - `itsi_event_management_index_with_state("")`
# Note: this macro contains the word "state" because pre-4.0.0, we used to allow changes to notable event states. We no longer allow that.
[itsi_event_management_index_with_state(1)]
args = event_filter
definition = `itsi_event_management_index` $event_filter$
# Get notable group events with looked up state values
# event_filter - search clause to be run against raw notable event group index before state lookup
# Note: to get all results aka no event_filter, call the macro with empty string like this - `itsi_event_management_group_index_with_state("")`
[itsi_event_management_group_index_with_state(1)]
args = event_filter
definition = `itsi_event_management_group_index` $event_filter$ | `itsi_notable_group_lookup`
# Get notable events from splunk index
# Note: use this macro when notable event state values are not needed
[itsi_event_management_index]
args =
definition = `itsi_tracked_alerts_index` `itsi_event_management_sourcetype` NOT source=itsi@internal@group_closing_event
# Get notable events fields from splunk index
[itsi_event_management_index_fields]
args =
definition = `itsi_event_management_index` | fieldsummary | rename count as field_count | fields field, field_count | sort -field_count | head 300 | sort field | fields field
# Use this when you want to see the group closing events
[itsi_event_management_index_with_close_events]
args =
definition = `itsi_tracked_alerts_index` `itsi_event_management_sourcetype`
# Get the count of events in the index defined above
[itsi_event_management_index_with_close_events_filter_count]
args =
definition = count(eval(if(index=="itsi_tracked_alerts", "true", null)))
[itsi_event_management_group_index]
args =
definition = `itsi_grouped_alerts_index` `itsi_event_management_group_sourcetype` NOT source=itsi@internal@group_closing_event NOT itsi_dummy_closing_flag=* NOT itsi_bdt_event=*
[itsi_event_management_group_index_with_bdt_events]
args =
definition = `itsi_grouped_alerts_index` `itsi_event_management_group_sourcetype` NOT source=itsi@internal@group_closing_event NOT itsi_dummy_closing_flag=*
[itsi_event_management_group_index_with_close_events]
args =
definition = `itsi_grouped_alerts_index` `itsi_event_management_group_sourcetype`
[itsi_event_management_sourcetype]
args =
definition = sourcetype=itsi_notable:event
[itsi_event_management_group_sourcetype]
args =
definition = sourcetype=itsi_notable:group
[itsi_event_management_comment_sourcetype]
args =
definition = sourcetype=itsi_notable:comment
[itsi_event_management_comment_index]
args =
definition = `itsi_grouped_alerts_index` `itsi_event_management_comment_sourcetype`
# Get noise reduction rate (in percentage) for event management
[itsi_event_management_noise_reduction]
args =
definition = tstats dc(event_id) as events where `itsi_event_management_group_index` by itsi_group_id\
| stats sum(events) as num_events count as num_groups\
| eval noisereduction=((num_events-num_groups)/num_events)*100\
| eval noisereduction=if(isnull(noisereduction), 0, noisereduction)\
| fields noisereduction
# Get the episode count for an episode state
[itsi_event_management_get_episode_count(1)]
args = state
definition = tstats count as Acknowledged where `itsi_notable_audit_index` activity=*$state$*\
| appendcols [| tstats dc(itsi_group_id) as total where `itsi_event_management_group_index`]
# Get mean time to achieve the state in event management
[itsi_event_management_get_mean_time(1)]
args = state
definition = tstats earliest(_time) as t1 where `itsi_notable_audit_index` activity="*$state$*" by event_id\
| append [| tstats earliest(itsi_first_event_time) as t2 where `itsi_event_management_group_index` by itsi_group_id]\
| eval match_id=coalesce(event_id,itsi_group_id)\
| stats values(*) AS * by match_id\
| search event_id=* itsi_group_id=*\
| eval diff=t1-t2\
| stats avg(diff) as t3\
| eval avgDuration = round(t3/60,0)\
| fields - t3
# Get episode counts by severity
[itsi_event_management_episode_by_severity]
args =
definition = tstats count where `itsi_event_management_group_index` by itsi_group_id\
| `itsi_notable_group_lookup`\
| stats count as "Count" by severity\
| sort - severity\
| eval severity=case(severity=1,"Information",severity=2,"Normal",severity=3,"Low",severity=4,"Medium",severity=5,"High",severity=6,"Critical")\
| rename severity as "Severity"
# Note that if the below macro is updated, it MUST return a field called all_tickets with a structure akin to the eval below
[itsi_event_management_tickets_lookup]
args =
definition = lookup itsi_notable_event_external_ticket event_id OUTPUT tickets.ticket_system AS ticket_system, tickets.ticket_id AS ticket_id, tickets.ticket_url AS ticket_url | eval all_tickets=mvzip(ticket_system, ticket_id, " - ") | lookup itsi_notable_event_external_ticket event_id AS itsi_group_id OUTPUT tickets.ticket_system AS ticket_system_snow, tickets.ticket_id AS ticket_id_snow, tickets.ticket_url AS ticket_url_snow | eval all_tickets=if(isnull(all_tickets), mvzip(ticket_system_snow, ticket_id_snow, " - "), mvappend(all_tickets, mvzip(ticket_system_snow, ticket_id_snow, " - "))) | fields - ticket_system,ticket_id, ticket_url, ticket_system_snow, ticket_id_snow, ticket_url_snow
[itsi_event_management_ref_url_lookup]
args =
definition = lookup itsi_notable_event_ref_url event_id as itsi_group_id OUTPUT description as ref_links | mvcombine(ref_links)
# Note that if the below macro is updated, it MUST return a field called linked_tickets with a structure akin to the eval below
# TODO: we use the join command to append columns for the episodes, which returns a maximum of 50K results by default.
[itsi_event_management_similar_episodes_tickets_lookup]
args =
definition = lookup itsi_notable_event_external_ticket event_id AS itsi_group_id OUTPUT tickets.ticket_system AS ticket_system, tickets.ticket_id AS ticket_id, tickets.ticket_url AS ticket_url\
| join type=left itsi_group_id [search `itsi_notable_archive_index` object_type=external_ticket earliest=-30d latest=now()\
| rename event_id as itsi_group_id tickets{}.* as *_new\
| table itsi_group_id *_new]\
| eval ticket_system=mvappend(ticket_system, ticket_system_new), ticket_id=mvappend(ticket_id, ticket_id_new), ticket_url=mvappend(ticket_url, ticket_url_new)\
| fields - *_new\
| rex field=ticket_id mode=sed "s/([\r\n]+)/,/g" | rex field=ticket_system mode=sed "s/([\r\n]+)/,/g" | rex field=ticket_url mode=sed "s/([\r\n]+)/,/g"\
| eval ticket_id=mvjoin(ticket_id, ","), ticket_system=mvjoin(ticket_system, ","), ticket_url=mvjoin(ticket_url, ",")\
| makemv delim="," ticket_id | makemv delim="," ticket_system | makemv delim="," ticket_url\
| eval linked_tickets=mvzip(ticket_system, ticket_id, ",")\
| eval linked_tickets=mvzip(linked_tickets, ticket_url)\
| fields - ticket_system,ticket_id, ticket_url
[itsi_event_management_snow_incidents]
args =
definition = sourcetype=snow:incident
[itsi_event_management_remedy_incidents]
args =
definition = sourcetype=remedy:incident
[itsi_event_management_jira_issues]
args =
definition = sourcetype="jira:cloud:issues"
# This macro encompasses the lookups of both the itsi_notable_group_user and itsi_notable_group_system collections
[itsi_notable_group_lookup]
args =
definition = lookup itsi_notable_group_user_lookup _key AS itsi_group_id OUTPUT owner severity status instruction | lookup itsi_notable_group_system_lookup _key AS itsi_group_id OUTPUT title description start_time last_time is_active event_count itsi_policy_id | eval policy_id=itsi_policy_id, _itsi_is_group_broken=if(is_active==0,1,0)
[itsi_notable_event_actions_temp_state_values]
args =
definition = eval action_temp_status=status | eval action_temp_owner=owner | eval action_temp_severity=severity | eval action_temp_title=title | eval action_temp_description=description | eval policy_id_temp=itsi_policy_id
[itsi_notable_event_actions_coalesce_state_values]
args =
definition = eval status=coalesce(status, action_temp_status) | eval owner=coalesce(owner, action_temp_owner) | eval severity=coalesce(severity,action_temp_severity) | eval title=coalesce(title, action_temp_title) | eval description=coalesce(description, action_temp_description) | eval itsi_policy_id = coalesce(policy_id, policy_id_temp) | fields - action_temp_*
# This macro checks whether all events for a notable group are displayed in the UI
# This macro isn't being used right now, but one day, for cleaner code, it should replace all the 'groupFrontClosed' and 'groupBackClosed' logic
[has_all_events]
args =
definition = addinfo | lookup itsi_notable_group_system_lookup _key AS itsi_group_id OUTPUT start_time last_time | eval has_all_events=if(info_max_time>last_time AND info_min_time<start_time,1,0) | fields - info_max_time, info_min_time, info_search_time, info_sid
[neap_preview_event_limit]
args =
definition = head 10000
# Takes in a string of team_keys in the format of '(sec_grp="itsi_team_key") OR (sec_grp="itsi_team_key")' and returns a filter of service_ids
[itsi_events_compare_teams(1)]
args = itsi_team_id_list
definition = search (service_ids=*null*) OR (NOT service_ids=*) OR [|inputlookup itsi_services_in_team_lookup where $itsi_team_id_list$ | rename _key as service_ids | eval service_ids="*".service_ids."*" | fields service_ids]
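# Example (illustrative; the team key is hypothetical): with
# $itsi_team_id_list$ = (sec_grp="team_a_key"), the subsearch becomes
#   | inputlookup itsi_services_in_team_lookup where (sec_grp="team_a_key")
#   | rename _key as service_ids | eval service_ids="*".service_ids."*" | fields service_ids
# and its wildcarded service_ids terms are OR-ed into the outer filter.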
# Takes in a string of team_keys in the format of '(sec_grp="itsi_team_key") OR (sec_grp="itsi_team_key")' and returns a filter of itsi_service_ids
[itsi_groups_compare_teams(1)]
args = itsi_team_id_list
definition = search (itsi_service_ids=*null*) OR (NOT itsi_service_ids=*) OR [|inputlookup itsi_services_in_team_lookup where $itsi_team_id_list$ | rename _key as itsi_service_ids | eval itsi_service_ids="*".itsi_service_ids."*" | fields itsi_service_ids]
# Search to retrieve old active groups from itsi_grouped_alerts index
[itsi_event_management_get_active_episodes]
args =
definition = `itsi_event_management_group_index_with_close_events` \
| stats max(itsi_group_count) as itsi_group_count \
values(itsi_is_last_event) as itsi_is_last_event \
max(itsi_last_event_time) as itsi_last_event_time \
first(itsi_parent_group_id) as itsi_parent_group_id \
first(itsi_policy_id) as itsi_policy_id \
first(itsi_split_by_hash) as itsi_split_by_hash \
first(itsi_first_event_id) as itsi_first_event_id \
min(itsi_first_event_time) as itsi_first_event_time \
min(itsi_earliest_event_time) as itsi_earliest_event_time \
latest(itsi_group_assignee) as itsi_group_assignee \
latest(itsi_group_description) as itsi_group_description \
latest(itsi_group_severity) as itsi_group_severity \
latest(itsi_group_status) as itsi_group_status \
latest(itsi_group_ace_template_id) as itsi_group_ace_template_id \
latest(itsi_group_title) as itsi_group_title by itsi_group_id \
| where itsi_is_last_event!="true" \
| sort 0 -itsi_last_event_time \
| lookup itsi_notable_group_user_lookup _key AS itsi_group_id OUTPUT owner severity status \
| lookup itsi_notable_group_system_lookup _key AS itsi_group_id OUTPUT is_active \
| where is_active=1 \
| eval itsi_group_assignee=coalesce(owner, itsi_group_assignee), \
itsi_group_severity=coalesce(severity, itsi_group_severity), \
itsi_group_status=coalesce(status, itsi_group_status)
# Use this macro to enable bidirectional ticketing.
# The ServiceNow index is 'snow_index'; its default value is 'main'.
# Default look back time is 90 seconds.
# 'alternate_index' is the index checked to skip already-processed ticket updates.
# Default value for 'alternate_index' is 'itsi_tracked_alerts'.
[itsi_bidirectional_ticketing(3)]
args = snow_index, max_look_back_time, alternate_index
definition = datamodel Ticket_Management Incident search\
| rename All_Ticket_Management.ticket_id as ticket_id\
| join ticket_id [search sourcetype="snow:incident" index="$snow_index$"\
| where _indextime > now() - $max_look_back_time$]\
| lookup itsi_notable_event_external_ticket tickets.ticket_id as ticket_id OUTPUTNEW tickets.ticket_system event_id\
| where isnotnull(event_id)\
| rename tickets.* as * \
| eventstats values(event_id) as group_id last(ticket_system) as ticket_system by ticket_id\
| fields - dv_* | table * | makemv group_id | mvexpand group_id\
| lookup itsi_notable_group_system_lookup _key AS group_id OUTPUT itsi_policy_id, split_by_hash \
| rename split_by_hash as itsi_split_by_hash \
| eval bidirectional_ticketing=1, snow_hash = number + "!" + group_id + "!" + sys_updated_on | search NOT\
[| search index="$alternate_index$"\
| fields snow_hash] | dedup snow_hash
# Use this macro to enable bidirectional ticketing.
# ServiceNow incidents are read via the `itsi_event_management_snow_incidents` sourcetype macro.
# Default look back time is 90 seconds.
# 'alternate_index' is the index checked to skip already-processed ticket updates; its default value is 'itsi_tracked_alerts'.
[itsi_bidirectional_ticketing(2)]
args = max_look_back_time, alternate_index
definition = datamodel Ticket_Management Incident search\
| rename All_Ticket_Management.ticket_id as ticket_id\
| join ticket_id [search `itsi_event_management_snow_incidents`\
| where _indextime > now() - $max_look_back_time$]\
| lookup itsi_notable_event_external_ticket tickets.ticket_id as ticket_id OUTPUTNEW tickets.ticket_system event_id\
| where isnotnull(event_id)\
| rename tickets.* as * \
| eventstats values(event_id) as group_id last(ticket_system) as ticket_system by ticket_id\
| fields - dv_* | table * | makemv group_id | mvexpand group_id\
| lookup itsi_notable_group_system_lookup _key AS group_id OUTPUT itsi_policy_id, split_by_hash \
| rename split_by_hash as itsi_split_by_hash \
| eval bidirectional_ticketing=1, snow_hash = number + "!" + group_id + "!" + sys_updated_on | search NOT\
[| search index="$alternate_index$"\
| fields snow_hash] | dedup snow_hash
# Use this macro to enable BMC Remedy bidirectional ticketing.
# The BMC Remedy index is 'remedy_index'; its default value is 'main'.
# Default look back time is 90 seconds.
# 'alternate_index' is the index checked to skip already-processed ticket updates.
# Default value for 'alternate_index' is 'itsi_tracked_alerts'.
[itsi_bmc_bidirectional_ticketing(3)]
args = remedy_index, max_look_back_time, alternate_index
definition = datamodel Ticket_Management Incident search\
| rename All_Ticket_Management.ticket_id as ticket_id\
| join ticket_id [search sourcetype="remedy:incident" index="$remedy_index$"\
| where _indextime > now() - $max_look_back_time$] \
| lookup itsi_notable_event_external_ticket tickets.ticket_id as ticket_id OUTPUTNEW tickets.ticket_system event_id\
| where isnotnull(event_id)\
| rename tickets.* as * | eventstats values(event_id) as group_id last(ticket_system) as ticket_system by ticket_id\
| fields - dv_* | table * | makemv group_id | mvexpand group_id\
| lookup itsi_notable_group_system_lookup _key AS group_id OUTPUT itsi_policy_id, split_by_hash \
| rename split_by_hash as itsi_split_by_hash \
| eval bidirectional_ticketing=1, remedy_hash = 'Incident Number' + "!" + group_id + "!" + 'Last Modified Date' | search NOT\
[| search index="$alternate_index$"\
| fields remedy_hash] | dedup remedy_hash
# Use this macro to enable BMC Remedy bidirectional ticketing.
# BMC Remedy incidents are read via the `itsi_event_management_remedy_incidents` sourcetype macro.
# Default look back time is 90 seconds.
# 'alternate_index' is the index checked to skip already-processed ticket updates; its default value is 'itsi_tracked_alerts'.
[itsi_bmc_bidirectional_ticketing(2)]
args = max_look_back_time, alternate_index
definition = datamodel Ticket_Management Incident search\
| rename All_Ticket_Management.ticket_id as ticket_id\
| join ticket_id [search `itsi_event_management_remedy_incidents`\
| where _indextime > now() - $max_look_back_time$] \
| lookup itsi_notable_event_external_ticket tickets.ticket_id as ticket_id OUTPUTNEW tickets.ticket_system event_id\
| where isnotnull(event_id)\
| rename tickets.* as * | eventstats values(event_id) as group_id last(ticket_system) as ticket_system by ticket_id\
| fields - dv_* | table * | makemv group_id | mvexpand group_id\
| lookup itsi_notable_group_system_lookup _key AS group_id OUTPUT itsi_policy_id, split_by_hash \
| rename split_by_hash as itsi_split_by_hash \
| eval bidirectional_ticketing=1, remedy_hash = 'Incident Number' + "!" + group_id + "!" + 'Last Modified Date' | search NOT\
[| search index="$alternate_index$"\
| fields remedy_hash] | dedup remedy_hash
# Use this macro to enable Jira bidirectional ticketing.
# The Jira index is 'jira_index'; its default value is 'main'.
# Default look back time is 90 seconds.
# 'alternate_index' is the index checked to skip already-processed ticket updates.
# Default value for 'alternate_index' is 'itsi_tracked_alerts'.
[itsi_jira_bidirectional_ticketing(3)]
args = jira_index, max_look_back_time, alternate_index
definition = datamodel Ticket_Management Incident search\
| rename All_Ticket_Management.ticket_id as ticket_id\
| join ticket_id [search sourcetype="jira:cloud:issues" index="$jira_index$"\
| where _indextime > now() - $max_look_back_time$] \
| lookup itsi_notable_event_external_ticket tickets.ticket_id as ticket_id OUTPUTNEW tickets.ticket_system event_id\
| where isnotnull(event_id)\
| rename tickets.* as * | eventstats values(event_id) as group_id last(ticket_system) as ticket_system by ticket_id\
| fields - dv_* | table * | makemv group_id | mvexpand group_id\
| lookup itsi_notable_group_system_lookup _key AS group_id OUTPUT itsi_policy_id, split_by_hash \
| rename split_by_hash as itsi_split_by_hash \
| eval bidirectional_ticketing=1, jira_hash = id + "!" + key + "!" + group_id + "!" + 'fields.updated' | search NOT\
[| search index="$alternate_index$"\
| fields jira_hash] | dedup jira_hash
# Use this macro to enable Jira bidirectional ticketing.
# Jira issues are read via the `itsi_event_management_jira_issues` sourcetype macro.
# Default look back time is 90 seconds.
# 'alternate_index' is the index checked to skip already-processed ticket updates; its default value is 'itsi_tracked_alerts'.
[itsi_jira_bidirectional_ticketing(2)]
args = max_look_back_time, alternate_index
definition = datamodel Ticket_Management Incident search\
| rename All_Ticket_Management.ticket_id as ticket_id\
| join ticket_id [search `itsi_event_management_jira_issues`\
| where _indextime > now() - $max_look_back_time$] \
| lookup itsi_notable_event_external_ticket tickets.ticket_id as ticket_id OUTPUTNEW tickets.ticket_system event_id\
| where isnotnull(event_id)\
| rename tickets.* as * | eventstats values(event_id) as group_id last(ticket_system) as ticket_system by ticket_id\
| fields - dv_* | table * | makemv group_id | mvexpand group_id\
| lookup itsi_notable_group_system_lookup _key AS group_id OUTPUT itsi_policy_id, split_by_hash \
| rename split_by_hash as itsi_split_by_hash \
| eval bidirectional_ticketing=1, jira_hash = id + "!" + key + "!" + group_id + "!" + 'fields.updated' | search NOT\
[| search index="$alternate_index$"\
| fields jira_hash] | dedup jira_hash
# Search to get the events that were not grouped, or failed to be grouped, by the rules engine
[grouping_missed_events_search]
args =
definition = (`itsi_event_management_index_with_close_events` ) OR ( `itsi_event_management_group_index_with_close_events`) \
| stats first(_time) AS _time first(_raw) AS _raw first(source) AS source first(sourcetype) AS sourcetype first(host) AS host count(eval(`itsi_grouped_alerts_index`)) AS c_grouped by event_id \
| where c_grouped=0 | fields _time, _raw, source, sourcetype, host
# Search to retrieve missed events to backfill
[backfill_events_search]
args =
definition = `grouping_missed_events_search` | sort 0 _time
# High Scale EA Search to retrieve missed events to backfill
[high_scale_ea_backfill(1)]
description = Search to retrieve missed events to backfill
args = latest_offset
definition = (_index_latest=-$latest_offset$) `backfill_events_search` | eval is_backfill="True"
# Search to get list of enabled notable event actions consumers
[enabled_notable_event_action_consumers_search]
args =
definition = rest /services/configs/conf-inputs splunk_server=local | search id="*notable_event_actions_queue_consumer*" disabled="0" | table id
# Dedup logic searches used in the SPL generated for data integration connections (Event Onboarding)
[dedup_search_for_raw_alert]
args =
definition = | eval groupingid=coalesce(groupingid, internal_groupingid) \
| eval event_identifier_string=groupingid \
| dedup event_identifier_string sortby -_time -severity_id
[dedup_search_for_notable_event]
args =
definition = | join type=left event_identifier_string vendor_severity \
[| tstats latest(_time) as _time latest(event_identifier_fields) as event_identifier_fields max(severity_id) as severity_id where `itsi_event_management_index` earliest=-59m latest=now by event_identifier_string, vendor_severity \
| dedup event_identifier_string sortby -_time -severity_id \
| table _time, event_identifier_string, event_identifier_fields, vendor_severity] \
| where isnull(event_identifier_fields)
#####################################################
# ACE Related macros
#####################################################
[ace_event_limit]
args =
definition = head 10000
##################### moved from itsi #####################
###############################################
# Notable event / MultiKpi / Correlation Search Macros
################################################
# Get service name using KV lookup alarm_console_lookup
[get_name_from_kv(2)]
args = input_id_field, output_field
definition = lookup alarm_console_lookup _key AS $input_id_field$ OUTPUT title AS $output_field$
# Get service name from kv using id
[get_service_name(2)]
args = input_id_field, output_field
definition = `get_name_from_kv($input_id_field$, $output_field$)`
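# Example usage (illustrative; the field names are placeholders for an ID field in your results and the output field to create):
# ... | `get_service_name(itsi_service_id, service_name)`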
# Used to set metadata of the correlation search created by the KPI Correlation UI
[kpi_correlation_meta_data(1)]
args= timeLabel
definition = eval kpi=coalesce(kpi, "ServiceHealthScore") | `getPercentage(alert_period, occurrences)` | eval service_kpi_ids= itsi_service_id + ":" + itsi_kpi_id | eval statement=kpi." had severity value ".severity." ".occurrences." times in "."$timeLabel$" | stats list(statement) as all_info dc(service_kpi_ids) as total_kpis values(*) as * | eval event_description=mvjoin(all_info, ". ") | eval _raw=event_description
[composite_kpi_meta_data]
args=
definition= eval gs_search_source="KPI Correlation" | eval time=strftime(_time, " %Y-%m-%d %H:%M:%S.%3N %p") | eval event_description='composite_kpi_name' + " status was " + 'severity_label' + " (Health Score=" + 'health_score' + ") at " + time
# Used in search creation for Multi-KPI Alert searches
[getPercentage(2)]
args = period, occurrence
definition = eval alert_period = coalesce($period$, 1) | `getSearchTimeDiff` | eval timeDiffInMin = timeDiff/60 | eval total_occurrences = floor(timeDiffInMin/alert_period) | eval percentage=$occurrence$/total_occurrences*100 | eval percentage=round(percentage, 4) | eval percentage=min(percentage, 100)
[apply_entity_lookup(1)]
args = entity_lookup_field
definition = eval sec_grp="default_itsi_security_group" | `match_entities_to_correlation_search($entity_lookup_field$, sec_grp)` | `filter_maintenance_entities`
# Used to augment search results for entities by security group that entities belong to
[match_entities_to_correlation_search(2)]
args = entity_field, sec_grp_field
definition = `itsi_match_entities($entity_field$, $sec_grp_field$)` | `normalize_correlation_search_matched_entities($entity_field$)`
# Processes matched entities; primarily for internal use only
# Modified macro for correlation search use only; does not remove the entity field at the end
# Assumes the itsi_entities lookup was invoked prior to this macro
[normalize_correlation_search_matched_entities(1)]
args = entity_field
definition = `is_object_in_maintenance("entity", entity_key)` | eval is_entity_defined=if(isnull(entity_key), "0", "1"), entity_key=if(isnull(entity_key), "N/A", entity_key), entity_title=coalesce(entity_title,'$entity_field$'), is_service_aggregate="0", is_entity_in_maintenance = in_maintenance | fields - in_maintenance, entity_sec_grp
###############################################
# Bulk import Macros
###############################################
# Use this macro to search in itsi_import_objects index
[get_itsi_import_objects_index]
args =
definition = index="itsi_import_objects"
###############################################
# KPI Summary Macros
###############################################
# Use this macro to search in itsi_summary index
[get_itsi_summary_index]
args =
definition = index="itsi_summary"
# Use this macro to search in itsi_summary_metrics index
[get_itsi_summary_metrics_index]
args =
definition = index="itsi_summary_metrics"
# Use this macro to get a partial search for a service in the itsi_summary index
[get_only_itsi_summary_service(1)]
args = serviceid
definition = indexed_itsi_service_id::$serviceid$
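# Example usage (illustrative; the service ID is a placeholder), combined with the summary index macro:
# `get_itsi_summary_index` `get_only_itsi_summary_service(4f3a2b1c-example-service-id)`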
# Use this macro to get a partial search for a KPI in the itsi_summary index
[get_only_itsi_summary_kpi(1)]
args = kpiid
definition = indexed_itsi_kpi_id::$kpiid$
# Use this macro to get a full search for a service in the itsi_summary index
[get_full_itsi_summary_service(1)]
args = serviceid
definition = `get_itsi_summary_index` indexed_itsi_service_id::$serviceid$
# Use this macro to get a full search for a service in the itsi_summary index, limited to service_health_monitor/backfill sources
[get_full_itsi_summary_service_health_events(1)]
args = serviceid
definition = `get_itsi_summary_index` indexed_itsi_service_id::$serviceid$ (source=service_health_monitor OR source=service_health_score_backfill)
# Use this macro to get a full search for a KPI in the itsi_summary index
[get_full_itsi_summary_kpi(1)]
args = kpiid
definition = `get_itsi_summary_index` indexed_itsi_kpi_id::$kpiid$
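# Example usage (illustrative; the KPI ID is a placeholder):
# `get_full_itsi_summary_kpi(9d8c7b6a-example-kpi-id)` | timechart span=5m avg(alert_value)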
# Use to get the KPI severity count for one or all possible severity values
[get_kpi_status(1)]
args = severity_name
definition = `get_itsi_summary_index` alert_severity="$severity_name$" `service_level_kpi_only` | stats latest(alert_severity) as alert_severity latest(alert_color) as severity_color by kpiid | stats count as severity_count first(severity_color) as severity_color by alert_severity | rename alert_severity AS severity_name
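# Example usage (illustrative; pass a severity label or "*" for all severities):
# `get_kpi_status(critical)`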
# Use to get all severity counts for defined KPIs
[get_all_kpi_status]
args =
definition = `get_kpi_status(*)`
# Use to show the health score of a service over time
[service_health_score_time_series(1)]
args = serviceid
definition = `service_health_data` serviceid="$serviceid$" | timechart avg(health_score) by service
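# Example usage (illustrative; the service ID is a placeholder):
# `service_health_score_time_series(4f3a2b1c-example-service-id)`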
[all_service_health_score_time_series]
definition = `service_health_score_time_series(*)`
[health_score_data]
definition = `get_itsi_summary_index` (source=service_health_monitor OR source=service_health_score_backfill)
[no_health_score_data]
definition = `get_itsi_summary_index` (source!=service_health_monitor AND source!=service_health_score_backfill)
[service_level_kpi_only]
definition = ((indexed_is_service_aggregate::1 AND indexed_is_service_max_severity_event::0) OR ((source=service_health_monitor OR source=service_health_score_backfill) AND scoretype="service_health"))
# Metric version of the service level kpi only macro
[metrics_service_level_kpi_only]
definition = ((is_service_max_severity_event=0 is_service_aggregate=1) OR scoretype="service_health")
[service_level_kpi_only(1)]
args = max_severity_event
definition = ((indexed_is_service_max_severity_event::$max_severity_event$) OR ((source=service_health_monitor OR source=service_health_score_backfill) AND scoretype="service_health"))
[service_health_data]
definition = `get_itsi_summary_index` (source=service_health_monitor OR source=service_health_score_backfill) scoretype="service_health"
[get_service_health_for_name(1)]
args = service_name
definition = `service_health_data` [ stats count | inputlookup append=t service_kpi_lookup where title="$service_name$" | rename _key as itsi_service_id | fields itsi_service_id count | eventstats count | eval itsi_service_id=if(count<2, "NO SERVICE FOUND",itsi_service_id) | search itsi_service_id=* | table itsi_service_id ]
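# Example usage (illustrative; the service title is a placeholder):
# `get_service_health_for_name("Example Service")`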
[composite_health_data]
definition = `get_itsi_summary_index` (source=service_health_monitor OR source=service_health_score_backfill) scoretype="compositekpi_health"
# Picks the max severity value event for a service
[service_level_max_severity_event_only]
definition = (indexed_is_service_max_severity_event::1 OR ((source=service_health_monitor OR source=service_health_score_backfill) AND scoretype="service_health"))
# Metrics version of the macro to pick the max severity value event for a service
[service_level_max_severity_metric_only]
definition = (is_service_max_severity_event=1 OR ((source=service_health_metrics_monitor OR source=service_health_score_backfill) AND scoretype="service_health"))
[entity_level_kpi_only]
definition = indexed_is_service_aggregate::0
# Metric version of the entity level kpi only macro
[metrics_entity_level_kpi_only]
definition = is_service_aggregate=0
# Allows for extraction of entity keys containing an equal sign
[escape_entity_key]
definition = rex "entity_key=\"(?<entity_key>[^\"]+)\""
# Token filter for events associated with a specific entity key, counteracting the performance degradation caused by the rex expression
[filter_by_entity_key(1)]
args = entity_id
definition = "entity_key=\"$entity_id$\"" OR "entity_key=$entity_id$" | `escape_entity_key` | search entity_key="$entity_id$"
[service_level_max_severity_and_service_health_score]
definition = (indexed_is_service_max_severity_event::1 OR ((source=service_health_monitor OR source=service_health_score_backfill) AND scoretype="service_health"))
# Metrics version of the macro.
[metrics_service_level_max_severity_and_service_health_score]
definition = (is_service_max_severity_event=1 OR ((source=service_health_metrics_monitor OR source=service_health_score_backfill) AND scoretype="service_health"))
# Use this to get a list of services and KPIs
# This macro uses inputlookup, so use it with a leading pipe, for example: | `service_kpi_list`
[service_kpi_list]
definition = inputlookup service_kpi_lookup | rename _key as serviceid title as service_name | eval kpi_info = mvzip('kpis._key', 'kpis.title', "==@@==") | fields kpi_info service_name serviceid | mvexpand kpi_info | rex field=kpi_info "(?<kpiid>.+)==@@==(?<kpi_name>.+)" | fields - kpi_info
# Given a list of KPI IDs, returns the associated entities with the highest average alert level. Input is provided as indexed_itsi_kpi_id::$kpiid$
[itsi_get_affecting_entities(1)]
args = kpi_id_summary_list
definition = `get_itsi_summary_index` $kpi_id_summary_list$ indexed_is_service_aggregate::0 | stats avg(alert_level) AS alert_level by entity_title, entity_key, itsi_service_id, itsi_kpi_id, kpi | sort 0 -alert_level | head 5
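# Example usage (illustrative; the KPI IDs are placeholders):
# `itsi_get_affecting_entities("indexed_itsi_kpi_id::example-kpi-id-1 OR indexed_itsi_kpi_id::example-kpi-id-2")`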
# DEPRECATED AS OF ITSI 4.9.1 -- not removed because of possible issues during migration
# Macro to capture the entity filter rule multivalue expansion and join used in both the ad hoc KPI and shared base search macros.
# Sets the values to the expected host="123" OR host="abc" OR host="xyz" format
# If there are no results OR alias_value is empty (non-matched entities), `no_entities_matched` is returned as the value
[entity_filter_rule_mvevals]
definition = rename entity_info.alias_value AS alias_values | mvexpand entity_filtering_field | mvexpand alias_values\
| eval matched_alias_values=if(isnull(alias_values),"`no_entities_matched`", entity_filtering_field."="."\"".alias_values."\"")\
| mvcombine matched_alias_values | eval results=mvjoin(matched_alias_values," OR ") | table results\
| mvcombine results | eval value=mvjoin(results," OR ")\
| appendpipe [stats count | where count=0] | eval value=if(isnull(count),value,"`no_entities_matched`")
# Given a KPI ID, perform an inputlookup against the itsi entity_rules filtering collection to construct the complete filter rule
[lookup_kpi_entity_filter_rule(1)]
args = kpi_id
definition = | inputlookup itsi_entity_filter_rules_lookup where kpi_id="$kpi_id$" | eval value=if(isnull(filtering_search), "`no_entities_matched`", filtering_search)
# Given a base search ID, perform an inputlookup against the itsi entity_rules filtering collection to construct the complete filter rule
[lookup_shared_base_search_entity_filter_rule(1)]
args = search_id
definition = | inputlookup itsi_entity_filter_rules_lookup where base_search_id="$search_id$" | eval value=if(isnull(filtering_search), "`no_entities_matched`", filtering_search)
###############################################
# ITSI Summary Metrics Related Macros
###############################################
# Main Mcollect macro to push events into itsi_summary_metrics index
[mcollect_into_summary_index]
args =
definition = rename alert_level AS "metric_name:alert_level", alert_value AS "metric_name:alert_value", service_health_score AS "metric_name:service_health_score" \
| mcollect spool=t `get_itsi_summary_metrics_index` sourcetype="itsi_summary:metrics" split=f
# Macro that performs a lookup to add KPI information to the metrics summary index results.
[join_kpi_info(1)]
args = kpi_id_field
definition = join $kpi_id_field$\
[| inputlookup alarm_console_lookup\
| rename _key as itsi_service_id \
| eval kpi_info_tmp=mvzip('kpis._key','kpis.title',"<=>"), kpi_info=mvzip(kpi_info_tmp, 'kpis.urgency', "<=>")\
| fields + kpi_info \
| mvexpand kpi_info \
| rex field=kpi_info "(?<itsi_kpi_id>.+)<=>(?<kpi>.+)<=>(?<urgency>.+)"\
| where (kpi != "ServiceHealthScore")\
| fields - kpi_info]
# Perform a lookup against operative_maintenance_log and join maintenance information based on `itsi_service_id`
[mark_services_in_maintenance]
args =
definition = join itsi_service_id type=left\
[ | inputlookup operative_maintenance_log | where maintenance_object_type="service" \
| rename "maintenance_object_key" as itsi_service_id | fields + itsi_service_id | eval is_service_in_maintenance=1 ] \
| fillnull is_service_in_maintenance
# Rename and reorganize metric results to match computer_health_score function
[reorganize_metrics_healthscore_results]
args =
definition = rename itsi_kpi_id AS kpiid, itsi_service_id AS serviceid\
| fields kpiid, serviceid, urgency, alert_level, alert_name, service, is_service_in_maintenance, kpi
# Generic transformations of kpi and health score fields into metric-compatible format.
[metrics_kpi_fields_transforms]
args =
definition = rename health_score AS service_health_score, urgency AS kpi_importance, kpibasesearch AS kpi_base_search, sec_grp AS itsi_team_id\
| eval is_null_alert_value=if(alert_value == "N/A" OR isnull(alert_value), 1, 0), alert_value=if(alert_value == "N/A" OR isnull(alert_value), 0, alert_value), \
is_service_disabled=if(alert_level==-3, 1, 0), service_health_score=if(service_health_score=="N/A",0,service_health_score) \
| `limit_metric_index_fields`
# The metrics index only allows numeric values for metric measurements.
# Need to convert the value back to N/A if alert_level = -1 or -3 (UNKNOWN or DISABLED) or is_null_alert_value = 1 (null data event). The default case uses the field itself.
[convert_null_value_to_NA(1)]
args = value_field
definition = eval $value_field$=case(alert_level = -1, "N/A", alert_level=-3, "N/A", is_null_alert_value=1, "N/A", true(), $value_field$)
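# Example usage (illustrative; converts the alert_value field back to N/A where applicable):
# ... | `convert_null_value_to_NA(alert_value)`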
# Restrict search to just KPI searches in the metric index.
[kpi_level_metrics_source_filter]
args =
definition = (source!=service_health_metrics_monitor AND source!=service_health_score_backfill)
# Restrict search to just service health results in the metric index.
[service_health_metrics_source_filter]
args =
definition = scoretype="service_health"
# Strip extraneous fields that are not needed when backfilling metrics results
[limit_metric_index_fields]
args =
definition = fields alert_level alert_period alert_value entity_key entity_title info_max_time info_min_time \
info_search_time is_backfilled_event is_custom_threshold_event is_entity_defined is_entity_in_maintenance is_filled_gap_event \
is_null_alert_value is_service_aggregate is_service_disabled is_service_in_maintenance is_service_max_severity_event \
itsi_kpi_id itsi_service_id itsi_team_id kpi kpi_base_search kpi_importance retired scoretype search_name search_now service_health_score
###############################################
# Predictive Analytics Related Macros
###############################################
# Macro to generate histogram data for health score distribution
[health_score_histogram(1)]
args = sid
definition = `health_score_data` indexed_itsi_service_id::$sid$ kpi=ServiceHealthScore\
| timechart limit=0 span=5m avg(health_score) BY kpi | `histogram(ServiceHealthScore,100)`
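# Example usage (illustrative; the service ID is a placeholder):
# `health_score_histogram(4f3a2b1c-example-service-id)`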
# Macro to generate data over time for all KPIs (including dependent KPIs), with KPI titles and the services they belong to
[kpi_values_over_time(1)]
args = sid
definition = `itsi_summary_with_dependence($sid$)`\
| timechart limit=0 span=5m avg(alert_value) BY kpiid\
| untable _time, kpiid, value\
| join kpiid [| inputlookup service_kpi_lookup\
| eval zipped=mvzip('kpis._key', 'kpis.title')\
| mvexpand zipped |eval temp=split(zipped,",")\
| eval kpiid=mvindex(temp,0), kpititle=mvindex(temp,1)\
| eval kpiname= title.": ".kpititle\
| fields kpiid, kpiname]\
| xyseries _time kpiname value
# Main search to calculate the actual vs. predicted health score visualization
# The "split" argument is the percentage of the data that will be used for the calculation.
[predict_actual_vs_predicted(3)]
args = sid,modelname,split
definition = `itsi_predictive_analytics_dataset($sid$)`\
| sample seed=1001 partitions=100 fieldname="pn"\
| where pn>=(100 - $split$)\
| apply $modelname$_ss\
| apply $modelname$_avg as "Predicted Health Score"\
| eval "Predicted Health Score"=case('Predicted Health Score'>100,100,'Predicted Health Score'<0,0,true(),'Predicted Health Score')\
| rename next30m_avg_hs as "Actual Health Score"\
| table "Actual Health Score" "Predicted Health Score"
# Main search to calculate residual errors
# Argument "split" is precent of data will be used for calculation.
[predict_residual_error(3)]
args = sid,modelname,split
definition = `itsi_predictive_analytics_dataset($sid$)`\
| sample seed=1001 partitions=100 fieldname="pn"\
| where pn>=(100 - $split$)\
| apply $modelname$_ss\
| apply $modelname$_avg as predicted\
| eval predicted = case(predicted>100,100,predicted<0,0,true(),predicted)\
| eval residual = next30m_avg_hs - predicted\
| `histogram(residual, 100)`\
| rename count as "Sample Count", residual as "Residual Error"
# Get events of KPIs and dependent KPIs for a service
[itsi_summary_with_dependence(1)]
args = sid
definition = `get_itsi_summary_index` indexed_itsi_service_id::$sid$ OR [| getservice service=$sid$\
| rex field=services_depends_on "kpis=(?<kpiid>.*)"\
| fields kpiid\
| mvexpand kpiid\
| eval kpiid=if(like(kpiid, "%,%"), split(kpiid, ","), kpiid) | mvexpand kpiid]
# Macro to generate the dataset for predicting health score
[itsi_predictive_analytics_dataset(1)]
args = sid
definition = `itsi_summary_with_dependence($sid$)`\
| timechart limit=0 span=5m avg(alert_value) as value_avg\
min(alert_value) as value_min\
max(alert_value) as value_max\
stdev(alert_value) as value_std\
avg(alert_level) as level_avg\
max(alert_level) as level_max\
avg(eval(if(is_service_max_severity_event=1, alert_value, null))) as value_ms_avg\
min(eval(if(is_service_max_severity_event=1, alert_value, null))) as value_ms_min\
max(eval(if(is_service_max_severity_event=1, alert_value, null))) as value_ms_max by kpiid\
| rename "value_avg: SHKPI-$sid$" as now_avg_hs,\
"value_min: SHKPI-$sid$" as now_worst_hs,\
"level_avg: SHKPI-$sid$" as now_avg_hsl,\
"level_max: SHKPI-$sid$" as now_worst_hsl\
| search now_avg_hs=*\
| eval now_avg_hsl=case(now_avg_hsl>=5, "Critical", now_avg_hsl>=3, "Medium", true(), "Normal"),\
now_worst_hsl=case(now_worst_hsl>=5, "Critical", now_worst_hsl>=3, "Medium", true(), "Normal")\
| eval this_date_day = strftime(_time, "%w"),\
this_date_hour = strftime(_time, "%H")\
| filldown\
| streamstats window=6 current=f first("value_avg: *") as last30mkpi_*,\
first("now_avg_hs") as last30m_avg_hs,\
first("now_worst_hs") as last30m_worst_hs,\
first("now_avg_hsl") as last30m_avg_hsl,\
first("now_worst_hsl") as last30m_worst_hsl\
| reverse\
| streamstats window=6 current=f first("value_avg: *") as next30mkpi_*,\
first("now_avg_hs") as next30m_avg_hs,\
first("now_worst_hs") as next30m_worst_hs,\
first("now_avg_hsl") as next30m_avg_hsl,\
first("now_worst_hsl") as next30m_worst_hsl\
| where _time < now() - 300\
| foreach next30m* [eval <<FIELD>>=if(_time<now()-2100,'<<FIELD>>',Null)]
# Macro to apply the trained models (scaler, average, and worst-case) to the dataset for further prediction steps
[apply_model(2)]
args = sid,modelname
definition = `itsi_predictive_analytics_dataset($sid$)` | apply $modelname$_ss | apply $modelname$_avg | apply $modelname$_worst
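# Example usage (illustrative; the service ID and model name are placeholders):
# `apply_model(4f3a2b1c-example-service-id, example_model)`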
# Macro to perform linear regression training
[train_linear_regression(2)]
args = modelname,split
definition = search next30m_avg_hs>=0 AND next30m_worst_hs>=0\
| fit StandardScaler "value*" with_mean=true with_std=true into $modelname$_ss\
| sample seed=1001 partitions=100 fieldname="pn"\
| search pn<$split$\
| appendpipe [| fit LinearRegression fit_intercept=true next30m_avg_hs from\
"SS_*" "this_date_*" last30m_avg_hs now_avg_hs into $modelname$_avg | fields - _time,*]\
| fit LinearRegression fit_intercept=true next30m_worst_hs from\
"SS_*" "this_date_*" last30m_worst_hs now_worst_hs into $modelname$_worst
# Macro to perform random forest training
[train_random_forest_regressor(2)]
args = modelname,split
definition = search next30m_avg_hs>=0 AND next30m_worst_hs>=0\
| fit StandardScaler "value*" with_mean=true with_std=true into $modelname$_ss\
| sample seed=1001 partitions=100 fieldname="pn"\
| search pn<$split$\
| appendpipe [| fit RandomForestRegressor next30m_avg_hs from\
"SS_*" "this_date_*" last30m_avg_hs now_avg_hs into $modelname$_avg | fields - _time,*]\
| fit RandomForestRegressor next30m_worst_hs from\
"SS_*" "this_date_*" last30m_worst_hs now_worst_hs into $modelname$_worst
# Macro to perform gradient boosting training
[train_gradient_boosting_regressor(2)]
args = modelname,split
definition = search next30m_avg_hs>=0 AND next30m_worst_hs>=0\
| fit StandardScaler "value*" with_mean=true with_std=true into $modelname$_ss\
| sample seed=1001 partitions=100 fieldname="pn"\
| search pn<$split$\
| appendpipe [| fit GradientBoostingRegressor next30m_avg_hs from\
"SS_*" "this_date_*" last30m_avg_hs now_avg_hs into $modelname$_avg | fields - _time,*]\
| fit GradientBoostingRegressor next30m_worst_hs from\
"SS_*" "this_date_*" last30m_worst_hs now_worst_hs into $modelname$_worst
# Macro to perform logistic regression training
[train_logistic_regression(2)]
args = modelname,split
definition = where in(next30m_avg_hsl, "Normal", "Medium", "Critical") AND in(next30m_worst_hsl, "Normal", "Medium", "Critical")\
| fit StandardScaler "value_*" with_mean=true with_std=true into $modelname$_ss\
| sample seed=1001 partitions=100 fieldname="pn"\
| search pn<$split$\
| appendpipe [| fit LogisticRegression fit_intercept=true next30m_avg_hsl from\
"SS_*" "level_*" "this_date_*" last30m_avg_hsl now_avg_hsl into $modelname$_avg | fields - _time,*]\
| fit LogisticRegression fit_intercept=true next30m_worst_hsl from\
"SS_*" "level_*" "this_date_*" last30m_worst_hsl now_worst_hsl into $modelname$_worst
# Macro to format output for health score prediction
[health_score_prediction_output]
definition = appendpipe\
[| fields next30m_avg_hs, next30m_worst_hs, "predicted(next30m_avg_hs)", "predicted(next30m_worst_hs)"\
| appendpipe [fields next30m_worst_hs, "predicted(next30m_worst_hs)"\
| rename next30m_worst_hs as next30m_avg_hs, "predicted(next30m_worst_hs)" as "predicted(next30m_avg_hs)"]\
| eval "predicted(next30m_avg_hs)"=case('predicted(next30m_avg_hs)'>100,100,'predicted(next30m_avg_hs)'<0,0,true(),'predicted(next30m_avg_hs)')\
| `regressionstatistics("next30m_avg_hs", "predicted(next30m_avg_hs)")`\
| rename rSquared as next30m_avg_hs, RMSE as next30m_worst_hs]\
| stats last(next30m_avg_hs) as rSquared,\
last(next30m_worst_hs) as RMSE,\
first("predicted(next30m_avg_hs)") as next30m_avg_hs,\
first("predicted(next30m_worst_hs)") as next30m_worst_hs\
| eval next30m_avg_hs=case(next30m_avg_hs < 0,0,next30m_avg_hs>100,100,true(),round(next30m_avg_hs,2)),\
next30m_worst_hs=case(next30m_worst_hs < 0,0,next30m_worst_hs>next30m_avg_hs,next30m_avg_hs,true(),round(next30m_worst_hs,2))
# Macro to format output for ONLY health score prediction
[only_health_score_prediction_output(2)]
args=sid,modelname
definition = `apply_model($sid$,$modelname$)` | fields "predicted(next30m_worst_hs)" | head 1\
| rename "predicted(next30m_worst_hs)" AS next30m_worst_hs
# Macro to format output for ONLY health score level prediction
[only_health_score_level_prediction_output(2)]
args=sid,modelname
definition = `apply_model($sid$,$modelname$)` | fields "predicted(next30m_worst_hsl)" | head 1\
| rename "predicted(next30m_worst_hsl)" AS next30m_worst_hsl
# Macro to format output of health score level prediction
[health_score_level_prediction_output]
definition = appendpipe\
[| fields next30m_avg_hsl, next30m_worst_hsl, "predicted(next30m_avg_hsl)", "predicted(next30m_worst_hsl)"\
| appendpipe [fields next30m_worst_hsl, "predicted(next30m_worst_hsl)"\
| rename next30m_worst_hsl as next30m_avg_hsl,\
"predicted(next30m_worst_hsl)" as "predicted(next30m_avg_hsl)"]\
| `classificationstatistics(next30m_avg_hsl, "predicted(next30m_avg_hsl)")`\
| rename accuracy as last30m_avg_hsl, precision as now_avg_hsl, recall as next30m_avg_hsl,\
f1 as last30m_worst_hsl, count as now_worst_hsl]\
| stats last(last30m_avg_hsl) as accuracy,\
last(now_avg_hsl) as precision,\
last(next30m_avg_hsl) as recall,\
last(last30m_worst_hsl) as f1,\
last(now_worst_hsl) as count,\
first("predicted(next30m_avg_hsl)") as next30m_avg_hsl,\
first("predicted(next30m_worst_hsl)") as next30m_worst_hsl\
| eval next30m_worst_hsl=if(next30m_worst_hsl<next30m_avg_hsl, next30m_avg_hsl, next30m_worst_hsl)
# Macro to perform prediction in the Test section
[itsi_predict(3)]
args = sid,outputtype,modelname
definition = `apply_model($sid$,$modelname$)` | `$outputtype$_prediction_output`
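# Example usage (illustrative; the service ID and model name are placeholders; outputtype is "health_score" or "health_score_level" per the *_prediction_output macros above):
# `itsi_predict(4f3a2b1c-example-service-id, health_score, example_model)`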
# Macro to create a model and test it with the given split in the Train section
[train_and_test(5)]
args=sid,outputtype,alg,modelname,split
definition = `itsi_predictive_analytics_dataset($sid$)`\
| appendpipe [`train_$alg$($modelname$,$split$)` | fields - _time *]\
| apply $modelname$_ss | search next30m_avg_hs>=0\
| sample seed=1001 partitions=100 fieldname="pn"\
| search pn>=$split$ | apply $modelname$_avg | apply $modelname$_worst\
| `$outputtype$_prediction_output`
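# Example usage (illustrative; placeholders shown; "alg" names one of the train_* macros above and "split" is the training percentage):
# `train_and_test(4f3a2b1c-example-service-id, health_score, linear_regression, example_model, 70)`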
# Macro to get KPI urgency for a service
[get_kpi_urgency(1)]
args = sid
definition = getservice service=$sid$ | rex field=services_depends_on "kpis=(?<kpiid>.*)"\
| fields kpiid | eval kpiid=if(like(kpiid, "%,%"), split(kpiid, ","), kpiid)\
| mvexpand kpiid | append [| getservice service=$sid$ | fields kpis | mvexpand kpis\
| rex field=kpis "id=(?<kpiid>.*)~~~urgency=(?<urgency>.*)" | fields - kpis | search kpiid!=SHKPI-$sid$]
# Prepare the data for KPI trend prediction
[prepare_kpi_trend_data(2)]
args=sid,suffix
definition = outputcsv itsi_predict_kpi_$suffix$.csv\
| fields - _time, *\
| `get_kpi_urgency($sid$)`\
| eval model_suffix=replace(kpiid, "-", "_")
# Macro to train the KPI trend models and the relation between the health score and KPIs.
[train_kpi_trends(2)]
args=sid,suffix
definition = `itsi_predictive_analytics_dataset($sid$)`\
| appendpipe [fit LinearRegression fit_intercept=true now_avg_hs from\
"value_avg:*" into app:itsi_predict_kpi_hs_$suffix$ | fields - _time *]\
| fit StandardScaler "value_*" with_mean=true with_std=true into app:itsi_predict_kpi_ss_$suffix$\
| `prepare_kpi_trend_data($sid$,$suffix$)`\
| map search="| inputcsv itsi_predict_kpi_$suffix$.csv | fit GradientBoostingRegressor \"next30mkpi_$kpiid$\" from\
\"SS_*\" \"this_date_*\" \"last30mkpi_$kpiid$\" \"value_avg: $kpiid$\" into app:itsi_predict_kpi_$model_suffix$"\
maxsearches=100\
| head 1\
| fields "predicted(*)"\
| rename "predicted(next30mkpi_*)" as *\
| fields - _time\
| foreach * [eval <<FIELD>>=1]\
| untable modelname kpi dummyfield\
| fields - dummyfield\
| eval modelname="itsi_predict_kpi_".replace(kpi, "-", "_")\
| append [| listmodels\
| search name="itsi_predict_kpi_*_$suffix$"\
| rename name as modelname\
| fields modelname]
# Macro to analyze root cause; it returns the swimlanes for the top N KPIs and the health score.
[root_cause_analysis(3)]
args=sid,suffix,ntop
definition = `itsi_predictive_analytics_dataset($sid$)`\
| apply app:itsi_predict_kpi_ss_$suffix$\
| `prepare_kpi_trend_data($sid$,$suffix$)`\
| map search="| inputcsv itsi_predict_kpi_$suffix$.csv | apply app:itsi_predict_kpi_$model_suffix$" maxsearches=100\
| timechart limit=0 span=5m first(*)\
| rename "first(next30mkpi_*)" as "actual_*", "first(predicted(next30mkpi_*))" as "predicted_*"\
| fields _time actual_* predicted_*\
| eval _time = _time + 2100\
| reverse\
| outputcsv itsi_predict_kpi_trends_$suffix$.csv\
| foreach actual_* [eval <<FIELD>>=if(isnull('<<FIELD>>'), 'predicted_<<MATCHSTR>>', '<<FIELD>>')]\
| fields - predicted_*\
| untable _time, kpi, value\
| chart limit=0 values(value) by _time,kpi\
| fit StandardScaler "actual_*" with_mean=true with_std=true\
| tail 6\
| stats max(SS_actual_*) as max_* min(SS_actual_*) as min_* first(SS_actual_*) as now_*\
| transpose\
| rex field=column "(?<type>.*)_(?<kpiid>.*)"\
| fields - column\
| xyseries kpiid type "row 1"\
| appendcols [|summary app:itsi_predict_kpi_hs_$suffix$\
| eval d=if(coefficient<0, -1, 1)\
| rex field=feature "value_avg: (?<kpiid>.*)"\
| search kpiid=*\
| fields kpiid, d]\
| appendcols [| `get_kpi_urgency($sid$)`]\
| eval impact=max((now-min)*d,(now-max)*d) * urgency\
| sort - impact\
| head $ntop$\
| streamstats count as n\
| map search="|inputcsv itsi_predict_kpi_trends_$suffix$.csv |fields *_$kpiid$ |rename *_$kpiid$ as *_$n$_$kpiid$"\
| untable _time, kpi, value\
| chart limit=0 values(value) by _time, kpi\
| reverse\
| outputcsv itsi_predict_kpi_top_n_$suffix$.csv
# Macro to get rSquared values for KPI trends for a service
[r2_for_kpi_trends(1)]
args=suffix
definition = inputcsv itsi_predict_kpi_top_n_$suffix$.csv\
| fieldsummary actual_*\
| rex field=field "actual_(?<kpiid>.*)"\
| map search="| inputcsv itsi_predict_kpi_top_n_$suffix$.csv | `regressionstatistics(actual_$kpiid$, predicted_$kpiid$)`"\
| fields rSquared
# Macro to list models by type
[listmodels_by_type(1)]
args = modelname
definition = listmodels | search name="$modelname$_*"\
| rex field=name "(?<name>.*)_(?<usage>[a-z]*)$"\
| stats count by name, usage\
| xyseries name, usage, count
# Generate confusion matrix for ITSI classification model results
[itsi_confusionmatrix(2)]
args = a, p
definition = rename "$a$" as actual, "$p$" as predicted\
| stats count by actual predicted\
| appendpipe [ | makeresults count=9 \
| streamstats count as n \
| head 9 \
| eval actual=case(n/3<=1, "Normal", n/3<=2, "Medium", true(), "Critical"),\
predicted=case(n%3=0, "Normal", n%3=1, "Medium", true(), "Critical"), count=0 \
| fields - _time n]\
| stats sum(count) as count by actual predicted\
| xyseries actual predicted count\
| rename * as "Predicted *"\
| fillnull value=0
# Predict the health score or health score level; returns just a single predicted value.
[itsi_predict_one_number(3)]
args = sid,type,modelname
definition = `itsi_predictive_analytics_dataset($sid$)` | apply $modelname$_ss | apply $modelname$_worst as $type$\
| fields $type$ | search $type$=* | head 1 | eval $type$=if($type$<0,0,$type$) | eval $type$=if($type$>100,100,$type$)
# Get the model count and KPI count to check whether KPI models were deleted by accident.
[itsi_check_model_count(2)]
args = sid,suffix
definition = `get_kpi_urgency($sid$)`| search urgency=* | eval s=replace(kpiid, "-", "_")\
| append [| makeresults | eval s="hs_$suffix$"] | append [| makeresults | eval s="ss_$suffix$"]\
| appendpipe [search [| listmodels | search name="itsi_predict_kpi_*" | rex field=name "itsi_predict_kpi_(?<s>.*)"\
| fields s] | stats count as modelcount]\
| appendpipe [stats count(kpiid) as kpicount]\
| stats last(kpicount) as kpicount, last(modelcount) as modelcount
######################################################
# Anomaly Detection Related Macros
#####################################################
# Gets anomaly detection related events for KPIs
[get_anomaly_detection_index]
args =
definition = index="anomaly_detection"
######################################################
# Entity Overview Related Macros
#####################################################
[itsi_im_metrics_indexes]
description = A macro command to specify the index(es) to use for metrics searches in ITSI IM.
definition = index="em_metrics" OR index="itsi_im_metrics" OR index="vmware-perf-metrics"
[itsi_im_events_indexes]
description = Selects only non-internal indexes, excluding the infra_alerts index.
definition = index!="_*" AND index!="infra_alerts"
[itsi_im_metadata_indexes]
description = A macro command to specify the index(es) to use for Kubernetes metadata searches in ITSI IM.
definition = index="itsi_im_meta"
[itsi_entity_type_nix_metrics_indexes]
description = A macro command to specify the indexes to use for *nix metrics searches.
definition = index="em_metrics" OR index="itsi_im_metrics"
[itsi_entity_type_ta_nix_metrics_indexes]
description = A macro command to specify the indexes to use for ta nix metrics searches.
definition = index="em_metrics" OR index="itsi_im_metrics"
[itsi_entity_type_windows_metrics_indexes]
description = A macro command to specify the indexes to use for Windows metrics searches.
definition = index="em_metrics" OR index="itsi_im_metrics"
[itsi_entity_type_k8s_node_metrics_indexes]
description = A macro command to specify the indexes to use for Kubernetes node metrics searches.
definition = index="em_metrics" OR index="itsi_im_metrics"
[itsi_entity_type_k8s_pod_metrics_indexes]
description = A macro command to specify the indexes to use for Kubernetes pod metrics searches.
definition = index="em_metrics" OR index="itsi_im_metrics"
[itsi_entity_type_vmware_vm_metrics_indexes]
description = A macro command to specify the indexes to use for VMware virtual machine metrics searches.
definition = index="vmware-perf-metrics"
[itsi_entity_type_vmware_esxihost_metrics_indexes]
description = A macro command to specify the indexes to use for VMware ESXi host metrics searches.
definition = index="vmware-perf-metrics"
[itsi_entity_type_vmware_cluster_metrics_indexes]
description = A macro command to specify the indexes to use for VMware cluster metrics searches.
definition = index="vmware-perf-metrics"
[itsi_entity_type_vmware_vcenter_metrics_indexes]
description = A macro command to specify the indexes to use for VMware vCenter metrics searches.
definition = index="vmware-perf-metrics"
[itsi_entity_type_vmware_datastore_metrics_indexes]
description = A macro command to specify the indexes to use for VMware datastore metrics searches.
definition = index="vmware-perf-metrics"
[itsi_entity_type_vmware_inv_hostsystem_data]
description = Specifies indexes and sourcetypes to populate vmware-cluster dashboard.
definition = index="vmware-inv" sourcetype="vmware_inframon:inv:hostsystem"
[itsi_entity_type_vmware_inv_vm_data]
description = Specifies indexes and sourcetypes to populate vmware-datastore dashboard.
definition = index="vmware-inv" sourcetype="vmware_inframon:inv:vm"
[itsi_entity_type_vmware_vclog_vpxd_data]
description = Specifies indexes and sourcetypes to populate vmware-vcenter dashboard.
definition = index="vmware-vclog" sourcetype="vmware:vclog:vpxd"
[itsi_entity_type_vmware_taskevent_events_data]
description = Specifies indexes and sourcetypes to populate vmware-taskevent dashboard.
definition = index="vmware-taskevent" sourcetype="vmware_inframon:events"
######################################################
# Entity Overview Related Macros
#####################################################
[itsi_infra_vital_metrics(5)]
description = Use to display the entity overview vital metrics trendline chart given the parameters. \
Depending on the input trendline_type, it can display either aggregated stats across entities OR stats split by each entity.
args = search, entity_type, split_by_fields, matching_entity_fields, trendline_type
definition= $search$ \
| rename [| `gen_rename_fields("$split_by_fields$","$matching_entity_fields$")`]\
| eval [ | `gen_eval_fields("$matching_entity_fields$")`], id="$entity_type$"\
| lookup itsi_entities entity_type_ids as id [| `gen_as_fields("$matching_entity_fields$", "_itsi_identifier_lookups as ")`]\
OUTPUT _key as entity_key, entity_type_ids\
| search entity_key != NULL\
| timechart cont=false eval(round(avg(val),2)) AS val `gen_by_fields($trendline_type$, "entity_key")` limit=0\
| filldown
[itsi_infra_vital_metrics_sparkline(5)]
description = Use to display the entity overview vital metrics trendline chart given the parameters. \
Depending on the input trendline_type, it can display either aggregated stats across entities OR stats split by each entity.
args = search, entity_type, split_by_fields, matching_entity_fields, trendline_type
definition= $search$ \
| rename [| `gen_rename_fields("$split_by_fields$","$matching_entity_fields$")`]\
| eval [ | `gen_eval_fields("$matching_entity_fields$")`], id="$entity_type$"\
| lookup itsi_entities entity_type_ids as id [| `gen_as_fields("$matching_entity_fields$", "_itsi_identifier_lookups as ")`]\
OUTPUT _key as entity_key, entity_type_ids\
| search entity_key != NULL\
| timechart cont=false eval(round(avg(val),2)) AS val `gen_by_fields($trendline_type$, "entity_key")`\
| filldown
[itsi_infra_vital_metrics_no_split_by_entity(1)]
description = Use to display the entity overview vital metrics chart if the vital metric does not \
have entity matching rules configured.
args = search
definition = $search$ \
| timechart cont=false eval(round(avg(val),2)) AS val \
| filldown
[itsi_infra_vital_metrics_hist(6)]
description = Used to display the Entity Overview key metric histogram given the provided parameters.
args = search, entity_type, split_by_fields, matching_entity_fields, filter_spl, unit
definition= $search$ \
| rename [| `gen_rename_fields("$split_by_fields$","$matching_entity_fields$")`]\
| eval [ | `gen_eval_fields("$matching_entity_fields$")`], id="$entity_type$"\
| lookup itsi_entities entity_type_ids as id [| `gen_as_fields("$matching_entity_fields$", "_itsi_identifier_lookups as ")`]\
OUTPUT _key as entity_key, entity_type_ids, _itsi_informational_lookups as info, title,\
_itsi_identifier_lookups as identifier, _itsi_entity_status_lookups as status \
| eval [ | `gen_eval_fields(title)`]\
| eval [ | `gen_eval_fields(entity_key)`]\
| eval all_fields=mvappend(info, identifier, status, title, entity_key)\
| search entity_key != NULL AND ([|`gen_filter_fields("$filter_spl$")`])\
| `gen_histchart("$unit$")`
[gen_histchart(1)]
description = Generates the histogram chart for the key metric, using 10 bins (0-100 when the unit is "%")
args = unit
definition= stats avg(val) as val by entity_key\
| eval val=round(val,1)\
| bin val [|makeresults | eval search=if("$unit$"=="%", "bins=10 start=0 end=100 span=10", "bins=10") | fields - _time]\
| chart count by val\
| makecontinuous val [|makeresults | eval search=if("$unit$"=="%", "bins=10 start=0 end=100 span=10", "bins=10") | fields - _time]\
| fillnull value=0
[gen_rename_fields(2)]
description = Expands to a rename list of the form [split_by_field1 as matching_entity_field1 , split_by_field2 as matching_entity_field2 ...]
args = split_by_fields,matching_entity_fields
definition= makeresults | eval s1="$split_by_fields$",s2="$matching_entity_fields$"\
| eval search=mvjoin(mvzip(split(s1,","), split(s2, ","), " as "), " , ")\
| nomv search
[gen_as_fields(2)]
description = Expands to a comma-separated field list with the given prefix prepended to each field, e.g. [<prefix>foo,<prefix>bar,...]
args = matching_entity_fields,prefix
definition= makeresults | head 1 | eval matching_entity_fields="$matching_entity_fields$",prefix="$prefix$"\
| eval search=split(matching_entity_fields,",") | mvexpand search | eval search=prefix+search\
| mvcombine search delim="," | nomv search
[gen_by_fields(2)]
description = If the trendline type is "split", returns "by $fields$"; otherwise returns an empty string
args = trendline_type, fields
definition= if(trendline_type="split", "by $fields$","")
iseval=true
[gen_eval_fields(1)]
description = expand the search to return as [foo="foo=".$foo$....]
args = matching_entity_fields
definition = makeresults | head 1 | eval fields="$matching_entity_fields$"\
| eval search=mvzip(split(fields,","), mvzip(split(fields,","), split(fields, ","), "=\".$"), "=\"")\
| mvexpand search | eval search =search."$" | mvcombine delim=", " search | nomv search
[gen_filter_fields(1)]
description = expand the search to return as [all_fields=filter1 OR all_fields=filter2]
args = filter_spl
definition = makeresults | eval fields="$filter_spl$"\
| eval search=replace(fields, " \(", " (all_fields=\"")\
| eval search=replace(search, "\) ", "\") ")\
| eval search=replace(search, " OR ", "\" OR all_fields=\"")
[itsi_infra_vital_metrics_entities_match_count(3)]
description = Use to preview the entities matched for a vital metric
args = search, split_by_fields, matching_entity_fields
definition= $search$ \
| rename [| `gen_rename_fields("$split_by_fields$","$matching_entity_fields$")`]\
| eval [ | `gen_eval_fields("$matching_entity_fields$")`]\
| lookup itsi_entities [| `gen_as_fields("$matching_entity_fields$", "_itsi_identifier_lookups as ")`]\
OUTPUT _key as entity_key | stats dc(entity_key)
[itsi_entity_type_alert(9)]
args = search, entity_type_title, entity_type_id, split_by_fields, matching_entity_fields, metric_name, info_threshold, warning_threshold, critical_threshold
definition= $search$ \
| rename [| `gen_rename_fields("$split_by_fields$","$matching_entity_fields$")`]\
| eval [ | `gen_eval_fields("$matching_entity_fields$")`], id="$entity_type_id$"\
| lookup itsi_entities entity_type_ids as id [| `gen_as_fields("$matching_entity_fields$", "_itsi_identifier_lookups as ")`]\
OUTPUT _key as entity_key, title\
| search entity_key != NULL\
| eval entity_type="$entity_type_title$"\
| eval metric_name="$metric_name$"\
| eval itsiSeverity=case($info_threshold$, 2, $warning_threshold$, 4, $critical_threshold$, 6)\
| eval itsiAlert=metric_name." alert for ".entity_type." entity type"\
| eval itsiDrilldownURI="/app/itsi/entity_detail?entity_key=".entity_key\
| eval itsiInstance=title \
| eval entity_title=title \
| eval itsiNotableTitle=title\
| eval itsiDetails = metric_name + " current value is " + val\
| eval sec_grp=default_itsi_security_group\
| eval alert_source="entity_type" \
| `filter_maintenance_entities` | fields - host
[itsi_entity_type_alert_entity_filter(10)]
args = search, entity_type_title, entity_type_id, split_by_fields, matching_entity_fields, metric_name, info_threshold, warning_threshold, critical_threshold, entity_filter
definition= $search$ \
| rename [| `gen_rename_fields("$split_by_fields$","$matching_entity_fields$")`]\
| eval [ | `gen_eval_fields("$matching_entity_fields$")`], id="$entity_type_id$"\
| lookup itsi_entities entity_type_ids as id [| `gen_as_fields("$matching_entity_fields$", "_itsi_identifier_lookups as ")`]\
OUTPUT _key as entity_key, title, _itsi_informational_lookups as info_lookup, _itsi_identifier_lookups as alias_lookup\
| search entity_key != NULL\
$entity_filter$\
| eval entity_type="$entity_type_title$"\
| eval metric_name="$metric_name$"\
| eval itsiSeverity=case($info_threshold$, 2, $warning_threshold$, 4, $critical_threshold$, 6)\
| eval itsiAlert=metric_name." alert for ".entity_type." entity type"\
| eval itsiDrilldownURI="/app/itsi/entity_detail?entity_key=".entity_key\
| eval itsiInstance=title \
| eval entity_title=title \
| eval itsiNotableTitle=title\
| eval val = round(val, 2) \
| eval itsiDetails = metric_name + " current value is " + val\
| eval sec_grp=default_itsi_security_group\
| eval alert_source="entity_type" \
| `filter_maintenance_entities` | fields - host
######################################################
# SVC Related Macros
#####################################################
[is_itsi_app]
description = Checks whether the app field matches one of the ITSI apps
definition = app IN ("itsi"\
,"DA-ITSI-APPSERVER"\
,"DA-ITSI-DATABASE"\
,"DA-ITSI-EUEM"\
,"DA-ITSI-LB"\
,"DA-ITSI-OS"\
,"DA-ITSI-STORAGE"\
,"DA-ITSI-VIRTUALIZATION"\
,"DA-ITSI-WEBSERVER"\
,"SA-IndexCreation"\
,"SA-ITOA"\
,"SA-ITSI-CustomModuleViz"\
,"SA-ITSI-Licensechecker"\
,"SA-ITSI-MetricAD"\
,"SA-UserAccess"\
)
########## end of macros moved from itsi #############