Splunk_Deploiement/apps/trackme/bin/trackmeexpandsplkmhm.py

#!/usr/bin/env python
# coding=utf-8
__author__ = "TrackMe Limited"
__copyright__ = "Copyright 2022-2026, TrackMe Limited, U.K."
__credits__ = "TrackMe Limited, U.K."
__license__ = "TrackMe Limited, all rights reserved"
__version__ = "0.1.0"
__maintainer__ = "TrackMe Limited, U.K."
__email__ = "support@trackme-solutions.com"
__status__ = "PRODUCTION"
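
# Purpose (a summary based on the stream logic below, not an original header
# comment): this custom streaming command expands a per-object dictionary of
# metric states into one downstream record per metric entry, processing the
# buffered results in configurable chunks.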

# Standard library imports
import os
import sys
import time
import json
import ast

# Logging imports
import logging
from logging.handlers import RotatingFileHandler

# Networking imports
import urllib3

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# splunk home
splunkhome = os.environ["SPLUNK_HOME"]

# set logging
filehandler = RotatingFileHandler(
    "%s/var/log/splunk/trackme_expand_splk_mhm.log" % splunkhome,
    mode="a",
    maxBytes=10000000,
    backupCount=1,
)
formatter = logging.Formatter(
    "%(asctime)s %(levelname)s %(filename)s %(funcName)s %(lineno)d %(message)s"
)
logging.Formatter.converter = time.gmtime
filehandler.setFormatter(formatter)
log = logging.getLogger()  # root logger - good to get it only once
for hdlr in log.handlers[:]:  # remove the existing file handlers
    if isinstance(hdlr, logging.FileHandler):
        log.removeHandler(hdlr)
log.addHandler(filehandler)  # set the new handler
# set the log level to INFO, DEBUG as the default is ERROR
log.setLevel(logging.INFO)

# append current directory
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

# import libs
import import_declare_test

# import Splunk libs
from splunklib.searchcommands import (
    dispatch,
    StreamingCommand,
    Configuration,
    Option,
    validators,
)

# Import trackme libs
from trackme_libs import trackme_reqinfo
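
# Example SPL usage (a sketch, not part of the original file: assumes the
# command is registered under this script's name in commands.conf, and that
# upstream results carry an "object" field plus the dictionary field to
# expand; "current_state_dict" is a hypothetical field name):
# | trackmeexpandsplkmhm field_current="current_state_dict" chunk_size=10000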

@Configuration(distributed=False)
class TrackMeMergeSplkDhm(StreamingCommand):

    chunk_size = Option(
        doc="""
        **Syntax:** **chunk_size=<integer>**
        **Description:** Specify the number of records to be processed in a single chunk, defaults to 10K records.""",
        require=False,
        default=10000,
        validate=validators.Match("chunk_size", r"^\d*$"),
    )

    field_current = Option(
        doc="""
        **Syntax:** **field_current=<field name>**
        **Description:** field name containing the current object dictionary.""",
        require=True,
    )

    # status will be statically defined as imported
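
    # Expected shape of the field_current dictionary (an assumption, inferred
    # from the keys read in stream() below), one entry per tracked metric:
    # {"metric:cpu": {"idx": "trackme_metrics", "metric_category": "cpu",
    #                 "last_time": 1700000000, "last_metric_lag": 42,
    #                 "time_measure": 1700000060, "lag_allowed": 3600,
    #                 "state": "green"}}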

    def stream(self, records):
        # Get request info and set logging level
        reqinfo = trackme_reqinfo(
            self._metadata.searchinfo.session_key,
            self._metadata.searchinfo.splunkd_uri,
        )
        log.setLevel(reqinfo["logging_level"])

        # empty array to store our processed records
        records_list = []

        # Loop in the results
        records_count = 0
        for splrecord in records:
            # increment
            records_count += 1

            # get fields and values
            current_dict = str(splrecord[self.field_current])

            # handle the raw record
            rawdict = splrecord

            # remove fields not needed any longer
            del rawdict[self.field_current]

            # Create the record to be added to our array of processed events
            record = {
                "object": str(splrecord["object"]),
                "current_dict": str(current_dict),
                "rawdict": str(rawdict),
            }

            # logging
            log.info(f'host="{splrecord["object"]}"')
            log.debug(f'downstream record="{json.dumps(record, indent=1)}"')

            # Append to the records array
            records_list.append(record)

        ################################
        # Export to the table by chunk #
        ################################

        # total number of messages to be processed
        results_count = len(records_list)
        log.info(f'number of records to be processed, record_count="{results_count}"')

        # process by chunk
        chunks = [
            records_list[i : i + int(self.chunk_size)]
            for i in range(0, len(records_list), int(self.chunk_size))
        ]
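
        # Chunking sketch (illustrative): with chunk_size=3 and 7 buffered
        # records, the comprehension above yields chunks of sizes [3, 3, 1]:
        # [r0..r6] -> [[r0, r1, r2], [r3, r4, r5], [r6]]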

        for chunk in chunks:
            for subrecord in chunk:
                # attempt to parse the current_dict literal back into a dict
                try:
                    current_dict = ast.literal_eval(subrecord.get("current_dict"))
                except Exception as e:
                    log.error(
                        f'failed to parse current_dict, object="{subrecord.get("object")}", exception="{e}"'
                    )
                    current_dict = None
                rawdict = subrecord.get("rawdict")

                # if we have a parsed dictionary, expand it
                if current_dict:
                    # loop through the per-metric entries
                    for p_id, p_info in current_dict.items():
                        yield {
                            "_time": time.time(),
                            "_raw": json.dumps(subrecord, indent=1),
                            "object": subrecord["object"],
                            "metric_index": p_info["idx"],
                            "metric_category": p_info["metric_category"],
                            "metric_last_time_seen": time.strftime(
                                "%d %b %Y %H:%M:%S",
                                time.localtime(int(p_info["last_time"])),
                            ),
                            "metric_last_lag_seen": p_info["last_metric_lag"],
                            "time_measure": time.strftime(
                                "%d %b %Y %H:%M:%S",
                                time.localtime(int(p_info["time_measure"])),
                            ),
                            "metric_max_lag_allowed": p_info["lag_allowed"],
                            "state": p_info["state"],
                        }


dispatch(TrackMeMergeSplkDhm, sys.argv, sys.stdin, sys.stdout, __name__)
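
# Illustrative downstream record shape (a sketch of the fields yielded above,
# plus _raw; all values are hypothetical and timestamps use the local-time
# "%d %b %Y %H:%M:%S" rendering from stream()):
# {"_time": 1700000060.0, "object": "myhost", "metric_index": "trackme_metrics",
#  "metric_category": "cpu", "metric_last_time_seen": "14 Nov 2023 22:13:20",
#  "metric_last_lag_seen": 42, "time_measure": "14 Nov 2023 22:14:20",
#  "metric_max_lag_allowed": 3600, "state": "green"}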