#!/usr/bin/env python # coding=utf-8 __author__ = "TrackMe Limited" __copyright__ = "Copyright 2022-2026, TrackMe Limited, U.K." __credits__ = "TrackMe Limited, U.K." __license__ = "TrackMe Limited, all rights reserved" __version__ = "0.1.0" __maintainer__ = "TrackMe Limited, U.K." __email__ = "support@trackme-solutions.com" __status__ = "PRODUCTION" # Standard library imports import os import sys import json import time # Logging imports import logging from logging.handlers import RotatingFileHandler # Networking imports import urllib3 urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) # splunk home splunkhome = os.environ["SPLUNK_HOME"] # set logging filehandler = RotatingFileHandler( "%s/var/log/splunk/trackme_opsstatus_expand.log" % splunkhome, mode="a", maxBytes=10000000, backupCount=1, ) formatter = logging.Formatter( "%(asctime)s %(levelname)s %(filename)s %(funcName)s %(lineno)d %(message)s" ) logging.Formatter.converter = time.gmtime filehandler.setFormatter(formatter) log = logging.getLogger() # root logger - Good to get it only once. 
# Replace any file handler previously attached to the root logger with this
# script's rotating file handler, so log lines land in our dedicated file.
for hdlr in log.handlers[:]:  # remove the existing file handlers
    if isinstance(hdlr, logging.FileHandler):
        log.removeHandler(hdlr)
log.addHandler(filehandler)  # set the new handler

# set the log level to INFO, DEBUG as the default is ERROR
log.setLevel(logging.INFO)

# append current directory so that sibling modules resolve
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

# import libs
import import_declare_test

# import Splunk libs
from splunklib.searchcommands import (
    dispatch,
    StreamingCommand,
    Configuration,
    Option,
    validators,
)

# Import trackme libs
from trackme_libs import trackme_reqinfo


@Configuration(distributed=False)
class TrackMeOpsStatusExpand(StreamingCommand):
    """Streaming command that expands the ops-status job component register.

    Each incoming record carries (in ``_raw``) a ``job_component_register``
    JSON array; every entry of that array is emitted as its own event with
    its main fields promoted to top-level result fields.
    """

    # status will be statically defined as imported

    def stream(self, records):
        """Expand and yield one result per job_component_register entry.

        :param records: incoming search results (dicts, or JSON strings)
        :yields: dicts with _time, _raw and the promoted component fields
        :raises Exception: re-raised (with logging) on any parsing failure
        """

        # Get request info and set logging level
        reqinfo = trackme_reqinfo(
            self._metadata.searchinfo.session_key, self._metadata.searchinfo.splunkd_uri
        )
        log.setLevel(reqinfo["logging_level"])

        # Loop, expand and yield
        count = 0
        for record in records:
            logging.debug(f'record="{record}"')

            # records may arrive as JSON strings rather than dicts
            if not isinstance(record, dict):
                record = json.loads(record)

            raw_record = record.get("_raw")
            if not isinstance(raw_record, dict):
                raw_record = json.loads(raw_record)
            logging.debug(f'raw_record="{raw_record}"')

            try:
                # if raw_record is not a list, convert it into a list
                if not isinstance(raw_record, list):
                    raw_record = [raw_record]

                for subrecord in raw_record:
                    logging.debug(f'subrecord="{subrecord}"')

                    # the register itself is a JSON-encoded array
                    job_component_register_records = json.loads(
                        subrecord.get("job_component_register")
                    )

                    for job_component_register_record in job_component_register_records:
                        logging.debug(
                            f'job_component_register_record="{job_component_register_record}"'
                        )

                        # bugfix: was `count = +1` (assigns the constant +1),
                        # which made results_count always report 1
                        count += 1

                        # yield
                        yield {
                            "_time": time.time(),
                            "_raw": job_component_register_record,
                            "component": job_component_register_record.get("component"),
                            "earliest": job_component_register_record.get("earliest"),
                            "last_duration": job_component_register_record.get(
                                "last_duration"
                            ),
                            "last_exec": job_component_register_record.get("last_exec"),
                            "last_result": job_component_register_record.get(
                                "last_result"
                            ),
                            "last_status": job_component_register_record.get(
                                "last_status"
                            ),
                            "latest": job_component_register_record.get("latest"),
                            "report": job_component_register_record.get("report"),
                            "tenant_id": job_component_register_record.get("tenant_id"),
                        }

                logging.info(
                    f'trackmeopsstatusexpand terminated successfully, results_count="{count}"'
                )

            except Exception as e:
                logging.error(
                    f'trackmeopsstatusexpand command failed with exception="{str(e)}"'
                )
                # chain the original exception so the traceback is preserved
                raise Exception(
                    f'trackmeopsstatusexpand command failed with exception="{str(e)}"'
                ) from e


dispatch(TrackMeOpsStatusExpand, sys.argv, sys.stdin, sys.stdout, __name__)