You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1132 lines
50 KiB

# Copyright (C) 2005-2025 Splunk Inc. All Rights Reserved.
import inspect
import json
import sys
import os
from collections import defaultdict
from splunk.clilib.bundle_paths import make_splunkhome_path
from splunk import rest, RESTException
from integrations.commons.splunk.server import Splunk
from time import sleep
import SA_ITOA_app_common.splunklib.results as results
sys.path.append(make_splunkhome_path(["etc", "apps", "SA-ITOA", "lib"]))
from migration_utility.migration_utility_manifest import (
precheck_failure_severity,
precheck_documentation_details,
dict_for_mapping_precheck_ids,
dict_for_mapping_category,
dict_for_mapping_precheck_class,
dict_for_mapping_precheck_class_and_id,
)
from itsi.objects.itsi_kpi_base_search import ItsiKPIBaseSearch
from ITOA.itoa_common import (
get_conf_stanza_single_entry,
get_object_batch_size,
SplunkUser,
)
from ITOA.setup_logging import InstrumentCall, logger
from migration.migration import MigrationBaseMethod
from migration_utility.constants import (
KPI_BASE_SEARCH_THRESHOLD,
MODES,
MODES_STR_MAP,
UPGRADE_READINESS_JOB_TIMEOUT_LIMIT,
)
from itsi.objects.model.itsi_model_validator import ItsiModelValidator
from itsi.objects.itsi_service import ItsiService
from itsi.objects.itsi_entity import ItsiEntity
from itsi.objects.itsi_service_template import ItsiBaseServiceTemplate
from itsi.objects.itsi_upgrade_readiness_prechecks import ItsiUpgradeReadinessPrechecks
from itsi.objects.itsi_refresh_queue_job import ItsiRefreshQueueJob
from itsi.service_template.service_template_utils import ServiceTemplateUtils
from itsi.objects.itsi_backup_restore import ItsiBackupRestore
from ITOA.itoa_common import get_current_utc_epoch
class ItsiMigrationUtilityHandler:
def __init__(self, sessionkey, transaction_id) -> None:
    """Initialize the handler with precheck mappings, thresholds,
    authorize.conf paths, and the capability data used to compare
    native vs. modified role capabilities.

    :param sessionkey: Splunk session key used for all REST/KV store access
    :param transaction_id: id correlating all log lines of this job
    """
    self.sessionkey = sessionkey
    self.transaction_id = transaction_id
    # Manifest-driven lookup tables (precheck ids, classes, categories,
    # severities, documentation links).
    self.dict_for_mapping_precheck_ids = dict_for_mapping_precheck_ids
    self.dict_for_mapping_precheck_class = dict_for_mapping_precheck_class
    self.dict_for_mapping_precheck_class_and_id = (
        dict_for_mapping_precheck_class_and_id
    )
    self.precheck_failure_severity = precheck_failure_severity
    self.dict_for_mapping_category = dict_for_mapping_category
    self.precheck_documentation_details = precheck_documentation_details
    self.upgrade_readiness_precheck_obj = ItsiUpgradeReadinessPrechecks(
        self.sessionkey, "nobody"
    )
    self.entity_batch_size = get_object_batch_size(self.sessionkey, "entity")
    self.failed_precheck_count = 0
    self.kpi_base_search_threshold = KPI_BASE_SEARCH_THRESHOLD
    # Feature flag from itsi_settings.conf [upgrade_readiness]; non-zero
    # disables the dangling-service-reference-in-entities precheck.
    flag_entry = get_conf_stanza_single_entry(
        self.sessionkey,
        "itsi_settings",
        "upgrade_readiness",
        "dangling_service_reference_in_entities_disabled",
    )
    self.dangling_service_reference_in_entities_disabled = int(
        flag_entry.get("content", 0)
    )
    # authorize.conf locations consulted when computing role capabilities.
    self.itsi_default_authorize_conf_file = make_splunkhome_path(
        ["etc", "apps", "itsi", "default", "authorize.conf"]
    )
    self.system_default_authorize_conf_file = make_splunkhome_path(
        ["etc", "system", "default", "authorize.conf"]
    )
    self.system_local_authorize_conf_file = make_splunkhome_path(
        ["etc", "system", "local", "authorize.conf"]
    )
    self.metric_ad_authorize_conf_file = make_splunkhome_path(
        ["etc", "apps", "SA-ITSI-MetricAD", "default", "authorize.conf"]
    )
    self.parent_role_to_get_user_capabilites = "itoa_admin"
    self.role_to_get_user_capabilites = "itoa_team_admin"
    # Native capabilities must be computed before the current-user diff.
    self.native_user_capabilities = self.get_native_capabilities(
        self.itsi_default_authorize_conf_file,
        self.system_default_authorize_conf_file,
        self.system_local_authorize_conf_file,
        self.metric_ad_authorize_conf_file,
        self.role_to_get_user_capabilites,
    )
    self.modified_native_capabilities = self.get_modified_capabilities(
        self.parent_role_to_get_user_capabilites
    )
    self.modified_capability_for_current_user = (
        self.get_modified_capability_for_current_user(
            self.modified_native_capabilities, self.native_user_capabilities
        )
    )
    self.failed_precheck = {}
    self.splunk_server = Splunk(self.sessionkey)
@InstrumentCall(logger)
def failed_precheck_map(self, precheck_id=None, affect_object_count=0):
    """Record how many objects were affected by a failed precheck.

    Stores the count in ``self.failed_precheck`` keyed by precheck id;
    a ``None`` id is a no-op.

    Args:
        precheck_id (_type_, optional): Take the failed precheck ID. Defaults to None.
        affect_object_count (int, optional): Takes the count of the affected objects of failed precheck. Defaults to 0.
    """
    logger.info("Entering failed_precheck_map with precheck_id=%s and"
                " affect_object_count=%d.", precheck_id,
                affect_object_count)
    if precheck_id is not None:
        # Later persisted to the upgrade-readiness job record.
        self.failed_precheck[precheck_id] = affect_object_count
    logger.info("Exiting failed_precheck_map.")
@InstrumentCall(logger)
def get_precheck_details(self, itsi_muh, precheck_id):
    """Return the description, resolution and documentation details
    for a single precheck.

    :param itsi_muh: ItsiMigrationUtilityHandler instance handed to the
        precheck class constructor
    :param precheck_id: id of the precheck to look up
    :return: tuple (description_details, resolution_details,
        documentation_details)
    :raises IndexError: if no precheck maps to ``precheck_id``
    """
    logger.info("Retrieving precheck details for"
                " precheck_id=%s with transaction_id=%s",
                precheck_id, self.transaction_id)
    # Reverse lookup: precheck name(s) whose manifest id matches.
    precheck = [
        name
        for name, pre_id in self.dict_for_mapping_precheck_ids.items()
        if pre_id == precheck_id
    ]
    precheck_class = self.dict_for_mapping_precheck_class[precheck[0]]
    logger.info("Found precheck class %s for"
                " precheck_id=%s with transaction_id=%s",
                precheck_class.__name__, precheck_id, self.transaction_id)
    description_details, resolution_details = precheck_class(
        itsi_muh
    ).get_precheck_details()
    documentation_details = self.precheck_documentation_details[precheck_id]
    # Fix: the adjacent format-string fragments previously lacked a
    # separating space, logging "precheck_id=Xwith transaction_id=...".
    logger.info("Returning precheck details for precheck_id=%s"
                " with transaction_id=%s",
                precheck_id, self.transaction_id)
    return description_details, resolution_details, documentation_details
@InstrumentCall(logger)
def get_auto_remediation_details(self, itsi_muh, precheck_id):
    """Return the auto-remediation description and fixed-category
    information for a single precheck.

    :param itsi_muh: ItsiMigrationUtilityHandler instance handed to the
        precheck class constructor
    :param precheck_id: id of the precheck to look up
    :return: tuple (description_details, category_fixed)
    """
    logger.info("[transaction_id=%s] Retrieving auto-remediation"
                " details for precheck_id=%s.",
                self.transaction_id, precheck_id)
    # Reverse lookup from manifest id back to the precheck name.
    matching_names = [
        name
        for name, mapped_id in self.dict_for_mapping_precheck_ids.items()
        if mapped_id == precheck_id
    ]
    precheck_class = self.dict_for_mapping_precheck_class[matching_names[0]]
    logger.info("[transaction_id=%s] Found precheck class %s"
                " for precheck_id=%s.",
                self.transaction_id, precheck_class.__name__, precheck_id)
    precheck_instance = precheck_class(itsi_muh)
    description_details, category_fixed = (
        precheck_instance.get_auto_remediation_details()
    )
    logger.info("[transaction_id=%s] Returning auto-remediation"
                " details for precheck_id=%s.",
                self.transaction_id, precheck_id)
    return description_details, category_fixed
@InstrumentCall(logger)
def set_prechecks(self, precheck_ids):
    """Populate ``self.prechecks`` with the precheck classes to run.

    :param precheck_ids: iterable of precheck ids; ``None`` selects every
        precheck declared in the manifest.
    """
    logger.info("[transaction_id=%s] Setting prechecks with"
                " provided precheck_ids: %s.",
                self.transaction_id, precheck_ids)
    self.prechecks = []
    if precheck_ids is None:
        # No filter given: run everything.
        self.prechecks = list(
            self.dict_for_mapping_precheck_class.values())
        logger.info("[transaction_id=%s] No precheck_ids provided."
                    " Set to all available prechecks.",
                    self.transaction_id)
        return
    for name, mapped_id in self.dict_for_mapping_precheck_ids.items():
        if mapped_id not in precheck_ids:
            continue
        precheck_cls = self.dict_for_mapping_precheck_class[name]
        self.prechecks.append(precheck_cls)
        logger.info("[transaction_id=%s] Precheck"
                    " added for ID %s: %s.",
                    self.transaction_id, mapped_id,
                    precheck_cls.__name__)
@InstrumentCall(logger)
def execute(self, itsi_muh, operation_mode, precheck_ids=None):
    """
    Run the selected prechecks or auto-remediation and log the outcome
    to the console and itsi_migration_utility.log.

    : param itsi_muh: object of ItsiMigrationUtilityHandler() class
    : param operation_mode: mode of operation -- MODE 1: Precheck or MODE 2: Auto-remediation
    : precheck_ids: list of ids of prechecks to be executed.
        If None, all prechecks will be executed
    : return: tuple (result, message, failed_precheck_count)
    """
    result, message = True, ""
    logger.info(
        '[transaction_id=%s] Starting execution '
        'with operation_mode=%s and precheck_ids=%s.',
        self.transaction_id, MODES_STR_MAP[operation_mode], precheck_ids
    )
    self.set_prechecks(precheck_ids)
    logger.info('[transaction_id=%s] Checking for in-progress jobs.',
                self.transaction_id)
    # Refuse to start while another upgrade-readiness job (excluding this
    # transaction) is still running within the timeout window.
    other_jobs = self.upgrade_readiness_precheck_obj.get_in_progress_upgrade_readiness_prechecks(
        lookback_time=UPGRADE_READINESS_JOB_TIMEOUT_LIMIT,
        exclude_transaction_id=self.transaction_id,
    )
    other_job_ids = [job["transaction_id"] for job in other_jobs]
    other_job_start_times = [job["start_time"] for job in other_jobs]
    if len(other_jobs) > 0:
        logger.error(
            "[transaction_id=%s] [operation_mode=%s] Execution failed"
            " because other in progress job(s) were found,"
            " transaction_id(s): %s.",
            self.transaction_id, MODES_STR_MAP[operation_mode],
            str(other_job_ids)
        )
        # NOTE(review): the failure message embeds the start times, while
        # the log lists transaction ids — confirm that is intentional.
        return False, f"OTHER_JOB_{other_job_start_times}", self.failed_precheck_count
    if operation_mode == MODES["PRECHECK"]:
        message = "PRECHECK"
        logger.info('[transaction_id=%s] Performing prechecks.',
                    self.transaction_id)
        self.perform_precheck(itsi_muh)
    elif operation_mode == MODES["AUTO_REMEDIATION"]:
        logger.info('[transaction_id=%s] Performing auto-remediation.',
                    self.transaction_id)
        result, message = self.perform_remediation(itsi_muh)
    logger.info('[transaction_id=%s] Execution finished with'
                ' result=%s, message=%s, failed_precheck_count=%d.',
                self.transaction_id, result, message,
                self.failed_precheck_count)
    return result, message, self.failed_precheck_count
@InstrumentCall(logger)
def perform_precheck(self, itsi_muh):
    """Run every configured precheck, log each result, and persist the
    failed-precheck map to the upgrade-readiness job record."""
    logger.info("[transaction_id=%s] Starting precheck execution.",
                self.transaction_id)
    self.setup_data()
    time_now = get_current_utc_epoch()
    self.failed_precheck = {}
    self.upgrade_readiness_precheck_obj.update_upgrade_readiness_precheck_job(
        self.transaction_id, precheck_started=True, precheck_start_time=time_now
    )
    logger.info("[transaction_id=%s] Precheck start time: %d.",
                self.transaction_id, time_now)
    # Phase 1: run all prechecks, keeping only non-empty result strings
    # (these are later written to itsi_migration_utility.log).
    collected_outputs = [
        output
        for output in (
            precheck(itsi_muh).precheck_result() for precheck in self.prechecks
        )
        if output
    ]
    # Phase 2: log each result; anything without a "[precheck_passed"
    # marker counts as a failure.
    for detailed_output in collected_outputs:
        if "[precheck_passed" in detailed_output:
            logger.info(detailed_output)
        else:
            self.failed_precheck_count += 1
            logger.error(detailed_output)
    if self.failed_precheck:
        self.upgrade_readiness_precheck_obj.update_upgrade_readiness_precheck_job(
            self.transaction_id,
            failed_precheck=self.failed_precheck
        )
    logger.info("[transaction_id=%s] Precheck Execution Completed.",
                self.transaction_id)
@InstrumentCall(logger)
def perform_remediation(self, itsi_muh):
    """Run auto-remediation for every failing precheck, save the updated
    objects back to the KV store, then re-run all prechecks.

    :param itsi_muh: ItsiMigrationUtilityHandler instance passed to each
        precheck class
    :return: tuple (success_bool, message); on failure the message is an
        error code ("SI_UR_1004" refresh-queue timeout, "SI_UR_1005"
        backup/restore in progress, "SI_UR_1006" template sync in progress)
    """
    # list to store the detailed output for logging in
    # itsi_migration_utility.log file
    logger.info("[transaction_id=%s] Starting auto-remediation process.",
                self.transaction_id)
    list_to_store_detailed_output = []
    operation_mode = MODES["AUTO_REMEDIATION"]
    self.setup_data()
    time_now = get_current_utc_epoch()
    self.upgrade_readiness_precheck_obj.update_upgrade_readiness_precheck_job(
        self.transaction_id,
        remediation_started=True,
        remediation_start_time=time_now,
    )
    logger.info("[transaction_id=%s] Remediation start time: %s.",
                self.transaction_id, time_now)
    # Gate 1 of 3: fail job and return if refresh queue is not empty after a period of time
    refresh_queue_job_interface = ItsiRefreshQueueJob(self.sessionkey, "nobody")
    if not refresh_queue_job_interface.wait_for_unblocked_queue():
        logger.error("[transaction_id=%s] [operation_mode=%s] "
                     "Execution timed out waiting for refresh queue to "
                     "empty. Not proceeding with remediation.",
                     self.transaction_id, MODES_STR_MAP[operation_mode])
        return False, "SI_UR_1004"
    # Gate 2 of 3: fail job and return if backup/restore jobs are in progress
    backup_restore_interface = ItsiBackupRestore(self.sessionkey, "nobody")
    if backup_restore_interface.is_any_backup_restore_job_in_progress("nobody"):
        logger.error(
            "[transaction_id=%s] [operation_mode=%s] "
            "Backup/restore jobs are in progress. Auto-remediation is"
            " not allowed while backup/restore jobs are in progress.",
            self.transaction_id, MODES_STR_MAP[operation_mode]
        )
        return False, "SI_UR_1005"
    # Gate 3 of 3: fail job and return if service template sync is in progress
    status = ServiceTemplateUtils(
        self.sessionkey, "nobody"
    ).service_template_sync_job_in_progress_or_sync_now()
    if status.get("status", False):
        logger.error(
            "[transaction_id=%s] [operation_mode=%s]"
            "One or more service templates are currently syncing. "
            "Auto-remediation is not allowed while service template sync"
            " is in progress.",
            self.transaction_id, MODES_STR_MAP[operation_mode]
        )
        return False, "SI_UR_1006"
    # Remediate only the prechecks that currently fail (output lacking the
    # "[precheck_passed" marker); passing prechecks get an informational entry.
    for precheck in self.prechecks:
        precheck_obj = precheck(itsi_muh)
        precheck_id = self.dict_for_mapping_precheck_class_and_id[precheck]
        detailed_output_of_precheck = precheck_obj.precheck_result()
        if "[precheck_passed" not in detailed_output_of_precheck:
            # auto_remediation() mutates the shared objects held in
            # itsi_muh.dict_of_original_objs (populated by setup_data).
            detailed_output_of_remediation = precheck_obj.auto_remediation()
            if detailed_output_of_remediation:
                # Logging the details of updated objects
                list_to_store_detailed_output.append(
                    detailed_output_of_remediation)
                list_to_store_detailed_output.append(
                    f"[transaction_id={self.transaction_id}] "
                    f"[operation_mode={MODES_STR_MAP[operation_mode]}] "
                    "The errors identified by the upgrade readiness check were fixed."
                )
        else:
            list_to_store_detailed_output.append(
                f"[transaction_id={self.transaction_id}] "
                f"[precheck_id={precheck_id}] "
                "No automatic fix ran because no error exists."
            )
    # converting dict into list
    list_of_updated_objs = list(itsi_muh.dict_of_original_objs.values())
    # update into kvstore
    self.update_kvstore_objects(list_of_updated_objs)
    # to print in log file
    for detailed_output in list_to_store_detailed_output:
        logger.info(detailed_output)
    # NOTE(review): this "fixed" summary is logged unconditionally, even
    # when no remediation produced output — confirm that is intended.
    logger.info(
        "[transaction_id=%s] [operation_mode=%s] The errors identified "
        "by the upgrade readiness check were fixed.",
        self.transaction_id, MODES_STR_MAP[operation_mode]
    )
    # perform another precheck after remediation is done
    logger.info(
        "[transaction_id=%s] [operation_mode=%s] Running another "
        "precheck after completing remediation.",
        self.transaction_id, MODES_STR_MAP[operation_mode]
    )
    self.set_prechecks(None)
    self.perform_precheck(itsi_muh)
    logger.info("[transaction_id=%s] Auto-remediation process completed"
                " successfully.", self.transaction_id)
    return True, ""
@InstrumentCall(logger)
def setup_data(self):
    """Load and index every object the prechecks operate on.

    Runs the ItsiModelValidator checks, fetches entities, services,
    service templates, KPI templates and KPI base searches from the KV
    store, and exposes them — plus derived lookup structures (dangling
    service reference maps, empty-threshold KPI lists, etc.) — as
    instance attributes consumed by the individual precheck classes.
    """
    logger.info("Starting setup_data method")
    self.imv = ItsiModelValidator(self.sessionkey, logger)
    logger.info("ItsiModelValidator initialized")
    self.imv.check_service_templates()
    self.imv.check_services()
    logger.info("Checked service templates and services")
    self.imv.get_services_and_service_templates()
    self.imv.check_kpis_in_collections()
    logger.info("Fetched services, service templates, and KPIs")
    self.imv.get_services_having_no_entity_filter_rule()
    self.imv.get_kpi_base_search_having_service_entity_filter_rule()
    logger.info("Checked services having no entity filter rule and KPIs "
                "with service entity filter rule")
    self.imv.get_base_search_id_having_service_entity_filter_rule()
    self.imv.get_kpi_base_searches()
    self.imv.get_service_templates_with_correct_services()
    logger.info("Fetched base search IDs, KPI base searches, "
                "and correct service templates")
    # Fix: previously a conditional expression used as a statement; a
    # plain ``if`` states the intent (skip the entities-services map when
    # the dangling-service-reference precheck is disabled).
    if not self.dangling_service_reference_in_entities_disabled:
        self.imv.get_entities_services_map()
    self.mi_method = MigrationBaseMethod(self.sessionkey, logger=logger)
    logger.info("MigrationBaseMethod initialized")
    self.owner = "nobody"
    logger.info("[transaction_id=%s] Owner set to nobody",
                self.transaction_id)
    # Set the threshold from the limits.conf file
    self.kpi_base_search_threshold = int(get_conf_stanza_single_entry(
        self.sessionkey, 'itsi_settings', 'upgrade_readiness',
        'kpi_base_search_threshold').get('content', KPI_BASE_SEARCH_THRESHOLD))
    logger.info("[transaction_id=%s] Large number of KPI base search threshold"
                " set to %s",
                self.transaction_id,
                self.kpi_base_search_threshold)
    # List of KPIs having empty threshold field
    self.kpi_with_empty_threshold_field = []
    # List of KPI Base Searches having no metrics configured
    self.kpi_base_search_empty_metrics = []
    logger.info("[transaction_id=%s] Initialized empty KPI threshold "
                "and KPI base searches with no metrics",
                self.transaction_id)
    self.get_kpi_with_empty_threshold_field()
    # Map of entities linked to each service
    self.list_of_entities_link_to_service = dict(
        self.imv.list_of_entities_link_to_service
    )
    logger.info("Fetched list of entities linked to services")
    logger.info("Starting setup of dangling service references and"
                " KPI entity filter rules")
    self.service_objects_with_dangling_service_refs = (
        self.imv.service_objects_with_dangling_service_refs
    )
    self.kpi_base_search_having_service_entity_filter_rule = list(
        self.imv.kpi_base_search_id_having_entity_filter_rule_enabled
    )
    logger.info("Separating dangling service references"
                " into two categories")
    # Split service objects with dangling service refs into two maps keyed
    # by service id: "services depending on me" vs. "services depends on".
    self.service_objects_with_dangling_services_depending_on_me_refs = defaultdict(
        list
    )
    self.service_objects_with_dangling_services_depends_on_refs = defaultdict(
        list)
    for service_obj in self.service_objects_with_dangling_service_refs:
        service_id = service_obj["_key"]
        for services_depends_on_obj in service_obj["services_depends_on"]:
            if "serviceid" in services_depends_on_obj:
                self.service_objects_with_dangling_services_depends_on_refs[
                    service_id
                ].append(services_depends_on_obj["serviceid"])
        for services_depending_on_me_obj in service_obj["services_depending_on_me"]:
            if "serviceid" in services_depending_on_me_obj:
                self.service_objects_with_dangling_services_depending_on_me_refs[
                    service_id
                ].append(services_depending_on_me_obj["serviceid"])
    logger.info("Completed processing dangling service references")
    # Re-expose validator results for the precheck classes.
    self.service_template_id_to_missing_linked_services_map = (
        self.imv.service_template_id_to_missing_linked_services_map
    )
    self.enabled_services_with_no_entity_filter_rule = (
        self.imv.enabled_services_with_no_entity_filter_rule
    )
    self.service_template_objects_missing_expected_linked_services_map = (
        self.imv.service_template_objects_missing_expected_linked_services_map
    )
    self.service_templates_with_linked_services = (
        self.imv.service_templates_with_linked_services
    )
    self.services_without_base_service_template_id = (
        self.imv.services_without_base_service_template_id
    )
    self.service_objects_with_missing_depends_service_refs_map = (
        self.imv.service_objects_with_missing_depends_service_refs_map
    )
    self.objs_with_corrupt_kpis = self.imv.objs_with_corrupt_kpis
    self.objs_with_dangling_shared_base_search_kpis = (
        self.imv.objs_with_dangling_shared_base_search_kpis
    )
    self.objs_with_dangling_kpi_threshold_templates = (
        self.imv.objs_with_dangling_kpi_threshold_templates
    )
    self.service_template_bad_sync_status = (
        self.imv.service_template_bad_sync_status
    )
    logger.info("Fetched service templates, entity filter rules, "
                "and related data")
    self.entity_interface = ItsiEntity(self.sessionkey, "nobody")
    self.service_interface = ItsiService(self.sessionkey, "nobody")
    self.service_template_interface = ItsiBaseServiceTemplate(
        self.sessionkey, "nobody"
    )
    self.kpi_base_search_interface = ItsiKPIBaseSearch(
        self.sessionkey, "nobody"
    )
    logger.info("Initialized interfaces for entities, services, "
                "service templates, and KPIs")
    self.kpi_base_search_with_count_of_kpis = (
        self.imv.kpi_base_search_with_count_of_kpis
    )
    self.service_template_with_correct_linked_services = (
        self.imv.service_template_with_correct_linked_services
    )
    # fetch all the objects from kvstore
    self.original_entities = list(
        self.mi_method.migration_get(
            "entity",
            limit=self.entity_batch_size,
            get_raw=True,
            source_kvstore=True,
            get_raw_kwargs={"collection": "itsi_services"},
        )
    )
    self.original_services = list(
        self.mi_method.migration_get(
            "service",
            limit=get_object_batch_size(self.sessionkey),
            get_raw=True,
            source_kvstore=True,
            get_raw_kwargs={"collection": "itsi_services"},
        )
    )
    self.original_base_service_templates = self.service_template_interface.get_bulk(
        "nobody", transaction_id=None
    )
    # NOTE(review): KPI templates are fetched via the service interface
    # here — presumably they live in the same collection; confirm.
    self.original_kpi_templates = self.service_interface.get_bulk(
        "nobody", transaction_id=None
    )
    self.original_kpi_base_search = self.kpi_base_search_interface.get_bulk(
        "nobody", transaction_id=None
    )
    logger.info("Fetched original entities, services, service templates,"
                " KPI templates, and KPI base searches from kvstore")
    # Index every object type by its "_key" for O(1) lookup during
    # remediation.
    self.dict_of_original_entities = {
        entity["_key"]: entity for entity in self.original_entities
    }
    self.dict_of_original_services = {
        service["_key"]: service for service in self.original_services
    }
    self.dict_of_original_base_service_templates = {
        base_service_template["_key"]: base_service_template
        for base_service_template in self.original_base_service_templates
    }
    self.dict_of_original_kpi_templates = {
        kpi_template["_key"]: kpi_template
        for kpi_template in self.original_kpi_templates
    }
    self.dict_of_original_kpi_base_search = {
        kpi_base_search["_key"]: kpi_base_search
        for kpi_base_search in self.original_kpi_base_search
    }
    logger.info("Stored original objects in dictionaries")
    self.list_of_dicts = [
        self.dict_of_original_base_service_templates,
        self.dict_of_original_kpi_templates,
        self.dict_of_original_services,
        self.dict_of_original_entities,
        self.dict_of_original_kpi_base_search,
    ]
    self.dict_of_original_objs = {}
    # Fix: the loop variable was previously bound as ``self.item``,
    # leaking a transient instance attribute; a local name suffices.
    for object_dict in self.list_of_dicts:
        self.dict_of_original_objs.update(object_dict)
    logger.info("[transaction_id=%s] Combined original objects into a "
                "single dictionary with %s items",
                self.transaction_id,
                len(self.dict_of_original_objs))
    self.all_updated_objs = []
    logger.info("setup_data method completed successfully")
@InstrumentCall(logger)
def update_kvstore_objects(self, updated_objects):
    """
    Takes list of updated objects after performing auto-remediation
    and then stores them in kvstore using rest call
    : param updated_objects: list of updated objects
    """
    logger.info("Starting update of kvstore objects")
    # Bucket the objects by the interface used to persist them.
    # NOTE(review): entities are batched together with services and saved
    # through the service interface — presumably because both live in the
    # itsi_services collection; confirm.
    updated_base_service_templates = []
    updated_services = []
    updated_kpi_templates = []
    updated_kpi_base_search = []
    buckets = {
        "base_service_template": updated_base_service_templates,
        "service": updated_services,
        "entity": updated_services,
        "kpi_template": updated_kpi_templates,
        "kpi_base_search": updated_kpi_base_search,
    }
    for updated_obj in updated_objects:
        obj_type = updated_obj["object_type"]
        if obj_type in buckets:
            buckets[obj_type].append(updated_obj)
        else:
            logger.warning("[transaction_id=%s] Unknown object type "
                           "'%s' encountered",
                           self.transaction_id,
                           obj_type)
    # Log the number of items in each category
    logger.info("[transaction_id=%s] Updated base service templates"
                " count: %s",
                self.transaction_id,
                len(updated_base_service_templates))
    logger.info("[transaction_id=%s] Updated services count: %s",
                self.transaction_id,
                len(updated_services))
    logger.info("[transaction_id=%s] Updated KPI templates count: %s",
                self.transaction_id,
                len(updated_kpi_templates))
    logger.info("[transaction_id=%s] Updated KPI base searches count: %s",
                self.transaction_id,
                len(updated_kpi_base_search))
    # Saving the updated objects in the backend
    if updated_base_service_templates:
        self.service_template_interface.batch_save_backend(
            "nobody", updated_base_service_templates, transaction_id=None
        )
        logger.info("[transaction_id=%s] Successfully saved %s"
                    " base service templates",
                    self.transaction_id,
                    len(updated_base_service_templates))
    if updated_services:
        self.service_interface.batch_save_backend(
            "nobody", updated_services, transaction_id=None
        )
        logger.info("[transaction_id=%s] Successfully saved %s services",
                    self.transaction_id,
                    len(updated_services))
    if updated_kpi_templates:
        self.service_interface.batch_save_backend(
            "nobody", updated_kpi_templates, transaction_id=None
        )
        logger.info("[transaction_id=%s] Successfully saved"
                    " %s KPI templates",
                    self.transaction_id,
                    len(updated_kpi_templates))
    if updated_kpi_base_search:
        self.kpi_base_search_interface.batch_save_backend(
            "nobody", updated_kpi_base_search, transaction_id=None
        )
        logger.info("[transaction_id=%s] Successfully saved"
                    " %s KPI base searches",
                    self.transaction_id,
                    len(updated_kpi_base_search))
    logger.info("Update of kvstore objects completed")
@InstrumentCall(logger)
def make_logging_details(
    self, precheck_id, total_count, severity, category, block_upgrade
):
    """Build the standard bracketed key=value prefix used in precheck
    log lines.

    :param precheck_id: id of the precheck being reported
    :param total_count: number of affected objects
    :param severity: failure severity label
    :param category: precheck category (quoted in the output)
    :param block_upgrade: whether the failure blocks upgrade
    :return: formatted prefix string (note the trailing space)
    """
    logging_details = (
        f"[transaction_id={self.transaction_id}] "
        f"[precheck_id={precheck_id}] [total_count={total_count}] "
        f"[severity={severity}] "
        f'[category="{category}"] [blocks_upgrade={block_upgrade}] '
    )
    logger.info("[transaction_id=%s] Created logging details: %s",
                self.transaction_id,
                logging_details)
    return logging_details
@InstrumentCall(logger)
def make_result(self, passed, precheck, message, object_type, object_id, **kwargs):
    """
    Takes required parameters of the object that failed the prechecks and
    then returns the dict of all these parameters so that proper formatted result
    will get logged in the log file
    : param passed: boolean for prechecks failed or passed
    : param precheck: precheck type
    : param message: required message to be added
    : param object_type: type of the object (service, base service template, kpi template)
    : param object_id: '_key' of the object
    : return: dict of these parameters received
    """
    # kwargs are merged last so callers can add or override fields.
    precheck_dict = {
        "passed": passed,
        "pre-check": precheck,
        "message": message,
        "Object_type": object_type,
        "object_id": object_id,
        **kwargs,
    }
    logger.info("[transaction_id=%s] Created precheck result: %s",
                self.transaction_id,
                precheck_dict)
    return precheck_dict
@InstrumentCall(logger)
def _check_empty_threshold_kpi(self, kpi):
    """
    Check if the threshold fields in the KPIs of service is empty
    1.If the KPI is shared base verify the metric field in the base search used by the KPI
    2.If not then consider threshold field
    :param kpi: ITSI Object for KPI
    :return : None
    """
    # Named booleans make the original ``and``/``or`` precedence explicit:
    # flag when the threshold field is empty AND the KPI is either an
    # adhoc search or a shared-base search whose base search has no metrics.
    threshold_missing = not kpi.get("threshold_field", "")
    search_type = kpi.get("search_type", "")
    shared_base_without_metrics = (
        search_type == "shared_base"
        and kpi.get("base_search_id", "") in self.kpi_base_search_empty_metrics
    )
    is_adhoc = search_type == "adhoc"
    if threshold_missing and (shared_base_without_metrics or is_adhoc):
        self.kpi_with_empty_threshold_field.append(kpi)
        logger.warning(
            "[transaction_id=%s] KPI with empty threshold field found"
            " and added to list: %s",
            self.transaction_id,
            kpi
        )
    else:
        logger.info(
            "[transaction_id=%s] KPI checked and does not meet criteria"
            " for empty threshold field: %s",
            self.transaction_id,
            kpi
        )
@InstrumentCall(logger)
def _check_threshold_in_kpi(self, service_obj):
    """
    Check the threshold field in each KPI
    :param service_obj: ITSI object with services and KPIs
    :return : None
    """
    kpi_list = service_obj.get("kpis", [])
    logger.info(
        "[transaction_id=%s] Checking %d KPIs in service object: %s",
        self.transaction_id,
        len(kpi_list),
        service_obj.get('_key', 'unknown_service_id')
    )
    for kpi in kpi_list:
        # The synthetic health-score KPI is exempt from the check.
        if kpi["title"] == "ServiceHealthScore":
            logger.debug(
                "[transaction_id=%s] Skipping KPI with title"
                " 'ServiceHealthScore': %s",
                self.transaction_id,
                kpi
            )
            continue
        self._check_empty_threshold_kpi(kpi)
@InstrumentCall(logger)
def _check_empty_metrics_for_kpi_base_search(self):
    """
    Check for the empty metrics field in KPI Base search
    :param : None
    :return : None
    """
    kpi_base_search_object = ItsiKPIBaseSearch(self.sessionkey, "unknown")
    all_objects = kpi_base_search_object.get_bulk(owner=self.owner)
    logger.info(
        "[transaction_id=%s] Fetched %d KPI Base Search objects"
        " for owner '%s'",
        self.transaction_id,
        len(all_objects),
        self.owner
    )
    # Collect the keys of every base search whose metrics list is empty.
    for base_search in all_objects:
        if len(base_search.get("metrics", [])) == 0:
            logger.debug(
                "[transaction_id=%s] KPI Base Search object with key "
                "'%s' has empty metrics field.",
                self.transaction_id,
                base_search.get("_key")
            )
            self.kpi_base_search_empty_metrics.append(base_search.get("_key"))
    logger.info(
        "[transaction_id=%s] Found %d KPI Base Search objects"
        " with empty metrics fields.",
        self.transaction_id,
        len(self.kpi_base_search_empty_metrics)
    )
@InstrumentCall(logger)
def get_kpi_with_empty_threshold_field(self):
    """
    Get the KPIs that have empty threshold fields for each Service
    : param : None
    : return : None
    """
    # Only services containing at least one KPI with an empty
    # threshold_field are fetched (server-side filter).
    service_iter = self.mi_method.migration_get(
        "service",
        limit=get_object_batch_size(self.sessionkey),
        get_raw=True,
        fields=["_key", "identifying_name", "object_type", "kpis"],
        source_kvstore=True,
        get_raw_kwargs={"collection": "itsi_services"},
        filter_data={"kpis.threshold_field": ""},
    )
    logger.info("[transaction_id=%s] Started processing services to"
                " find KPIs with empty threshold fields.",
                self.transaction_id)
    # Populate kpi_base_search_empty_metrics before checking KPIs, since
    # _check_empty_threshold_kpi consults it.
    self._check_empty_metrics_for_kpi_base_search()
    for service_record in service_iter:
        logger.debug("[transaction_id=%s] Processing service with ID: %s",
                     self.transaction_id, service_record.get('_key'))
        self._check_threshold_in_kpi(service_record)
@InstrumentCall(logger)
def parse_conf_file(self, file_path, stanza):
    """
    Get conf stanza from file
    : param file_path : file located at path
    : param stanza : stanza name to get values of
    : return : dict of key/values
    """
    result = {}
    if not os.path.isfile(file_path):
        return result
    logger.info("[transaction_id=%s] Parsing configuration file: %s"
                " for stanza: %s",
                self.transaction_id, file_path, stanza)
    current_stanza = None
    with open(file_path, "r") as conf_file:
        for raw_line in conf_file:
            line = raw_line.strip()
            if line.startswith("[") and line.endswith("]"):
                # New stanza header — switch context.
                current_stanza = line[1:-1]
                logger.debug("[transaction_id=%s] Found stanza: %s",
                             self.transaction_id,
                             current_stanza)
                continue
            if current_stanza == stanza and "=" in line:
                key, value = line.split("=", 1)
                result[key.strip()] = value.strip()
                logger.debug("[transaction_id=%s] Parsed key-value"
                             " pair: %s=%s",
                             self.transaction_id, key.strip(),
                             value.strip())
    logger.info("[transaction_id=%s] Completed parsing stanza: %s."
                " Parsed %d key-value pairs.",
                self.transaction_id, stanza, len(result))
    return result
@InstrumentCall(logger)
def parse_authorize_conf(self, file_path):
    """
    Parse the authorize.conf file and return a dictionary of roles and their imported roles.
    : param file_path : The file path of the authorize.conf file.
    : return : A dictionary containing roles and their imported roles.
    """
    roles = {}
    if not os.path.isfile(file_path):
        logger.warning("[transaction_id=%s] authorize.conf file"
                       " not found: %s",
                       self.transaction_id, file_path)
        return roles
    logger.info("[transaction_id=%s] Parsing authorize.conf file: %s",
                self.transaction_id, file_path)
    with open(file_path, "r") as file:
        current_role = None
        for line in file:
            line = line.strip()
            if line.startswith("[") and line.endswith("]"):
                current_role = line[1:-1]
                # Only role stanzas are tracked; capability and other
                # stanzas are ignored.
                if current_role.startswith("role_"):
                    roles[current_role] = []
            elif line.startswith("importRoles"):
                # Fix: guard on membership in ``roles`` — previously a
                # stray importRoles line under a non-"role_" stanza
                # raised KeyError. Also split on the first '=' only so
                # values containing '=' are not truncated.
                if current_role in roles:
                    import_roles = line.split("=", 1)[1].strip().split(";")
                    roles[current_role].extend(import_roles)
    logger.info("[transaction_id=%s] Completed parsing authorize.conf"
                " file. Parsed %d roles.",
                self.transaction_id, len(roles))
    return roles
@InstrumentCall(logger)
def get_all_import_roles(self, role, conf_files):
    """
    Get all the roles imported by the given role from the specified configuration files.
    : param role : The role to get imported roles for.
    : param conf_files : A list of configuration files to parse.
    : return : A list of roles imported by the given role (including the
        role itself), deduplicated; order is unspecified.
    """
    roles_data = {}
    logger.info("[transaction_id=%s] Getting all imported roles"
                " for role: %s", self.transaction_id, role)
    # Later conf files override earlier ones for the same role key.
    for conf_file in conf_files:
        parsed_role = self.parse_authorize_conf(conf_file)
        for key, value in parsed_role.items():
            if value:
                roles_data[key] = value

    def strip_role_prefix(role_dict):
        # Normalize "role_<name>" stanza keys to bare role names.
        return {key.replace("role_", ""): value
                for key, value in role_dict.items()}

    filtered_roles = strip_role_prefix(roles_data)
    logger.debug("[transaction_id=%s] Filtered roles data: %s",
                 self.transaction_id, filtered_roles)

    def get_nested_roles(role_dict, target_role):
        # Iterative DFS over the import graph.
        if target_role not in role_dict:
            logger.warning("[transaction_id=%s] Role %s not found in"
                           " roles data.", self.transaction_id,
                           target_role)
            return []
        # Fix: track visited roles — previously a cycle in importRoles
        # (A imports B, B imports A) made this loop run forever.
        visited = set()
        stack = [target_role]
        while stack:
            current_role = stack.pop()
            if current_role in visited:
                continue
            visited.add(current_role)
            if current_role in role_dict:
                stack.extend(role_dict[current_role])
        return list(visited)

    all_roles = get_nested_roles(filtered_roles, role)
    logger.info("[transaction_id=%s] Imported all roles for %s",
                self.transaction_id, role)
    return list(set(all_roles))
@InstrumentCall(logger)
def get_capabilities(self, file_paths, roles):
    """
    Get the capabilities for the given roles from the specified configuration files.
    : param file_paths : A list of file paths for the configuration files to parse.
    : param roles : A list of roles to get capabilities for.
    : return : A dictionary containing the capabilities for the given roles.
    """
    merged_capabilities = {}
    logger.info("[transaction_id=%s] Starting to get capabilities for"
                " roles: %s from files: %s", self.transaction_id,
                roles, file_paths)
    # Later (file, role) pairs override earlier ones on key collisions.
    for conf_path in file_paths:
        for role_name in roles:
            merged_capabilities.update(
                self.parse_conf_file(conf_path, role_name)
            )
    logger.info("[transaction_id=%s] Returning a dictionary containing"
                " the capabilities for the given roles",
                self.transaction_id)
    # Keep only explicit enabled/disabled settings.
    return {
        name: state
        for name, state in merged_capabilities.items()
        if state in ["enabled", "disabled"]
    }
@InstrumentCall(logger)
def get_native_capabilities(
    self,
    itsi_app_default_file_path,
    system_default_file_path,
    system_local_default_path,
    metric_ad_authorize_conf_file,
    role,
):
    """
    Get the native capabilities for the given role.

    A capability is "native" when it is granted directly by the ITSI app's
    default role_itoa_admin stanza and is not also present among the
    capabilities inherited through imported roles.

    : param itsi_app_default_file_path : Default file path of authorize.conf
    : param system_default_file_path : File path of the system default authorize.conf
    : param system_local_default_path : File path of the system local authorize.conf
    : param metric_ad_authorize_conf_file : File path of the metric AD authorize.conf
    : param role : Role to get native capabilities
    : return native_capabilities : Dict containing native capabilities of given role
    """
    # Hoisted: the same four conf files are consulted for both the role
    # imports and the inherited capabilities below.
    conf_files = [
        itsi_app_default_file_path,
        system_default_file_path,
        system_local_default_path,
        metric_ad_authorize_conf_file,
    ]
    logger.info("[transaction_id=%s] Parsing default capabilities"
                " from file: %s", self.transaction_id,
                itsi_app_default_file_path)
    default_capabilities = self.parse_conf_file(
        itsi_app_default_file_path, "role_itoa_admin"
    )
    logger.info("[transaction_id=%s] Default capabilities: %s",
                self.transaction_id, default_capabilities)
    logger.info("[transaction_id=%s] Getting all imported roles.",
                self.transaction_id)
    imported_roles = self.get_all_import_roles(role, conf_files)
    # Re-add the stanza prefix stripped by get_all_import_roles.
    all_imported_roles = ["role_" + value for value in imported_roles]
    logger.info("[transaction_id=%s] Imported roles: %s",
                self.transaction_id, all_imported_roles)
    # Fixed: this log line previously lacked the [transaction_id=...]
    # prefix used by every other log statement in this handler.
    logger.info("[transaction_id=%s] Getting capabilities for all"
                " imported roles.", self.transaction_id)
    inherited_capabilities = self.get_capabilities(
        conf_files,
        all_imported_roles,
    )
    logger.info("[transaction_id=%s] Inherited capabilities: %s",
                self.transaction_id, inherited_capabilities)
    # Native = directly granted and not inherited.
    native_capabilities = {
        key: value
        for key, value in default_capabilities.items()
        if key not in inherited_capabilities
    }
    logger.info("[transaction_id=%s] Native capabilities: %s",
                self.transaction_id, native_capabilities)
    return native_capabilities
@InstrumentCall(logger)
def get_modified_capabilities(self, role):
    """
    Get the modified capabilities based on the native capabilities and the updated capabilities.

    : param role : role for which capabilities have been modified
    : return : A list of modified capabilities (the role's current
        capabilities as reported by the authorization endpoint).
    : raises RESTException : if the roles endpoint cannot be queried or
        returns a non-200 status.
    """
    logger.info("[transaction_id=%s] Start retrieving modified"
                " capabilities for role: %s", self.transaction_id, role)
    endpoint = f'/services/authorization/roles/{role}'
    modified_native_capabilities = []
    try:
        response, content = rest.simpleRequest(
            endpoint,
            sessionKey=self.sessionkey,
            getargs={'output_mode': 'json'}
        )
        if response.status == 200:
            splunk_object_response = json.loads(content).get('entry')
            modified_native_capabilities = splunk_object_response[0].get('content').get('capabilities')
        else:
            logger.error(f'Failed to retrieve capabilities for role {role}. HTTP Status: {response.status}')
            raise RESTException(response.status, f'Failed to retrieve capabilities for role {role}. HTTP Status: {response.status}')
    except RESTException:
        # Already logged with the real HTTP status above. Previously the
        # blanket handler below caught this, double-logged it and re-wrapped
        # it, losing the status code.
        raise
    except Exception as e:
        logger.error(f'Received unexpected result {e}')
        # RESTException expects (statusCode, msg); use 500 for unexpected
        # failures so callers always see a numeric status.
        raise RESTException(500, f'Received unexpected result {e}') from e
    logger.info("[transaction_id=%s] Completed retrieval of modified"
                " capabilities for role: %s", self.transaction_id, role)
    return modified_native_capabilities
@InstrumentCall(logger)
def get_modified_capability_for_current_user(
    self,
    modified_native_capabilities,
    native_user_capabilities,
):
    """
    Determine the modified capabilities for the current user based on the native capabilities and the modified capabilities.
    : param modified_native_capabilities : A list of capabilities that have been modified from their native state.
    : param native_user_capabilities : A list of capabilities that are native to the user.
    : return : A list of capabilities that have been modified for the current user.
    """
    logger.info("Determining modified capabilities for the current user.")
    # Consider only capabilities explicitly marked enabled/disabled, then
    # keep those that are absent from the modified set.
    removed_capability = [
        name
        for name, state in native_user_capabilities.items()
        if state.lower() in ["enabled", "disabled"]
        and name not in modified_native_capabilities
    ]
    logger.info("Removed capabilities for the current user: %s",
                removed_capability)
    return removed_capability
def get_kpi_event_count(self, kpibasesearch, search_alert_earliest, alert_lag):
    """
    This will fetch the count of events by kpi base search id by making splunk search query
    : param kpibasesearch : kpi base search id
    : param search_alert_earliest : kpi base search search_alert_earliest
    : param alert_lag : kpi base search alert_lag
    : return : events count associated with the kpibasesearch
    """
    service = self.splunk_server.get_service()
    search_string = f'search `get_itsi_summary_index` kpibasesearch="{kpibasesearch}" | stats count'
    # Search window: [now - (earliest minutes + lag), now - lag], in seconds.
    window_span_seconds = search_alert_earliest * 60 + alert_lag
    kwargs = {
        "earliest_time": "-" + str(window_span_seconds) + "s",
        "latest_time": "-" + str(alert_lag) + "s",
    }
    logger.info(f'[transaction_id={self.transaction_id}] get_kpi_event_count: search_string = {str(search_string)} , params = {str(kwargs)}')
    job = service.jobs.create(search_string, **kwargs)
    # Poll until the search job completes.
    while not job.is_done():
        sleep(.2)
    events_count = 0
    # stats count yields a single row; the last row read wins either way.
    for row in results.ResultsReader(job.results()):
        events_count = row.get('count')
    logger.info(f'[transaction_id={self.transaction_id}] get_kpi_event_count: kpibasesearch= {kpibasesearch}, events_count = {events_count}')
    return int(events_count)