# Copyright (C) 2005-2025 Splunk Inc. All Rights Reserved.
# -*- coding: utf-8 -*-
"""
Basic utility module for itoa. Contains miscellaneous and ubiquitous base classes
and various generic constants and utilities.

ITOA-8115: remove dependencies of SA-ITOA from SA-ITSI-Licensechecker.
Manually copied to apps/SA-ITSI-Licensechecker/lib/ITOA/itoa_common.py
If you change this file, make sure to also update the copy.

The only differences in this file are the imports:
- SA_ITOA_app_common
instead of
- SA_ITSI_Licensechecker_app_common
"""

import csv
import datetime
import errno
import glob
import json
import os
import re
import shutil
import splunk
import sys
import time
import zipfile
from uuid import uuid1
import ipaddress
import http.client

import pytz

import splunk.rest as rest
from splunk.util import utc
from splunk import ResourceNotFound, AuthorizationFailed
from splunk.util import localTZ, getTimeOffset, normalizeBoolean

from splunk.clilib.bundle_paths import make_splunkhome_path
import splunk.clilib.cli_common as conf
from splunk.util import safeURLQuote

sys.path.append(make_splunkhome_path(['etc', 'apps', 'SA-ITOA', 'lib']))
sys.path.append(make_splunkhome_path(['etc', 'apps', 'SA-ITOA', 'lib', 'SA_ITOA_app_common']))

import itsi_path
import itsi_py3
import tzlocal
from croniter import croniter
from .setup_logging import setup_logging, logger as singleton_logger
from .itoa_exceptions import ItoaError, ItoaValidationError
from urllib.parse import quote_plus, quote, urlparse
from SA_ITOA_app_common.solnlib.splunk_rest_client import SplunkRestClient
from SA_ITOA_app_common.solnlib.server_info import ServerInfo
from SA_ITOA_app_common.splunklib.binding import HTTPError

def get_local_tz_offset_to_utc_sec():
    """
    Identifies the seconds offset to apply to an epoch to convert it from local server timezone to UTC

    @rtype: float
    @return: the offset in seconds of the local server's timezone from UTC
    """
    local_tz_offset = localTZ.utcoffset(localTZ)
    return float((local_tz_offset.days * 24 * 3600) + local_tz_offset.seconds)


def format_local_tzoffset(t=None):
    '''
    Render the current process-local timezone offset in standard -0800 type
    format for the present or at time t.
    '''
    offset_secs = getTimeOffset(t)

    plus_minus = "+"
    if offset_secs < 0:
        plus_minus = '-'
        offset_secs = abs(offset_secs)

    hours, rem_secs = divmod(offset_secs, 3600)
    minutes = rem_secs // 60
    return "%s%0.2i%0.2i" % (plus_minus, hours, minutes)

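# Illustrative behavior of format_local_tzoffset(), assuming the stated offsets
# come back from getTimeOffset() (values assumed for this sketch):
#
#   offset_secs = -28800  ->  '-0800'
#   offset_secs = 19800   ->  '+0530'   (divmod(19800, 3600) == (5, 1800); 1800 // 60 == 30)
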
def get_unix_timestamp_for_datetime(datetime_str, tzOffset):
    """
    Converts a datetime string like '2021-10-12 02:00:00 PDT', combined with a
    tzOffset such as '-0700', to a Unix epoch

    @rtype: float
    @return: Unix epoch timestamp; '' if datetime_str is None, -1 on parse failure
    """
    if datetime_str is None:
        return ''
    sub_str = datetime_str.split(" ")
    sub_str = sub_str[:-1]  # remove the timezone acronym (PDT)
    sub_str = " ".join(sub_str)
    tz_time = sub_str + tzOffset

    fmt = "%Y-%m-%d %H:%M:%S%z"
    try:
        datetime_obj = datetime.datetime.strptime(tz_time, fmt)
    except Exception as e:
        singleton_logger.error('Failed to evaluate the next_scheduled_time: %s' % e.args[0])
        return -1
    return datetime_obj.timestamp()

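# Illustrative example (values assumed for this sketch):
#
#     >>> get_unix_timestamp_for_datetime('2021-10-12 02:00:00 PDT', '-0700')
#     1634029200.0
#
# The 'PDT' token is dropped and the offset string drives the conversion, so the
# caller must supply an offset that matches the acronym.
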
def get_current_timestamp_utc():
    """
    Utility to get ISO formatted UTC value for current time
    """
    return datetime.datetime.now(utc).isoformat()


def get_current_utc_epoch():
    """
    Utility to get float UTC value for current time
    """
    return time.time()


def calculate_default_schedule_time(logger, frequency='daily', scheduled_day=0, scheduled_hour=1):
    """
    Calculate default scheduled time based on frequency, day and hour setting

    @type logger: logger object
    @param logger: logger object

    @type frequency: str
    @param frequency: 'daily' or 'weekly'

    @type scheduled_day: int
    @param scheduled_day: scheduled day (0 for Monday - 6 for Sunday)

    @type scheduled_hour: int
    @param scheduled_hour: scheduled hour, from 0 to 23

    @rtype: float
    @return: Next scheduled time in UTC epoch
    """

    def _next_weekday(d, weekday):
        days_ahead = weekday - d.weekday()
        if days_ahead < 0:  # Target day already happened this week
            days_ahead += 7
        return d + datetime.timedelta(days_ahead)

    current_time = datetime.datetime.now()
    if not is_valid_num(scheduled_hour) or scheduled_hour > 23 or scheduled_hour < 0:
        scheduled_hour = 0

    if frequency == 'daily':
        next_run_time = current_time.replace(hour=scheduled_hour, minute=0, second=0)
        if (current_time - next_run_time).total_seconds() >= 0:
            next_run_time += datetime.timedelta(days=1)

    elif frequency == 'weekly':
        next_day = _next_weekday(current_time, scheduled_day)
        next_run_time = next_day.replace(hour=scheduled_hour, minute=0, second=0)
        if (current_time - next_run_time).total_seconds() >= 0:
            next_run_time += datetime.timedelta(days=7)

    next_run_time_utc = (next_run_time - datetime.datetime(1970, 1, 1)
                         ).total_seconds() - get_local_tz_offset_to_utc_sec()

    localtime = time.localtime()
    if localtime.tm_isdst == 1:
        next_run_time_utc -= 60 * 60
        logger.debug('Perform a shift due to daylight saving.')

    return next_run_time_utc

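# Illustrative behavior (times assumed for this sketch): if "now" is Wednesday
# 2021-10-13 10:00 local time, then frequency='weekly', scheduled_day=0,
# scheduled_hour=1 targets the following Monday at 01:00 local time, while
# frequency='daily' with scheduled_hour=1 targets 01:00 tomorrow (01:00 today
# has already passed). The local result is converted to a UTC epoch via
# get_local_tz_offset_to_utc_sec(), with an extra hour subtracted under DST.
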
def parse_mod_timestamp(mod_timestamp):
    """
    Convert a Splunk mod_timestamp (KVStore representation) to a Unix timestamp

    :param mod_timestamp: Timestamp as represented by the mod_timestamp field in the KVStore
    :type mod_timestamp: string

    :return: Standard Unix timestamp
    :rtype: float
    """
    return datetime.datetime.strptime(mod_timestamp, '%Y-%m-%dT%H:%M:%S.%f%z').timestamp()

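# Illustrative example (value assumed for this sketch):
#
#     >>> parse_mod_timestamp('2023-01-15T10:30:00.000000+00:00')
#     1673778600.0
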
"""
|
|
A process-wide cache to save if a feature is enabled or not.
|
|
Key is the feature name, value is True(enabled) or False(disabled).
|
|
It will prevent retrieving object at high cost for multiple times.
|
|
"""
|
|
feature_enablement_cache = {}
|
|
|
|
|
|
class JsonPathElementNotArrayException(Exception):
    pass


class JsonPathElement(object):
    """
    A JSON Path from conf looks like `entry.{0}.content`
    It indicates the path to traverse to a pertinent blob.
    i.e.
    1. First read the value key'ed by `entry`.
    2. This value is an array indicated by `{0}` and in it, the value
       we care about is the 1st indicated by 0.
    3. Next, fetch the value key'ed by `content`.

    Usage::
        >>> path = 'entry.{0}.content'
        >>> elems = path.split('.')
        >>> for e in elems:
        ...     json_elem = JsonPathElement(e)
        ...     if json_elem.is_array():
        ...         # retrieve index'ed blob
        ...         pass
        ...     else:
        ...         # elem is dict, retrieve it, do stuff
        ...         pass
    """

    def __init__(self, elem):
        # reject non-strings as well as empty/whitespace-only strings
        if not isinstance(elem, itsi_py3.string_type) or not elem.strip():
            raise TypeError('Invalid path element. %s' % elem)

        self.elem = elem

        # let's try and make "match groups" if the element is an array;
        # {1} or {0} are examples. There will be only one capture group:
        # everything preceding and trailing the numeral is not captured.
        array_pattern = re.compile('^(?:[a-zA-Z]*{)([0-9]*)(?:})$')
        self.array_match = re.match(array_pattern, self.elem)

    def __str__(self):
        return self.elem

    def is_array(self):
        return True if self.array_match else False

    def get_array_index(self):
        """
        returns the index in the path which corresponds to an array
        Ex: if elem = `{1}`, return `1`
        @rtype: int
        @returns: an integer corresponding to the array index

        @raises JsonPathElementNotArrayException if element is not an index
        """
        if not self.is_array():
            raise JsonPathElementNotArrayException('Element `%s` is not an array' % self.elem)
        # if elem is indeed an array, we are guaranteed 1 element in the group, OK to
        # access the group using index.
        return int(self.array_match.group(1))

    def is_dict(self):
        return not self.is_array()

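# A minimal traversal sketch using JsonPathElement (the sample data and helper
# name are assumptions for illustration, not part of the original module):
#
#     def _resolve_json_path(blob, path):
#         for part in path.split('.'):
#             elem = JsonPathElement(part)
#             if elem.is_array():
#                 blob = blob[elem.get_array_index()]   # index into a list
#             else:
#                 blob = blob[elem.elem]                # descend into a dict
#         return blob
#
#     # _resolve_json_path({'entry': [{'content': {'x': 1}}]}, 'entry.{0}.content')
#     # -> {'x': 1}
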
def get_session_user(session_key):
    """
    Get current username for given session key
    @param session_key: splunkd session key
    @type session_key: basestring

    @return username: current user logged into the system
    @rtype: str

    @raise TypeError: if invalid session_key
    @raise AttributeError: if user is not logged into system
    """
    if not isinstance(session_key, itsi_py3.string_type):
        raise TypeError('Invalid session key.')

    resp, content = rest.simpleRequest(
        '/authentication/current-context',
        getargs={'output_mode': 'json'},
        sessionKey=session_key,
        raiseAllErrors=False)
    content = json.loads(content)
    return content['entry'][0]['content']['username']


# This method is not used anywhere but it can help sometime
def itsi_version_matches_with_kv(session_key, local_itsi_version):
    from itsi.itsi_utils import ITOAInterfaceUtils
    kv_itsi_version, id_ = ITOAInterfaceUtils.get_version_from_kv(session_key)
    return local_itsi_version == kv_itsi_version


def is_shc_member(session_key, logger=None):
    # fall back to the module logger so retries can be logged when no logger is passed
    logger = logger if logger else singleton_logger
    retry = 1
    while retry <= 10:
        try:
            info = ServerInfo(session_key)
            is_shc_member = info.is_shc_member()
            return is_shc_member
        except Exception as e:
            # during stack initialization a call to is_shc_member may fail with the following error:
            #
            # This member has marked the connection to the search head captain as down until it
            # receives a successful heartbeat.
            #
            # catch the Exception and wait until is_shc_member returns a result successfully
            logger.info('Retry %d of 10. Exception occurred in is_shc_member() function %s.', retry, e)
            # Incremental delay to reduce the number of calls.
            delay = retry * 5
            time.sleep(delay)
            retry += 1
    logger.info('Failed to retrieve is_shc_member() function result after 10 attempts.')


# This method is not used anywhere but it can help sometime
def get_shc_members(session_key, logger=None):
    # fall back to the module logger so retries can be logged when no logger is passed
    logger = logger if logger else singleton_logger
    retry = 1
    while retry <= 10:
        try:
            info = ServerInfo(session_key)
            member_info = info.get_shc_members()
            return member_info
        except Exception as e:
            # during stack initialization a call to get_shc_members may fail with the following error:
            #
            # This member has marked the connection to the search head captain as down until it
            # receives a successful heartbeat.
            #
            # catch the Exception and wait until get_shc_members returns a result successfully
            logger.info('Retry %d of 10. Exception occurred in get_shc_members() function %s.', retry, e)
            # Incremental delay to reduce the number of calls.
            delay = retry * 5
            time.sleep(delay)
            retry += 1
    logger.info('Failed to retrieve get_shc_members() function result after 10 attempts.')


def modular_input_should_run(session_key, logger=None):
    """
    Determine if a modular input should run or not.
    Run if and only if:
    1. Node is not a SHC member
    2. Node is an SHC member and is Captain
    @return True if condition satisfies, False otherwise
    """
    if not isinstance(session_key, itsi_py3.string_type) or not session_key.strip():
        raise ValueError('Invalid session key.')

    info = ServerInfo(session_key)
    logger = logger if logger else singleton_logger

    if not is_shc_member(session_key, logger=logger):
        return True

    timeout = 300  # 5 minutes
    while timeout > 0:
        try:
            # captain election can take time on a rolling restart.
            if info.is_captain_ready():
                break
        except HTTPError as e:
            if e.status == 503:
                logger.warning(
                    'Search head cluster may be initializing on node `%s`. Captain is not ready. Try again.',
                    info.server_name)
            else:
                logger.exception('Unexpected exception on node `%s`.', info.server_name)
                raise
        time.sleep(5)
        timeout -= 5

    # we can fairly be certain that even after 5 minutes if `is_captain_ready`
    # is false, there is a problem
    if not info.is_captain_ready():
        raise Exception('Error. Captain is not ready even after 5 minutes. node=`%s`.' % info.server_name)

    return info.is_captain()


def get_shcluster_total_members_count(session_key, logger):
    response, content = rest.simpleRequest('/services/shcluster/member/members',
                                           sessionKey=session_key,
                                           getargs={'output_mode': 'json'},
                                           raiseAllErrors=False)
    if response and response.status != 200:
        raise Exception('Failed to get SHCluster members')
    content = json.loads(content)
    return content['paging']['total']


def get_shcluster_up_peers(session_key, logger):
    response, content = rest.simpleRequest('/services/shcluster/status',
                                           sessionKey=session_key,
                                           getargs={'output_mode': 'json'},
                                           raiseAllErrors=False)
    if response and response.status != 200:
        raise Exception('Failed to get SHCluster status')
    content = json.loads(content)
    peers = content['entry'][0]['content']['peers']
    up_peer_count = 0
    for peer_id, peer_info in peers.items():
        if peer_info['status'] == 'Up':
            up_peer_count += 1

    return up_peer_count


def get_peers(logger, session_key, cur_host_name, include_current_host):
    """
    Gets the SHC peer IP addresses

    @param cur_host_name: current machine's host name
    @param include_current_host: whether to include the current host in the result
    @return: list of peer IP addresses
    """
    response, content = rest.simpleRequest('/services/shcluster/status',
                                           sessionKey=session_key,
                                           getargs={'output_mode': 'json'},
                                           raiseAllErrors=False)
    if response and response.status != 200:
        # no peers or shc is not supported
        return []
    content = json.loads(content)
    peers = []
    peers_list = content['entry'][0]['content']['peers']
    for _, peer_info in peers_list.items():
        peer_host_name = peer_info['label']
        derived_host_address = ''
        peer_mgmt_uri = urlparse(peer_info['mgmt_uri'])
        if is_valid_ip(peer_mgmt_uri.hostname):
            derived_host_address = peer_mgmt_uri.hostname
        else:
            peer_mgmt_uri_alias = urlparse(peer_info['mgmt_uri_alias'])
            if is_valid_ip(peer_mgmt_uri_alias.hostname):
                derived_host_address = peer_mgmt_uri_alias.hostname
            else:
                derived_host_address = peer_host_name

        if include_current_host:
            if derived_host_address:
                peers.append(derived_host_address)
        else:
            if peer_host_name != cur_host_name:
                if derived_host_address:
                    peers.append(derived_host_address)
    peers_list_info = ", ".join(peers)
    logger.info(f'Peers list info is {peers_list_info}')
    return peers


def is_shcluster_restarting(session_key, logger):
    members_count = get_shcluster_total_members_count(session_key, logger)
    up_peer_count = get_shcluster_up_peers(session_key, logger)
    if up_peer_count < members_count:
        logger.error('Total SHCluster peers: %s, Total Up peers: %s, '
                     'Exiting Modular Input as all peers are not up', members_count, up_peer_count)
        return True
    else:
        logger.info('Total SHCluster peers: %s, Total Up peers: %s, '
                    'SHCluster is not restarting. Proceeding with Modular Input.', members_count, up_peer_count)
        return False


def is_noah_enabled(session_key, logger):
    """
    Returns True on a Noah-enabled environment
    """
    try:
        response, content = rest.simpleRequest(
            '/services/configs/conf-server/noahService',
            getargs={'output_mode': 'json'},
            sessionKey=session_key,
            raiseAllErrors=False,
            rawResult=True
        )
        status = response.status
        if status == 200:
            logger.info('Detected Noah environment.')
            return True
        else:
            logger.info('Noah environment not detected.')
            return False
    except Exception as e:
        logger.error(e)
        return False


def get_itoa_logger(logger_name, file_name=None):
    """
    Get a logger instance.
    """
    if isinstance(logger_name, itsi_py3.string_type) and len(logger_name) > 0:
        LOGGER = logger_name
    else:
        LOGGER = 'itoa.common'

    if isinstance(file_name, itsi_py3.string_type) and len(file_name) > 0:
        FILE = file_name
    else:
        FILE = 'itsi.log'

    return setup_logging(FILE, LOGGER)


def get_log_message_for_exception(exception_obj):
    """
    Extracts the error message from any type of exception object for user display
    :param exception_obj: exception object, any type
    :return: string to be used in log messages
    """
    if exception_obj.args is not None and len(exception_obj.args) != 0:
        exception = exception_obj.args[0]
    else:
        exception = repr(exception_obj)
    return exception


def get_object(object_):
    """
    given an object, try to get a dict/list type
    merely a wrapper to json.loads(); it doesn't raise and simply returns None if
    the input is invalid.
    @param object_: input object, any type
    @return dict/list if valid, None otherwise
    """
    rval = None
    if isinstance(object_, itsi_py3.string_type):
        try:
            rval = json.loads(object_)
        except Exception:
            pass
    elif isinstance(object_, dict):
        rval = object_
    elif isinstance(object_, list):
        rval = object_
    return rval


def extract(objects, key, dedup=True):
    """
    given a list of objects, extract requested values given key,
    and return a list
    @type objects: dict/list
    @param objects: objects to iterate over and extract id from
    @type key: basestring
    @param key: `key` that corresponds to the id
    @type dedup: bool
    @param dedup: flag to indicate whether or not to dedup results
    @return a list of object ids
    @raises Exception
    """
    ids_ = []

    # always work with list
    objects = get_object(objects)
    if not objects:
        return ids_

    if isinstance(objects, dict):
        objects = [objects]
    if not isinstance(objects, list):
        raise Exception('Expecting `objects` to be list/dict type and not `%s`' % type(objects).__name__)
    for i in objects:
        if i.get(key):
            ids_.append(i[key])
    if dedup:
        return list(set(ids_))
    return ids_

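# Illustrative examples (values assumed for this sketch):
#
#     >>> get_object('{"a": 1}')
#     {'a': 1}
#     >>> get_object('not json') is None
#     True
#     >>> extract([{'_key': 'a'}, {'_key': 'b'}, {'_key': 'a'}], '_key', dedup=False)
#     ['a', 'b', 'a']
#
# With dedup=True the result is passed through set(), so duplicates are dropped
# but the ordering of the returned ids is not guaranteed.
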
def get_supported_itoa_operations():
    """
    Method returns a list of supported operations on ITOA object types...
    """
    return ['read', 'write', 'delete']


def get_privatizeable_object_types():
    """
    method that returns a list of object types that can have a `private`
    ownership vs `public` ownership
    """
    return ['home_view', 'glass_table', 'deep_dive', 'event_management_state']


def massage_string_array(string_array, separator=','):
    """
    Input from the frontend is not always in the form we expect,
    so normalize it to a list and trim surrounding whitespace from each element.
    @return - Always an array
    """
    if string_array is None:
        return []
    elif isinstance(string_array, itsi_py3.string_type):
        # Convert from the string specification to something we can process
        string_array = string_array.split(separator)
    if not isinstance(string_array, list):
        raise Exception('Unable to convert string_array - passed in a {0}'.format(
            str(string_array.__class__)))
    # Strip the leading and trailing whitespace
    string_array = [i.strip() for i in string_array]
    return string_array

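# Illustrative examples (values assumed for this sketch):
#
#     >>> massage_string_array('a, b ,c')
#     ['a', 'b', 'c']
#     >>> massage_string_array(None)
#     []
#     >>> massage_string_array(['x ', ' y'])
#     ['x', 'y']
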
def validate_json(log_prefix, json_data):
    """
    Quick and dirty parsing/json validation.

    @return: Parsed json dict/list (or unaltered dict if it was a dict originally)
    @rtype: dict or list parsed json
    """
    if json_data is None:
        raise Exception(log_prefix + 'Missing json_data')
    elif is_valid_dict(json_data):
        return json_data
    elif is_valid_list(json_data):
        return json_data

    try:
        data = json.loads(json_data)
    except (TypeError, ValueError):
        # ValueError covers json.JSONDecodeError for strings that are not valid JSON
        singleton_logger.exception('Unable to parse as JSON data: Received %s', json_data.__class__.__name__)
        raise
    return data


def remove_keys_from_dict(keys_as_list, dictionary):
    """
    given a list of keys and a dictionary, remove the key-value pairs from it
    @param keys_as_list: list of keys
    @param dictionary: dict under consideration
    @return list of keys that were removed
    """
    removed = []
    if isinstance(keys_as_list, list) and isinstance(dictionary, dict):
        for key in keys_as_list:
            attribute = dictionary.pop(key, None)
            # note: keys holding falsy values (0, '', None, ...) are popped
            # but not reported in the returned list
            if attribute:
                removed.append(key)
    return removed

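# Illustrative example (values assumed for this sketch):
#
#     >>> d = {'a': 1, 'b': 0, 'c': 2}
#     >>> remove_keys_from_dict(['a', 'b', 'z'], d)
#     ['a']
#     >>> d
#     {'c': 2}
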
def is_valid_dict(data):
    return isinstance(data, dict)


def is_valid_list(data):
    return isinstance(data, list)


def is_valid_num(data):
    return isinstance(data, int)


def is_string_numeric(data):
    try:
        float(data)
        return True
    except (ValueError, TypeError):
        return False


def is_string_numeric_int(data):
    try:
        int(data)
        return True
    except (ValueError, TypeError):
        return False


def get_csv_dict_writer(out_buf, fieldnames, is_write_header=True):
    """
    Get csv DictWriter object after initializing the header

    @type out_buf: StringIO object (refer self.get_string_buffer for more info)
    @param out_buf: output buffer object

    @type fieldnames: list
    @param fieldnames: csv header field names

    @type is_write_header: bool
    @param is_write_header: flag to write header into buffer
    @return: csv.DictWriter over the deduplicated field names
    """
    deduped_fieldnames = []
    fields_seen = set()
    for fieldname in fieldnames:
        if fieldname in fields_seen:
            continue
        deduped_fieldnames.append(fieldname)
        fields_seen.add(fieldname)

    writer = csv.DictWriter(out_buf, fieldnames=deduped_fieldnames)
    if is_write_header:
        writer.writeheader()
    return writer


def is_stats_operation(statsop):
    """
    Checks an input operator against the list of ITOA supported statistical operators.
    percNN is not in the list; the percNN pattern is checked separately.
    @type statsop: string
    @param statsop: input stats operator
    @type return: boolean
    @param return: True if the stats operator is supported, False otherwise.
    """
    statsop = statsop.strip().lower()
    percNN = re.compile(r'^perc\d{1,2}$')
    stats_operation = ['avg', 'count', 'dc', 'max', 'min', 'sum', 'stdev', 'median',
                       'duration', 'latest', 'earliest']
    try:
        if any([
            statsop in stats_operation,
            percNN.search(statsop)
        ]):
            return True
    except Exception:
        pass

    return False


def is_valid_perc(number):
    """
    Utility function to check a percNN string: whether the NN
    falls into the range between 1 and 99.
    @type number: string
    @param number: number after the perc string
    @type return: boolean
    @param return: True if NN is within the expected range, False otherwise.
    """
    if is_string_numeric(number):
        perc = int(float(number))
        return 99 >= perc >= 1

    return False

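# Illustrative examples (values assumed for this sketch):
#
#     >>> is_stats_operation(' Avg ')
#     True
#     >>> is_stats_operation('perc95')
#     True
#     >>> is_valid_perc('95')
#     True
#     >>> is_valid_perc('0')
#     False
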
def normalize_bool_flag(flag, default=False):
    """
    Normalizes a boolean flag to a python bool variable.
    @param flag: input flag
    @param default: in case of no value or unexpected, what to default to
    @return: bool
    """
    is_true = default
    if isinstance(flag, bool):
        is_true = flag
    elif isinstance(flag, itsi_py3.string_type):
        if flag.strip().lower() == 'true' or flag.strip() == '1':
            is_true = True
        elif flag.strip().lower() == 'false' or flag.strip() == '0':
            is_true = False
    elif isinstance(flag, int):
        if flag == 1:
            is_true = True
        elif flag == 0:
            is_true = False
    return is_true

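# Illustrative examples (values assumed for this sketch):
#
#     >>> normalize_bool_flag(' True ')
#     True
#     >>> normalize_bool_flag('0')
#     False
#     >>> normalize_bool_flag('maybe', default=True)   # unrecognized -> default
#     True
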
def convert_to_bytes(input_string):
    """
    converts an incoming size string such as '10M' or '2G' to a byte count,
    using binary units (K=2**10, M=2**20, ...); returns 0 if the input is not valid

    :param input_string: string
    :return: int
    """
    string, unit = input_string[:-1], input_string[-1].upper()
    mapping = {'K': 10, 'M': 20, 'G': 30, 'T': 40, 'P': 50}

    # if string is not an integer or the unit is not in the mapping
    if not string.isdigit() or unit not in mapping:
        return 0
    return int(string) << mapping[unit]

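# Illustrative examples (values assumed for this sketch):
#
#     >>> convert_to_bytes('10M')
#     10485760
#     >>> convert_to_bytes('2G')
#     2147483648
#     >>> convert_to_bytes('10X')
#     0
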
def normalize_num_field(json_data, field, numclass=int):
    """
    Normalizes a field in a JSON payload to the given numeric class
    NOTE: Will not raise an exception on change,
    make sure to validate that this field was updated

    @type json_data: dict
    @param json_data: JSON payload

    @type field: string
    @param field: field to normalize

    @type numclass: Class
    @param numclass: The numerical class to cast the object as, default int

    @rtype: None
    @return: JSON payload is updated if needed
    """
    if ((field in json_data)
            and ((is_valid_num(json_data[field])
                  or is_string_numeric(json_data[field])))):
        # Always cast as float first
        json_data[field] = numclass(float(json_data.get(field, 0)))
    # Else not a numeric type, do not normalize

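# Illustrative example (values assumed for this sketch):
#
#     >>> payload = {'count': '3.7', 'label': 'abc'}
#     >>> normalize_num_field(payload, 'count')    # casts via float, then int
#     >>> payload
#     {'count': 3, 'label': 'abc'}
#     >>> normalize_num_field(payload, 'label')    # non-numeric: left untouched
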
def intersection_of_arrays(dst, src):
    """
    Compare two arrays and return the intersection of dst and src
    Note: elements of both arrays must be hashable
    :param {list} dst: list of items
    :param {list} src: list of items
    :return {list}: only the elements of dst which also exist in src
    """
    if not is_valid_list(dst) or not is_valid_list(src):
        return []
    return list(set(dst).intersection(set(src)))


def is_equal_lists(dst, src):
    """
    Check if the elements of two arrays are the same or not,
    ignoring order and duplicates
    Note: elements of both arrays must be hashable
    :param {list} dst: list of items
    :param {list} src: list of items
    :return {bool}: True if both lists contain the same set of elements
    """
    if not is_valid_list(dst) or not is_valid_list(src):
        return False
    dst_set = frozenset(dst)
    src_set = frozenset(src)
    return dst_set == src_set

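# Illustrative examples (values assumed for this sketch):
#
#     >>> sorted(intersection_of_arrays([1, 2, 3], [2, 3, 4]))
#     [2, 3]
#     >>> is_equal_lists([1, 2, 2], [2, 1])    # duplicates and order are ignored
#     True
#     >>> is_equal_lists([1, 2], 'not a list')
#     False
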
def dict_to_search_field_value(dictionary):
    assert is_valid_dict(dictionary)
    result = ''
    first_iter = True
    for key, value in list(dictionary.items()):
        if first_iter:
            first_iter = False
        else:
            result += ','
        result += key + '='
        if isinstance(value, itsi_py3.string_type):
            result += value
        elif is_valid_dict(value):
            result += dict_to_search_field_value(value)
        elif is_valid_list(value):
            parsed_items = []
            for item in value:
                if is_valid_dict(item):
                    parsed_items.append(dict_to_search_field_value(item))
                elif isinstance(item, itsi_py3.string_type):
                    parsed_items.append(item)
                else:
                    parsed_items.append(str(item))
            result += ','.join(parsed_items)
        else:
            result += str(value)
    return result

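# Illustrative example (values assumed for this sketch; dicts preserve insertion
# order on Python 3.7+):
#
#     >>> dict_to_search_field_value({'a': '1', 'b': {'c': '2'}, 'd': [3, '4']})
#     'a=1,b=c=2,d=3,4'
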
# matches strings that are empty or all-whitespace; the unicode flag is passed
# explicitly instead of the inline (?u), which may not appear mid-pattern on
# newer Python versions
regex_all_spaces = re.compile(r'^\s*$', re.UNICODE)


def is_valid_str(data):
    return isinstance(data, itsi_py3.string_type) and (len(data) > 0) and (not re.match(regex_all_spaces, data))


regex_invalid_name_characters = re.compile('[="\']+')
regex_invalid_field_characters = re.compile('[="\'.,]+')
regex_invalid_value_characters = re.compile('["\']+')


def is_valid_name(data):
    """
    Checks to see if the string passed in is a valid string and does
    not contain invalid characters
    @param data: The string to validate name characters against
    @type data: string
    """
    return is_valid_str(data) and (not re.search(regex_invalid_name_characters, data))


def is_valid_field_name(data):
    """
    Checks to see if the string passed in is a valid string and does
    not contain invalid characters relevant for a field name
    @param data: The string to validate field name characters against
    @type data: string
    """
    return is_valid_str(data) and (not re.search(regex_invalid_field_characters, data))


def is_valid_field_value(data):
    """
    Checks to see if the string passed in is a valid string and does
    not contain invalid characters relevant for a field value
    @param data: The string to validate field value characters against
    @type data: string
    """
    return is_valid_str(data) and (not re.search(regex_invalid_value_characters, data))


def squish_whitespace(squishy, replace_with_empty_string=False):
    """
    Eliminate contiguous whitespace and newlines and replace them with a single space
    (or the empty string, if replace_with_empty_string is set).
    Also eliminates leading and trailing whitespace.

    @param squishy: string to squish whitespace in
    @type squishy: str
    @return: string with all whitespace and newlines reduced to a single space
    @rtype: str
    """
    return re.sub(r'\s+', '' if replace_with_empty_string else ' ', squishy).strip()

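# Illustrative examples (values assumed for this sketch):
#
#     >>> squish_whitespace('  hello \n\t world  ')
#     'hello world'
#     >>> squish_whitespace('a b\nc', replace_with_empty_string=True)
#     'abc'
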
def post_splunk_user_message(message, session_key=None, name=None, severity='info', namespace='SA-ITOA',
                             owner='nobody', **query):
    """
    Using the Messenger API passed to us by core, post a message to the user through the messages
    endpoint

    @param message: Message to post to API
    @type message: String
    @param session_key: Splunk user session key
    @type session_key: String
    @param name: ID to uniquely identify message. This allows updating and deletion of previously posted Splunk
        messages.
    @type name: String
    @param severity: Severity level of Splunk message
    @type severity: String
    @param namespace: App namespace for message
    @type namespace: String
    @param owner: Splunk owner for REST endpoint
    @type owner: String

    @return: Did message successfully post?
    @rtype: Boolean
    """
    if not isinstance(message, itsi_py3.string_type):
        return False
    message = message if len(message) <= 500 else message[0:499] + '...'
    try:
        msg = SplunkRestClient(session_key=session_key, owner=owner, app=namespace).messages
        if name is None:
            name = str(uuid1())
        return msg.post(name=name, value=message, severity=severity, **query)
    except Exception as e:
        # Best effort, log and continue
        singleton_logger.exception(e)
    return False


def get_conf_rest_path(conf_name, app='itsi', host_base_uri=''):
    if host_base_uri:
        return host_base_uri + '/servicesNS/nobody/' + app + '/configs/conf-' + conf_name
    else:
        return rest.makeSplunkdUri() + 'servicesNS/nobody/' + app + '/configs/conf-' + conf_name


def get_conf(session_key, conf_name, search=None, count=0, app='itsi', host_base_uri='', getargs=None):
    if getargs is None:
        getargs = {'output_mode': 'json', 'count': count}
    if is_valid_str(search):
        getargs['search'] = search

    response, content = rest.simpleRequest(
        get_conf_rest_path(conf_name, app, host_base_uri=host_base_uri),
        method='GET',
        getargs=getargs,
        sessionKey=session_key,
        raiseAllErrors=False
    )
    return {'response': response, 'content': content}


def get_conf_stanza(session_key, conf_name, stanza_name, app='itsi'):
    getargs = {'output_mode': 'json'}
    rest_path = get_conf_rest_path(conf_name, app) + '/' + quote_plus(stanza_name)
    response, content = rest.simpleRequest(
        rest_path,
        method='GET',
        getargs=getargs,
        sessionKey=session_key,
        raiseAllErrors=False
    )
    return {'response': response, 'content': content}


def get_hec_token(session_key, token_name):
    """
    This method sends a REST request to the data inputs endpoint
    to get the hec_token to be used by the HEC event writer

    @type session_key: basestring
    @param session_key: the session key

    @type token_name: basestring
    @param token_name: token_name

    @rtype: basestring
    @return: the HEC token
    """
    encoded_token_name = quote(token_name)
    resp, content = rest.simpleRequest('/servicesNS/nobody/SA-ITOA/data/inputs/http/' + encoded_token_name,
                                       getargs={"output_mode": "json"},
                                       sessionKey=session_key)
    if resp.status != 200:
        raise Exception(
            'Unable to reach the data inputs endpoint, failed with status code="{0}".'.format(resp.status))
    content = json.loads(content)
    if 'entry' not in content or not len(content['entry']):
        raise Exception(
            'Could not find token value in response from server for token name="{0}".'.format(token_name))
    content = content['entry'][0]['content']
    return content.get('token', None)


def get_hec_uri(session_key):
    """
    Gets the URI with which to communicate with HEC

    @type session_key: basestring
    @param session_key: the session key

    @rtype: object
    @return: the data to be used for the HEC event writer
    """
    resp, content = rest.simpleRequest('/servicesNS/nobody/SA-ITOA/data/inputs/http/http',
                                       getargs={"output_mode": "json"},
                                       sessionKey=session_key)
    if resp.status != 200:
        raise Exception(
            'Unable to reach the data inputs endpoint, failed with status code="{0}".'.format(resp.status))
    content = json.loads(content)
    if 'entry' not in content or not len(content['entry']):
        # We return default values if we don't have a proper data format
        content = {
            'host': 'localhost',
            'port': '8088',
            'enableSSL': '1'
        }
    else:
        content = content['entry'][0]['content']
    # Fun Fact! the host value from http configuration is often unresolvable, the accepted method is to always use the
    # splunkd management host (typically 127.0.0.1) for all HEC operations
    splunk_uri = splunk.getLocalServerInfo()  # Returns splunkd uri, which can be in an IPv6 compatible format
    scheme_host, port = splunk_uri.rsplit(':', 1)  # Becomes ['https://[::1]', '8089'] or ['http://127.0.0.1', '8089']
    scheme, host = scheme_host.split(':', 1)  # Becomes ['https', '//[::1]'] or ['http', '//127.0.0.1']
    port = content.get('port', '8088')  # Replace splunkd port with HEC port
    enable_ssl = normalizeBoolean(content.get('enableSSL', '1'))
    scheme = 'https' if enable_ssl else 'http'

    return scheme + ":" + host + ":" + port


def toggle_mod_input(session_key, app, data_input, input_name, action):
    """
    Toggles the modular input with the provided action

    @type session_key: basestring
    @param session_key: the session key

    @type app: String
    @param app: app context

    @type data_input: String
    @param data_input: Modular input name

    @type input_name: String
    @param input_name: Modular input stanza name

    @type action: String
    @param action: Action to be performed (enable/disable)

    @rtype: object
    @return: response
    """
    postargs = {'output_mode': 'json'}
    rest_path = rest.makeSplunkdUri() + 'servicesNS/nobody/' + app + '/data/inputs/' + data_input + '/' + input_name + '/' + action
    response, content = rest.simpleRequest(
        rest_path,
        method='POST',
        postargs=postargs,
        sessionKey=session_key,
        raiseAllErrors=False
    )
    return {'response': response, 'content': json.loads(content)['entry'][0]['content']}


def mod_input_reload(session_key, app, data_input):
    """
    Reloads the modular input

    @type session_key: basestring
    @param session_key: the session key

    @type app: String
    @param app: app context

    @type data_input: String
    @param data_input: Modular input name

    @rtype: object
    @return: response
    """
    postargs = {'output_mode': 'json'}
    rest_path = rest.makeSplunkdUri() + 'servicesNS/nobody/' + app + '/data/inputs/' + data_input + '/_reload'
    response, content = rest.simpleRequest(
        rest_path,
        method='POST',
        postargs=postargs,
        sessionKey=session_key,
        raiseAllErrors=False
    )
    return {'response': response, 'content': json.loads(content)}


def get_clear_password(logger, session_key, realm_name, username, app):
    """
    Get the actual decoded password from passwords.conf

    @type realm_name: str
    @param realm_name: realm the credential is stored under

    @type username: str
    @param username: username

    @rtype: str
    @return: decoded password, or None on failure
    """
    try:
        realm_user = realm_name + ':' + username
        uri = safeURLQuote(f'/servicesNS/nobody/{app}/storage/passwords/{realm_user}')

        res, content = rest.simpleRequest(uri, getargs={'output_mode': 'json'},
                                          sessionKey=session_key)

        if res.status in (200, 201):
            logger.info(
                f'Password is fetched successfully for account={username}')
        else:
            logger.error(
                f'Error in getting password from passwords.conf. response={res} content={content}')
            return None
        if not content:
            logger.error('content was not returned.')
            raise Exception('No content returned')

        parsed_content = json.loads(content)
        clear_password = parsed_content.get('entry', [])[0].get('content', {}).get('clear_password', {})
        clear_password = json.loads(clear_password)

        return clear_password

    except Exception as e:
        logger.exception(f'An error occurred while fetching the password: {e}')
        return None


def is_feature_enabled(feature_name, session_key=None, reload=False):
    """
    Check if an ITSI Feature is enabled or not.
    :param feature_name: the feature name
    :param session_key: the session key used to establish the connection to splunkd.
    :param reload: Set to True for testing only. Forces reloading from the config file.
        Usually we update config after the cache has already been loaded (especially during testing)
    :return: True if the feature is enabled, False if not enabled.
    """
    if feature_name in feature_enablement_cache and (not reload):
        return feature_enablement_cache[feature_name]

    if session_key is None:
        singleton_logger.error('Since the feature cache does not have the feature,'
                               ' you need to provide session_key to load it.')
        return False

    stanzas = get_conf(session_key, 'app_common_flags')
    contents = json.loads(stanzas['content'])
    # Load all feature enablement information into the cache
    for content in contents['entry']:
        result = False if content['content']['disabled'] else True
        feature_enablement_cache[content['name']] = result

    if feature_name not in feature_enablement_cache:
        singleton_logger.info('Feature %s not found in app_common_flags.conf!', feature_name)
        return False

    return feature_enablement_cache[feature_name]


def get_conf_stanza_single_entry(session_key, conf_name, stanza_name, entry_name, host_base_uri=''):
    uri = host_base_uri + '/servicesNS/nobody/SA-ITOA/properties/' + conf_name + '/' + stanza_name + '/' + entry_name
    response, content = rest.simpleRequest(
        uri,
        method="GET",
        sessionKey=session_key,
        getargs={'output_mode': 'json'},
        raiseAllErrors=False
    )
    return {'response': response, 'content': content}


def create_conf_stanza(session_key, conf_name, conf_stanza, app='itsi'):
    postargs = conf_stanza
    postargs['output_mode'] = 'json'
    response, content = rest.simpleRequest(
        get_conf_rest_path(conf_name, app),
        method='POST',
        postargs=postargs,
        sessionKey=session_key,
        raiseAllErrors=True
    )
    return {'response': response, 'content': content}


def update_conf_stanza(session_key, conf_name, conf_stanza, app='itsi'):
    postargs = conf_stanza
    postargs['output_mode'] = 'json'
    rest_path = get_conf_rest_path(conf_name, app) + '/' + quote_plus(conf_stanza.get('name', ''))
    del postargs['name']
    response, content = rest.simpleRequest(
        rest_path,
        method='POST',
        postargs=postargs,
        sessionKey=session_key,
        raiseAllErrors=True
    )
    return {'response': response, 'content': content}


def delete_conf_stanza(session_key, conf_name, conf_stanza_name, app='itsi'):
    response, content = rest.simpleRequest(
        get_conf_rest_path(conf_name, app) + '/' + quote_plus(conf_stanza_name),
        method='DELETE',
        sessionKey=session_key,
        raiseAllErrors=True
    )
    return {'response': response, 'content': content}


def get_saved_search_kpi_setting(session_key):
    """
    Checks the itsi_settings conf file to see if
    synced kpi saved search scheduling is disabled
    @type session_key: basestring
    @param session_key: Splunk session key

    @type return: bool
    @param return: whether sync scheduling of KPI saved searches is disabled or not (True means disabled)
    """
    saved_search_synced_disabled = 1
    try:
        stanza_name = 'synced_kpi_scheduling'
        response, content = rest.simpleRequest(
            '/servicesNS/nobody/SA-ITOA/configs/conf-itsi_settings/' + stanza_name,
            sessionKey=session_key,
            getargs={'output_mode': 'json'}
        )
        if response.status == 200:
            entries = json.loads(content).get('entry')
            for entry in entries:
                name = entry.get('name')
                if name != stanza_name:
                    continue
                settings = entry.get('content', {})
                saved_search_synced_disabled = int(settings.get('disabled', 1))
                break
    except Exception as e:
        # use default
        singleton_logger.exception(e)

    return bool(saved_search_synced_disabled)


def get_conf_data(session_key, conf_file_name, app):
    """
    Get data from a conf file and return the content of each stanza

    @rtype: dict
    @return: dict containing the content of each stanza, keyed by stanza name
    """
    stanza_dict = {}
    config = get_conf(session_key, conf_file_name, app=app)
    if config.get('response', {}).get('status') == '200':
        contents = json.loads(config.get('content'))
        for entry in contents.get('entry', []):
            stanza_dict[entry.get('name')] = entry.get('content', {})
        singleton_logger.debug('Conf data for {}="{}"'.format(conf_file_name, stanza_dict))
        singleton_logger.info('Successfully fetched information from conf file="%s"', conf_file_name)
        return stanza_dict
    else:
        singleton_logger.error('Failed to fetch configuration from config %s, response=`%s`',
                               conf_file_name, config.get('response', {}))
        raise Exception('Failed to fetch configuration from config %s' % conf_file_name)


def get_kvstore_max_row_per_query(session_key, app='SA-ITOA'):
    """
    Get the max rows per KVStore query from limits.conf
    @type session_key: basestring
    @param session_key: Splunk session key

    @type return: int
    @param return: the max rows per KVStore query; 50000 if no kvstore stanza is found.
    """
    for stanza, values in get_conf_data(session_key, 'limits', app).items():
        if stanza == 'kvstore':
            return int(values.get('max_rows_per_query'))
    return 50000  # 50K is the default value


def get_itsi_event_management_conf_field_value(session_key, requested_stanza, conf_field):
    """
    Get the field value from itsi_event_management.conf
    @param session_key: session
    @param requested_stanza: stanza in conf file
    @type conf_field: str
    @param conf_field: field in stanza

    @type return: int
    @param return: the integer value of the field, or None if not found
    """
    try:
        for stanza, values in get_conf_data(session_key, 'itsi_event_management', 'SA-ITOA').items():
            if stanza == requested_stanza:
                singleton_logger.info('Field {} value from itsi_event_management.conf - {}'.
                                      format(conf_field, int(values.get(conf_field))))
                return int(values.get(conf_field))
        # log once after scanning all stanzas, rather than once per non-matching stanza
        singleton_logger.info('Field {} for stanza {} not found in itsi_event_management.conf'.
                              format(conf_field, requested_stanza))
    except Exception as e:
        singleton_logger.exception('Failed to get field value from .conf file: %s' % e.args[0])


def get_itsi_event_management_nats_certificate_value(session_key, requested_stanza, conf_field):
    """
    Get the field value from certificates.conf
    @param session_key: session
    @param requested_stanza: stanza in conf file
    @type conf_field: str
    @param conf_field: field in stanza

    @type return: string
    @param return: the field value, or None if not found
    """
    try:
        for stanza, values in get_conf_data(session_key, 'certificates', 'SA-ITOA').items():
            if stanza == requested_stanza:
                conf_field_value = values.get(conf_field)
                if conf_field_value is None:
                    singleton_logger.info('Field {} for stanza {} not found in certificates.conf'.
                                          format(conf_field, stanza))
                else:
                    singleton_logger.info('Field {} value from certificates.conf - {}'.
                                          format(conf_field, values.get(conf_field)))
                return conf_field_value
        singleton_logger.info('Field {} for stanza {} not found in certificates.conf'.
                              format(conf_field, requested_stanza))
    except Exception as e:
        singleton_logger.exception('Failed to get field value from .conf file: %s' % e.args[0])


def get_nats_credentials(sessionKey, uri, auth_enabled):
    """
    Gets the user and password stored in passwords.conf
    """
    credentials = {}
    if auth_enabled == 1:
        singleton_logger.info('Getting user credentials')
        try:
            response, content = rest.simpleRequest(
                uri, sessionKey, method='GET', raiseAllErrors=True
            )
            if response.status != 200:
                # Something failed in the request, log an error
                singleton_logger.error('Failed to get user credentials.')

            if response.status == 200:
                res = json.loads(content)
                if 'entry' in res:
                    for entry in res['entry']:
                        entry_content = entry['content']
                        if entry_content:
                            credentials['realm'] = entry_content['realm']
                            credentials['username'] = entry_content['username']
                            credentials['clear_password'] = entry_content[
                                'clear_password'
                            ]
                            break
        except Exception as e:
            singleton_logger.exception('Failed to get user credentials - %s' % e)
    return credentials


def is_valid_ip(ip_address):
    """
    Checks if the passed in string is a valid IP address. Works with both IPv4 and IPv6

    :param ip_address: IP address to be validated
    :type ip_address: str
    :returns: True if valid IP, else False
    :rtype: bool
    """
    try:
        ipaddress.ip_address(ip_address)
        return True
    except ValueError:
        return False


def save_password(session_key, realm, username, password, app='SA-ITOA'):
    """
    Create/update credentials in passwords.conf.

    :param session_key: Session key
    :type session_key: str
    :param realm: Realm
    :type realm: str
    :param username: Username
    :type username: str
    :param password: Password
    :type password: str
    :param app: Name of the app
    :type app: str
    """
    data = {'realm': realm, 'name': username, 'password': password}
    passwords_uri = safeURLQuote(f'/servicesNS/nobody/{app}/storage/passwords')
    singleton_logger.info(f'Add credentials to {passwords_uri}')
    try:
        rsp, _ = rest.simpleRequest(
            passwords_uri,
            sessionKey=session_key,
            method="POST",
            postargs=data
        )
        if rsp.status != 200 and rsp.status != 201:
            singleton_logger.error(f'Error occurred while adding credentials to passwords.conf: {rsp}')
    except Exception as e:
        singleton_logger.error(f'Exception occurred while adding credentials to passwords.conf: {e}')


def is_cloud(logger, session_key):
    """
    Check if running on the Cloud stack

    :param logger: Logger
    :type logger: object
    :param session_key: Session key
    :type session_key: str
    :returns: True - in the Cloud, otherwise False
    :rtype: boolean
    """
    try:
        instance_type = _get_server_info(logger, session_key)['entry'][0]['content'].get('instance_type')
        logger.info(f'Instance Type : {instance_type}')
        if instance_type is not None:
            return instance_type == 'cloud'
        return False
    except Exception as e:
        logger.info(f'An error occurred while fetching server info : {e}')
        return False


def _get_server_info(logger, session_key):
    response, contents = rest.simpleRequest(
        path='/services/server/info',
        getargs={'output_mode': 'json'},
        sessionKey=session_key)
    if response.status != http.client.OK:
        e = Exception('Failed to get server information. Response={} Contents={}'.format(response, contents))
        logger.exception(e)
        raise e

    return json.loads(contents)


class ItoaBase(object):
    log_prefix = '[Itoa Base] '

    def __init__(self, session_key):
        self.session_key = session_key

    def raise_error(self, logger, message, status_code=500):
        raise ItoaError(message, logger, self.log_prefix, status_code=status_code)

    def raise_error_bad_validation(self, logger, message, status_code=400, uid='',
                                   context=None):
        raise ItoaValidationError(message, logger, self.log_prefix, status_code=status_code,
                                  uid=uid, context=context)


def get_object_size_in_bytes(object_name):
    """
    Return the size of an object. The object can be of any type

    @type object_name: object
    @param object_name: object name

    @rtype: int
    @return: size of the object's string representation, in bytes
    """
    return sys.getsizeof(str(object_name))


def is_size_less_than_50_mb(object_name):
    """
    Check whether the size of an object is less than 50 MB

    @type object_name: object
    @param object_name: object name

    @rtype: bool
    @return: True if size is less than 50MB otherwise False
    """
    size = get_object_size_in_bytes(object_name)
    if not size:
        raise ValueError('Failed to get size of object={0}.'.format(object_name))
    else:
        size_in_mb = float(size) / 1024 / 1024
        # Check if size is less than 50 MB; Splunk truncates data if the size of a post request
        # is more than 50 MB
        # TODO: Get splunkd post request size from server.conf file
        return size_in_mb < 50


def save_batch(itoa_object,
               owner,
               data_list,
               no_batch=False,
               dupname_tag=None,
               skip_local_failure=False,
               transaction_id=None,
               skip_service_template_update=False):
    """
    Splunk does not support saving an object whose size is more than 50 MB,
    so handle that scenario while doing batched saves.
    If the size of an object is greater than 50MB, break it down until each chunk is less than or equal to 50 MB

    @type itoa_object: instance
    @param itoa_object: one of itoa object instance

    @type owner: basestring
    @param owner: owner

    @type data_list: list
    @param data_list: list of data which need to save

    @return: a list of json objects containing all the failed objects
    """
    failed_json = []
    if no_batch:
        for object_data in data_list:
            results = itoa_object.get(owner, object_data.get('_key'))
            try:
                if results is None or len(results) == 0:
                    itoa_object.create(
                        owner, object_data, dupname_tag, transaction_id=transaction_id)
                else:
                    itoa_object.update(owner, object_data.get('_key'), object_data, dupname_tag,
                                       transaction_id=transaction_id)
            except Exception:
                failed_json.append(object_data)
    else:
        if is_size_less_than_50_mb(data_list):
            singleton_logger.debug('data_list less than 50mb, ok to save now')
            if skip_service_template_update:
                itoa_object.skip_service_template_update = skip_service_template_update
            itoa_object.save_batch(owner,
                                   data_list,
                                   True,
                                   dupname_tag,
                                   skip_local_failure=skip_local_failure,
                                   transaction_id=transaction_id)
        else:
            # Cut down the size and do the batch save
            total_length = len(data_list)
            singleton_logger.debug(
                'data_list is: %s and size is more than 50mb, split the data for batch_save' % total_length)
            if total_length == 1:
                raise ValueError(
                    'Size of one object={0} is more than 50 MB, splunk can\'t handle that size.'.
                    format(str(itoa_object)))

            first_half = data_list[0:(total_length // 2)]
            second_half = data_list[(total_length // 2):total_length]
            save_batch(itoa_object, owner, first_half, transaction_id=transaction_id,
                       skip_service_template_update=skip_service_template_update)
            save_batch(itoa_object, owner, second_half, transaction_id=transaction_id,
                       skip_service_template_update=skip_service_template_update)
    return failed_json


class FileManager(object):
    """
    Manages file operations
    """
    DELIMITER = '___'

    @staticmethod
    def delete_file(path):
        """
        Deletes the file at the path provided

        @type path: basestring
        @param path: file path
        """
        try:
            os.remove(path)
        except OSError as e:
            singleton_logger.warning(e)

    @staticmethod
    def is_file(path):
        """
        Check if it is a file or not

        @type path: basestring
        @param path: file path

        @rtype: bool
        @return: True or False
        """
        return os.path.isfile(path)

    @staticmethod
    def is_directory(path):
        """
        Check if it is a directory

        @type path: basestring
        @param path: directory path

        @rtype: bool
        @return: True or False
        """
        return os.path.isdir(path)

    @staticmethod
    def is_exists(path):
        """
        Check if the path exists or not

        @type path: basestring
        @param path: directory path

        @rtype: bool
        @return: True or False based on existence of the path
        """
        return os.path.exists(path)

    @staticmethod
    def get_base_dir(file):
        """
        Get the base dir of a given file. If a directory is passed then return it as-is

        @type file: basestring
        @param file: file path

        @return: Base directory - if a file path is passed
        @rtype: basestring
        """
        # check for None first: os.path.isfile(None) would raise a TypeError
        if file is None:
            return os.getcwd()
        elif os.path.isfile(file):
            return os.path.dirname(file)
        elif os.path.isdir(file):
            return file

    @staticmethod
    def create_directory(path):
        """
        Create a directory

        @type path: basestring
        @param path: directory path
        """
        try:
            os.makedirs(path)
            singleton_logger.debug('Successfully created directory, path=%s', path)
        except OSError as e:
            if e.errno != errno.EEXIST:
                singleton_logger.exception(e)
                raise e

    @staticmethod
    def zip_directory(root_path, name_of_zip_file):
        """
        Zip the directory

        @type root_path: basestring
        @param root_path: path of the directory to zip

        @type name_of_zip_file: basestring
        @param name_of_zip_file: name of the resulting zip file, without the .zip extension
        """
        try:
            os.chdir(os.path.dirname(root_path))
            with zipfile.ZipFile(name_of_zip_file + '.zip',
                                 'w',
                                 zipfile.ZIP_DEFLATED,
                                 allowZip64=True) as zf:
                for root, _, filenames in os.walk(os.path.basename(root_path)):  # noqa
                    for name in filenames:
                        name = os.path.join(root, name)
                        name = os.path.normpath(name)
                        zf.write(name, name)
        except Exception as exc:
            singleton_logger.exception(exc)
            raise

    @staticmethod
    def unzip_backup(path_to_zip_file, extract_to_path):
        """
        Unzip the backup zip file into extract_to_path

        @type path_to_zip_file: basestring
        @param path_to_zip_file: path to zip file including .zip extension

        @type extract_to_path: basestring
        @param extract_to_path: path to extract to
        """
        with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:
            zip_ref.extractall(extract_to_path)

    @staticmethod
    def delete_working_directory(path):
        """
        Delete the working directory that contains the json files

        @type path: basestring
        @param path: directory path
        """
        try:
            shutil.rmtree(path)
        except OSError as ose:
            singleton_logger.exception(ose)
            raise

    @staticmethod
    def write_to_file(file_path, data, flag='w+'):
        """
        Write valid json-convertible data to file_path

        @type file_path: basestring
        @param file_path: file path

        @type data: dict
        @param data: json data to write

        @type flag: basestring
        @param flag: file opening flags
        """
        with open(file_path, flag) as fp:
            json.dump(data, fp)

    @staticmethod
    def copy_file(source, destination):
        """
        Copy a valid file

        @type source: basestring
        @param source: path of the file we want to copy

        @type destination: basestring
        @param destination: destination path of the file
        """
        shutil.copyfile(source, destination)

    @staticmethod
    def read_data(file_path, flag='r'):
        """
        Read data from the given file_path and return the parsed JSON object

        @type file_path: basestring
        @param file_path: file path

        @type flag: basestring
        @param flag: file opening flags

        @rtype: dict
        @return: json based dict
        """
        with open(file_path, flag) as fp:
            data = json.load(fp)

        return data

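    # Round-trip sketch (illustrative temp path):
    #   FileManager.write_to_file('/tmp/state.json', {'status': 'ok'})
    #   FileManager.read_data('/tmp/state.json')  # -> {'status': 'ok'}
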
    @staticmethod
    def clean_file(file_path):
        """
        Delete content of the file

        @type file_path: basestring
        @param file_path: file path

        @return:
        """
        if os.path.exists(file_path):
            try:
                open(file_path, 'w').close()
            except Exception as exc:
                singleton_logger.error(exc.args[0])
                singleton_logger.info('Failed to clean existing file. Will append data to existing file.')

    @staticmethod
    def get_rolling_file_name(file_path, rolling_file_number=0):
        """
        Get a rolling file name by inserting FileManager.DELIMITER and the rolling
        file number before the file extension.
        For example, ('/tmp/foo.txt', 2) would return '/tmp/foo<D>2.txt',
        where <D> stands for the DELIMITER string.

        @type file_path: basestring
        @param file_path: file path (expected to contain an extension)

        @type rolling_file_number: integer
        @param rolling_file_number: rolling file number

        @rtype: basestring
        @return: file name
        """
        basedir = os.path.dirname(file_path)
        basefilename = os.path.basename(file_path)
        tmp_file = basefilename[0:basefilename.rfind('.')] + FileManager.DELIMITER + \
            str(rolling_file_number) + basefilename[basefilename.rfind('.'): len(basefilename)]
        return os.path.join(basedir, tmp_file)

    @staticmethod
    def get_zip_file_names(directory_path):
        """
        Get the names (without the .zip extension) of files with a .zip extension
        in the directory provided by directory_path

        @type directory_path: basestring
        @param directory_path: directory to scan

        @rtype: list or None
        @return: list of file names, or None if no zip files are found
        """
        file_paths = glob.glob(os.path.join(directory_path, '*.zip'))
        if isinstance(file_paths, list) and len(file_paths) > 0:
            return [fpath.split(os.sep)[-1].split('.zip')[-2] for fpath in file_paths]
        else:
            return None

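    # Illustrative sketch: with /tmp/backups containing foo.zip and bar.zip,
    #   FileManager.get_zip_file_names('/tmp/backups')  # -> ['foo', 'bar'] (glob order)
    # and None is returned when the directory holds no .zip files.
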
    @staticmethod
    def merge_file(logger, file_source, file_destination):
        """
        Merge source file into destination file.
        Create new keys if absent in destination file,
        overwrite keys present in destination file,
        and keep other keys intact.

        @type logger: object
        @param logger: logger to use

        @type file_source: basestring
        @param file_source: source file path

        @type file_destination: basestring
        @param file_destination: destination file path

        @return:
        """
        try:
            macros_conf = conf.readConfFile(file_destination)
            macros_conf.update(conf.readConfFile(file_source))
            # The conf parser adds a default stanza by itself, so remove it if it's empty
            if 'default' in macros_conf and len(macros_conf['default']) == 0:
                del macros_conf['default']
            conf.writeConfFile(file_destination, macros_conf)
        except Exception as e:
            logger.error("Exception {} occurred while merging {} into {}".format(
                str(e), file_source, file_destination))

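    # Merge semantics sketch (illustrative paths): stanzas are merged at the
    # stanza level (dict.update over the parsed conf), so with dest.conf
    # containing [a] and [b], and src.conf containing a different [a],
    #   FileManager.merge_file(logger, 'src.conf', 'dest.conf')
    # leaves dest.conf holding src.conf's [a] plus the original [b].

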
class SplunkUser(object):
    """
    Class to wrap Splunk's user info interactions
    """

    @staticmethod
    def fetch_user_access_info(username, session_key, logger):
        """
        Given a username, fetch the user's access control details

        @type username: string
        @param username: concerned username

        @type session_key: string
        @param session_key: splunkd session key

        @type logger: object
        @param logger: logger to use

        @rtype: dict
        @return object: json'ified access details of username
        """
        if (not isinstance(username, itsi_py3.string_type)) or len(username.strip()) < 1:
            raise Exception('Expecting a valid username, "%s" is invalid' % username)
        if (not isinstance(session_key, itsi_py3.string_type)) or len(session_key.strip()) == 0:
            raise Exception('Expecting a valid session_key, "%s" is invalid' % session_key)

        # Quote the username so special characters cannot break the URI path,
        # mirroring fetch_role_info() below
        uri = '/services/authentication/users/%s' % quote(username, safe='')
        getargs = {'output_mode': 'json'}
        try:
            response, content = rest.simpleRequest(
                uri,
                method='GET',
                getargs=getargs,
                sessionKey=session_key)
        except Exception as e:
            logger.exception(e)
            raise

        if response.status != 200:
            message = ('Error while polling Splunkd for user access information. Response: "%s".'
                       ' Content: "%s".') % (response, content)
            logger.error(message)
            raise Exception(message)
        else:
            logger.debug('Fetched user access details for user "%s": %s', username, content)
            return json.loads(content)

    @staticmethod
    def fetch_role_info(role_name, session_key, logger):
        """
        Given a role name, fetch the role's details

        @type role_name: basestring
        @param role_name: concerned role

        @type session_key: string
        @param session_key: splunkd session key

        @type logger: object
        @param logger: logger to use

        @rtype: dict
        @return object: json'ified details of role
        """
        if (not isinstance(role_name, itsi_py3.string_type)) or len(role_name.strip()) < 1:
            raise Exception('Expecting a valid role name, "%s" is invalid' % role_name)
        if (not isinstance(session_key, itsi_py3.string_type)) or len(session_key.strip()) == 0:
            raise Exception('Expecting a valid session_key, "%s" is invalid' % session_key)

        uri = '/services/authorization/roles/%s' % quote(role_name, safe='')
        getargs = {'output_mode': 'json'}
        try:
            response, content = rest.simpleRequest(
                uri,
                method='GET',
                getargs=getargs,
                sessionKey=session_key,
                raiseAllErrors=False)
        except AuthorizationFailed as e:
            logger.exception(e)
            return {}
        except Exception as e:
            logger.exception(e)
            raise

        if response.status != 200:
            message = ('Error while polling Splunkd for role information. Response: "%s".'
                       ' Content: "%s".') % (response, content)
            logger.error(message)
            raise Exception(message)
        else:
            logger.debug('Fetched details for role "%s": %s', role_name, content)
            return json.loads(content)

    @staticmethod
    def get_roles_for_user(username, session_key, logger):
        """
        Given a username, fetch the roles assigned to the user

        @type username: string
        @param username: concerned username

        @type session_key: string
        @param session_key: splunkd session key

        @type logger: logging.logger
        @param logger: logger to use

        @rtype: tuple with two lists of strings
        @return: tuple of list of roles directly assigned to user and full list of roles accounting for role inheritance
        """

        # If username is 'nobody', operations are being performed in the Splunk system
        # user's context; fetch the current user context to get the actual username.
        # NOTE: this function should only be used by itsi_security_group
        if username == 'nobody':
            nobody_user = SplunkUser.get_current_user_context(session_key, logger)
            username = nobody_user['username']

        try:
            user_access_info = SplunkUser.fetch_user_access_info(username, session_key, logger)
        except ResourceNotFound:
            logger.warning('Unable to look up user %s. Returning no roles.', username)
            return [], []

        if not (
            isinstance(user_access_info.get('entry'), list)
            and len(user_access_info.get('entry')) == 1
            and isinstance(user_access_info['entry'][0].get('content'), dict)
            and 'roles' in user_access_info['entry'][0]['content']
        ):
            raise Exception('Could not find roles for the user "%s". User access info: %s' %
                            (username, user_access_info))

        roles_for_user = user_access_info['entry'][0]['content']['roles']
        roles_for_user = roles_for_user if isinstance(roles_for_user, list) else []
        all_roles_for_user = set(roles_for_user)
        processed_roles = set()

        def append_inherited_roles(role):
            if role in processed_roles:
                # Prevent looping forever from cyclic inheritance
                return
            else:
                processed_roles.add(role)

            # Recursively add roles via inheritance for each role assigned to the user
            role_info = SplunkUser.fetch_role_info(role, session_key, logger)
            if not (
                isinstance(role_info.get('entry'), list)
                and len(role_info.get('entry')) == 1
                and isinstance(role_info['entry'][0].get('content'), dict)
                and 'imported_roles' in role_info['entry'][0]['content']
            ):
                logger.debug('Could not fetch inherited roles for role "%s"', role)
                return

            inherited_roles = role_info['entry'][0]['content']['imported_roles']
            inherited_roles = inherited_roles if isinstance(inherited_roles, list) else []

            for inherited_role in inherited_roles:
                all_roles_for_user.add(inherited_role)
                append_inherited_roles(inherited_role)

        for role in roles_for_user:
            append_inherited_roles(role)

        return roles_for_user, list(all_roles_for_user)

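    # Usage sketch (hypothetical user and roles): for a user 'alice' holding the
    # role 'itoa_admin', which in turn imports 'user',
    #   direct, effective = SplunkUser.get_roles_for_user('alice', session_key, logger)
    # would yield direct == ['itoa_admin'] while 'user' appears in effective via
    # the recursive imported_roles walk above.
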
    @staticmethod
    def get_current_user_context(session_key, logger):
        """
        Get current splunk user context. An example use of this
        method could be fetching the username when the current user is
        'nobody'.

        @type session_key: string
        @param session_key: splunkd session key

        @type logger: logging.logger
        @param logger: logger to use

        @rtype: json
        @return: current user object.
        """
        uri = '/services/authentication/current-context'
        getargs = {'output_mode': 'json'}
        try:
            response, content = rest.simpleRequest(
                uri,
                method='GET',
                getargs=getargs,
                sessionKey=session_key
            )
        except Exception as e:
            logger.exception(e)
            raise

        if response.status != 200:
            message = ('Error while polling Splunkd for current user context. Response: "%s".'
                       ' Content: "%s".') % (response, content)
            logger.error(message)
            raise Exception(message)
        else:
            content = json.loads(content)
            # Only one entry should be returned, as there can only be one current context
            user = content['entry'][0]['content']
            return user


def get_object_batch_size(session_key, object_type='default'):
    """
    Get batch size to use for large operations against the KVStore
    Use 'default' as the `object_type` for the default case (1,000).

    :type session_key: str
    :param session_key: Session key

    :type object_type: str
    :param object_type: Object type to get batch size for

    :rtype: int
    :return: Batch size (number of objects)
    """
    fallback_batch_size = 1000
    try:
        resp_data = get_conf_stanza(session_key, 'itsi_settings', 'object_batch_sizes', 'SA-ITOA')
        content = json.loads(resp_data['content'])['entry'][0]['content']
        batch_size = int(content.get(object_type, 0))
        if not batch_size:
            batch_size = int(content.get('default', fallback_batch_size))
    except Exception:
        batch_size = fallback_batch_size
        singleton_logger.error('Object batch size fetching failed. Defaulting to %d.' % batch_size)
    return batch_size

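# Configuration sketch (hypothetical values): batch sizes come from the
# [object_batch_sizes] stanza of itsi_settings.conf, read via the get_conf_stanza
# helper used above, e.g.
#   [object_batch_sizes]
#   default = 1000
#   service = 500
# get_object_batch_size(session_key, 'service') would return 500; an unknown
# object_type falls back to 'default', and any lookup failure falls back to 1000.

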
def combine_object_lists(list1, list2):
    """
    Method to combine two data lists together. Any objects in the first data set will be
    replaced with a matching object in the second data set.

    @type list1: list
    @param list1: a list of object data

    @type list2: list
    @param list2: a list of object data

    @rtype: list
    @return: the combined list of data
    """
    if not list1:
        list1 = []

    if not list2:
        list2 = []

    combined = {data['_key']: data for data in list1}
    for data in list2:
        key = data['_key']
        combined[key] = data
    return list(combined.values())

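# Illustrative sketch: objects are matched on '_key', and list2 wins on conflict:
#   combine_object_lists([{'_key': '1', 'v': 'old'}, {'_key': '2', 'v': 'b'}],
#                        [{'_key': '1', 'v': 'new'}])
#   # -> [{'_key': '1', 'v': 'new'}, {'_key': '2', 'v': 'b'}]

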
def calculate_next_cron_time(policy):
    """
    Calculate next cron time given a policy with a cron schedule and possible user_timezone.

    @type policy: dict
    @param policy: Policy to build cron time off of

    @rtype: tuple of int, datetime.tzinfo implementation
    @return: Tuple of next scheduled time (in timestamp form), timezone used
    """
    explicit_timezone = policy.get('user_timezone', 'UTC')
    if explicit_timezone:
        user_timezone = pytz.timezone(explicit_timezone)
    else:  # Policy sending user_timezone value of ''
        user_timezone = tzlocal.get_localzone()
    current_time = datetime.datetime.now(user_timezone)
    next_scheduled_time = int(croniter(policy.get('cron_schedule'), current_time).get_next())
    return next_scheduled_time, user_timezone

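# Usage sketch (illustrative policy): croniter computes the next fire time from
# "now" in the policy's timezone, while an empty user_timezone falls back to the
# local zone:
#   calculate_next_cron_time({'cron_schedule': '0 12 * * *',
#                             'user_timezone': 'US/Pacific'})
# returns (epoch of the next 12:00 in US/Pacific, the US/Pacific tzinfo).

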
def apply_sort_count_to_results(
        objects, logger, sort_key="_key", sort_dir="asc", skip=0, limit=0, detailed_format=True,
):
    """
    Mock how the KVstore sorts and counts results, for returning to the user.

    This modifies in-place AND returns objects.

    :param objects: Objects to sort and count
    :type objects: list of dict

    :param logger: Logger to log to
    :type logger: Logger object

    :param sort_key: Key to sort by
    :type sort_key: string

    :param sort_dir: Direction for sorting - asc or desc
    :type sort_dir: string

    :param skip: Number of items to skip from the start
    :type skip: int

    :param limit: Maximum number of items to return
    :type limit: int

    :param detailed_format: Use the new-style (Splunk Core API-style) format for return value
    :type detailed_format: Boolean

    :return: Objects, sorted and counted, possibly with metadata
    :rtype: dict (if detailed_format) or list of dict (if not detailed_format)
    """
    if sort_dir == "desc" or sort_dir == "0":
        is_reverse_order = True
    else:
        is_reverse_order = False

    # Strip the "kpis." prefix before the dotted-key check, so "kpis.<field>"
    # keys remain sortable; any other nested key is rejected
    if sort_key.startswith("kpis."):
        sort_key = sort_key.split(".", 1)[1]
    if len(sort_key.split(".")) > 1:
        raise ItoaError("Key %s not sortable at this time" % sort_key, logger, status_code=501)

    objects.sort(key=lambda x: x.get(sort_key), reverse=is_reverse_order)
    # Is there a way to do this without double memory?
    if skip and limit:
        results = objects[skip:skip + limit]
    elif skip:
        results = objects[skip:]
    elif limit:
        results = objects[:limit]
    else:
        results = objects

    if detailed_format:
        return {
            "entry": results,
            "paging": {
                "total": len(objects),
                "perPage": len(results),
                "offset": skip,
            },
        }
    return results

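# Shape sketch (illustrative data): with detailed_format=True the return mimics
# the Splunk Core API envelope, e.g.
#   apply_sort_count_to_results([{'_key': 'b'}, {'_key': 'a'}], logger,
#                               sort_dir='desc', limit=1)
#   # -> {'entry': [{'_key': 'b'}],
#   #     'paging': {'total': 2, 'perPage': 1, 'offset': 0}}

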
def apply_filter_to_results(objects, logger, filter_data=None):
    """
    Mock how the KVstore reads and filters results, for returning to the user.

    Only $regex flag "i" is supported for now.

    :param objects: Objects to filter
    :type objects: list of dicts

    :param logger: Logger to record errors to
    :type logger: Logger

    :param filter_data: MongoDB filter to use
    :type filter_data: dict

    :return: Objects that match the filter
    :rtype: list of dicts
    """
    # Potential speed-up with vector math?
    def recursive_filter(objects, sub_filter):
        """
        Recursive helper for identifying which objects fit the filter

        :param objects: List of N objects to filter
        :type objects: list of dict

        :param sub_filter: Filter / query
        :type sub_filter: dict

        :return: List of N truth values that each correspond to whether sub_filter evaluates to True or False for each
            of the N objects
        :rtype: list of Bools
        """
        length = len(objects)
        recursive_rv = [True] * length
        for k, v in sub_filter.items():
            if k == "$and":
                field_truthiness = [True] * length
                for and_sub_filter in v:
                    and_result = recursive_filter(objects, and_sub_filter)
                    for i in range(length):
                        field_truthiness[i] = and_result[i] and field_truthiness[i]
            elif k == "$or":
                field_truthiness = [False] * length
                for or_sub_filter in v:
                    or_result = recursive_filter(objects, or_sub_filter)
                    for i in range(length):
                        field_truthiness[i] = or_result[i] or field_truthiness[i]
            # Not fully supported (see convert_filter_to_kpi_filter())
            elif k == "$not":
                field_truthiness = [not x for x in recursive_filter(objects, v)]
            # Assume we're doing a base-level comparison with a field
            elif type(v) is dict:
                for operator, explicit_value in v.items():
                    if operator == "$gt":
                        field_truthiness = [obj[k] > explicit_value for obj in objects]
                    elif operator == "$gte":
                        field_truthiness = [obj[k] >= explicit_value for obj in objects]
                    elif operator == "$lt":
                        field_truthiness = [obj[k] < explicit_value for obj in objects]
                    elif operator == "$lte":
                        field_truthiness = [obj[k] <= explicit_value for obj in objects]
                    elif operator == "$ne":
                        field_truthiness = [obj[k] != explicit_value for obj in objects]
                    elif operator == "$regex":
                        options = v.get("$options", "")
                        if options == "i":
                            flags = re.IGNORECASE
                        else:
                            flags = 0
                        compiled_regex = re.compile(explicit_value, flags=flags)
                        field_truthiness = [bool(compiled_regex.search(obj[k])) for obj in objects]
                    elif operator == "$options":
                        continue
                    else:
                        raise ItoaError("Unknown operator: %s" % operator, logger, status_code=400)
            else:
                def get_val(object, subkeys):
                    """
                    Get a value associated with a nested-key (represented by the subkeys parameter) from an object

                    Example: Fetching "a.b.c" (as ["a", "b", "c"]) from:
                        {
                            "a": {
                                "b": {
                                    "c": 123
                                }
                            }
                        }

                    :param object: Object containing the field value
                    :type object: Any

                    :param subkeys: List of key components
                    :type subkeys: list

                    :return: Value represented loosely as "object[subkeys]" in a certain type of format
                    :rtype: Any
                    """
                    if object is None or len(subkeys) == 0:  # Base case
                        return object
                    elif type(object) is dict:
                        return get_val(object.get(subkeys[0]), subkeys[1:])
                    else:
                        # ITSI-35024
                        raise NotImplementedError("List values are not parseable at this time")

                keys = k.split(".")
                field_truthiness = [get_val(obj, keys) == v for obj in objects]

            # Implicit "$and" in a query with multiple fields at the same level
            for i in range(length):
                recursive_rv[i] = field_truthiness[i] and recursive_rv[i]
        return recursive_rv

    if not filter_data:
        return objects

    bool_array = recursive_filter(objects, filter_data)
    rv = []
    for i, object in enumerate(objects):
        if bool_array[i]:
            rv.append(object)
    return rv

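# Filter sketch (illustrative query): a MongoDB-style filter with the implicit
# AND across same-level fields:
#   apply_filter_to_results(objects, logger,
#                           {'severity': {'$gte': 3},
#                            'title': {'$regex': 'cpu', '$options': 'i'}})
# keeps objects whose severity is >= 3 AND whose title matches /cpu/i.

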
def limit_event_chars(events, max_chars):
    """
    Limit the characters in each event

    :param events: Events to limit the characters of
    :type events: list of dict

    :param max_chars: Maximum number of characters to keep per event
    :type max_chars: integer

    :return: events, each trimmed to at most max_chars characters when serialized
    :rtype: list of dict
    """

    def recursive_limit(dict_obj):
        # Sort the dictionary items in descending order of serialized length
        sorted_items = sorted(
            dict_obj.items(), key=lambda x: len(json.dumps({x[0]: x[1]})), reverse=True
        )
        new_dict = {}
        current_chars = 0

        # Limit the dictionary size to at most max_chars
        for key, value in sorted_items:
            if current_chars + len(json.dumps({key: value})) <= max_chars:
                if isinstance(value, dict):
                    new_value = recursive_limit(value)
                else:
                    new_value = value
                new_dict[key] = new_value
                current_chars += len(json.dumps({key: new_value}))

        return new_dict

    return [recursive_limit(event) for event in events]

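# Trimming sketch (illustrative event): the largest fields are considered first,
# and any field that would push the serialized size past max_chars is dropped:
#   limit_event_chars([{'msg': 'x' * 500, 'host': 'web-1'}], max_chars=50)
#   # -> [{'host': 'web-1'}]  (the oversized 'msg' field is dropped)

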
def wait_for_job(search_job, max_time=-1):
    """
    Wait up to `max_time` seconds for `search_job` to finish. If `max_time` is negative (default), waits forever.
    Returns True if the job finished.

    :param search_job: Search job to wait for
    :type search_job: splunklib.client.Job

    :param max_time: Seconds to wait
    :type max_time: integer

    :return: Did search job complete?
    :rtype: Boolean
    """
    pause = 0.5
    lapsed = 0.0
    while not search_job.is_done():
        time.sleep(pause)
        lapsed += pause
        if 0 <= max_time < lapsed:
            break
    return search_job.is_done()
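

# Usage sketch (illustrative; assumes an existing splunklib service handle):
#   job = service.jobs.create('search index=_internal | head 10')
#   if wait_for_job(job, max_time=60):
#       pass  # job completed within a minute
#   else:
#       pass  # timed out; job is still running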