# Copyright (C) 2005-2024 Splunk Inc. All Rights Reserved.
import uuid
import asyncio
import ssl
import sys
import json

from splunk.clilib.bundle_paths import make_splunkhome_path

# Make sure the app's bundled libraries are importable before loading them.
sys.path.append(make_splunkhome_path(['etc', 'apps', 'SA-ITOA', 'lib']))
sys.path.append(make_splunkhome_path(['etc', 'apps', 'SA-ITOA', 'lib', 'SA_ITOA_app_common']))

import nats

from ITOA.setup_logging import logger
from ITOA.itoa_common import (
    get_current_utc_epoch,
    get_itsi_event_management_nats_certificate_value,
    get_nats_credentials,
)
from ITOA.event_management.notable_event_error import NotableEventBadRequest
from SA_ITOA_app_common.solnlib.conf_manager import ConfManager

from .base_event_management import BaseEventManagement
from .notable_event_utils import MethodType
from .push_event_manager import PushEventManager

class NotableEventGroup(BaseEventManagement):
    """
    Class to create, update, get, and delete notable event group state.

    Records are stored in the KV store with the following schema:
    {
        _key: <random key>,
        object_type: notable_event_group,
        owner: <assignee>,
        severity: <severity>,
        status: <status>,
        mod_time: <mod_time>,
        create_time: <create_time>
    }
    """

    def __init__(self, session_key, current_user_name=None, collection='itsi_notable_group_user',
                 object_type='notable_event_group',
                 user='nobody', action_dispatch_config=None, **kwargs):
        """
        Initialize

        @param session_key: session key
        @param collection: collection name
        @param object_type: object type
        @param user: user context to save
        @type action_dispatch_config: ActionDispatchConfiguration
        @param action_dispatch_config: the setting for hybrid action dispatch
        @param kwargs: extra args
        @return:
        """
        # Initialize the base event object
        super(NotableEventGroup, self).__init__(
            session_key, collection, object_type, user, current_user_name, action_dispatch_config=action_dispatch_config
        )
        self.session_key = session_key
        self.mod_time_key = 'mod_time'
        self.create_time_key = 'create_time'
        self.user = 'nobody'
        self.logger = logger
        self.kwargs = kwargs

        if action_dispatch_config:
            self.host_base_uri = action_dispatch_config.remote_ea_mgmt_uri
            self.master_session_key = action_dispatch_config.get_master_host_session_key()

    def pre_processing(self, data_list, method):
        """
        Add mod_time and create_time to the group

        @type data_list: list
        @param data_list: list of data to validate and stamp with time, user info, etc.

        @type method: basestring
        @param method: method type

        @rtype: list
        @return: updates the list in place and also returns it
        """
        if not isinstance(data_list, list):
            raise TypeError('Data is not a valid list, data_list type is %s.' % type(data_list))
        for data in data_list:
            # Make sure data is a valid dict
            if not isinstance(data, dict):
                raise TypeError('Data is not a valid dictionary.')

            time_value = get_current_utc_epoch()
            if method == MethodType.CREATE:
                # Add create time
                data[self.create_time_key] = time_value
            if method not in (MethodType.DELETE, MethodType.DELETE_BULK, MethodType.GET, MethodType.GET_BULK):
                # Need to set mod time for create and update
                data[self.mod_time_key] = time_value

        return data_list

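    # A minimal usage sketch (illustrative values; MethodType members per
    # notable_event_utils):
    #
    #   groups = [{'_key': 'group-1', 'status': '2'}]
    #   self.pre_processing(groups, MethodType.CREATE)
    #   # -> groups[0] now carries both 'create_time' and 'mod_time'.
    #   # For updates only 'mod_time' is refreshed; GET/DELETE variants change nothing.
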
    def _get_activity(self, updated_data, activity_type=None):
        """
        Return the activity which is happening during an update

        @type updated_data: dict
        @param updated_data: data to get activity from

        @type activity_type: basestring
        @param activity_type: type of activity

        @rtype: basestring
        @return: activity log statement
        """
        activity_tracking = ''
        keys_to_del = []

        if activity_type == 'acknowledge':
            return '{0} successfully acknowledged episode.'.format(updated_data.get('owner'))

        self.lazy_init_notable_event_configuration()

        for key, value in updated_data.items():
            if key.startswith('__old__'):
                actual_key = key[len('__old__'):]
                # For status and severity, the stored value is its level
                old_value = value
                new_value = updated_data[actual_key]
                # Put the label alongside the id for pretty printing
                if actual_key == 'status' or actual_key == 'severity':
                    if actual_key == 'status':
                        old_value = self.notable_event_configuration.status_contents.get(old_value, {})\
                            .get('label') + " ({0})".format(old_value)
                        new_value = self.notable_event_configuration.status_contents.get(new_value, {})\
                            .get('label') + " ({0})".format(new_value)
                    if actual_key == 'severity':
                        old_value = self.notable_event_configuration.severity_contents.get(old_value, {})\
                            .get('label') + " ({0})".format(old_value)
                        new_value = self.notable_event_configuration.severity_contents.get(new_value, {})\
                            .get('label') + " ({0})".format(new_value)

                activity_tracking += '{0} changed from {0}="{1}" to {0}="{2}". '.format(actual_key, old_value,
                                                                                        new_value)
                keys_to_del.append(key)
        # Delete the old entries in the dict
        for key in keys_to_del:
            del updated_data[key]

        if not activity_tracking and updated_data:
            fields = set(updated_data.keys()).intersection({'status', 'severity', 'owner'})
            activity_tracking = 'Updated '
            for field in fields:
                value = updated_data[field]
                if field == 'severity':
                    value = self.notable_event_configuration.severity_contents.get(value, {}).get('label', '') + " ({0})".format(value)
                if field == 'status':
                    value = self.notable_event_configuration.status_contents.get(value, {}).get('label', '') + " ({0})".format(value)
                activity_tracking += ' {0}={1} '.format(field, value)
        return activity_tracking

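    # Example (hypothetical labels): given
    #   updated_data = {'__old__status': '1', 'status': '2'}
    # and a status lookup mapping '1' -> 'New' and '2' -> 'In Progress',
    # _get_activity returns
    #   'status changed from status="New (1)" to status="In Progress (2)". '
    # and removes the '__old__status' key from updated_data in place.
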
    def create(self, data, **kwargs):
        """
        Create a notable event group

        @type data: dict
        @param data: notable event group schema to create

        @rtype: dict
        @return: the created object's _key, or raise an exception
        """
        # We need to set _key to the same value as group_id, but if we set _key
        # directly in the payload the generic facade interprets the request as
        # an update instead of a create; hence the value is passed in as
        # group_id and renamed here.
        # group_id comes from the UI and should not be confused with itsi_group_id.
        # To-Fix:
        # - Update UI to use itsi_group_id
        if isinstance(data, dict) and 'group_id' in data:
            data['_key'] = data.pop('group_id')
        activity = self._get_activity(data, data.pop('action_type', None))
        ret = super(NotableEventGroup, self).create(data, **kwargs)
        # Create is effectively an update here because the group was already
        # created with some initial state; we now track that state by creating
        # a record in the KV store.
        self.send_activity_to_audit({
            'event_id': data.get('_key'),
            'itsi_policy_id': data.get('itsi_policy_id')
        }, activity, 'Episode update')

        self.check_to_send_break_group_event(data['_key'], **kwargs)
        return ret

    def create_bulk(self, data_list, **kwargs):
        """
        Create more than one notable event group

        @type data_list: list
        @param data_list: data list

        @rtype: list
        @return: list of created objects
        """
        activities = []
        activities_data = []
        action_type = kwargs.pop('action_type', None)
        if isinstance(data_list, list):
            for data in data_list:
                if 'group_id' not in data:
                    continue
                data['_key'] = data.pop('group_id', None)
                activities.append(self._get_activity(data, action_type))
                activities_data.append({
                    'event_id': data.get('_key'),
                    'itsi_policy_id': data.get('itsi_policy_id')
                })
        ret = super(NotableEventGroup, self).create_bulk(data_list, **kwargs)
        # Create is effectively an update here because the groups were already
        # created with some initial state; we now track that state by creating
        # records in the KV store.
        self.send_activity_to_audit(activities_data, activities, 'Episode bulk update')
        self.check_to_send_multiple_break_group_events(data_list, **kwargs)
        return ret

    def update(self, object_id, data, is_partial_update=False, **kwargs):
        """
        Update one notable event group

        @type object_id: basestring
        @param object_id: object id

        @type data: dict
        @param data: data

        @type is_partial_update: bool
        @param is_partial_update: flag to do a partial update

        @type kwargs: dict
        @param kwargs: extra parameters

        @rtype: dict
        @return: dict which holds the updated keys
        """
        activity = self._get_activity(data, data.pop('action_type', None))
        if kwargs.get('break_group_policy_id', False):
            self.check_break_group_event_fields(data)
        if 'group_state_change_action' in data:
            # The presence of the 'group_state_change_action' field means this is a
            # group state change action triggered from the Rules Engine side. In this
            # case, do an update_bulk instead, because update_bulk has special logic
            # for handling group state change actions: it handles updates for groups
            # which are not yet inserted in the KV store.
            ret = super(NotableEventGroup, self).update_bulk([object_id], [data], is_partial_update, **kwargs)
        else:
            ret = super(NotableEventGroup, self).update(object_id, data, is_partial_update, **kwargs)
        self.send_activity_to_audit({
            'event_id': data.get('_key'),
            'itsi_policy_id': data.get('itsi_policy_id')
        }, activity, 'Episode update')
        self.check_to_send_break_group_event(data['_key'], **kwargs)
        return ret

    def update_bulk(self, object_ids, data_list, is_partial_update=False, **kwargs):
        """
        Perform an update for one or more notable event groups

        @type object_ids: list
        @param object_ids: notable event group ids

        @type data_list: list
        @param data_list: notable event groups

        @type is_partial_update: bool
        @param is_partial_update: flag for partial update

        @type kwargs: dict
        @param kwargs: extra params

        @rtype: list
        @return: updated notable event group schemas
        """
        activities = []
        activities_data = []
        action_type = kwargs.pop('action_type', None)
        if kwargs.get('break_multiple_groups', False):
            for data in data_list:
                self.check_break_group_event_fields(data)
        for data in data_list:
            if 'group_id' in data:
                data['_key'] = data.pop('group_id', None)
            activities.append(self._get_activity(data, action_type))
            activities_data.append({
                'event_id': data.get('_key'),
                'itsi_policy_id': data.get('itsi_policy_id')
            })
        ret = super(NotableEventGroup, self).update_bulk(object_ids, data_list, is_partial_update,
                                                         **kwargs)
        self.send_activity_to_audit(activities_data, activities, 'Episode bulk update')
        self.check_to_send_multiple_break_group_events(data_list, **kwargs)
        return ret

    def add_drilldown(self, object_id, drilldown, is_partial_update=True, **kwargs):
        """
        Add a drilldown link to a notable event group

        @type object_id: basestring
        @param object_id: object id

        @type drilldown: dict
        @param drilldown: drilldown to be added

        @type is_partial_update: bool
        @param is_partial_update: flag to do a partial update

        @type kwargs: dict
        @param kwargs: extra parameters

        @rtype: dict
        @return: dict which holds the updated keys
        """
        if not self.is_valid_drilldown(drilldown):
            raise ValueError('Drilldown data must have link and name')

        group = self.get(object_id)

        clean_drilldown = self._clean_drilldown(drilldown)

        try:
            drilldown_list = group.get('drilldown', [])
        except AttributeError:
            raise TypeError('Group is not of type dict')

        try:
            drilldown_list.append(clean_drilldown)
        except AttributeError:
            raise TypeError('Drilldown field is not of type list')

        ret = super(NotableEventGroup, self).update(object_id, {'drilldown': drilldown_list}, is_partial_update, **kwargs)

        return ret

    def update_drilldown(self, object_id, drilldown, is_partial_update=True, **kwargs):
        """
        Update a drilldown for a NotableEventGroup

        @type object_id: basestring
        @param object_id: object id

        @type drilldown: dict
        @param drilldown: drilldown to be updated

        @type is_partial_update: bool
        @param is_partial_update: flag to do a partial update

        @type kwargs: dict
        @param kwargs: extra parameters

        @rtype: dict
        @return: dict which holds the updated keys
        """
        if not self.is_valid_drilldown(drilldown):
            raise ValueError('Drilldown data must have link and name')

        group = self.get(object_id)

        clean_drilldown = self._clean_drilldown(drilldown)

        try:
            drilldown_list = group.get('drilldown', [])
        except AttributeError:
            raise TypeError('Group is not of type dict')

        drilldown_index = self._find_drilldown(drilldown_list, clean_drilldown)

        if not drilldown_list or drilldown_index is None:
            # Nothing to update; fall back to adding the drilldown
            ret = self.add_drilldown(object_id, clean_drilldown, is_partial_update, **kwargs)
            return ret

        try:
            drilldown_list[drilldown_index].update(clean_drilldown)
        except IndexError:
            raise IndexError('Drilldown index of: {0} out of bounds for drilldown list.'.format(drilldown_index))
        except ValueError:
            raise ValueError('Non dictionary type given for drilldown.')
        except TypeError:
            raise TypeError('Drilldown index given is not an integer.')
        except AttributeError:
            raise AttributeError('Drilldown list item at index: {0} is not of type dict'.format(drilldown_index))

        ret = super(NotableEventGroup, self).update(object_id, {'drilldown': drilldown_list}, is_partial_update, **kwargs)

        return ret

    def delete_drilldown(self, object_id, drilldown, is_partial_update=True, **kwargs):
        """
        Delete a drilldown for a NotableEventGroup

        @type object_id: basestring
        @param object_id: object id

        @type drilldown: dict
        @param drilldown: drilldown to be deleted

        @type is_partial_update: bool
        @param is_partial_update: flag to do a partial update

        @type kwargs: dict
        @param kwargs: extra parameters

        @rtype: dict
        @return: dict which holds the updated keys
        """
        if not self.is_valid_drilldown(drilldown):
            raise ValueError('Drilldown data must have link and name')

        group = self.get(object_id)

        clean_drilldown = self._clean_drilldown(drilldown)

        try:
            drilldown_list = group.get('drilldown', [])
        except AttributeError:
            raise TypeError('Group is not of type dict')

        drilldown_index = self._find_drilldown(drilldown_list, clean_drilldown)

        if drilldown_index is None:
            raise KeyError('Drilldown with name: {0} not found.'.format(drilldown['name']))

        try:
            drilldown_list.pop(drilldown_index)
        except AttributeError:
            raise AttributeError('Drilldown list is not of type list.')
        except TypeError:
            raise TypeError('Drilldown index given is not an integer.')
        except IndexError:
            raise IndexError('Drilldown index of: {0} out of bounds for drilldown list.'.format(drilldown_index))

        ret = super(NotableEventGroup, self).update(object_id, {'drilldown': drilldown_list}, is_partial_update, **kwargs)

        return ret

    def check_to_send_break_group_event(self, group_id, **kwargs):
        """
        Check whether an event needs to be sent to break the group by looking
        through kwargs for a break group flag

        @type group_id: basestring
        @param group_id: the id of the group

        @type kwargs: dict
        @param kwargs: extra params
        """
        # If we detect a policy id for breaking the group, send an event to the
        # rules engine to break the group
        break_group_policy_id = kwargs.get('break_group_policy_id', False)
        if break_group_policy_id:
            self.send_break_group_event(group_id=group_id, policy_id=break_group_policy_id, **kwargs)

    def check_to_send_multiple_break_group_events(self, data_list, **kwargs):
        """
        Check whether events need to be sent to break groups by looking through
        kwargs for a break group flag

        @type data_list: list
        @param data_list: notable event groups

        @type kwargs: dict
        @param kwargs: extra params
        """
        # If we detect the break-multiple-groups flag, send events to the rules
        # engine to break the groups
        break_multiple_groups = kwargs.get('break_multiple_groups', False)
        if break_multiple_groups:
            self.send_multiple_break_group_events(group_list=data_list)

    def send_break_group_event(self, group_id, policy_id, **kwargs):
        """
        Sends an event to the itsi_tracked_alerts index to break a specified group

        @type group_id: basestring
        @param group_id: the id of the group to be broken

        @type policy_id: basestring
        @param policy_id: the id of the policy used to break the group

        @type kwargs: dict
        @param kwargs: extra params

        @return:
        """
        push_event_manager = PushEventManager(
            self.session_key,
            'Auto Generated ITSI Event Management Token'
        )
        event = {
            'event_id': str(uuid.uuid1()),
            'itsi_policy_id': policy_id,
            'itsi_group_id': group_id,
            'break_group_flag': True
        }
        event_info = self.get(group_id, **kwargs)
        if 'title' in list(event_info.keys()):
            macro_fields = ('title', 'description', 'severity', 'owner', 'status')
            # The 'itsi_notable_group_system' collection has a 'split_by_hash' field,
            # which we're not currently using. Instead, we're working with the
            # 'itsi_notable_group_user' collection. Since we don't plan to use
            # 'High Scale EA' soon, we've decided not to complicate the 'app-itsi'
            # Python code by making it read from 'itsi_notable_group_system'.
            # To keep things simple, we're adding the 'split_by_hash' field to
            # 'itsi_notable_group_user' on the 'High Scale EA' side. This change
            # minimizes the modifications needed in the existing Python code.
            # If this approach is later found to be unsuitable, we can revisit it.
            if push_event_manager.is_high_scale_ea_enabled:
                if 'split_by_hash' in event_info:
                    macro_fields += ('split_by_hash',)
            # List of ACE fields
            ace_fields = []
            if 'is_ace_enabled' in event_info:
                if 'itsi_group_ace_category_values' in event_info:
                    ace_fields.append('itsi_group_ace_category_values')
                if 'itsi_group_ace_text_values' in event_info:
                    ace_fields.append('itsi_group_ace_text_values')
            if ace_fields:
                macro_fields = macro_fields + tuple(ace_fields)
            event.update({key: event_info[key] for key in macro_fields})
        push_event_manager.push_event(event, source='itsi@internal@group_closing_event', time=str(get_current_utc_epoch()))
        if push_event_manager.is_high_scale_ea_enabled:
            event['source'] = 'itsi@internal@group_closing_event'
            event['_time'] = str(get_current_utc_epoch())
            if event.get('split_by_hash') is not None:
                event['itsi_split_by_hash'] = event['split_by_hash']
            push_event_manager.push_events_to_ingest_service([event])
        if push_event_manager.is_queue_mode_enabled:
            event['source'] = 'itsi@internal@group_closing_event'
            event['_time'] = str(get_current_utc_epoch())
            asyncio.run(self.push_events_to_nats([event]))

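    # Shape of the emitted break-group event (illustrative; the macro/ACE fields
    # are copied from the stored group when present):
    # {
    #     'event_id': '<uuid1>',
    #     'itsi_policy_id': '<policy id>',
    #     'itsi_group_id': '<group id>',
    #     'break_group_flag': True,
    #     'title': '<title>', 'description': '<description>',
    #     'severity': '<severity>', 'owner': '<owner>', 'status': '<status>'
    # }
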
    async def push_events_to_nats(self, events):
        """
        Publish the given events to the NATS JetStream 'itsi_tracked_alerts' subject.

        @type events: list
        @param events: events to publish
        """
        ssl_ctx = None
        try:
            cfm = ConfManager(self.session_key, 'SA-ITOA')
            conf = cfm.get_conf('itsi_nats')
            nats_settings = conf.get('nats_settings')
            # Get the NATS configuration
            tls_enabled = int(nats_settings.get('require_tls_client_cert', 1))
            auth_enabled = int(nats_settings.get('require_auth', 1))
            retention_max_age = int(nats_settings.get('retention_max_age', 3600))
            nats_server_connect_time = int(nats_settings.get('nats_server_connect_time', 5))
            nats_max_reconnect_attempts = int(nats_settings.get('nats_max_reconnect_attempts', 3))
            nats_reconnect_time_wait = int(nats_settings.get('nats_reconnect_time_wait', 5))
            nats_servers = nats_settings.get('nats_servers', '127.0.0.1:4222')
            list_of_nats_servers = nats_servers.split(',')
            # Get the NATS credentials from storage/passwords
            passwords_uri = "/services/storage/passwords/nats-admin?output_mode=json"
            credentials = get_nats_credentials(self.session_key, passwords_uri, auth_enabled)
            if credentials:
                # Split on the first ':' only, in case the password contains one
                username, password = credentials['clear_password'].split(':', 1)
                # Insert credentials before each server address
                prefixed_nats_servers = [f'nats://{username}:{password}@{server}' for server in list_of_nats_servers]
            else:
                # Simply use the NATS server url
                prefixed_nats_servers = [f'nats://{server}' for server in list_of_nats_servers]

            self.logger.info('NATS configuration: tls_enabled=%s, retention_max_age=%s, nats_servers=%s, '
                             'server_connect_time=%s, max_reconnect_attempts=%s, nats_reconnect_time_wait=%s',
                             tls_enabled, retention_max_age, nats_servers, nats_server_connect_time,
                             nats_max_reconnect_attempts, nats_reconnect_time_wait)
            if tls_enabled == 1:
                ssl_ctx = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
                client_cert = get_itsi_event_management_nats_certificate_value(self.session_key, 'nats_queue',
                                                                               'client_cert')
                client_cert_key = get_itsi_event_management_nats_certificate_value(self.session_key, 'nats_queue',
                                                                                   'client_cert_key')
                client_cert_path = make_splunkhome_path(['etc', 'auth', 'nats', client_cert])
                client_cert_key_path = make_splunkhome_path(['etc', 'auth', 'nats', client_cert_key])
                self.logger.info('SSL client certificate file location: %s, SSL client certificate key file location: %s', client_cert_path, client_cert_key_path)
                ssl_ctx.load_cert_chain(certfile=client_cert_path, keyfile=client_cert_key_path)
        except Exception as e:
            self.logger.exception('Failed to set up NATS client certificate: %s', e)
            raise Exception('Failed to set up NATS client certificate.')
        try:
            if ssl_ctx is None:
                self.nc = await nats.connect(servers=prefixed_nats_servers, connect_timeout=nats_server_connect_time,
                                             max_reconnect_attempts=nats_max_reconnect_attempts,
                                             reconnect_time_wait=nats_reconnect_time_wait)
            else:
                self.nc = await nats.connect(servers=prefixed_nats_servers, connect_timeout=nats_server_connect_time,
                                             max_reconnect_attempts=nats_max_reconnect_attempts,
                                             reconnect_time_wait=nats_reconnect_time_wait, tls=ssl_ctx)
            self.js = self.nc.jetstream()
            # Retention options: max_msgs=1000000, max_bytes=20000, max_age=3600.
            # For now, only set the max age (default: 1 hour).
            await self.js.add_stream(name='itsi_indexes', subjects=['itsi_tracked_alerts'],
                                     retention=nats.js.api.RetentionPolicy.LIMITS, max_age=retention_max_age)
            for event in events:
                await self.js.publish('itsi_tracked_alerts', json.dumps(event).encode())
            # Close the NATS connection
            await self.nc.close()
        except Exception as e:
            self.logger.exception('Failed to connect to nats-server: %s', e)
            raise Exception('Failed to connect to nats-server.')

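    # Illustrative itsi_nats.conf stanza consumed above (keys and defaults taken
    # from the reads in push_events_to_nats; not a complete spec):
    #
    #   [nats_settings]
    #   require_tls_client_cert = 1
    #   require_auth = 1
    #   retention_max_age = 3600
    #   nats_server_connect_time = 5
    #   nats_max_reconnect_attempts = 3
    #   nats_reconnect_time_wait = 5
    #   nats_servers = 127.0.0.1:4222
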
    def send_multiple_break_group_events(self, group_list):
        """
        Sends an event to the itsi_tracked_alerts index for each group to be broken

        @type group_list: list
        @param group_list: notable event groups

        @return:
        """
        for group in group_list:
            if 'break_group_policy_id' not in group:
                continue
            group_id = group.pop('_key', None)
            policy_id = group.pop('break_group_policy_id', None)
            self.send_break_group_event(group_id, policy_id, **group)

    def check_break_group_event_fields(self, data):
        """
        Check that the break group event has all of the required fields present

        @type data: dict
        @param data: data for the break group event

        @return:
        """
        macro_fields = ('title', 'description', 'severity', 'owner', 'status')
        for field in macro_fields:
            if field not in data:
                raise NotableEventBadRequest('Unable to send the event due to missing field %s in data %s' % (field, data))

    def is_valid_drilldown(self, drilldown):
        """
        Validation for a drilldown link.
        Must have a name and a link, and all values must be strings.

        @type drilldown: dict
        @param drilldown: drilldown to be validated

        @rtype: bool
        @return: True or False according to validation
        """
        if not isinstance(drilldown, dict):
            return False

        VALID_FIELDS = ('name', 'link')

        for field in VALID_FIELDS:
            if field not in drilldown:
                return False
            if not drilldown.get(field):
                return False
            if not isinstance(drilldown.get(field), str):
                return False

        return True

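    # Examples (illustrative):
    #   self.is_valid_drilldown({'name': 'Runbook', 'link': 'https://example.com/runbook'})  # True
    #   self.is_valid_drilldown({'name': 'Runbook'})                                         # False: missing 'link'
    #   self.is_valid_drilldown({'name': '', 'link': 'https://example.com'})                 # False: empty value
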
    def _clean_drilldown(self, drilldown):
        """
        Remove all non-whitelisted fields from the drilldown dict

        @type drilldown: dict
        @param drilldown: drilldown to clean

        @rtype: dict
        @return: cleaned drilldown
        """
        whitelisted_fields = [
            'name',
            'link'
        ]

        for key in list(drilldown.keys()):
            if key not in whitelisted_fields:
                del drilldown[key]

        return drilldown

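    # Example (illustrative):
    #   _clean_drilldown({'name': 'Runbook', 'link': 'https://example.com/runbook', 'extra': 1})
    # mutates the dict in place and returns
    #   {'name': 'Runbook', 'link': 'https://example.com/runbook'}
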
    def _find_drilldown(self, drilldown_list, drilldown):
        """
        Find a drilldown in the drilldown list by name

        @type drilldown_list: list
        @param drilldown_list: list of drilldowns

        @type drilldown: dict
        @param drilldown: drilldown to find

        @rtype: int
        @return: index of the found drilldown in the drilldown list, or None
        """
        for index, dd in enumerate(drilldown_list):
            if dd.get('name') == drilldown.get('name'):
                return index

        return None