You can not select more than 25 topics
Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
1794 lines
85 KiB
1794 lines
85 KiB
# Copyright (C) 2005-2024 Splunk Inc. All Rights Reserved.
|
|
|
|
import datetime
|
|
import re
|
|
import time
|
|
import json
|
|
import itsi_py3
|
|
|
|
import ITOA.itoa_common as utils
|
|
from ITOA.itoa_exceptions import ItoaError, ItoaValidationError
|
|
from ITOA.itoa_object import CRUDMethodTypes
|
|
from ITOA.service_tree import generate_subgraphs_json
|
|
from ITOA.setup_logging import InstrumentCall, logger
|
|
from itsi.objects.changehandlers.sandbox_base_service_template_delete_handler import SandboxServiceTemplateDeleteHandler
|
|
from itsi.objects.itsi_sandbox import ItsiSandbox, Status
|
|
from itsi.objects.itsi_sandbox_service import ItsiSandboxService, SandboxServiceStatus, SANDBOX_SERVICES_LIMIT
|
|
from itsi.objects.itsi_sandbox_sync_log import (
|
|
ItsiSandboxSyncLog, TYPE_VALIDATE, TYPE_PUBLISH, STATUS_NOT_STARTED, STATUS_IN_PROGRESS, STATUS_SUCCESS,
|
|
STATUS_FAILED,
|
|
ACTION_TYPE_SYNCHRONIZE
|
|
)
|
|
from itsi.objects.itsi_security_group import ItsiSecGrp
|
|
from itsi.objects.itsi_service import ItsiService
|
|
from itsi_py3 import _, string_type
|
|
from itsi.health_services.health_services_provider import Provider
|
|
from copy import deepcopy
|
|
from itsi.itsi_utils import SplunkMessageHandler
|
|
|
|
|
|
def sandbox_lock_and_update_count_factory(intermediate_status, update_count=True,
                                          multiple_sandbox_publish_validate_status_check=False):
    """
    Create a decorator to handle sandbox validation, locking, and unlocking automatically.

    Usage:
    ```
    @sandbox_lock_and_update_count_factory(desired_status_for_duration_of_foo)
    def foo():
    ```

    :param intermediate_status: Status that should be used while the decorated function runs
    :type: int (see Status in itsi_sandbox.py)

    :param update_count: boolean flag that would update the count of services and unique
        service templates in the service sandbox after the wrapped call
    :type: bool

    :param multiple_sandbox_publish_validate_status_check: when True, additionally verify that
        no other sandbox is currently publishing/validating before running the wrapped call
    :type: bool

    :return: Decorator to handle sandbox validation, locking, and unlocking automatically
    :type: function
    """

    def lock_and_update_count_sandbox(func):
        """
        This function is the decorator to handle sandbox validation, locking, and unlocking automatically.

        :param func: Function to use
        :type: function

        :return: Wrapping function to do the actual work
        :type: function
        """

        def lock_and_update_count_sandbox_function(obj, owner, sandbox_id, transaction_id, *args, **kwargs):
            """
            This is the actual wrapping function that will:
            1. Validate that the sandbox is in a ready state
            2. Lock the sandbox
            3. Perform some operation
            4. Unlock the sandbox for all cases except publish
            5. Set the status to partial publish, successful publish or edit based on the publish action result
            6. Throw any errors encountered

            Wrapped functions should begin with the same parameters as this function.

            Object should have a `sandbox_utils` variable.

            :param obj: SandboxServiceUtils (or similar) object to use
            :type: SandboxServiceUtils (or similar)

            :param owner: Owner of the objects
            :type: string

            :param sandbox_id: ID of the sandbox
            :type: string

            :param transaction_id: ID of transaction for tracebacks
            :type: string

            :return: Sandbox sync log creation response
            :rtype: dict
            """
            rv = None
            allowed_initial_statuses = Status.get_allowed_statuses(intermediate_status)

            # This is outside the try block to avoid locking and unlocking a non-existent sandbox, a sandbox
            # that the user shouldn't have permissions to, or a sandbox under lock by someone else.
            sandbox_status_before_action = obj.sandbox_utils.validate_and_return_current_sandbox_status(
                owner, sandbox_id, transaction_id=transaction_id,
                multiple_sandbox_publish_validate_status_check=multiple_sandbox_publish_validate_status_check,
                allowed_initial_statuses=allowed_initial_statuses)
            try:
                # Lock the sandbox by moving it into the intermediate status, then run the wrapped call.
                # NOTE: no `except` clause is needed here -- any exception from `func` propagates
                # naturally after the `finally` block restores the sandbox status (the previous
                # `except Exception as e: raise e` was a no-op re-raise).
                obj.sandbox_utils.sandbox_interface.update(owner, sandbox_id, data={'status': intermediate_status},
                                                           is_partial_data=True, transaction_id=transaction_id)
                rv = func(obj, owner, sandbox_id,
                          transaction_id, *args, **kwargs)
            finally:
                # Unlock the sandbox regardless of what happened above.
                # Edit is the default fall back status in case of any unknown case.
                if intermediate_status == Status.STATUS_SYNC:
                    data = {'status': sandbox_status_before_action}
                else:
                    data = {'status': Status.STATUS_EDIT}
                if intermediate_status == Status.STATUS_PUBLISH:
                    sandbox = obj.sandbox_interface.get(owner, sandbox_id)
                    log_link = obj.sandbox_utils.get_search_log_url(transaction_id)
                    sandbox_name = sandbox['identifying_name']
                    notification_type = SplunkMessageHandler.INFO
                    notification_message = "Services from %s published. %s for more details." % (sandbox_name, log_link)
                    sandbox_sync_filter = {'transaction_id': transaction_id}
                    results = obj.sandbox_sync_log_interface.get_bulk(
                        owner, filter_data=sandbox_sync_filter, fields=[
                            '_key', 'details', 'errors'], sort_key='timestamp', sort_dir='desc',
                        transaction_id=transaction_id,
                    )
                    # Parse the most recently updated record.
                    # NOTE(review): assumes a sync log record always exists for this transaction -- confirm.
                    sandbox_sync_log_record = results[0]
                    error_count = 0
                    if 'details' in sandbox_sync_log_record and 'errors' in sandbox_sync_log_record:
                        errors = sandbox_sync_log_record.get('errors', [])
                        data['last_published_record'] = {
                            'details': sandbox_sync_log_record['details'], 'errors': errors, 'publish_time': time.time()}

                        service_tree = sandbox_sync_log_record['details'].get(
                            'service_tree', {})
                        total_publish_service_count_attempted = 0
                        service_count_by_tree_id = {}
                        if service_tree and 'graphs' in service_tree and 'totalCount' in service_tree:
                            for error in errors:
                                if error['object_type'] == 'sandbox_service_tree':
                                    # A whole service tree failed: count every service in that tree.
                                    if len(service_count_by_tree_id) == 0:
                                        for tree in service_tree['graphs']:
                                            service_count_by_tree_id[tree['id']] = len(
                                                tree['vertices'])
                                    error_count += service_count_by_tree_id[error['_key']]
                                elif error['object_type'] == obj.sandbox_utils.sandbox_interface.object_type:
                                    # This signifies that all the services in the sandbox failed to publish
                                    # as part of bulk import
                                    error_count = service_tree['totalCount']
                                    break
                                else:
                                    # Assuming the error object type to be obj.sandbox_utils.service_interface.object_type
                                    error_count += 1
                            total_publish_service_count_attempted = service_tree['totalCount']
                        if error_count == 0:
                            # All services are published successfully
                            # The status of the sandbox will be set back to edit when
                            # a reset publish or revert publish call is made
                            data['status'] = Status.STATUS_PUBLISH_SUCCESS
                            notification_type = SplunkMessageHandler.INFO
                            notification_message = "Services from %s published. %s for more details." % (sandbox_name, log_link)
                        elif error_count < total_publish_service_count_attempted:
                            # Partial publish where some services were published, others were not
                            # The status of the sandbox will be set back to edit when
                            # a reset publish or revert publish call is made
                            data['status'] = Status.STATUS_PARTIAL_PUBLISH
                            notification_type = SplunkMessageHandler.WARNING
                            notification_message = "Some services in %s failed to publish. %s for more details." % (sandbox_name, log_link)
                        else:
                            # Do not save the publish record in case of failed publish.
                            # (Message grammar fixed: was "failed to published".)
                            data.pop('last_published_record')
                            notification_type = SplunkMessageHandler.ERROR
                            notification_message = "Services in %s failed to publish. %s for more details." % (sandbox_name, log_link)
                    obj.sandbox_utils.message_handler.post_or_update_message(
                        'service_sandbox_%s' % (
                            transaction_id), notification_type, notification_message
                    )

                if update_count:
                    count_of_services, count_of_unique_service_templates = obj.sandbox_utils.determine_counts(
                        owner, sandbox_id, transaction_id=transaction_id)
                    data['count_of_unique_service_templates'] = count_of_unique_service_templates
                    data['count_of_services'] = count_of_services

                obj.sandbox_utils.sandbox_interface.update(owner, sandbox_id, data=data,
                                                           is_partial_data=True, transaction_id=transaction_id)
            return rv

        return lock_and_update_count_sandbox_function

    return lock_and_update_count_sandbox
|
|
|
|
|
|
class SandboxUtils(object):
    """Helper around the sandbox CRUD interfaces: status validation, log links, and service counts."""

    def __init__(self, session_key, current_user_name):
        """
        @type: string
        @param session_key: splunkd session key used to construct the object interfaces

        @type: string
        @param current_user_name: user invoking this call
        """
        self._session_key = session_key
        self.current_user_name = current_user_name
        # Cache of base service templates; populated by callers when needed.
        self.base_service_templates = None
        self.sandbox_interface = ItsiSandbox(self._session_key, self.current_user_name)
        self.sandbox_service_interface = ItsiSandboxService(self._session_key, self.current_user_name)
        self.message_handler = SplunkMessageHandler(session_key)

    def validate_and_return_current_sandbox_status(self, owner, sandbox_id, transaction_id=None,
                                                   multiple_sandbox_publish_validate_status_check=False,
                                                   allowed_initial_statuses=None):
        """
        Validates that the sandbox exists and that its status is one of the allowed initial statuses.

        @type string
        @param owner: owner of the sandbox object

        @type string
        @param sandbox_id: key of the sandbox

        @type string
        @param transaction_id: Transaction ID for tracebacks

        @type bool
        @param multiple_sandbox_publish_validate_status_check: when True, also fail if any other
            sandbox is currently publishing/validating

        @type list
        @param allowed_initial_statuses: statuses the sandbox may currently be in;
            defaults to [Status.STATUS_EDIT]

        @rtype int
        @return: the sandbox's current status
        """
        # Avoid a shared mutable default argument; fall back to the Edit status.
        if allowed_initial_statuses is None:
            allowed_initial_statuses = [Status.STATUS_EDIT]
        current_status = self.sandbox_interface.validate_and_return_current_sandbox_status(
            owner, sandbox_id, transaction_id=transaction_id, allowed_initial_statuses=allowed_initial_statuses)
        if multiple_sandbox_publish_validate_status_check:
            if not self.allow_publish_validate(owner):
                self.sandbox_interface.raise_error_bad_validation(
                    logger,
                    'Concurrent publish/validate operation across different service sandboxes are not allowed',
                    409)
        return current_status

    def get_search_log_url(self, transaction_id):
        """
        Builds the search log url to be provided as part of the notification message

        @type string
        @param transaction_id: transaction id of the action on which the logs are to be retrieved

        @rtype string
        @return: Splunk message markup linking to an internal-index search for this transaction
        """
        # sid (search id) is dynamically generated as part of search created.
        build_url = 'app/itsi/search?q=search index="_internal" transaction_id=%s OR tid=%s&display.page.search.mode=smart&dispatch.sample_ratio=1&workload_pool=standard_perf&earliest=0&latest=now' % (
            transaction_id, transaction_id)
        log_url = '[[%s|View logs]]' % (build_url)
        return log_url

    def allow_publish_validate(self, owner, transaction_id=None):
        """
        Checks that no sandbox is currently in the publish or validate state.

        @type string
        @param owner: owner value

        @type string
        @param transaction_id: Transaction Id

        @rtype bool
        @return boolean value for allow publish and validate
        """
        results = self.sandbox_interface.get_bulk(
            'nobody',
            fields=['title', 'status'],
            filter_data={'$or': [
                {'status': Status.STATUS_PUBLISH},
                {'status': Status.STATUS_VALIDATE},
            ]}
        )
        if results:
            # Log the (first) blocking sandbox and deny the operation.
            entry = results[0]
            status_pretty = Status.pretty_string(entry['status'])
            logger.error('Publish/Validate operations are blocked as sandbox service="%s" has current status="%s"'
                         % (entry['title'], status_pretty))
            return False
        return True

    def determine_counts(self, owner, sandbox_id, transaction_id=None):
        """
        Determines the number of services and unique set of service templates used in a service sandbox.

        @type string
        @param owner: owner of the sandbox services

        @type string
        @param sandbox_id: key of the sandbox

        @type string
        @param transaction_id: Transaction ID for tracebacks

        @rtype tuple
        @return (count_of_services, count_of_unique_service_templates)
        """
        sandbox_id_filter = {'sandbox_id': sandbox_id}
        results = self.sandbox_service_interface.get_bulk_skip_enforce_security(
            owner, filter_data=sandbox_id_filter, fields=['_key', 'base_service_template_id'],
            transaction_id=transaction_id,
        )
        if not results:
            return 0, 0
        # Set comprehension instead of a side-effecting list comprehension;
        # services without a template id are skipped.
        service_templates = {
            result['base_service_template_id'] for result in results
            if result.get('base_service_template_id', False)
        }
        return len(results), len(service_templates)
|
|
|
|
|
|
class SandboxServiceUtils(object):
|
|
def __init__(self, session_key, current_user_name):
    """
    Constructor

    @type: string
    @param session_key: splunkd session key used to construct the object interfaces

    @type: string
    @param current_user_name: user invoking this call

    @rtype: None
    @return: None
    """
    self._session_key = session_key
    self.current_user_name = current_user_name
    # Service-template cache; stays None until a caller assigns it before batch saves.
    self.base_service_templates = None
    # CRUD interfaces: sandbox services, the sandbox itself, its sync log, and live services.
    self.sandbox_service_interface = ItsiSandboxService(self._session_key, self.current_user_name)
    self.sandbox_interface = ItsiSandbox(self._session_key, self.current_user_name)
    self.sandbox_sync_log_interface = ItsiSandboxSyncLog(self._session_key, self.current_user_name)
    self.service_interface = ItsiService(self._session_key, self.current_user_name)
    # Shared helper used by the lock/unlock decorator (expects a `sandbox_utils` attribute).
    self.sandbox_utils = SandboxUtils(self._session_key, self.current_user_name)
    self._instrumentation = InstrumentCall(logger)
    # Health score provider used by process_health().
    self.health_provider = Provider(session_key, logger)
|
|
|
|
@sandbox_lock_and_update_count_factory(Status.STATUS_ADD_SERVICE)
@InstrumentCall(logger)
def create_sandbox_service(self, owner, sandbox_id, transaction_id, data):
    """
    Method to create a sandbox service object

    @type: string
    @param owner: Owner of the object

    @type: string
    @param sandbox_id: ID of the sandbox to associate the service with

    @type: string
    @param transaction_id: Unique identifier for the transaction

    @type: dict
    @param data: Details of the object to be created

    @rtype: dict
    @return: response containing key of object created

    @raise ItoaError: when creating the service would exceed SANDBOX_SERVICES_LIMIT
    """
    logger.info('Sandbox logs before adding service="%s", transaction_id="%s"', data, transaction_id)
    sandbox_id_filter = {'sandbox_id': sandbox_id}
    results = self.sandbox_service_interface.get_bulk_skip_enforce_security(
        owner, filter_data=sandbox_id_filter, fields=['_key'], transaction_id=transaction_id,
    )
    # Create adds one new Service. If the current count + 1 exceeds the limit then error.
    # The message interpolates SANDBOX_SERVICES_LIMIT instead of hardcoding "250" so it
    # cannot drift from the enforced constant; implicit string concatenation also removes
    # the run of whitespace the old backslash line-continuation embedded in the message.
    if len(results) + 1 > SANDBOX_SERVICES_LIMIT:
        raise ItoaError(
            "You can only have a total of %s services in the sandbox. To continue creating services, "
            "delete one or more of the existing services or reduce the number of services."
            % SANDBOX_SERVICES_LIMIT,
            logger, status_code=400)
    # setting sync status as synced for the initial creation
    data['sync_status'] = '1'
    result = self.sandbox_service_interface.create(owner, data, transaction_id=transaction_id)
    service_key = result['_key']
    # Used to generate telemetry
    base_service_template_id = data.get('base_service_template_id')
    if base_service_template_id:
        template_log_str = ', base_service_template_id="%s"' % base_service_template_id
    else:
        template_log_str = ''
    logger.info('Successfully added service="%s" to Sandbox, transaction_id="%s"%s' % (service_key, transaction_id,
                                                                                       template_log_str))
    # Fall back to the default ITSI security group when no valid sec_grp string was supplied.
    if not isinstance(data.get('sec_grp'), string_type):
        data['sec_grp'] = self.sandbox_service_interface._get_security_enforcer(). \
            get_default_itsi_security_group_key()
    log_object = {
        'title': 'Service Added to Sandbox %s (%s)' % (self.sandbox_service_interface.object_type,
                                                       transaction_id),
        'action_type': 'Add Service',
        'details': data,
        'sec_grp': data['sec_grp'],
    }
    self.sandbox_sync_log_interface.create_record(
        owner, transaction_id, self.sandbox_interface.object_type, sandbox_id,
        log_object,
    )
    return result
|
|
|
|
def perform_sandbox_service_delete(self, owner, to_be_deleted_sandbox_id, transaction_id):
    """
    Delete every sandbox service belonging to the given sandbox.

    @type: string
    @param owner: Owner of the objects

    @type: string
    @param to_be_deleted_sandbox_id: key of the sandbox whose services are removed

    @type: string
    @param transaction_id: Transaction ID for tracebacks
    """
    # A single-clause '$or' is equivalent to the clause itself; filter on sandbox_id directly.
    delete_sandbox_filter = {'sandbox_id': to_be_deleted_sandbox_id}
    self.sandbox_service_interface.delete_bulk(owner, filter_data=delete_sandbox_filter,
                                               transaction_id=transaction_id)
|
|
|
|
def update_sandbox_services_status(
        self, transaction_id, updated_services_list, status
):
    """
    Performs bulk CRUD operation of sandbox services to update the status field.

    NOTE: mutates each dict in `updated_services_list` in place by setting its
    'sync_status' field before the batch save.

    @type string
    @param transaction_id: Transaction ID to associate logs with

    @type list
    @param updated_services_list: List of services to be updated

    @param status: sync status value stamped on every service (elsewhere in this file
        sync_status is a string such as '1')

    @rtype: None
    """
    for entry in updated_services_list:
        entry['sync_status'] = status
    # Partial batch update issued as 'nobody'; only the 'sync_status' field changes.
    self.sandbox_service_interface.save_batch(
        'nobody',
        updated_services_list,
        method=CRUDMethodTypes.METHOD_UPDATE,
        validate_names=True,
        is_partial_data=True,
        transaction_id=transaction_id)
|
|
|
|
@sandbox_lock_and_update_count_factory(Status.STATUS_SAVE)
@InstrumentCall(logger)
def save_sandbox_services(
        self, owner, sandbox_id, transaction_id, updated_services_list, deleted_services_list
):
    """
    Performs bulk CRUD operations on sandbox services.

    Execution order (the decorator puts the sandbox into Save mode first and back
    into Edit mode afterwards):
        1. update the sandbox services
        2. delete the sandbox services
        3. record the changes (and any error) plus the affected sandbox object in the sync log

    @type string
    @param owner: owner of the objects

    @type string
    @param sandbox_id: key of the sandbox being edited

    @type string
    @param transaction_id: Transaction ID to associate logs with

    @type list
    @param updated_services_list: List of services to be updated

    @type list
    @param deleted_services_list: List of service keys to be deleted; mutated in place so
        it only retains the keys that were actually removed

    @rtype: dict
    @return: ids of updated/deleted services, plus an 'error' entry when a step failed
        after some changes had already been applied
    """
    data = {}
    self.sandbox_service_interface.base_service_templates = self.base_service_templates
    if len(updated_services_list) == 0 and len(deleted_services_list) == 0:
        raise ItoaValidationError(
            _('The sandbox has no data. Check that the inputted parameters are valid and try again.'), logger)

    sandbox_sync_data = {
        'title': 'Sync Log for %s (%s)' % (self.sandbox_interface.object_type, transaction_id),
    }

    def _record_sync_log(details):
        # Persist the outcome (success or failure details) in the sandbox sync log.
        sandbox_sync_data['details'] = details
        sandbox_sync_data['sec_grp'] = self.set_first_assigned_security_group(owner, transaction_id, details)
        self.sandbox_sync_log_interface.create_record(
            owner, transaction_id, self.sandbox_interface.object_type, sandbox_id, sandbox_sync_data,
        )

    try:
        if updated_services_list:
            updated_keys = set([service_val['_key'] for service_val in updated_services_list])
            update_sandbox_service_filter = {'$or': [{'_key': sandbox_service_id} for sandbox_service_id in
                                                     updated_keys]}

            # Verify every key to be updated actually exists before saving anything.
            found_services = self.sandbox_service_interface.get_bulk(
                owner, sort_key=None, sort_dir=None, filter_data=update_sandbox_service_filter,
                fields=['_key'], skip=None, limit=None, req_source='unknown',
                transaction_id=transaction_id)
            found_keys = set([service_val['_key'] for service_val in found_services])
            updated_keys_difference = updated_keys.difference(found_keys)
            if updated_keys_difference:
                error_msg = _('{} key(s) could not be found').format(
                    ', '.join(updated_keys_difference))
                raise ItoaError(error_msg, logger, status_code=404)
            data['updated_sandbox_service_ids'] = self.sandbox_service_interface.save_batch(
                owner,
                updated_services_list,
                method=CRUDMethodTypes.METHOD_UPDATE,
                validate_names=True,
                is_partial_data=True,
                transaction_id=transaction_id)
        if deleted_services_list:
            delete_sandbox_service_filter = {'$or': [{'_key': sandbox_service_id}
                                                     for sandbox_service_id in deleted_services_list]}
            # transaction_id added for log correlation, consistent with every other bulk call here.
            self.sandbox_service_interface.delete_bulk(owner, filter_data=delete_sandbox_service_filter,
                                                       transaction_id=transaction_id)
            # Verify the deletes: anything still present is reported as a failure, and only
            # the keys that actually disappeared remain in deleted_services_list / the result.
            remnant_sandbox_services = self.sandbox_service_interface.get_bulk(
                owner, sort_key=None, sort_dir=None, filter_data=delete_sandbox_service_filter,
                fields=['title', '_key'], skip=None, limit=None, req_source='unknown',
                transaction_id=transaction_id)
            remnant_sandbox_services_titles = []
            for rem in remnant_sandbox_services:
                remnant_sandbox_services_titles.append(rem['title'])
                deleted_services_list.remove(rem['_key'])
            data['deleted_sandbox_service_ids'] = deleted_services_list
            if remnant_sandbox_services:
                error_msg = _('Failed to delete sandbox services. {}').format(
                    ', '.join(remnant_sandbox_services_titles))
                raise ItoaError(error_msg, logger, status_code=500)
        _record_sync_log(data)
    except Exception as e:
        # The previous separate `except ItoaError` / `except Exception` handlers were
        # byte-identical, so they are merged. If nothing was changed at all, surface the
        # failure directly; otherwise record the partial outcome with the error.
        if data.get('updated_sandbox_service_ids') is None and data.get('deleted_sandbox_service_ids') is None:
            raise
        data['error'] = str(e)
        _record_sync_log(data)
    return data
|
|
|
|
def set_first_assigned_security_group(self, owner, transaction_id, data):
    """
    Ensure `data` carries a security group key and return it.

    Fetches the security groups (teams) visible to the current user and stamps the
    first one found onto `data['sec_grp']` when none is set yet. The default
    security group is deliberately not used because itoteamadmin does not have
    permission on it.

    @type: string
    @param owner: owner context used when listing the teams

    @type: string
    @param transaction_id: Transaction ID for tracebacks

    @type: dict
    @param data: record to stamp; mutated in place when 'sec_grp' is missing

    @rtype: string
    @return: security grp key (None when no team with a key could be found)
    """
    if data.get('sec_grp') is None:
        team_interface = ItsiSecGrp(self._session_key, self.current_user_name)
        teams = team_interface.get_bulk(owner, transaction_id=transaction_id)
        # Take the first team that actually carries a key.
        first_team_key = next(
            (team.get('_key') for team in teams if team.get('_key') is not None),
            None)
        if first_team_key is not None:
            data['sec_grp'] = first_team_key
    return data.get('sec_grp')
|
|
|
|
def get_sandbox_service_keys_from_titles(self, owner, titles, sandbox_id, transaction_id=None):
    """
    Fetches the sandbox service keys from titles for a specific sandbox.

    @type: string
    @param owner: owner of the sandbox services

    @type: list
    @param titles: titles of the services for which the keys are retrieved; an empty
        list returns the keys of every service in the sandbox

    @type: string
    @param sandbox_id: ID of the sandbox for which the sandbox service is associated

    @type: string
    @param transaction_id: Transaction ID for tracebacks

    @rtype: list
    @return: list of dicts carrying the _key
    """
    base_filter = {'sandbox_id': sandbox_id}
    if titles:
        # Restrict to the requested titles within this sandbox.
        title_clauses = {'$or': [{'title': title} for title in titles]}
        query = {'$and': [base_filter, title_clauses]}
    else:
        query = base_filter
    return self.sandbox_service_interface.get_bulk(owner, filter_data=query, fields=['_key'],
                                                   transaction_id=transaction_id)
|
|
|
|
def get_all_services(self, owner, sandbox_id, transaction_id=None):
    """
    Fetches all the services that are part of a given sandbox.

    @type: string
    @param owner: owner of the sandbox services

    @type: string
    @param sandbox_id: ID of the sandbox for which the sandbox services are associated

    @type: string
    @param transaction_id: Transaction ID for tracebacks

    @rtype: list of dictionary
    @return: sandbox services (keys only) associated with the given sandbox
    """
    # Skip security enforcement so services across teams are all counted.
    return self.sandbox_service_interface.get_bulk_skip_enforce_security(
        owner,
        filter_data={'sandbox_id': sandbox_id},
        fields=['_key'],
        transaction_id=transaction_id,
    )
|
|
|
|
def get_sandbox_service_trees(self, owner, sandbox_id='default_itsi_sandbox', service_id_filter=None, services=None,
                              transaction_id=None):
    """
    Generates and returns graph of the sandbox service trees

    :param owner: Owner of the sandbox services
    :type: string

    :param sandbox_id: ID of sandbox to fetch services from
    :type: string

    :param service_id_filter: List of service IDs to filter to (None means no filter)
    :type: list

    :param services: List of services to merge with before calculation (None means none)
    :type list:

    :param transaction_id: ID of transaction for tracebacks
    :type: string

    :return: Graph of the sandbox services trees
    :type: dict
    """
    # Mutable default arguments ([]), shared across calls, replaced with None sentinels.
    if service_id_filter is None:
        service_id_filter = []
    if services is None:
        services = []
    # perform a partial fetch of services
    fields = ['title', '_key', 'services_depends_on']
    data = self.sandbox_service_interface.get_bulk(owner, filter_data={'sandbox_id': sandbox_id}, fields=fields,
                                                   transaction_id=transaction_id)

    # Replace any existing service data with any specifically provided data
    data = utils.combine_object_lists(data, services)
    return generate_subgraphs_json(data, service_id_filter=service_id_filter, partial_backup=False,
                                   max_depth_filter=-2, severity_filter_ids=[])
|
|
|
|
def process_health(self, services_by_id):
    """
    Process the services and generates the health result of the services

    :param services_by_id: services keyed by service id; each entry is expected to carry
        'kpis', 'dependencies' and a 'visited' flag (see
        get_sandbox_service_with_severity_urgency for the shape) -- TODO confirm callers
        always provide this shape
    :type: dict

    :return: The health scores of individual services provided, keyed by service id;
        each value is {'score': <result of health_provider.calculate_score>}
    :type: dict
    """
    health_result = {}

    def process_node(service_id, services_by_id):
        # Depth-first: score unvisited dependencies first, then fold their
        # (urgency, severity) pairs in with this service's own KPIs.
        service = services_by_id.get(service_id)
        severity_urgency_list = []
        service_dependencies = service.get('dependencies')
        for dep_service_key in service_dependencies:
            dep_service = services_by_id[dep_service_key]
            if not dep_service.get('visited'):
                # process the child node if not visited
                # NOTE(review): 'visited' is never set back to True here, so an unvisited
                # node shared by several parents is re-processed (idempotent but redundant),
                # and a dependency cycle would recurse without bound -- confirm inputs are acyclic.
                process_node(dep_service_key, services_by_id)
            for kpi_key in service_dependencies.get(dep_service_key):
                if kpi_key.startswith('SHKPI-'):
                    # Get the computed service health score severity value as the service health score of the
                    # depended service could change based on updates
                    split_sh_service_id = kpi_key.split('SHKPI-', 1)
                    dep_service_id = split_sh_service_id[1]
                    if health_result.get(dep_service_id) is None:
                        # Process the service health if the service health is not computed already
                        process_node(dep_service_id, services_by_id)
                    sev_name_value = health_result[dep_service_id]['score']['data']['health_severity_name']
                else:
                    sev_name_value = services_by_id[dep_service_key]['kpis'][kpi_key]['severity_name']
                val = {'urgency': service_dependencies[dep_service_key][kpi_key]['urgency'],
                       'severity_name': sev_name_value}
                severity_urgency_list.append(val)
        # If only SHKPI is associate with the service with no other kpi or dependend kpis,
        # then use value of the SHKPI to compute the health
        only_shkpi_present = len(service['kpis']) == 1 and len(service.get('dependencies')) == 0
        for kpi in service['kpis']:
            # Remove existing Service health score KPI for the calculation of the new Service health score
            if only_shkpi_present or "SHKPI-" + service_id != kpi:
                severity_urgency_list.append(service['kpis'][kpi])
        # deepcopy so later mutations of the provider's return value cannot alias into health_result.
        score = deepcopy(self.health_provider.calculate_score(severity_urgency_list))
        health_result[service_id] = { 'score' : score }

    for service_key in services_by_id:
        process_node(service_key, services_by_id)

    return health_result
|
|
|
|
@InstrumentCall(logger)
def process_services_health(self, services_by_id, nodes_to_process=None):
    """
    Generates and returns the simulated health scores of the sandbox services.

    :param services_by_id: The list of services and its dependency with the importance and severity levels
    :type: dict

    :param nodes_to_process: service nodes for which the service health must be simulated.
        Assumes all services are to be processed if an empty list (or None) is provided
    :type: list

    :return: Services and with severity level and urgency by id
    :type: dict
    """
    # Mutable default argument ([]) replaced with a None sentinel.
    if nodes_to_process is None:
        nodes_to_process = []
    if len(nodes_to_process) == 0:
        # Assume all nodes to process if nodes_to_process is empty
        for service in services_by_id:
            services_by_id[service]['visited'] = False
    else:
        # Only the parents of the touched nodes need to be re-scored;
        # clearing 'visited' makes process_health walk them again.
        for node in nodes_to_process:
            service = services_by_id[node]
            parents = service['parent']
            for parent in parents:
                if services_by_id.get(parent):
                    services_by_id[parent]['visited'] = False
    health_result = self.process_health(services_by_id)
    return health_result
|
|
|
|
def get_sandbox_service_with_severity_urgency(self, owner, sandbox_id, transaction_id=None):
    """
    Builds the per-service (urgency, severity) view used for health simulation.

    :param owner: Owner of the sandbox services
    :type: string

    :param sandbox_id: ID of sandbox to fetch services from
    :type: string

    :param transaction_id: ID of transaction for tracebacks
    :type: string

    :return: Services and with severity level and urgency by id; each entry is
        {'parent': [...], 'kpis': {...}, 'dependencies': {...}, 'visited': True}
    :type: dict
    """
    # perform a partial fetch of services
    fields = ['title', '_key', 'kpis.title', 'kpis.urgency',
              'kpis._key', 'kpis.severity_name', 'services_depends_on', 'services_depending_on_me']
    services = self.sandbox_service_interface.get_bulk(owner, filter_data={'sandbox_id': sandbox_id}, fields=fields,
                                                       transaction_id=transaction_id)
    service_urgency_severity_by_ids = {}
    for service in services:
        _key = service.get('_key')
        details = {}
        service_urgency_severity_by_ids[_key] = details
        details['parent'] = [parent['serviceid'] for parent in service.get('services_depending_on_me', [])]
        kpis = {}
        details['kpis'] = kpis
        # Default to [] so a service missing 'kpis' in the partial fetch
        # doesn't raise TypeError ("'NoneType' is not iterable").
        for kpi in service.get('kpis', []):
            severity_name = kpi.get('severity_name', 'normal')
            kpis[kpi['_key']] = {'urgency': kpi['urgency'], 'severity_name': severity_name}
        dependencies = {}
        details['dependencies'] = dependencies
        # Same [] defaults for the dependency fields.
        for dependency in service.get('services_depends_on', []):
            dep_kpis = {}
            for kpi in dependency.get('kpis_depending_on', []):
                # Default importance is set to 5 for all kpis except SHKPI
                urgency = 5
                if kpi.startswith('SHKPI-'):
                    urgency = 11
                severity_name = 'normal'
                if dependency.get('overloaded_urgencies'):
                    urgency = dependency.get('overloaded_urgencies').get(kpi, urgency)
                if dependency.get('overloaded_severities'):
                    severity_name = dependency.get('overloaded_severities').get(kpi, 'normal')
                dep_kpis[kpi] = {'urgency': urgency, 'severity_name': severity_name}
            dependencies[dependency['serviceid']] = dep_kpis
        # Everything starts visited; process_services_health clears the flag for nodes to re-score.
        details['visited'] = True
    return service_urgency_severity_by_ids
|
|
|
|
def filter_services_by_tree(self, tree, services):
    """
    Generate a list of services belonging to a service tree.

    :param tree: Representation of a service tree generated by `get_sandbox_service_trees()`
    :type: dict

    :param services: List of services
    :type: list of sandbox service objects

    :return: the subset of `services` whose '_key' appears among the tree's vertex ids,
        in the original order
    :type: list
    """
    # Membership set of the tree's vertex ids for O(1) lookups.
    tree_member_ids = {vertex['id'] for vertex in tree['vertices']}
    return [service for service in services if service['_key'] in tree_member_ids]
|
|
|
|
def get_sandbox_services_for_template(
        self,
        owner,
        template_id,
        filter_string,
        count,
        offset,
        sort_key,
        sort_dir,
        transaction_id=None,
):
    """
    Method to return the Sandbox services for template.

    :param owner: Owner of the sandbox services
    :type: string

    :param template_id: ID of template to fetch linked sandbox services
    :type: string

    :param filter_string: optional JSON string with `filter_string` (dict) and/or
        `sync_status` keys to further narrow the fetch
    :type: string

    :param count: Count of sandbox services to get
    :type: string

    :param offset: start of list of records to get
    :type: string

    :param sort_key: field to sort the KV store results on
    :type: string

    :param sort_dir: sort direction for the KV store results
    :type: string

    :param transaction_id: ID of transaction for tracebacks
    :type: string

    :return: List of sandbox services associated to template with sandbox details.
    :rtype: list
    """
    # perform fetch
    fields = ["title", "_key", "base_service_template_id", "sync_status", "sandbox_id", "identifying_name"]
    filter_data_value = {'$and': [{'base_service_template_id': template_id}]}
    if isinstance(filter_string, itsi_py3.string_type) and len(filter_string) > 0:
        filter_data = json.loads(filter_string)
        if filter_data.get('filter_string') is not None and isinstance(filter_data['filter_string'], dict):
            filter_data_value.update(filter_data["filter_string"])
        if filter_data.get('sync_status') is not None:
            filter_data_value.update({'sync_status': filter_data["sync_status"]})
    sandbox_service_data = self.sandbox_service_interface.get_bulk(
        owner,
        filter_data=filter_data_value,
        fields=fields,
        sort_key=sort_key,
        sort_dir=sort_dir,
        transaction_id=transaction_id,
    )
    sandbox_details_map = self.get_all_sandbox_details_in_map(transaction_id=transaction_id)

    # Annotate each service with its sandbox title and drop services whose
    # sandbox no longer exists. BUGFIX: the previous implementation called
    # list.remove() while iterating the same list, which skips the element
    # following each removal and could leave orphaned services in the result.
    # Building a new list avoids mutation-during-iteration entirely.
    surviving_services = []
    for sandbox_service in sandbox_service_data:
        if not sandbox_service:
            # Preserve original behavior: falsy entries were left untouched.
            surviving_services.append(sandbox_service)
            continue
        sandbox_object = sandbox_details_map.get(sandbox_service.get('sandbox_id'))
        if sandbox_object:
            sandbox_service['sandbox_title'] = sandbox_object.get('title')
            surviving_services.append(sandbox_service)
    sandbox_service_data = surviving_services

    if (count is not None) and (offset is not None):
        count = int(count)
        offset = int(offset)
        end_offset_requested = offset + count
        count_of_results = len(sandbox_service_data)

        if (count_of_results < offset) or (end_offset_requested < offset):
            raise Exception(
                _(
                    "Invalid range requested. offset: {0}, count: {1}, result set count: {2}"
                ).format(offset, count, count_of_results)
            )
        # Clamp the requested page to the actual result size.
        range_end = min(end_offset_requested, count_of_results)
        return sandbox_service_data[offset:range_end]
    return sandbox_service_data
|
|
|
|
def get_all_sandbox_details_in_map(self, transaction_id=None):
    """
    Create the mapping of sandbox_id and sandbox_object.

    :param transaction_id: ID of transaction for tracebacks
    :type: string

    :return: dict of sandbox id and sandbox object
    :rtype: dict
    """
    # Fetch only the fields needed for the map, then index the records by key.
    all_sandbox_objects = self.sandbox_interface.get_bulk(
        'nobody', fields=["title", "_key"], transaction_id=transaction_id)
    return {sandbox['_key']: sandbox for sandbox in all_sandbox_objects}
|
|
|
|
def convert_sandbox_services_to_services(self, sandbox_services, transaction_id, backfill_enabled=False):
    """
    Convert sandbox service objects to service objects in format.

    NOTE: the sandbox service dicts are mutated IN PLACE — callers receive the
    converted objects through the same list they passed in.

    :param sandbox_services: List of sandbox services to convert
    :type: list of sandbox service objects

    :param transaction_id: Transaction ID of operation
    :type: string

    :param backfill_enabled: when True, mark every non-service-health KPI for backfill
    :type: bool

    :return: tuple of List of errors encountered and service-to-base service template map
    :rtype: tuple of list of dicts
    """
    errors = []
    service_base_service_template_map = {}
    # One timestamp for the whole batch so all publish_history entries agree.
    current_time = time.time()
    for sandbox_service in sandbox_services:
        try:
            # Clean up sandbox service: retag as a real service and record
            # where/when it was published.
            sandbox_service['object_type'] = self.service_interface.object_type
            sandbox_service['is_created_from_sandbox'] = True
            publish_history = sandbox_service.get('publish_history', [])
            publish_history.append({
                'sandbox_id': sandbox_service['sandbox_id'],
                'publish_time': current_time,
                'transaction_id': transaction_id,
            })
            sandbox_service['publish_history'] = publish_history
            # Strip every sandbox-only field before handing the object to the
            # real service interface.
            for field in self.sandbox_service_interface.SANDBOX_SERVICE_SPECIFIC_FIELDS:
                try:
                    # Note update this to more generic code if there is a requirement to delete a nested object other than kpis
                    if field.startswith('kpis.'):
                        # A 'kpis.<subfield>' entry means: remove <subfield>
                        # from every KPI dict rather than from the service.
                        values = field.split(".")
                        if len(values) == 2:
                            kpis = sandbox_service.get(values[0])
                            for kpi in kpis:
                                kpi.pop(values[1], None)
                    else:
                        del sandbox_service[field]
                except KeyError:
                    # Field absent on this service — nothing to strip.
                    continue
            # enable kpi backfill during publish
            kpis = sandbox_service.get('kpis', [])
            if backfill_enabled:
                for kpi in kpis:
                    # Service-health KPIs (SHKPI-*) are never backfilled.
                    if not kpi.get('_key', "").startswith('SHKPI-'):
                        kpi['backfill_enabled'] = True
            self.service_interface.ensure_required_fields([sandbox_service])
            service_base_service_template_map[sandbox_service['_key']] = \
                sandbox_service.get('base_service_template_id', '')
        except Exception as e:
            # Record the failure for this service and keep converting the rest.
            errors.append({
                '_key': sandbox_service['_key'],
                'object_type': self.sandbox_service_interface.object_type,
                'description': str(e),
            })
    return errors, service_base_service_template_map
|
|
|
|
@sandbox_lock_and_update_count_factory(Status.STATUS_VALIDATE, update_count=False,
                                       multiple_sandbox_publish_validate_status_check=True)
@InstrumentCall(logger)
def validate_sandbox(self, owner, sandbox_id, transaction_id):
    """
    Perform a best-effort validation that the sandbox has no issues and the sandbox services can be readily
    converted into real services.

    Note: This would be a good candidate for conversion to an asynchronous action when (if?) the refresh queue is
    able to handle priority.

    :param owner: Owner of the objects
    :type: string

    :param sandbox_id: ID of the sandbox
    :type: string

    :param transaction_id: ID of transaction for tracebacks
    :type: string

    :return: Sandbox sync log creation response
    :rtype: dict
    """
    log_update_timestamp = time.time()
    record = {
        'action_type': TYPE_VALIDATE,
        'action_status': STATUS_IN_PROGRESS,
        'errors': [],
    }
    self.sandbox_sync_log_interface.create_record(
        owner, transaction_id, self.sandbox_interface.object_type, sandbox_id, record,
    )
    service_trees = {}
    sandbox_services = self.sandbox_service_interface.get_bulk(
        owner, filter_data={'sandbox_id': sandbox_id}, transaction_id=transaction_id,
    )
    if not sandbox_services:
        record['errors'].append({
            '_key': sandbox_id,
            'object_type': self.sandbox_interface.object_type,
            'description': 'No sandbox services to validate.',
        })
    else:
        # Bulk service checks
        if not record['errors']:
            try:
                # error_key / error_object_type track what the current step is
                # operating on so the except clause can attribute the failure.
                error_key = sandbox_id
                error_object_type = self.sandbox_interface.object_type
                title_conflict_dict = {}
                try:
                    # Bulk check (moved from do_object_validation())
                    self.service_interface.validate_identifying_name(
                        owner, sandbox_services, transaction_id=transaction_id, ignore_same_key=True,
                    )
                except ItoaValidationError as e:
                    # Look for more detailed information about duplicate title conflicts
                    if "Duplicate object name(s) found" in str(e):
                        # Check for title conflicts in bulk
                        case_insensitive_filter = {
                            '$or': [
                                {
                                    'title': {
                                        '$regex': re.escape(service['title']),
                                        # Case-insensitive regex flag
                                        '$options': 'i',
                                    },
                                } for service in sandbox_services
                            ],
                        }
                        title_conflicts = self.service_interface.get_bulk(
                            owner, filter_data=case_insensitive_filter, fields=['_key', 'title'],
                        )
                        for service in title_conflicts:
                            title_conflict_dict[service['title'].lower()] = service
                    # Throw miscellaneous errors
                    else:
                        raise e

                services_simplified = []
                for sandbox_service in sandbox_services:
                    # Clean up sandbox service
                    # Per service checks similar to convert_sandbox_services_to_services() but streamlined
                    error_key = sandbox_service['_key']
                    error_object_type = self.sandbox_service_interface.object_type
                    sandbox_service['object_type'] = self.service_interface.object_type
                    for field in self.sandbox_service_interface.SANDBOX_SERVICE_SPECIFIC_FIELDS:
                        try:
                            del sandbox_service[field]
                        except KeyError:
                            continue
                    self.service_interface.ensure_required_fields([sandbox_service])

                    services_simplified.append({
                        '_key': sandbox_service['_key'],
                        'title': sandbox_service['title'],
                        'services_depends_on': sandbox_service['services_depends_on'],
                    })

                    # Conflicting service checks
                    # We check over all sandbox services instead of over all conflicts to catch multiple sandbox
                    # services that have the same conflict
                    conflicting_service = title_conflict_dict.get(sandbox_service['title'].lower())
                    if conflicting_service and (conflicting_service['sec_grp'] == sandbox_service['sec_grp']):
                        record['errors'].append({
                            '_key': sandbox_service['_key'],
                            'object_type': self.sandbox_service_interface.object_type,
                            'description':
                                'A conflicting title (%s) was found between sandbox service %s and service %s.' % (
                                    conflicting_service['title'], sandbox_service['_key'],
                                    conflicting_service['_key'],
                                )
                        })

                service_trees = generate_subgraphs_json(services_simplified)

                # Check each graph individually
                self.service_interface.set_persisted_services_caching(True)
                record['details'] = {
                    'services_processed': 0,
                    'trees_processed': 0,
                }

                for tree in service_trees['graphs']:
                    tree_services = self.filter_services_by_tree(tree, sandbox_services)
                    error_key = tree['id']
                    # This is not a real object
                    error_object_type = 'sandbox_service_tree'
                    # Ensure no errors are present in the object setup
                    self.service_interface.do_object_validation(
                        owner, tree_services, validate_name=False, transaction_id=transaction_id,
                        ignore_same_key=True,
                    )
                    self.service_interface.do_additional_setup(
                        owner, tree_services, method=CRUDMethodTypes.METHOD_CREATE,
                        transaction_id=transaction_id,
                    )
                    # Ensure no errors are present in the dependency generation
                    self.service_interface.identify_dependencies(
                        owner, tree_services, CRUDMethodTypes.METHOD_CREATE, transaction_id=transaction_id,
                        dry_run=True,
                    )
                    # Update the service cache
                    # BUGFIX: the previous code used a single dict comprehension
                    # ([{'_key': s['_key'] for s in tree_services}]) which produced
                    # one dict holding only the LAST service's key. Build one
                    # {'_key': ...} dict per service instead.
                    ids = [{'_key': service['_key']} for service in tree_services]
                    self.service_interface.post_save_setup(
                        owner, ids, tree_services, req_source="validate_sandbox",
                        method=CRUDMethodTypes.METHOD_CREATE, transaction_id=transaction_id, dry_run=True,
                    )

                    record['details']['services_processed'] += len(tree_services)
                    record['details']['trees_processed'] += 1

                    # Update every 5 seconds (in cadence with front-end pull)
                    current_time = time.time()
                    if current_time - log_update_timestamp > 5:
                        self.sandbox_sync_log_interface.update_record(owner, transaction_id, record)
                        log_update_timestamp = current_time
            except Exception as e:
                record['errors'].append({
                    '_key': error_key,
                    'object_type': error_object_type,
                    'description': str(e),
                })
            finally:
                # Always drop the validation-time service cache, pass or fail.
                self.service_interface.set_persisted_services_caching(False)
    if record['errors']:
        record['action_status'] = STATUS_FAILED
    else:
        record['action_status'] = STATUS_SUCCESS
        record['details'] = {
            'service_tree': service_trees,
        }
    return self.sandbox_sync_log_interface.update_record(owner, transaction_id, record)
|
|
|
|
@sandbox_lock_and_update_count_factory(Status.STATUS_PUBLISH, update_count=True,
                                       multiple_sandbox_publish_validate_status_check=True)
@InstrumentCall(logger)
def publish_sandbox(self, owner, sandbox_id, transaction_id, skip_validate_identifying_name=False):
    """
    Publish the sandbox as services.

    Note: This would be a good candidate for conversion to an asynchronous action when (if?) the refresh queue is
    able to handle priority.

    :param owner: Owner of the objects
    :type: string

    :param sandbox_id: ID of the sandbox
    :type: string

    :param transaction_id: ID of transaction for tracebacks
    :type: string

    :param skip_validate_identifying_name: skip performing validate_identifying_name
    :type: bool

    :return: Sandbox sync log creation response
    :rtype: dict
    """
    # Publish must work against live KV store state, not a cached view.
    self.service_interface.set_persisted_services_caching(False)
    log_update_timestamp = time.time()
    record = {
        'action_type': TYPE_PUBLISH,
        'action_status': STATUS_IN_PROGRESS,
        'errors': [],
    }
    self.sandbox_sync_log_interface.create_record(
        owner, transaction_id, self.sandbox_interface.object_type, sandbox_id, record,
    )
    sandbox_detail = self.sandbox_interface.get(owner, sandbox_id)
    # Variable initialization to make error-handling cleaner
    service_trees = {}
    published_service_ids = []
    failed_service_ids = []

    # Actual publishing
    sandbox_services = self.sandbox_service_interface.get_bulk(
        owner, filter_data={'sandbox_id': sandbox_id}, transaction_id=transaction_id,
    )
    if not sandbox_services:
        record['errors'].append({
            '_key': sandbox_id,
            'object_type': self.sandbox_interface.object_type,
            'description': 'No sandbox services to publish.',
        })
    else:
        try:
            # Reduce each service to the minimum shape the tree builder needs.
            services_simplified = [{
                '_key': service['_key'],
                'title': service['title'],
                'services_depends_on': service['services_depends_on'],
            } for service in sandbox_services]
            service_trees = generate_subgraphs_json(services_simplified)
        except Exception as e:
            record['errors'].append({
                '_key': sandbox_id,
                'object_type': self.sandbox_interface.object_type,
                'description': 'Error encountered in sandbox service tree generation: %s' % str(e),
            })

        # Format conversion
        backfill_enabled = sandbox_detail.get('backfill_enabled', False)
        errors, service_base_service_template_map = \
            self.convert_sandbox_services_to_services(
                sandbox_services, transaction_id, backfill_enabled)
        record['errors'].extend(errors)

        if not skip_validate_identifying_name:
            try:
                # Bulk check (moved from do_object_validation())
                self.service_interface.validate_identifying_name(
                    owner, sandbox_services, transaction_id=transaction_id, ignore_same_key=True,
                )
            except Exception as e:
                record['errors'].append({
                    '_key': sandbox_id,
                    'object_type': self.sandbox_interface.object_type,
                    'description': 'Error encountered in sandbox services bulk object validation: %s' % str(e),
                })

        # Only attempt the actual save when every preliminary step was clean.
        if not record['errors']:
            record['details'] = {
                'services_processed': 0,
                'trees_processed': 0,
            }
            service_ids = None
            # Each dependency tree publishes (and fails) as a unit.
            for tree in service_trees.get('graphs', []):
                try:
                    tree_services = self.filter_services_by_tree(tree, sandbox_services)
                    service_ids = self.service_interface.save_batch_via_publish(
                        owner, tree_services, False, transaction_id=transaction_id,
                    )

                    record['details']['services_processed'] += len(tree_services)
                    record['details']['trees_processed'] += 1

                    # Update every 10 seconds (in cadence with front-end pull)
                    # This is longer than in the Validate stage because the user is expected to wait longer here
                    current_time = time.time()
                    if current_time - log_update_timestamp > 10:
                        self.sandbox_sync_log_interface.update_record(owner, transaction_id, record)
                        log_update_timestamp = current_time
                except Exception as e:
                    record['errors'].append({
                        '_key': tree['id'],
                        # This is not a real object
                        'object_type': 'sandbox_service_tree',
                        'description': 'Publish failed: %s' % str(e),
                    })
                    # service_ids is only truthy if the save for THIS tree got
                    # far enough to return ids before something later failed.
                    if service_ids:
                        failed_service_ids.extend(service_ids)
                else:
                    published_service_ids.extend(service_ids)
                # Reset so the next iteration's except clause cannot pick up
                # this tree's ids.
                service_ids = []

    # Post-publish cleanup
    # Remove failed sandbox services from the production environment
    try:
        if failed_service_ids:
            filter_data = {'$or': [{'_key': service_id} for service_id in failed_service_ids]}
            self.service_interface.delete_bulk(owner, filter_data=filter_data, transaction_id=transaction_id)
    except Exception as e:
        for service_id in failed_service_ids:
            # This is technically inaccurate, but this exists to provide clearer information to the user
            record['errors'].append({
                '_key': service_id,
                'object_type': self.service_interface.object_type,
                'description': 'Error encountered in failed services clean-up: %s' % str(e),
            })
    if published_service_ids:
        # Map the service template with the list of service ids being published
        service_templates_services_map = {}
        for service_id in published_service_ids:
            service_template_id = service_base_service_template_map.get(service_id)
            if service_template_id not in service_templates_services_map.keys():
                service_templates_services_map[service_template_id] = []
            service_templates_services_map[service_template_id].append(service_id)
        service_template_get_filter = {'$or': []}
        for key in service_templates_services_map.keys():
            service_template_get_filter['$or'].append({'_key': key})
        # Fetch the persisted base service template and update the linked_services value with the published service
        # Necessary evil
        from itsi.objects.itsi_service_template import ItsiBaseServiceTemplate
        service_template_interface = ItsiBaseServiceTemplate(self._session_key, self.current_user_name)
        base_service_templates = service_template_interface.get_bulk(owner, filter_data=service_template_get_filter,
                                                                     transaction_id=transaction_id)
        for base_service_template in base_service_templates:
            if 'linked_services' not in base_service_template:
                base_service_template['linked_services'] = []
            linked_services = base_service_template['linked_services']
            for service_key in service_templates_services_map.get(base_service_template.get('_key')):
                if service_key in linked_services:
                    continue
                linked_services.append(service_key)
            # update the base service template with the updated linked service count
            base_service_template['total_linked_services'] = len(base_service_template['linked_services'])
        try:
            service_template_interface.batch_save_backend(
                owner, base_service_templates, transaction_id=transaction_id)
        except Exception as error:
            # Template bookkeeping failure is logged but does not fail the
            # publish — the services themselves are already saved.
            logger.exception(
                f'Exception while saving service template interface {error}')

    if record['errors']:
        record['action_status'] = STATUS_FAILED
    else:
        record['action_status'] = STATUS_SUCCESS
        record['details'] = {
            'service_tree': service_trees,
        }
    return self.sandbox_sync_log_interface.update_record(owner, transaction_id, record)
|
|
|
|
def create_sync_records_for_synchronize(self, transaction_id, service_template, affected_sandbox_services, change):
    """
    Create the Sync log record for the Synchronize process that is used to
    sync the template updation with the Sandbox service.

    :param transaction_id: ID of transaction for tracebacks
    :type: string

    :param service_template: service_template object
    :type: dict

    :param affected_sandbox_services: list of affected services
    :type: list

    :param change: dict object of the change with details
    :type: dict

    :return: Sandbox sync log creation response
    :rtype: dict
    """
    change_detail = change.get('change_detail')
    # Updating the Sandbox services status to pending
    self.update_sandbox_services_status(transaction_id, affected_sandbox_services,
                                        SandboxServiceStatus.STATUS_SERVICE_SYNC_PENDING)
    mapped_sandbox_with_services = self.get_sandbox_services_mapping(affected_sandbox_services)
    for sandbox_id, sandbox_services in mapped_sandbox_with_services.items():
        # NOTE(review): the shared `change` dict is mutated on every iteration
        # and each record's 'details' references that same dict — this is only
        # correct if create_record serializes the record at call time; verify.
        change_detail[service_template.get('_key')]['affected_sandbox_services'] = sandbox_services
        change_detail['object_type'] = service_template.get('object_type')
        record = {'details': change, 'action_type': ACTION_TYPE_SYNCHRONIZE, 'action_status': STATUS_NOT_STARTED}
        self.sandbox_sync_log_interface.create_record(
            "nobody", transaction_id, 'sandbox', sandbox_id, record
        )
|
|
|
|
def get_sandbox_services_mapping(self, affected_sandbox_services):
    """
    Group the affected sandbox service keys by their parent sandbox.

    :param affected_sandbox_services: list of affected services
    :type: list

    :return: dict of sandbox id mapped to the list of its service keys
    :rtype: dict
    """
    grouped_by_sandbox = {}
    for sandbox_service in affected_sandbox_services:
        # setdefault creates the bucket on first sight of each sandbox id.
        grouped_by_sandbox.setdefault(sandbox_service['sandbox_id'], []).append(sandbox_service['_key'])
    return grouped_by_sandbox
|
|
|
|
def create_sync_records_for_delete_synchronize(self, transaction_id, service_template, affected_sandbox_services,
                                               change):
    """
    Create the Sync log record for the Synchronize process that is used to
    sync the template deletion with the Sandbox service.

    :param transaction_id: ID of transaction for tracebacks
    :type: string

    :param service_template: service_template object
    :type: dict

    :param affected_sandbox_services: list of affected services
    :type: list

    :param change: dict object of the change with details
    :type: dict

    :return: Sandbox sync log creation response
    :rtype: dict
    """
    # One sync-log record per distinct sandbox that owns an affected service.
    for sandbox_id in self.get_sandbox_services_mapping(affected_sandbox_services):
        record = {
            'details': change,
            'action_type': ACTION_TYPE_SYNCHRONIZE,
            'action_status': STATUS_NOT_STARTED,
        }
        self.sandbox_sync_log_interface.create_record(
            'nobody', transaction_id, 'sandbox', sandbox_id, record
        )
|
|
|
|
def get_all_sandbox_that_needs_sync(self, owner):
    """
    Method to return all the sandbox objects that need sync.

    :param owner: owner of objects
    :type: string

    :return: sandbox details that need sync
    :rtype: list
    """
    # A sandbox needs sync when a synchronize action is pending or failed.
    pending_or_failed = {"$or": [{"action_status": STATUS_NOT_STARTED},
                                 {"action_status": STATUS_FAILED}]}
    filter_data = {"$and": [{"action_type": ACTION_TYPE_SYNCHRONIZE}, pending_or_failed]}
    return self.sandbox_sync_log_interface.get_bulk(owner, fields=["_key", "title"], filter_data=filter_data)
|
|
|
|
def needs_sandbox_synchronize(self, owner, sandbox_id, last_mod_timestamp):
    """
    Checks to ensure that sandbox needs to be synched or not.

    :param owner: Owner of the objects
    :type: string

    :param sandbox_id: ID of the sandbox
    :type: string

    :param last_mod_timestamp: Last modified time of the sandbox that is loaded in the UI
    :type: float (UTC time)

    :return: if the sandbox needs to perform a sync and/or browser needs a refresh or none
    :rtype: dict
    """
    # validate the current status of sandbox. If already synching then return synching status
    sandbox_detail = self.sandbox_interface.get(owner, sandbox_id)
    needs_browser_refresh = False
    if not sandbox_detail:
        logger.error(
            "Unable to retrieve the sandbox details sandbox_id=%s",
            sandbox_id)
        # Raises a 404 back to the caller; nothing below runs.
        self.raise_error_bad_validation(
            logger, 'Unable to retrieve the sandbox details. Check input.', 404)

    sandbox_status = sandbox_detail.get('status', Status.STATUS_EDIT)
    backfill_enabled, backfill_enabled_services_unused = self.get_backfill_enabled_value(owner, sandbox_id, sandbox_detail)

    # Compare the last modified time with the modified time of sandbox.
    # if Sandbox modified time value is greater than the last modified time
    # then set needs_browser_refresh to true
    if last_mod_timestamp is None:
        # assumes get_current_timestamp_utc() returns an ISO-format string —
        # TODO confirm (it is fed to str.replace and fromisoformat below)
        last_mod_timestamp = utils.get_current_timestamp_utc()
    # '+' is converted to ' ' through the web interface
    last_mod_timestamp = last_mod_timestamp.replace(' ', '+')

    sandbox_mod_time_epoch = int(datetime.datetime.fromisoformat(
        sandbox_detail.get('mod_timestamp')).timestamp())
    loaded_sandbox_mod_time_epoch = int(
        datetime.datetime.fromisoformat(last_mod_timestamp).timestamp())
    if sandbox_mod_time_epoch > loaded_sandbox_mod_time_epoch:
        needs_browser_refresh = True
    allow_publish_validate = self.sandbox_utils.allow_publish_validate(owner)
    sync_response = {
        "sandbox_status": sandbox_status,
        "needs_browser_refresh": needs_browser_refresh,
        "allow_publish_validate": allow_publish_validate,
        "backfill_enabled": backfill_enabled
    }
    return sync_response
|
|
|
|
def get_backfill_enabled_value(self, owner, sandbox_id, sandbox_detail=None):
    """
    Determines if the services published from sandbox are backfill_enabled or not.

    :param owner: Owner of the objects
    :type: string

    :param sandbox_id: Key of the sandbox
    :type: string

    :param sandbox_detail: sandbox object (fetched from the interface when omitted)
    :type: dict

    :return: boolean value to determine if backfill is enabled and list of services with backfill enabled kpis
    :rtype: bool, list of objects
    """
    backfill_enabled_services = []
    if sandbox_detail is None:
        sandbox_detail = self.sandbox_interface.get(owner, sandbox_id)
    backfill_enabled = sandbox_detail.get('backfill_enabled', False)
    sandbox_status = sandbox_detail.get('status', Status.STATUS_EDIT)
    # Even when the sandbox itself does not claim backfill, a published (or
    # partially published) sandbox may have produced services whose KPIs were
    # individually backfill-enabled — check those before reporting False.
    if not backfill_enabled and (sandbox_status == Status.STATUS_PUBLISH_SUCCESS or sandbox_status == Status.STATUS_PARTIAL_PUBLISH):
        last_published_record = sandbox_detail.get(
            'last_published_record', {})
        services_to_be_reverted, failed_to_publish_services = self.find_services_by_publish_status(
            last_published_record)
        if services_to_be_reverted:
            # Look for any successfully published service carrying at least
            # one backfill-enabled KPI.
            filter_data = {"$or": [{"_key": service_id}
                                   for service_id in services_to_be_reverted]}
            backfill_filter = {"kpis.backfill_enabled": True}
            and_filter = {"$and": [filter_data, backfill_filter]}
            backfill_enabled_services = self.service_interface.get_bulk_skip_enforce_security(
                owner, fields=['title'], filter_data=and_filter)
            if len(backfill_enabled_services) > 0:
                backfill_enabled = True
    return backfill_enabled, backfill_enabled_services
|
|
|
|
@sandbox_lock_and_update_count_factory(Status.STATUS_SYNC)
@InstrumentCall(logger)
def synchronize_sandbox(self, owner, sandbox_id, change, transaction_id):
    """
    Synchronize the updates from Service Template or teams with Sandbox.
    We will be locking the sandbox and the status will be set to Sync.

    :param owner: Owner of the objects
    :type: string

    :param sandbox_id: ID of the sandbox
    :type: string

    :param change: change record describing what to synchronize
        (expects 'changed_object_type' and 'change_type' keys)
    :type: dict

    :param transaction_id: ID of transaction for tracebacks
    :type: string

    :return: boolean of the operation completion
    :rtype: boolean
    """
    sandbox_sync_completed = False
    # Get the list of changes from the sandbox sync log
    # Check the sandbox sync log for the presence of action_type as synchronize .
    # Imported here (not at module level), presumably to avoid a circular
    # import with the change-handler module.
    from itsi.objects.changehandlers.sandbox_base_service_template_update_handler import SandboxBaseServiceTemplateUpdateHandler
    try:
        if (change.get("changed_object_type") == "base_service_template") and (
                change.get("change_type") == "sandbox_base_service_template_update"):
            # Call the sandbox update handler to perform the sync operation with the affected services
            sandbox_sync_completed = SandboxBaseServiceTemplateUpdateHandler(logger,
                                                                             self._session_key).perform_template_sync(
                change=change, scheduled_for_later=False)
        elif (change.get("changed_object_type") == "base_service_template") and (
                change.get("change_type") == "sandbox_delete_base_service_template"):
            # Template deletion is delegated to the delete handler's deferred path.
            sandbox_sync_completed = SandboxServiceTemplateDeleteHandler(logger,
                                                                        self._session_key).deferred(
                change=change, transaction_id=transaction_id)
    except BaseException as e:
        # Log and re-raise so the decorator can unlock the sandbox.
        logger.error('Synchronizing not Completed for Sandbox sandboxid=%s', sandbox_id)
        raise e
    logger.info('Synchronizing Completed for Sandbox sandboxid=%s', sandbox_id)
    return sandbox_sync_completed
|
|
|
|
@InstrumentCall(logger)
def validated_publish_sandbox(self, owner, sandbox_id, transaction_id):
    """
    Wrapper to ensure that publish operations can only follow validate operations for a sandbox.

    :param owner: Owner of the objects
    :type: string

    :param sandbox_id: ID of the sandbox
    :type: string

    :param transaction_id: ID of transaction for tracebacks
    :type: string

    :return: Sandbox sync log creation response
    :rtype: dict
    """
    modified_since_last_validate = []
    sandbox_services = self.sandbox_service_interface.get_bulk(
        owner, filter_data={'sandbox_id': sandbox_id}, fields=['_key', 'mod_timestamp'],
        transaction_id=transaction_id,
    )
    log_filter = {
        '$and': [
            {'mod_object_type': self.sandbox_interface.object_type},
            {'mod_object_id': sandbox_id}
        ]
    }
    # Most recent sync-log entry for this sandbox (descending by timestamp).
    last_log = self.sandbox_sync_log_interface.get_bulk(
        owner, filter_data=log_filter, sort_key='timestamp', sort_dir='desc',
        fields=['_key', 'timestamp', 'mod_timestamp'], transaction_id=transaction_id,
    )
    if last_log:
        # Any service modified at or after the last log entry has not been
        # covered by a validate run.
        for service in sandbox_services:
            service_timestamp = utils.parse_mod_timestamp(service['mod_timestamp'])
            if service_timestamp >= last_log[0]['timestamp']:
                # Re-using this to store warnings
                modified_since_last_validate.append({
                    '_key': service['_key'],
                    'object_type': self.sandbox_service_interface.object_type,
                    'description': 'Sandbox service %s was not validated when publish was attempted.' %
                                   service['_key'],
                })
    else:
        # No sync-log history at all — force a validation pass.
        modified_since_last_validate.append({
            '_key': sandbox_id,
            'object_type': self.sandbox_interface.object_type,
            'description': 'Sandbox %s has no records.' % sandbox_id,
        })
    if modified_since_last_validate:
        validation_response = self.validate_sandbox(owner, sandbox_id, 'pre_%s' % transaction_id)
        validation_results = self.sandbox_sync_log_interface.get(owner, validation_response['_key'])
        if validation_results['errors']:
            # NOTE(review): assumes the stored validation record always has a
            # 'warnings' list — confirm against the sync-log schema.
            validation_results['warnings'].extend(modified_since_last_validate)
            return validation_results
    # Validation passed (or was clean) — proceed with the real publish.
    return self.publish_sandbox(owner, sandbox_id, transaction_id)
|
|
|
|
def find_services_by_publish_status(self, last_published_record):
    """
    Identifies the services that were successfully published and those that
    failed to publish from the sandbox's last_published_record.

    :param last_published_record: Last published record detail from the sandbox
    :type: dict

    :return: List of successfully published service keys and failed to publish service keys
    :rtype: (list, list)
    """
    published_keys = []
    failed_keys = []
    failed_tree_ids = []

    # Errors with the pseudo object type 'sandbox_service_tree' mark whole
    # trees as failed; service-typed errors mark individual services.
    for failure in last_published_record.get('errors', []):
        if failure['object_type'] == 'sandbox_service_tree':
            failed_tree_ids.append(failure['_key'])
        elif failure['object_type'] == self.service_interface.object_type:
            failed_keys.append(failure['_key'])

    details = last_published_record.get('details', {})
    if 'service_tree' in details and 'graphs' in details['service_tree']:
        for graph in details['service_tree']['graphs']:
            # Every vertex of a failed tree is a failed service; every vertex
            # of any other tree published successfully.
            bucket = failed_keys if graph['id'] in failed_tree_ids else published_keys
            bucket.extend(vertex['id'] for vertex in graph['vertices'])
    return published_keys, failed_keys
|
|
|
|
@InstrumentCall(logger)
def publish_revert_sandbox(self, owner, sandbox_id, transaction_id):
    """
    Checks if revert of sandbox can be performed and then invokes the revert.

    Revert is refused (HTTP 409) when any service published from this sandbox
    carries a backfill-enabled KPI.

    :param owner: Owner of the objects
    :type: string

    :param sandbox_id: ID of the sandbox
    :type: string

    :param transaction_id: ID of transaction for tracebacks
    :type: string

    :return: results from the revert operation
    :rtype: dict
    """
    backfill_enabled, backfill_enabled_services = self.get_backfill_enabled_value(owner, sandbox_id)
    if backfill_enabled:
        if len(backfill_enabled_services) > 0:
            # Reverting would orphan backfilled KPI data — block and report
            # which services are responsible.
            service_names = [ service['title'] for service in backfill_enabled_services]
            logger.error('Revert operations are blocked as services="%s" has the kpi backfill enabled'
                         % (service_names))
            raise ItoaError(logger=logger, status_code=409, message="Reverting of sandbox with service containing backfill enabled kpi is not allowed ")
    return self.process_publish_revert_sandbox(owner, sandbox_id, transaction_id)
|
|
|
|
@sandbox_lock_and_update_count_factory(Status.STATUS_PUBLISH_REVERT, update_count=True,
                                       multiple_sandbox_publish_validate_status_check=False)
@InstrumentCall(logger)
def process_publish_revert_sandbox(self, owner, sandbox_id, transaction_id):
    """
    Revert services back to published sandbox and clear the production services.

    Deletes the production services that were successfully published from
    this sandbox, clears the sandbox's last_published_record, writes a
    sync-log record, and posts a notification describing the outcome.

    :param owner: Owner of the objects
    :type: string

    :param sandbox_id: ID of the sandbox
    :type: string

    :param transaction_id: ID of transaction for tracebacks
    :type: string

    :return: results from the revert operation
    :type: dict
    """
    sandbox = self.sandbox_interface.get(owner, sandbox_id)
    sandbox_name = sandbox['identifying_name']
    log_link = self.sandbox_utils.get_search_log_url(transaction_id)

    # Assume success; flipped to an error notification if the revert fails.
    notification_type = SplunkMessageHandler.INFO
    notification_message = "%s reverted to the last published version. %s for more details." % (sandbox_name, log_link)

    revert_keys, skipped_keys = self.find_services_by_publish_status(
        sandbox.get('last_published_record', {}))
    result = {
        'services_to_be_reverted': revert_keys,
        'services_skipped_from_revert': skipped_keys,
        'errors': [],
    }
    sync_log_entry = {
        'title': 'Revert published services %s (%s)' % (self.service_interface.object_type,
                                                        transaction_id),
        'action_type': 'Revert publish',
        'details': result,
        'sec_grp': self.set_first_assigned_security_group(owner, transaction_id, result),
    }
    try:
        if revert_keys:
            key_filter = {'$or': [{'_key': key} for key in revert_keys]}
            self.service_interface.delete_bulk(owner, filter_data=key_filter,
                                               transaction_id=transaction_id)

        # Empty the last published record information from sandbox after revert
        self.sandbox_interface.update(owner, sandbox_id, data={'last_published_record': {}},
                                      is_partial_data=True, transaction_id=transaction_id)
    except Exception as exc:
        for key in revert_keys:
            # This is technically inaccurate, but this exists to provide clearer information to the user
            result['errors'].append({
                '_key': key,
                'object_type': self.service_interface.object_type,
                'description': 'Error encountered in failed services clean-up: %s' % str(exc),
            })
        notification_type = SplunkMessageHandler.ERROR
        notification_message = "%s failed to revert. %s for more details." % (sandbox_name, log_link)

    self.sandbox_sync_log_interface.create_record(
        owner, transaction_id, self.sandbox_interface.object_type, sandbox_id, sync_log_entry)
    self.sandbox_utils.message_handler.post_or_update_message(
        'service_sandbox_%s' % transaction_id, notification_type, notification_message)
    return result
|
|
def clear_publish_references_from_sandbox(self, owner, sandbox_id, transaction_id):
    """
    Empties out the last_published_record information from the sandbox.

    Looks up the services that were successfully published from this
    sandbox and wipes their publish history; does nothing when no service
    was successfully published.

    :param owner: Owner of the objects
    :type: string

    :param sandbox_id: ID of the sandbox
    :type: string

    :param transaction_id: ID of transaction for tracebacks
    :type: string
    """
    sandbox = self.sandbox_interface.get(owner, sandbox_id)
    published_keys, _failed_keys = self.find_services_by_publish_status(
        sandbox.get('last_published_record', {}))
    if not published_keys:
        return
    key_filter = {'$or': [{'_key': key} for key in published_keys]}
    self.clear_publish_history(owner, key_filter, transaction_id)
|
|
def clear_publish_history(self, owner, filter_data, transaction_id):
    """
    Clears the publish history from the published services.

    Fetches the services matching ``filter_data``, empties each one's
    ``publish_history`` field, and saves them back in a single partial
    batch update.

    :param owner: Owner of the objects
    :type: string

    :param filter_data: KV-store filter selecting the services to clear
    :type: dict

    :param transaction_id: ID of transaction for tracebacks
    :type: string
    """
    services_list = self.service_interface.get_bulk(
        owner, filter_data=filter_data, fields=['_key'], sort_key='timestamp',
        sort_dir='desc', transaction_id=transaction_id,
    )
    for service in services_list:
        service['publish_history'] = []

    # Empty the publish_history information from service after reset of sandbox.
    # The save is partial, so only the cleared field is written back.
    if services_list:
        self.service_interface.save_batch(
            owner,
            services_list,
            method=CRUDMethodTypes.METHOD_UPDATE,
            validate_names=True,
            is_partial_data=True,
            transaction_id=transaction_id)
|
|
@sandbox_lock_and_update_count_factory(Status.STATUS_PUBLISH_REVERT, update_count=True,
                                       multiple_sandbox_publish_validate_status_check=False)
@InstrumentCall(logger)
def publish_reset_sandbox(self, owner, sandbox_id, transaction_id):
    """
    Resets the sandbox back to editable state.

    Services that failed to publish remain in the sandbox; successfully
    published services are removed from the sandbox and their publish
    history is cleared. The sandbox's last_published_record is emptied and
    its backfill flag is switched off.

    :param owner: Owner of the objects
    :type: string

    :param sandbox_id: ID of the sandbox
    :type: string

    :param transaction_id: ID of transaction for tracebacks
    :type: string

    :return: results from the reset operation
    :type: dict
    """
    sandbox = self.sandbox_interface.get(owner, sandbox_id)
    sandbox_name = sandbox['identifying_name']
    log_link = self.sandbox_utils.get_search_log_url(transaction_id)

    # Assume success; flipped to an error notification if the reset fails.
    notification_type = SplunkMessageHandler.INFO
    notification_message = "%s reset completed. %s for more details." % (sandbox_name, log_link)

    reset_keys, kept_keys = self.find_services_by_publish_status(
        sandbox.get('last_published_record', {}))
    result = {
        'services_to_be_resetted': reset_keys,
        'services_skipped_from_reset': kept_keys,
        'errors': [],
    }
    sync_log_entry = {
        'title': 'Reset published services %s (%s)' % (self.service_interface.object_type,
                                                       transaction_id),
        'action_type': 'Reset publish',
        'details': result,
        'sec_grp': self.set_first_assigned_security_group(owner, transaction_id, result),
    }
    try:
        if reset_keys:
            key_filter = {'$or': [{'_key': key} for key in reset_keys]}
            self.sandbox_service_interface.delete_bulk(owner, filter_data=key_filter,
                                                       transaction_id=transaction_id)
            self.clear_publish_history(owner, key_filter, transaction_id)
        # Empty the last published record information from sandbox after reset
        # and switch the backfill enabled flag back to its default (False).
        self.sandbox_interface.update(owner, sandbox_id,
                                      data={'last_published_record': {}, 'backfill_enabled': False},
                                      is_partial_data=True, transaction_id=transaction_id)
    except Exception as exc:
        for key in reset_keys:
            # This is technically inaccurate, but this exists to provide clearer information to the user
            result['errors'].append({
                '_key': key,
                'object_type': self.sandbox_service_interface.object_type,
                'description': 'Error encountered in failed services clean-up: %s' % str(exc),
            })
        notification_type = SplunkMessageHandler.ERROR
        notification_message = "%s failed to reset. %s for more details." % (sandbox_name, log_link)

    self.sandbox_sync_log_interface.create_record(
        owner, transaction_id, self.sandbox_interface.object_type, sandbox_id, sync_log_entry)
    self.sandbox_utils.message_handler.post_or_update_message(
        'service_sandbox_%s' % transaction_id, notification_type, notification_message)
    return result
|