# Copyright (C) 2005-2025 Splunk Inc. All Rights Reserved.

import itsi_py3
import sys
import os

from ITOA.itoa_object import ItoaObject, CRUDMethodTypes
from ITOA.setup_logging import logger
from time import time
from itsi.itsi_utils import ITOAInterfaceUtils, DEFAULT_SCHEDULED_BACKUP_KEY
from splunk.clilib.bundle_paths import make_splunkhome_path
from itsi.upgrade.file_manager import FileManager

sys.path.append(make_splunkhome_path(['etc', 'apps', 'SA-ITOA', 'lib', 'SA_ITOA_app_common']))
from SA_ITOA_app_common.solnlib.server_info import ServerInfo

BACKUP_PATH = make_splunkhome_path(['var', 'itsi', 'backups'])


class ItsiBackupRestore(ItoaObject):
    """
    Implements ITSI Backup Restore
    """

    logger = logger
    log_prefix = '[ITSI Backup Restore] '
    collection_name = 'itsi_backup_restore_queue'
    ITOA_OBJECT_TYPE = 'backup_restore'

    def __init__(self, session_key, current_user_name):
        super(ItsiBackupRestore, self).__init__(
            session_key,
            current_user_name,
            'backup_restore',
            collection_name=self.collection_name,
            title_validation_required=True
        )

    def do_object_validation(self, owner, objects, validate_name=True, dupname_tag=None,
                             transaction_id=None, skip_local_failure=False):
        super(ItsiBackupRestore, self).do_object_validation(
            owner,
            objects,
            validate_name=validate_name,
            dupname_tag=dupname_tag,
            transaction_id=transaction_id,
            skip_local_failure=skip_local_failure
        )
        for object_data in objects:
            self.validate_keep_max_time(object_data)

    def validate_keep_max_time(self, data):
        """
        Validates that the keep_max_time field (retention period, in seconds)
        is at least the minimum required for the backup frequency.

        @param data: the backup/restore object data
        @type data: dict
        """
        frequency = data.get('frequency', None)
        keep_max_time = data.get('keep_max_time', None)
        if frequency is None or keep_max_time is None:
            return

        # Minimum retention, in days, for each supported schedule frequency
        freq_to_min_days = {
            'daily': 7,
            'weekly': 14
        }
        min_days = freq_to_min_days.get(frequency)
        if min_days is None:
            return

        min_keep_max_time = min_days * 24 * 60 * 60
        if keep_max_time < min_keep_max_time:
            logger.warning(
                'The keep_max_time value is too low ({}). Setting it to {} for {} scheduled backup.'.format(
                    keep_max_time, min_keep_max_time, frequency)
            )
            data['keep_max_time'] = min_keep_max_time
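    # Illustrative example (not part of the shipped module; the dict below is a
    # made-up input): the retention floor is min_days * 24 * 60 * 60 seconds,
    # so a 'daily' schedule with a one-hour keep_max_time is raised in place to
    # the 7-day minimum:
    #
    #     data = {'frequency': 'daily', 'keep_max_time': 3600}
    #     backup_restore.validate_keep_max_time(data)
    #     data['keep_max_time']  # -> 604800 (7 * 24 * 60 * 60)
    #
    # A 'weekly' schedule uses a 14-day floor (1209600 seconds); unrecognized
    # frequency values are left untouched.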
    """
    Not adding any validations here since it's only used internally.
    In future, if we see the need, we could add validations.

    Schema for this object and how it's used internally:
        create_time - set when the job is created
        start_time - set when the job starts running
        end_time - set when the job ends running
        last_queued_time - set when the job is queued
        status - 'Not Started'/'Queued'/'In Progress'/'Completed'/'Failed'
        path - file path to the backup; used by the upload/download endpoints
        job_type - 'Backup' or 'Restore'
        last_error - last error seen when the job tried to backup/restore, if any, else None
    """

    def do_additional_setup(self, owner, objects, req_source='unknown',
                            method=CRUDMethodTypes.METHOD_UPSERT, transaction_id=None,
                            skip_local_failure=False):
        info = ServerInfo(self.session_key)
        local_search_head_id = info.guid
        for json_data in objects:
            # Assume json_data is valid.
            # If creating a backup job or a restore job, generate a key and
            # assign the owning search head.
            if method == CRUDMethodTypes.METHOD_CREATE:
                # If it's the default scheduled backup, no need to generate a new key
                if not (json_data.get('_key') == DEFAULT_SCHEDULED_BACKUP_KEY and json_data.get('scheduled') == 1):
                    json_data['_key'] = ITOAInterfaceUtils.generate_backend_key()
                if not json_data.get('search_head_id'):
                    json_data['search_head_id'] = local_search_head_id
                # Stamp the queue time on newly queued jobs that do not carry one yet
                if json_data.get('status') == 'Queued' and (
                        not isinstance(json_data.get('last_queued_time'), itsi_py3.string_type) or
                        len(json_data.get('last_queued_time')) < 1):
                    json_data['last_queued_time'] = time()
            if method == CRUDMethodTypes.METHOD_UPDATE or method == CRUDMethodTypes.METHOD_UPSERT:
                # If the backup zip file exists locally, point search_head_id at this host's GUID
                path_to_backup_zip = os.path.join(BACKUP_PATH, json_data['_key'] + '.zip')
                if FileManager.is_exists(path_to_backup_zip):
                    json_data['search_head_id'] = local_search_head_id
                if json_data.get('status') == 'Queued' and (
                        not isinstance(json_data.get('last_queued_time'), itsi_py3.string_type) or
                        len(json_data.get('last_queued_time')) < 1):
                    json_data['last_queued_time'] = time()

    def identify_dependencies(self, owner, objects, method, req_source='unknown',
                              transaction_id=None, skip_local_failure=False):
        """
        Identifies dependencies caused by the change. Instead of creating a
        refresh job, the cleanup is handled immediately.

        @param {string} owner: user which is performing this operation
        @param {list} objects: list of objects
        @param {string} method: method name
        @param {string} req_source: request source
        @return: a tuple
            {boolean} set to true/false if dependency update is required
            {list} list of refresh jobs, each element has the following:
                change_type, changed_object_key, changed_object_type
        """
        if method == CRUDMethodTypes.METHOD_DELETE:
            for backup_job in objects:
                # Remove both the working directory and the zip archive for the job
                path_to_directory = os.path.join(BACKUP_PATH, backup_job.get('_key'))
                path_to_zip = os.path.join(BACKUP_PATH, backup_job.get('_key') + '.zip')
                if FileManager.is_exists(path_to_directory):
                    FileManager.delete_working_directory(path_to_directory)
                if FileManager.is_exists(path_to_zip):
                    FileManager.delete_file(path_to_zip)
        return False, None
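    # Illustrative sketch (the _key and paths below are assumptions about a
    # typical install, not values emitted by this module): deleting a job whose
    # _key is 'abc123' removes both on-disk artifacts under BACKUP_PATH, if
    # present:
    #
    #     $SPLUNK_HOME/var/itsi/backups/abc123        (working directory)
    #     $SPLUNK_HOME/var/itsi/backups/abc123.zip    (zip archive)
    #
    #     deleted = [{'_key': 'abc123'}]
    #     backup_restore.identify_dependencies(
    #         'nobody', deleted, CRUDMethodTypes.METHOD_DELETE)
    #     # -> (False, None); no refresh job is ever enqueued for deletes.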
    def is_any_backup_restore_job_in_progress(self, owner, req_source='unknown'):
        """
        Checks for any backup/restore job in progress.
        Returns True if there is at least one job in progress, else False.

        @type owner: basestring
        @param owner: user performing the operation
        @type req_source: basestring
        @param req_source: request source
        @return: bool
        """
        job_fetch_filter = {'status': 'In Progress'}
        # One matching record is enough to answer the question, hence limit=1
        backup_restore_job = self.get_bulk(owner, filter_data=job_fetch_filter, limit=1, req_source=req_source)
        return bool(backup_restore_job)
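# Minimal usage sketch (hypothetical; the session key, user names, and field
# values below are assumptions, and create() is assumed from the base
# ItoaObject CRUD interface rather than defined in this module). A caller
# would typically gate new work on in-flight jobs before queueing a backup:
#
#     backup_restore = ItsiBackupRestore(session_key, 'admin')
#     if not backup_restore.is_any_backup_restore_job_in_progress('nobody'):
#         backup_restore.create('nobody', {
#             'title': 'nightly backup',
#             'job_type': 'Backup',
#             'status': 'Queued',
#         })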