# Copyright (C) 2005-2025 Splunk Inc. All Rights Reserved.
"""
This module implements a refresh queue utility to enable object consistency management.
The exposed interfaces act as utility methods that abstract processing of the refresh queue.
"""
# Python Imports
import os
import time

from ITOA.setup_logging import getLogger
# pylint: disable = import-error
from ITOA.storage import statestore
from .itoa_config import get_registered_change_handlers
# pylint: enable = import-error

DEFAULT_JOB_PRIORITY = 0

logger = getLogger()
# Maps a process id to the refresh queue job (a dict containing at least '_key' and
# 'priority') currently being handled in that process.
process_map = {}
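# Note: this module only reads process_map; it is expected to be populated elsewhere
# (presumably by the component that dequeues and executes refresh jobs) so that any job
# created while another job is being handled inherits that job's '_key' and 'priority'
# (see RefreshQueueAdapter.create_refresh_job below).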


def generate_refresh_queue_job(
        change_type, changed_object_key, changed_object_type, change_detail=None,
        transaction_id=None, priority=DEFAULT_JOB_PRIORITY, number_of_failures=0,
        parent_job=None, job_data=None, handler_object=None, processor=None,
):
    """
    Builds a refresh queue job dictionary for the given change type, using the key and
    the type of the changed object along with the other supplied fields.
    @param change_type: a string of the change descriptor/type
    @type change_type: str
    @param changed_object_key: the key or list of keys for the changed objects
    @type changed_object_key: str|list
    @param changed_object_type: the type of ITSI object changed
    @type changed_object_type: str
    @param change_detail: any extra information the change handler could use
    @type change_detail: dict
    @param transaction_id: the transaction id used to trace a user request all the way through the system
    @type transaction_id: str
    @param priority: the priority of the job (0 by default)
    @type priority: int
    @param number_of_failures: the number of failures that have occurred (0 by default)
    @type number_of_failures: int
    @param parent_job: the parent job key
    @type parent_job: str
    @param job_data: any additional data for the job
    @type job_data: dict
    @param handler_object: the key of the object that will handle the job
    @type handler_object: str
    @param processor: the instance handling a refresh queue job
    @type processor: str
    @return: dictionary containing the refresh job details
    @rtype: dict
    """
    # Avoid a mutable default argument: build a fresh dict per call.
    if change_detail is None:
        change_detail = {}
    current_time = time.time()
    last_queued_time = None
    if priority > -1:
        last_queued_time = current_time
    return {
        'change_type': change_type,
        'changed_object_key': changed_object_key,
        'changed_object_type': changed_object_type,
        'change_detail': change_detail,
        'transaction_id': transaction_id,
        'create_time': current_time,
        'number_of_failures': number_of_failures,
        'parent_job': parent_job,
        'job_data': job_data,
        'handler_object': handler_object,
        'last_queued_time': last_queued_time,
        'processor': processor,
        'priority': priority,
    }
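
# Illustrative sketch (the change_type and object keys below are hypothetical):
#
#     job = generate_refresh_queue_job(
#         change_type="service_change",
#         changed_object_key=["svc-1", "svc-2"],
#         changed_object_type="service",
#         change_detail={"updated_fields": ["title"]},
#         transaction_id="tid-42",
#     )
#
# The returned value is a plain dict ready to be written to the refresh queue collection;
# because the default priority of 0 is greater than -1, 'last_queued_time' is set to
# 'create_time'.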


def get_handler_for_job(job):
    """
    Looks at the change type (and other information from the job, if necessary) to
    determine which handler class execution should be delegated to, and returns that
    handler class.
    @param job: ITSI refresh job formatted dictionary
    @type job: dict
    @returns: the handler class
    @rtype: ItoaChangeHandler
    """
    handler_class = get_registered_change_handlers().get(job.get("change_type"))
    if handler_class is None:
        message = 'No valid handler found for job: {0}.'.format(job)
        raise Exception(message)
    return handler_class
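
# Sketch of the expected calling pattern (assumed, based on the synchronous path in
# RefreshQueueAdapter.create_refresh_job below): the return value is the handler *class*,
# which callers instantiate themselves.
#
#     handler_class = get_handler_for_job(job)
#     handler = handler_class(logger, session_key)
#     handler.deferred(job, transaction_id=job.get("transaction_id"))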


class RefreshQueueAdapter(object):
    """
    Provides an interface to set, get, and delete refresh queue entries.
    """

    def __init__(self, session_key):
        self.statestore = statestore.StateStore(collection="itsi_refresh_queue")
        self.statestore.lazy_init(session_key)
        self.session_key = session_key
        self.owner = "nobody"
        self.objecttype = "refresh_queue_job"
        self.logger = logger

    def create_refresh_job(
            self,
            change_type,
            changed_object_key,
            changed_object_type,
            change_detail=None,
            transaction_id=None,
            synchronous=False,
            priority=DEFAULT_JOB_PRIORITY,
    ):
        """
        Creates a record in the refresh queue for the given change type, with the key and
        the type of the changed object as well as the other required fields.
        The final refresh job object looks like:
            {
                _key: <generated by statestore>,
                change_type: <identifier of the change, used to pick the change handler>,
                changed_object_key: <array of changed objects' keys>,
                changed_object_type: <string of the type of object>,
                create_time: <epoch timestamp>,
                change_detail: <dict of whatever the caller sends>,
                object_type: "refresh_job",
                parent_job: <key of parent job, if applicable, or None>,
                priority: <one of (-1, 0, 1)>,
            }
        @param change_type: a string of the change descriptor/type/identifier
        @type change_type: str
        @param changed_object_key: the key or list of keys for the changed objects
        @type changed_object_key: str|list
        @param changed_object_type: the type of ITSI object(s) changed
        @type changed_object_type: str
        @param change_detail: any extra information the change handler could use
        @type change_detail: dict (by convention, not enforced)
        @param transaction_id: the transaction id used to trace a user request all the way through the system
        @type transaction_id: str
        @param synchronous: if True, bypass the refresh queue entirely and process the change synchronously
        @type synchronous: bool
        @param priority: the priority of the job, 0 by default
        @type priority: int
        @return: True if successful, False otherwise. For synchronous jobs, success means the change was fully processed.
        @rtype: bool
        """
        if change_detail is None:
            change_detail = {}
        if change_type is None or changed_object_key is None or changed_object_type is None:
            raise TypeError("Problem with input to create_refresh_job")
        if not isinstance(changed_object_key, list):
            changed_object_key = [changed_object_key]
        # If this process is already handling a refresh job, treat that job as the parent
        # so the new job inherits its key and priority.
        parent_job = process_map.get(os.getpid(), {})
        logger.debug(
            "Current job context key=%s priority=%d",
            parent_job.get("_key"),
            parent_job.get("priority", 0),
        )
        data = generate_refresh_queue_job(
            change_type,
            changed_object_key,
            changed_object_type,
            change_detail,
            transaction_id,
            parent_job=parent_job.get("_key"),
            priority=parent_job.get("priority", priority),
        )
        if synchronous:
            handler_class = get_registered_change_handlers().get(data.get("change_type"))
            handler = handler_class(self.logger, self.session_key)
            handler.assert_valid_change_object_synchronous(data)
            return handler.deferred(data, transaction_id=transaction_id)
        else:
            try:
                self.statestore.create(self.session_key, self.owner, self.objecttype, data)
                return True
            except statestore.StateStoreError:
                return False
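
    # Illustrative usage (hedged sketch; the change_type and key values are hypothetical
    # and must correspond to a handler registered via get_registered_change_handlers()):
    #
    #     adapter = RefreshQueueAdapter(session_key)
    #     queued = adapter.create_refresh_job(
    #         change_type="service_change",
    #         changed_object_key="svc-1234",
    #         changed_object_type="service",
    #         change_detail={"updated_fields": ["title"]},
    #         transaction_id="tid-42",
    #     )
    #     # queued is True once the job is written to the itsi_refresh_queue collection.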

    def get_all_refresh_jobs(self):
        """
        Get all refresh jobs sorted by their create time and return them as a list.
        Returns an empty list in certain error scenarios.
        @returns: list of refresh jobs
        @rtype: list
        """
        return self.statestore.get_all(
            self.session_key,
            self.owner,
            self.objecttype,
            sort_key="create_time",
            sort_dir="asc",
            limit=1000
        )

    def get_refresh_jobs_by_filter_condition(self, filter_data=None):
        """
        Get the refresh jobs matching a filter condition and return them as a list,
        with only the '_key' and 'transaction_id' fields populated.
        @param filter_data: the filter condition to apply, or None for no filtering
        @type filter_data: dict
        @returns: list of matching refresh jobs
        @rtype: list
        """
        refresh_jobs = self.statestore.get_all(
            self.session_key,
            self.owner,
            self.objecttype,
            filter_data=filter_data,
            fields=['_key', 'transaction_id'],
            limit=1000
        )
        return refresh_jobs
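
    # Illustrative usage (assumption: filter_data follows the same query-dict convention
    # that the underlying statestore.get_all() accepts, keyed on stored job fields):
    #
    #     jobs = adapter.get_refresh_jobs_by_filter_condition(
    #         filter_data={"changed_object_type": "service"}
    #     )
    #     # Each entry contains only the requested fields: '_key' and 'transaction_id'.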

    def delete_refresh_job(self, refresh_job_key):
        """
        Deletes a refresh job record from the KV store collection.
        @param refresh_job_key: the key of the object to be deleted
        @type refresh_job_key: str
        @returns: True if successful, False otherwise
        @rtype: bool
        """
        try:
            self.statestore.delete(self.session_key, self.owner, self.objecttype, refresh_job_key)
            return True
        except statestore.StateStoreError:
            return False

    def flush_refresh_job_queue(self):
        """
        Mainly for testing; deletes everything in the collection.
        """
        self.statestore.delete_all(
            self.session_key,
            self.owner,
            self.objecttype,
            {"object_type": self.objecttype}
        )