{"basic_builder": {"appname": "TA-socradar-incidents", "friendly_name": "SOCRadar Incidents", "version": "1.9.22", "author": "Burak G\u00f6ger", "description": "SOCRadar Incident Collector", "theme": "#a65c7d", "large_icon": "iVBORw0KGgoAAAANSUhEUgAAAEgAAABICAYAAABV7bNHAAAAAXNSR0IArs4c6QAADqlJREFUeF7tXGuMXGUZft5znZnd2dnt7lLdC9YWiuXStFCEYkvXRIyCmqAx3jAKXkkUjX+M/jH+0fiHRDRRE0VNUPGHoImAMRoXqNRioWutYBFqoduF0nZvs3OfOZ8878yh22a2c87MslWzX7LZZPfMd3m+9/q87xnBCozBwbHuRKK8FnCHjDGjxphtgLkJwMaIyz8DyAMisk9EjgKVqWLRO37ixPhCxM+3/Zi0/cnmHxRgzB4aQi8QjFpWkGk8lgCwBpB+Y8wFAC4BsB3ASMT1JwHsAXBIRF4GzCkA0wCK/HwQWHOAdXRqCrPAeA2AiThvy8eWGaAxZ+1az7ftwhbLwgeNMZdzByKwjYEHiAcYgpUG0A8g2XKH9QcKAAhKFpAiYMoiKBsDggERORgEuLdWS04cP14uAePViPO2fGxZAFq//m2Zcrn0BmNkQAQJY8wWwHwIgAK0AuMgIL8QkQljUBQxJz3Pf/7w4T/Mdbr2sgA0MrJzM4BbjDFbjRFPxFCdRgETqlin+2zxeZkDzFFjZFqE0iX7AdwzOfnogU4Xbgugiy56p5/NzmUcx+oVQR+Aa4yRWwBKDmwAVqcba/PzAUC1kwkRcw+AvcZgploNZtPpzNyzzz5UijtvWwCtXXvdBY4jlwHYYoxsE8GlAC4EaJzBOduaN+7mmzxP48yfWQAvGIOnRMw+ABPVqvnH8eOPvRx3jVgHWb/+qkyl4o8aIxuDQDaLmK2AbAMwFHfhFXp+CjD7jJH9lmUOiJhnXLd09PDhJyLbplgAjY5ed3kQWB+kSgGgux4AVMWieqMVwuXVZej9ZgCcBEDp2WtZwb1Hjz52MOpGIgG0bt1Yb6VSXmeM7BARAkRb4wNwoi50np+j26f9mTDG3Ctidruud+TIkXGq4jlHJICGh6/bAtgfA4Jdde+ktobGONLnW21iBf5Pu8SYaZbeDrAeBmo/PXbssYlWa5/zgKHNqdVkTAS3AbK11YT/G/83+43B3bZtxlvZpHMCtMjmjDXSA9qc/4dBm3QIwHgrm9QUoDDOcV3ZYYzc3sib2rY5ScvCGttGj22hy7LgSnuaWTEGuSDAXC3ATK2GQsCwp60R2qQ9IuZ7lYrZvVSc1HSnYZxjjOwSwc2Axjlt25xh18XVqSQuS/hY57notTlV/DFbq+Hf5Qr+USzhr/kCpiqV+JPUPxHapKeMwf0i5uGl4qSmAA0P79jYoCPG2olzbBH4IioxFzgONnqeArQp4eNC11UJOlGtIhsEoFRoxtlkEEY+221ZGHBsVA3wQqWCpxsA/atcxsvVKuZrAUqcx8RO4jVOoqqRTjl2bPczZ2+jKUAjIzuuCQLcIQLanthxTqpxIErMW7u7cKnvq4rx77YAR8tVPJrL41CppKpSXOJgCRH02hYu9n3s6EopuIQgH9RV7KlSCX9ayKlEnazW9O8xh8ZJxtAW4a7Jyd17zwlQmJUDZswY3NqIdyKvmbYsvN518DrHwVrXweWJBMa6U+i3bRyrVDFdq6EUGDxXLitA/yqVMVerKW9x9k0RCE8EaQLkeXhLVwobfE9B5nzDroNTtRrGF/I4WCzieKWKl6pVvFipS2bMMSGCHwMyfjYLcMa+TmfllByzoU5yRR8bfQ839aSxOZFAn22hz7FVco6UK7h/bh5PFUuqTjS0J6p1I0t1ZGYb/n7VQBgDHrNqDJKWYMB20GVbag
gpmTdnetSeEfSZag2ztQAHikX8dj6LZ0rl6JuuPzkNyHOvaMz42SxACJAygcPDwfVA8BUAu+Jk5aHkbEsl8Z6eNC72PZUKAkFV2F8o4v65LI6WKyphGdvWg1JCEg2vtpgCCC0o7RNBDJkxShslZNRzcXMmja1JXoStNopzPVsq49fzWezLF+JKUoMFwMOA9c1jx6xHQmayAVCdCXTd4q4gMF8WMTviZOWUnHf1pHFNKqlqwIPRLlBi/lkqqec5VqlgreOohFEC6O55KAVGpKmKBQ0DHoLNOR+Yz+J4tQp6xjd6Lt7k+7g04ePyhA9HBIdLZfwlX4grScoCGCO7LUu+VakkHg6ZSQVoaGhsAKhdIkLJic4Eht5qWzKB29b0YXPS19smILtzeRwoFFW9KjDqzbYkErippxvrfU9tUcEEKmWVJZyPK1Agk2LBt+qHf2B+ARPFonovF6JqtjmZUCNOwAj6gUIJd0/PYF+hGNe7KTNpDCXJPjQ1NX6yAdD1Wy3L3GaMoVuPzASG3oqS85HeDF7nOjhYLKlKPZ4v4KVKVQ9GlaM3uyKRwOsdBwtBgCcLBTxdKimAtB/NBj0YAdjk+7gymVRVerFaxd+LRfVeNPIEmuu+OZVUlaMkcd2fzc5hb74Q07vVmUkRGQ8CuXtq6pH9CtDIyA56ra+9Eg/QrUce3Bg3dW0qpd6KqvXg/IIGcTw4YxiK/7ZUAtd3dan34QGpdnyGKlMHqHkkxICSAFElGUdRlSmJp6o1/DmXx5OFoqox1+VzfObGnm5dl97tL/m8XhYBiznGX9H6r09O7h7vCCCK9i19GbU99FY8+I+nZ/H3Qkklh/bhHeluBYm3TzBoQ/5WLKr3YYBXV7HmOsaDUsUYcHJ+zkNJXOe66uEoQb/LLui6lKQrkj5uXdOr63J+StA9M3Oq6jHHaYBY1PP9Ko3yV43BzigThbnV9lQSH1/Tq7dHu7Mnn1dvRc9FySJwlBwekB6GGyZAbbhh3RZtDAGiOhEEBpiP5fL4a6GgkkKPRu+2PVW3R7yQn0zPYk++oIBFzd1E8OgrTvYbpZKzW0ZHr9tgjHWNMbijwRS2xCjMrQjQzq6UBma/nJ3TjdBbrfM8laxtDbvx73IZv2nP/Z6xF0rTBY6ttuyGdLd6TMZQjH8oKUfKZfVu3NcHejNg+MGAlPuiSnNvEcdeEdwlEuyV0dFdO40JttQjZ3LMrQdzqvdmelRCGP7T2H77xCk10IxzeIMf6stgyHFUWihZbQZwTTfzBs9VydzeldRLoHT8YoYXlNf4h6p4x0A/NiU8TFaqKrn3zc1rDhdtyH5G1iLWhIyMXP9hY4JNAN4HgL9bDgaEn+nvw9XJJCyBBmbfOzWjxpZxDm/wEt/X0H/xxttIAZruhd5z0LH1Ij7al8GI655xEWnLxqf7+9Q50Mg+USjiB6dmdJ8Rx9MAfiViPS3DwzvvBAxr5JFr5du7UvjSYD82+Z7qOW/uvrmsGlQCtyWZUKO5r1BQGzAR30hGOgfXoQ3kRSUs0XV4UdlagPeoLUriIt/DoVIZd544hT25fKR5ATR6AWRShod3kFlLxamVhwAN2jYeyi4oQBTf9Z6HLwz2q9oxzqFo0wa8UI6s+1EPoM9d6LlqA69NJTVOer5SUVV/tlxWBoAAvbsnrUltTIDCXoA8AYpNooQAkY5gxMrQnrd2ZTKBOwb7kbEt1fk9uXo8tFScEwuNJg+HcRJtEW0iwwYCxPiInpbAfaq/T6PpmAC9ulpHADGP+s7JafUQzINotD/b38duDtV5AnSuOKdTgMI4iQBRtUkWcV1KLlMe2srPD6zRmOm8AMQDcuEn8gV1qZQs3hg31+6G2gEtlGiymD/kxeQLmK/VcGUqqbYy3GcMG7Q8EhQu/GS+gB6bXiWJT3Yo0p0C9KMGQKRG/qsAIsdDgD6xChBUlRaLLiVoFaBFst8MoFUVOwdANNLM1gkcI9hVI91QMSaK3224ebKLdPO3/5
e4eVIooZsn29SuV+0oDqJbDQPFhVqgFAcj6fMdKJL66LbrgSKpYNIinQDUdqoxYNt4kKlGLq+k1YbzlGpQcplqsOrKSPpwuaIZPb0qCbsTnaUa7Serb/K9OlGWqxNlvLXzkayS8giT1e+fmtEK6/szPWoTR11XK7AxJeh0stoJ3cGNkU5g1s6NMThbTHdMVSv4GXmaXEFLNUw7lmPQIbCExBTjI8o7nUl3MEf73MAaXJVMaMXk8UJBUxA6k4jjNN3RKWHGGwoJM5Lwiwkzsn9k+wjQH7I5HC7Hrng2PQ9V+YY0CbMUrkj4eLl6JmFGkv+Lg/1Kyz5fqdfJKOH/bIcwey0pV/I1TCifC+tZhXo9q11JCiWHzuDGnjQ2+K6GFDTKzShXPv9ILqe5GcmyqejVjdOU62tJ2pNcZ9MBC3+UMtbK/pjNgRx1O4OS8/Y0SfsULvE95EygBUrOuxRpzyoL+ap6w1U0ZucM0p4bbbcuFrXsw3oWy8vPl8v440JOa1ntlH1YFHx7ulurKOR4DhVXoOzTCUBRC4ck0lgaXuPYqmIk8tspHFJqaJzJEFJynsivUOFwaGh5Ss/sCaKhPp+lZ8Y/7BX6+XKWnpejeeHWRc0LpFmbNS9sTdC4dta8wNL2/iWaF1gOYmf7gWIJP52e1WpGzNa85s0LQOftL4x/GNGyirBU+ws7z+h9ora/mEb7Cw9Jz0fpfHA+q+Wkxe0vrNOxzY/tfazg0mizmMDSdMRx7vaXei/Qa99ANdlooCI1Qj6buRzJdYYCTHzDbq7wlR26cJaLVQoApVFZGBw5q4GKXpKDgDyYzaqK8zl2kUQcrRqo6tOsdAtesdGCx5u3cLqJqg4Qu1aBAEbDhEHHQZdVf4YSs7gFjw2cDBbZpPD77IL2QMYcLVvwdL7z0cRJqaCENGu3ZX7FtIER8S52dXiuSh7/triJk/1ClBi2uTCliSE5IY7RmjjDp5e7DZgNUOy8YOXVidEGnBLBgOOoxOzqSmHIdZAPDLJBTbvtGXyuaBtwCNByN5Jf5Hm4KpXQTrE3ep62yUVtJK83pNtai2ckzN5qBprM8UhrnJdG8uV+FYEJ7FXJ+qsITBfYHtzOOFmtap2d3oz2hurU5ujsVYTlfpmFtoQqlrHqauaxJaSNUQoC7UViiZnUylId+hGm7uxllnCB1dehWrwxuPpCXcRXKldfyWyhtKsv9Uawanxk9bXwFkCtfrFAREla/WqKFkCtfrlJREk6nbutfj3OOSFb/YKlyBK1+hVdraBa/ZK3Vgg1+///8tcE/gdYQxUWQAlFUgAAAABJRU5ErkJggg==", "small_icon": 
"iVBORw0KGgoAAAANSUhEUgAAACQAAAAkCAYAAADhAJiYAAAAAXNSR0IArs4c6QAAA25JREFUWEfdmEtME0EYx/8Lu7RAohAgQLekUCgoJnow+AQvPi4ejBdvnJWrytGTMR40Xok3TxK9eFbjwVhfQQ6oQQTS8uhWGiAgiVDayprZMpvddndmFmkk7qnpfPvNb773rIQdPMFg32FdxylAH3J+XRqQJLxLJN589qpeEn1BVc/WAZtLovJ2OV+9pr1aFnlXCEhVe9cBVIooZMhsaFq0iqeDCRQInL4pSdI9nhIv67quDyaTb++7veMKpKq9iwDqvWzmQXZJ06INTvKOQKraq3tQvmNRTYsW7V/0R4ktUwhfZCkbkGjMPAg04UrNPqZlnqyu4UZygWu9wpiyAfFc1VNViWetLcYmS7nf6J/T8CWdtm3a7ffhUYuKgCIb/1+emcfI+gYTzOo6E4iX2h8jYaiKjF9bW+iamDY3aJBl+KW8moyuI5XLmWvfD3SguqwMP7I59EzFWFBmSTA08YreSCSMZkXG7dQiHi6vGIrfR9rQoiiOm8xnszg5FTfWrtbV4lZjgwBUvnhuA7lnFXUThbG67XpyAU9X12xQJLZIjFndRaF47iOu4wIlujtNN1EYLZvDcbYLjFgj8hSCui84PunqOgMo3yj1MScpmk1UCYFLZnM4xoGhuh6HgjhTXQXr+8SixLKORVGSjkiq2nfNrWsTABKkRydjZsywTui0CdFBgzra0YbWCsUELJaXBiRWqhNlF2KzGE9vgvx2ihleoaExRQ7S7qvA6/ZWBhDABSKKmmQZnzrDTEUsMHKYE1NxJLJZ42AsKwsBkfQmae7VXRSSQJAyQMrB/wnUKMsY3SsuOxebxcQuBTXpcy/CIV4MsdOe1h3aKrzGkbV2fe1qR015OTvtvRbGsXQaF2NzvGw31p+HQzjk99kK4/DqTwwmU+6FMd9c3XuZU+sY3UjjUpwN9TIcwkG/z3vr4AH9o+bKvnPRWcg6fnwgdcll/CAFkBRC6/gRz2TQNz3DcLVl/Ni2EvPu5TagNSkyFOQHtBx0o2/Rh3b4b+kMzsdYMLAPaFTBboywxMXDoaA5RfJmILK34whLFkSH/LvNjeiv3c/MtKHlFdxJkasd+2EO+duuK+UFsZCOfQ0SdR3v1KLrQhdFC1QpLeXtKk2hRGNK1CJEbscfG6yb8O5sgkB//znGDrWHPlgVWqCUn/T+AO1PHOWrYatkAAAAAElFTkSuQmCC", "visible": true, "tab_version": "4.5.0", "tab_build_no": "0", "build_no": 31}, "data_input_builder": {"datainputs": [{"index": "default", "sourcetype": "socradar:incidents", "interval": "30", "use_external_validation": true, "streaming_mode_xml": true, "name": "socradar_incidents_collector", "title": "SOCRadar Incidents API Collector", "description": "Collects security incidents from the SOCRadar v4 API. 
Configure global API credentials and other settings on the Add-on's setup page", "type": "customized", "parameters": [{"name": "socradar_api_key", "label": "SOCRadar API Key", "help_string": "", "required": false, "format_type": "text", "default_value": "SOCRadar", "placeholder": "", "type": "text", "value": ""}, {"name": "socradar_company_id", "label": "SOCRadar Company ID", "help_string": "", "required": false, "format_type": "text", "default_value": "", "placeholder": "", "type": "text", "value": "26808"}], "data_inputs_options": [{"type": "customized_var", "name": "socradar_api_key", "title": "SOCRadar API Key", "description": "", "required_on_edit": false, "required_on_create": false, "format_type": "text", "default_value": "SOCRadar", "placeholder": ""}, {"type": "customized_var", "name": "socradar_company_id", "title": "SOCRadar Company ID", "description": "", "required_on_edit": false, "required_on_create": false, "format_type": "text", "default_value": "", "placeholder": ""}], "code": "\"\"\"\nSplunk Add-on - SOCRadar Incidents v4 collector\nProduction-ready script for Splunk Modular Input with API pagination,\nrate limit handling, and a per-run total new incident cap.\n\"\"\"\n\nimport json\nimport time\nimport requests\nfrom datetime import datetime, timedelta, timezone\n\n# Splunk Add-on SDK objects (helper, ew) are injected by Splunk when the script runs.\n\nSOCRADAR_API_BASE_URL = \"https://platform.socradar.com/api\"\nAPI_TIMEOUT_SECONDS = 30\nDEFAULT_MAX_PAGES_TO_FETCH = 50\nDEFAULT_MAX_NEW_INCIDENTS_PER_RUN = 500\n\ndef validate_input(helper, definition):\n \"\"\"\n Validation for modular input configurations\n \"\"\"\n pass\n\ndef collect_events(helper, ew):\n helper.log_info(\"SCRIPT_START: SOCRadar Incidents v4 collection run.\")\n\n # --- Stage 1: Get Configuration ---\n # Get credentials from INPUT parameters (not global settings)\n company_id = 
helper.get_arg(\"socradar_company_id\")\n api_key = helper.get_arg(\"socradar_api_key\")\n \n # Log what we retrieved (be careful with full API key)\n helper.log_info(f\"Retrieved company_id: {company_id}\")\n \n # Check if they might be swapped (company ID is usually shorter)\n if company_id and api_key:\n if len(company_id) > 20 and len(api_key) < 20:\n helper.log_warning(\"WARNING: API key and company ID might be swapped! Company ID is usually shorter.\")\n # Swap them\n helper.log_info(\"Swapping credentials...\")\n company_id, api_key = api_key, company_id\n helper.log_info(f\"After swap - company_id: {company_id}, api_key: {api_key[:10]}...\")\n \n if not company_id or not api_key:\n helper.log_error(\"Missing credentials. Please configure socradar_company_id and socradar_api_key in the input configuration.\")\n return\n \n # Get how many days from GLOBAL settings (add-on level)\n how_many_days_str = helper.get_global_setting(\"how_many_days\")\n if how_many_days_str:\n try:\n how_many_days = max(1, int(how_many_days_str))\n except:\n how_many_days = 1\n else:\n how_many_days = 1\n \n helper.log_info(f\"Configuration: Looking back {how_many_days} days\")\n \n # Get other settings from global\n try:\n max_pages_to_fetch = int(helper.get_global_setting(\"max_page\") or DEFAULT_MAX_PAGES_TO_FETCH)\n except:\n max_pages_to_fetch = DEFAULT_MAX_PAGES_TO_FETCH\n \n try:\n max_new_incidents_per_run = int(helper.get_global_setting(\"total_limit\") or DEFAULT_MAX_NEW_INCIDENTS_PER_RUN)\n except:\n max_new_incidents_per_run = DEFAULT_MAX_NEW_INCIDENTS_PER_RUN\n\n # Get Splunk settings\n input_stanza_name = helper.get_input_stanza_names()\n output_index = helper.get_output_index()\n sourcetype = helper.get_sourcetype()\n input_type = helper.get_input_type()\n \n # Log the actual input stanza name for debugging\n helper.log_info(f\"Input stanza name from helper: {input_stanza_name}\")\n\n # --- Stage 2: Load Checkpoint ---\n checkpoint_key = 
f\"{input_stanza_name}_socradar_v4_processed_alarms\"\n processed_alarms = {} # Changed to dict to store alarm_id -> status\n \n try:\n checkpoint_data_raw = helper.get_check_point(checkpoint_key)\n if checkpoint_data_raw:\n checkpoint_data = json.loads(checkpoint_data_raw)\n # Support both old format (list) and new format (dict)\n if isinstance(checkpoint_data.get(\"alarm_ids\"), list):\n # Old format - convert to dict with string keys\n for alarm_id in checkpoint_data.get(\"alarm_ids\", []):\n processed_alarms[str(alarm_id)] = None\n else:\n # New format - dict with status, ensure all keys are strings\n temp_alarms = checkpoint_data.get(\"alarm_status\", {})\n processed_alarms = {str(k): v for k, v in temp_alarms.items()}\n helper.log_info(f\"Loaded checkpoint with {len(processed_alarms)} already processed alarm IDs\")\n except:\n helper.log_info(\"No checkpoint found, starting fresh\")\n\n # --- Stage 3: Calculate Time Window ---\n current_time = datetime.now(timezone.utc)\n start_time = current_time - timedelta(days=how_many_days)\n \n # Convert to Unix timestamps (SOCRadar format)\n end_timestamp = int(current_time.timestamp())\n start_timestamp = int(start_time.timestamp())\n \n helper.log_info(f\"Time window: {start_time.strftime('%Y-%m-%d %H:%M')} to {current_time.strftime('%Y-%m-%d %H:%M')}\")\n \n # --- Stage 4: Build API URL ---\n api_url = f\"{SOCRADAR_API_BASE_URL}/company/{company_id}/incidents/v4\"\n helper.log_info(f\"API URL: {api_url}\")\n \n # --- Stage 5: Fetch Incidents with Pagination ---\n all_incidents = []\n page = 1\n consecutive_rate_limits = 0\n new_incidents_count = 0\n duplicate_count = 0\n \n while page <= max_pages_to_fetch:\n # API parameters - SOCRadar expects key as URL parameter for incidents endpoint\n params = {\n \"key\": api_key,\n \"limit\": 100, # Get 100 per page from SOCRadar\n \"page\": page,\n \"start_date\": start_timestamp,\n \"end_date\": end_timestamp\n }\n \n # Log the request details\n helper.log_info(f\"Making 
request to page {page}\")\n helper.log_debug(f\"Request params: {params}\")\n \n # Make request\n try:\n response = requests.get(api_url, params=params, timeout=API_TIMEOUT_SECONDS)\n \n # Log response status\n helper.log_info(f\"Response status code: {response.status_code}\")\n \n # Check for rate limit\n if response.status_code == 429 or \"rate limit exceeded\" in response.text.lower():\n consecutive_rate_limits += 1\n \n if consecutive_rate_limits == 1:\n wait_time = 30\n else:\n wait_time = 60\n \n helper.log_warning(f\"Rate limit hit! Waiting {wait_time} seconds... (attempt {consecutive_rate_limits})\")\n time.sleep(wait_time)\n continue # Retry same page\n \n # Reset rate limit counter on success\n consecutive_rate_limits = 0\n \n # Check for authentication error\n if response.status_code == 401:\n helper.log_error(f\"API error: HTTP 401 Unauthorized. Please check your API credentials.\")\n helper.log_error(f\"Response: {response.text}\")\n break\n \n # Check for other errors\n if response.status_code != 200:\n helper.log_error(f\"API error: HTTP {response.status_code}\")\n helper.log_error(f\"Response: {response.text}\")\n break\n \n # Parse response\n data = response.json()\n incidents = data.get(\"data\", [])\n \n if not incidents:\n helper.log_info(f\"No more incidents at page {page}\")\n break\n \n # Process incidents on this page\n for incident in incidents:\n # Truncate long text fields to 5000 characters\n if 'alarm_text' in incident and incident['alarm_text'] and len(str(incident['alarm_text'])) > 5000:\n incident['alarm_text'] = str(incident['alarm_text'])[:5000] + '...'\n if 'alarm_response' in incident and incident['alarm_response'] and len(str(incident['alarm_response'])) > 5000:\n incident['alarm_response'] = str(incident['alarm_response'])[:5000] + '...'\n if 'alarm_type_details' in incident and isinstance(incident['alarm_type_details'], dict):\n if 'alarm_default_mitigation_plan' in incident['alarm_type_details'] and 
incident['alarm_type_details']['alarm_default_mitigation_plan']:\n if len(str(incident['alarm_type_details']['alarm_default_mitigation_plan'])) > 5000:\n incident['alarm_type_details']['alarm_default_mitigation_plan'] = str(incident['alarm_type_details']['alarm_default_mitigation_plan'])[:5000] + '...'\n # Extract alarm main type and sub type\n incident['alarm_main_type'] = incident['alarm_type_details'].get('alarm_main_type', 'N/A')\n incident['alarm_sub_type'] = incident['alarm_type_details'].get('alarm_sub_type', 'N/A')\n else:\n # Set defaults if alarm_type_details is not present\n incident['alarm_main_type'] = 'N/A'\n incident['alarm_sub_type'] = 'N/A'\n \n alarm_id = incident.get(\"alarm_id\")\n \n # Generate alarm link\n if alarm_id and company_id:\n incident['alarm_link'] = f\"https://platform.socradar.com/app/company/{company_id}/alarm-management?tab=approved&alarmId={alarm_id}\"\n else:\n incident['alarm_link'] = 'N/A'\n current_status = incident.get(\"status\", \"N/A\")\n helper.log_info(f\"Processing Alarm ID: {alarm_id} - Status: {current_status}\")\n \n # Check if already processed and if status changed\n if alarm_id:\n # Convert alarm_id to string for consistent comparison\n alarm_id_str = str(alarm_id)\n if alarm_id_str in processed_alarms:\n old_status = processed_alarms.get(alarm_id_str)\n helper.log_debug(f\"Checkpoint check - alarm: {alarm_id_str}, old_status: {old_status}, current_status: {current_status}\")\n if old_status == current_status:\n duplicate_count += 1\n continue\n else:\n # Status changed - index the update\n helper.log_info(f\"Status changed for {alarm_id}: {old_status} -> {current_status}\")\n incident['status_changed'] = True\n incident['previous_status'] = old_status\n \n # New incident or status update\n all_incidents.append(incident)\n new_incidents_count += 1\n \n # Stop if we hit the per-run limit\n if new_incidents_count >= max_new_incidents_per_run:\n helper.log_info(f\"Reached max incidents per run 
({max_new_incidents_per_run})\")\n break\n \n helper.log_info(f\"Page {page}: Got {len(incidents)} incidents ({new_incidents_count} new/updated so far, {duplicate_count} unchanged)\")\n \n # Stop if we hit the limit\n if new_incidents_count >= max_new_incidents_per_run:\n break\n \n # Check if this was the last page\n if len(incidents) < 100:\n helper.log_info(\"Reached last page\")\n break\n \n page += 1\n time.sleep(2) # Wait 2 seconds between requests\n \n except Exception as e:\n helper.log_error(f\"Error on page {page}: {str(e)}\")\n break\n \n helper.log_info(f\"Fetching complete. Found {new_incidents_count} new/updated incidents, skipped {duplicate_count} unchanged\")\n \n # --- Stage 6: Index New Incidents to Splunk in Batches of 30 ---\n indexed_count = 0\n indexed_alarms = {} # Changed to dict to store alarm_id -> status\n batch_size = 30\n \n # Process in batches of 30\n for batch_start in range(0, len(all_incidents), batch_size):\n batch_end = min(batch_start + batch_size, len(all_incidents))\n batch = all_incidents[batch_start:batch_end]\n \n helper.log_info(f\"Indexing batch {batch_start//batch_size + 1}: incidents {batch_start+1} to {batch_end}\")\n \n for incident in batch:\n alarm_id = incident.get(\"alarm_id\")\n \n try:\n # Get event time from incident\n event_time = None\n date_str = incident.get(\"date\") # Using 'date' field based on your sample\n if date_str:\n try:\n # Parse date like \"2025-03-18 10:40:10\"\n event_datetime = datetime.strptime(date_str, \"%Y-%m-%d %H:%M:%S\")\n event_time = event_datetime.timestamp()\n except:\n pass\n \n # Log what we're indexing for debugging\n if incident.get('status_changed'):\n helper.log_info(f\"Indexing status change for alarm {alarm_id}: {incident.get('previous_status')} -> {incident.get('status')}\")\n helper.log_debug(f\"Full incident data: {json.dumps(incident)[:500]}...\")\n \n # Create Splunk event\n event = helper.new_event(\n data=json.dumps(incident),\n index=output_index,\n 
source=input_type,\n sourcetype=sourcetype,\n time=event_time\n )\n ew.write_event(event)\n \n indexed_count += 1\n if alarm_id:\n # Store alarm_id as string for consistent checkpoint handling\n indexed_alarms[str(alarm_id)] = incident.get('status')\n \n except Exception as e:\n helper.log_error(f\"Failed to index incident {alarm_id}: {str(e)}\")\n \n # Log progress after each batch\n helper.log_info(f\"Batch complete. Total indexed: {indexed_count}\")\n \n helper.log_info(f\"Indexing complete. Indexed {indexed_count} new incidents to Splunk\")\n \n # --- Stage 7: Update Checkpoint ---\n if indexed_alarms:\n # Update checkpoint with new alarms and their statuses\n processed_alarms.update(indexed_alarms)\n \n # Keep only last 10000 alarms to prevent checkpoint from growing too large\n if len(processed_alarms) > 10000:\n # Sort by alarm_id and keep the most recent ones\n sorted_alarms = sorted(processed_alarms.items(), key=lambda x: x[0])\n processed_alarms = dict(sorted_alarms[-10000:])\n \n # Save checkpoint with new format\n checkpoint_data = {\n \"alarm_status\": processed_alarms,\n \"last_updated\": datetime.now(timezone.utc).isoformat()\n }\n \n try:\n helper.save_check_point(checkpoint_key, json.dumps(checkpoint_data))\n helper.log_info(f\"Checkpoint updated. 
Total tracked alarms: {len(processed_alarms)}\")\n except Exception as e:\n helper.log_error(f\"Failed to save checkpoint: {str(e)}\")\n \n helper.log_info(\"SCRIPT_END: SOCRadar Incidents v4 collection complete.\")\n\n\nfrom splunklib.modularinput import Scheme\n\ndef get_scheme():\n \"\"\"Returns scheme parameters for this modular input.\"\"\"\n scheme = Scheme(\"SOCRadar Incidents Collector v4\")\n scheme.description = \"Collects incidents from the SOCRadar v4 API.\"\n scheme.use_external_validation = True\n scheme.use_single_instance = False\n return scheme", "customized_options": [{"name": "socradar_api_key", "value": ""}, {"name": "socradar_company_id", "value": "26808"}], "uuid": "7ea794adf02c46a2920a50af3aa4af97", "sample_count": "76"}]}, "field_extraction_builder": {"socradar:incidents": {"data_format": "json"}, "socradar:status_updates": {"data_format": "json"}}, "global_settings_builder": {"global_settings": {"proxy_settings": {"proxy_type": "http"}, "log_settings": {"log_level": "DEBUG"}, "customized_settings": [{"required": false, "name": "how_many_days", "label": "How Many Days", "default_value": "", "placeholder": "", "help_string": "", "type": "text", "format_type": "text", "value": "1"}]}}, "sourcetype_builder": {"socradar:incidents": {"metadata": {"data_input_name": "socradar_incidents_collector"}}}, "validation": {"validators": ["best_practice_validation", "data_model_mapping_validation", "field_extract_validation", "app_cert_validation"], "status": "job_started", "validation_id": "v_1762123959_26", "progress": 0.7142857142857143}}