add_metricator_hec

master
JocelynPa 3 years ago
parent 2c7a08aa8a
commit 0ebc7a7180

@ -0,0 +1,5 @@
# TA-metricator-hec-for-nmon
Copyright 2017 Octamis - Copyright 2017 Guilhem Marchand
All rights reserved.

@ -0,0 +1,258 @@
# nmon.conf.spec
# This file contains possibles attributes and values you can use to configure nmon processes generation.
# There is an nmon.conf in $SPLUNK_HOME/etc/[nmon|TA-nmon|PA-nmon]/default/. To set custom configurations,
# place an nmon.conf in $SPLUNK_HOME/etc/[nmon|TA-nmon|PA-nmon]/local/.
# *** FILE ENCODING: UTF-8 ! ***
# When creating a local/nmon.conf, pay attention to file encoding specially when working under Windows.
# The file must be UTF-8 encoded or you may run into trouble.
# *** DON'T MODIFY THIS FILE ***
########################################################################################################################
### NMON COLLECT OPTIONS ###
########################################################################################################################
# The metricator_helper.sh input script is set by default to run every 60 seconds
# If Nmon is not running, the script will start Nmon using the configuration above
###
### FIFO options:
###
# FIFO files (named pipes) are now used to minimize the CPU footprint of the technical addons
# As such, it is not required anymore to use short cycle of Nmon run to reduce the CPU usage
# You can still want to manage the volume of data to be generated by managing the interval and snapshot values
# as a best practice recommendation, the time to live of nmon processes writing to FIFO should be 24 hours
# value for interval: time in seconds between 2 performance measures
fifo_interval=<value>
# value for snapshot: number of measure to perform
fifo_snapshot=<value>
########################################################################################################################
### VARIOUS COMMON OPTIONS ###
########################################################################################################################
# Time in seconds of margin before running a new iteration of Nmon process to prevent data gaps between 2 iterations of Nmon
# the metricator_helper.sh script will spawn a new Nmon process when the age in seconds of the current process gets higher than this value
# The endtime is evaluated the following way:
# endtime=$(( ${interval} * ${snapshot} - ${endtime_margin} ))
# When the endtime gets higher than the endtime_margin, a new Nmon process will be spawned
# default value to 240 seconds which will start a new process 4 minutes before the current process ends
# Setting this value to "0" will totally disable this feature
# Default value:
# endtime_margin="240"
endtime_margin=<value>
### NFS OPTIONS ###
# Change to "1" to activate NFS V2 / V3 (option -N) for AIX hosts
# Default value:
# AIX_NFS23="0"
AIX_NFS23=<string>
# Change to "1" to activate NFS V4 (option -NN) for AIX hosts
# Default value:
# AIX_NFS4="0"
AIX_NFS4=<string>
# Change to "1" to activate NFS V2 / V3 / V4 (option -N) for Linux hosts
# Note: Some versions of Nmon introduced a bug that makes Nmon to core when activating NFS, ensure your version is not outdated
# Default value:
# Linux_NFS="0"
Linux_NFS=<string>
########################################################################################################################
### LINUX OPTIONS ###
########################################################################################################################
# Change the priority applied while looking at nmon binary
# by default, the metricator_helper.sh script will use any nmon binary found in PATH
# Set to "1" to give the priority to embedded nmon binaries
# Note: Since release 1.6.07, priority is given by default to embedded binaries
# Default value:
# Linux_embedded_nmon_priority="1"
Linux_embedded_nmon_priority=<string>
# Change the limit for processes and disks capture of nmon for Linux
# In default configuration, nmon will capture most of the process table by capturing main consuming processes
# This function is percentage limit of CPU time, with a default limit of 0.01
# Changing this value can influence the volume of data to be generated, and the associated CPU overhead for that data to be parsed
# Possible values are:
# Linux_unlimited_capture="0" --> Default nmon behavior, capture main processes (no -I option)
# Linux_unlimited_capture="-1" --> Set the capture mode to unlimited (-I -1)
# Linux_unlimited_capture="x.xx" --> Set the percentage limit to a custom value, ex: "0.01" will set "-I 0.01"
Linux_unlimited_capture=<value>
# Set the maximum number of devices collected by Nmon, default is set to 1500 devices
# Increase this value if you have systems with more devices
# Up to 3000 devices will be taken in charge by the Application (hard limit in nmonparser.py / nmonparser.pl)
# Default value:
# Linux_devices="1500"
Linux_devices=<value>
# Enable disks extended statistics (DG*)
# Default is true, which activates and generates DG statistics
Linux_disk_dg_enable=<string>
# Name of the User Defined Disk Groups file, "auto" generates this for you
Linux_disk_dg_group=<value>
########################################################################################################################
### SOLARIS OPTIONS ###
########################################################################################################################
# Change to "1" to activate VxVM volumes IO statistics
# Default value:
# Solaris_VxVM="0"
Solaris_VxVM=<string>
# UARG collection (new in Version 1.11), Change to "0" to deactivate, "1" to activate (default is activate)
# Default value:
# Solaris_UARG="1"
Solaris_UARG=<string>
########################################################################################################################
### AIX OPTIONS ###
########################################################################################################################
# CAUTION: Since release 1.3.0, we use fifo files, which requires the option "-yoverwrite=1"
# Change this line if you add or remove common options for AIX, do not change NFS options here (see NFS options)
# the -p option is mandatory as it is used at launch time to save instance pid
# Default value:
# AIX_options="-f -T -A -d -K -L -M -P -^ -p -yoverwrite=1"
AIX_options=<string>
#############################
# Application related options
#############################
######################
# hostname definition:
######################
# This option can be used to force the technical add-on to use the Splunk configured value of the server hostname
# If for some reason, you need to use the Splunk host value instead of the system real hostname value, set this value to "1"
# We will search for the value of host=<value> in $SPLUNK_HOME/etc/system/local/inputs.conf
# If no value can be found, or if the file does not exist, we will fallback to the normal behavior
# Default is use system hostname
# FQDN management in nmonparser: The --fqdn option is not compatible with the host name override, if the override_sys_hostname
# is activated, the --fqdn argument will have no effect
override_sys_hostname=<string>
#####################
# frameID definition:
#####################
# The frameID definition is an enrichment mechanism used within the application to associate a given host with a given frame identifier
# By default, the mapping is operated against the value of "serialnum" which is defined at the raw level by nmon binaries
# On AIX systems, the serialnum value is equal to the serial number of the frame hosting the partition
# On Linux and Solaris systems, the serialnum is equal to the value of the hostname
# Using this option allows you to override the serialnum value by a static value defined in the nmon.conf configuration file
# nmon.conf precedence allows defining the serialnum value on per deployment basis (local/nmon.conf) or on a per server basis (/etc/nmon.conf)
# default is:
# override_sys_serialnum="0"
# which lets nmon set the serialnum value
# Set this value to:
# override_sys_serialnum="1"
# to activate the serialnum override based on the value defined in:
# override_sys_serialnum_value="<string>"
# Acceptable values for <string> are letters (lower and upper case), numbers and "-" / "_"
override_sys_serialnum=<string>
override_sys_serialnum_value=<string>
########################
# nmon external metrics:
########################
# nmon external generation management
# This option will manage the activation or deactivation of the nmon external data generation at the lower level, before it comes to parsers
# default is activated (value=1), set to "0" to deactivate
nmon_external_generation=<string>
###############
# fifo options:
###############
# Fifo options
# The realtime mode which corresponds to the old mechanism is now deprecated
# fifo mode is mandatory
# Default is "1" which means write to fifo
mode_fifo=<string>
#######################
# nmon parsers options:
#######################
# consult the documentation to get the full list of available options
# --mode fifo|colddata --> explicitly manage data in fifo/colddata
# --use_fqdn --> use the host fully qualified domain name (default)
# --silent --> minimize the processing output to save data volume (deactivated by default)
# --show_zero_values --> allows generating metrics with 0 values (default removes any metric with a zero value before it reaches the ingestion)
# --no_local_log --> do no write metrics, events and config locally on file-system (activated by default)
# --splunk_http_url --> Splunk HEC endpoint URL (must contain the protocol, IP or FQDN and endpoint path)
# --splunk_http_token --> Splunk HEC token value
# --splunk_metrics_index --> Name of the metrics index (default: os-unix-nmon-metrics)
# --splunk_events_index --> Name of the events index (default: os-unix-nmon-events)
# --splunk_config_index --> Name of the config index (default: os-unix-nmon-config)
# In fifo mode, options are sent by the metricator_consumer.sh
# In file mode, options are sent by Splunk via the nmon_processing stanza in props.conf
#
# Splunk HEC configuration (http input)
#
# Change the Splunk URL to match your protocol (http vs https) and your access URL
# By default, as long the token value is not changed from the demonstration value above, the parser will just do nothing else than writing to local logs
# For more information, see: http://dev.splunk.com/view/event-collector/SP-CAAAE6M
# TO CONFIGURE:
# - copy the default/nmon.conf to local/
# - manage your settings in your local nmon.conf
nmonparser_options=<string>

@ -0,0 +1 @@
This is where you put any scripts you want to add to this app.

@ -0,0 +1,199 @@
#!/usr/bin/env python
# Program name: create_agent.py
# Compatibility: Python 2x
# Purpose - Create a customized version of the TA-metricator-hec-for-nmon
# Licence:
# Copyright 2018 Guilhem Marchand
import sys
import os
import tarfile
import glob
import fnmatch
import argparse
import shutil
version = '2.0.0'
####################################################################
############# Arguments Parser
####################################################################
# Define Arguments
# Define Arguments
parser = argparse.ArgumentParser()
parser.add_argument('-f', action='store', dest='INFILE',
                    help='Name of the tgz archive file')
parser.add_argument('--agentname', action='store', dest='TARGET',
                    help='Define the TA Agent name and root directory')
parser.add_argument('--version', action='version', version='%(prog)s ' + version)
parser.add_argument('--debug', dest='debug', action='store_true')
parser.set_defaults(debug=False)
args = parser.parse_args()

# Set debug.
# Always assign the flag: the original only set "debug" when --debug was
# passed, so any later "if debug:" check (e.g. in findreplace) raised a
# NameError on a normal run.
debug = args.debug
####################################################################
############# Functions
####################################################################
# String replacement function
# Can be called by:
# findreplace(path, string_to_search, replace_by, file_extension)
def findreplace(directory, find, replace, filepattern):
    """Recursively replace ``find`` with ``replace`` in every file under
    ``directory`` whose basename matches the glob ``filepattern``.

    Files whose path contains ``bin/linux`` or ``bin/sarmon`` are skipped:
    those trees hold binaries (or binary-related files) that must not be
    rewritten.

    Each matching file is read fully into memory and rewritten in place.
    """
    # Resolve the debug flag defensively: the module-level "debug" variable
    # is only assigned when --debug is passed on the command line, so a
    # direct "if debug:" would raise a NameError on a normal run.
    verbose = globals().get("debug", False)
    for path, dirs, files in os.walk(os.path.abspath(directory)):
        for filename in fnmatch.filter(files, filepattern):
            filepath = os.path.join(path, filename)
            # Prevents binaries modification
            if "bin/linux" in filepath or "bin/sarmon" in filepath:
                if verbose:
                    print("file " + str(filename) + " is binary or binary related")
            else:
                with open(filepath) as f:
                    s = f.read()
                s = s.replace(find, replace)
                with open(filepath, "w") as f:
                    f.write(s)
####################################################################
############# Main Program
####################################################################
# Check Arguments: with no argument at all, print the usage text and exit 0.
# print is used in single-argument call form so the script behaves
# identically under Python 2 while also being valid Python 3 syntax.
if len(sys.argv) < 2:
    print("\n%s" % os.path.basename(sys.argv[0]))
    print("\nThis utility had been designed to allow creating customized agents for the TA-metricator-hec-for-nmon"
          " please follow these instructions:\n")
    print("- Download the current release of the technical add-on")
    print("- Ensure to have this Python script and the TGZ archive in the same directory")
    print("- Run the tool: ./create_agent.py and check for available options")
    print("- After the execution, a new agent package will have been created in the resources directory")
    print("- Extract its content to your Splunk deployment server, configure the server class, associated clients and"
          " deploy the agent")
    print("- Don't forget to set the application to restart splunkd after deployment\n")
    print("\nRun this tool such as:\n")
    print("./create_agent.py -f TA-metricator-hec-for-nmon_xxx.tgz --agentname TA-metricator-hec-for-nmon-custom \n")
    sys.exit(0)
# Will expect in first Argument the name of the tgz Archive of the Application to be downloaded in Splunk Base
if not args.INFILE:
    print("\nERROR: Please provide the tgz Archive file with -f statement\n")
    sys.exit(1)
else:
    infile = args.INFILE
# If the root directory of the TA-nmon is not defined, exit and show message
if not args.TARGET:
    print("ERROR: You must specify the name of the agent package you want to create, and it must be different from"
          " the default package: TA-metricator-hec-for-nmon")
    # Exit non-zero: a missing mandatory argument is an error, not a
    # successful run (the original exited 0 here).
    sys.exit(1)
else:
    ta_root_dir = args.TARGET
# Avoid naming the TA as the core application.
# Message fixed: the check (and the Splunk naming convention) is about the
# "TA-" prefix, not "TA_".
if "TA-" not in ta_root_dir:
    print("ERROR: The TA package name should always start by TA- as good Splunk practice.")
    sys.exit(1)
# Verify tgz Archive file exists
if not os.path.exists(infile):
    print('ERROR: invalid file, could not find: ' + infile)
    sys.exit(1)
# Ensure the same package name does not already exist in current directory
if os.path.exists(ta_root_dir):
    print('ERROR: A directory named ' + ta_root_dir + ' already exist in current directory, please remove it and'
          ' restart')
    sys.exit(1)
elif os.path.exists(ta_root_dir + ".tgz"):
    print('ERROR: A tgz archive named ' + ta_root_dir + ".tgz" + ' already exist in current directory, please'
          ' remove it and restart')
    sys.exit(1)
# Extract Archive
tar = tarfile.open(infile)
msg = 'Extracting tgz Archive: ' + infile
print (msg)
# NOTE(review): extractall() trusts the archive's member paths; a crafted
# tgz with "../" members could write outside the current directory. Only
# official release archives should be passed to this tool.
tar.extractall()
tar.close()
# Operate
# Get current directory
curdir = os.getcwd()
# Extract the TA-nmon default package in current directory
# (any TA-metricator-hec-for-nmon*.tgz resource archives shipped alongside).
print ('INFO: Extracting Agent tgz resources Archives')
tgz_files = 'TA-metricator-hec-for-nmon*.tgz'
for tgz in glob.glob(str(tgz_files)):
    tar = tarfile.open(tgz)
    tar.extractall()
    tar.close()
# Rename the TA directory to match agent name
# (copytree rather than rename, so the extracted default tree is preserved
# until cleanup below).
msg = 'INFO: Renaming TA-metricator-hec-for-nmon default agent to ' + ta_root_dir
print (msg)
shutil.copytree('TA-metricator-hec-for-nmon', ta_root_dir)
################# STRING REPLACEMENTS #################
# Replace the old agent name in files
# Achieve string replacements in every shell, Python, Perl and conf file so
# the embedded add-on name matches the new agent name.
print ('Achieving files transformation...')
search = 'TA-metricator-hec-for-nmon'
replace = ta_root_dir
findreplace(ta_root_dir, search, replace, "*.sh")
findreplace(ta_root_dir, search, replace, "*.py")
findreplace(ta_root_dir, search, replace, "*.pl")
findreplace(ta_root_dir, search, replace, "*.conf")
print ('Done.')
# Don't use "with" statement in tar creation for Python 2.6 backward compatibility
tar_file = ta_root_dir + '.tgz'
out = tarfile.open(tar_file, mode='w:gz')
try:
    out.add(ta_root_dir)
finally:
    msg = 'INFO: ************* Tar creation done of: ' + tar_file + ' *************'
    print (msg)
    out.close()
# remove Agent directory (the customized tree now lives only in the tgz)
if os.path.isdir(ta_root_dir):
    shutil.rmtree(ta_root_dir)
print ('\n*** Agent Creation terminated: To install the agent: ***\n')
print (' - Upload the tgz Archive ' + tar_file + ' to your Splunk deployment server')
print (' - Extract the content of the TA package in $SPLUNK_HOME/etc/deployment-apps/')
print (' - Configure the Application (set splunkd to restart), server class and associated clients to push the new'
       ' package to your clients\n')
# END
print ('Operation terminated.\n')
sys.exit(0)

@ -0,0 +1,3 @@
Text-CSV-1.95: http://search.cpan.org/~ishigaki/Text-CSV-1.95/lib/Text/CSV.pm
Compiled on AIX 7.1, certified under AIX 7.1 and 7.2

@ -0,0 +1,745 @@
package Text::Diff;
use 5.006;
use strict;
use warnings;
use Carp qw/ croak confess /;
use Exporter ();
use Algorithm::Diff ();
our $VERSION = '1.45';
our @ISA = qw/ Exporter /;
our @EXPORT = qw/ diff /;
## Hunks are made of ops. An op is the starting index for each
## sequence and the opcode:
use constant A => 0; # Array index before match/discard
use constant B => 1;
use constant OPCODE => 2; # "-", " ", "+"
use constant FLAG => 3; # What to display if not OPCODE "!"
my %internal_styles = (
Unified => undef,
Context => undef,
OldStyle => undef,
Table => undef, ## "internal", but in another module
);
## diff( $a, $b, \%options ) -- compute the difference between the two
## inputs and return it as a string (or emit it through the OUTPUT option,
## in which case the number of hunks is returned). Inputs may be
## filenames, SCALAR refs, ARRAY refs, globs / IO::Handle objects, or
## CODE refs that produce one of the former.
sub diff {
my @seqs = ( shift, shift );
my $options = shift || {};
## Normalize both inputs to ARRAYs of records: resolve CODE refs first,
## then fill in the per-side defaults (OFFSET_*, and for filenames also
## FILENAME_* / MTIME_*) unless the caller supplied them.
for my $i ( 0 .. 1 ) {
my $seq = $seqs[$i];
my $type = ref $seq;
while ( $type eq "CODE" ) {
$seqs[$i] = $seq = $seq->( $options );
$type = ref $seq;
}
my $AorB = !$i ? "A" : "B";
if ( $type eq "ARRAY" ) {
## This is most efficient :)
$options->{"OFFSET_$AorB"} = 0
unless defined $options->{"OFFSET_$AorB"};
}
elsif ( $type eq "SCALAR" ) {
$seqs[$i] = [split( /^/m, $$seq )];
$options->{"OFFSET_$AorB"} = 1
unless defined $options->{"OFFSET_$AorB"};
}
elsif ( ! $type ) {
## A plain string is a filename: slurp it line by line.
$options->{"OFFSET_$AorB"} = 1
unless defined $options->{"OFFSET_$AorB"};
$options->{"FILENAME_$AorB"} = $seq
unless defined $options->{"FILENAME_$AorB"};
$options->{"MTIME_$AorB"} = (stat($seq))[9]
unless defined $options->{"MTIME_$AorB"};
local $/ = "\n";
open F, "<$seq" or croak "$!: $seq";
$seqs[$i] = [<F>];
close F;
}
elsif ( $type eq "GLOB" || UNIVERSAL::isa( $seq, "IO::Handle" ) ) {
$options->{"OFFSET_$AorB"} = 1
unless defined $options->{"OFFSET_$AorB"};
local $/ = "\n";
$seqs[$i] = [<$seq>];
}
else {
confess "Can't handle input of type ", ref;
}
}
## Config vars
## Wrap whatever OUTPUT the caller supplied (CODE / SCALAR ref / ARRAY
## ref / handle) in a uniform $output_handler->($text) closure; with no
## OUTPUT, accumulate into $output and return it at the end.
my $output;
my $output_handler = $options->{OUTPUT};
my $type = ref $output_handler ;
if ( ! defined $output_handler ) {
$output = "";
$output_handler = sub { $output .= shift };
}
elsif ( $type eq "CODE" ) {
## No problems, mate.
}
elsif ( $type eq "SCALAR" ) {
my $out_ref = $output_handler;
$output_handler = sub { $$out_ref .= shift };
}
elsif ( $type eq "ARRAY" ) {
my $out_ref = $output_handler;
$output_handler = sub { push @$out_ref, shift };
}
elsif ( $type eq "GLOB" || UNIVERSAL::isa $output_handler, "IO::Handle" ) {
my $output_handle = $output_handler;
$output_handler = sub { print $output_handle shift };
}
else {
croak "Unrecognized output type: $type";
}
## Resolve STYLE to a formatter: map built-in names to their
## Text::Diff::* packages, lazy-load a style module that can't hunk()
## yet, and instantiate class names that provide new().
my $style = $options->{STYLE};
$style = "Unified" unless defined $options->{STYLE};
$style = "Text::Diff::$style" if exists $internal_styles{$style};
if ( ! $style->can( "hunk" ) ) {
eval "require $style; 1" or die $@;
}
$style = $style->new if ! ref $style && $style->can( "new" );
my $ctx_lines = $options->{CONTEXT};
$ctx_lines = 3 unless defined $ctx_lines;
$ctx_lines = 0 if $style->isa( "Text::Diff::OldStyle" );
my @keygen_args = $options->{KEYGEN_ARGS}
? @{$options->{KEYGEN_ARGS}}
: ();
## State vars
my $diffs = 0; ## Number of discards this hunk
my $ctx = 0; ## Number of " " (ctx_lines) ops pushed after last diff.
my @ops; ## ops (" ", +, -) in this hunk
my $hunks = 0; ## Number of hunks
## Emit one complete hunk; the file header goes out once, before the
## first hunk.
my $emit_ops = sub {
$output_handler->( $style->file_header( @seqs, $options ) )
unless $hunks++;
$output_handler->( $style->hunk_header( @seqs, @_, $options ) );
$output_handler->( $style->hunk ( @seqs, @_, $options ) );
$output_handler->( $style->hunk_footer( @seqs, @_, $options ) );
};
## We keep 2*ctx_lines so that if a diff occurs
## at 2*ctx_lines we continue to grow the hunk instead
## of emitting diffs and context as we go. We
## need to know the total length of both of the two
## subsequences so the line count can be printed in the
## header.
my $dis_a = sub {push @ops, [@_[0,1],"-"]; ++$diffs ; $ctx = 0 };
my $dis_b = sub {push @ops, [@_[0,1],"+"]; ++$diffs ; $ctx = 0 };
## Walk both sequences: discards accumulate into the current hunk and a
## long enough run of matches (more than 2*ctx_lines) flushes it.
Algorithm::Diff::traverse_sequences(
@seqs,
{
MATCH => sub {
push @ops, [@_[0,1]," "];
if ( $diffs && ++$ctx > $ctx_lines * 2 ) {
$emit_ops->( [ splice @ops, 0, $#ops - $ctx_lines ] );
$ctx = $diffs = 0;
}
## throw away context lines that aren't needed any more
shift @ops if ! $diffs && @ops > $ctx_lines;
},
DISCARD_A => $dis_a,
DISCARD_B => $dis_b,
},
$options->{KEYGEN}, # pass in user arguments for key gen function
@keygen_args,
);
## Flush the last, still-pending hunk, trimming excess trailing context.
if ( $diffs ) {
$#ops -= $ctx - $ctx_lines if $ctx > $ctx_lines;
$emit_ops->( \@ops );
}
$output_handler->( $style->file_footer( @seqs, $options ) ) if $hunks;
return defined $output ? $output : $hunks;
}
## _header( \%options ) -- build the two-line file header from the
## FILENAME_PREFIX_* / FILENAME_* / MTIME_* options. Returns "" unless
## both filenames are defined; a defined mtime is appended after a tab.
sub _header {
my $opts = shift;
my $fn_a = $opts->{FILENAME_A};
my $fn_b = $opts->{FILENAME_B};
## remember to change Text::Diff::Table if this logic is tweaked.
return "" unless defined $fn_a && defined $fn_b;
my $line_a = $opts->{FILENAME_PREFIX_A} . " " . $fn_a;
$line_a .= "\t" . localtime $opts->{MTIME_A} if defined $opts->{MTIME_A};
my $line_b = $opts->{FILENAME_PREFIX_B} . " " . $fn_b;
$line_b .= "\t" . localtime $opts->{MTIME_B} if defined $opts->{MTIME_B};
return "$line_a\n$line_b\n";
}
## _range encapsulates the building of, well, ranges. Turns out there are
## a few nuances.
## _range( \@ops, $a_or_b, $format ) -- build the line-range string for one
## side (A or B) of a hunk: "start,count" (with "start,0" for an empty
## range) in "unified" format, "start" or "start,end" otherwise.
sub _range {
my ( $ops, $a_or_b, $format ) = @_;
my $start = $ops->[ 0]->[$a_or_b];
my $after = $ops->[-1]->[$a_or_b];
## The sequence indexes in the lines are from *before* the OPCODE is
## executed, so we bump the last index up unless the OP indicates
## it didn't change.
++$after
unless $ops->[-1]->[OPCODE] eq ( $a_or_b == A ? "+" : "-" );
## convert from 0..n index to 1..(n+1) line number. The unless modifier
## handles diffs with no context, where only one file is affected. In this
## case $start == $after indicates an empty range, and the $start must
## not be incremented.
my $empty_range = $start == $after;
++$start unless $empty_range;
return
$start == $after
? $format eq "unified" && $empty_range
? "$start,0"
: $start
: $format eq "unified"
? "$start,".($after-$start+1)
: "$start,$after";
}
## _op_to_line( \@seqs, $op, $a_or_b, \%op_prefixes ) -- render a single op
## as the pair ( prefix, line ) for the requested side, or () when this op
## type is suppressed (its prefix is undef / absent) in the current format.
sub _op_to_line {
my ( $seqs, $op, $a_or_b, $op_prefixes ) = @_;
my $opcode = $op->[OPCODE];
return () unless defined $op_prefixes->{$opcode};
## A FLAG (e.g. "!" set by the Context style) overrides the raw opcode
## when choosing the display prefix.
my $op_sym = defined $op->[FLAG] ? $op->[FLAG] : $opcode;
$op_sym = $op_prefixes->{$op_sym};
return () unless defined $op_sym;
## When no side is forced, take the line from B for additions, A otherwise.
$a_or_b = $op->[OPCODE] ne "+" ? 0 : 1 unless defined $a_or_b;
my @line = ( $op_sym, $seqs->[$a_or_b][$op->[$a_or_b]] );
## Mirror GNU diff's marker for a final line without a trailing newline.
unless ( $line[1] =~ /(?:\n|\r\n)$/ ) {
$line[1] .= "\n\\ No newline at end of file\n";
}
return @line;
}
SCOPE: {
## Base class for all output styles: a do-nothing formatter whose five
## hooks each return "", so subclasses only override what they emit.
package Text::Diff::Base;
## Constructor: bless the key/value pairs into the invoking class.
## Works for both class and instance invocation.
sub new {
my $class = shift;
$class = ref $class if ref $class;
return bless { @_ }, $class;
}
sub file_header { "" }
sub hunk_header { "" }
sub hunk { "" }
sub hunk_footer { "" }
sub file_footer { "" }
}
@Text::Diff::Unified::ISA = qw( Text::Diff::Base );
## Emit the "--- fileA" / "+++ fileB" header; the prefixes can be
## overridden via FILENAME_PREFIX_A / FILENAME_PREFIX_B in the options.
sub Text::Diff::Unified::file_header {
shift; ## No instance data
my $options = pop ;
_header(
{ FILENAME_PREFIX_A => "---", FILENAME_PREFIX_B => "+++", %$options }
);
}
## Emit the "@@ -a,b +c,d @@" line for one hunk.
sub Text::Diff::Unified::hunk_header {
shift; ## No instance data
pop; ## Ignore options
my $ops = pop;
return join( "",
"@@ -",
_range( $ops, A, "unified" ),
" +",
_range( $ops, B, "unified" ),
" @@\n",
);
}
## Emit a hunk body: context lines prefixed " ", deletions "-", additions "+".
sub Text::Diff::Unified::hunk {
shift; ## No instance data
pop; ## Ignore options
my $ops = pop;
my $prefixes = { "+" => "+", " " => " ", "-" => "-" };
return join "", map _op_to_line( \@_, $_, undef, $prefixes ), @$ops
}
@Text::Diff::Context::ISA = qw( Text::Diff::Base );
## Emit the "*** fileA" / "--- fileB" header used by context diffs.
sub Text::Diff::Context::file_header {
_header { FILENAME_PREFIX_A=>"***", FILENAME_PREFIX_B=>"---", %{$_[-1]} };
}
## Hunks are separated by a fixed asterisk rule.
sub Text::Diff::Context::hunk_header {
return "***************\n";
}
## Emit one context-style hunk: the A side ("*** a,b ****") followed by
## the B side ("--- c,d ----"). A run of ops mixing "-" and "+" is
## re-flagged "!" so both sides show those lines as in-place changes.
sub Text::Diff::Context::hunk {
shift; ## No instance data
pop; ## Ignore options
my $ops = pop;
## Leave the sequences in @_[0,1]
my $a_range = _range( $ops, A, "" );
my $b_range = _range( $ops, B, "" );
## Sigh. Gotta make sure that differences that aren't adds/deletions
## get prefixed with "!", and that the old opcodes are removed.
my $after;
for ( my $start = 0; $start <= $#$ops ; $start = $after ) {
## Scan until next difference
$after = $start + 1;
my $opcode = $ops->[$start]->[OPCODE];
next if $opcode eq " ";
my $bang_it;
while ( $after <= $#$ops && $ops->[$after]->[OPCODE] ne " " ) {
$bang_it ||= $ops->[$after]->[OPCODE] ne $opcode;
++$after;
}
if ( $bang_it ) {
for my $i ( $start..($after-1) ) {
$ops->[$i]->[FLAG] = "!";
}
}
}
## Per-side prefix tables: undef suppresses the op on that side.
my $b_prefixes = { "+" => "+ ", " " => " ", "-" => undef, "!" => "! " };
my $a_prefixes = { "+" => undef, " " => " ", "-" => "- ", "!" => "! " };
return join( "",
"*** ", $a_range, " ****\n",
map( _op_to_line( \@_, $_, A, $a_prefixes ), @$ops ),
"--- ", $b_range, " ----\n",
map( _op_to_line( \@_, $_, B, $b_prefixes ), @$ops ),
);
}
@Text::Diff::OldStyle::ISA = qw( Text::Diff::Base );
## Classify a hunk for old-style output: "a" (all additions), "d" (all
## deletions) or "c" (a mix of opcodes, i.e. a change).
sub _op {
my $ops = shift;
my $op = $ops->[0]->[OPCODE];
$op = "c" if grep $_->[OPCODE] ne $op, @$ops;
$op = "a" if $op eq "+";
$op = "d" if $op eq "-";
return $op;
}
## Emit the classic "5a7,9" / "8,10d7" / "3,4c3,4" hunk header.
sub Text::Diff::OldStyle::hunk_header {
shift; ## No instance data
pop; ## ignore options
my $ops = pop;
my $op = _op $ops;
return join "", _range( $ops, A, "" ), $op, _range( $ops, B, "" ), "\n";
}
## Emit one old-style hunk: "< " lines from A, a "---" separator when the
## hunk is a change, then "> " lines from B. Context (" ") lines map to
## undef prefixes and are therefore not printed.
sub Text::Diff::OldStyle::hunk {
shift; ## No instance data
pop; ## ignore options
my $ops = pop;
## Leave the sequences in @_[0,1]
my $a_prefixes = { "+" => undef, " " => undef, "-" => "< " };
my $b_prefixes = { "+" => "> ", " " => undef, "-" => undef };
my $op = _op $ops;
return join( "",
map( _op_to_line( \@_, $_, A, $a_prefixes ), @$ops ),
$op eq "c" ? "---\n" : (),
map( _op_to_line( \@_, $_, B, $b_prefixes ), @$ops ),
);
}
1;
__END__
=head1 NAME
Text::Diff - Perform diffs on files and record sets
=head1 SYNOPSIS
use Text::Diff;
## Mix and match filenames, strings, file handles, producer subs,
## or arrays of records; returns diff in a string.
## WARNING: can return B<large> diffs for large files.
my $diff = diff "file1.txt", "file2.txt", { STYLE => "Context" };
my $diff = diff \$string1, \$string2, \%options;
my $diff = diff \*FH1, \*FH2;
my $diff = diff \&reader1, \&reader2;
my $diff = diff \@records1, \@records2;
## May also mix input types:
my $diff = diff \@records1, "file_B.txt";
=head1 DESCRIPTION
C<diff()> provides a basic set of services akin to the GNU C<diff> utility. It
is not anywhere near as feature complete as GNU C<diff>, but it is better
integrated with Perl and available on all platforms. It is often faster than
shelling out to a system's C<diff> executable for small files, and generally
slower on larger files.
Relies on L<Algorithm::Diff> for, well, the algorithm. This may not produce
the same exact diff as a system's local C<diff> executable, but it will be a
valid diff and comprehensible by C<patch>. We haven't seen any differences
between L<Algorithm::Diff>'s logic and GNU C<diff>'s, but we have not examined
them to make sure they are indeed identical.
B<Note>: If you don't want to import the C<diff> function, do one of the
following:
use Text::Diff ();
require Text::Diff;
That's a pretty rare occurrence,
so C<diff()> is exported by default.
If you pass a filename, but the file can't be read,
then C<diff()> will C<croak>.
=head1 OPTIONS
C<diff()> takes two parameters from which to draw input and a set of
options to control its output. The options are:
=over
=item FILENAME_A, MTIME_A, FILENAME_B, MTIME_B
The name of the file and the modification time "files".
These are filled in automatically for each file when C<diff()> is passed a
filename, unless a defined value is passed in.
If a filename is not passed in and FILENAME_A and FILENAME_B are not provided
or are C<undef>, the header will not be printed.
Unused on C<OldStyle> diffs.
=item OFFSET_A, OFFSET_B
The index of the first line / element. These default to 1 for all
parameter types except ARRAY references, for which the default is 0. This
is because ARRAY references are presumed to be data structures, while the
others are line-oriented text.
=item STYLE
"Unified", "Context", "OldStyle", or an object or class reference for a class
providing C<file_header()>, C<hunk_header()>, C<hunk()>, C<hunk_footer()> and
C<file_footer()> methods. The two footer() methods are provided for
overloading only; none of the formats provide them.
Defaults to "Unified" (unlike standard C<diff>), because Unified is what's most
often used in submitting patches and is the most human readable of the three.
If the package indicated by the STYLE has no C<hunk()> method, C<diff()> will
load it automatically (lazy loading). Since all such packages should inherit
from C<Text::Diff::Base>, this should be marvy.
Styles may be specified as class names (C<STYLE =E<gt> 'Foo'>),
in which case they will be C<new()>ed with no parameters,
or as objects (C<STYLE =E<gt> Foo-E<gt>new>).
=item CONTEXT
How many lines before and after each diff to display. Ignored on old-style
diffs. Defaults to 3.
=item OUTPUT
Examples and their equivalent subroutines:
OUTPUT => \*FOOHANDLE, # like: sub { print FOOHANDLE shift() }
OUTPUT => \$output, # like: sub { $output .= shift }
OUTPUT => \@output, # like: sub { push @output, shift }
OUTPUT => sub { $output .= shift },
If no C<OUTPUT> is supplied, returns the diffs in a string. If
C<OUTPUT> is a C<CODE> ref, it will be called once with the (optional)
file header, and once for each hunk body with the text to emit. If
C<OUTPUT> is an L<IO::Handle>, output will be emitted to that handle.
=item FILENAME_PREFIX_A, FILENAME_PREFIX_B
The string to print before the filename in the header. Unused on C<OldStyle>
diffs. Defaults are C<"---">, C<"+++"> for Unified and C<"***">, C<"---"> for
Context.
=item KEYGEN, KEYGEN_ARGS
These are passed to L<Algorithm::Diff/traverse_sequences>.
=back
B<Note>: if neither C<FILENAME_> option is defined, the header will not be
printed. If at least one is present, the other and both C<MTIME_> options must
be present or "Use of undefined variable" warnings will be generated (except
on C<OldStyle> diffs, which ignores these options).
=head1 Formatting Classes
These functions implement the output formats. They are grouped in to classes
so C<diff()> can use class names to call the correct set of output routines and
so that you may inherit from them easily. There are no constructors or
instance methods for these classes, though subclasses may provide them if need
be.
Each class has C<file_header()>, C<hunk_header()>, C<hunk()>, and C<footer()>
methods identical to those documented in the C<Text::Diff::Unified> section.
C<header()> is called before the C<hunk()> is first called, C<footer()>
afterwards. The default footer function is an empty method provided for
overloading:
sub footer { return "End of patch\n" }
Some output formats are provided by external modules (which are loaded
automatically), such as L<Text::Diff::Table>. These are
documented here to keep the documentation simple.
=head2 Text::Diff::Base
Returns "" for all methods (other than C<new()>).
=head2 Text::Diff::Unified
--- A Mon Nov 12 23:49:30 2001
+++ B Mon Nov 12 23:49:30 2001
@@ -2,13 +2,13 @@
2
3
4
-5d
+5a
6
7
8
9
+9a
10
11
-11d
12
13
=over
=item Text::Diff::Unified::file_header
$s = Text::Diff::Unified->file_header( $options );
Returns a string containing a unified header. The sole parameter is the
C<options> hash passed in to C<diff()>, containing at least:
FILENAME_A => $fn1,
MTIME_A => $mtime1,
FILENAME_B => $fn2,
MTIME_B => $mtime2
May also contain
FILENAME_PREFIX_A => "---",
FILENAME_PREFIX_B => "+++",
to override the default prefixes (default values shown).
=item Text::Diff::Unified::hunk_header
Text::Diff::Unified->hunk_header( \@ops, $options );
Returns a string containing the heading of one hunk of unified diff.
=item Text::Diff::Unified::hunk
Text::Diff::Unified->hunk( \@seq_a, \@seq_b, \@ops, $options );
Returns a string containing the output of one hunk of unified diff.
=back
=head2 Text::Diff::Table
+--+----------------------------------+--+------------------------------+
| |../Test-Differences-0.2/MANIFEST | |../Test-Differences/MANIFEST |
| |Thu Dec 13 15:38:49 2001 | |Sat Dec 15 02:09:44 2001 |
+--+----------------------------------+--+------------------------------+
| | * 1|Changes *
| 1|Differences.pm | 2|Differences.pm |
| 2|MANIFEST | 3|MANIFEST |
| | * 4|MANIFEST.SKIP *
| 3|Makefile.PL | 5|Makefile.PL |
| | * 6|t/00escape.t *
| 4|t/00flatten.t | 7|t/00flatten.t |
| 5|t/01text_vs_data.t | 8|t/01text_vs_data.t |
| 6|t/10test.t | 9|t/10test.t |
+--+----------------------------------+--+------------------------------+
This format also goes to some pains to highlight "invisible" characters on
differing elements by selectively escaping whitespace:
+--+--------------------------+--------------------------+
| |demo_ws_A.txt |demo_ws_B.txt |
| |Fri Dec 21 08:36:32 2001 |Fri Dec 21 08:36:50 2001 |
+--+--------------------------+--------------------------+
| 1|identical |identical |
* 2| spaced in | also spaced in *
* 3|embedded space |embedded tab *
| 4|identical |identical |
* 5| spaced in |\ttabbed in *
* 6|trailing spaces\s\s\n |trailing tabs\t\t\n *
| 7|identical |identical |
* 8|lf line\n |crlf line\r\n *
* 9|embedded ws |embedded\tws *
+--+--------------------------+--------------------------+
See L<Text::Diff::Table> for more details, including how the whitespace
escaping works.
=head2 Text::Diff::Context
*** A Mon Nov 12 23:49:30 2001
--- B Mon Nov 12 23:49:30 2001
***************
*** 2,14 ****
2
3
4
! 5d
6
7
8
9
10
11
- 11d
12
13
--- 2,14 ----
2
3
4
! 5a
6
7
8
9
+ 9a
10
11
12
13
Note: C<hunk_header()> returns only "***************\n".
=head2 Text::Diff::OldStyle
5c5
< 5d
---
> 5a
9a10
> 9a
12d12
< 11d
Note: no C<file_header()>.
=head1 LIMITATIONS
Must suck both input files entirely in to memory and store them with a normal
amount of Perlish overhead (one array location) per record. This is implied by
the implementation of L<Algorithm::Diff>, which takes two arrays. If
L<Algorithm::Diff> ever offers an incremental mode, this can be changed
(contact the maintainers of L<Algorithm::Diff> and C<Text::Diff> if you need
this; it shouldn't be too terribly hard to tie arrays in this fashion).
Does not provide most of the more refined GNU C<diff> options: recursive
directory tree scanning, ignoring blank lines / whitespace, etc., etc. These
can all be added as time permits and need arises, many are rather easy; patches
quite welcome.
Uses closures internally, this may lead to leaks on Perl versions 5.6.1 and
prior if used many times over a process' life time.
=head1 SEE ALSO
L<Algorithm::Diff> - the underlying implementation of the diff algorithm
used by C<Text::Diff>.
L<YAML::Diff> - find difference between two YAML documents.
L<HTML::Differences> - find difference between two HTML documents.
This uses a more sane approach than L<HTML::Diff>.
L<XML::Diff> - find difference between two XML documents.
L<Array::Diff> - find the differences between two Perl arrays.
L<Hash::Diff> - find the differences between two Perl hashes.
L<Data::Diff> - find difference between two arbitrary data structures.
=head1 REPOSITORY
L<https://github.com/neilbowers/Text-Diff>
=head1 AUTHOR
Adam Kennedy E<lt>adamk@cpan.orgE<gt>
Barrie Slaymaker E<lt>barries@slaysys.comE<gt>
=head1 COPYRIGHT
Some parts copyright 2009 Adam Kennedy.
Copyright 2001 Barrie Slaymaker. All Rights Reserved.
You may use this under the terms of either the Artistic License or GNU Public
License v 2.0 or greater.
=cut
1;

@ -0,0 +1,142 @@
package Text::Diff::Config;

use 5.006;
use strict;
use warnings;

our $VERSION = '1.44';

# Boolean flag: when true, Text::Diff's table formatter emits unicode
# characters as themselves instead of "\x{HHHH}" escapes.
our $Output_Unicode;

BEGIN
{
    # Seed from the environment at compile time so that merely loading the
    # module picks up DIFF_OUTPUT_UNICODE before any diff output is produced.
    $Output_Unicode = $ENV{'DIFF_OUTPUT_UNICODE'};
}

1;
__END__
=pod
=head1 NAME
Text::Diff::Config - global configuration for Text::Diff (as a
separate module).
=head1 SYNOPSIS
use Text::Diff::Config;
$Text::Diff::Config::Output_Unicode = 1;
=head1 DESCRIPTION
This module configures Text::Diff and its related modules. Currently it contains
only one global variable $Text::Diff::Config::Output_Unicode which is a boolean
flag, that if set outputs unicode characters as themselves without escaping them
as C< \x{HHHH} > first.
It is initialized to the value of C< $ENV{DIFF_OUTPUT_UNICODE} >, but can be
set to a different value at run-time, including using local.
=head1 AUTHOR
Shlomi Fish, L<http://www.shlomifish.org/> .
=head1 LICENSE
Copyright 2010, Shlomi Fish.
This file is licensed under the MIT/X11 License:
L<http://www.opensource.org/licenses/mit-license.php>.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
=cut
# NOTE(review): this is a second, older copy of Text::Diff::Config (it uses
# "use vars" instead of "our" and declares no $VERSION).  A near-identical
# definition appears earlier in this patch; presumably one of the two is an
# accidental duplicate -- confirm which copy should ship.
package Text::Diff::Config;

use strict;
use warnings;

use vars qw($Output_Unicode);

BEGIN
{
    # Seed the flag from the environment at compile time.
    $Output_Unicode = $ENV{'DIFF_OUTPUT_UNICODE'};
}

1;
__END__
=pod
=head1 NAME
Text::Diff::Config - global configuration for Text::Diff (as a
separate module).
=head1 SYNOPSIS
use Text::Diff::Config;
$Text::Diff::Config::Output_Unicode = 1;
=head1 DESCRIPTION
This module configures Text::Diff and its related modules. Currently it contains
only one global variable $Text::Diff::Config::Output_Unicode which is a boolean
flag, that if set outputs unicode characters as themselves without escaping them
as C< \x{HHHH} > first.
It is initialized to the value of C< $ENV{DIFF_OUTPUT_UNICODE} >, but can be
set to a different value at run-time, including using local.
=head1 AUTHOR
Shlomi Fish, L<http://www.shlomifish.org/> .
=head1 LICENSE
Copyright 2010, Shlomi Fish.
This file is licensed under the MIT/X11 License:
L<http://www.opensource.org/licenses/mit-license.php>.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
=cut

@ -0,0 +1,429 @@
package Text::Diff::Table;

use 5.006;
use strict;
use warnings;
use Carp;
use Text::Diff::Config;

our $VERSION = '1.44';
our @ISA = qw( Text::Diff::Base Exporter );
our @EXPORT_OK = qw( expand_tabs );

# %escapes maps a character's ordinal value to its printable representation:
# printable ASCII maps to itself, control and high-bit characters map to hex
# escapes like "\x1b", and the common C-style escapes ("\t", "\n", ...)
# override the hex forms for their ordinals because they are listed last.
# Each key is computed by building a tiny Perl string literal and eval'ing
# it, so that e.g. the entry "\\t" really keys on the tab character's ordinal.
my %escapes = map {
    my $c =
    $_ eq '"' || $_ eq '$' ? qq{'$_'}
    : $_ eq "\\" ? qq{"\\\\"}
    : qq{"$_"};
    ( ord eval $c => $_ )
} (
    map( chr, 32..126),
    map( sprintf( "\\x%02x", $_ ), ( 0..31, 127..255 ) ),
    # map( "\\c$_", "A".."Z"),
    "\\t", "\\n", "\\r", "\\f", "\\b", "\\a", "\\e"
    ## NOTE: "\\\\" is not here because some things are explicitly
    ## escaped before escape() is called and we don't want to
    ## double-escape "\". Also, in most texts, leaving "\" more
    ## readable makes sense.
);
# Expand tab characters to 8-column tab stops, tracking the output column
# across the whole string (newlines are treated as ordinary characters).
sub expand_tabs($) {
    my ($line) = @_;
    my $col    = 0;
    my $result = "";
    # split with a captured separator keeps the runs of tabs interleaved
    # with the runs of ordinary text.
    for my $chunk ( split /(\t+)/, $line, -1 ) {
        next unless length $chunk;
        if ( $chunk =~ /\A\t/ ) {
            # A run of tabs: the first tab pads to the next stop, each
            # additional tab contributes a full 8 columns.
            $result .= " " x ( 8 - $col % 8 + 8 * ( length($chunk) - 1 ) );
            $col = 0;    # now sitting exactly on a tab stop
        }
        else {
            $result .= $chunk;
            $col += length $chunk;
        }
    }
    return $result;
}
# Remove the run of CR/LF characters at the very end of the string, if any.
sub trim_trailing_line_ends($) {
    my ($text) = @_;
    # The (?!\n) lookahead stops $ from matching just before a final
    # newline, so only the line-end run at the absolute end is removed.
    $text =~ s/[\r\n]+(?!\n)$//;
    return $text;
}
# Forward declaration so escape() can be called without parentheses below.
sub escape($);

SCOPE: {
    ## use utf8 if available. don't if not.
    # escape() maps every character of its argument through %escapes;
    # characters with no entry are either passed through raw (when
    # $Text::Diff::Config::Output_Unicode is true) or rendered as a
    # "\x{HHHH}" hex escape.  The sub is compiled from a string via eval so
    # that, on perls where "use utf8" fails to compile, the pragma line can
    # be stripped out and the eval retried.
    my $escaper = <<'EOCODE';
sub escape($) {
use utf8;
join "", map {
my $c = $_;
$_ = ord;
exists $escapes{$_}
? $escapes{$_}
: $Text::Diff::Config::Output_Unicode
? $c
: sprintf( "\\x{%04x}", $_ );
} split //, shift;
}
1;
EOCODE
    unless ( eval $escaper ) {
        # Fallback: drop the "use utf8;" line and compile once more.
        $escaper =~ s/ *use *utf8 *;\n// or die "Can't drop use utf8;";
        eval $escaper or die $@;
    }
}
# Minimal constructor: stash the option key/value pairs in a hash and bless
# it into the requested class (no validation is performed here).
sub new {
    my ( $class, @options ) = @_;
    my $self = { @options };
    return bless $self, $class;
}
# Sentinel row used when one side of the diff has no element (pure
# insertion or deletion).
my $missing_elt = [ "", "" ];

# Buffer one hunk's rows into $self->{ELTS}.  Returns "" because the real
# rendering is deferred to file_footer(), where column widths can be
# computed over every buffered row at once.
sub hunk {
    my $self = shift;
    my @seqs = ( shift, shift );
    my $ops = shift; ## Leave sequences in @_[0,1]
    my $options = shift;

    # Pair up the A-side and B-side elements of this hunk.  On a " "
    # (unchanged) opcode, the shorter side is first padded with sentinel
    # rows so insertions and deletions stay visually aligned.
    my ( @A, @B );
    for ( @$ops ) {
        my $opcode = $_->[Text::Diff::OPCODE()];
        if ( $opcode eq " " ) {
            push @A, $missing_elt while @A < @B;
            push @B, $missing_elt while @B < @A;
        }
        push @A, [ $_->[0] + ( $options->{OFFSET_A} || 0), $seqs[0][$_->[0]] ]
            if $opcode eq " " || $opcode eq "-";
        push @B, [ $_->[1] + ( $options->{OFFSET_B} || 0), $seqs[1][$_->[1]] ]
            if $opcode eq " " || $opcode eq "+";
    }
    push @A, $missing_elt while @A < @B;
    push @B, $missing_elt while @B < @A;

    my @elts;
    for ( 0..$#A ) {
        my ( $A, $B ) = (shift @A, shift @B );

        ## Do minimal cleaning on identical elts so these look "normal":
        ## tabs are expanded, trailing newelts removed, etc. For differing
        ## elts, make invisible characters visible if the invisible characters
        ## differ.
        # Row type: "A" = deletion, "B" = insertion, "=" = unchanged,
        # "*" = changed on both sides.
        my $elt_type = $B == $missing_elt ? "A" :
            $A == $missing_elt ? "B" :
            $A->[1] eq $B->[1] ? "="
                               : "*";
        if ( $elt_type ne "*" ) {
            if ( $elt_type eq "=" || $A->[1] =~ /\S/ || $B->[1] =~ /\S/ ) {
                $A->[1] = escape trim_trailing_line_ends expand_tabs $A->[1];
                $B->[1] = escape trim_trailing_line_ends expand_tabs $B->[1];
            }
            else {
                # Whitespace-only insert/delete: escape without trimming so
                # the whitespace stays visible.
                $A->[1] = escape $A->[1];
                $B->[1] = escape $B->[1];
            }
        }
        else {
            ## not using \z here for backcompat reasons.
            # Split each side into leading whitespace, body, and trailing
            # whitespace so whitespace-only differences can be highlighted
            # selectively below.
            $A->[1] =~ /^(\s*?)([^ \t].*?)?(\s*)(?![\n\r])$/s;
            my ( $l_ws_A, $body_A, $t_ws_A ) = ( $1, $2, $3 );
            $body_A = "" unless defined $body_A;
            $B->[1] =~ /^(\s*?)([^ \t].*?)?(\s*)(?![\n\r])$/s;
            my ( $l_ws_B, $body_B, $t_ws_B ) = ( $1, $2, $3 );
            $body_B = "" unless defined $body_B;

            my $added_escapes;

            if ( $l_ws_A ne $l_ws_B ) {
                ## Make leading tabs visible. Other non-' ' chars
                ## will be dealt with in escape(), but this prevents
                ## tab expansion from hiding tabs by making them
                ## look like ' '.
                $added_escapes = 1 if $l_ws_A =~ s/\t/\\t/g;
                $added_escapes = 1 if $l_ws_B =~ s/\t/\\t/g;
            }

            if ( $t_ws_A ne $t_ws_B ) {
                ## Only trailing whitespace gets the \s treatment
                ## to make it obvious what's going on.
                $added_escapes = 1 if $t_ws_A =~ s/ /\\s/g;
                $added_escapes = 1 if $t_ws_B =~ s/ /\\s/g;
                $added_escapes = 1 if $t_ws_A =~ s/\t/\\t/g;
                $added_escapes = 1 if $t_ws_B =~ s/\t/\\t/g;
            }
            else {
                # Identical trailing whitespace carries no information;
                # drop it from both sides.
                $t_ws_A = $t_ws_B = "";
            }

            # Escape tabs when escapes were already added, or when plain tab
            # expansion alone would make the two sides render identically.
            # NB: join with the body as *separator* yields l_ws.body.t_ws.
            my $do_tab_escape = $added_escapes || do {
                my $expanded_A = expand_tabs join( $body_A, $l_ws_A, $t_ws_A );
                my $expanded_B = expand_tabs join( $body_B, $l_ws_B, $t_ws_B );
                $expanded_A eq $expanded_B;
            };

            # Escape backslashes when escaping would otherwise make two
            # different strings render identically.
            my $do_back_escape = $do_tab_escape || do {
                my ( $unescaped_A, $escaped_A,
                     $unescaped_B, $escaped_B
                ) =
                    map
                        join( "", /(\\.)/g ),
                    map {
                        ( $_, escape $_ )
                    }
                    expand_tabs join( $body_A, $l_ws_A, $t_ws_A ),
                    expand_tabs join( $body_B, $l_ws_B, $t_ws_B );
                $unescaped_A ne $unescaped_B && $escaped_A eq $escaped_B;
            };

            if ( $do_back_escape ) {
                $body_A =~ s/\\/\\\\/g;
                $body_B =~ s/\\/\\\\/g;
            }

            # Reassemble each side (join with body as separator, see above).
            my $line_A = join $body_A, $l_ws_A, $t_ws_A;
            my $line_B = join $body_B, $l_ws_B, $t_ws_B;

            unless ( $do_tab_escape ) {
                $line_A = expand_tabs $line_A;
                $line_B = expand_tabs $line_B;
            }

            $A->[1] = escape $line_A;
            $B->[1] = escape $line_B;
        }

        push @elts, [ @$A, @$B, $elt_type ];
    }

    # "bar" rows become the "+--+---+" separator lines in file_footer().
    push @{$self->{ELTS}}, @elts, ["bar"];
    return "";
}
# Placeholder kept for interface parity with other formatter classes; the
# table formatter computes its formats in file_footer() instead.
sub _glean_formats {
    my ( $self ) = @_;
    return $self;
}
# Render the complete buffered table.  Text::Diff calls this once per diff;
# all real formatting happens here because column widths can only be
# computed after every hunk has been buffered by hunk().  Builds the
# optional heading rows (filenames, mtimes, index label), picks 3- or
# 4-column layout, computes widths, and sprintf's every row.
sub file_footer {
    my $self = shift;
    my @seqs = (shift,shift);
    my $options = pop;

    my @heading_lines;

    # Filename heading row, only when at least one filename was supplied.
    if ( defined $options->{FILENAME_A} || defined $options->{FILENAME_B} ) {
        push @heading_lines, [
            map(
                {
                    ( "", escape( defined $_ ? $_ : "<undef>" ) );
                }
                ( @{$options}{qw( FILENAME_A FILENAME_B)} )
            ),
            "=",
        ];
    }

    # Modification-time heading row, rendered through localtime.
    if ( defined $options->{MTIME_A} || defined $options->{MTIME_B} ) {
        push @heading_lines, [
            map( {
                ( "",
                    escape(
                        ( defined $_ && length $_ )
                            ? localtime $_
                            : ""
                    )
                );
            }
            @{$options}{qw( MTIME_A MTIME_B )}
            ),
            "=",
        ];
    }

    if ( defined $options->{INDEX_LABEL} ) {
        push @heading_lines, [ "", "", "", "", "=" ] unless @heading_lines;
        $heading_lines[-1]->[0] = $heading_lines[-1]->[2] =
            $options->{INDEX_LABEL};
    }

    ## Not unshifting on to @{$self->{ELTS}} in case it's really big. Want
    ## to avoid the overhead.

    # Four-column mode shows a line-number column per side; it is needed as
    # soon as any row's two line numbers differ.
    my $four_column_mode = 0;
    for my $cols ( @heading_lines, @{$self->{ELTS}} ) {
        next if $cols->[-1] eq "bar";
        if ( $cols->[0] ne $cols->[2] ) {
            $four_column_mode = 1;
            last;
        }
    }

    # In three-column mode the duplicate line-number cell is dropped.
    unless ( $four_column_mode ) {
        for my $cols ( @heading_lines, @{$self->{ELTS}} ) {
            next if $cols->[-1] eq "bar";
            splice @$cols, 2, 1;
        }
    }

    # Column widths: the widest cell of each column wins.
    my @w = (0,0,0,0);
    for my $cols ( @heading_lines, @{$self->{ELTS}} ) {
        next if $cols->[-1] eq "bar";
        for my $i (0..($#$cols-1)) {
            $w[$i] = length $cols->[$i]
                if defined $cols->[$i] && length $cols->[$i] > $w[$i];
        }
    }

    # One sprintf format per row type; "*" marks a changed side.
    my %fmts = $four_column_mode
        ? (
            "=" => "| %$w[0]s|%-$w[1]s | %$w[2]s|%-$w[3]s |\n",
            "A" => "* %$w[0]s|%-$w[1]s * %$w[2]s|%-$w[3]s |\n",
            "B" => "| %$w[0]s|%-$w[1]s * %$w[2]s|%-$w[3]s *\n",
            "*" => "* %$w[0]s|%-$w[1]s * %$w[2]s|%-$w[3]s *\n",
        )
        : (
            "=" => "| %$w[0]s|%-$w[1]s |%-$w[2]s |\n",
            "A" => "* %$w[0]s|%-$w[1]s |%-$w[2]s |\n",
            "B" => "| %$w[0]s|%-$w[1]s |%-$w[2]s *\n",
            "*" => "* %$w[0]s|%-$w[1]s |%-$w[2]s *\n",
        );

    # The "bar" separator derives from the "=" format: every visible
    # character becomes "+", every space becomes "-".
    my @args = ('', '', '');
    push(@args, '') if $four_column_mode;
    $fmts{bar} = sprintf $fmts{"="}, @args;
    $fmts{bar} =~ s/\S/+/g;
    $fmts{bar} =~ s/ /-/g;

    # Sometimes the sprintf has too many arguments,
    # which results in a warning on Perl 5.021+
    # I really wanted to write:
    #     no warnings 'redundant';
    # but that causes a compilation error on older versions of perl
    # where the warnings pragma doesn't know about 'redundant'
    no warnings;

    my $result = join( "",
        map {
            sprintf( $fmts{$_->[-1]}, @$_ );
        } (
            ["bar"],
            @heading_lines,
            @heading_lines ? ["bar"] : (),
            @{$self->{ELTS}},
        ),
    );

    # Fixed: the original reset of @{$self->{ELTS}} sat *after* the return
    # statement (unreachable dead code) and assigned [] -- a one-element
    # list holding an empty array ref -- instead of the empty list.  Reset
    # before returning so a Table instance can be reused for another diff.
    @{$self->{ELTS}} = ();

    return $result;
}
1;
__END__
=pod
=head1 NAME
Text::Diff::Table - Text::Diff plugin to generate "table" format output
=head1 SYNOPSIS
use Text::Diff;
diff \@a, $b, { STYLE => "Table" };
=head1 DESCRIPTION
This is a plugin output formatter for Text::Diff that generates "table" style
diffs:
+--+----------------------------------+--+------------------------------+
| |../Test-Differences-0.2/MANIFEST | |../Test-Differences/MANIFEST |
| |Thu Dec 13 15:38:49 2001 | |Sat Dec 15 02:09:44 2001 |
+--+----------------------------------+--+------------------------------+
| | * 1|Changes *
| 1|Differences.pm | 2|Differences.pm |
| 2|MANIFEST | 3|MANIFEST |
| | * 4|MANIFEST.SKIP *
| 3|Makefile.PL | 5|Makefile.PL |
| | * 6|t/00escape.t *
| 4|t/00flatten.t | 7|t/00flatten.t |
| 5|t/01text_vs_data.t | 8|t/01text_vs_data.t |
| 6|t/10test.t | 9|t/10test.t |
+--+----------------------------------+--+------------------------------+
This format also goes to some pains to highlight "invisible" characters on
differing elements by selectively escaping whitespace. Each element is split
in to three segments (leading whitespace, body, trailing whitespace). If
whitespace differs in a segment, that segment is whitespace escaped.
Here is an example of the selective whitespace.
+--+--------------------------+--------------------------+
| |demo_ws_A.txt |demo_ws_B.txt |
| |Fri Dec 21 08:36:32 2001 |Fri Dec 21 08:36:50 2001 |
+--+--------------------------+--------------------------+
| 1|identical |identical |
* 2| spaced in | also spaced in *
* 3|embedded space |embedded tab *
| 4|identical |identical |
* 5| spaced in |\ttabbed in *
* 6|trailing spaces\s\s\n |trailing tabs\t\t\n *
| 7|identical |identical |
* 8|lf line\n |crlf line\r\n *
* 9|embedded ws |embedded\tws *
+--+--------------------------+--------------------------+
Here's why the lines do or do not have whitespace escaped:
=over
=item lines 1, 4, 7 don't differ, no need.
=item lines 2, 3 differ in non-whitespace, no need.
=item lines 5, 6, 8, 9 all have subtle ws changes.
=back
Whether or not line 3 should have that tab character escaped is a judgement
call; so far I'm choosing not to.
=head1 UNICODE
To output the raw unicode characters consult the documentation of
L<Text::Diff::Config>. You can set the C<DIFF_OUTPUT_UNICODE> environment
variable to 1 to output it from the command line. For more information,
consult this bug: L<https://rt.cpan.org/Ticket/Display.html?id=54214> .
=head1 LIMITATIONS
Table formatting requires buffering the entire diff in memory in order to
calculate column widths. This format should only be used for smaller
diffs.
Assumes tab stops every 8 characters, as $DEITY intended.
Assumes all character codes >= 127 need to be escaped as hex codes, ie that the
user's terminal is ASCII, and not even "high bit ASCII", capable. This can be
made an option when the need arises.
Assumes that control codes (character codes 0..31) that don't have slash-letter
escapes ("\n", "\r", etc) in Perl are best presented as hex escapes ("\x01")
instead of octal ("\001") or control-code ("\cA") escapes.
=head1 AUTHOR
Barrie Slaymaker E<lt>barries@slaysys.comE<gt>
=head1 LICENSE
Copyright 2001 Barrie Slaymaker, All Rights Reserved.
You may use this software under the terms of the GNU public license, any
version, or the Artistic license.
=cut

@ -0,0 +1,3 @@
# The Linux tgz archive contains compiled nmon binaries for various Linux distribution on various type of processor: x86 / Power / arm / s390x
The source code of Nmon Linux is available in the Nmon Linux site: http://nmon.sourceforge.net

@ -0,0 +1,221 @@
#!/usr/bin/perl

# Program name: metricator_cleaner.pl
# Compatibility: Perl (no minimum version is pinned by the code below)
# Purpose - Clean nmon and csv files when retention expired
# Author - Guilhem Marchand

$version = "2.0.0";

use Time::Local;
use Time::HiRes;
use Getopt::Long;
use POSIX 'strftime';
use File::stat; # use the object-oriented interface to stat

# LOGGING INFORMATION:
# - Messages are printed to stdout; when the script runs as a Splunk
#   scripted input they are indexed and accessible within splunkd logs.

#################################################
## Arguments Parser
#################################################

# Default values
my $CSV_REPOSITORY = "csv_repository";
my $APP = "";
my $CONFIG_REPOSITORY = "config_repository";
# NOTE(review): $MAXSECONDS and $verbose are declared but never used;
# GetOptions below writes the undeclared package variable $MAXSECONDS_CSV
# (the script does not "use strict", so this works).
my $MAXSECONDS = "";
my $verbose;

$result = GetOptions(
    "csv_repository=s" => \$CSV_REPOSITORY,       # string
    "config_repository=s" => \$CONFIG_REPOSITORY, # string
    "cleancsv" => \$CLEANCSV,                     # flag (boolean switch)
    "approot=s" => \$APP,                         # string
    "maxseconds_csv=s" => \$MAXSECONDS_CSV,       # string
    "version" => \$VERSION,                       # flag
    "help" => \$help                              # flag
);

# Show version
if ($VERSION) {
    print("nmon_clean.pl version $version \n");
    exit 0;
}

# Show help
if ($help) {
    print( "
Help for metricator_cleaner.pl:
In default configuration (eg. no options specified) the script will purge any nmon file (*.nmon) in default nmon_repository
Available options are:
--cleancsv :Activate the purge of csv files from csv repository and config repository (see also options above)
--maxseconds_csv <value> :Set the maximum file retention in seconds for csv data, every files older than this value will be permanently removed
--approot <value> :Set a custom value for the Application root directory (default are: nmon / TA-metricator-hec-for-nmon / PA-nmon)
--csv_repository <value> :Set a custom location for directory containing csv data (default: csv_repository)
--config_repository <value> :Set a custom location for directory containing config data (default: config_repository)
--version :Show current program version \n
"
    );
    exit 0;
}
#################################################
## Parameters
#################################################

# Default retention for CSV files, in seconds (86400 seconds = 24 hours).
# (A previous comment claimed "4 hours less 1 minute", which did not match
# this value.)
my $MAXSECONDS_CSV_DEFAULT = 86400;

#################################################
## Functions
#################################################

#################################################
## Program
#################################################

# Processing starting time
my $t_start = [Time::HiRes::gettimeofday];

# Local time
my $time = strftime "%d-%m-%Y %H:%M:%S", localtime;

# Default Environment Variable SPLUNK_HOME; this shall be automatically defined when the script is launched by Splunk
my $SPLUNK_HOME = $ENV{SPLUNK_HOME};

# Verify SPLUNK_HOME definition
if ( not $SPLUNK_HOME ) {
    print(
"\n$time ERROR: The environment variable SPLUNK_HOME could not be verified, if you want to run this script manually you need to export it before processing \n"
    );
    die;
}
# Discover the TA-metricator-hec-for-nmon path: standalone install first,
# then cluster slave-apps, unless --approot was supplied on the command line.
if ( length($APP) == 0 ) {
    if ( -d "$SPLUNK_HOME/etc/apps/TA-metricator-hec-for-nmon" ) {
        $APP = "$SPLUNK_HOME/etc/apps/TA-metricator-hec-for-nmon";
    }
    elsif ( -d "$SPLUNK_HOME/etc/slave-apps/TA-metricator-hec-for-nmon" ) {
        $APP = "$SPLUNK_HOME/etc/slave-apps/TA-metricator-hec-for-nmon";
    }
}
else {
    if ( !-d "$APP" ) {
        # fixed: the message previously read "could be verified" (missing "not")
        print(
"\n$time ERROR: The Application root directory could not be verified using your custom setting: $APP \n"
        );
        die;
    }
}

# Verify existence of APP
if ( !-d "$APP" ) {
    print(
"\n$time ERROR: The Application root directory could not be found, is the TA-metricator-hec-for-nmon installed ?\n"
    );
    die;
}

# var directories: runtime data written by the TA lives here
my $APP_MAINVAR = "$SPLUNK_HOME/var/log/metricator";
my $APP_VAR = "$APP_MAINVAR/var";

# Nothing has been generated yet: exit cleanly, there is nothing to purge
if ( !-d "$APP_MAINVAR" ) {
    print(
"\n$time INFO: main var directory not found ($APP_MAINVAR), no need to run.\n"
    );
    exit 0;
}
####################################################################
############# Main Program
####################################################################

# check retention: fall back to the default when not given on the command line
if ( not "$MAXSECONDS_CSV" ) {
    $MAXSECONDS_CSV = $MAXSECONDS_CSV_DEFAULT;
}

# Print starting message
print("$time Starting nmon cleaning:\n");
print("Splunk Root Directory $SPLUNK_HOME nmon_cleaner version: $version Perl version: $] \n");

# Set current epoch time, used to compute each file's age
$epoc = time();

# If the csv switch is on, purge csv data older than the retention
if ($CLEANCSV) {

    # CSV Items to clean
    @cleaning =
      ( "$APP_VAR/$CSV_REPOSITORY/*.csv", "$APP_VAR/$CONFIG_REPOSITORY/*.csv" );

    # Enter loop
    foreach $key (@cleaning) {

        # Fixed: the counter is now reset for every repository; it was
        # previously initialized once before this loop, so the report for
        # the second repository included the first repository's removals.
        $count = 0;

        @files = glob($key);

        foreach $file (@files) {
            if ( -f $file ) {

                # Get file timestamp (mtime, via File::stat's OO interface)
                my $file_timestamp = stat($file)->mtime;

                # Age of the file in seconds
                my $timediff = $epoc - $file_timestamp;

                # If retention has expired
                if ( $timediff > $MAXSECONDS_CSV ) {

                    # information (fixed: "seconds" was printed twice)
                    print("Max set retention of $MAXSECONDS_CSV seconds expired for file: $file \n");

                    # purge file
                    unlink $file;

                    # Increment counter
                    $count++;
                }
            }
        }

        if ( $count == 0 ) {
            print("$key, no action required. \n");
        }
        else {
            print("INFO: $count files were permanently removed from $key \n");
        }
    }
}

#############################################
############# Main Program End ############
#############################################

# Show elapsed time
my $t_end = [Time::HiRes::gettimeofday];
print "Elapsed time was: ",
  Time::HiRes::tv_interval( $t_start, $t_end ) . " seconds \n";

exit(0);

@ -0,0 +1,281 @@
#!/usr/bin/env python
# Program name: metricator_cleaner.py
# Compatibility: Python 2.7
# Purpose - Clean csv files when retention expires, tuned for the Coke Company
# Author - Guilhem Marchand
# Load libs
from __future__ import print_function
import sys
import os
import glob
import time
import logging
import platform
import re
import argparse
# Converter version
version = '2.0.0'
# LOGGING INFORMATION:
# - The program uses the standard logging Python module to display important messages in Splunk logs
# - Every message of the script will be indexed and accessible within Splunk splunkd logs
#################################################
# Functions
#################################################
# Disallow negative value in parser
# Disallow negative value in parser
def check_negative(value):
    """argparse ``type=`` callable: accept only non-negative integers.

    Args:
        value: the raw command-line token (argparse passes a str, but any
            ``int()``-convertible value works).

    Returns:
        int: the parsed, non-negative integer.

    Raises:
        argparse.ArgumentTypeError: if the value is not an integer or is
            negative, so argparse reports a clean usage error with a
            consistent message for both failure modes.
    """
    try:
        ivalue = int(value)
    except (TypeError, ValueError):
        # Previously a non-numeric token escaped as a raw ValueError and
        # argparse produced a generic message; normalize both failure
        # modes to the same explicit error text.
        raise argparse.ArgumentTypeError("%s is an invalid positive int value" % value)
    if ivalue < 0:
        raise argparse.ArgumentTypeError("%s is an invalid positive int value" % value)
    return ivalue
#################################################
# Arguments Parser
#################################################

# Define Arguments
# Command-line interface; the options mirror those of metricator_cleaner.pl.
parser = argparse.ArgumentParser()
parser.add_argument('--cleancsv', action='store_true', default=False, dest='cleancsv',
                    help='Activate the purge of csv files from csv repository and config repository '
                         '(see also options above)')
parser.add_argument('--maxseconds_csv', action='store', dest='MAXSECONDS_CSV', type=check_negative,
                    help='Set the maximum file retention in seconds for csv data, every files older'
                         ' than this value will be permanently removed')
parser.add_argument('--approot', action='store', dest='APP',
                    help='Set a custom value for the Application root directory '
                         '(default are: nmon / TA-metricator-hec-for-nmon / PA-nmon)')
parser.add_argument('--csv_repository', action='store', dest='CSV_REPOSITORY',
                    help='Set a custom location for directory containing csv data (default: csv_repository)')
parser.add_argument('--config_repository', action='store', dest='CONFIG_REPOSITORY',
                    help='Set a custom location for directory containing config data (default: config_repository)')
parser.add_argument('--version', action='version', version='%(prog)s ' + version)

# Parsing happens at import time: invalid values abort here with a usage error.
args = parser.parse_args()
#################################################
# Variables
#################################################

# Set logging format
# NOTE(review): the bare "logging.root" attribute access below has no
# effect; the calls that follow are what actually configure the root logger.
logging.root
logging.root.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(levelname)s %(message)s')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logging.root.addHandler(handler)

# Current date (used only for human-readable log messages)
now = time.strftime("%d-%m-%Y %H:%M:%S")

# Set maxseconds (None when --maxseconds_csv was not given; defaulted later)
maxseconds_csv = args.MAXSECONDS_CSV

# Set cleancsv
cleancsv = args.cleancsv

# If the root directory App is not defined, use empty value (will be set later)
if not args.APP:
    APP = ''
else:
    APP = args.APP

# If the csv_repository is not defined, apply default 'csv_repository' value
if not args.CSV_REPOSITORY:
    csv_repository = "csv_repository"
else:
    csv_repository = args.CSV_REPOSITORY

# If the config_repository is not defined, apply default 'config_repository' value
if not args.CONFIG_REPOSITORY:
    config_repository = "config_repository"
else:
    config_repository = args.CONFIG_REPOSITORY

# Guest Operation System type
ostype = platform.system().lower()

# If running Windows OS (used for directory identification); a truthy
# re.Match object when on Windows, None otherwise
is_windows = re.match(r'^win\w+', (platform.system().lower()))

# Python version
python_version = platform.python_version()

# Verify the SPLUNK_HOME environment variable is available; the script is
# expected to be launched by Splunk which sets it.  For debugging or manual
# runs, export the variable manually first.
try:
    os.environ["SPLUNK_HOME"]
except KeyError:
    logging.error('The environment variable SPLUNK_HOME could not be verified, if you want to run this script '
                  'manually you need to export it before processing')
    sys.exit(1)

# SPLUNK_HOME environment variable
SPLUNK_HOME = os.environ['SPLUNK_HOME']
# Set APP root directory
if not APP:
    # Discover the TA-metricator-hec-for-nmon path: standalone install
    # (etc/apps) first, then cluster slave-apps.
    if is_windows:
        TA_NMON_APP = SPLUNK_HOME + '\\etc\\apps\\TA-metricator-hec-for-nmon'
    else:
        TA_NMON_APP = SPLUNK_HOME + '/etc/apps/TA-metricator-hec-for-nmon'

    if is_windows:
        TA_NMON_APP_CLUSTERED = SPLUNK_HOME + '\\etc\\slave-apps\\TA-metricator-hec-for-nmon'
    else:
        TA_NMON_APP_CLUSTERED = SPLUNK_HOME + '/etc/slave-apps/TA-metricator-hec-for-nmon'

    # Verify APP exists, preferring the standalone location
    if os.path.exists(TA_NMON_APP):
        APP = TA_NMON_APP
    elif os.path.exists(TA_NMON_APP_CLUSTERED):
        APP = TA_NMON_APP_CLUSTERED
    else:
        # fixed: the message was missing the word "installed" (the custom
        # --approot branch below already had it)
        msg = 'The Application root directory could not be found, is the TA-metricator-hec-for-nmon installed ? We tried: '\
              + str(TA_NMON_APP) + ' ' + str(TA_NMON_APP_CLUSTERED)
        logging.error(msg)
        sys.exit(1)
else:
    # A custom --approot was given: resolve it under etc/apps
    if is_windows:
        NMON_APP = SPLUNK_HOME + '\\etc\\apps\\' + APP
    else:
        NMON_APP = SPLUNK_HOME + '/etc/apps/' + APP

    # Verify APP exists
    if os.path.exists(NMON_APP):
        APP = NMON_APP
    else:
        msg = 'The Application root directory could not be found, is the TA-metricator-hec-for-nmon installed ? We tried: '\
              + str(NMON_APP)
        logging.error(msg)
        sys.exit(1)
# APP_MAINVAR and APP_VAR directories (where the TA writes its runtime data)
if is_windows:
    # NOTE(review): this branch previously pointed at '\var\log\nmon' while
    # the non-Windows branch (and the Perl cleaner) use '.../metricator';
    # made consistent here -- confirm against the Windows deployment layout.
    APP_MAINVAR = SPLUNK_HOME + '\\var\\log\\metricator'
    APP_VAR = APP_MAINVAR + '\\var'
else:
    APP_MAINVAR = SPLUNK_HOME + '/var/log/metricator'
    APP_VAR = APP_MAINVAR + '/var'

if not os.path.exists(APP_MAINVAR):
    # Fixed: the message was built but never emitted before exiting, and the
    # exit status was 1 although "no need to run" is not an error (the Perl
    # cleaner exits 0 in the same situation).  Also report APP_MAINVAR --
    # the directory actually tested -- rather than APP_VAR.
    msg = 'The main var directory ' + APP_MAINVAR + ' has not been found, there is no need to run now.'
    logging.info(msg)
    sys.exit(0)

# Repositories definition
if is_windows:
    CSV_DIR = APP_VAR + '\\' + csv_repository
    CONFIG_DIR = APP_VAR + '\\' + config_repository
else:
    CSV_DIR = APP_VAR + '/' + csv_repository
    CONFIG_DIR = APP_VAR + '/' + config_repository

# List of directories to be proceeded (a set, so duplicates collapse)
WORKING_DIR = {CSV_DIR, CONFIG_DIR}

# Starting time of process
start_time = time.time()
####################################################################
# Main Program
####################################################################

# Default value for CSV retention, in seconds (86400 = 24 hours)
if maxseconds_csv is None:
    maxseconds_csv = 86400

# Show current time
msg = now + " Starting nmon cleaning"
print (msg)

# Display some basic information about us
msg = "Splunk Root Directory ($SPLUNK_HOME): " + str(SPLUNK_HOME) + " nmon_cleaner version: " + str(version) \
      + " Python version: " + str(python_version)
print (msg)

# Proceed to CSV cleaning: for each repository, permanently delete every
# *.csv file whose mtime is older than the retention window.
if cleancsv:

    for DIR in WORKING_DIR:

        if os.path.exists(DIR):

            # cd to directory (the glob pattern below is relative)
            os.chdir(DIR)

            # Verify we have data to manage
            counter = len(glob.glob1(DIR, "*.csv"))
            # print (counter)

            if counter == 0:
                msg = str(DIR) + ', no action required.'
                print (msg)

            else:
                # counter of files with retention expired in this directory
                counter_expired = 0

                curtime = time.time()
                limit = maxseconds_csv

                for xfile in glob.glob('*.csv'):

                    filemtime = os.path.getmtime(xfile)

                    if curtime - filemtime > limit:
                        counter_expired += 1
                        # size in MB (decimal: bytes / 1e6), for the log line
                        size_mb = os.path.getsize(xfile)/1000.0/1000.0
                        size_mb = format(size_mb, '.2f')
                        mtime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(filemtime)) # Human readable datetime
                        msg = 'Max set retention of ' + str(maxseconds_csv) + ' seconds expired for file: ' +\
                              xfile + ' size(MB): '\
                              + str(size_mb) + ' mtime: ' + str(mtime)
                        print (msg)
                        os.remove(xfile) # Permanently remove the file!

                if counter_expired != 0:
                    msg = str(counter_expired) + ' files were permanently removed due to retention expired' \
                          ' for directory ' + DIR
                else:
                    msg = str(DIR) + ', no action required.'
                print (msg)

###################
# End
###################

# Time required to process
end_time = time.time()
result = "Elapsed time was: %g seconds" % (end_time - start_time)
print (result)

# exit
sys.exit(0)

@ -0,0 +1,368 @@
#!/bin/sh
# set -x
# Program name: metricator_cleaner.sh
# Purpose - Frontal script to metricator_cleaner.py and metricator_cleaner.pl, will launch Python or Perl script depending on interpreter availability
# See metricator_cleaner.py | metricator_cleaner.pl
# Author - Guilhem Marchand
# Version 2.0.1
# For AIX / Linux / Solaris
#################################################
## Your Customizations Go Here ##
#################################################
# format date output to strftime dd/mm/YYYY HH:MM:SS
# Emit the current local time formatted as dd-mm-YYYY HH:MM:SS (log line prefix).
log_date () { date '+%d-%m-%Y %H:%M:%S'; }
# hostname
HOST=`hostname`

# Which type of OS are we running
UNAME=`uname`

# The script is expected to be launched by Splunk, which exports SPLUNK_HOME
if [ -z "${SPLUNK_HOME}" ]; then
    echo "`log_date`, ${HOST} ERROR, SPLUNK_HOME variable is not defined"
    exit 1
fi

# APP path discovery: standalone apps dir first, then cluster slave-apps
if [ -d "$SPLUNK_HOME/etc/apps/TA-metricator-hec-for-nmon" ]; then
    APP=$SPLUNK_HOME/etc/apps/TA-metricator-hec-for-nmon
elif [ -d "$SPLUNK_HOME/etc/slave-apps/TA-metricator-hec-for-nmon" ];then
    APP=$SPLUNK_HOME/etc/slave-apps/TA-metricator-hec-for-nmon
else
    echo "`log_date`, ${HOST} ERROR, the APP directory could not be defined, is the TA-metricator-hec-for-nmon installed ?"
    exit 1
fi

# source default nmon.conf
if [ -f $APP/default/nmon.conf ]; then
    # During initial deployment, the nmon.conf needs to be managed properly by the metricator_consumer.sh
    # wait for this to be done: a '[nmon]' stanza means the file is still in Splunk
    # conf format and cannot be sourced as shell yet
    grep '\[nmon\]' $APP/default/nmon.conf >/dev/null
    if [ $? -eq 0 ]; then
        echo "`log_date`, ${HOST} INFO, initial deployment condition detected, safe exiting."
        exit 0
    else
        . $APP/default/nmon.conf
    fi
fi

# source local nmon.conf, if any
# Search for a local nmon.conf file located in $SPLUNK_HOME/etc/apps/TA-metricator-hec-for-nmon/local
if [ -f $APP/local/nmon.conf ]; then
    . $APP/local/nmon.conf
fi

# On a per server basis, you can also set in /etc/nmon.conf
if [ -f /etc/nmon.conf ]; then
    . /etc/nmon.conf
fi

# Manage FQDN option: when nmonparser_options contains --use_fqdn, use the fully
# qualified hostname (hostname -f is only supported on Linux)
echo $nmonparser_options | grep '\-\-use_fqdn' >/dev/null
if [ $? -eq 0 ]; then
    # Only relevant for Linux OS
    case $UNAME in
    Linux)
        HOST=`hostname -f` ;;
    AIX)
        HOST=`hostname` ;;
    SunOS)
        HOST=`hostname` ;;
    esac
else
    HOST=`hostname`
fi

# Manage host override option based on Splunk hostname defined
case $override_sys_hostname in
"1")
    # Retrieve the Splunk host value from the system inputs.conf
    HOST=`cat $SPLUNK_HOME/etc/system/local/inputs.conf | grep '^host =' | awk -F\= '{print $2}' | sed 's/ //g'`
    ;;
esac
#
# Interpreter choice
#
# Decide whether the cleaner runs under python3, python (2) or perl.
# Priority: Perl on AIX, Python elsewhere, with fallback to the other family.

# NOTE(review): PYTHON=0 is initialised but never read afterwards
PYTHON=0
PYTHON2=0
PYTHON3=0
PERL=0

# Set the default interpreter
INTERPRETER="python"

# Get the version for both worlds (which output, or an error message if absent)
PYTHON2=`which python 2>&1`
PYTHON3=`which python3 2>&1`
PERL=`which perl 2>&1`

# Handle Python: prefer python3 when available
PYTHON_available="false"
case $PYTHON3 in
*python*)
    PYTHON_available="true"
    INTERPRETER="python3" ;;
*)
    case $PYTHON2 in
    *python*)
        PYTHON_available="true"
        INTERPRETER="python" ;;
    esac
    ;;
esac

# Handle Perl
case $PERL in
*perl*)
    PERL_available="true"
    ;;
*)
    PERL_available="false"
    ;;
esac

case `uname` in

# AIX priority is Perl
"AIX")
    case $PERL_available in
    "true")
        INTERPRETER="perl" ;;
    "false")
        INTERPRETER="$INTERPRETER" ;;
    esac
    ;;

# Other OS, priority is Python
*)
    case $PYTHON_available in
    "true")
        INTERPRETER="$INTERPRETER" ;;
    "false")
        INTERPRETER="perl" ;;
    esac
    ;;
esac
# POSIX process run time in seconds (for Solaris only)
# Converts the `ps -o etime=` output ([[dd-]hh:]mm:ss) of pid $1 into seconds.
P_RUNTIME () {
    t=`LC_ALL=POSIX ps -o etime= -p $1 | awk '{print $1}'`
    d=0 h=0
    # split off the days part (dd-) and the hours part (hh:) when present
    case $t in *-*) d=$((0 + ${t%%-*})); t=${t#*-};; esac
    case $t in *:*:*) h=$((0 + ${t%%:*})); t=${t#*:};; esac
    # 10# forces base-10 so zero-padded fields like "08" are not read as octal
    # NOTE(review): 10# arithmetic is a bash/ksh extension — TODO confirm /bin/sh on target supports it
    s=$((10#$d*86400 + 10#$h*3600 + 10#${t%%:*}*60 + 10#${t#*:}))
    echo $s
}
####################################################################
#############           Main Program            ############
####################################################################

# Store arguments sent to script (forwarded later to the cleaner implementation)
userargs=$@

###### Maintenance tasks ######

#
# Maintenance task1
#

# Maintenance task 1: verify if we have nmon processes running over the allowed period
# This issue seems to happen sometimes specially on AIX servers
# If an nmon process has not been terminated after its grace period, the process will be killed

# get the allowed runtime in seconds for an nmon process according to the configuration
# and add a 10 minute grace period
case `uname` in
"AIX"|"Linux"|"SunOS")
    echo "`log_date`, ${HOST} INFO, starting maintenance task 1: verify nmon processes running over expected time period"
    endtime=0
    # expected lifetime of an nmon process: interval * snapshot (values sourced from nmon.conf)
    case ${mode_fifo} in
    "1")
        endtime=`expr ${fifo_interval} \* ${fifo_snapshot}` ;;
    *)
        endtime=`expr ${interval} \* ${snapshot}` ;;
    esac
    endtime=`expr ${endtime} + 600`
    # get the list of running processes
    # each pipeline is run twice on purpose: once captured into oldPidList, once
    # redirected to /dev/null so that $? tested below reflects the grep status
    case $UNAME in
    "AIX"|"Linux")
        oldPidList=`ps -eo user,pid,command,etime,args | grep "nmon" | grep "splunk" | grep "var/log/metricator" | grep -v metricator_reader | grep -v grep | awk '{ print $2 }'`
        ps -eo user,pid,command,etime,args | grep "nmon" | grep "splunk" | grep "var/log/metricator" | grep -v metricator_reader | grep -v grep >/dev/null ;;
    "SunOS")
        oldPidList=`ps auxwww | grep "sadc" | grep "splunk" | grep "var/log/metricator" | grep -v metricator_reader | grep -v grep | awk '{ print $2 }'`
        ps auxwww | grep "sadc" | grep "splunk" | grep "var/log/metricator" | grep -v metricator_reader | grep -v grep >/dev/null ;;
    esac
    if [ $? -eq 0 ]; then
        for pid in $oldPidList; do
            pid_runtime=0
            # only act if the process is still running
            if [ -d /proc/${pid} ]; then
                # get the process runtime in seconds
                case $UNAME in
                "AIX"|"Linux")
                    # convert the [[dd-]hh:]mm:ss etime output into seconds
                    pid_runtime=`ps -p ${pid} -oetime= | tr '-' ':' | awk -F: '{ total=0; m=1; } { for (i=0; i < NF; i++) {total += $(NF-i)*m; m *= i >= 2 ? 24 : 60 }} {print total}'`
                    ;;
                "SunOS")
                    pid_runtime=`P_RUNTIME ${pid}`
                    ;;
                esac
                # additional protection: skip when the runtime could not be retrieved
                case ${pid_runtime} in
                "")
                    ;;
                *)
                    if [ ${pid_runtime} -gt ${endtime} ]; then
                        echo "`log_date`, ${HOST} WARN, old nmon process found due to: `ps auxwww | grep $pid | grep -v grep` killing (SIGTERM) process $pid"
                        kill $pid
                        # Allow some time for the process to end
                        sleep 5
                        # re-check the status, escalate to SIGKILL if still alive
                        ps -p ${pid} -oetime= >/dev/null
                        if [ $? -eq 0 ]; then
                            echo "`log_date`, ${HOST} WARN, old nmon process found due to: `ps auxwww | grep $pid | grep -v grep` failed to stop, killing (-9) process $pid"
                            kill -9 $pid
                        fi
                    fi
                    ;;
                esac
            fi
        done
    fi
#
# Maintenance task2
# set -x
# - manage any fifo reader orphan processes (no associated nmon process)
# - manage any fifo reader duplicated (abnormal situation)
echo "`log_date`, ${HOST} INFO, starting maintenance task 2: verify orphan or duplicated fifo_reader processes"
for instance in fifo1 fifo2; do
# Initiate
oldPidNb=0
case $INTERPRETER in
"perl")
readerNbProc=2 ;;
"python"|"python3")
readerNbProc=3 ;;
esac
# get the list of running processes
ps auxwww | grep "nmon" | grep "splunk" | grep metricator_reader | grep ${instance} >/dev/null
if [ $? -eq 0 ]; then
oldPidList=`ps auxwwww | grep "nmon" | grep "splunk" | grep metricator_reader | grep ${instance} | grep -v grep | awk '{ print $2 }'`
oldPidNb=`ps auxwww | grep "nmon" | grep "splunk" | grep metricator_reader | grep ${instance} | grep -v grep | wc -l | awk '{print $1}'`
# search for associated nmon process
case $UNAME in
"AIX"|"Linux")
ps auxwww | grep "nmon" | grep "splunk" | grep "var/log/metricator" | grep -v metricator_reader | grep ${instance} >/dev/null
;;
"SunOS")
ps auxwww | grep "sadc" | grep "splunk" | grep "var/log/metricator" | grep -v metricator_reader | grep ${instance} >/dev/null
;;
esac
if [ $? -ne 0 ] && [ $oldPidNb -eq $readerNbProc ]; then
# no process found, kill the reader processes
for pid in $oldPidList; do
echo "`log_date`, ${HOST} WARN, orphan reader process found (no associated nmon process) due to: `ps auxwww | grep $pid | grep -v grep` killing (SIGTERM) process $pid"
kill $pid
# Allow some time for the process to end
sleep 5
# re-check the status
ps -p ${pid} -oetime= >/dev/null
if [ $? -eq 0 ]; then
echo "`log_date`, ${HOST} WARN, orphan reader process (no associated nmon process) due to: `ps auxwww | grep $pid | grep -v grep` failed to stop, killing (-9) process $pid"
kill -9 $pid
fi
done
# If nmon is running but the number of reader processes is higher than 2 (shell parent + Python/Perl child), something went wrong
elif [ $oldPidNb -gt $readerNbProc ]; then
echo "`log_date`, ${HOST} WARN, multiple reader for the same fifo were detected, this is an abnormal situation and reader will be killed."
# no process found, kill the reader processes
for pid in $oldPidList; do
echo "`log_date`, ${HOST} WARN, duplicated reader process found due to: `ps auxwww | grep $pid | grep -v grep` killing (SIGTERM) process $pid"
kill $pid
# Allow some time for the process to end
sleep 5
# re-check the status
ps -p ${pid} -oetime= >/dev/null
if [ $? -eq 0 ]; then
echo "`log_date`, ${HOST} WARN, duplicated reader process found due to: `ps auxwww | grep $pid | grep -v grep` failed to stop, killing (-9) process $pid"
kill -9 $pid
fi
done
fi
fi
done
;;
# End of per OS case
esac
###### End maintenance tasks ######

###### Start cleaner ######

# Hand over to the Python or Perl implementation, forwarding the original arguments
case ${INTERPRETER} in
"python"|"python3")
    $INTERPRETER $APP/bin/metricator_cleaner.py ${userargs} ;;
"perl")
    $APP/bin/metricator_cleaner.pl ${userargs} ;;
esac

exit 0

@ -0,0 +1,367 @@
#!/bin/sh
# set -x
# Program name: metricator_consumer.sh
# Purpose - consume data produced by the fifo readers
# Author - Guilhem Marchand
# Version 2.0.0
# For AIX / Linux / Solaris
#################################################
## Your Customizations Go Here ##
#################################################
# hostname
HOST=`hostname`

# Which type of OS are we running
UNAME=`uname`

# format date output to strftime dd/mm/YYYY HH:MM:SS (log line prefix)
log_date () {
    date "+%d-%m-%Y %H:%M:%S"
}

# The script is expected to be launched by Splunk, which exports SPLUNK_HOME
if [ -z "${SPLUNK_HOME}" ]; then
    echo "`log_date`, ${HOST} ERROR, SPLUNK_HOME variable is not defined"
    exit 1
fi
# check and wait to acquire mutex
# the mutex file prevents concurrent consumer runs from processing the same data
mutex="$SPLUNK_HOME/var/log/metricator/mutex"

remove_mutex () {
    rm -f $mutex
}

# Allow 10s mini to acquire mutex and break
# NOTE(review): the loop can sleep up to 6 times (count > 5), i.e. ~12s worst case
count=0
while [ -f $mutex ]; do
    sleep 2
    count=`expr $count + 1`
    if [ $count -gt 5 ]; then
        break
    fi
done

# acquire mutex (only when the add-on working directory exists)
if [ -d $SPLUNK_HOME/var/log/metricator ]; then
    touch $mutex
fi

# tmp dir and file ($$ makes the temp file unique per run)
temp_dir="${SPLUNK_HOME}/var/log/metricator/tmp/"
if [ ! -d ${temp_dir} ]; then
    mkdir -p ${temp_dir}
fi
temp_file="${temp_dir}/metricator_consumer.sh.$$"

# Splunk Home variable: This should automatically defined when this script is being launched by Splunk
# If you intend to run this script out of Splunk, please set your custom value here
SPL_HOME=${SPLUNK_HOME}

# Check SPL_HOME variable is defined, this should be the case when launched by Splunk scheduler
if [ -z "${SPL_HOME}" ]; then
    echo "`log_date`, ${HOST} ERROR, SPL_HOME (SPLUNK_HOME) variable is not defined"
    remove_mutex
    exit 1
fi

# APP path discovery: standalone apps dir first, then cluster slave-apps
if [ -d "$SPLUNK_HOME/etc/apps/TA-metricator-hec-for-nmon" ]; then
    APP=$SPLUNK_HOME/etc/apps/TA-metricator-hec-for-nmon
elif [ -d "$SPLUNK_HOME/etc/slave-apps/TA-metricator-hec-for-nmon" ];then
    APP=$SPLUNK_HOME/etc/slave-apps/TA-metricator-hec-for-nmon
else
    echo "`log_date`, ${HOST} ERROR, the APP directory could not be defined, is the TA-metricator-hec-for-nmon installed ?"
    remove_mutex
    exit 1
fi
#
# Interpreter choice
#
# Decide whether helpers run under python3, python (2) or perl.
# Priority: Perl on AIX, Python elsewhere, with fallback to the other family.

# NOTE(review): PYTHON=0 is initialised but never read afterwards
PYTHON=0
PYTHON2=0
PYTHON3=0
PERL=0

# Set the default interpreter
INTERPRETER="python"

# Get the version for both worlds (which output, or an error message if absent)
PYTHON2=`which python 2>&1`
PYTHON3=`which python3 2>&1`
PERL=`which perl 2>&1`

# Handle Python: prefer python3 when available
PYTHON_available="false"
case $PYTHON3 in
*python*)
    PYTHON_available="true"
    INTERPRETER="python3" ;;
*)
    case $PYTHON2 in
    *python*)
        PYTHON_available="true"
        INTERPRETER="python" ;;
    esac
    ;;
esac

# Handle Perl
case $PERL in
*perl*)
    PERL_available="true"
    ;;
*)
    PERL_available="false"
    ;;
esac

case `uname` in

# AIX priority is Perl
"AIX")
    case $PERL_available in
    "true")
        INTERPRETER="perl" ;;
    "false")
        INTERPRETER="$INTERPRETER" ;;
    esac
    ;;

# Other OS, priority is Python
*)
    case $PYTHON_available in
    "true")
        INTERPRETER="$INTERPRETER" ;;
    "false")
        INTERPRETER="perl" ;;
    esac
    ;;
esac
# default values relevant for our context
nmonparser_options="--mode fifo"

# source default nmon.conf
if [ -f $APP/default/nmon.conf ]; then
    # During initial deployment, the nmon.conf needs to be managed properly by the metricator_consumer.sh
    # wait for this to be done: a '[nmon]' stanza means the file is still in Splunk
    # conf format and cannot be sourced as shell yet
    grep '\[nmon\]' $APP/default/nmon.conf >/dev/null
    if [ $? -eq 0 ]; then
        echo "`log_date`, ${HOST} INFO, initial deployment condition detected, safe exiting."
        # fix: release the mutex acquired at startup before exiting, otherwise a
        # stale mutex file delays every subsequent run until its wait loop times out
        remove_mutex
        exit 0
    else
        . $APP/default/nmon.conf
    fi
fi

# source local nmon.conf, if any
# Search for a local nmon.conf file located in $SPLUNK_HOME/etc/apps/TA-metricator-hec-for-nmon/local
if [ -f $APP/local/nmon.conf ]; then
    . $APP/local/nmon.conf
fi

# On a per server basis, you can also set in /etc/nmon.conf
if [ -f /etc/nmon.conf ]; then
    . /etc/nmon.conf
fi
# Manage FQDN option: when nmonparser_options contains --use_fqdn, use the fully
# qualified hostname (hostname -f is only supported on Linux)
echo $nmonparser_options | grep '\-\-use_fqdn' >/dev/null
if [ $? -eq 0 ]; then
    # Only relevant for Linux OS
    case $UNAME in
    Linux)
        HOST=`hostname -f` ;;
    AIX)
        HOST=`hostname` ;;
    SunOS)
        HOST=`hostname` ;;
    esac
else
    HOST=`hostname`
fi

# Manage host override option based on Splunk hostname defined
case $override_sys_hostname in
"1")
    # Retrieve the Splunk host value from the system inputs.conf
    HOST=`cat $SPLUNK_HOME/etc/system/local/inputs.conf | grep '^host =' | awk -F\= '{print $2}' | sed 's/ //g'`
    ;;
esac
############################################
# functions
############################################

# consume function: feed the nmonparser with the data accumulated by the fifo
# reader for one fifo instance, handling rotated files first to avoid data loss.
consume_data () {

    # fifo name (valid choices are: fifo1 | fifo2)
    FIFO=$1

    # consume fifo

    # realtime files, written continuously by the fifo reader
    nmon_config=$SPLUNK_HOME/var/log/metricator/var/nmon_repository/$FIFO/nmon_config.dat
    nmon_header=$SPLUNK_HOME/var/log/metricator/var/nmon_repository/$FIFO/nmon_header.dat
    nmon_timestamp=$SPLUNK_HOME/var/log/metricator/var/nmon_repository/$FIFO/nmon_timestamp.dat
    nmon_data=$SPLUNK_HOME/var/log/metricator/var/nmon_repository/$FIFO/nmon_data.dat
    nmon_data_tmp=$SPLUNK_HOME/var/log/metricator/var/nmon_repository/$FIFO/nmon_data_tmp.dat
    nmon_external=$SPLUNK_HOME/var/log/metricator/var/nmon_repository/$FIFO/nmon_external.dat
    nmon_external_header=$SPLUNK_HOME/var/log/metricator/var/nmon_repository/$FIFO/nmon_external_header.dat

    # rotated files, set aside by the reader at startup when a previous run left data behind
    nmon_config_rotated=$SPLUNK_HOME/var/log/metricator/var/nmon_repository/$FIFO/nmon_config.dat.rotated
    nmon_header_rotated=$SPLUNK_HOME/var/log/metricator/var/nmon_repository/$FIFO/nmon_header.dat.rotated
    nmon_timestamp_rotated=$SPLUNK_HOME/var/log/metricator/var/nmon_repository/$FIFO/nmon_timestamp.dat.rotated
    nmon_data_rotated=$SPLUNK_HOME/var/log/metricator/var/nmon_repository/$FIFO/nmon_data.dat.rotated
    nmon_external_rotated=$SPLUNK_HOME/var/log/metricator/var/nmon_repository/$FIFO/nmon_external.dat.rotated
    nmon_external_header_rotated=$SPLUNK_HOME/var/log/metricator/var/nmon_repository/$FIFO/nmon_external_header.dat.rotated

    # manage rotated data if existing, prevent any data loss
    # all files must be existing to be managed
    if [ -s $nmon_config_rotated ] && [ -s $nmon_header_rotated ] && [ -s $nmon_data_rotated ]; then

        # Manager headers: include the external header when present
        unset nmon_header_files
        if [ -f $nmon_external_header_rotated ]; then
            nmon_header_files="$nmon_header_rotated $nmon_external_header_rotated"
        else
            nmon_header_files="$nmon_header_rotated"
        fi

        # Ensure the first line of nmon_data starts by the relevant timestamp, if not add it
        head -1 $nmon_data_rotated | grep 'ZZZZ,T' >/dev/null
        if [ $? -ne 0 ]; then
            # check timestamp dat exists before processing
            # there is no else possible, if the the timestamp data file does not exist, there is nothing we can do
            # and the parser will raise an error
            if [ -f $nmon_timestamp_rotated ]; then
                tail -1 $nmon_timestamp_rotated >$temp_file
                cat $nmon_config_rotated $nmon_header_files $temp_file $nmon_data_rotated $nmon_external_rotated | $SPLUNK_HOME/bin/splunk cmd $APP/bin/nmonparser.sh $nmonparser_options
            fi
        else
            cat $nmon_config_rotated $nmon_header_files $nmon_data_rotated $nmon_external_rotated | $SPLUNK_HOME/bin/splunk cmd $APP/bin/nmonparser.sh $nmonparser_options
        fi

        # remove rotated
        rm -f $SPLUNK_HOME/var/log/metricator/var/nmon_repository/$FIFO/*.dat.rotated

        # header var
        unset nmon_header_files

    fi

    # Manage realtime files
    # all files must be existing to be managed
    if [ -s $nmon_config ] && [ -s $nmon_header ] && [ -s $nmon_data ]; then

        # get data mtime (age of last write in seconds, computed by the chosen interpreter)
        case $INTERPRETER in
        "perl")
            perl -e "\$mtime=(stat(\"$nmon_data\"))[9]; \$cur_time=time(); print \$cur_time - \$mtime;" >$temp_file
            nmon_data_mtime=`cat $temp_file`
            ;;
        "python"|"python3")
            $INTERPRETER -c "import os; import time; now = time.strftime(\"%s\"); print(int(int(now)-(os.path.getmtime('$nmon_data'))))" >$temp_file
            nmon_data_mtime=`cat $temp_file`
            ;;
        esac

        # file should have last mtime of mini 5 sec: wait until the reader has been
        # idle for at least 5 seconds so we do not consume a file mid-write
        while [ $nmon_data_mtime -lt 5 ];
        do
            sleep 1
            # get data mtime
            case $INTERPRETER in
            "perl")
                perl -e "\$mtime=(stat(\"$nmon_data\"))[9]; \$cur_time=time(); print \$cur_time - \$mtime;" >$temp_file
                nmon_data_mtime=`cat $temp_file`
                ;;
            "python"|"python3")
                $INTERPRETER -c "import os; import time; now = time.strftime(\"%s\"); print(int(int(now)-(os.path.getmtime('$nmon_data'))))" >$temp_file
                nmon_data_mtime=`cat $temp_file`
                ;;
            esac
        done

        # copy content
        cat $nmon_data > $nmon_data_tmp

        # nmon external data
        if [ -f $nmon_external ]; then
            cat $nmon_external >> $nmon_data_tmp
        fi

        # empty the nmon_data file & external
        > $nmon_data
        > $nmon_external

        # Manager headers: include the external header when present
        unset nmon_header_files
        if [ -f $nmon_external_header ]; then
            nmon_header_files="$nmon_header $nmon_external_header"
        else
            nmon_header_files="$nmon_header"
        fi

        # Ensure the first line of nmon_data starts by the relevant timestamp, if not add it
        head -1 $nmon_data_tmp | grep 'ZZZZ,T' >/dev/null
        if [ $? -ne 0 ]; then
            tail -1 $nmon_timestamp >$temp_file
            cat $nmon_config $nmon_header_files $temp_file $nmon_data_tmp | $SPLUNK_HOME/bin/splunk cmd $APP/bin/nmonparser.sh $nmonparser_options
        else
            cat $nmon_config $nmon_header_files $nmon_data_tmp | $SPLUNK_HOME/bin/splunk cmd $APP/bin/nmonparser.sh $nmonparser_options
        fi

        # remove the copy
        rm -f $nmon_data_tmp

        # header var
        unset nmon_header_files

    fi
}
####################################################################
#############           Main Program            ############
####################################################################

# consume both fifo instances sequentially, then clean up

# consume fifo1
consume_data fifo1

# allow 1 sec idle
sleep 1

# consume fifo2
consume_data fifo2

# remove the temp file
if [ -f $temp_file ]; then
    rm -f $temp_file
fi

# release the run mutex acquired at startup
remove_mutex

exit 0

@ -0,0 +1,248 @@
#!/usr/bin/perl
# Program name: metricator_reader.pl
# Compatibility: Perl x
# Purpose - read nmon data from fifo file
# Author - Guilhem Marchand
my $version = "2.0.0";

use Getopt::Long;
use File::stat;
use File::Copy;
use POSIX 'strftime';

#################################################
##      Arguments Parser
#################################################

# Default values
my $APP       = "";    # optional manual override of the application root
my $fifo_name = "";    # which fifo repository to read (fifo1 | fifo2)
my $VERSION   = "";
my $help      = "";

my $result = GetOptions(
    "fifo=s"  => \$fifo_name,    # string
    "version" => \$VERSION,      # flag
    "help"    => \$help          # flag
);

# Show version
if ($VERSION) {
    print("metricator_reader.pl version $version \n");
    exit 0;
}

# Show help
if ($help) {
    print( "
Help for metricator_reader.pl:
The script should be run in the backgroud to continously read nmon data from fifo files.
Available options are:
--fifo <name of fifo> :Name of the pre-configured fifo file
--version :Show current program version \n
"
    );
    exit 0;
}
# Local time, used as a prefix in every log message
my $time = strftime "%d-%m-%Y %H:%M:%S", localtime;

# Default Environment Variable SPLUNK_HOME, this shall be automatically defined if as the script shall be launched by Splunk
my $SPLUNK_HOME = $ENV{SPLUNK_HOME};

# Verify SPLUNK_HOME definition
if ( not $SPLUNK_HOME ) {
    print(
"\n$time ERROR: The environment variable SPLUNK_HOME could not be verified, if you want to run this script manually you need to export it before processing \n"
    );
    die;
}

# Discover TA-metricator-hec-for-nmon path: standalone apps dir first, then cluster slave-apps
if ( length($APP) == 0 ) {
    if ( -d "$SPLUNK_HOME/etc/apps/TA-metricator-hec-for-nmon" ) {
        $APP = "$SPLUNK_HOME/etc/apps/TA-metricator-hec-for-nmon";
    }
    elsif ( -d "$SPLUNK_HOME/etc/slave-apps/TA-metricator-hec-for-nmon" ) {
        $APP = "$SPLUNK_HOME/etc/slave-apps/TA-metricator-hec-for-nmon";
    }
}
else {
    # a manual override was provided, it must point to an existing directory
    if ( !-d "$APP" ) {
        print(
"\n$time ERROR: The Application root directory could be verified using your custom setting: $APP \n"
        );
        die;
    }
}

# Verify existence of APP
if ( !-d "$APP" ) {
    print(
"\n$time ERROR: The Application root directory could not be found, is the TA-metricator-hec-for-nmon installed ?\n"
    );
    die;
}

# var directories: runtime working area of the add-on
my $APP_MAINVAR = "$SPLUNK_HOME/var/log/metricator";
my $APP_VAR     = "$APP_MAINVAR/var";

if ( !-d "$APP_MAINVAR" ) {
    # nothing has been deployed yet, exit without error
    print(
"\n$time INFO: main var directory not found ($APP_MAINVAR), no need to run.\n"
    );
    exit 0;
}

# check fifo_name (mandatory option)
if ( not "$fifo_name" ) {
    print("\n$time ERROR: the --fifo_name <name of fifo> is mandatory\n");
    die;
}
# define the full path to the fifo file
my $fifo_path = "$APP_VAR/nmon_repository/$fifo_name/nmon.fifo";

# At startup, rotate any existing non empty .dat file if nmon_data.dat is not empty
# define the various files to be written

# realtime files
my $nmon_config_dat   = "$APP_VAR/nmon_repository/$fifo_name/nmon_config.dat";
my $nmon_header_dat   = "$APP_VAR/nmon_repository/$fifo_name/nmon_header.dat";
my $nmon_data_dat     = "$APP_VAR/nmon_repository/$fifo_name/nmon_data.dat";
my $nmon_external_dat = "$APP_VAR/nmon_repository/$fifo_name/nmon_external.dat";
my $nmon_external_header_dat =
  "$APP_VAR/nmon_repository/$fifo_name/nmon_external_header.dat";
my $nmon_timestamp_dat =
  "$APP_VAR/nmon_repository/$fifo_name/nmon_timestamp.dat";
my $nmon_error_dat = "$APP_VAR/nmon_repository/$fifo_name/nmon_error.dat";

my @nmon_dat = (
    "$nmon_config_dat",   "$nmon_header_dat",
    "$nmon_data_dat",     "$nmon_timestamp_dat",
    "$nmon_external_dat", "$nmon_external_header_dat",
    "$nmon_error_dat"
);

my $file;
my $rotated_file;

# Remove any existing rotated file (leftovers from a previous run)
foreach $file (@nmon_dat) {
    $rotated_file = "$file.rotated";
    if ( -e $rotated_file ) {
        unlink $rotated_file;
    }
}

# Manage existing files and do the rotation if required:
# non-empty data is kept aside as *.rotated for the consumer, empty files are dropped
if ( !-z $nmon_data_dat ) {
    foreach $file (@nmon_dat) {
        $rotated_file = "$file.rotated";
        move( $file, $rotated_file );
    }
}
else {
    foreach $file (@nmon_dat) {
        if ( -e $file ) {
            unlink $file;
        }
    }
}
####################################################################
#############           Main Program
####################################################################

# The fifo must exist before we can read it (it is created by the helper script)
if ( !-p $fifo_path ) {
    print(
"\n$time INFO: The fifo file $fifo_path does not exist yet, we are not ready to start.\n"
    );
    exit 0;
}
else {

    # Dispatch patterns, hoisted out of the read loop (they are loop-invariant).
    # NOTE(review): '^[AAA|BBB].+' is a character class (matches any line starting
    # with 'A', 'B' or '|'), not the alternation '(AAA|BBB)'; kept as-is to
    # preserve the historical dispatch behaviour.
    my $nmon_config_match = '^[AAA|BBB].+';
    my $nmon_header_match =
      '^(?!AAA|BBB|TOP)[a-zA-Z0-9\-\_]*,(?!T\d{3,})[^,]*,(?!T\d{3,})[^,]*.*';
    my $nmon_header_TOP_match = '^TOP,(?!\d*,)';
    my $nmon_timestamp_match  = '^ZZZZ,T\d*';
    my $nmon_error_match      = '^ERROR,T\d*';

    my $fifoh;

    # Open the named pipe "a la shell" to ensure that we will quit when the nmon process has ended as well
    # fix: the open was previously unchecked; fail loudly if the reader cannot start
    open( $fifoh, "$APP/bin/metricator_reader.sh $fifo_path|" )
      or die "Could not start the fifo reader for '$fifo_path' $!";

    while (<$fifoh>) {

        chomp($_);

        if ( $_ =~ /$nmon_config_match/ ) {
            open( my $fh, '>>', $nmon_config_dat )
              or die "Could not open file '$nmon_config_dat' $!";
            print $fh "$_\n";
            close $fh;
        }
        elsif ( $_ =~ /$nmon_header_match/ ) {
            open( my $fh, '>>', $nmon_header_dat )
              or die "Could not open file '$nmon_header_dat' $!";
            print $fh "$_\n";
            close $fh;
        }
        elsif ( $_ =~ /$nmon_header_TOP_match/ ) {
            open( my $fh, '>>', $nmon_header_dat )
              or die "Could not open file '$nmon_header_dat' $!";
            print $fh "$_\n";
            close $fh;
        }
        elsif ( $_ =~ /$nmon_error_match/ ) {
            open( my $fh, '>>', $nmon_error_dat )
              or die "Could not open file '$nmon_error_dat' $!";
            print $fh "$_\n";
            close $fh;
        }
        elsif ( $_ =~ /$nmon_timestamp_match/ ) {
            # the timestamp goes to both nmon_timestamp.dat (reused later by the
            # consumer) and nmon_data.dat (to keep the data stream complete)
            open( my $fh, '>>', $nmon_timestamp_dat )
              or die "Could not open file '$nmon_timestamp_dat' $!";
            print $fh "$_\n";
            close $fh;
            # fix: distinct lexical handle; the original redeclared `my $fh` in
            # the same scope ("my masks earlier declaration" warning)
            open( my $fh_data, '>>', $nmon_data_dat )
              or die "Could not open file '$nmon_data_dat' $!";
            print $fh_data "$_\n";
            close $fh_data;
        }
        else {
            open( my $fh, '>>', $nmon_data_dat )
              or die "Could not open file '$nmon_data_dat' $!";
            print $fh "$_\n";
            close $fh;
        }
    }
    close $fifoh;
    exit(0);
}

@ -0,0 +1,180 @@
# Program name: metricator_reader.py
# Compatibility: Python 3.x
# Purpose - read nmon data from fifo file
# Author - Guilhem Marchand
import os
import sys
import optparse
import logging
import re
import subprocess
# script version
version = '3.0.0'

#################################################
# Variables
#################################################

# Set logging format: DEBUG level on the root logger, "LEVEL message" lines to stderr.
# fix: removed a bare no-op expression statement (`logging.root` on its own line).
logging.root.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(levelname)s %(message)s')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logging.root.addHandler(handler)
# Verify SPLUNK_HOME environment variable is available, the script is expected to be launched by Splunk
# which will set this.
# for debugging or manual run, please set this variable manually
try:
    os.environ["SPLUNK_HOME"]
except KeyError:
    logging.error(
        'The environment variable SPLUNK_HOME could not be verified, if you want to run this script manually you need'
        ' to export it before processing')
    sys.exit(1)

# SPLUNK_HOME environment variable
SPLUNK_HOME = os.environ['SPLUNK_HOME']

# APP_VAR directory: runtime working area of the add-on
APP_VAR = SPLUNK_HOME + '/var/log/metricator/var'
if not os.path.exists(APP_VAR):
    # nothing has been deployed yet, exit without error
    logging.info(
        'The application var directory does not exist yet, we are not ready to start')
    sys.exit(0)

# APP Directories for TA-metricator-hec-for-nmon (standalone apps dir, then cluster slave-apps)
TA_NMON_APP = SPLUNK_HOME + '/etc/apps/TA-metricator-hec-for-nmon'
TA_NMON_APP_CLUSTERED = SPLUNK_HOME + '/etc/slave-apps/TA-metricator-hec-for-nmon'

# Empty APP
APP = ''

# Verify APP exist
if os.path.exists(TA_NMON_APP):
    APP = TA_NMON_APP
elif os.path.exists(TA_NMON_APP_CLUSTERED):
    APP = TA_NMON_APP_CLUSTERED
else:
    msg = 'The Application root directory could not be found, is the TA-metricator-hec-for-nmon ? We tried: ' + \
          str(TA_NMON_APP) + ' ' + str(TA_NMON_APP_CLUSTERED)
    logging.error(msg)
    sys.exit(1)
# metricator_reader.sh: shell wrapper used to stream the fifo (see the main program below)
fifo_reader = APP + "/bin/metricator_reader.sh"

#################################################
# Arguments
#################################################

parser = optparse.OptionParser(usage='usage: %prog [options]', version='%prog '+version)

parser.add_option('-F', '--fifo', action='store', type='string', dest='fifo_name',
                  help='set the fifo file to be read')
parser.add_option('--dumpargs', action='store_true', dest='dumpargs',
                  help='only dump the passed arguments and exit (for debugging purposes only)')

(options, args) = parser.parse_args()

# debugging helper: show parsed options and leave
if options.dumpargs:
    print("options: ", options)
    print("args: ", args)
    sys.exit(0)

# --fifo is mandatory: it selects which fifo repository is read
if not options.fifo_name:
    logging.error(
        'The fifo file option has not been set (-F fifo_name or --fifo fifo_name)')
    sys.exit(1)
else:
    fifo_name = options.fifo_name

# define the full path to the fifo file
fifo_path = APP_VAR + '/nmon_repository/' + fifo_name + '/nmon.fifo'
# Startup rotation: if a previous run left a non-empty nmon_data.dat behind,
# set every .dat file aside as *.rotated so the consumer can still process it;
# if the data file exists but is empty, simply drop the stale files.

# the various files written by the reader (realtime files)
_repo_dir = APP_VAR + '/nmon_repository/' + fifo_name
nmon_config_dat = _repo_dir + '/nmon_config.dat'
nmon_header_dat = _repo_dir + '/nmon_header.dat'
nmon_data_dat = _repo_dir + '/nmon_data.dat'
nmon_timestamp_dat = _repo_dir + '/nmon_timestamp.dat'
nmon_external_dat = _repo_dir + '/nmon_external.dat'
nmon_external_header_dat = _repo_dir + '/nmon_external_header.dat'
nmon_error_dat = _repo_dir + '/nmon_error.dat'

nmon_dat = {nmon_config_dat, nmon_header_dat, nmon_timestamp_dat, nmon_data_dat,
            nmon_external_dat, nmon_external_header_dat, nmon_error_dat}

_have_data = os.path.exists(nmon_data_dat)
if _have_data and os.path.getsize(nmon_data_dat) > 0:
    # non-empty data: rotate every file, replacing any previous rotation
    for dat_file in nmon_dat:
        rotated = dat_file + ".rotated"
        if os.path.isfile(rotated):
            os.remove(rotated)
        if os.path.isfile(dat_file):
            os.rename(dat_file, rotated)
elif _have_data:
    # empty data file: discard the stale files
    for dat_file in nmon_dat:
        if os.path.isfile(dat_file):
            os.remove(dat_file)
####################################################################
# Main Program
####################################################################

# Verify the fifo file exists, and start processing
if not os.path.exists(fifo_path):
    logging.info(
        'The fifo file ' + fifo_path + ' does not exist yet, we are not ready to start')
    sys.exit(0)
else:
    # we use the metricator_reader.sh to read the fifo file, benchmarks have shown more stability than
    # opening the fifo file in pure Python
    cmd = fifo_reader + " " + fifo_path
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, universal_newlines=True)
    while True:
        line = str(p.stdout.readline())
        # an empty read combined with a set returncode means the reader
        # (and the nmon process feeding the fifo) has terminated
        if line == '' and p.poll() is not None:
            break
        # dispatch patterns
        # NOTE(review): r'^[AAA|BBB].+' is a character class (matches any line
        # starting with 'A', 'B' or '|'), not the alternation '(AAA|BBB)';
        # kept as-is to preserve the historical dispatch behaviour.
        nmon_config_match = re.match(r'^[AAA|BBB].+', str(line))
        nmon_header_match = re.match(r'^(?!AAA|BBB|TOP)[a-zA-Z0-9\-\_]*,(?!T\d{3,})[^,]*,(?!T\d{3,})[^,]*.*', str(line))
        nmon_header_TOP_match = re.match(r'^TOP,(?!\d*,)', str(line))
        nmon_timestamp_match = re.match(r'^ZZZZ,T\d*', str(line))
        nmon_error_match = re.match(r'^ERROR,T\d*', str(line))
        if nmon_config_match:
            # fix: removed a stray debug print() that echoed every config line
            # to stdout (no other branch prints)
            with open(nmon_config_dat, "a") as nmon_config:
                nmon_config.write(line)
        elif nmon_header_match:
            with open(nmon_header_dat, "a") as nmon_header:
                nmon_header.write(line)
        elif nmon_header_TOP_match:
            with open(nmon_header_dat, "a") as nmon_header:
                nmon_header.write(line)
        elif nmon_error_match:
            with open(nmon_error_dat, "a") as nmon_error:
                nmon_error.write(line)
        # timestamp management: write the nmon timestamp in nmon_data and as well nmon_timestamp for later use
        elif nmon_timestamp_match:
            with open(nmon_timestamp_dat, "a") as nmon_timestamp:
                nmon_timestamp.write(line)
            with open(nmon_data_dat, "a") as nmon_data:
                nmon_data.write(line)
        else:
            with open(nmon_data_dat, "a") as nmon_data:
                nmon_data.write(line)

@ -0,0 +1,30 @@
#!/bin/sh
# set -x
# Program name: metricator_reader.sh
# Compatibility: Shell
# Purpose - read nmon data from fifo file and output to stdout
# Author - Guilhem Marchand
# Version 2.0.0
# For AIX / Linux / Solaris
#################################################
## Your Customizations Go Here ##
#################################################
# fifo to be read (valid choices are: fifo1 | fifo2)
FIFO=$1

####################################################################
#############           Main Program            ############
####################################################################

# Stream the fifo to stdout line by line.
# fix: `read -r` keeps backslashes intact and printf '%s\n' "$line" emits the
# line verbatim; the original unquoted `echo $line` applied word splitting and
# pathname expansion, corrupting nmon lines containing runs of spaces or '*'.
while IFS= read -r line
do
    printf '%s\n' "$line"
done <"$FIFO"

exit 0

@ -0,0 +1,58 @@
#!/bin/sh
# Program name: nmon_external_snap.sh
# Purpose - Add external command results to extend nmon data
# Author - Guilhem Marchand
# Disclaimer: this provided "as is".
# Date - March 2017
# Guilhem Marchand 2017/03/18, initial version
# 2017/04/29, Guilhem Marchand: - AIX compatibility issues, detach the commands in background
# 2017/06/04, Guilhem Marchand: - Manage nmon external data in a dedicated file
# 2018/01/09, Guilhem Marchand: - exclude dockers virtual fs, improve exclusion of false fs
# Version 1.0.3
# For AIX / Linux / Solaris
# for more information, see:
# https://www.ibm.com/developerworks/community/blogs/aixpert/entry/nmon_and_External_Data_Collectors?lang=en
# This script will output the values for our custom external monitors
# The first field defines the name of the monitor (type field in the application)
# This monitor name must then be added to your local/nmonparser_config.json file
# 2 sections are available for nmon external monitor managements:
# - nmon_external: manage any number of fields without transposition
# - nmon_external_transposed: manage any number of fields with a notion of device / value
# note: the NMON_FIFO_PATH is a pattern that will be replaced by the metricator_helper.sh script in a copy of this script
# that lives for the time to live of the nmon process started
# CAUTION: ensure your custom command does not output any comma within the field name and value
# Number of running processes ($1 is the nmon timestamp reference, e.g. T0001)
# NOTE(review): `ps -ef | wc -l` also counts the ps header line — TODO confirm intended
echo "PROCCOUNT,$1,`ps -ef | wc -l`" >>NMON_FIFO_PATH/nmon_external.dat &

# Uptime information (uptime command output); commas replaced by semicolons to
# keep the comma-separated nmon record intact
echo "UPTIME,$1,\"`uptime | sed 's/^\s//g' | sed 's/,/;/g'`\"" >>NMON_FIFO_PATH/nmon_external.dat &

# df table information: list of mount points, excluding pseudo and docker file systems
DF_TABLE=`df -k -P | sed '1d' | egrep -v '\/proc$|/dev$|\/run$|^tmpfs.*\/dev.*$|^tmpfs.*\/run.*$|^tmpfs.*\/sys.*$|^tmpfs.*\/var.*$|^none.*\/run|^none.*\/sys.*|\/var\/lib\/docker\/aufs\/mnt\/.*|\/var\/lib\/docker\/containers\/.*' | awk '{print $6}'`

# one DF_STORAGE record per mount point (filesystem, blocks, used, available, use%, mount)
for fs in $DF_TABLE; do
    echo "DF_STORAGE,$1,`df -k -P $fs | sed '1d' | sed 's/%//g' | sed 's/,/;/g' | awk '{print $1 "," $2 "," $3 "," $4 "," $5 "," $6}'`" >>NMON_FIFO_PATH/nmon_external.dat
done

# DF_INODES, for AIX and Linux (df -i column layout differs between the two)
case `uname` in
"AIX")
    for fs in $DF_TABLE; do
        echo "DF_INODES,$1,`df -i $fs | sed '1d' | sed 's/%//g' | sed 's/,/;/g' | awk '{print $1 "," $5 "," $6 "," $7}'`" >>NMON_FIFO_PATH/nmon_external.dat
    done
    ;;
"Linux")
    for fs in $DF_TABLE; do
        echo "DF_INODES,$1,`df -i -P $fs | sed '1d' | sed 's/%//g' | sed 's/,/;/g' | awk '{print $1 "," $2 "," $3 "," $4 "," $5 "," $6}'`" >>NMON_FIFO_PATH/nmon_external.dat
    done
    ;;
esac

@ -0,0 +1,52 @@
#!/bin/sh
# Program name: nmon_external_start.sh
# Purpose - Write the header definitions used to extend nmon data with custom external monitors
# Author - Guilhem Marchand
# Disclaimer: this is provided "as is".
# Date - March 2017
# Guilhem Marchand 2017/03/18, initial version
# Guilhem Marchand 2017/03/29, protect against unexpected failure in NMON_EXTERNAL_DIR getting value
# Guilhem Marchand 2017/06/09, use dedicated files for external header and data
# Guilhem Marchand 2017/08/17, Adding DF table
# Version 1.0.3
# For AIX / Linux / Solaris
# for more information, see:
# https://www.ibm.com/developerworks/community/blogs/aixpert/entry/nmon_and_External_Data_Collectors?lang=en
# Each echo below declares one monitor header: the first field is the monitor
# name (type field in the application), the remaining fields are column names.
# Every monitor name must also be referenced in your local/nmonparser_config.json,
# either in "nmon_external" (no transposition) or in "nmon_external_transposed"
# (notion of device / value).
# note: NMON_FIFO_PATH is a pattern that will be replaced by the metricator_helper.sh
# script in a copy of this script that lives for the time to live of the nmon process started
# CAUTION: ensure your custom command does not output any comma within the field name and value

# Target header file for this nmon process
HEADER=NMON_FIFO_PATH/nmon_external_header.dat

# Truncate any header file left over by a previous run
[ -f $HEADER ] && : >$HEADER

# number of running processes
echo "PROCCOUNT,Process Count,nb_running_processes" >>$HEADER

# uptime information
echo "UPTIME,Server Uptime and load,uptime_stdout" >>$HEADER

# DF table (file systems usage)
echo "DF_STORAGE,File system disk space usage,filesystem,blocks,Used,Available,Use_pct,mount" >>$HEADER

# inodes utilization table is system specific and only available for AIX and Linux
OSNAME=`uname`
if [ "$OSNAME" = "AIX" ]; then
    echo "DF_INODES,File system inodes usage,filesystem,IUsed,IUse_pct,mount" >>$HEADER
elif [ "$OSNAME" = "Linux" ]; then
    echo "DF_INODES,File system inodes usage,filesystem,Inodes,IUsed,IFree,IUse_pct,mount" >>$HEADER
fi

@ -0,0 +1,197 @@
#!/bin/sh
# set -x
# Program name: nmonparser.sh
# Purpose - Frontal script to nmonparser, will launch Python or Perl script depending on interpreter availability
# See nmonparser | nmonparser.pl
# Author - Guilhem Marchand
# Version 2.0.1
# For AIX / Linux / Solaris
#################################################
## Your Customizations Go Here ##
#################################################
# format date output to strftime dd/mm/YYYY HH:MM:SS
# Return the current local time formatted as dd-mm-YYYY HH:MM:SS,
# used as the timestamp prefix of every log line emitted by this script
log_date () {
date "+%d-%m-%Y %H:%M:%S"
}
# Set host
HOST=`hostname`

# SPLUNK_HOME must be provided by the calling environment (Splunk scripted input)
if [ -z "${SPLUNK_HOME}" ]; then
echo "`log_date`, ERROR, SPLUNK_HOME variable is not defined"
exit 1
fi

# Set tmp directory
APP_VAR=${SPLUNK_HOME}/var/log/metricator

# Create the tmp directory when missing; only abort when the creation itself
# fails (previously the script exited 1 unconditionally right after mkdir,
# which aborted the very first run even though the directory had just been
# created successfully, and without any error message)
if [ ! -d "${APP_VAR}" ]; then
mkdir -p "${APP_VAR}"
if [ $? -ne 0 ]; then
echo "`log_date`, ${HOST} ERROR, could not create the directory ${APP_VAR}"
exit 1
fi
fi

# silently remove tmp files (testing existence before rm seems to cause trouble on some old OS)
rm -f ${APP_VAR}/nmonparser.temp.*

# Set nmon_temp: unique tmp file for this run, suffixed with the current pid
nmon_temp=${APP_VAR}/nmonparser.temp.$$
# APP path discovery: the TA can live either in etc/apps (standalone install)
# or in etc/slave-apps (indexer cluster member managed by the cluster master)
if [ -d "$SPLUNK_HOME/etc/apps/TA-metricator-hec-for-nmon" ]; then
APP=$SPLUNK_HOME/etc/apps/TA-metricator-hec-for-nmon
elif [ -d "$SPLUNK_HOME/etc/slave-apps/TA-metricator-hec-for-nmon" ];then
APP=$SPLUNK_HOME/etc/slave-apps/TA-metricator-hec-for-nmon
else
echo "`log_date`, ${HOST} ERROR, the APP directory could not be defined, is the TA-metricator-hec-for-nmon installed ?"
exit 1
fi
# source default nmon.conf
if [ -f $APP/default/nmon.conf ]; then
# During initial deployment, the nmon.conf needs to be managed properly by the metricator_consumer.sh
# wait for this to be done
# NOTE(review): a remaining "[nmon]" stanza marker presumably means the file is
# still in its initial, not yet shell-sourceable form, so we exit safely (0)
# and let a later iteration retry once metricator_consumer.sh has converted it
grep '\[nmon\]' $APP/default/nmon.conf >/dev/null
if [ $? -eq 0 ]; then
echo "`log_date`, ${HOST} INFO, initial deployment condition detected, safe exiting."
exit 0
else
. $APP/default/nmon.conf
fi
fi
# source local nmon.conf, if any (local settings override default settings)
# Search for a local nmon.conf file located in $SPLUNK_HOME/etc/apps/TA-metricator-hec-for-nmon/local
if [ -f $APP/local/nmon.conf ]; then
. $APP/local/nmon.conf
fi
# On a per server basis, you can also set in /etc/nmon.conf (highest precedence)
if [ -f /etc/nmon.conf ]; then
. /etc/nmon.conf
fi
# Manage FQDN option: when --use_fqdn is part of the parser options, use the
# fully qualified host name (only meaningful on Linux, where hostname -f exists)
echo $nmonparser_options | grep '\-\-use_fqdn' >/dev/null
if [ $? -eq 0 ]; then
# Honour an UNAME value exported by the caller, otherwise detect it here:
# previously $UNAME was never assigned in this script, so the Linux branch
# could never match and --use_fqdn silently had no effect
UNAME=${UNAME:-`uname`}
# Only relevant for Linux OS
case $UNAME in
Linux)
HOST=`hostname -f` ;;
AIX)
HOST=`hostname` ;;
SunOS)
HOST=`hostname` ;;
esac
else
HOST=`hostname`
fi
# Manage host override option based on Splunk hostname defined
case $override_sys_hostname in
"1")
# Retrieve the Splunk host value from etc/system/local/inputs.conf
# ('^host *=' also matches "host=" written without surrounding spaces)
HOST=`grep '^host *=' $SPLUNK_HOME/etc/system/local/inputs.conf | awk -F\= '{print $2}' | sed 's/ //g'`
;;
esac
#
# Interpreter choice
#
# Detect the available interpreters; priority is Python everywhere except on
# AIX where Perl is preferred. The dead "PYTHON=0 / PYTHON2=0 / PYTHON3=0 /
# PERL=0" initializations were removed: each variable was unconditionally
# overwritten below before being read (and PYTHON was never read at all).
# Set the default interpreter
INTERPRETER="python"
# Locate the interpreters; when a command is not found, the captured output is
# an error message that will not match the *python* / *perl* patterns below
PYTHON2=`which python 2>&1`
PYTHON3=`which python3 2>&1`
PERL=`which perl 2>&1`
# Handle Python: prefer python3, fall back to python (python2)
PYTHON_available="false"
case $PYTHON3 in
*python*)
PYTHON_available="true"
INTERPRETER="python3" ;;
*)
case $PYTHON2 in
*python*)
PYTHON_available="true"
INTERPRETER="python" ;;
esac
;;
esac
# Handle Perl
case $PERL in
*perl*)
PERL_available="true"
;;
*)
PERL_available="false"
;;
esac
case `uname` in
# AIX priority is Perl
"AIX")
case $PERL_available in
"true")
INTERPRETER="perl" ;;
"false")
INTERPRETER="$INTERPRETER" ;;
esac
;;
# Other OS, priority is Python (fall back to Perl when no Python is available)
*)
case $PYTHON_available in
"true")
INTERPRETER="$INTERPRETER" ;;
"false")
INTERPRETER="perl" ;;
esac
;;
esac
####################################################################
#############           Main Program                    ############
####################################################################
# Store arguments sent to script
userargs=$@
# Copy stdin verbatim to the temp file. The previous "while read line" loop
# mangled the data: read without -r interprets backslash escapes and strips
# leading/trailing whitespace per line, and the loop is far slower than cat.
cat >> ${nmon_temp}
# Start the parser with the interpreter selected above
case ${INTERPRETER} in
"python"|"python3")
cat ${nmon_temp} | ${SPLUNK_HOME}/bin/splunk cmd $INTERPRETER ${APP}/bin/nmonparser_hec.py ${userargs} ;;
"perl")
cat ${nmon_temp} | ${SPLUNK_HOME}/bin/splunk cmd ${APP}/bin/nmonparser_hec.pl ${userargs} ;;
esac
# Remove temp
rm -f ${nmon_temp}
exit 0

@ -0,0 +1,140 @@
# pre-actions scripts
The frameID definition is an enrichment mechanism used within the application to associate a given host with a given frame identifier.
By default, the mapping is operated against the value of "serialnum" which is defined at the raw level by nmon binaries.
- On AIX systems, the serialnum value is equal to the serial number of the frame hosting the partition
- On Linux and Solaris systems, the serialnum is equal to the value of the hostname
A pre-action script can be designed to define a serial number according to your needs.
In "nmon.conf", the following settings are designed to manage the serial number:
- override_sys_serialnum="1": will activate the serial number override
- override_sys_serialnum_value="<value for serial number>": defines its value
Add any shell script in this directory to get a pre-action to be executed automatically by the metricator_helper.sh script at startup time.
You can use this simple feature to perform a pre-action each time the metricator_helper.sh script is executed.
stdout will be indexed in "sourcetype=nmon_collect".
stderr will be indexed in splunkd logs.
Any script you would add in this directory will be upgrade resilient and would not be lost or modified when you upgrade the TA.
pre-action scripts execution will be visible in sourcetype=nmon_collect:
message = <date>, ${HOST} INFO, executing pre-action script: <name of pre-action script>
Requirements:
- script names can be whatever you want
- must have ".sh" extension
- must have execution permission by the Unix username owning processes
## use case examples:
### 1. run a local command on servers to define the serial number value
----------------------------
#!/bin/sh
serialnumber=`<replace with the command that retrieves a value to be used as the serialnumber>`
# nmon_conf="/etc/nmon.conf" to write in /etc/nmon.conf (requires processes to run under root)
# nmon_conf="$SPLUNK_HOME/etc/apps/TA-metricator-hec-for-nmon/local/nmon.conf" to write in app name space
nmon_conf="$SPLUNK_HOME/etc/apps/TA-metricator-hec-for-nmon/local/nmon.conf"
# if nmon.conf could not be found, create, activate serial number override and fill its value
if [ ! -f $nmon_conf ]; then
echo "# nmon.conf" >> $nmon_conf
echo "override_sys_serialnum=\"1\"" >> $nmon_conf
echo "override_sys_serialnum_value=\"$serialnumber\"" >> $nmon_conf
else # verify the option activation, verify the serial number value
egrep "^override_sys_serialnum=" $nmon_conf >/dev/null
if [ $? -ne 0 ]; then
echo "override_sys_serialnum=\"1\"" >> $nmon_conf
fi
# verify serial number value
egrep "^override_sys_serialnum_value=" $nmon_conf >/dev/null
if [ $? -eq 0 ]; then # if option is set, check value
egrep "^override_sys_serialnum_value=\"$serialnumber\"" $nmon_conf >/dev/null
if [ $? -ne 0 ]; then # if mismatch, replace value
cat $nmon_conf | grep -v "override_sys_serialnum_value" > ${nmon_conf}.new
echo "override_sys_serialnum_value=\"$serialnumber\"" >> ${nmon_conf}.new
mv ${nmon_conf}.new $nmon_conf
fi
else # option is not set, simply add it
echo "override_sys_serialnum_value=\"$serialnumber\"" >> $nmon_conf
fi
fi
exit 0
----------------------------
### 2. Using server naming conventions and domain names to set up a frameID by extracting the region name
----------------------------
#!/bin/sh
# convention naming: server001.mycompany.co.uk | retrieve the region being the 4th segment of FQDN
HOST=`hostname`
case `uname` in
Linux)
FQDN=`hostname -f` ;;
*)
FQDN=$HOST ;;
esac
REGION=`echo $FQDN | awk -F\. '{print $4}'`
# revert to host value in case of failure
# otherwise, affect to: DC-<region>
case $REGION in
"")
serialnumber="$HOST" ;;
*)
serialnumber="datacenter-$REGION" ;;
esac
# nmon_conf="/etc/nmon.conf" to write in /etc/nmon.conf (requires processes to run under root)
# nmon_conf="$SPLUNK_HOME/etc/apps/TA-metricator-hec-for-nmon/local/nmon.conf" to write in app name space
nmon_conf="$SPLUNK_HOME/etc/apps/TA-metricator-hec-for-nmon/local/nmon.conf"
# if nmon.conf could not be found, create, activate serial number override and fill its value
if [ ! -f $nmon_conf ]; then
echo "# nmon.conf" >> $nmon_conf
echo "override_sys_serialnum=\"1\"" >> $nmon_conf
echo "override_sys_serialnum_value=\"$serialnumber\"" >> $nmon_conf
else # verify the option activation
egrep "^override_sys_serialnum=" $nmon_conf >/dev/null
if [ $? -ne 0 ]; then
echo "override_sys_serialnum=\"1\"" >> $nmon_conf
fi
# verify serial number value
egrep "^override_sys_serialnum_value=" $nmon_conf >/dev/null
if [ $? -eq 0 ]; then # if option is set, check value
egrep "^override_sys_serialnum_value=\"$serialnumber\"" $nmon_conf >/dev/null
if [ $? -ne 0 ]; then # if mismatch, replace value
cat $nmon_conf | grep -v "override_sys_serialnum_value" > ${nmon_conf}.new
echo "override_sys_serialnum_value=\"$serialnumber\"" >> ${nmon_conf}.new
mv ${nmon_conf}.new $nmon_conf
fi
else # option is not set, simply add it
echo "override_sys_serialnum_value=\"$serialnumber\"" >> $nmon_conf
fi
fi
exit 0
----------------------------

@ -0,0 +1,3 @@
# sarmon_bin_* directories contain compiled sarmon binaries for x86 and sparc processor
The source code of sarmon is available at the sarmon web site: http://www.geckotechnology.com/sarmon

@ -0,0 +1,19 @@
#
# Splunk app configuration file
#
[install]
is_configured = 0
[package]
id = TA-metricator-hec-for-nmon
check_for_updates = true
[ui]
is_visible = 0
label = TA-metricator-hec-for-nmon
[launcher]
author = Guilhem Marchand for Octamis
description = Nmon performance and configuration data technical addon for the metric store
version = 1.1.1

@ -0,0 +1,38 @@
# inputs.conf
######
# main
######
# The metricator_helper.sh performs nmon binary starting if required
# by default, the script is executed every 60 seconds and will not perform any action if
# the nmon binary is started
[script://./bin/metricator_helper.sh]
disabled = false
index = os-unix-nmon-internal
interval = 60
source = nmon_collect
sourcetype = nmon_collect
# The metricator_consumer.sh script consumes nmon data produced in fifo mode
[script://./bin/metricator_consumer.sh]
disabled = false
index = os-unix-nmon-internal
interval = 60
source = fifo_consumer
sourcetype = nmon_processing
##############
# nmon cleaner
##############
# The metricator_cleaner.sh script performs cleaning tasks internal to the TA, scheduled once a day
[script://./bin/metricator_cleaner.sh --cleancsv]
disabled = false
index = os-unix-nmon-internal
interval = 14400
source = nmon_cleaner
sourcetype = nmon_clean

@ -0,0 +1,233 @@
# nmon.conf
################################
# Nmon processes related options
################################
#
# These options will be used to manage Nmon processes starting options
#
# This configuration file will set the interval and snapshot values when starting up the nmon binary
# It is being sourced by the metricator_helper.sh script during script startup
# *** BE UPGRADE RESILIENT: *** Copy this file to your local/nmon.conf to prevent future upgrades from overwriting your settings
# *** DON'T MODIFY THIS FILE ***
# *** FILE ENCODING: UTF-8 ! ***
# When creating a local/nmon.conf, pay attention to file encoding specially when working under Windows.
# The file must be UTF-8 encoded or you may run into trouble.
### NMON COLLECT OPTIONS ###
# The metricator_helper.sh input script is set by default to run every 60 seconds
# If Nmon is not running, the script will start Nmon using the configuration above
###
### FIFO options:
###
# Using FIFO files (named pipe) are now used to minimize the CPU footprint of the technical addons
# As such, it is not required anymore to use short cycle of Nmon run to reduce the CPU usage
# You can still want to manage the volume of data to be generated by managing the interval and snapshot values
# as a best practice recommendation, the time to live of nmon processes writing to FIFO should be 24 hours
# value for interval: time in seconds between 2 performance measures
fifo_interval="60"
# value for snapshot: number of measure to perform
fifo_snapshot="1440"
### VARIOUS COMMON OPTIONS ###
# Time in seconds of margin before running a new iteration of Nmon process to prevent data gaps between 2 iterations of Nmon
# the metricator_helper.sh script will spawn a new Nmon process when the age in seconds of the current process gets higher than this value
# The endtime is evaluated the following way:
# endtime=$(( ${interval} * ${snapshot} - ${endtime_margin} ))
# When the endtime gets higher than the endtime_margin, a new Nmon process will be spawned
# default value to 240 seconds which will start a new process 4 minutes before the current process ends
# Setting this value to "0" will totally disable this feature
endtime_margin="240"
### NFS OPTIONS ###
# Change to "1" to activate NFS V2 / V3 (option -N) for AIX hosts
AIX_NFS23="0"
# Change to "1" to activate NFS V4 (option -NN) for AIX hosts
AIX_NFS4="0"
# Change to "1" to activate NFS V2 / V3 / V4 (option -N) for Linux hosts
# Note: Some versions of Nmon introduced a bug that makes Nmon to core when activating NFS, ensure your version is not outdated
Linux_NFS="0"
### LINUX OPTIONS ###
# Change the priority applied while looking at nmon binary
# by default, the metricator_helper.sh script will use any nmon binary found in PATH
# Set to "1" to give the priority to embedded nmon binaries
# Note: Since release 1.6.07, priority is given by default to embedded binaries
Linux_embedded_nmon_priority="1"
# Change the limit for processes and disks capture of nmon for Linux
# In default configuration, nmon will capture most of the process table by capturing main consuming processes
# This function is percentage limit of CPU time, with a default limit of 0.01
# Changing this value can influence the volume of data to be generated, and the associated CPU overhead for that data to be parsed
# Possible values are:
# Linux_unlimited_capture="0" --> Default nmon behavior, capture main processes (no -I option)
# Linux_unlimited_capture="-1" --> Set the capture mode to unlimited (-I -1)
# Linux_unlimited_capture="x.xx" --> Set the percentage limit to a custom value, ex: "0.01" will set "-I 0.01"
Linux_unlimited_capture="0"
# Set the maximum number of devices collected by Nmon, default is set to 1500 devices
# This option will be ignored if you activate the Linux_unlimited_capture option above.
# Increase this value if you have systems with more devices
# Up to 3000 devices will be taken in charge by the Application (hard limit in nmonparser)
Linux_devices="1500"
# Enable disks extended statistics (DG*)
# Default is true, which activates and generates DG statistics
Linux_disk_dg_enable="1"
# Name of the User Defined Disk Groups file, "auto" generates this for you
Linux_disk_dg_group="auto"
### SOLARIS OPTIONS ###
# Change to "1" to activate VxVM volumes IO statistics
Solaris_VxVM="0"
# UARG collection (new in Version 1.11), Change to "0" to deactivate, "1" to activate (default is activate)
Solaris_UARG="1"
### AIX COMMON OPTIONS ###
# CAUTION: Since release 1.3.0, we use fifo files, which requires the option "-yoverwrite=1"
# Change this line if you add or remove common options for AIX, do not change NFS options here (see NFS options)
# the -p option is mandatory as it is used at launch time to save instance pid
AIX_options="-T -A -d -K -L -M -P -^ -p -yoverwrite=1"
# enable this line if you want to get only active disks
# AIX_options="-T -A -d -K -L -M -P -^ -p -k `lspv|grep active|awk '{print $1","}'|tr -d '\040\011\012\015'` -yoverwrite=1"
#############################
# Application related options
#############################
#
# These options are not directly related to nmon processes but to general features of the technical add-on
#
######################
# hostname definition:
######################
# This option can be used to force the technical add-on to use the Splunk configured value of the server hostname
# If for some reason, you need to use the Splunk host value instead of the system real hostname value, set this value to "1"
# We will search for the value of host=<value> in $SPLUNK_HOME/etc/system/local/inputs.conf
# If no value can be found, or if the file does not exist, we will fallback to the normal behavior
# Default is use system hostname
# FQDN management in nmonparser: The --fqdn option is not compatible with the host name override, if the override_sys_hostname
# is activated, the --fqdn argument will have no effect
override_sys_hostname="0"
#####################
# frameID definition:
#####################
# The frameID definition is an enrichment mechanism used within the application to associate a given host with a given frame identifier
# By default, the mapping is operated against the value of "serialnum" which is defined at the raw level by nmon binaries
# On AIX systems, the serialnum value is equal to the serial number of the frame hosting the partition
# On Linux and Solaris systems, the serialnum is equal to the value of the hostname
# Using this option allows you to override the serialnum value by a static value defined in the nmon.conf configuration file
# nmon.conf precedence allows defining the serialnum value on per deployment basis (local/nmon.conf) or on a per server basis (/etc/nmon.conf)
# default is:
# override_sys_serialnum="0"
# which lets nmon set the serialnum value
# Set this value to:
# override_sys_serialnum="1"
# to activate the serialnum override based on the value defined in:
# override_sys_serialnum_value="<string>"
# Acceptable values for <string> are letters (lower and upper case), numbers and "-" / "_"
override_sys_serialnum="0"
override_sys_serialnum_value="none"
########################
# nmon external metrics:
########################
# nmon external generation management
# This option will manage the activation or deactivation of the nmon external data generation at the lower level, before it comes to parsers
# default is activated (value=1), set to "0" to deactivate
nmon_external_generation="1"
###############
# fifo options:
###############
# Fifo options
# The realtime mode which corresponds to the old mechanism is now deprecated
# fifo mode is mandatory
# Default is "1" which means write to fifo
mode_fifo="1"
#######################
# nmon parsers options:
#######################
# consult the documentation to get the full list of available options
# --mode fifo|colddata --> explicitly manage data in fifo/colddata
# --use_fqdn --> use the host fully qualified domain name (default)
# --silent --> minimize the processing output to save data volume (deactivated by default)
# --show_zero_values --> allows generating metrics with 0 values (default removes any metric with a zero value before it reaches the ingestion)
# --no_local_log --> do not write metrics, events and config locally on the file-system (activated by default)
# --splunk_http_url --> Splunk HEC endpoint URL (must contain the protocol, IP or FQDN and endpoint path)
# --splunk_http_token --> Splunk HEC token value
# --splunk_metrics_index --> Name of the metrics index (default: os-unix-nmon-metrics)
# --splunk_events_index --> Name of the events index (default: os-unix-nmon-events)
# --splunk_config_index --> Name of the config index (default: os-unix-nmon-config)
# In fifo mode, options are sent by the metricator_consumer.sh
# In file mode, options are sent by Splunk via the nmon_processing stanza in props.conf
#
# Splunk HEC configuration (http input)
#
# Change the Splunk URL to match your protocol (http vs https) and your access URL
# By default, as long the token value is not changed from the demonstration value above, the parser will just do nothing else than writing to local logs
# For more information, see: http://dev.splunk.com/view/event-collector/SP-CAAAE6M
# TO CONFIGURE:
# - create the "local" directory in /etc/nmon-logger
# - copy the default/nmon.conf to local/
# - manage your settings in your local nmon.conf
nmonparser_options="--mode fifo --use_fqdn --silent --no_local_log --splunk_http_url https://splunk.mydomain.com:8088/services/collector/event --splunk_http_token insert_your_splunk_http_token --splunk_metrics_index os-unix-nmon-metrics --splunk_events_index os-unix-nmon-events --splunk_config_index os-unix-nmon-config"

@ -0,0 +1,32 @@
{
"static_section":["CPUnn", "CPU_ALL", "FILE", "MEM", "PAGE", "MEMNEW", "MEMUSE", "PROC", "VM", "NFSSVRV2", "NFSSVRV3", "NFSSVRV4", "NFSCLIV2", "NFSCLIV3", "NFSCLIV4"],
"Solaris_static_section":["PROCSOL"],
"LPAR_static_section":["LPAR", "POOLS"],
"top_section":["TOP"],
"uarg_section":["UARG"],
"dynamic_section1":["DISKBUSY", "DISKBSIZE", "DISKREAD", "DISKWRITE", "DISKXFER", "DISKREADSERV", "DISKWRITESERV"],
"dynamic_section2":["IOADAPT", "NETERROR", "NET", "NETPACKET", "JFSFILE", "JFSINODE", "FCREAD", "FCWRITE", "FCXFERIN", "FCXFEROUT"],
"disk_extended_section":["DGBUSY", "DGREAD", "DGWRITE", "DGSIZE", "DGXFER", "DGREADS", "DGREADMERGE", "DGREADSERV", "DGWRITES", "DGWRITEMERGE", "DGWRITESERV", "DGINFLIGHT", "DGIOTIME", "DGBACKLOG"],
"solaris_WLM":["WLMPROJECTCPU", "WLMZONECPU", "WLMTASKCPU", "WLMUSERCPU", "WLMPROJECTMEM", "WLMZONEMEM", "WLMTASKMEM", "WLMUSERMEM"],
"solaris_VxVM":["VxVMREAD", "VxVMWRITE", "VxVMXFER", "VxVMBSIZE", "VxVMBUSY", "VxVMSVCTM", "VxVMWAITTM"],
"solaris_dynamic_various":["DISKSVCTM", "DISKWAITTM"],
"AIX_dynamic_various":["SEA", "SEAPACKET", "SEACHPHY"],
"AIX_WLM":["WLMCPU", "WLMMEM", "WLMBIO"],
"nmon_external":["UPTIME", "PROCCOUNT", "DF_STORAGE", "DF_INODES"],
"nmon_external_transposed":[""]
}

@ -0,0 +1,171 @@
# props.conf
###############################
# nmon metrics for metric store
###############################
# Introduced with Splunk 7, metrics are now natively supported
# Nmon uses its own copy of the default metrics_csv sourcetype
[nmon_metrics_csv]
SHOULD_LINEMERGE = False
pulldown_type = true
INDEXED_EXTRACTIONS = csv
ADD_EXTRA_TIME_FIELDS = False
KV_MODE = none
TIMESTAMP_FIELDS = metric_timestamp
TIME_FORMAT = %s.%Q
category = Metrics
description = Comma-separated value format for metrics. Nmon implementation.
# Overwriting default host field based on event data for the nmon_metrics_csv sourcetype (useful when managing Nmon central shares)
TRANSFORMS-hostfield=nmon_metrics_csv_hostoverride
# Metrics can be sent by http using the Splunk Http Event Collector (HEC)
[nmon_metrics_http]
TIME_PREFIX = metric_timestamp=\"(\d+)\"
TIME_FORMAT = %s
TRANSFORMS-nmon_metrics_http = nmon_metrics_http_host, nmon_metrics_http_metric_name, nmon_metrics_http_metric_value, nmon_metrics_http_dims, nmon_metrics_http_OStype, nmon_metrics_http_serialnum
NO_BINARY_CHECK = true
SHOULD_LINEMERGE = false
pulldown_type = 1
category = Metrics
########################
# nmon metrics as events
########################
# This sourcetype stanza will be used to index nmon csv converted data
# Every generated csv file will contain a CSV header used by Splunk to identify fields
[nmon_data]
FIELD_DELIMITER=,
FIELD_QUOTE="
HEADER_FIELD_LINE_NUMBER=1
# your settings
INDEXED_EXTRACTIONS=csv
NO_BINARY_CHECK=1
SHOULD_LINEMERGE=false
TIMESTAMP_FIELDS=ZZZZ
TIME_FORMAT=%d-%m-%Y %H:%M:%S
# set by detected source type
KV_MODE=none
pulldown_type=true
# Leaving PUNCT enabled can impact indexing performance, and uses space
# For structured data, it has poor interest and shall be deactivated
ANNOTATE_PUNCT=false
# Overwriting default host field based on event data for the nmon_data sourcetype (useful when managing Nmon central shares)
TRANSFORMS-hostfield=nmon_data_hostoverride
# nmon_data sent over http using the Splunk Http Event Collector (HEC)
# This sourcetype will be automatically renamed to nmon_data
[nmon_data_http]
SHOULD_LINEMERGE=false
NO_BINARY_CHECK=true
CHARSET=UTF-8
TIME_FORMAT=%s
TIME_PREFIX=timestamp="
MAX_TIMESTAMP_LOOKAHEAD=26
KV_MODE=auto
# Apply indexing time parsing configuration
TRANSFORMS-nmon_data_http = nmon_data_http_host, nmon_data_http_OStype, nmon_data_http_type, nmon_data_http_sourcetype
# For search time extractions, activate kvmode to auto for that source
[source::nmon_data:http]
KV_MODE=auto
########################
# nmon processing events
########################
[nmon_processing]
SHOULD_LINEMERGE=false
NO_BINARY_CHECK=true
CHARSET=UTF-8
TIME_PREFIX=^
TIME_FORMAT=%d-%m-%Y %H:%M:%S
MAX_TIMESTAMP_LOOKAHEAD=19
LINE_BREAKER=([\n\r]+)\d{2}-\d{2}-\d{4}\s\d{2}:\d{2}:\d{2}
TRUNCATE=999999
# Deactivate KV
KV_MODE=none
####################
# nmon config events
####################
[nmon_config]
SHOULD_LINEMERGE=false
NO_BINARY_CHECK=true
CHARSET=UTF-8
TIME_PREFIX=^CONFIG,
TIME_FORMAT=%d-%b-%Y:%H:%M.%S
LINE_BREAKER=([\r\n]+)CONFIG,\d{2}-\w{3}-\d{4}:\d{2}:\d{2}\.\d{2},
TRUNCATE=0
MAX_EVENTS=100000
MAX_TIMESTAMP_LOOKAHEAD=30
# Deactivate KV
KV_MODE = none
# Overwriting default host field based on event data for the nmon_config sourcetype (useful when managing Nmon central shares)
TRANSFORMS-hostfield=nmon_config_hostoverride
# nmon_config sent over http
[nmon_config:http]
SHOULD_LINEMERGE=false
NO_BINARY_CHECK=true
CHARSET=UTF-8
LINE_BREAKER=([\r\n]+)timestamp=\"
MAX_EVENTS=100000
TIME_FORMAT=%s
TIME_PREFIX=timestamp="
TRUNCATE=0
# Rewrite the source Metadata to manage search time extraction
TRANSFORMS-nmon_config_http = nmon_config_http_rewrite_host, nmon_config_http_rewrite_sourcetype
# For search heads
[source::nmon_config:http]
KV_MODE=none
#####################
# nmon collect events
#####################
[nmon_collect]
SHOULD_LINEMERGE=false
NO_BINARY_CHECK=true
CHARSET=UTF-8
TIME_PREFIX=^
TIME_FORMAT=%d-%m-%Y %H:%M:%S
MAX_TIMESTAMP_LOOKAHEAD=19
LINE_BREAKER=([\n\r]+)\d{2}-\d{2}-\d{4}\s\d{2}:\d{2}:\d{2}
TRUNCATE=999999
# Deactivate KV
KV_MODE = none
###################
# nmon clean events
###################
[nmon_clean]
SHOULD_LINEMERGE=false
NO_BINARY_CHECK=true
CHARSET=UTF-8
TIME_PREFIX=^
TIME_FORMAT=%d-%m-%Y %H:%M:%S
MAX_TIMESTAMP_LOOKAHEAD=19
LINE_BREAKER=([\n\r]+)\d{2}-\d{2}-\d{4}\s\d{2}:\d{2}:\d{2}
TRUNCATE=999999
# Deactivate KV
KV_MODE = none

@ -0,0 +1,114 @@
# transforms.conf
##############
# nmon metrics
##############
# host Meta overridden with 5th column
[nmon_metrics_csv_hostoverride]
DEST_KEY = MetaData:Host
REGEX = ^\d*,\"{0,1}[^\"\,]*\"{0,1},\"{0,1}[^\"\,]*\"{0,1}[^\"\,]*\"{0,1},\"{0,1}[^\"\,]*\"{0,1},\"{0,1}([^\"\,]*)\"{0,1}
FORMAT = host::$1
# Metrics sent over http - host
[nmon_metrics_http_host]
DEST_KEY = MetaData:Host
REGEX = hostname=\"([^\"]*)\"
FORMAT = host::$1
# Metrics sent over http - metric_name
[nmon_metrics_http_metric_name]
REGEX = metric_name=\"([^\"]*)\"
FORMAT = metric_name::$1
WRITE_META = true
[nmon_metrics_http_metric_value]
REGEX = _value=\"([\d|\.]*)\"
FORMAT = _value::$1
WRITE_META = true
# Metrics sent over http - dimensions
[nmon_metrics_http_dims]
REGEX = (dimension\_\w*)=\"([^\"]*)\"
FORMAT = $1::$2
WRITE_META = true
REPEAT_MATCH = true
# Metrics sent over http - OStype
[nmon_metrics_http_OStype]
REGEX = OStype=\"([^\"]*)\"
FORMAT = OStype::$1
WRITE_META = true
# Metrics sent over http - serialnum
[nmon_metrics_http_serialnum]
REGEX = serialnum=\"([^\"]*)\"
FORMAT = serialnum::$1
WRITE_META = true
###########
# nmon data
###########
# Host override based on event data from the nmon_data sourcetype
[nmon_data_hostoverride]
DEST_KEY = MetaData:Host
REGEX = ^\"{0,1}[a-zA-Z0-9\_]+\"{0,1},\"{0,1}[a-zA-Z0-9\-\_\.]+\"{0,1},\"{0,1}([a-zA-Z0-9\-\_\.]+)\"{0,1},.+
FORMAT = host::$1
# nmon data as events sent over http - host indexed field
[nmon_data_http_host]
DEST_KEY = MetaData:Host
REGEX = hostname=\"([^\"]*)\"
FORMAT = host::$1
# nmon data as events sent over http - OStype indexed field
[nmon_data_http_OStype]
REGEX = \sOStype=\"([^\"]*)\"
WRITE_META = true
FORMAT = OStype::$1
DEFAULT_VALUE = NULL
# nmon data as events sent over http - type indexed field
[nmon_data_http_type]
REGEX = \stype=\"([^\"]*)\"
WRITE_META = true
FORMAT = type::$1
DEFAULT_VALUE = NULL
# nmon data as events sent over http - rewrite sourcetype
[nmon_data_http_sourcetype]
DEST_KEY = MetaData:Sourcetype
REGEX = .*
FORMAT = sourcetype::nmon_data
#############
# nmon config
#############
# Host override based on event data from the nmon_config sourcetype
[nmon_config_hostoverride]
DEST_KEY = MetaData:Host
REGEX = CONFIG\,[a-zA-Z0-9\-\:\.]+\,([a-zA-Z0-9\-\_\.]+)\,[a-zA-Z0-9\-\_\.]+
FORMAT = host::$1
# nmon_config sent over http
[nmon_config_http_rewrite_host]
DEST_KEY = MetaData:Host
REGEX = host=\"{0,}([a-zA-Z0-9\-\_\.]+)\"{0,}
FORMAT = host::$1
# nmon_config source
[nmon_config_http_rewrite_source]
DEST_KEY = MetaData:Source
REGEX = .*
FORMAT = source::configdata:http
# nmon_config sourcetype
[nmon_config_http_rewrite_sourcetype]
DEST_KEY = MetaData:Sourcetype
REGEX = .*
FORMAT = sourcetype::nmon_config

@ -0,0 +1,7 @@
# Application-level permissions
[]
owner = admin
access = read : [ * ], write : [ admin ]
export = system

@ -0,0 +1,208 @@
{
"version": "1.0",
"date": "2022-11-14T09:26:13.3459814Z",
"hashAlgorithm": "SHA-256",
"app": {
"id": 4022,
"version": "1.1.1",
"files": [
{
"path": "README.md",
"hash": "cd786d3b5421268f0f893be70011210ae5db6efbe077558d9219845d3b6c7b7a"
},
{
"path": "license.txt",
"hash": "7ddba183d8c539be99f03fe99499ddbc863cc688164bf3b5ea59d0d918a9e653"
},
{
"path": "metadata/default.meta",
"hash": "6b6c91fc18940aeb1580da6c06f92810beef8af632f73714070bae9e4a777af2"
},
{
"path": "static/appLogo.png",
"hash": "0736204483f4205c90d49c1f212e70d4e15c4d79aee19d43a0ab8247455118d1"
},
{
"path": "static/appIcon.png",
"hash": "e0611349e349b6cee55d123f85ed286a4ac2c0f1bbdbbedcbf230207bc2404ee"
},
{
"path": "static/appIcon_2x.png",
"hash": "5434fede7130f1bacc4d8e3ec48f2b3bd67367f55658b6d07d5b232b8f60f522"
},
{
"path": "static/appIconAlt.png",
"hash": "e0611349e349b6cee55d123f85ed286a4ac2c0f1bbdbbedcbf230207bc2404ee"
},
{
"path": "static/appLogo_2x.png",
"hash": "845f9bcdcd947b60e7c6d110f03debad96fee327b13a1bda2457788e069c350e"
},
{
"path": "static/appIconAlt_2x.png",
"hash": "5434fede7130f1bacc4d8e3ec48f2b3bd67367f55658b6d07d5b232b8f60f522"
},
{
"path": "README/nmon.conf.spec",
"hash": "27ec7f455744434f988312504fa3c9a89c0a77b2e76c6974ae187ddcd1e8f380"
},
{
"path": "default/app.conf",
"hash": "82887e56e863986c96284e5519d37873529be320e4c6571b8410f770af16ee77"
},
{
"path": "default/nmon.conf",
"hash": "85042666cc733e23c1bf4b96dad9eaaf8a90fbeff4770fd595d58bf54671fff2"
},
{
"path": "default/nmonparser_config.json",
"hash": "9db81c4534b90ef9f4dd67dfe0417c7c9bd669d95703d0dd91e6f27b4ac27c6c"
},
{
"path": "default/props.conf",
"hash": "865bce58bea6b7ae664d6b1f4d95457dfb41ab34e4588af9efa2625ac36325eb"
},
{
"path": "default/inputs.conf",
"hash": "1bfb384dff0bb2661d3ced596e2c74a780d229413463f0b29015f5f734e7cf3c"
},
{
"path": "default/transforms.conf",
"hash": "4c575ce58234455c879370ab8f69ed4ea6b9bd7901b6bceb53ac1049c9ad1b82"
},
{
"path": "bin/nmonparser_hec.py",
"hash": "c7262e35253317410cbf3b2fdb9e6822f80704a52afbd94abb3396c56b29d576"
},
{
"path": "bin/sarmon_binaries_README",
"hash": "ab79e66800d2291dd43dec243fb6df8abd0975dbc2183477656eb61ee95fe456"
},
{
"path": "bin/sarmon_bin_i386.tgz",
"hash": "d184fa41438ac42a974373abb4607b926a02ea0b9f64d6e7a15f13c0aad76062"
},
{
"path": "bin/nmonparser_hec.pl",
"hash": "1a28b0be80748d94f43283b65e71c300ef2171a2286330141f09daef1ea72ef5"
},
{
"path": "bin/metricator_consumer.sh",
"hash": "c5662c4362572dc28fe89af2ff605f0f49c701434237c6f93ede62e0e515ca73"
},
{
"path": "bin/metricator_reader.sh",
"hash": "0aaefacadc71ebc964f9de2ea11195d8552f24f2b578249f7337d6a1e8003bc0"
},
{
"path": "bin/metricator_cleaner.sh",
"hash": "f932b0c3ef29fd07ff680e15e0a34f2ce3f0062cf8b6443a20750e324cbea544"
},
{
"path": "bin/metricator_helper.sh",
"hash": "83a13788aefed93c7549cf2520323317fb16b691259d45db8c4dede9d4e2f73f"
},
{
"path": "bin/linux.tgz",
"hash": "376bc4ad7d197dca898ef06d03df631f5e8e6fe365e398bc2905d2146af6cc01"
},
{
"path": "bin/README",
"hash": "597cdad620bec4e52e0e8adc3cad99de9b3ce45da0dd18e4159e1009c976e957"
},
{
"path": "bin/sarmon_bin_sparc.tgz",
"hash": "73a2b9555ffdd62666063bbfa33520de4ccd68d4b5c63f08a365be5a5f2ea76f"
},
{
"path": "bin/nmonparser.sh",
"hash": "ade7e0ab7889500e443306483f4f3bccc7124d4612a77cf6b1cadf0d218f7abb"
},
{
"path": "bin/create_agent.py",
"hash": "ee69e9471d32d0d5fe2d97915d0064663ad726da74a774ca138d8b30f84fa2fa"
},
{
"path": "bin/metricator_cleaner.py",
"hash": "f5a3478151137d206313de436c7556bbc32b1f18405f30abea016d8a9485c674"
},
{
"path": "bin/metricator_reader.py",
"hash": "1af43497bbf7f4b9c2a637d5677fc0a964d33f4f4578170aa630c56cb90e8577"
},
{
"path": "bin/metricator_reader.pl",
"hash": "582e2f1707f3904999c311fe46dce92ddbe46f2ff7b61eb3a2f5347c989e9f62"
},
{
"path": "bin/linux_binaries_README",
"hash": "1e5003fdf5a0a4b10dbdc5db1e9bc1960520e7ecd804a20f75ee6b299c043007"
},
{
"path": "bin/metricator_cleaner.pl",
"hash": "582c9bc0a35908dcecceeab631f29ff3b4725921b3239204dfb713c5853444b7"
},
{
"path": "bin/lib/aix/README",
"hash": "5931eb43b5a106c340925b610ab2a9357d752440b0c9cb32ec3041c64a92ec3e"
},
{
"path": "bin/lib/aix/Text/CSV_PP.pm",
"hash": "05de57abb2780a6ae9788af4a56f351928d3f070d29e16ee3d0d1c8c20167317"
},
{
"path": "bin/lib/aix/Text/Diff.pm",
"hash": "a3c8d575cfcf07cd50848883103ba4606653818c76d19e2d68ff0e0734966fd8"
},
{
"path": "bin/lib/aix/Text/CSV.pm",
"hash": "4c5e21fa0718c2be432fa53f2c8440e725bfc94f0649539749559807eb133420"
},
{
"path": "bin/lib/aix/Text/Diff/Config.pm",
"hash": "09dac9cd5f3903c1224d8e901a73dca610e79e1243d246fdbca2775658476b63"
},
{
"path": "bin/lib/aix/Text/Diff/Table.pm",
"hash": "c1629847301d6cfaf766a99a64423736684d615f3c9b9cdccddf4988dee9816e"
},
{
"path": "bin/nmon_external_cmd/nmon_external_snap.sh",
"hash": "794971722e75aa462dbe47fa182617c81a197da1ed6fa89398c2d2ffacf8f7cb"
},
{
"path": "bin/nmon_external_cmd/nmon_external_start.sh",
"hash": "a396573e35d651c48e3227556e0b2f45096f2d5cc7784d96cf48f02aab3abe50"
},
{
"path": "bin/pre_action_scripts/README",
"hash": "39bfe5c6f1ecbefaa9741ccfb55e425d5320c9f75334249b068ac822095864f2"
}
]
},
"products": [
{
"platform": "splunk",
"product": "enterprise",
"versions": [
"7.2",
"7.3",
"8.0",
"8.1",
"8.2",
"9.0"
],
"architectures": [
"x86_64"
],
"operatingSystems": [
"windows",
"linux",
"macos",
"freebsd",
"solaris",
"aix"
]
}
]
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.5 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.5 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 4.3 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 4.3 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.3 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.6 KiB

Loading…
Cancel
Save