*** test 2008-07-29 16:17:37.000000000 -0600 --- snmp-graph.properties 2008-07-18 09:59:07.000000000 -0600 *************** *** 1,11 **** ######################################################################### ## G E N E R A L R E P O R T I N G S E T T I N G S ######################################################################### ! command.prefix=@install.rrdtool.bin@ graph - --imgformat PNG --font DEFAULT:7 --font TITLE:10 --start {startTime} --end {endTime} #the command used to get the RRD info ! info.command=@install.rrdtool.bin@ info #The default graph for KSC node and domain reports default.report=mib2.HCbits --- 1,13 ---- + + ######################################################################### ## G E N E R A L R E P O R T I N G S E T T I N G S ######################################################################### ! command.prefix=/usr/bin/rrdtool graph - --imgformat PNG --font DEFAULT:7 --font TITLE:10 --start {startTime} --end {endTime} #the command used to get the RRD info ! info.command=/usr/bin/rrdtool info #The default graph for KSC node and domain reports default.report=mib2.HCbits *************** *** 88,94 **** ipunity.cnfr.outcallsFail, ipunity.cnfr.confRecFail, ipunity.cnfr.tuiLoginFail, \ juniper.bufferPoolUtil, juniper.cpu, juniper.temp, \ mge.ambtemp, mge.ambhumid, mge.current, mge.level, mge.temp, mge.time, mge.voltage, \ ! microsoft.cpuPercentBusy, microsoft.hrDisk2, microsoft.hrDisk3, microsoft.hrDisk4, microsoft.memory, \ mikrotik.temp,mikrotik.volt,mikrotik.wlstatbps,mikrotik.wlstatrssi, \ mikrotik.wlrtabrssi,mikrotik.wlrtabbit,mikrotik.wlrtabbytes,mikrotik.wlrtabpkts, \ netapp.cpu, netapp.fspercent, netapp.fsfiles, \ --- 90,98 ---- ipunity.cnfr.outcallsFail, ipunity.cnfr.confRecFail, ipunity.cnfr.tuiLoginFail, \ juniper.bufferPoolUtil, juniper.cpu, juniper.temp, \ mge.ambtemp, mge.ambhumid, mge.current, mge.level, mge.temp, mge.time, mge.voltage, \ ! microsoft.hrDisk2, microsoft.hrDisk3, microsoft.hrDisk4, microsoft.memory, \ ! 
microsoft.cpuPercentBusy, \ ! windows.cpu, \ mikrotik.temp,mikrotik.volt,mikrotik.wlstatbps,mikrotik.wlstatrssi, \ mikrotik.wlrtabrssi,mikrotik.wlrtabbit,mikrotik.wlrtabbytes,mikrotik.wlrtabpkts, \ netapp.cpu, netapp.fspercent, netapp.fsfiles, \ *************** *** 119,125 **** airport.numClients,iisTraffic,iisRequests,exchangeMessages,exchangeBytes, \ exchangeRecipPartitions,dnsThroughput,mssqlmemory,mssqlusage,mssqlhitratios,mssqllockwaittime, \ windowsCPU,livecommsusers,livecommsmessages,mailmarshal, \ ! alvarion.droppedrec,alvarion.surb-stats,alvarion.lqi-stats # values available to prefab reports: --- 123,130 ---- airport.numClients,iisTraffic,iisRequests,exchangeMessages,exchangeBytes, \ exchangeRecipPartitions,dnsThroughput,mssqlmemory,mssqlusage,mssqlhitratios,mssqllockwaittime, \ windowsCPU,livecommsusers,livecommsmessages,mailmarshal, \ ! alvarion.droppedrec,alvarion.surb-stats,alvarion.lqi-stats, \ ! apache.workers # values available to prefab reports: *************** *** 1479,1482 **** GPRINT:lgmiss:MIN:"Min \\: %8.2lf %s" \ GPRINT:lgmiss:MAX:"Max \\: %8.2lf %s\\n" \ LINE2:hgmiss#cccccc:"Huge Buffer Misses" \ ! GPRINT:hgmiss:AVERAGE:" Avg \\: %8.2lf --- 1484,6190 ---- GPRINT:lgmiss:MIN:"Min \\: %8.2lf %s" \ GPRINT:lgmiss:MAX:"Max \\: %8.2lf %s\\n" \ LINE2:hgmiss#cccccc:"Huge Buffer Misses" \ ! GPRINT:hgmiss:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:hgmiss:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:hgmiss:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.ccmcti.name=Cisco CCM CTI Device Status ! report.cisco.ccmcti.columns=ccmRegCTIDev,ccmUnregCTIDev,ccmRejCTIDev ! report.cisco.ccmcti.type=nodeSnmp ! report.cisco.ccmcti.command=--title="Cisco CCM CTI Device Status" \ ! --lower-limit 0 \ ! DEF:reg={rrd1}:ccmRegCTIDev:AVERAGE \ ! DEF:unreg={rrd2}:ccmUnregCTIDev:AVERAGE \ ! DEF:rej={rrd3}:ccmRejCTIDev:AVERAGE \ ! AREA:reg#00ff00:"Reg. CTI Devices " \ ! GPRINT:reg:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:reg:MIN:"Min \\: %8.2lf %s" \ ! 
GPRINT:reg:MAX:"Max \\: %8.2lf %s\\n" \ ! STACK:unreg#0000ff:"Unreg. CTI Devices" \ ! GPRINT:unreg:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:unreg:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:unreg:MAX:"Max \\: %8.2lf %s\\n" \ ! STACK:rej#ff0000:"Rej. CTI Devices " \ ! GPRINT:rej:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:rej:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:rej:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.ccmgs.name=Cisco CCM Gateway Status ! report.cisco.ccmgs.columns=ccmRegGateways,ccmUnregGateways,ccmRejectedGateways ! report.cisco.ccmgs.type=nodeSnmp ! report.cisco.ccmgs.command=--title="Cisco CCM Gateway Status" \ ! --lower-limit 0 \ ! DEF:reg={rrd1}:ccmRegGateways:AVERAGE \ ! DEF:unreg={rrd2}:ccmUnregGateways:AVERAGE \ ! DEF:rej={rrd3}:ccmRejectedGateways:AVERAGE \ ! AREA:reg#00ff00:"Registered Gateways " \ ! GPRINT:reg:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:reg:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:reg:MAX:"Max \\: %8.2lf %s\\n" \ ! STACK:unreg#0000ff:"Unregistered Gateways" \ ! GPRINT:unreg:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:unreg:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:unreg:MAX:"Max \\: %8.2lf %s\\n" \ ! STACK:rej#ff0000:"Rejected Gateways " \ ! GPRINT:rej:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:rej:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:rej:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.ccmgw.name=Cisco CCM Gateways ! report.cisco.ccmgw.columns=ccmActiveGateways, ccmInActiveGateways ! report.cisco.ccmgw.type=nodeSnmp ! report.cisco.ccmgw.command=--title="Cisco CCM Gateways" \ ! --lower-limit 0 \ ! DEF:actgw={rrd1}:ccmActiveGateways:AVERAGE \ ! DEF:inactgw={rrd2}:ccmInActiveGateways:AVERAGE \ ! CDEF:totalgw=actgw,inactgw,+ \ ! AREA:totalgw#ff0000:"Total Gateways " \ ! GPRINT:totalgw:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:totalgw:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:totalgw:MAX:"Max \\: %8.2lf %s\\n" \ ! AREA:actgw#0000ff:"Active Gateways" \ ! GPRINT:actgw:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:actgw:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:actgw:MAX:"Max \\: %8.2lf %s\\n" ! ! 
report.cisco.ccmmd.name=Cisco CCM Media Device Status ! report.cisco.ccmmd.columns=ccmRegMediaDev,ccmUnregMediaDev,ccmRejMediaDev ! report.cisco.ccmmd.type=nodeSnmp ! report.cisco.ccmmd.command=--title="Cisco CCM Media Device Status" \ ! --lower-limit 0 \ ! DEF:reg={rrd1}:ccmRegMediaDev:AVERAGE \ ! DEF:unreg={rrd2}:ccmUnregMediaDev:AVERAGE \ ! DEF:rej={rrd3}:ccmRejMediaDev:AVERAGE \ ! AREA:reg#00ff00:"Reg. Media Devices " \ ! GPRINT:reg:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:reg:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:reg:MAX:"Max \\: %8.2lf %s\\n" \ ! STACK:unreg#0000ff:"Unreg. Media Devices" \ ! GPRINT:unreg:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:unreg:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:unreg:MAX:"Max \\: %8.2lf %s\\n" \ ! STACK:rej#ff0000:"Rej. Media Devices " \ ! GPRINT:rej:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:rej:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:rej:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.ccmphones.name=Cisco CCM Phones ! report.cisco.ccmphones.columns=ccmActivePhones, ccmInActivePhones ! report.cisco.ccmphones.type=nodeSnmp ! report.cisco.ccmphones.command=--title="Cisco CCM Phones" \ ! --lower-limit 0 \ ! DEF:actph={rrd1}:ccmActivePhones:AVERAGE \ ! DEF:inactph={rrd2}:ccmInActivePhones:AVERAGE \ ! CDEF:totalph=actph,inactph,+ \ ! AREA:totalph#ff0000:"Total Phones " \ ! GPRINT:totalph:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:totalph:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:totalph:MAX:"Max \\: %8.2lf %s\\n" \ ! AREA:actph#0000ff:"Active Phones" \ ! GPRINT:actph:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:actph:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:actph:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.ccmps.name=Cisco CCM Phone Status ! report.cisco.ccmps.columns=ccmRegPhones,ccmUnregPhones,ccmRejectedPhones ! report.cisco.ccmps.type=nodeSnmp ! report.cisco.ccmps.command=--title="Cisco CCM Phone Status" \ ! --lower-limit 0 \ ! DEF:reg={rrd1}:ccmRegPhones:AVERAGE \ ! DEF:unreg={rrd2}:ccmUnregPhones:AVERAGE \ ! DEF:rej={rrd3}:ccmRejectedPhones:AVERAGE \ ! 
AREA:reg#00ff00:"Registered Phones " \ ! GPRINT:reg:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:reg:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:reg:MAX:"Max \\: %8.2lf %s\\n" \ ! STACK:unreg#0000ff:"Unregistered Phones" \ ! GPRINT:unreg:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:unreg:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:unreg:MAX:"Max \\: %8.2lf %s\\n" \ ! STACK:rej#ff0000:"Rejected Phones " \ ! GPRINT:rej:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:rej:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:rej:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.ccmvm.name=Cisco CCM Voice Mail Device Status ! report.cisco.ccmvm.columns=ccmRegVMailDev,ccmUnregVMailDev,ccmRejVMailDev ! report.cisco.ccmvm.type=nodeSnmp ! report.cisco.ccmvm.command=--title="Cisco CCM Voice Mail Device Status" \ ! --lower-limit 0 \ ! DEF:reg={rrd1}:ccmRegVMailDev:AVERAGE \ ! DEF:unreg={rrd2}:ccmUnregVMailDev:AVERAGE \ ! DEF:rej={rrd3}:ccmRejVMailDev:AVERAGE \ ! AREA:reg#00ff00:"Reg. VM Devices " \ ! GPRINT:reg:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:reg:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:reg:MAX:"Max \\: %8.2lf %s\\n" \ ! STACK:unreg#0000ff:"Unreg. VM Devices" \ ! GPRINT:unreg:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:unreg:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:unreg:MAX:"Max \\: %8.2lf %s\\n" \ ! STACK:rej#ff0000:"Rej. VM Devices " \ ! GPRINT:rej:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:rej:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:rej:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.cike-bits.name=Cisco VPN Bits In/Out ! report.cisco.cike-bits.columns=cikeInOctets,cikeOutOctets ! report.cisco.cike-bits.type=nodeSnmp ! report.cisco.cike-bits.command=--title="Cisco VPN Bits In/Out" \ ! DEF:octIn={rrd1}:cikeInOctets:AVERAGE \ ! DEF:octOut={rrd2}:cikeOutOctets:AVERAGE \ ! CDEF:bitsIn=octIn,8,* \ ! CDEF:bitsOut=octOut,8,* \ ! LINE2:bitsIn#0000ff:"Bits In " \ ! GPRINT:bitsIn:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:bitsIn:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:bitsIn:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:bitsOut#ff0000:"Bits Out" \ ! 
GPRINT:bitsOut:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:bitsOut:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:bitsOut:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.cike-dn.name=Cisco VPN Dropped Packets and Notifys ! report.cisco.cike-dn.columns=cikeInDropPkts,cikeInNotifys,cikeOutDropPkts,cikeOutNotifys ! report.cisco.cike-dn.type=nodeSnmp ! report.cisco.cike-dn.command=--title="Cisco VPN Dropped Packets and Notifys" \ ! DEF:inDrop={rrd1}:cikeInDropPkts:AVERAGE \ ! DEF:inNotify={rrd2}:cikeInNotifys:AVERAGE \ ! DEF:outDrop={rrd3}:cikeOutDropPkts:AVERAGE \ ! DEF:outNotify={rrd4}:cikeOutNotifys:AVERAGE \ ! LINE2:inDrop#0000ff:"Drops (In) " \ ! GPRINT:inDrop:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:inDrop:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:inDrop:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:inNotify#ff0000:"Notifys (In) " \ ! GPRINT:inNotify:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:inNotify:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:inNotify:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:outDrop#000080:"Drops (Out) " \ ! GPRINT:outDrop:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:outDrop:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:outDrop:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:outNotify#800000:"Notifys (Out) " \ ! GPRINT:outNotify:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:outNotify:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:outNotify:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.cike-fail.name=Cisco VPN Failures ! report.cisco.cike-fail.columns=cikeSysCapFails,cikeAuthFails,cikeHashValidFails,cikeDecryptFails,cikeNoSaFails ! report.cisco.cike-fail.type=nodeSnmp ! report.cisco.cike-fail.command=--title="Cisco VPN Failures" \ ! DEF:syscap={rrd1}:cikeSysCapFails:AVERAGE \ ! DEF:auth={rrd2}:cikeAuthFails:AVERAGE \ ! DEF:hash={rrd3}:cikeHashValidFails:AVERAGE \ ! DEF:decrypt={rrd4}:cikeDecryptFails:AVERAGE \ ! DEF:nosa={rrd5}:cikeNoSaFails:AVERAGE \ ! LINE2:syscap#0000A0:"System Capacity" \ ! GPRINT:syscap:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:syscap:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:syscap:MAX:"Max \\: %8.2lf %s\\n" \ ! 
LINE2:auth#A00000:"Authentication " \ ! GPRINT:auth:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:auth:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:auth:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:hash#00A000:"Invalid Hash " \ ! GPRINT:hash:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:hash:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:hash:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:decrypt#A0A000:"Decryption " \ ! GPRINT:decrypt:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:decrypt:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:decrypt:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:nosa#00A0A0:"Sec Association" \ ! GPRINT:nosa:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:nosa:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:nosa:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.cike-init.name=Cisco VPN Tunnel Initialization ! report.cisco.cike-init.columns=cikeInitTunnels,cikeInitTunnelFails,cikeRespTunnelFails ! report.cisco.cike-init.type=nodeSnmp ! report.cisco.cike-init.command=--title="Cisco VPN Tunnel Initialization" \ ! DEF:init={rrd1}:cikeInitTunnels:AVERAGE \ ! DEF:lfail={rrd2}:cikeInitTunnelFails:AVERAGE \ ! DEF:rfail={rrd3}:cikeRespTunnelFails:AVERAGE \ ! LINE2:init#0000ff:"Inits " \ ! GPRINT:init:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:init:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:init:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:lfail#ff0000:"Local Failures " \ ! GPRINT:lfail:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:lfail:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:lfail:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:rfail#800000:"Remote Failures" \ ! GPRINT:rfail:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:rfail:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:rfail:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.cike-pkts.name=Cisco VPN Packets ! report.cisco.cike-pkts.columns=cikeInPkts,cikeOutPkts ! report.cisco.cike-pkts.type=nodeSnmp ! report.cisco.cike-pkts.command=--title="Cisco VPN Packets" \ ! DEF:inpkts={rrd1}:cikeInPkts:AVERAGE \ ! DEF:outpkts={rrd2}:cikeOutPkts:AVERAGE \ ! LINE2:inpkts#ff0000:"Packets In " \ ! GPRINT:inpkts:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:inpkts:MIN:"Min \\: %8.2lf %s" \ ! 
GPRINT:inpkts:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:outpkts#0000ff:"Packets Out" \ ! GPRINT:outpkts:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:outpkts:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:outpkts:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.cike-p2ex.name=Cisco VPN Phase 2 Exchanges ! report.cisco.cike-p2ex.columns=cikeInP2Exchgs,cikeOutP2Exchgs ! report.cisco.cike-p2ex.type=nodeSnmp ! report.cisco.cike-p2ex.command=--title="Cisco VPN Phase 2 Exchanges" \ ! DEF:inex={rrd1}:cikeInP2Exchgs:AVERAGE \ ! DEF:outex={rrd2}:cikeOutP2Exchgs:AVERAGE \ ! LINE2:inex#ff0000:"Exchanges (In) " \ ! GPRINT:inex:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:inex:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:inex:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:outex#0000ff:"Exchanges (Out)" \ ! GPRINT:outex:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:outex:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:outex:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.cike-p2exinv.name=Cisco VPN Phase 2 Invalid Exchanges ! report.cisco.cike-p2exinv.columns=cikeInP2ExchgInv,cikeOutP2ExchgInv ! report.cisco.cike-p2exinv.type=nodeSnmp ! report.cisco.cike-p2exinv.command=--title="Cisco VPN Phase 2 Invalid Exchanges" \ ! DEF:inex={rrd1}:cikeInP2ExchgInv:AVERAGE \ ! DEF:outex={rrd2}:cikeOutP2ExchgInv:AVERAGE \ ! LINE2:inex#ff0000:"Invalid (In) " \ ! GPRINT:inex:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:inex:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:inex:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:outex#0000ff:"Invalid (Out)" \ ! GPRINT:outex:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:outex:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:outex:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.cike-p2exrej.name=Cisco VPN Phase 2 Rejected Exchanges ! report.cisco.cike-p2exrej.columns=cikeInP2ExchgRej,cikeOutP2ExchgRej ! report.cisco.cike-p2exrej.type=nodeSnmp ! report.cisco.cike-p2exrej.command=--title="Cisco VPN Phase 2 Rejected Exchanges" \ ! DEF:inex={rrd1}:cikeInP2ExchgRej:AVERAGE \ ! DEF:outex={rrd2}:cikeOutP2ExchgRej:AVERAGE \ ! LINE2:inex#ff0000:"Rejects (In) " \ ! 
GPRINT:inex:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:inex:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:inex:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:outex#0000ff:"Rejects (Out)" \ ! GPRINT:outex:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:outex:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:outex:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.cike-tunnels.name=Cisco VPN Active Tunnels ! report.cisco.cike-tunnels.columns=cikeActiveTunnels ! report.cisco.cike-tunnels.type=nodeSnmp ! report.cisco.cike-tunnels.command=--title="Cisco VPN Active Tunnels" \ ! DEF:acttun={rrd1}:cikeActiveTunnels:AVERAGE \ ! LINE2:acttun#0000ff:"Active Tunnels" \ ! GPRINT:acttun:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:acttun:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:acttun:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.dropsinout.name=Cisco Packets Dropped In/Out ! report.cisco.dropsinout.columns=locIfInQueueDrops, locIfOutQueueDrops ! report.cisco.dropsinout.type=interfaceSnmp ! report.cisco.dropsinout.command=--title="Cisco Packets Dropped In/Out" \ ! --vertical-label="Packets per second" \ ! DEF:dropin={rrd1}:locIfInQueueDrops:AVERAGE \ ! DEF:dropout={rrd2}:locIfOutQueueDrops:AVERAGE \ ! CDEF:dropoutNeg=0,dropout,- \ ! AREA:dropin#00ff00:"In " \ ! GPRINT:dropin:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:dropin:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:dropin:MAX:"Max \\: %8.2lf %s\\n" \ ! AREA:dropoutNeg#0000ff:"Out" \ ! GPRINT:dropout:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:dropout:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:dropout:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.freemem.name=Available Memory (Cisco) ! report.cisco.freemem.columns=freeMem ! report.cisco.freemem.type=nodeSnmp ! report.cisco.freemem.command=--title="Available Memory" \ ! --vertical-label="Bytes" \ ! --base=1024 \ ! DEF:freemem={rrd1}:freeMem:AVERAGE \ ! LINE2:freemem#0000ff:"Available Memory" \ ! GPRINT:freemem:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:freemem:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:freemem:MAX:"Max \\: %8.2lf %s\\n" ! ! 
report.cisco.firewall.name=Cisco PIX Firewall ! report.cisco.firewall.columns=cfwConnectCurr,cfwConnectMax ! report.cisco.firewall.type=nodeSnmp ! report.cisco.firewall.command=--title="Cisco PIX Firewall" \ ! DEF:cCurr={rrd1}:cfwConnectCurr:AVERAGE \ ! DEF:cMax={rrd2}:cfwConnectMax:AVERAGE \ ! LINE2:cCurr#0000ff:"Current Connections" \ ! GPRINT:cCurr:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:cCurr:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:cCurr:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:cMax#ff0000:"Max Connections " \ ! GPRINT:cMax:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:cMax:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:cMax:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.memory.name=Cisco Memory ! report.cisco.memory.columns=ciscoMemoryPoolMax,ciscoMemoryPoolUsed,ciscoMemoryPoolFree ! report.cisco.memory.type=nodeSnmp ! report.cisco.memory.suppress=cisco.freemem ! report.cisco.memory.command=--title="Cisco Memory Pool" \ ! --vertical-label="Bytes" \ ! --lower-limit 0 \ ! --base=1024 \ ! DEF:memMax={rrd1}:ciscoMemoryPoolMax:AVERAGE \ ! DEF:memUsed={rrd2}:ciscoMemoryPoolUsed:AVERAGE \ ! DEF:memFree={rrd3}:ciscoMemoryPoolFree:AVERAGE \ ! CDEF:totalMem=memFree,memUsed,+ \ ! AREA:memUsed#0000ff:"Used " \ ! GPRINT:memUsed:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:memUsed:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:memUsed:MAX:"Max \\: %8.2lf %s\\n" \ ! STACK:memFree#00ff00:"Free " \ ! GPRINT:memFree:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:memFree:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:memFree:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:memMax#ff0000:"Max Used" \ ! GPRINT:memMax:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:memMax:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:memMax:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE1:totalMem#000000:"Total " \ ! GPRINT:totalMem:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:totalMem:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:totalMem:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.pixcpu.name=CPU Utilization (Cisco PIX, Deprecated) ! report.cisco.pixcpu.columns=cpmCPUTotal5min ! report.cisco.pixcpu.type=nodeSnmp ! 
report.cisco.pixcpu.command=--title="CPU Utilization" \ ! DEF:avgBusy={rrd1}:cpmCPUTotal5min:AVERAGE \ ! LINE2:avgBusy#0000ff:"CPU Level" \ ! GPRINT:avgBusy:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:avgBusy:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:avgBusy:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.pixcpurev.name=CPU Utilization (Cisco PIX) ! report.cisco.pixcpurev.columns=cpmCPUTotal5minRev ! report.cisco.pixcpurev.type=nodeSnmp ! report.cisco.pixcpurev.suppress=cisco.pixcpu ! report.cisco.pixcpurev.command=--title="CPU Utilization" \ ! DEF:avgBusy={rrd1}:cpmCPUTotal5min:AVERAGE \ ! LINE2:avgBusy#0000ff:"CPU Level" \ ! GPRINT:avgBusy:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:avgBusy:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:avgBusy:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.pkts.name=Cisco Packets In/Out ! report.cisco.pkts.columns=locIfInPktsSec, locIfOutPktsSec ! report.cisco.pkts.type=interfaceSnmp ! report.cisco.pkts.command=--title="Cisco Packets In/Out" \ ! --vertical-label="Packets per second" \ ! DEF:pktsIn={rrd1}:locIfInPktsSec:AVERAGE \ ! DEF:pktsOut={rrd2}:locIfOutPktsSec:AVERAGE \ ! CDEF:pktsOutNeg=0,pktsOut,- \ ! AREA:pktsIn#00ff00:"In " \ ! GPRINT:pktsIn:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:pktsIn:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:pktsIn:MAX:"Max \\: %8.2lf %s\\n" \ ! AREA:pktsOutNeg#0000ff:"Out" \ ! GPRINT:pktsOut:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:pktsOut:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:pktsOut:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.protoin.name=Protocol Distribution In (Cisco) ! report.cisco.protoin.columns=locIfipInOct, locIfnovellInOct,locIfbridgedInOct ! report.cisco.protoin.type=interfaceSnmp ! report.cisco.protoin.command=--title="Protocol Dist. In" \ ! --upper-limit 100 --rigid \ ! DEF:ipIn={rrd1}:locIfipInOct:AVERAGE \ ! DEF:ipxIn={rrd2}:locIfnovellInOct:AVERAGE \ ! DEF:bridgeIn={rrd3}:locIfbridgedInOct:AVERAGE \ ! CDEF:totalIn=ipIn,ipxIn,+,bridgeIn,+ \ ! CDEF:ipPct=ipIn,totalIn,/,100,* \ ! CDEF:ipxPct=ipxIn,totalIn,/,100,* \ ! 
CDEF:bridgePct=bridgeIn,totalIn,/,100,* \ ! AREA:ipPct#0000ff:"Pct IP In" \ ! GPRINT:ipPct:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:ipPct:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:ipPct:MAX:"Max \\: %8.2lf %s\\n" \ ! STACK:ipxPct#ff0000:"Pct IPX In" \ ! GPRINT:ipxPct:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:ipxPct:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:ipxPct:MAX:"Max \\: %8.2lf %s\\n" \ ! STACK:bridgePct#00ff00:"Pct Bridge In" \ ! GPRINT:bridgePct:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:bridgePct:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:bridgePct:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.protoout.name=Protocol Distribution Out (Cisco) ! report.cisco.protoout.columns=locIfipOutOct, locIfnovellOutOct,locIfbridgedOutOct ! report.cisco.protoout.type=interfaceSnmp ! report.cisco.protoout.command=--title="Protocol Dist. Out" \ ! --upper-limit 100 --rigid \ ! DEF:ipOut={rrd1}:locIfipOutOct:AVERAGE \ ! DEF:ipxOut={rrd2}:locIfnovellOutOct:AVERAGE \ ! DEF:bridgeOut={rrd3}:locIfbridgedOutOct:AVERAGE \ ! CDEF:totalOut=ipOut,ipxOut,+,bridgeOut,+ \ ! CDEF:ipPct=ipOut,totalOut,/,100,* \ ! CDEF:ipxPct=ipxOut,totalOut,/,100,* \ ! CDEF:bridgePct=bridgeOut,totalOut,/,100,* \ ! AREA:ipPct#0000ff:"Pct IP Out" \ ! GPRINT:ipPct:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:ipPct:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:ipPct:MAX:"Max \\: %8.2lf %s\\n" \ ! STACK:ipxPct#ff0000:"Pct IPX Out" \ ! GPRINT:ipxPct:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:ipxPct:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:ipxPct:MAX:"Max \\: %8.2lf %s\\n" \ ! STACK:bridgePct#00ff00:"Pct Bridge Out" \ ! GPRINT:bridgePct:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:bridgePct:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:bridgePct:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.temp.name=Chassis Temperatures (Cisco) ! report.cisco.temp.columns=cvmTempStatusValue ! report.cisco.temp.propertiesValues=cvmTempStatusDescr ! report.cisco.temp.type=ciscoEnvMonTemperatureStatusIndex ! report.cisco.temp.command=--title="Chassis Temperature of {cvmTempStatusDescr}" \ ! 
--vertical-label="celsius degrees" \ ! DEF:temp={rrd1}:cvmTempStatusValue:AVERAGE \ ! LINE2:temp#0000ff:"Temperature" \ ! GPRINT:temp:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:temp:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:temp:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.volt.name=Chassis Voltage (Cisco) ! report.cisco.volt.columns=cvmVoltStatusValue ! report.cisco.volt.propertiesValues=cvmVoltStatusDescr ! report.cisco.volt.type=ciscoEnvMonVoltageStatusIndex ! report.cisco.volt.command=--title="Chassis Voltage of {cvmVoltStatusDescr}" \ ! --vertical-label="millivolts" \ ! DEF:volt={rrd1}:cvmVoltStatusValue:AVERAGE \ ! LINE2:volt#0000ff:"Voltage" \ ! GPRINT:volt:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:volt:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:volt:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.4bb.name=Cisco PIX 4 Byte Blocks ! report.cisco.4bb.columns=cfwBuff4BBmax,cfwBuff4BBcurr,cfwBuff4BBmin ! report.cisco.4bb.type=nodeSnmp ! report.cisco.4bb.command=--title="Cisco PIX 4 Byte Blocks" \ ! DEF:4BBmax={rrd1}:cfwBuff4BBmax:AVERAGE \ ! DEF:4BBcurr={rrd2}:cfwBuff4BBcurr:AVERAGE \ ! DEF:4BBmin={rrd3}:cfwBuff4BBmin:AVERAGE \ ! LINE2:4BBmax#0000ff:"Max" \ ! GPRINT:4BBmax:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:4BBmax:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:4BBmax:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:4BBcurr#ff0000:"Current" \ ! GPRINT:4BBcurr:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:4BBcurr:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:4BBcurr:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:4BBmin#00ff00:"Min" \ ! GPRINT:4BBmin:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:4BBmin:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:4BBmin:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.80bb.name=Cisco PIX 80 Byte Blocks ! report.cisco.80bb.columns=cfwBuff80BBmax,cfwBuff80BBcurr,cfwBuff80BBmin ! report.cisco.80bb.type=nodeSnmp ! report.cisco.80bb.command=--title="Cisco PIX 80 Byte Blocks" \ ! DEF:80BBmax={rrd1}:cfwBuff80BBmax:AVERAGE \ ! DEF:80BBcurr={rrd2}:cfwBuff80BBcurr:AVERAGE \ ! DEF:80BBmin={rrd3}:cfwBuff80BBmin:AVERAGE \ ! 
LINE2:80BBmax#0000ff:"Max" \ ! GPRINT:80BBmax:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:80BBmax:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:80BBmax:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:80BBcurr#ff0000:"Current" \ ! GPRINT:80BBcurr:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:80BBcurr:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:80BBcurr:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:80BBmin#00ff00:"Min" \ ! GPRINT:80BBmin:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:80BBmin:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:80BBmin:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.256bb.name=Cisco PIX 256 Byte Blocks ! report.cisco.256bb.columns=cfwBuff256BBmax,cfwBuff256BBcurr,cfwBuff256BBmin ! report.cisco.256bb.type=nodeSnmp ! report.cisco.256bb.command=--title="Cisco PIX 256 Byte Blocks" \ ! DEF:256BBmax={rrd1}:cfwBuff256BBmax:AVERAGE \ ! DEF:256BBcurr={rrd2}:cfwBuff256BBcurr:AVERAGE \ ! DEF:256BBmin={rrd3}:cfwBuff256BBmin:AVERAGE \ ! LINE2:256BBmax#0000ff:"Max" \ ! GPRINT:256BBmax:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:256BBmax:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:256BBmax:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:256BBcurr#ff0000:"Current" \ ! GPRINT:256BBcurr:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:256BBcurr:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:256BBcurr:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:256BBmin#00ff00:"Min" \ ! GPRINT:256BBmin:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:256BBmin:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:256BBmin:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.1550bb.name=Cisco PIX 1550 Byte Blocks ! report.cisco.1550bb.columns=cfwBuff1550BBmax,cfwBuff1550BBcurr,cfwBuff1550BBmin ! report.cisco.1550bb.type=nodeSnmp ! report.cisco.1550bb.command=--title="Cisco PIX 1550 Byte Blocks" \ ! DEF:1550BBmax={rrd1}:cfwBuff1550BBmax:AVERAGE \ ! DEF:1550BBcurr={rrd2}:cfwBuff1550BBcurr:AVERAGE \ ! DEF:1550BBmin={rrd3}:cfwBuff1550BBmin:AVERAGE \ ! LINE2:1550BBmax#0000ff:"Max" \ ! GPRINT:1550BBmax:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:1550BBmax:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:1550BBmax:MAX:"Max \\: %8.2lf %s\\n" \ ! 
LINE2:1550BBcurr#ff0000:"Current" \ ! GPRINT:1550BBcurr:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:1550BBcurr:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:1550BBcurr:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:1550BBmin#00ff00:"Min" \ ! GPRINT:1550BBmin:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:1550BBmin:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:1550BBmin:MAX:"Max \\: %8.2lf %s\\n" ! ! ###### ! ###### Colubris Reports ! ###### ! ! report.colubris.users.name=Colubris Public Access Users ! report.colubris.users.columns=colPubAccUsrCnt, colPubAccUsrThresh ! report.colubris.users.type=nodeSnmp ! report.colubris.users.command=--title="Colubris Public Access Users" \ ! DEF:usersCount={rrd1}:colPubAccUsrCnt:AVERAGE \ ! DEF:usersThresh={rrd2}:colPubAccUsrThresh:AVERAGE \ ! LINE2:usersCount#000000:"Users" \ ! GPRINT:usersCount:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:usersCount:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:usersCount:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:usersThresh#54a4de:"Thresh" \ ! GPRINT:usersThresh:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:usersThresh:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:usersThresh:MAX:"Max \\: %8.2lf %s\\n" ! ! ###### ! ###### Reports Generated for Empire (CA / Concord SystemEDGE) Agents ! ###### ! report.empire.loadavg.name=SystemEDGE Load Average ! report.empire.loadavg.columns=nhLoadavg1,nhLoadavg5,nhLoadavg15 ! report.empire.loadavg.type=nodeSnmp ! report.empire.loadavg.command=--title="SystemEDGE Load Average" \ ! --lower-limit=0 --vertical-label="System Load" \ ! DEF:load1raw={rrd1}:nhLoadavg1:AVERAGE \ ! DEF:load5raw={rrd2}:nhLoadavg5:AVERAGE \ ! DEF:load15raw={rrd3}:nhLoadavg15:AVERAGE \ ! CDEF:load1=load1raw,100,/ \ ! CDEF:load5=load5raw,100,/ \ ! CDEF:load15=load15raw,100,/ \ ! LINE2:load1#00ff00:"1-Minute" \ ! GPRINT:load1:AVERAGE:"Average\\: %5.2lf" \ ! GPRINT:load1:MIN:"Minimum\\: %5.2lf" \ ! GPRINT:load1:MAX:"Maximum\\: %5.2lf\\n" \ ! LINE2:load5#0000ff:"5-Minute" \ ! GPRINT:load5:AVERAGE:"Average\\: %5.2lf" \ ! GPRINT:load5:MIN:"Minimum\\: %5.2lf" \ ! 
GPRINT:load5:MAX:"Maximum\\: %5.2lf\\n" \ ! LINE2:load15#ff0000:"15-Minute" \ ! GPRINT:load15:AVERAGE:"Average\\: %5.2lf" \ ! GPRINT:load15:MIN:"Minimum\\: %5.2lf" \ ! GPRINT:load15:MAX:"Maximum\\: %5.2lf" ! ! report.empire.pageswaps.name=Page Swaps (SystemEDGE) ! report.empire.pageswaps.columns=nhSwapIn,nhSwapOut ! report.empire.pageswaps.type=nodeSnmp ! report.empire.pageswaps.command=--title="Page Swaps (SystemEDGE)" \ ! --lower-limit=0 --vertical-label="Page Swaps" \ ! DEF:in={rrd1}:nhSwapIn:AVERAGE \ ! DEF:out={rrd2}:nhSwapOut:AVERAGE \ ! CDEF:outinv=out,-1,* \ ! AREA:in#00ff00:"In" \ ! GPRINT:in:AVERAGE:"Average\\: %8.2lf" \ ! GPRINT:in:MIN:"Minimum\\: %8.2lf" \ ! GPRINT:in:MAX:"Maximum\\: %8.2lf\\n" \ ! AREA:outinv#0000ff:"Out" \ ! GPRINT:out:AVERAGE:"Average\\: %8.2lf" \ ! GPRINT:out:MIN:"Minimum\\: %8.2lf" \ ! GPRINT:out:MAX:"Maximum\\: %8.2lf" ! ! report.empire.syscalls.name=System Calls (SystemEDGE) ! report.empire.syscalls.columns=nhSysContext ! report.empire.syscalls.type=nodeSnmp ! report.empire.syscalls.command=--title="System Calls (SystemEDGE)" \ ! --vertical-label="System Calls" \ ! DEF:calls={rrd1}:nhSysContext:AVERAGE \ ! AREA:calls#00ff00:"System Calls" \ ! GPRINT:calls:AVERAGE:"Avg\\: %8.2lf" \ ! GPRINT:calls:MIN:"Min\\: %8.2lf" \ ! GPRINT:calls:MAX:"Max\\: %8.2lf" ! ! report.empire.sysinterrupts.name=System Interrupts (SystemEDGE) ! report.empire.sysinterrupts.columns=nhSysInterrupts ! report.empire.sysinterrupts.type=nodeSnmp ! report.empire.sysinterrupts.command=--title="System Interrupts (SystemEDGE)" \ ! --vertical-label="Interrupts" \ ! DEF:intr={rrd1}:nhSysInterrupts:AVERAGE \ ! AREA:intr#00ff00:"System Interrupts" \ ! GPRINT:intr:AVERAGE:"Avg\\: %8.2lf" \ ! GPRINT:intr:MIN:"Min\\: %8.2lf" \ ! GPRINT:intr:MAX:"Max\\: %8.2lf" ! ! report.empire.cpupct.name=SystemEDGE CPU Totals ! report.empire.cpupct.columns=nhCpuTotalIdlePct,nhCpuTotalUserPct,nhCpuTotalSysPct,nhCpuTotalWaitPct ! report.empire.cpupct.type=nodeSnmp ! 
report.empire.cpupct.command=--title="SystemEDGE CPU Totals" \ ! --lower-limit 0 --upper-limit 100 --rigid --vertical-label="Percent CPU" \ ! DEF:idle={rrd1}:nhCpuTotalIdlePct:AVERAGE \ ! DEF:user={rrd2}:nhCpuTotalUserPct:AVERAGE \ ! DEF:system={rrd3}:nhCpuTotalSysPct:AVERAGE \ ! DEF:wait={rrd4}:nhCpuTotalWaitPct:AVERAGE \ ! AREA:user#0000ff:"User " \ ! GPRINT:user:AVERAGE:"Avg\\: %3.2lf" \ ! GPRINT:user:MIN:"Min\\: %3.2lf" \ ! GPRINT:user:MAX:"Max\\: %3.2lf\\n" \ ! STACK:system#ffff00:"System" \ ! GPRINT:system:AVERAGE:"Avg\\: %3.2lf" \ ! GPRINT:system:MIN:"Min\\: %3.2lf" \ ! GPRINT:system:MAX:"Max\\: %3.2lf\\n" \ ! STACK:wait#ff0000:"Wait " \ ! GPRINT:wait:AVERAGE:"Avg\\: %3.2lf" \ ! GPRINT:wait:MIN:"Min\\: %3.2lf" \ ! GPRINT:wait:MAX:"Max\\: %3.2lf\\n" \ ! STACK:idle#00ff00:"Idle " \ ! GPRINT:idle:AVERAGE:"Avg\\: %3.2lf" \ ! GPRINT:idle:MIN:"Min\\: %3.2lf" \ ! GPRINT:idle:MAX:"Max\\: %3.2lf" ! ! report.empire.cpupctnowait.name=SystemEDGE CPU Totals (no wait) ! report.empire.cpupctnowait.columns=nhCpuTotalIdlePct,nhCpuTotalUserPct,nhCpuTotalSysPct ! report.empire.cpupctnowait.type=nodeSnmp ! report.empire.cpupctnowait.command=--title="SystemEDGE CPU Totals (no wait)" \ ! --lower-limit 0 --upper-limit 100 --rigid --vertical-label="Percent CPU" \ ! DEF:idle={rrd1}:nhCpuTotalIdlePct:AVERAGE \ ! DEF:user={rrd2}:nhCpuTotalUserPct:AVERAGE \ ! DEF:system={rrd3}:nhCpuTotalSysPct:AVERAGE \ ! AREA:user#0000ff:"User " \ ! GPRINT:user:AVERAGE:"Avg\\: %3.2lf" \ ! GPRINT:user:MIN:"Min\\: %3.2lf" \ ! GPRINT:user:MAX:"Max\\: %3.2lf\\n" \ ! STACK:system#ffff00:"System" \ ! GPRINT:system:AVERAGE:"Avg\\: %3.2lf" \ ! GPRINT:system:MIN:"Min\\: %3.2lf" \ ! GPRINT:system:MAX:"Max\\: %3.2lf\\n" \ ! STACK:idle#00ff00:"Idle " \ ! GPRINT:idle:AVERAGE:"Avg\\: %3.2lf" \ ! GPRINT:idle:MIN:"Min\\: %3.2lf" \ ! GPRINT:idle:MAX:"Max\\: %3.2lf" ! ! report.empire.pgmon.procs.name=Process Group Process Count (SystemEDGE) ! report.empire.pgmon.procs.columns=pgmonNumProcs ! 
report.empire.pgmon.procs.type=pgmonIndex ! report.empire.pgmon.procs.command=--title="Process Group Process Count (SystemEDGE)" \ ! --lower-limit=0 --vertical-label="Processes" \ ! DEF:procs={rrd1}:pgmonNumProcs:AVERAGE \ ! AREA:procs#00ff00:"Processes" \ ! GPRINT:procs:AVERAGE:"Avg\\: %8.2lf" \ ! GPRINT:procs:MIN:"Min\\: %8.2lf" \ ! GPRINT:procs:MAX:"Max\\: %8.2lf\\n" ! ! report.empire.pgmon.threads.name=Process Group Thread Count (SystemEDGE) ! report.empire.pgmon.threads.columns=pgmonThreadCount ! report.empire.pgmon.threads.type=pgmonIndex ! report.empire.pgmon.threads.command=--title="Process Group Thread Count (SystemEDGE)" \ ! --lower-limit=0 --vertical-label="Threads" \ ! DEF:threads={rrd1}:pgmonThreadCount:AVERAGE \ ! AREA:threads#00ff00:"Threads" \ ! GPRINT:threads:AVERAGE:"Avg\\: %8.2lf" \ ! GPRINT:threads:MIN:"Min\\: %8.2lf" \ ! GPRINT:threads:MAX:"Max\\: %8.2lf\\n" ! ! report.empire.pgmon.memory.name=Process Group Memory Size (SystemEDGE) ! report.empire.pgmon.memory.columns=pgmonSize,pgmonRSS ! report.empire.pgmon.memory.type=pgmonIndex ! report.empire.pgmon.memory.command=--title="Process Group Memory Size (SystemEDGE)" \ ! --lower-limit=0 --vertical-label="Size" \ ! DEF:size={rrd1}:pgmonSize:AVERAGE \ ! DEF:rss={rrd2}:pgmonRSS:AVERAGE \ ! AREA:size#00ff00:"Memory Size " \ ! GPRINT:size:AVERAGE:"Avg\\: %8.2lf" \ ! GPRINT:size:MIN:"Min\\: %8.2lf" \ ! GPRINT:size:MAX:"Max\\: %8.2lf\\n" \ ! LINE2:rss#0000ff:"Resident Set Size" \ ! GPRINT:rss:AVERAGE:"Avg\\: %8.2lf" \ ! GPRINT:rss:MIN:"Min\\: %8.2lf" \ ! GPRINT:rss:MAX:"Max\\: %8.2lf" ! ! report.empire.pgmon.mempct.name=Process Group Memory Utilization (SystemEDGE) ! report.empire.pgmon.mempct.columns=pgmonMEM ! report.empire.pgmon.mempct.type=pgmonIndex ! report.empire.pgmon.mempct.command=--title="Process Group Memory Utilization (SystemEDGE)" \ ! --lower-limit 0 --upper-limit 100 --rigid --vertical-label="% System Memory" \ ! DEF:util={rrd1}:pgmonMEM:AVERAGE \ ! 
AREA:util#00ff00:"Memory Utilization" \ ! GPRINT:util:AVERAGE:"Avg\\: %8.2lf" \ ! GPRINT:util:MIN:"Min\\: %8.2lf" \ ! GPRINT:util:MAX:"Max\\: %8.2lf" ! ! report.empire.pgmon.pagefaults.name=Process Group Page Faults (SystemEDGE) ! report.empire.pgmon.pagefaults.columns=pgmonMajorPgFlts ! report.empire.pgmon.pagefaults.type=pgmonIndex ! report.empire.pgmon.pagefaults.command=--title="Process Group Page Faults (SystemEDGE)" \ ! --lower-limit 0 --vertical-label="Page Faults" \ ! DEF:pgfaults={rrd1}:pgmonMajorPgFlts:AVERAGE \ ! AREA:pgfaults#ff0000:"Major" \ ! GPRINT:pgfaults:AVERAGE:"Avg\\: %8.2lf" \ ! GPRINT:pgfaults:MIN:"Min\\: %8.2lf" \ ! GPRINT:pgfaults:MAX:"Max\\: %8.2lf" ! ! ###### ! ###### Reports Generated for Extreme Devices ! ###### ! ! report.extreme.currentTemp.name=Temperature ! report.extreme.currentTemp.columns=xtrmCurrentTemp ! report.extreme.currentTemp.type=nodeSnmp ! report.extreme.currentTemp.command=--title="Temperature" \ ! DEF:temp={rrd1}:xtrmCurrentTemp:AVERAGE \ ! LINE2:temp#0000ff:"Temperature (celcius)" \ ! GPRINT:temp:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:temp:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:temp:MAX:"Max \\: %8.2lf %s\\n" ! ! ###### ! ###### Reports for Foundry Devices ! ###### ! ! report.foundry.actualTemp.name=Temperature ! report.foundry.actualTemp.columns=sbChAcTemperature, snChWarnTemp, snChShutTemp ! report.foundry.actualTemp.type=nodeSnmp ! report.foundry.actualTemp.command=--title="Temperature (Celcius)" \ ! DEF:fdrtemp={rrd1}:sbChAcTemperature:AVERAGE \ ! DEF:fdrwtemp={rrd2}:snChWarnTemp:AVERAGE \ ! DEF:fdrstemp={rrd3}:snChShutTemp:AVERAGE \ ! CDEF:fdrtmp=fdrtemp,2,/ \ ! CDEF:fdrwtmp=fdrwtemp,2,/ \ ! CDEF:fdrstmp=fdrstemp,2,/ \ ! LINE2:fdrtmp#00ff00:"Current" \ ! LINE2:fdrwtmp#ffa500:"Warning" \ ! LINE2:fdrstmp#ff0000:"Shutdown" \ ! COMMENT:"\\n" \ ! COMMENT:"\\n" \ ! GPRINT:fdrtmp:AVERAGE:"Temperature\\: Avg \\: %8.2lf %s" \ ! GPRINT:fdrtmp:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:fdrtmp:MAX:"Max \\: %8.2lf %s\\n" ! ! 
report.foundry.cpuUtil1Min.name=CpuUtil1Min ! report.foundry.cpuUtil1Min.columns=snAgGbCpu1MinAvg ! report.foundry.cpuUtil1Min.type=nodeSnmp ! report.foundry.cpuUtil1Min.command=--title="CpuUtil1Min" \ ! DEF:cpuUtil={rrd1}:snAgGbCpu1MinAvg:AVERAGE \ ! LINE2:cpuUtil#0000ff:"Percent Cpu Used" \ ! GPRINT:cpuUtil:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:cpuUtil:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:cpuUtil:MAX:"Max \\: %8.2lf %s\\n" ! ! report.foundry.dynMemUtil.name=MemoryUtil ! report.foundry.dynMemUtil.columns=snAgGblDynMemUtil ! report.foundry.dynMemUtil.type=nodeSnmp ! report.foundry.dynMemUtil.command=--title="MemoryUtil" \ ! DEF:memUtil={rrd1}:snAgGblDynMemUtil:AVERAGE \ ! LINE2:memUtil#0000ff:"Percent Memory Used" \ ! GPRINT:memUtil:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:memUtil:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:memUtil:MAX:"Max \\: %8.2lf %s\\n" ! ! ###### ! ###### Reports for Hewlett-Packard ! ###### ! ! report.hp.swbuffcorrupt.name=BufferCorrupt (Hp Procurve) ! report.hp.swbuffcorrupt.columns=hpMsgBufCorrupt ! report.hp.swbuffcorrupt.type=nodeSnmp ! report.hp.swbuffcorrupt.command=--title="BuffersCorrupt" \ ! DEF:bufcorrupt={rrd1}:hpMsgBufCorrupt:AVERAGE \ ! LINE2:bufcorrupt#0000ff:"Buffers Corrupt" \ ! GPRINT:bufcorrupt:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:bufcorrupt:MIN:" Avg \\: %8.2lf %s" \ ! GPRINT:bufcorrupt:MAX:"Max \\: %8.2lf %s\\n" ! ! report.hp.swbufferfree.name=BufferFree ! report.hp.swbufferfree.columns=hpMsgBufFree ! report.hp.swbufferfree.type=nodeSnmp ! report.hp.swbufferfree.command=--title="BuffersFree" \ ! DEF:buffree={rrd1}:hpMsgBufFree:AVERAGE \ ! LINE2:buffree#0000ff:"Buffers Free" \ ! GPRINT:buffree:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:buffree:MIN:" Avg \\: %8.2lf %s" \ ! GPRINT:buffree:MAX:"Max \\: %8.2lf %s\\n" ! ! report.hp.swbufferinit.name=BufferInit ! report.hp.swbufferinit.columns=hpMsgBufInit ! report.hp.swbufferinit.type=nodeSnmp ! report.hp.swbufferinit.command=--title="BuffersInit" \ ! 
DEF:bufinit={rrd1}:hpMsgBufInit:AVERAGE \ ! LINE2:bufinit#0000ff:"Buffers Init" \ ! GPRINT:bufinit:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:bufinit:MIN:" Avg \\: %8.2lf %s" \ ! GPRINT:bufinit:MAX:"Max \\: %8.2lf %s\\n" ! ! report.hp.swbuffermin.name=BufferMin ! report.hp.swbuffermin.columns=hpMsgBufMin ! report.hp.swbuffermin.type=nodeSnmp ! report.hp.swbuffermin.command=--title="BufferMinimum" \ ! DEF:bufmin={rrd1}:hpMsgBufMin:AVERAGE \ ! LINE2:bufmin#0000ff:"Buffers Minimum" \ ! GPRINT:bufmin:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:bufmin:MIN:" Avg \\: %8.2lf %s" \ ! GPRINT:bufmin:MAX:"Max \\: %8.2lf %s\\n" ! ! report.hp.swbuffermiss.name=BufferMiss ! report.hp.swbuffermiss.columns=hpMsgBufMiss ! report.hp.swbuffermiss.type=nodeSnmp ! report.hp.swbuffermiss.command=--title="BufferMisses" \ ! DEF:bufmiss={rrd1}:hpMsgBufMiss:AVERAGE \ ! LINE2:bufmiss#0000ff:"Buffer Misses" \ ! GPRINT:bufmiss:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:bufmiss:MIN:" Avg \\: %8.2lf %s" \ ! GPRINT:bufmiss:MAX:"Max \\: %8.2lf %s\\n" ! ! report.hp.swbuffersize.name=BufferSize ! report.hp.swbuffersize.columns=hpMsgBufSize ! report.hp.swbuffersize.type=nodeSnmp ! report.hp.swbuffersize.command=--title="BufferSize" \ ! DEF:bufsize={rrd1}:hpMsgBufSize:AVERAGE \ ! LINE2:bufsize#0000ff:"Buffer Size" \ ! GPRINT:bufsize:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:bufsize:MIN:" Avg \\: %8.2lf %s" \ ! GPRINT:bufsize:MAX:"Max \\: %8.2lf %s\\n" ! ! report.hp.swfreemem.name=FreeMemory (Hp Procurve) ! report.hp.swfreemem.columns=hpSwitchFreeMem ! report.hp.swfreemem.type=nodeSnmp ! report.hp.swfreemem.command=--title="Mem Free" \ ! --vertical-label="Bytes" \ ! --base=1024 \ ! DEF:freeMem={rrd1}:hpSwitchFreeMem:AVERAGE \ ! LINE2:freeMem#0000ff:"Free Memory" \ ! GPRINT:freeMem:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:freeMem:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:freeMem:MAX:"Max \\: %8.2lf %s\\n" ! ! report.hp.switchcpu.name=CPU (Hp Procurve) ! report.hp.switchcpu.columns=hpSwitchCpuStat ! 
report.hp.switchcpu.type=nodeSnmp ! report.hp.switchcpu.command=--title="Cpu Util" \ ! DEF:cpuUtil={rrd1}:hpSwitchCpuStat:AVERAGE \ ! LINE2:cpuUtil#0000ff:"Cpu Util" \ ! GPRINT:cpuUtil:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:cpuUtil:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:cpuUtil:MAX:"Max \\: %8.2lf %s\\n" ! ! report.hp.swmemalloc.name=Allocated Memory ! report.hp.swmemalloc.columns=hpSwitchAllocMem ! report.hp.swmemalloc.type=nodeSnmp ! report.hp.swmemalloc.command=--title="Allocated Memory" \ ! DEF:memalloc={rrd1}:hpSwitchAllocMem:AVERAGE \ ! LINE2:memalloc#0000ff:"Allocated Memory" \ ! GPRINT:memalloc:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:memalloc:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:memalloc:MAX:"Max \\: %8.2lf %s\\n" ! ! report.hp.swtotalmem.name=TotalMemory (Hp Procurve) ! report.hp.swtotalmem.columns=hpSwitchTotalMem ! report.hp.swtotalmem.type=nodeSnmp ! report.hp.swtotalmem.command=--title="Mem Used" \ ! --vertical-label="Bytes" \ ! --base=1024 \ ! DEF:ToMem={rrd1}:hpSwitchTotalMem:AVERAGE \ ! LINE2:ToMem#0000ff:"Mem Used" \ ! GPRINT:ToMem:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:ToMem:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:ToMem:MAX:"Max \\: %8.2lf %s\\n" ! ! report.hpux.cpu.name=CPU (hpux) ! report.hpux.cpu.columns=hpuxCpuIdle, hpuxCpuUsedByUsers, hpuxCpuUsedBySys ! report.hpux.cpu.type=nodeSnmp ! report.hpux.cpu.command=--title="CPU" \ ! DEF:idle={rrd1}:hpuxCpuIdle:AVERAGE \ ! DEF:user={rrd2}:hpuxCpuUsedByUsers:AVERAGE \ ! DEF:system={rrd3}:hpuxCpuUsedBySys:AVERAGE \ ! LINE2:idle#0000ff:"Idle" \ ! GPRINT:idle:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:idle:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:idle:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:user#ff0000:"Used By Users" \ ! GPRINT:user:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:user:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:user:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:system#00ff00:"Used By System" \ ! GPRINT:system:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:system:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:system:MAX:"Max \\: %8.2lf %s\\n" ! ! 
report.hpux.memory.name=Memory (hpux) ! report.hpux.memory.columns=hpuxFreeMemory, hpuxPhysMemory ! report.hpux.memory.type=nodeSnmp ! report.hpux.memory.command=--title="Memory" \ ! --vertical-label="Bytes" \ ! --base=1024 \ ! DEF:freeMem={rrd1}:hpuxFreeMemory:AVERAGE \ ! DEF:physMem={rrd2}:hpuxPhysMemory:AVERAGE \ ! LINE2:freeMem#0000ff:"Free Memory" \ ! GPRINT:freeMem:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:freeMem:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:freeMem:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:physMem#ff0000:"Total Memory" \ ! GPRINT:physMem:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:physMem:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:physMem:MAX:"Max \\: %8.2lf %s\\n" ! ! report.hpux.numFsMounted.name=Number of File Systems (hpux) ! report.hpux.numFsMounted.columns=hpuxTotalFsMounted ! report.hpux.numFsMounted.type=nodeSnmp ! report.hpux.numFsMounted.command=--title="Number of File Systems" \ ! DEF:fs={rrd1}:hpuxTotalFsMounted:AVERAGE \ ! LINE2:fs#0000ff:"Number of File Systems" \ ! GPRINT:fs:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:fs:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:fs:MAX:"Max \\: %8.2lf %s\\n" ! ! report.hpux.numProcs.name=Number of Processes (hpux) ! report.hpux.numProcs.columns=hpuxTotalProcesses ! report.hpux.numProcs.type=nodeSnmp ! report.hpux.numProcs.command=--title="Number of Processes" \ ! DEF:procs={rrd1}:hpuxTotalProcesses:AVERAGE \ ! LINE2:procs#0000ff:"Number of Processes" \ ! GPRINT:procs:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:procs:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:procs:MAX:"Max \\: %8.2lf %s\\n" ! ! report.hpux.numUsers.name=Number of Users (hpux) ! report.hpux.numUsers.columns=hpuxNumberOfUsers ! report.hpux.numUsers.type=nodeSnmp ! report.hpux.numUsers.command=--title="Number of Users" \ ! DEF:users={rrd1}:hpuxNumberOfUsers:AVERAGE \ ! LINE2:users#0000ff:"Number of Users" \ ! GPRINT:users:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:users:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:users:MAX:"Max \\: %8.2lf %s\\n" ! ! report.hpux.swap.name=Swap (hpux) ! 
report.hpux.swap.columns=hpuxSwapFree, hpuxSwapEnabled ! report.hpux.swap.type=nodeSnmp ! report.hpux.swap.command=--title="Swap" \ ! DEF:free={rrd1}:hpuxSwapFree:AVERAGE \ ! DEF:enabled={rrd2}:hpuxSwapEnabled:AVERAGE \ ! LINE2:free#0000ff:"Swap Free" \ ! GPRINT:free:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:free:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:free:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:enabled#ff0000:"Swap Enabled" \ ! GPRINT:enabled:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:enabled:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:enabled:MAX:"Max \\: %8.2lf %s\\n" ! ! ##### ! ##### Reports for IP Unity Application Server ! ##### ! ! report.ipunity.as.callsNoApp.name=Calls With No Application (IP Unity) ! report.ipunity.as.callsNoApp.columns=ipuCallsWithNoApp ! report.ipunity.as.callsNoApp.type=nodeSnmp ! report.ipunity.as.callsNoApp.command=--title="IP Unity Calls With No Application" \ ! --vertical-label="Calls" \ ! --units-exponent 0 \ ! DEF:calls={rrd1}:ipuCallsWithNoApp:AVERAGE \ ! AREA:calls#ff0000:"Calls" \ ! GPRINT:calls:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:calls:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:calls:MAX:"Max \\: %8.2lf %s\\n" ! ! report.ipunity.as.callsInProgress.name=Calls In Progress (IP Unity node) ! report.ipunity.as.callsInProgress.columns=asInCallsInPgr,asOutCallsInPgr ! report.ipunity.as.callsInProgress.propertiesValues=asAppName ! report.ipunity.as.callsInProgress.type=asAppIndex ! report.ipunity.as.callsInProgress.command=--title="Calls In Progress (Node): {asAppName}" \ ! --vertical-label="Calls" \ ! DEF:in={rrd1}:asInCallsInPgr:AVERAGE \ ! DEF:out={rrd2}:asOutCallsInPgr:AVERAGE \ ! CDEF:outInv=out,-1,* \ ! AREA:in#00ff00:"In " \ ! GPRINT:in:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:in:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:in:MAX:"Max \\: %8.2lf %s\\n" \ ! AREA:outInv#0000ff:"Out" \ ! GPRINT:out:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:out:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:out:MAX:"Max \\: %8.2lf %s\\n" ! ! 
report.ipunity.as.cluster.callsInProgress.name=Calls In Progress (IP Unity cluster) ! report.ipunity.as.cluster.callsInProgress.columns=asClsInCallsInPgr,asClsOutCallsInPgr ! report.ipunity.as.cluster.callsInProgress.propertiesValues=asAppName ! report.ipunity.as.cluster.callsInProgress.type=asAppIndex ! report.ipunity.as.cluster.callsInProgress.command=--title="Calls In Progress (Cluster): {asAppName}" \ ! --vertical-label="Calls" \ ! --units-exponent 0 \ ! DEF:in={rrd1}:asClsInCallsInPgr:AVERAGE \ ! DEF:out={rrd2}:asClsOutCallsInPgr:AVERAGE \ ! CDEF:outInv=out,-1,* \ ! AREA:in#00ff00:"In " \ ! GPRINT:in:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:in:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:in:MAX:"Max \\: %8.2lf %s\\n" \ ! AREA:outInv#0000ff:"Out" \ ! GPRINT:out:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:out:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:out:MAX:"Max \\: %8.2lf %s\\n" ! ! report.ipunity.as.callsCompleted.name=Calls Completed (IP Unity node) ! report.ipunity.as.callsCompleted.columns=asInCallsCompleted,asOutCallsCompleted ! report.ipunity.as.callsCompleted.propertiesValues=asAppName ! report.ipunity.as.callsCompleted.type=asAppIndex ! report.ipunity.as.callsCompleted.command=--title="Calls Completed (Node): {asAppName}" \ ! --vertical-label="Calls" \ ! --units-exponent 0 \ ! DEF:in={rrd1}:asInCallsCompleted:AVERAGE \ ! DEF:out={rrd2}:asOutCallsCompleted:AVERAGE \ ! CDEF:outInv=out,-1,* \ ! AREA:in#00ff00:"In " \ ! GPRINT:in:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:in:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:in:MAX:"Max \\: %8.2lf %s\\n" \ ! AREA:outInv#0000ff:"Out" \ ! GPRINT:out:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:out:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:out:MAX:"Max \\: %8.2lf %s\\n" ! ! report.ipunity.as.cluster.callsCompleted.name=Calls Completed (IP Unity cluster) ! report.ipunity.as.cluster.callsCompleted.columns=asClsInCallsCmpltd,asClsOutCallsCmpltd ! report.ipunity.as.cluster.callsCompleted.propertiesValues=asAppName ! 
report.ipunity.as.cluster.callsCompleted.type=asAppIndex ! report.ipunity.as.cluster.callsCompleted.command=--title="Calls Completed (Cluster): {asAppName}" \ ! --vertical-label="Calls" \ ! --units-exponent 0 \ ! DEF:in={rrd1}:asClsInCallsCmpltd:AVERAGE \ ! DEF:out={rrd2}:asClsOutCallsCmpltd:AVERAGE \ ! CDEF:outInv=out,-1,* \ ! AREA:in#00ff00:"In " \ ! GPRINT:in:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:in:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:in:MAX:"Max \\: %8.2lf %s\\n" \ ! AREA:outInv#0000ff:"Out" \ ! GPRINT:out:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:out:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:out:MAX:"Max \\: %8.2lf %s\\n" ! ! report.ipunity.as.callsRejected.name=Calls Rejected (IP Unity node) ! report.ipunity.as.callsRejected.columns=asInCallsFwRejected,asInCallsAppReject ! report.ipunity.as.callsRejected.propertiesValues=asAppName ! report.ipunity.as.callsRejected.type=asAppIndex ! report.ipunity.as.callsRejected.command=--title="Calls Rejected: {asAppName}" \ ! --vertical-label="Calls" \ ! --units-exponent 0 \ ! DEF:fw={rrd1}:asInCallsFwRejected:AVERAGE \ ! DEF:app={rrd2}:asInCallsAppReject:AVERAGE \ ! CDEF:total=fw,app,+ \ ! AREA:fw#00ff00:"Framewrk" \ ! GPRINT:fw:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:fw:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:fw:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:app#0000ff:"App " \ ! GPRINT:app:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:app:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:app:MAX:" Max \\: %8.2lf %s\\n" \ ! COMMENT:" Total " \ ! GPRINT:total:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:total:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:total:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.as.cluster.callsRejected.name=Calls Rejected (IP Unity cluster) ! report.ipunity.as.cluster.callsRejected.columns=asClsInCallsRejFw,asClsInCallsRejApp ! report.ipunity.as.cluster.callsRejected.propertiesValues=asAppName ! report.ipunity.as.cluster.callsRejected.type=asAppIndex ! 
report.ipunity.as.cluster.callsRejected.command=--title="Calls Rejected (Cluster): {asAppName}" \ ! --vertical-label="Calls" \ ! --units-exponent 0 \ ! DEF:fw={rrd1}:asClsInCallsRejFw:AVERAGE \ ! DEF:app={rrd2}:asClsInCallsRejApp:AVERAGE \ ! CDEF:total=fw,app,+ \ ! AREA:fw#00ff00:"Framewrk" \ ! GPRINT:fw:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:fw:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:fw:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:app#0000ff:"App " \ ! GPRINT:app:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:app:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:app:MAX:" Max \\: %8.2lf %s\\n" \ ! COMMENT:" Total " \ ! GPRINT:total:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:total:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:total:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.as.callSetups.name=Call Setup Failures (IP Unity node) ! report.ipunity.as.callSetups.columns=asInCallMsFailures,asInCallCaFailures,asOutCallSetupMsFl,asOutCallSetupCaFl ! report.ipunity.as.callSetups.propertiesValues=asAppName ! report.ipunity.as.callSetups.type=asAppIndex ! report.ipunity.as.callSetups.command=--title="Call Setup Failures (Node): {asAppName}" \ ! --vertical-label="Calls" \ ! --units-exponent 0 \ ! DEF:inMsFail={rrd1}:asInCallMsFailures:AVERAGE \ ! DEF:inCaFail={rrd2}:asInCallCaFailures:AVERAGE \ ! DEF:outMsFail={rrd3}:asOutCallSetupMsFl:AVERAGE \ ! DEF:outCaFail={rrd4}:asOutCallSetupCaFl:AVERAGE \ ! CDEF:outTotFail=outMsFail,outCaFail,+ \ ! CDEF:outMsFailInv=outMsFail,-1,* \ ! CDEF:outCaFailInv=outCaFail,-1,* \ ! COMMENT:" In:\\n" \ ! AREA:inMsFail#00ff00:"MediaSvr" \ ! GPRINT:inMsFail:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:inMsFail:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:inMsFail:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:inCaFail#8000ff:"CallAgnt" \ ! GPRINT:inCaFail:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:inCaFail:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:inCaFail:MAX:" Max \\: %8.2lf %s\\n" \ ! COMMENT:" Out:\\n" \ ! AREA:outMsFailInv#00ff00:"MediaSvr" \ ! GPRINT:outMsFail:AVERAGE:" Avg \\: %8.2lf %s" \ ! 
GPRINT:outMsFail:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:outMsFail:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:outCaFailInv#8000ff:"CallAgnt" \ ! GPRINT:outCaFail:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:outCaFail:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:outCaFail:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.as.cluster.callSetups.name=Call Setup Failures (IP Unity cluster) ! report.ipunity.as.cluster.callSetups.columns=asClsInCallSuMsFail,asClsInCallSuCaFail,asClsOutCallSuMsFl,asClsOutCallSuCaFl ! report.ipunity.as.cluster.callSetups.propertiesValues=asAppName ! report.ipunity.as.cluster.callSetups.type=asAppIndex ! report.ipunity.as.cluster.callSetups.command=--title="Call Setup Failures (Cluster): {asAppName}" \ ! --vertical-label="Calls" \ ! --units-exponent 0 \ ! DEF:inMsFail={rrd1}:asClsInCallSuMsFail:AVERAGE \ ! DEF:inCaFail={rrd2}:asClsInCallSuCaFail:AVERAGE \ ! DEF:outMsFail={rrd3}:asClsOutCallSuMsFl:AVERAGE \ ! DEF:outCaFail={rrd4}:asClsOutCallSuCaFl:AVERAGE \ ! CDEF:outTotFail=outMsFail,outCaFail,+ \ ! CDEF:outMsFailInv=outMsFail,-1,* \ ! CDEF:outCaFailInv=outCaFail,-1,* \ ! COMMENT:" In:\\n" \ ! AREA:inMsFail#00ff00:"MediaSvr" \ ! GPRINT:inMsFail:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:inMsFail:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:inMsFail:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:inCaFail#8000ff:"CallAgnt" \ ! GPRINT:inCaFail:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:inCaFail:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:inCaFail:MAX:" Max \\: %8.2lf %s\\n" \ ! COMMENT:" Out:\\n" \ ! AREA:outMsFailInv#80ff00:"MediaSvr" \ ! GPRINT:outMsFail:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:outMsFail:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:outMsFail:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:outCaFailInv#ff0000:"CallAgnt" \ ! GPRINT:outCaFail:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:outCaFail:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:outCaFail:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.as.callTeardowns.name=Call Teardown Failures (IP Unity node) ! 
report.ipunity.as.callTeardowns.columns=asInCallTrdnMsFail,asInCallTrdnCaFail,asOutCallTrdnMsFail,asOutCallTrdnCaFail ! report.ipunity.as.callTeardowns.propertiesValues=asAppName ! report.ipunity.as.callTeardowns.type=asAppIndex ! report.ipunity.as.callTeardowns.command=--title="Call Teardown Failures (Node): {asAppName}" \ ! --vertical-label="Calls" \ ! --units-exponent 0 \ ! DEF:inMsFail={rrd1}:asInCallTrdnMsFail:AVERAGE \ ! DEF:inCaFail={rrd2}:asInCallTrdnCaFail:AVERAGE \ ! DEF:outMsFail={rrd3}:asOutCallTrdnMsFail:AVERAGE \ ! DEF:outCaFail={rrd4}:asOutCallTrdnCaFail:AVERAGE \ ! CDEF:outTotFail=outMsFail,outCaFail,+ \ ! CDEF:outMsFailInv=outMsFail,-1,* \ ! CDEF:outCaFailInv=outCaFail,-1,* \ ! COMMENT:" In:\\n" \ ! AREA:inMsFail#00ff00:"MediaSvr" \ ! GPRINT:inMsFail:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:inMsFail:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:inMsFail:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:inCaFail#8000ff:"CallAgnt" \ ! GPRINT:inCaFail:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:inCaFail:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:inCaFail:MAX:" Max \\: %8.2lf %s\\n" \ ! COMMENT:" Out:\\n" \ ! AREA:outMsFailInv#80ff00:"MediaSvr" \ ! GPRINT:outMsFail:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:outMsFail:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:outMsFail:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:outCaFailInv#ff0000:"CallAgnt" \ ! GPRINT:outCaFail:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:outCaFail:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:outCaFail:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.as.cluster.callTeardowns.name=Call Teardown Failures (IP Unity cluster) ! report.ipunity.as.cluster.callTeardowns.columns=asClsInCallTdMsFail,asClsInCallTdCaFail,asClsOutCallTdMsFl,asClsOutCallTdCaFl ! report.ipunity.as.cluster.callTeardowns.propertiesValues=asAppName ! report.ipunity.as.cluster.callTeardowns.type=asAppIndex ! report.ipunity.as.cluster.callTeardowns.command=--title="Call Teardown Failures (Cluster): {asAppName}" \ ! --vertical-label="Calls" \ ! --units-exponent 0 \ ! 
DEF:inMsFail={rrd1}:asClsInCallTdMsFail:AVERAGE \ ! DEF:inCaFail={rrd2}:asClsInCallTdCaFail:AVERAGE \ ! DEF:outMsFail={rrd3}:asClsOutCallTdMsFl:AVERAGE \ ! DEF:outCaFail={rrd4}:asClsOutCallTdCaFl:AVERAGE \ ! CDEF:outTotFail=outMsFail,outCaFail,+ \ ! CDEF:outMsFailInv=outMsFail,-1,* \ ! CDEF:outCaFailInv=outCaFail,-1,* \ ! COMMENT:" In:\\n" \ ! AREA:inMsFail#00ff00:"MediaSvr" \ ! GPRINT:inMsFail:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:inMsFail:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:inMsFail:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:inCaFail#8000ff:"CallAgnt" \ ! GPRINT:inCaFail:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:inCaFail:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:inCaFail:MAX:" Max \\: %8.2lf %s\\n" \ ! COMMENT:" Out:\\n" \ ! AREA:outMsFailInv#80ff00:"MediaSvr" \ ! GPRINT:outMsFail:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:outMsFail:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:outMsFail:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:outCaFailInv#ff0000:"CallAgnt" \ ! GPRINT:outCaFail:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:outCaFail:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:outCaFail:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.as.callsCancelled.name=Outcalls Cancelled (IP Unity node) ! report.ipunity.as.callsCancelled.columns=asOutCallsFwCancld,asOutCallsAppCancld ! report.ipunity.as.callsCancelled.propertiesValues=asAppName ! report.ipunity.as.callsCancelled.type=asAppIndex ! report.ipunity.as.callsCancelled.command=--title="Outcalls Cancelled (Node): {asAppName}" \ ! --vertical-label="Calls" \ ! --units-exponent 0 \ ! DEF:fw={rrd1}:asOutCallsFwCancld:AVERAGE \ ! DEF:app={rrd2}:asOutCallsAppCancld:AVERAGE \ ! AREA:fw#8000ff:"Framewrk" \ ! GPRINT:fw:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:fw:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:fw:MAX:"Max \\: %8.2lf %s\\n" \ ! STACK:app#808000:"App " \ ! GPRINT:app:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:app:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:app:MAX:"Max \\: %8.2lf %s\\n" ! ! report.ipunity.as.cluster.callsCancelled.name=Outcalls Cancelled (IP Unity cluster) ! 
report.ipunity.as.cluster.callsCancelled.columns=asClsOutCallsFwCncl,asClsOutCallsApCncl ! report.ipunity.as.cluster.callsCancelled.propertiesValues=asAppName ! report.ipunity.as.cluster.callsCancelled.type=asAppIndex ! report.ipunity.as.cluster.callsCancelled.command=--title="Outcalls Cancelled (Cluster): {asAppName}" \ ! --vertical-label="Calls" \ ! --units-exponent 0 \ ! DEF:fw={rrd1}:asClsOutCallsFwCncl:AVERAGE \ ! DEF:app={rrd2}:asClsOutCallsApCncl:AVERAGE \ ! AREA:fw#8000ff:"Framewrk" \ ! GPRINT:fw:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:fw:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:fw:MAX:"Max \\: %8.2lf %s\\n" \ ! STACK:app#808000:"App " \ ! GPRINT:app:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:app:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:app:MAX:"Max \\: %8.2lf %s\\n" ! ! report.ipunity.as.callTransfers.name=Call Transfer Failures (IP Unity node) ! report.ipunity.as.callTransfers.columns=asCallXfersFailApp,asCallXfersFailCa ! report.ipunity.as.callTransfers.propertiesValues=asAppName ! report.ipunity.as.callTransfers.type=asAppIndex ! report.ipunity.as.callTransfers.command=--title="Call Transfer Failures (Node): {asAppName}" \ ! --vertical-label="Calls" \ ! --units-exponent 0 \ ! DEF:app={rrd1}:asCallXfersFailApp:AVERAGE \ ! DEF:ca={rrd2}:asCallXfersFailCa:AVERAGE \ ! AREA:app#8000ff:"App " \ ! GPRINT:app:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:app:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:app:MAX:"Max \\: %8.2lf %s\\n" \ ! STACK:ca#808000:"CallAgnt" \ ! GPRINT:ca:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:ca:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:ca:MAX:"Max \\: %8.2lf %s\\n" ! ! report.ipunity.as.cluster.callTransfers.name=Call Transfer Failures (IP Unity cluster) ! report.ipunity.as.cluster.callTransfers.columns=asClsCallXferFailAp,asClsCallXferFailCa ! report.ipunity.as.cluster.callTransfers.propertiesValues=asAppName ! report.ipunity.as.cluster.callTransfers.type=asAppIndex ! 
report.ipunity.as.cluster.callTransfers.command=--title="Call Transfer Failures (Cluster): {asAppName}" \ ! --vertical-label="Calls" \ ! --units-exponent 0 \ ! DEF:app={rrd1}:asClsCallXferFailAp:AVERAGE \ ! DEF:ca={rrd2}:asClsCallXferFailCa:AVERAGE \ ! AREA:app#8000ff:"App " \ ! GPRINT:app:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:app:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:app:MAX:"Max \\: %8.2lf %s\\n" \ ! STACK:ca#808000:"CallAgnt" \ ! GPRINT:ca:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:ca:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:ca:MAX:"Max \\: %8.2lf %s\\n" ! ! report.ipunity.as.sessTimerExpired.name=Session Timer Expirations (IP Unity node) ! report.ipunity.as.sessTimerExpired.columns=asInCallsTrdnSsTmr,asOutCallsTrdnSsTmr ! report.ipunity.as.sessTimerExpired.propertiesValues=asAppName ! report.ipunity.as.sessTimerExpired.type=asAppIndex ! report.ipunity.as.sessTimerExpired.command=--title="Session Timer Expirations (Node): {asAppName}" \ ! --vertical-label="Calls" \ ! --units-exponent 0 \ ! DEF:in={rrd1}:asInCallsTrdnSsTmr:AVERAGE \ ! DEF:out={rrd2}:asOutCallsTrdnSsTmr:AVERAGE \ ! CDEF:outInv=out,-1,* \ ! AREA:in#ff0000:"In " \ ! GPRINT:in:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:in:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:in:MAX:"Max \\: %8.2lf %s\\n" \ ! AREA:outInv#0000ff:"Out" \ ! GPRINT:out:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:out:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:out:MAX:"Max \\: %8.2lf %s\\n" ! ! report.ipunity.as.hungCallsDropped.name=Hung Calls Dropped (IP Unity) ! report.ipunity.as.hungCallsDropped.columns=asHungCallsDropped,asClsHungCallsDrop ! report.ipunity.as.hungCallsDropped.propertiesValues=asAppName ! report.ipunity.as.hungCallsDropped.type=asAppIndex ! report.ipunity.as.hungCallsDropped.command=--title="Hung Calls Dropped: {asAppName}" \ ! --vertical-label="Calls" \ ! --units-exponent 0 \ ! DEF:node={rrd1}:asHungCallsDropped:AVERAGE \ ! DEF:cluster={rrd2}:asClsHungCallsDrop:AVERAGE \ ! CDEF:clusterInv=cluster,-1,* \ ! AREA:node#ff0000:"Node " \ ! 
GPRINT:node:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:node:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:node:MAX:"Max \\: %8.2lf %s\\n" \ ! AREA:clusterInv#0000ff:"Cluster" \ ! GPRINT:cluster:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:cluster:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:cluster:MAX:"Max \\: %8.2lf %s\\n" ! ! report.ipunity.as.callsNotHandled.name=Calls Not Handled (IP Unity Node) ! report.ipunity.as.callsNotHandled.columns=asInCallsRedir,asInCallsNotResp ! report.ipunity.as.callsNotHandled.propertiesValues=asAppName ! report.ipunity.as.callsNotHandled.type=asAppIndex ! report.ipunity.as.callsNotHandled.command=--title="Calls Not Handled (Node): {asAppName}" \ ! --vertical-label="Calls" \ ! --units-exponent 0 \ ! DEF:redir={rrd1}:asInCallsRedir:AVERAGE \ ! DEF:lock={rrd2}:asInCallsNotResp:AVERAGE \ ! CDEF:lockInv=lock,-1,* \ ! COMMENT:"Calls not handled by the framework either because they were redirected\\n" \ ! AREA:redir#ff0000:"Redir " \ ! GPRINT:redir:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:redir:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:redir:MAX:"Max \\: %8.2lf %s\\n" \ ! COMMENT:"Or because the framework was locked\\n" \ ! AREA:lockInv#0000ff:"Locked" \ ! GPRINT:lock:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:lock:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:lock:MAX:"Max \\: %8.2lf %s\\n" ! ! report.ipunity.sip.requests.name=SIP Requests Summary (IP Unity) ! report.ipunity.sip.requests.columns=ipuSIPSumInReqs,ipuSIPSumOutReqs ! report.ipunity.sip.requests.propertiesValues=applDescription ! report.ipunity.sip.requests.type=applIndex ! report.ipunity.sip.requests.command=--title="SIP Requests Summary ({applDescription})" \ ! --vertical-label="Requests" \ ! DEF:in={rrd1}:ipuSIPSumInReqs:AVERAGE \ ! DEF:out={rrd2}:ipuSIPSumOutReqs:AVERAGE \ ! CDEF:outInv=out,-1,* \ ! AREA:in#00ff00:"In " \ ! GPRINT:in:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:in:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:in:MAX:" Max \\: %8.2lf %s\\n" \ ! AREA:outInv#0000ff:"Out" \ ! GPRINT:out:AVERAGE:" Avg \\: %8.2lf %s" \ ! 
GPRINT:out:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:out:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.sip.responses.name=SIP Responses Summary (IP Unity) ! report.ipunity.sip.responses.columns=ipuSIPSumInResps,ipuSIPSumOutResps ! report.ipunity.sip.responses.propertiesValues=applDescription ! report.ipunity.sip.responses.type=applIndex ! report.ipunity.sip.responses.command=--title="SIP Responses Summary ({applDescription})" \ ! --vertical-label="Responses" \ ! DEF:in={rrd1}:ipuSIPSumInResps:AVERAGE \ ! DEF:out={rrd2}:ipuSIPSumOutResps:AVERAGE \ ! CDEF:outInv=out,-1,* \ ! AREA:in#00ff00:"In " \ ! GPRINT:in:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:in:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:in:MAX:" Max \\: %8.2lf %s\\n" \ ! AREA:outInv#0000ff:"Out" \ ! GPRINT:out:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:out:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:out:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.sip.transactions.name=SIP Transactions Summary (IP Unity) ! report.ipunity.sip.transactions.columns=ipuSIPSumTotXaction ! report.ipunity.sip.transactions.propertiesValues=applDescription ! report.ipunity.sip.transactions.type=applIndex ! report.ipunity.sip.transactions.command=--title="SIP Transactions Summary ({applDescription})" \ ! --vertical-label="Total Transactions" \ ! DEF:total={rrd1}:ipuSIPSumTotXaction:AVERAGE \ ! AREA:total#00ff00:"Total" \ ! GPRINT:total:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:total:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:total:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.sip.methodDetail.name=SIP Method Detail (IP Unity) ! report.ipunity.sip.methodDetail.columns=ipuSIPInviteIn,ipuSIPInviteOut,ipuSIPAckIn,ipuSIPAckOut,ipuSIPByeIn,ipuSIPByeOut,ipuSIPCancelIn,ipuSIPCancelOut,ipuSIPOptionsIn,ipuSIPOptionsOut,ipuSIPRegisterIn,ipuSIPRegisterOut,ipuSIPInfoIn,ipuSIPInfoOut ! report.ipunity.sip.methodDetail.propertiesValues=applDescription ! report.ipunity.sip.methodDetail.type=applIndex ! 
report.ipunity.sip.methodDetail.command=--title="SIP Method Detail ({applDescription})" \ ! --vertical-label=Requests \ ! DEF:inviteIn={rrd1}:ipuSIPInviteIn:AVERAGE \ ! DEF:inviteOut={rrd2}:ipuSIPInviteOut:AVERAGE \ ! CDEF:inviteOutInv=inviteOut,-1,* \ ! DEF:ackIn={rrd3}:ipuSIPAckIn:AVERAGE \ ! DEF:ackOut={rrd4}:ipuSIPAckOut:AVERAGE \ ! CDEF:ackOutInv=ackOut,-1,* \ ! DEF:byeIn={rrd5}:ipuSIPByeIn:AVERAGE \ ! DEF:byeOut={rrd6}:ipuSIPByeOut:AVERAGE \ ! CDEF:byeOutInv=byeOut,-1,* \ ! DEF:cancelIn={rrd7}:ipuSIPCancelIn:AVERAGE \ ! DEF:cancelOut={rrd8}:ipuSIPCancelOut:AVERAGE \ ! CDEF:cancelOutInv=cancelOut,-1,* \ ! DEF:optionsIn={rrd9}:ipuSIPOptionsIn:AVERAGE \ ! DEF:optionsOut={rrd10}:ipuSIPOptionsOut:AVERAGE \ ! CDEF:optionsOutInv=optionsOut,-1,* \ ! DEF:registerIn={rrd11}:ipuSIPRegisterIn:AVERAGE \ ! DEF:registerOut={rrd12}:ipuSIPRegisterOut:AVERAGE \ ! CDEF:registerOutInv=registerOut,-1,* \ ! DEF:infoIn={rrd13}:ipuSIPInfoIn:AVERAGE \ ! DEF:infoOut={rrd14}:ipuSIPInfoOut:AVERAGE \ ! CDEF:infoOutInv=infoOut,-1,* \ ! COMMENT:"In\\n" \ ! AREA:inviteIn#00ff00:"INVITE " \ ! GPRINT:inviteIn:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:inviteIn:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:inviteIn:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:ackIn#0000ff:"ACK " \ ! GPRINT:ackIn:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:ackIn:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:ackIn:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:byeIn#ff0000:"BYE " \ ! GPRINT:byeIn:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:byeIn:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:byeIn:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:cancelIn#00ffff:"CANCEL " \ ! GPRINT:cancelIn:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:cancelIn:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:cancelIn:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:optionsIn#ffff00:"OPTIONS " \ ! GPRINT:optionsIn:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:optionsIn:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:optionsIn:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:registerIn#ff008a:"REGISTER" \ ! 
GPRINT:registerIn:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:registerIn:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:registerIn:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:infoIn#ff7200:"INFO " \ ! GPRINT:infoIn:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:infoIn:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:infoIn:MAX:" Max \\: %8.2lf %s\\n" \ ! COMMENT:"Out\\n" \ ! AREA:inviteOutInv#00ff00:"INVITE " \ ! GPRINT:inviteOut:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:inviteOut:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:inviteOut:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:ackOutInv#0000ff:"ACK " \ ! GPRINT:ackOut:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:ackOut:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:ackOut:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:byeOutInv#ff0000:"BYE " \ ! GPRINT:byeOut:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:byeOut:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:byeOut:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:cancelOutInv#00ffff:"CANCEL " \ ! GPRINT:cancelOut:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:cancelOut:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:cancelOut:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:optionsOutInv#ffff00:"OPTIONS " \ ! GPRINT:optionsOut:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:optionsOut:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:optionsOut:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:registerOutInv#ff008a:"REGISTER" \ ! GPRINT:registerOut:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:registerOut:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:registerOut:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:infoOutInv#ff7200:"INFO " \ ! GPRINT:infoOut:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:infoOut:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:infoOut:MAX:" Max \\: %8.2lf %s\\n" \ ! ! report.ipunity.sip.statusCodeDetail.name=SIP Status Detail (IP Unity) ! report.ipunity.sip.statusCodeDetail.columns=ipuSIPInfoClsIn,ipuSIPInfoClsOut,ipuSIPSuccClsIn,ipuSIPSuccClsOut,ipuSIPRedirClsIn,ipuSIPRedirClsOut,ipuSIPReqFailClsIn,ipuSIPReqFailClsOut,ipuSIPSrvFailClsIn,ipuSIPSrvFailClsOut,ipuSIPGblFailClsIn,ipuSIPGblFailClsOut ! 
report.ipunity.sip.statusCodeDetail.propertiesValues=applDescription ! report.ipunity.sip.statusCodeDetail.type=applIndex ! report.ipunity.sip.statusCodeDetail.command=--title="SIP Status Detail ({applDescription})" \ ! --vertical-label="Responses" \ ! DEF:infoIn={rrd1}:ipuSIPInfoClsIn:AVERAGE \ ! DEF:infoOut={rrd2}:ipuSIPInfoClsOut:AVERAGE \ ! CDEF:infoOutInv=infoOut,-1,* \ ! DEF:succIn={rrd3}:ipuSIPSuccClsIn:AVERAGE \ ! DEF:succOut={rrd4}:ipuSIPSuccClsOut:AVERAGE \ ! CDEF:succOutInv=succOut,-1,* \ ! DEF:redirIn={rrd5}:ipuSIPRedirClsIn:AVERAGE \ ! DEF:redirOut={rrd6}:ipuSIPRedirClsOut:AVERAGE \ ! CDEF:redirOutInv=redirOut,-1,* \ ! DEF:reqFailIn={rrd7}:ipuSIPReqFailClsIn:AVERAGE \ ! DEF:reqFailOut={rrd8}:ipuSIPReqFailClsOut:AVERAGE \ ! CDEF:reqFailOutInv=reqFailOut,-1,* \ ! DEF:srvFailIn={rrd9}:ipuSIPSrvFailClsIn:AVERAGE \ ! DEF:srvFailOut={rrd10}:ipuSIPSrvFailClsOut:AVERAGE \ ! CDEF:srvFailOutInv=srvFailOut,-1,* \ ! DEF:gblFailIn={rrd11}:ipuSIPGblFailClsIn:AVERAGE \ ! DEF:gblFailOut={rrd12}:ipuSIPGblFailClsOut:AVERAGE \ ! CDEF:gblFailOutInv=gblFailOut,-1,* \ ! COMMENT:"In\\n" \ ! AREA:infoIn#ff0000:"Info " \ ! GPRINT:infoIn:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:infoIn:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:infoIn:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:succIn#00ff00:"Success" \ ! GPRINT:succIn:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:succIn:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:succIn:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:redirIn#0000ff:"Redir " \ ! GPRINT:redirIn:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:redirIn:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:redirIn:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:reqFailIn#00ffff:"ReqFail" \ ! GPRINT:reqFailIn:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:reqFailIn:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:reqFailIn:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:srvFailIn#ffff00:"SrvFail" \ ! GPRINT:srvFailIn:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:srvFailIn:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:srvFailIn:MAX:" Max \\: %8.2lf %s\\n" \ ! 
STACK:gblFailIn#ff008a:"GblFail" \ ! GPRINT:gblFailIn:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:gblFailIn:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:gblFailIn:MAX:" Max \\: %8.2lf %s\\n" \ ! COMMENT:"Out\\n" \ ! AREA:infoOutInv#ff0000:"Info " \ ! GPRINT:infoOut:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:infoOut:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:infoOut:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:succOutInv#00ff00:"Success" \ ! GPRINT:succOut:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:succOut:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:succOut:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:redirOutInv#0000ff:"Redir " \ ! GPRINT:redirOut:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:redirOut:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:redirOut:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:reqFailOutInv#00ffff:"ReqFail" \ ! GPRINT:reqFailOut:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:reqFailOut:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:reqFailOut:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:srvFailOutInv#ffff00:"SrvFail" \ ! GPRINT:srvFailOut:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:srvFailOut:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:srvFailOut:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:gblFailOutInv#ff008a:"GblFail" \ ! GPRINT:gblFailOut:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:gblFailOut:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:gblFailOut:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.mgcp.badVersions.name=MGCP Bad Versions (IP Unity) ! report.ipunity.mgcp.badVersions.columns=ipuMGCPInBadVer ! report.ipunity.mgcp.badVersions.type=nodeSnmp ! report.ipunity.mgcp.badVersions.command=--title="MGCP Bad Versions Received" \ ! --vertical-label="Packets" \ ! DEF:total={rrd1}:ipuMGCPInBadVer:AVERAGE \ ! AREA:total#00ff00:"In " \ ! GPRINT:total:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:total:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:total:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.mgcp.unrecognizedPackets.name=MGCP Unrecognized Packets (IP Unity) ! report.ipunity.mgcp.unrecognizedPackets.columns=ipuMGCPUnrecogPkts ! 
report.ipunity.mgcp.unrecognizedPackets.type=nodeSnmp ! report.ipunity.mgcp.unrecognizedPackets.command=--title="MGCP Unrecognized Packets Received" \ ! --vertical-label="Packets" \ ! DEF:total={rrd1}:ipuMGCPUnrecogPkts:AVERAGE \ ! AREA:total#00ff00:"In " \ ! GPRINT:total:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:total:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:total:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.mgcp.commandStats.name=MGCP Command Detail (IP Unity) ! report.ipunity.mgcp.commandStats.columns=ipuMGCPSuccCmdsTx,ipuMGCPSuccCmdsRx,ipuMGCPRetxCmdsTx,ipuMGCPRetxCmdsRx,ipuMGCPBadFmtCmdsRx,ipuMGCPFailedCmdsTx ! report.ipunity.mgcp.commandStats.type=ipuMGCPMsgStatsEntry ! report.ipunity.mgcp.commandStats.command=--title="MGCP Command Detail" \ ! --vertical-label="Commands" \ ! DEF:successIn={rrd1}:ipuMGCPSuccCmdsTx:AVERAGE \ ! DEF:successOut={rrd2}:ipuMGCPSuccCmdsRx:AVERAGE \ ! CDEF:successOutInv=successOut,-1,* \ ! DEF:retranIn={rrd3}:ipuMGCPRetxCmdsTx:AVERAGE \ ! DEF:retranOut={rrd4}:ipuMGCPRetxCmdsRx:AVERAGE \ ! CDEF:retranOutInv=retranOut,-1,* \ ! DEF:badFmtIn={rrd5}:ipuMGCPBadFmtCmdsRx:AVERAGE \ ! DEF:badFmtOut={rrd6}:ipuMGCPFailedCmdsTx:AVERAGE \ ! CDEF:badFmtOutInv=badFmtOut,-1,* \ ! COMMENT:"In\\n" \ ! AREA:successIn#00ff00:"Success" \ ! GPRINT:successIn:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:successIn:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:successIn:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:retranIn#0000ff:"Retry " \ ! GPRINT:retranIn:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:retranIn:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:retranIn:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:badFmtIn#ff0000:"Bad Fmt" \ ! GPRINT:badFmtIn:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:badFmtIn:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:badFmtIn:MAX:" Max \\: %8.2lf %s\\n" \ ! COMMENT:"Out\\n" \ ! AREA:successOutInv#00ff00:"Success" \ ! GPRINT:successOut:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:successOut:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:successOut:MAX:" Max \\: %8.2lf %s\\n" \ ! 
STACK:retranOutInv#0000ff:"Retran " \ ! GPRINT:retranOut:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:retranOut:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:retranOut:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:badFmtOutInv#ff0000:"Bad Fmt" \ ! GPRINT:badFmtOut:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:badFmtOut:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:badFmtOut:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.mgcp.responseStats.name=MGCP Response Detail (IP Unity) ! report.ipunity.mgcp.responseStats.columns=ipuMGCPSuccRespTx,ipuMGCPSuccRespRx,ipuMGCPRetxRespTx,ipuMGCPRetxRespRx,ipuMGCPBadFmtRespRx ! report.ipunity.mgcp.responseStats.type=ipuMGCPMsgStatsEntry ! report.ipunity.mgcp.responseStats.command=--title="MGCP Response Detail" \ ! --vertical-label="Responses" \ ! DEF:successOut={rrd1}:ipuMGCPSuccRespTx:AVERAGE \ ! DEF:successIn={rrd2}:ipuMGCPSuccRespRx:AVERAGE \ ! CDEF:successOutInv=successOut,-1,* \ ! DEF:retranOut={rrd3}:ipuMGCPRetxRespTx:AVERAGE \ ! DEF:retranIn={rrd4}:ipuMGCPRetxRespRx:AVERAGE \ ! CDEF:retranOutInv=retranOut,-1,* \ ! DEF:badFmtIn={rrd5}:ipuMGCPBadFmtRespRx:AVERAGE \ ! COMMENT:"In\\n" \ ! AREA:successIn#00ff00:"Success" \ ! GPRINT:successIn:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:successIn:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:successIn:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:retranIn#0000ff:"Retran " \ ! GPRINT:retranIn:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:retranIn:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:retranIn:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:badFmtIn#ff0000:"Bad Fmt" \ ! GPRINT:badFmtIn:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:badFmtIn:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:badFmtIn:MAX:" Max \\: %8.2lf %s\\n" \ ! COMMENT:"Out\\n" \ ! AREA:successOutInv#00ff00:"Success" \ ! GPRINT:successOut:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:successOut:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:successOut:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:retranOutInv#0000ff:"Retran " \ ! GPRINT:retranOut:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:retranOut:MIN:" Min \\: %8.2lf %s" \ ! 
GPRINT:retranOut:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.fcLoad.name=Framework Load (IP Unity) ! report.ipunity.fcLoad.columns=fcOverloadThresh1,fcOverloadThresh2,fcOverloadThresh3,fcCurrentLoad ! report.ipunity.fcLoad.type=nodeSnmp ! report.ipunity.fcLoad.command=--title="IP Unity Framework Load" \ ! --vertical-label="Load Average" \ ! --units-exponent=0 \ ! DEF:thresh1Raw={rrd1}:fcOverloadThresh1:AVERAGE \ ! CDEF:thresh1=thresh1Raw,1000,/ \ ! DEF:thresh2Raw={rrd2}:fcOverloadThresh2:AVERAGE \ ! CDEF:thresh2=thresh2Raw,1000,/ \ ! DEF:thresh3Raw={rrd3}:fcOverloadThresh3:AVERAGE \ ! CDEF:thresh3=thresh3Raw,1000,/ \ ! DEF:curRaw={rrd4}:fcCurrentLoad:AVERAGE \ ! CDEF:cur=curRaw,1000,/ \ ! AREA:cur#00ff00:"Current " \ ! GPRINT:cur:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:cur:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:cur:MAX:" Max \\: %8.2lf %s\\n" \ ! LINE1:thresh1#0000ff:"Thresh 1" \ ! GPRINT:thresh1:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:thresh1:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:thresh1:MAX:" Max \\: %8.2lf %s\\n" \ ! LINE1:thresh2#000000:"Thresh 2" \ ! GPRINT:thresh2:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:thresh2:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:thresh2:MAX:" Max \\: %8.2lf %s\\n" \ ! LINE1:thresh3#ff0000:"Thresh 3" \ ! GPRINT:thresh3:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:thresh3:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:thresh3:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.um.callsInProgress.name=Calls In Progress (Unified Messaging) ! report.ipunity.um.callsInProgress.columns=umAnsModeCallsInPgr,umUIModeCallsInPgr,umOutCallsInPgr,umAACallsInPgr,umMaxCallsSupported ! report.ipunity.um.callsInProgress.type=nodeSnmp ! report.ipunity.um.callsInProgress.command=--title="Calls In Progress (Unified Messaging)" \ ! --vertical-label="Calls" \ ! DEF:ansMode={rrd1}:umAnsModeCallsInPgr:AVERAGE \ ! DEF:uiMode={rrd2}:umUIModeCallsInPgr:AVERAGE \ ! DEF:outCall={rrd3}:umOutCallsInPgr:AVERAGE \ ! DEF:aaMode={rrd4}:umAACallsInPgr:AVERAGE \ ! 
DEF:maxCalls={rrd5}:umMaxCallsSupported:AVERAGE \ ! AREA:ansMode#00ff00:"Answer " \ ! GPRINT:ansMode:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:ansMode:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:ansMode:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:uiMode#0000ff:"TUI " \ ! GPRINT:uiMode:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:uiMode:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:uiMode:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:outCall#ffff00:"Outcall " \ ! GPRINT:outCall:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:outCall:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:outCall:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:aaMode#00ffff:"AutoAtt " \ ! GPRINT:aaMode:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:aaMode:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:aaMode:MAX:" Max \\: %8.2lf %s\\n" \ ! COMMENT:"Maximum Concurrent Calls Supported\\n" \ ! LINE1:maxCalls#ff0000:"MaxCalls" \ ! GPRINT:maxCalls:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:maxCalls:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:maxCalls:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.um.msgStore.usage.name=Message Store Usage (Unified Messaging) ! report.ipunity.um.msgStore.usage.columns=umMsgStoreDiskUsage ! report.ipunity.um.msgStore.usage.type=nodeSnmp ! report.ipunity.um.msgStore.usage.command=--title="Message Store Usage (Unified Messaging)" \ ! --vertical-label="Percent" \ ! --units-exponent=0 \ ! --upper-limit=100 \ ! DEF:pct={rrd1}:umMsgStoreDiskUsage:AVERAGE \ ! AREA:pct#00ff00:"Usage" \ ! GPRINT:pct:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:pct:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:pct:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.um.msgStore.size.name=Message Store Size (Unified Messaging) ! report.ipunity.um.msgStore.size.columns=umMsgStoreSize ! report.ipunity.um.msgStore.size.type=nodeSnmp ! report.ipunity.um.msgStore.size.command=--title="Message Store Size (Unified Messaging)" \ ! --vertical-label="Gigabytes" \ ! DEF:storeGB={rrd1}:umMsgStoreSize:AVERAGE \ ! AREA:storeGB#00ff00:"Size" \ ! GPRINT:storeGB:AVERAGE:" Avg \\: %8.2lf %s" \ ! 
GPRINT:storeGB:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:storeGB:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.um.sentMsg.size.name=Sent Messages Size (Unified Messaging) ! report.ipunity.um.sentMsg.size.columns=umSentMsgTotalSize ! report.ipunity.um.sentMsg.size.type=nodeSnmp ! report.ipunity.um.sentMsg.size.command=--title="Sent Messages Size (Unified Messaging)" \ ! --vertical-label="Kilobytes" \ ! DEF:size={rrd1}:umSentMsgTotalSize:AVERAGE \ ! AREA:size#00ff00:"Size" \ ! GPRINT:size:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:size:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:size:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.um.callsCompleted.name=Calls Completed (Unified Messaging) ! report.ipunity.um.callsCompleted.columns=umAnsModeCallsCompl,umUIModeCallsCompl ! report.ipunity.um.callsCompleted.type=nodeSnmp ! report.ipunity.um.callsCompleted.command=--title="Calls Completed (Unified Messaging)" \ ! --vertical-label="Calls" \ ! DEF:ansMode={rrd1}:umAnsModeCallsCompl:AVERAGE \ ! DEF:uiMode={rrd2}:umUIModeCallsCompl:AVERAGE \ ! CDEF:uiModeInv=uiMode,-1,* \ ! CDEF:total=ansMode,uiMode,+ \ ! AREA:ansMode#00ff00:"Answer" \ ! GPRINT:ansMode:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:ansMode:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:ansMode:MAX:" Max \\: %8.2lf %s\\n" \ ! AREA:uiModeInv#0000ff:"TUI " \ ! GPRINT:uiMode:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:uiMode:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:uiMode:MAX:" Max \\: %8.2lf %s\\n" \ ! COMMENT:" Total" \ ! GPRINT:total:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:total:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:total:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.um.org.msgUsage.name=Organization Message Usage (Unified Messaging) ! report.ipunity.um.org.msgUsage.columns=umOrgMsgsDiskUsage ! report.ipunity.um.org.msgUsage.propertiesValues=umOrgName ! report.ipunity.um.org.msgUsage.type=umOrgIndex ! report.ipunity.um.org.msgUsage.command=--title="UM Message Usage for {umOrgName}" \ ! --vertical-label="Percent" \ ! --upper-limit=100 \ ! 
DEF:pct={rrd1}:umOrgMsgsDiskUsage:AVERAGE \ ! AREA:pct#00ff00:"Usage" \ ! GPRINT:pct:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:pct:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:pct:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.um.org.msgSize.name=Organization Message Size (Unified Messaging) ! report.ipunity.um.org.msgSize.columns=umOrgMsgsSize ! report.ipunity.um.org.msgSize.propertiesValues=umOrgName ! report.ipunity.um.org.msgSize.type=umOrgIndex ! report.ipunity.um.org.msgSize.command=--title="UM Message Size for {umOrgName}" \ ! --vertical-label="Gigabytes" \ ! DEF:size={rrd1}:umOrgMsgsSize:AVERAGE \ ! AREA:size#00ff00:"Size" \ ! GPRINT:size:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:size:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:size:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.um.pages.name=Pages Pending (Unified Messaging) ! report.ipunity.um.pages.columns=umPagesPending ! report.ipunity.um.pages.type=nodeSnmp ! report.ipunity.um.pages.command=--title="Pages Pending (Unified Messaging)" \ ! --vertical-label="Pages" \ ! DEF:pend={rrd1}:umPagesPending:AVERAGE \ ! AREA:pend#00ff00:"Pending" \ ! GPRINT:pend:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:pend:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:pend:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.um.outCalls.name=Outcall Queue and Failures (Unified Messaging) ! report.ipunity.um.outCalls.columns=umMWNOutCallsPend,umMWNOutCallsFail ! report.ipunity.um.outCalls.type=nodeSnmp ! report.ipunity.um.outCalls.command=--title="Outcall Queue and Failures (Unified Messaging)" \ ! --vertical-label="Outcalls" \ ! DEF:pend={rrd1}:umMWNOutCallsPend:AVERAGE \ ! DEF:fail={rrd2}:umMWNOutCallsFail:AVERAGE \ ! CDEF:failInv=fail,-1,* \ ! AREA:pend#00ff00:"Pending" \ ! GPRINT:pend:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:pend:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:pend:MAX:" Max \\: %8.2lf %s\\n" \ ! AREA:failInv#ff0000:"Failed " \ ! GPRINT:fail:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:fail:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:fail:MAX:" Max \\: %8.2lf %s\\n" ! ! 
report.ipunity.um.mwi.name=MWI Queue and Failures (Unified Messaging) ! report.ipunity.um.mwi.columns=umMWIEventsPend,umMWIEventsFailed ! report.ipunity.um.mwi.type=nodeSnmp ! report.ipunity.um.mwi.command=--title="MWI Queue and Failures (Unified Messaging)" \ ! --vertical-label="MWI Events" \ ! DEF:pend={rrd1}:umMWIEventsPend:AVERAGE \ ! DEF:fail={rrd2}:umMWIEventsFailed:AVERAGE \ ! CDEF:failInv=fail,-1,* \ ! AREA:pend#00ff00:"Pending" \ ! GPRINT:pend:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:pend:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:pend:MAX:" Max \\: %8.2lf %s\\n" \ ! AREA:failInv#ff0000:"Failed " \ ! GPRINT:fail:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:fail:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:fail:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.cnfr.rec.inProgress.name=Conference Recordings In Progress ! report.ipunity.cnfr.rec.inProgress.columns=confRecInProgress ! report.ipunity.cnfr.rec.inProgress.type=nodeSnmp ! report.ipunity.cnfr.rec.inProgress.command=--title="Conference Recordings In Progress" \ ! --vertical-label="Recordings" \ ! DEF:rec={rrd1}:confRecInProgress:AVERAGE \ ! AREA:rec#00ff00:"Total" \ ! GPRINT:rec:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:rec:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:rec:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.cnfr.rec.usage.name=Conference Recordings Disk Usage ! report.ipunity.cnfr.rec.usage.columns=confRecStorageUsage ! report.ipunity.cnfr.rec.usage.type=nodeSnmp ! report.ipunity.cnfr.rec.usage.command=--title="Conference Recordings Disk Usage" \ ! --vertical-label="Percent" \ ! DEF:usage={rrd1}:confRecStorageUsage:AVERAGE \ ! AREA:usage#00ff00:"Usage" \ ! GPRINT:usage:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:usage:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:usage:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.cnfr.rec.avail.name=Conference Recordings Disk Available ! report.ipunity.cnfr.rec.avail.columns=confRecStorageAvail ! report.ipunity.cnfr.rec.avail.type=nodeSnmp ! 
report.ipunity.cnfr.rec.avail.command=--title="Conference Recordings Disk Available" \ ! --vertical-label="Kilobytes" \ ! DEF:avail={rrd1}:confRecStorageAvail:AVERAGE \ ! AREA:avail#00ff00:"Available" \ ! GPRINT:avail:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:avail:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:avail:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.cnfr.slide.usage.name=Conference Slides Disk Usage ! report.ipunity.cnfr.slide.usage.columns=confRecSlideUsage ! report.ipunity.cnfr.slide.usage.type=nodeSnmp ! report.ipunity.cnfr.slide.usage.command=--title="Conference Slides Disk Usage" \ ! --vertical-label="Percent" \ ! DEF:usage={rrd1}:confRecSlideUsage:AVERAGE \ ! AREA:usage#00ff00:"Usage" \ ! GPRINT:usage:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:usage:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:usage:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.cnfr.slide.avail.name=Conference Slides Disk Available ! report.ipunity.cnfr.slide.avail.columns=confRecStorageAvail ! report.ipunity.cnfr.slide.avail.type=nodeSnmp ! report.ipunity.cnfr.slide.avail.command=--title="Conference Slides Disk Available" \ ! --vertical-label="Kilobytes" \ ! DEF:avail={rrd1}:confRecStorageAvail:AVERAGE \ ! AREA:avail#00ff00:"Available" \ ! GPRINT:avail:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:avail:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:avail:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.cnfr.inProgress.name=Conferences In Progress ! report.ipunity.cnfr.inProgress.columns=confInProgress ! report.ipunity.cnfr.inProgress.type=nodeSnmp ! report.ipunity.cnfr.inProgress.command=--title="Conferences In Progress" \ ! --vertical-label="Conferences" \ ! DEF:total={rrd1}:confInProgress:AVERAGE \ ! AREA:total#00ff00:"Total" \ ! GPRINT:total:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:total:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:total:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.cnfr.reserved.name=Reserved Conferences ! report.ipunity.cnfr.reserved.columns=confReservedInUse,confReservedAvail ! 
report.ipunity.cnfr.reserved.type=nodeSnmp ! report.ipunity.cnfr.reserved.command=--title="Reserved Conferences" \ ! --vertical-label="Conferences" \ ! DEF:inUse={rrd1}:confReservedInUse:AVERAGE \ ! DEF:avail={rrd2}:confReservedAvail:AVERAGE \ ! CDEF:availInv=avail,-1,* \ ! AREA:inUse#00ff00:"In Use " \ ! GPRINT:inUse:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:inUse:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:inUse:MAX:" Max \\: %8.2lf %s\\n" \ ! AREA:availInv#0000ff:"Available" \ ! GPRINT:avail:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:avail:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:avail:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.cnfr.adhoc.name=Ad-Hoc Conferences In Use ! report.ipunity.cnfr.adhoc.columns=confAdhocInUse ! report.ipunity.cnfr.adhoc.type=nodeSnmp ! report.ipunity.cnfr.adhoc.command=--title="Ad-Hoc Conferences In Use" \ ! --vertical-label="Conferences" \ ! DEF:total={rrd1}:confAdhocInUse:AVERAGE \ ! AREA:total#00ff00:"Total" \ ! GPRINT:total:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:total:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:total:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.cnfr.participants.name=Conference Participants ! report.ipunity.cnfr.participants.columns=confActiveParticip,confPasvParticip,confAdhocParticip ! report.ipunity.cnfr.participants.type=nodeSnmp ! report.ipunity.cnfr.participants.command=--title="Conference Participants" \ ! --vertical-label="Participants" \ ! DEF:active={rrd1}:confActiveParticip:AVERAGE \ ! DEF:pasv={rrd2}:confPasvParticip:AVERAGE \ ! DEF:adhoc={rrd3}:confAdhocParticip:AVERAGE \ ! AREA:active#00ff00:"Active " \ ! GPRINT:active:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:active:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:active:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:pasv#0000ff:"Passive" \ ! GPRINT:pasv:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:pasv:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:pasv:MAX:" Max \\: %8.2lf %s\\n" \ ! STACK:adhoc#00ffff:"Ad-Hoc" \ ! GPRINT:adhoc:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:adhoc:MIN:" Min \\: %8.2lf %s" \ ! 
GPRINT:adhoc:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.cnfr.legsAvail.name=Conference Legs Available ! report.ipunity.cnfr.legsAvail.columns=confAPLegsAvail,confPPLegsAvail ! report.ipunity.cnfr.legsAvail.type=nodeSnmp ! report.ipunity.cnfr.legsAvail.command=--title="Conference Legs Available" \ ! --vertical-label="Legs" \ ! DEF:active={rrd1}:confAPLegsAvail:AVERAGE \ ! DEF:pasv={rrd2}:confPPLegsAvail:AVERAGE \ ! CDEF:pasvInv=pasv,-1,* \ ! AREA:active#00ff00:"Active " \ ! GPRINT:active:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:active:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:active:MAX:" Max \\: %8.2lf %s\\n" \ ! AREA:pasvInv#0000ff:"Passive" \ ! GPRINT:pasv:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:pasv:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:pasv:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.cnfr.outcallsFail.name=Conference Outcall Failures ! report.ipunity.cnfr.outcallsFail.columns=confOutCallsFail ! report.ipunity.cnfr.outcallsFail.type=nodeSnmp ! report.ipunity.cnfr.outcallsFail.command=--title="Conference Outcall Failures" \ ! --vertical-label="Failures" \ ! DEF:total={rrd1}:confOutCallsFail:AVERAGE \ ! AREA:total#ff0000:"Total" \ ! GPRINT:total:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:total:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:total:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.cnfr.confRecFail.name=Conference Recording Failures ! report.ipunity.cnfr.confRecFail.columns=confRecFailed ! report.ipunity.cnfr.confRecFail.type=nodeSnmp ! report.ipunity.cnfr.confRecFail.command=--title="Conference Recording Failures" \ ! --vertical-label="Failures" \ ! DEF:total={rrd1}:confRecFailed:AVERAGE \ ! AREA:total#ff0000:"Total" \ ! GPRINT:total:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:total:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:total:MAX:" Max \\: %8.2lf %s\\n" ! ! report.ipunity.cnfr.tuiLoginFail.name=Conference TUI Login Failures ! report.ipunity.cnfr.tuiLoginFail.columns=confTUILoginsFail ! report.ipunity.cnfr.tuiLoginFail.type=nodeSnmp ! 
report.ipunity.cnfr.tuiLoginFail.command=--title="Conference TUI Login Failures" \ ! --vertical-label="Failures" \ ! DEF:total={rrd1}:confTUILoginsFail:AVERAGE \ ! AREA:total#ff0000:"Total" \ ! GPRINT:total:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:total:MIN:" Min \\: %8.2lf %s" \ ! GPRINT:total:MAX:" Max \\: %8.2lf %s\\n" ! ! ###### ! ###### Reports for Juniper devices ! ###### ! ! report.juniper.bufferPoolUtil.name=Buffer Utilization (Juniper) ! report.juniper.bufferPoolUtil.columns=juniperBufferFeb,juniperBufferFpc0 ! report.juniper.bufferPoolUtil.type=nodeSnmp ! report.juniper.bufferPoolUtil.command=--title="Buffer Utilization" \ ! DEF:val1={rrd1}:juniperBufferFeb:AVERAGE \ ! DEF:val2={rrd2}:juniperBufferFpc0:AVERAGE \ ! LINE2:val1#0000ff:"Slot FEB Buffer " \ ! GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:val1:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:val1:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:val2#ff0000:"Slot FPC0 Buffer" \ ! GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:val2:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:val2:MAX:"Max \\: %8.2lf %s\\n" ! ! report.juniper.cpu.name=CPU Utilization (Juniper) ! report.juniper.cpu.columns=juniperCpuFeb,juniperCpuFpc0,juniperCpuRe ! report.juniper.cpu.type=nodeSnmp ! report.juniper.cpu.command=--title="CPU Utilization" \ ! DEF:val1={rrd1}:juniperCpuFeb:AVERAGE \ ! DEF:val2={rrd2}:juniperCpuFpc0:AVERAGE \ ! DEF:val3={rrd3}:juniperCpuRe:AVERAGE \ ! LINE2:val1#0000ff:"Slot FEB CPU Level " \ ! GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:val1:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:val1:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:val2#ff0000:"Slot FPC0 CPU Level" \ ! GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:val2:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:val2:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:val3#00ff00:"Routing Engine CPU " \ ! GPRINT:val3:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:val3:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:val3:MAX:"Max \\: %8.2lf %s\\n" ! ! report.juniper.temp.name=Temperature (Juniper) ! 
report.juniper.temp.columns=juniperTempFeb,juniperTempFpc0,juniperTempRe ! report.juniper.temp.type=nodeSnmp ! report.juniper.temp.command=--title="Current Temperature" \ ! DEF:val1={rrd1}:juniperTempFeb:AVERAGE \ ! DEF:val2={rrd2}:juniperTempFpc0:AVERAGE \ ! DEF:val3={rrd3}:juniperTempRe:AVERAGE \ ! LINE2:val1#0000ff:"Slot FEB Temperature " \ ! GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:val1:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:val1:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:val2#ff0000:"Slot FPC0 Temperature" \ ! GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:val2:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:val2:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:val3#00ff00:"Chassis Temperature " \ ! GPRINT:val3:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:val3:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:val3:MAX:"Max \\: %8.2lf %s\\n" ! ! ##### ! ##### Juniper ERX reports ! ##### ! ! report.erx.subscribers.name=Juniper ERX Subscribers ! report.erx.subscribers.columns=juniSubsCount,juniSubsMaxCount,juniSubsPeakCount ! report.erx.subscribers.type=nodeSnmp ! report.erx.subscribers.command=--title="Subscribers" \ ! --vertical-label="Sessions" \ ! DEF:juniSubsCount={rrd1}:juniSubsCount:AVERAGE \ ! DEF:juniSubsMaxCount={rrd2}:juniSubsMaxCount:AVERAGE \ ! DEF:juniSubsPeakCount={rrd3}:juniSubsPeakCount:AVERAGE \ ! LINE2:juniSubsCount#0000ff:"Subscribers" \ ! GPRINT:juniSubsCount:AVERAGE:" Avg \\: %5.3lf%s" \ ! GPRINT:juniSubsCount:MIN:"Min \\: %5.3lf%s" \ ! GPRINT:juniSubsCount:MAX:"Max \\: %5.3lf%s\\n" \ ! LINE2:juniSubsPeakCount#ff0000:"Subscribers" \ ! GPRINT:juniSubsPeakCount:AVERAGE:"Peak \\: %5.3lf%s\\t" \ ! GPRINT:juniSubsMaxCount:MAX:"Licensed Subscribers \\: %5.3lf%s" ! ! report.erx.systemmodule.name=Juniper ERX System Module ! report.erx.systemmodule.columns=juniSMCpuUtilPct, juniSMMemUtilPct ! report.erx.systemmodule.type=juniSystemSlot ! report.erx.systemmodule.propertiesValues=juniSystemModuleDescr ! report.erx.systemmodule.command=--title="Utilization of {juniSystemModuleDescr}" \ ! 
--vertical-label="Percent" \ ! DEF:juniSMCpuUtilPct={rrd1}:juniSMCpuUtilPct:AVERAGE \ ! DEF:juniSMMemUtilPct={rrd2}:juniSMMemUtilPct:AVERAGE \ ! LINE2:juniSMCpuUtilPct#000000:"CPU %" \ ! GPRINT:juniSMCpuUtilPct:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:juniSMCpuUtilPct:MAX:"Max \\: %8.2lf %s" \ ! GPRINT:juniSMCpuUtilPct:AVERAGE:"Avg \\: %8.2lf %s\\n" \ ! LINE2:juniSMMemUtilPct#54a4de:"Mem %" \ ! GPRINT:juniSMMemUtilPct:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:juniSMMemUtilPct:MAX:"Max \\: %8.2lf %s" \ ! GPRINT:juniSMMemUtilPct:AVERAGE:"Avg \\: %8.2lf %s\\n" ! ! report.erx.temp.name=Juniper ERX Temperature ! report.erx.temp.columns=juniSTValue ! report.erx.temp.type=juniSystemTempIndex ! report.erx.temp.command=--title="Temperature" \ ! --vertical-label="Celsius" \ ! DEF:juniSTValue={rrd1}:juniSTValue:AVERAGE \ ! LINE2:juniSTValue#000000:"Temperature" \ ! GPRINT:juniSTValue:AVERAGE:"Avg \\: %3.0f%s" \ ! GPRINT:juniSTValue:MIN:"Min \\: %3.0lf%s" \ ! GPRINT:juniSTValue:MAX:"Max \\: %3.0lf%s\\n" ! ! report.mge.ambtemp.name=MGE UPS Ambient Temperature ! report.mge.ambtemp.columns=mgeEnvAmbientTemp ! report.mge.ambtemp.type=nodeSnmp ! report.mge.ambtemp.command=--title="Ambient Temperature for MGE UPS" \ ! DEF:ambtemp={rrd1}:mgeEnvAmbientTemp:AVERAGE \ ! LINE2:ambtemp#00ff00:"Temperature " \ ! GPRINT:ambtemp:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:ambtemp:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:ambtemp:MAX:"Max \\: %8.2lf %s\\n" ! ! report.mge.ambhumid.name=MGE UPS Ambient Humidity ! report.mge.ambhumid.columns=mgeEnvAmbientHumid ! report.mge.ambhumid.type=nodeSnmp ! report.mge.ambhumid.command=--title="Ambient Humidity for MGE UPS" \ ! DEF:ambhumid={rrd1}:mgeEnvAmbientHumid:AVERAGE \ ! LINE2:ambhumid#00ff00:"Humidity " \ ! GPRINT:ambhumid:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:ambhumid:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:ambhumid:MAX:"Max \\: %8.2lf %s\\n" ! ! report.mge.current.name=MGE UPS Current ! report.mge.current.columns=mgeBattCurrent ! report.mge.current.type=nodeSnmp ! 
report.mge.current.command=--title="Battery Current for MGE UPS" \ ! DEF:current={rrd1}:mgeBattCurrent:AVERAGE \ ! LINE2:current#00ff00:"Current " \ ! GPRINT:current:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:current:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:current:MAX:"Max \\: %8.2lf %s\\n" ! ! report.mge.level.name=MGE UPS Level ! report.mge.level.columns=mgeBattLevel, mgeBattRechargeLvl ! report.mge.level.type=nodeSnmp ! report.mge.level.command=--title="Important Battery Levels for MGE UPS" \ ! DEF:battery={rrd1}:mgeBattLevel:AVERAGE \ ! DEF:recharge={rrd2}:mgeBattRechargeLvl:AVERAGE \ ! LINE2:battery#00ff00:"Battery " \ ! GPRINT:battery:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:battery:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:battery:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:recharge#0000ff:"Recharge " \ ! GPRINT:recharge:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:recharge:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:recharge:MAX:"Max \\: %8.2lf %s\\n" ! ! report.mge.temp.name=MGE UPS Battery Temperature ! report.mge.temp.columns=mgeBattTemperature ! report.mge.temp.type=nodeSnmp ! report.mge.temp.command=--title="Battery Temperature for MGE UPS" \ ! DEF:temp={rrd1}:mgeBattTemperature:AVERAGE \ ! LINE2:temp#00ff00:"Temperature " \ ! GPRINT:temp:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:temp:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:temp:MAX:"Max \\: %8.2lf %s\\n" ! ! report.mge.time.name=MGE UPS Time ! report.mge.time.columns=mgeBattRemainTime, mgeBattRechargeTime, mgeBattFullRechTime ! report.mge.time.type=nodeSnmp ! report.mge.time.command=--title="Important Battery Times for MGE UPS" \ ! DEF:remain={rrd1}:mgeBattRemainTime:AVERAGE \ ! DEF:recharge={rrd2}:mgeBattRechargeTime:AVERAGE \ ! DEF:frecharge={rrd3}:mgeBattFullRechTime:AVERAGE \ ! LINE2:remain#00ff00:"Time Remaining " \ ! GPRINT:remain:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:remain:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:remain:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:recharge#0000ff:"Recharge Time " \ ! GPRINT:recharge:AVERAGE:" Avg \\: %8.2lf %s" \ ! 
GPRINT:recharge:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:recharge:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:frecharge#ff0000:"Full Recharge Time" \ ! GPRINT:frecharge:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:frecharge:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:frecharge:MAX:"Max \\: %8.2lf %s\\n" ! ! report.mge.voltage.name=MGE UPS Voltage ! report.mge.voltage.columns=mgeBattVoltage ! report.mge.voltage.type=nodeSnmp ! report.mge.voltage.command=--title="Battery Voltage for MGE UPS" \ ! DEF:voltage={rrd1}:mgeBattVoltage:AVERAGE \ ! LINE2:voltage#00ff00:"Voltage " \ ! GPRINT:voltage:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:voltage:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:voltage:MAX:"Max \\: %8.2lf %s\\n" ! ! ###### ! ###### Reports Generated for Microsoft Devices ! ###### ! ! report.microsoft.cpuPercentBusy.name=CPU Utilization ! report.microsoft.cpuPercentBusy.columns=cpuPercentBusy ! report.microsoft.cpuPercentBusy.type=nodeSnmp ! report.microsoft.cpuPercentBusy.command=--title="CPU Utilization" \ ! --units-exponent 0 --upper-limit 100 \ ! DEF:cpuPercentBusy={rrd1}:cpuPercentBusy:AVERAGE \ ! LINE2:cpuPercentBusy#0000ff:"Utilization" \ ! GPRINT:cpuPercentBusy:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:cpuPercentBusy:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:cpuPercentBusy:MAX:"Max \\: %8.2lf %s\\n" ! ! report.windows.cpu.name=CPU Utilization ! report.windows.cpu.columns=cpuPercentBusy ! report.windows.cpu.type=hrProcessorIndex ! report.windows.cpu.command=--title="Multiple CPU Utilization" \ ! --units-exponent 0 --upper-limit 100 \ ! DEF:cpuPercentBusy={rrd1}:cpuPercentBusy:AVERAGE \ ! LINE2:cpuPercentBusy#0000ff:"Utilization" \ ! GPRINT:cpuPercentBusy:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:cpuPercentBusy:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:cpuPercentBusy:MAX:"Max \\: %8.2lf %s\\n" ! ! report.microsoft.hrDisk2.name=Disk Space Drive 2 (Windows NT/2000) ! report.microsoft.hrDisk2.columns=hrSizeDisk2, hrUsedDisk2, hrUnitsDisk2 ! report.microsoft.hrDisk2.type=nodeSnmp ! 
report.microsoft.hrDisk2.propertiesValues=hrDescDisk2 ! report.microsoft.hrDisk2.command=--title="Disk Space Drive {hrDescDisk2}" \ ! --vertical-label="Bytes" \ ! --base=1024 \ ! DEF:total={rrd1}:hrSizeDisk2:AVERAGE \ ! DEF:used={rrd2}:hrUsedDisk2:AVERAGE \ ! DEF:units={rrd3}:hrUnitsDisk2:AVERAGE \ ! CDEF:totalBytes=total,units,* \ ! CDEF:freeBytes=total,used,-,units,* \ ! LINE2:totalBytes#0000ff:"Total Space" \ ! GPRINT:totalBytes:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:totalBytes:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:totalBytes:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:freeBytes#ff0000:"Free Space " \ ! GPRINT:freeBytes:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:freeBytes:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:freeBytes:MAX:"Max \\: %8.2lf %s\\n" ! ! report.microsoft.hrDisk3.name=Disk Space Drive 3 (Windows NT/2000) ! report.microsoft.hrDisk3.columns=hrSizeDisk3, hrUsedDisk3, hrUnitsDisk3 ! report.microsoft.hrDisk3.type=nodeSnmp ! report.microsoft.hrDisk3.propertiesValues=hrDescDisk3 ! report.microsoft.hrDisk3.command=--title="Disk Space Drive {hrDescDisk3}" \ ! --vertical-label="Bytes" \ ! --base=1024 \ ! DEF:total={rrd1}:hrSizeDisk3:AVERAGE \ ! DEF:used={rrd2}:hrUsedDisk3:AVERAGE \ ! DEF:units={rrd3}:hrUnitsDisk3:AVERAGE \ ! CDEF:totalBytes=total,units,* \ ! CDEF:freeBytes=total,used,-,units,* \ ! LINE2:totalBytes#0000ff:"Total Space" \ ! GPRINT:totalBytes:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:totalBytes:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:totalBytes:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:freeBytes#ff0000:"Free Space " \ ! GPRINT:freeBytes:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:freeBytes:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:freeBytes:MAX:"Max \\: %8.2lf %s\\n" ! ! report.microsoft.hrDisk4.name=Disk Space Drive 4 (Windows NT/2000) ! report.microsoft.hrDisk4.columns=hrSizeDisk4, hrUsedDisk4, hrUnitsDisk4 ! report.microsoft.hrDisk4.type=nodeSnmp ! report.microsoft.hrDisk4.propertiesValues=hrDescDisk4 ! report.microsoft.hrDisk4.command=--title="Disk Space Drive {hrDescDisk4}" \ ! 
--vertical-label="Bytes" \ ! --base=1024 \ ! DEF:total={rrd1}:hrSizeDisk4:AVERAGE \ ! DEF:used={rrd2}:hrUsedDisk4:AVERAGE \ ! DEF:units={rrd3}:hrUnitsDisk4:AVERAGE \ ! CDEF:totalBytes=total,units,* \ ! CDEF:freeBytes=total,used,-,units,* \ ! LINE2:totalBytes#0000ff:"Total Space" \ ! GPRINT:totalBytes:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:totalBytes:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:totalBytes:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:freeBytes#ff0000:"Free Space " \ ! GPRINT:freeBytes:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:freeBytes:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:freeBytes:MAX:"Max \\: %8.2lf %s\\n" ! ! report.microsoft.memory.name=Total Memory ! report.microsoft.memory.columns=memorySize ! report.microsoft.memory.type=nodeSnmp ! report.microsoft.memory.command=--title="Total Memory" \ ! --vertical-label="Bytes" \ ! --base=1024 \ ! DEF:memorySize={rrd1}:memorySize:AVERAGE \ ! CDEF:sizeBytes=memorySize,1024,* \ ! LINE2:sizeBytes#0000ff:"Memory" \ ! GPRINT:sizeBytes:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:sizeBytes:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:sizeBytes:MAX:"Max \\: %8.2lf %s\\n" ! ! ###### ! ###### Reports Generated from Network Appliance agents ! ###### ! ! report.netapp.cpu.name=Netapp CPU Usage ! report.netapp.cpu.columns=cpuBusyTimePct, cpuIdleTimePct ! report.netapp.cpu.type=nodeSnmp ! report.netapp.cpu.command=--title="Network Appliance CPU Use" \ ! DEF:cpubusy={rrd1}:cpuBusyTimePct:AVERAGE \ ! DEF:cpuidle={rrd2}:cpuIdleTimePct:AVERAGE \ ! AREA:cpubusy#ff0000:"Busy" \ ! GPRINT:cpubusy:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:cpubusy:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:cpubusy:MAX:"Max \\: %10.2lf %s\\n" \ ! STACK:cpuidle#00ff00:"Idle" \ ! GPRINT:cpuidle:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:cpuidle:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:cpuidle:MAX:"Max \\: %10.2lf %s\\n" ! ! report.netapp.fsfiles.name=Netapp Filesystem Files ! report.netapp.fsfiles.columns=fsMaxfilesAvail, fsMaxfilesUsed, fsMaxfilesPossible ! report.netapp.fsfiles.type=nodeSnmp ! 
report.netapp.fsfiles.command=--title="Network Appliance Filesystem Max Files" \ ! DEF:avail={rrd1}:fsMaxfilesAvail:AVERAGE \ ! DEF:used={rrd2}:fsMaxfilesUsed:AVERAGE \ ! DEF:poss={rrd3}:fsMaxfilesPossible:AVERAGE \ ! AREA:poss#54a4de:"Poss." \ ! GPRINT:poss:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:poss:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:poss:MAX:"Max \\: %10.2lf %s\\n" \ ! LINE2:avail#000000:"Avail" \ ! GPRINT:avail:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:avail:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:avail:MAX:"Max \\: %10.2lf %s\\n" \ ! LINE2:used#cc0000:"Used " \ ! GPRINT:used:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:used:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:used:MAX:"Max \\: %10.2lf %s\\n" ! ! report.netapp.fspercent.name=Netapp Filesystem Space/Inodes ! report.netapp.fspercent.columns=fsMaxUsedBytesPct, fsMaxUsedInodesPct ! report.netapp.fspercent.type=nodeSnmp ! report.netapp.fspercent.command=--title="Network Appliance Filesystem Max Space/Inodes" \ ! DEF:space={rrd1}:fsMaxUsedBytesPct:AVERAGE \ ! DEF:inodes={rrd2}:fsMaxUsedInodesPct:AVERAGE \ ! LINE2:space#000000:"Space " \ ! GPRINT:space:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:space:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:space:MAX:"Max \\: %10.2lf %s\\n" \ ! LINE2:inodes#54a4de:"Inodes" \ ! GPRINT:inodes:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:inodes:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:inodes:MAX:"Max \\: %10.2lf %s\\n" ! ! report.netapp.servercall.name=NetApp Server Calls ! report.netapp.servercall.columns=naMiscLowNfsOps, naMiscLowCifsOps ! report.netapp.servercall.type=nodeSnmp ! report.netapp.servercall.command=--title="NetApp Server Calls" \ ! --vertical-label operations \ ! DEF:naMiscLowNfsOps={rrd1}:naMiscLowNfsOps:AVERAGE \ ! DEF:naMiscLowCifsOps={rrd2}:naMiscLowCifsOps:AVERAGE \ ! LINE1:naMiscLowNfsOps#0000ff:"NFS " \ ! GPRINT:naMiscLowNfsOps:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:naMiscLowNfsOps:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:naMiscLowNfsOps:MAX:"Max \\: %8.2lf %s\\n" \ ! 
LINE1:naMiscLowCifsOps#00ff00:"CIFS" \ ! GPRINT:naMiscLowCifsOps:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:naMiscLowCifsOps:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:naMiscLowCifsOps:MAX:"Max \\: %8.2lf %s\\n" ! ! report.netapp.cifsrw.name=NetApp CIFS rw ! report.netapp.cifsrw.columns=naCifsReads, naCifsWrites ! report.netapp.cifsrw.type=nodeSnmp ! report.netapp.cifsrw.command=--title="NetApp CIFS Read/Write" \ ! --vertical-label operations \ ! DEF:naCifsReads={rrd1}:naCifsReads:AVERAGE \ ! DEF:naCifsWrites={rrd2}:naCifsWrites:AVERAGE \ ! LINE1:naCifsReads#0000ff:"CIFS reads " \ ! GPRINT:naCifsReads:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:naCifsReads:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:naCifsReads:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE1:naCifsWrites#ff0000:"CIFS writes" \ ! GPRINT:naCifsWrites:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:naCifsWrites:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:naCifsWrites:MAX:"Max \\: %8.2lf %s\\n" ! ! report.netapp.nfsv2rw.name=Network Appliance NFS v2 rw ! report.netapp.nfsv2rw.columns=naV2cReads, naV2cWrcaches, naV2cWrites ! report.netapp.nfsv2rw.type=nodeSnmp ! report.netapp.nfsv2rw.command=--title="NetApp NFS v2 Read/Write" \ ! --vertical-label operations \ ! DEF:naV2cReads={rrd1}:naV2cReads:AVERAGE \ ! DEF:naV2cWrcaches={rrd2}:naV2cWrcaches:AVERAGE \ ! DEF:naV2cWrites={rrd3}:naV2cWrites:AVERAGE \ ! LINE1:naV2cReads#0000ff:"NFS v2 reads " \ ! GPRINT:naV2cReads:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:naV2cReads:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:naV2cReads:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE1:naV2cWrcaches#00ff00:"NFS v2 cache writes" \ ! GPRINT:naV2cWrcaches:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:naV2cWrcaches:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:naV2cWrcaches:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE1:naV2cWrites#ff0000:"NFS v2 writes " \ ! GPRINT:naV2cWrites:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:naV2cWrites:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:naV2cWrites:MAX:"Max \\: %8.2lf %s\\n" ! ! report.netapp.nfsv3rw.name=Network Appliance NFS v3 rw ! 
report.netapp.nfsv3rw.columns=naV3cReads, naV3cWrites ! report.netapp.nfsv3rw.type=nodeSnmp ! report.netapp.nfsv3rw.command=--title="NetApp NFS v3 Read/Write" \ ! --vertical-label operations \ ! DEF:naV3cReads={rrd1}:naV3cReads:AVERAGE \ ! DEF:naV3cWrites={rrd2}:naV3cWrites:AVERAGE \ ! LINE1:naV3cReads#0000ff:"NFS v3 reads " \ ! GPRINT:naV3cReads:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:naV3cReads:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:naV3cReads:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE1:naV3cWrites#ff0000:"NFS v3 writes" \ ! GPRINT:naV3cWrites:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:naV3cWrites:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:naV3cWrites:MAX:"Max \\: %8.2lf %s\\n" ! ! report.netapp.fspercentkbyte.name=Netapp dfEntry Filesystem percent usage ! report.netapp.fspercentkbyte.columns=naDfPctKB ! report.netapp.fspercentkbyte.type=naDfIndex ! report.netapp.fspercentkbyte.propertiesValues=naDfFileSys ! report.netapp.fspercentkbyte.command=--title="NetApp {naDfFileSys} percent usage" \ ! --lower-limit 0 --upper-limit 100 --rigid \ ! DEF:naDfPctKB={rrd1}:naDfPctKB:AVERAGE \ ! LINE2:naDfPctKB#0000ff:"percent" \ ! GPRINT:naDfPctKB:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:naDfPctKB:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:naDfPctKB:MAX:"Max \\: %8.2lf %s\\n" ! ! report.netapp.fskbyte.name=Netapp dfEntry Filesystem usage ! report.netapp.fskbyte.columns=naDfLowUsedKB, naDfLowFreeKB ! report.netapp.fskbyte.type=naDfIndex ! report.netapp.fskbyte.propertiesValues=naDfFileSys ! report.netapp.fskbyte.command=--title="NetApp {naDfFileSys} usage" \ ! --vertical-label="Bytes" \ ! --base=1024 \ ! DEF:usedKB={rrd1}:naDfLowUsedKB:AVERAGE \ ! DEF:freeKB={rrd2}:naDfLowFreeKB:AVERAGE \ ! CDEF:usedBytes=usedKB,1024,* \ ! CDEF:freeBytes=freeKB,1024,* \ ! AREA:usedBytes#ff0000:"Used" \ ! GPRINT:usedBytes:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:usedBytes:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:usedBytes:MAX:"Max \\: %8.2lf %s\\n" \ ! STACK:freeBytes#00ff00:"Free" \ ! 
GPRINT:freeBytes:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:freeBytes:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:freeBytes:MAX:"Max \\: %8.2lf %s\\n" ! ! report.netapp.fspercentinode.name=Netapp dfEntry Filesystem percent inodes ! report.netapp.fspercentinode.columns=naDfInodePct ! report.netapp.fspercentinode.type=naDfIndex ! report.netapp.fspercentinode.propertiesValues=naDfFileSys ! report.netapp.fspercentinode.command=--title="NetApp {naDfFileSys} percent inodes" \ ! --lower-limit 0 --upper-limit 100 --rigid \ ! DEF:pctInode={rrd1}:naDfInodePct:AVERAGE \ ! LINE2:pctInode#0000ff:"percent" \ ! GPRINT:pctInode:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:pctInode:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:pctInode:MAX:"Max \\: %8.2lf %s\\n" ! ! report.netapp.fsinode.name=Netapp dfEntry Filesystem inodes ! report.netapp.fsinode.columns=naDfInodeUsed, naDfInodeFree ! report.netapp.fsinode.type=naDfIndex ! report.netapp.fsinode.propertiesValues=naDfFileSys ! report.netapp.fsinode.command=--title="NetApp {naDfFileSys} inodes" \ ! DEF:usedInode={rrd1}:naDfInodeUsed:AVERAGE \ ! DEF:freeInode={rrd2}:naDfInodeFree:AVERAGE \ ! AREA:usedInode#ff0000:"Used" \ ! GPRINT:usedInode:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:usedInode:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:usedInode:MAX:"Max \\: %8.2lf %s\\n" \ ! STACK:freeInode#00ff00:"Free" \ ! GPRINT:freeInode:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:freeInode:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:freeInode:MAX:"Max \\: %8.2lf %s\\n" ! ! report.netapp.fsfile.name=Netapp dfEntry Filesystem files ! report.netapp.fsfile.columns=naDfMaxFileUsed,naDfMaxFileAvail ! report.netapp.fsfile.type=naDfIndex ! report.netapp.fsfile.propertiesValues=naDfFileSys ! report.netapp.fsfile.command=--title="NetApp {naDfFileSys} files" \ ! DEF:usedFiles={rrd1}:naDfMaxFileUsed:AVERAGE \ ! DEF:maxAvailFiles={rrd2}:naDfMaxFileAvail:AVERAGE \ ! LINE2:usedFiles#ff0000:"Used" \ ! GPRINT:usedFiles:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:usedFiles:MIN:"Min \\: %8.2lf %s" \ ! 
GPRINT:usedFiles:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:maxAvailFiles#000000:"Max " \ ! GPRINT:maxAvailFiles:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:maxAvailFiles:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:maxAvailFiles:MAX:"Max \\: %8.2lf %s\\n" ! ! ###### ! ###### Reports Generated from NET-SNMP agents ! ###### ! ! report.netsnmp.context.name=Context - Deprecated ! report.netsnmp.context.columns=SysContext ! report.netsnmp.context.type=nodeSnmp ! report.netsnmp.context.command=--title="Context (Deprecated Objects)" \ ! DEF:context={rrd1}:SysContext:AVERAGE \ ! CDEF:floatc=context,1,/ \ ! LINE2:floatc#0000ff:"Context" \ ! GPRINT:floatc:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:floatc:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:floatc:MAX:"Max \\: %10.2lf %s\\n" \ ! ! report.netsnmp.rawcontext.name=Context ! report.netsnmp.rawcontext.columns=SysRawContext ! report.netsnmp.rawcontext.type=nodeSnmp ! report.netsnmp.rawcontext.command=--title="Context" \ ! DEF:context={rrd1}:SysRawContext:AVERAGE \ ! CDEF:floatc=context,1,/ \ ! LINE2:floatc#0000ff:"Context" \ ! GPRINT:floatc:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:floatc:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:floatc:MAX:"Max \\: %10.2lf %s\\n" \ ! ! # The following report has been replaced with cpuStats ! report.netsnmp.cpuRaw.name=CPU Usage ! report.netsnmp.cpuRaw.columns=CpuRawSystem, CpuRawUser ! report.netsnmp.cpuRaw.type=nodeSnmp ! report.netsnmp.cpuRaw.command=--title="CPU Use" \ ! DEF:cpuuses={rrd1}:CpuRawSystem:AVERAGE \ ! DEF:cpuuseu={rrd2}:CpuRawUser:AVERAGE \ ! CDEF:floats=cpuuses,100,/ \ ! CDEF:floatu=cpuuseu,100,/ \ ! LINE2:floats#0000ff:"System" \ ! GPRINT:floats:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:floats:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:floats:MAX:"Max \\: %10.2lf %s\\n" \ ! LINE2:floatu#00ff00:"User" \ ! GPRINT:floatu:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:floatu:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:floatu:MAX:"Max \\: %10.2lf %s\\n" ! ! report.netsnmp.cpuStats.name=CPU Statistics ! 
report.netsnmp.cpuStats.columns=loadavg1,loadavg5,loadavg15,CpuRawUser,CpuRawSystem,CpuRawIdle,CpuRawNice ! report.netsnmp.cpuStats.type=nodeSnmp ! report.netsnmp.cpuStats.width=565 ! report.netsnmp.cpuStats.height=200 ! report.netsnmp.cpuStats.command=--title="CPU Statistics" \ ! --units-exponent 0 \ ! --width 565 \ ! --height 200 \ ! --vertical-label Load \ ! --lower-limit 0 \ ! DEF:loadavg1={rrd1}:loadavg1:AVERAGE \ ! DEF:loadavg5={rrd2}:loadavg5:AVERAGE \ ! DEF:loadavg15={rrd3}:loadavg15:AVERAGE \ ! DEF:ssCpuRawUser={rrd4}:CpuRawUser:AVERAGE \ ! DEF:ssCpuRawSystem={rrd5}:CpuRawSystem:AVERAGE \ ! DEF:ssCpuRawIdle={rrd6}:CpuRawIdle:AVERAGE \ ! DEF:ssCpuRawNice={rrd7}:CpuRawNice:AVERAGE \ ! CDEF:float1=loadavg1,100,/ \ ! CDEF:float5=loadavg5,100,/ \ ! CDEF:float15=loadavg15,100,/ \ ! CDEF:cpuNumerator=ssCpuRawUser,ssCpuRawNice,+,ssCpuRawSystem,+ \ ! CDEF:cpuDivisor=ssCpuRawUser,ssCpuRawNice,+,ssCpuRawSystem,+,ssCpuRawIdle,+ \ ! CDEF:cpuUse=cpuNumerator,cpuDivisor,/,100,* \ ! CDEF:cpuUse10=0,cpuUse,GE,0,float15,IF \ ! CDEF:cpuUse20=10,cpuUse,GT,0,float15,IF \ ! CDEF:cpuUse30=20,cpuUse,GT,0,float15,IF \ ! CDEF:cpuUse40=30,cpuUse,GT,0,float15,IF \ ! CDEF:cpuUse50=40,cpuUse,GT,0,float15,IF \ ! CDEF:cpuUse60=50,cpuUse,GT,0,float15,IF \ ! CDEF:cpuUse70=60,cpuUse,GT,0,float15,IF \ ! CDEF:cpuUse80=70,cpuUse,GT,0,float15,IF \ ! CDEF:cpuUse90=80,cpuUse,GT,0,float15,IF \ ! CDEF:cpuUse100=90,cpuUse,GT,0,float15,IF \ ! COMMENT:"\\n" \ ! COMMENT:"CPU Utilization (%)\\n" \ ! COMMENT:" " \ ! AREA:cpuUse10#0066FF:" 0-10%" \ ! AREA:cpuUse20#00CCFF:"11-20%" \ ! AREA:cpuUse30#00FFFF:"21-30%" \ ! AREA:cpuUse40#00CC00:"31-40%" \ ! AREA:cpuUse50#00FF00:"41-50%" \ ! COMMENT:" " \ ! GPRINT:cpuUse:MIN:"Minimum \\: %2.1lf%%%s " \ ! GPRINT:cpuUse:MAX:"Maximum \\: %2.1lf%%%s" \ ! COMMENT:"\\n" \ ! COMMENT:" " \ ! AREA:cpuUse60#FFFF99:"51-60%" \ ! AREA:cpuUse70#FFFF00:"61-70%" \ ! AREA:cpuUse80#FFCC66:"71-80%" \ ! AREA:cpuUse90#FF9900:"81-90%" \ ! AREA:cpuUse100#FF0000:"91-100%" \ ! 
COMMENT:" " \ ! GPRINT:cpuUse:LAST:"Current \\: %2.1lf%%%s" \ ! GPRINT:cpuUse:AVERAGE:"Average \\: %2.1lf%%%s" \ ! COMMENT:"\\n" \ ! COMMENT:"\\n" \ ! COMMENT:"Load Average\\n" \ ! COMMENT:" " \ ! LINE1:float1#CC0099:"1 minute" \ ! COMMENT:" " \ ! GPRINT:float1:MIN:"Minimum \\: %.2lf" \ ! GPRINT:float1:MAX:" Maximum \\: %.2lf" \ ! COMMENT:"\\n" \ ! COMMENT:" " \ ! LINE1:float5#FF99CC:"5 minutes" \ ! COMMENT:" " \ ! GPRINT:float1:LAST:"Current \\: %.2lf" \ ! GPRINT:float1:AVERAGE:" Average \\: %.2lf" \ ! COMMENT:"\\n" \ ! COMMENT:" " \ ! LINE1:float15#000000:"15 minutes\\n" ! ! report.netsnmp.cpuUsage.name=CPU Usage ! report.netsnmp.cpuUsage.columns=CpuRawInterrupt,CpuRawUser,CpuRawWait,CpuRawNice,CpuRawSystem,CpuRawKernel ! report.netsnmp.cpuUsage.type=nodeSnmp ! report.netsnmp.cpuUsage.command=--title="CPU Usage" \ ! --width 565 \ ! --height 200 \ ! --upper-limit 100 \ ! -l 0 \ ! DEF:cpuinterrupt={rrd1}:CpuRawInterrupt:AVERAGE \ ! DEF:cpuuser={rrd2}:CpuRawUser:AVERAGE \ ! DEF:cpuwait={rrd3}:CpuRawWait:AVERAGE \ ! DEF:cpunice={rrd4}:CpuRawNice:AVERAGE \ ! DEF:cpusystem={rrd5}:CpuRawSystem:AVERAGE \ ! DEF:cpukernel={rrd6}:CpuRawKernel:AVERAGE \ ! CDEF:interrupt=cpuinterrupt \ ! CDEF:system=cpusystem,interrupt,+ \ ! CDEF:wait=cpuwait,system,+ \ ! CDEF:nice=cpunice,wait,+ \ ! CDEF:user=cpuuser,nice,+ \ ! AREA:user#0000ff:"User " \ ! GPRINT:user:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:user:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:user:MAX:"Max \\: %8.2lf %s\\n" \ ! AREA:nice#00aa00:"Nice " \ ! GPRINT:nice:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:nice:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:nice:MAX:"Max \\: %8.2lf %s\\n" \ ! AREA:wait#ffff00:"Wait " \ ! GPRINT:wait:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:wait:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:wait:MAX:"Max \\: %8.2lf %s\\n" \ ! AREA:system#00ffff:"System " \ ! GPRINT:system:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:system:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:system:MAX:"Max \\: %8.2lf %s\\n" \ ! AREA:interrupt#ff0000:"Interrupts" \ ! 
GPRINT:interrupt:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:interrupt:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:interrupt:MAX:"Max \\: %8.2lf %s\\n" ! ! report.netsnmp.diskio.bytes.name=Disk IO Bytes ! report.netsnmp.diskio.bytes.columns=diskIONRead,diskIONWritten ! report.netsnmp.diskio.bytes.type=diskIOIndex ! report.netsnmp.diskio.bytes.command=--title="Disk IO Bytes" \ ! DEF:nread={rrd1}:diskIONRead:AVERAGE \ ! DEF:nwritten={rrd2}:diskIONWritten:AVERAGE \ ! CDEF:nwritteninv=nwritten,-1,* \ ! AREA:nread#00ff00:"Read" \ ! GPRINT:nread:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:nread:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:nread:MAX:"Max \\: %10.2lf %s\\n" \ ! AREA:nwritteninv#0000ff:"Written" \ ! GPRINT:nwritten:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:nwritten:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:nwritten:MAX:"Max \\: %10.2lf %s\\n" ! ! report.netsnmp.diskio.ops.name=Disk IO Operations ! report.netsnmp.diskio.ops.columns=diskIOReads,diskIOWrites ! report.netsnmp.diskio.ops.type=diskIOIndex ! report.netsnmp.diskio.ops.command=--title="Disk IO Operations" \ ! DEF:reads={rrd1}:diskIOReads:AVERAGE \ ! DEF:writes={rrd2}:diskIOWrites:AVERAGE \ ! CDEF:writesinv=writes,-1,* \ ! AREA:reads#00ff00:"Reads" \ ! GPRINT:reads:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:reads:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:reads:MAX:"Max \\: %10.2lf %s\\n" \ ! AREA:writesinv#0000ff:"Writes" \ ! GPRINT:writes:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:writes:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:writes:MAX:"Max \\: %10.2lf %s\\n" ! ! report.netsnmp.diskio.opsize.name=Disk IO Size ! report.netsnmp.diskio.opsize.columns=diskIONRead,diskIONWritten,diskIOReads,diskIOWrites ! report.netsnmp.diskio.opsize.type=diskIOIndex ! report.netsnmp.diskio.opsize.command=--title="Disk IO Size" \ ! DEF:nread={rrd1}:diskIONRead:AVERAGE \ ! DEF:nwritten={rrd2}:diskIONWritten:AVERAGE \ ! DEF:reads={rrd3}:diskIOReads:AVERAGE \ ! DEF:writes={rrd4}:diskIOWrites:AVERAGE \ ! CDEF:readsize=nread,reads,/ \ ! CDEF:writesize=nwritten,writes,/ \ ! 
CDEF:writesizeinv=nwritten,writes,/,-1,* \ ! AREA:readsize#00ff00:"Read Size" \ ! GPRINT:readsize:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:readsize:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:readsize:MAX:"Max \\: %10.2lf %s\\n" \ ! AREA:writesizeinv#0000ff:"Write Size" \ ! GPRINT:writesize:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:writesize:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:writesize:MAX:"Max \\: %10.2lf %s\\n" ! ! report.netsnmp.disk.name=Net-SNMP Disk Space ! report.netsnmp.disk.columns=ns-dskTotal, ns-dskUsed ! report.netsnmp.disk.type=dskIndex ! report.netsnmp.disk.propertiesValues=ns-dskPath ! report.netsnmp.disk.command=--title="Disk Space on {ns-dskPath}" \ ! DEF:dtotalk={rrd1}:ns-dskTotal:AVERAGE \ ! DEF:dusedk={rrd2}:ns-dskUsed:AVERAGE \ ! CDEF:dtotal=dtotalk,1024,* \ ! CDEF:dused=dusedk,1024,* \ ! LINE2:dtotal#0000ff:"Total:" \ ! GPRINT:dtotal:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:dtotal:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:dtotal:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:dused#ff0000:"Used :" \ ! GPRINT:dused:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:dused:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:dused:MAX:"Max \\: %8.2lf %s\\n" ! ! report.netsnmp.diskpercent.name=NetSNMP Percentage Disk Space ! report.netsnmp.diskpercent.columns=ns-dskPercent ! report.netsnmp.diskpercent.type=dskIndex ! report.netsnmp.diskpercent.propertiesValues=ns-dskPath ! report.netsnmp.diskpercent.command=--title="Percent Space Used: {ns-dskPath}" \ ! DEF:dpercent={rrd1}:ns-dskPercent:AVERAGE \ ! LINE2:dpercent#0000ff:"% Used:" \ ! GPRINT:dpercent:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:dpercent:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:dpercent:MAX:"Max \\: %8.2lf %s\\n" ! ! report.netsnmp.hrMemory.name=Total Memory (Net-SNMP) ! report.netsnmp.hrMemory.columns=hrMemorySize ! report.netsnmp.hrMemory.type=nodeSnmp ! report.netsnmp.hrMemory.command=--title="Total Memory" \ ! --vertical-label="Bytes" \ ! --base=1024 \ ! DEF:hrMemorySize={rrd1}:hrMemorySize:AVERAGE \ ! CDEF:sizeBytes=hrMemorySize,1024,* \ ! 
LINE2:sizeBytes#0000ff:"Memory" \ ! GPRINT:sizeBytes:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:sizeBytes:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:sizeBytes:MAX:"Max \\: %8.2lf %s\\n" ! ! report.netsnmp.hrNumUsers.name=Number of Users (Net-SNMP) ! report.netsnmp.hrNumUsers.columns=hrSystemNumUsers ! report.netsnmp.hrNumUsers.type=nodeSnmp ! report.netsnmp.hrNumUsers.command=--title="Number of Users" \ ! DEF:hrSystemNumUsers={rrd1}:hrSystemNumUsers:AVERAGE \ ! LINE2:hrSystemNumUsers#0000ff:"Number of Users" \ ! GPRINT:hrSystemNumUsers:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:hrSystemNumUsers:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:hrSystemNumUsers:MAX:"Max \\: %8.2lf %s\\n" ! ! report.netsnmp.hrSystemProcesses.name=Number of Processes (Net-SNMP) ! report.netsnmp.hrSystemProcesses.columns=hrSystemProcesses ! report.netsnmp.hrSystemProcesses.type=nodeSnmp ! report.netsnmp.hrSystemProcesses.command=--title="Number of Processes" \ ! DEF:SystemProcesses={rrd1}:hrSystemProcesses:AVERAGE \ ! LINE2:SystemProcesses#0000ff:"Number of Processes" \ ! GPRINT:SystemProcesses:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:SystemProcesses:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:SystemProcesses:MAX:"Max \\: %8.2lf %s\\n" ! ! report.netsnmp.hrSystemUptime.name=System Uptime (Net-SNMP) ! report.netsnmp.hrSystemUptime.columns=hrSystemUptime ! report.netsnmp.hrSystemUptime.type=nodeSnmp ! report.netsnmp.hrSystemUptime.command=--title="System Uptime" \ ! --vertical-label Days \ ! DEF:time={rrd1}:hrSystemUptime:AVERAGE \ ! CDEF:days=time,8640000,/ \ ! LINE2:days#0000ff:"System Uptime (Days)" \ ! GPRINT:days:AVERAGE:"Avg \\: %8.1lf %s" \ ! GPRINT:days:MIN:"Min \\: %8.1lf %s" \ ! GPRINT:days:MAX:"Max \\: %8.1lf %s\\n" ! ! report.netsnmp.interrupts.name=Interrupts - Deprecated ! report.netsnmp.interrupts.columns=SysInterrupts ! report.netsnmp.interrupts.type=nodeSnmp ! report.netsnmp.interrupts.command=--title="Interrupts (Deprecated Objects)" \ ! DEF:interrupts={rrd1}:SysInterrupts:AVERAGE \ ! 
CDEF:floati=interrupts,1,/ \ ! LINE2:floati#0000ff:"Interrupts" \ ! GPRINT:floati:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:floati:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:floati:MAX:"Max \\: %10.2lf %s\\n" \ ! ! report.netsnmp.rawinterrupts.name=Interrupts ! report.netsnmp.rawinterrupts.columns=SysRawInterrupts ! report.netsnmp.rawinterrupts.type=nodeSnmp ! report.netsnmp.rawinterrupts.command=--title="Interrupts" \ ! DEF:interrupts={rrd1}:SysRawInterrupts:AVERAGE \ ! CDEF:floati=interrupts,1,/ \ ! LINE2:floati#0000ff:"Interrupts" \ ! GPRINT:floati:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:floati:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:floati:MAX:"Max \\: %10.2lf %s\\n" \ ! ! report.netsnmp.loadavg.name=Load Average ! report.netsnmp.loadavg.columns=loadavg1, loadavg5, loadavg15 ! report.netsnmp.loadavg.type=nodeSnmp ! report.netsnmp.loadavg.command=--title="Load Average" \ ! DEF:avg1={rrd1}:loadavg1:AVERAGE \ ! DEF:avg5={rrd2}:loadavg5:AVERAGE \ ! DEF:avg15={rrd3}:loadavg15:AVERAGE \ ! CDEF:float1=avg1,100,/ \ ! CDEF:float5=avg5,100,/ \ ! CDEF:float15=avg15,100,/ \ ! LINE2:float1#0000ff:"1 minute" \ ! GPRINT:float1:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:float1:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:float1:MAX:"Max \\: %10.2lf %s\\n" \ ! LINE2:float5#00ff00:"5 minute" \ ! GPRINT:float5:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:float5:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:float5:MAX:"Max \\: %10.2lf %s\\n" \ ! LINE2:float15#ff0000:"15 minute" \ ! GPRINT:float15:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:float15:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:float15:MAX:"Max \\: %10.2lf %s\\n" \ ! ! #report.netsnmp.memStats.name=System Memory Stats ! #report.netsnmp.memStats.columns=memAvailSwap,memTotalReal,memAvailReal,memBuffer,memCached,memShared ! #report.netsnmp.memStats.type=nodeSnmp ! #report.netsnmp.memStats.width=565 ! #report.netsnmp.memStats.height=200 ! #report.netsnmp.memStats.command=--title="System Memory Stats" \ ! # --width 565 \ ! # --height 200 \ ! # --lower-limit 0 \ ! 
# --base=1024 \ ! # --vertical-label="Bytes" \ ! # DEF:memavailswap={rrd1}:memAvailSwap:AVERAGE \ ! # DEF:memtotalreal={rrd2}:memTotalReal:AVERAGE \ ! # DEF:memavailreal={rrd3}:memAvailReal:AVERAGE \ ! # DEF:membuffer={rrd4}:memBuffer:AVERAGE \ ! # DEF:memcached={rrd5}:memCached:AVERAGE \ ! # DEF:memshared={rrd6}:memShared:AVERAGE \ ! # CDEF:memavailswapBytes=memavailswap,1024,* \ ! # CDEF:memtotalrealBytes=memtotalreal,1024,* \ ! # CDEF:memavailrealBytes=memavailreal,1024,* \ ! # CDEF:membufferBytes=membuffer,1024,* \ ! # CDEF:memcachedBytes=memcached,1024,* \ ! # CDEF:memsharedBytes=memshared,1024,* \ ! # LINE2:memavailswapBytes#ff0000:"Total Swap " \ ! # GPRINT:memavailswapBytes:AVERAGE:" Avg \\: %8.2lf %s" \ ! # GPRINT:memavailswapBytes:MIN:"Min \\: %8.2lf %s" \ ! # GPRINT:memavailswapBytes:MAX:"Max \\: %8.2lf %s\\n" \ ! # LINE2:memtotalrealBytes#0000ff:"Total Real Mem" \ ! # GPRINT:memtotalrealBytes:AVERAGE:" Avg \\: %8.2lf %s" \ ! # GPRINT:memtotalrealBytes:MIN:"Min \\: %8.2lf %s" \ ! # GPRINT:memtotalrealBytes:MAX:"Max \\: %8.2lf %s\\n" \ ! # LINE2:memavailrealBytes#dd4400:"Avail Real Mem" \ ! # GPRINT:memavailrealBytes:AVERAGE:" Avg \\: %8.2lf %s" \ ! # GPRINT:memavailrealBytes:MIN:"Min \\: %8.2lf %s" \ ! # GPRINT:memavailrealBytes:MAX:"Max \\: %8.2lf %s\\n" \ ! # LINE1:membufferBytes#00aa00:"IO Buff Ram " \ ! # GPRINT:membufferBytes:AVERAGE:" Avg \\: %8.2lf %s" \ ! # GPRINT:membufferBytes:MIN:"Min \\: %8.2lf %s" \ ! # GPRINT:membufferBytes:MAX:"Max \\: %8.2lf %s\\n" \ ! # LINE1:memcachedBytes#00ffff:"Filesystem Cache" \ ! # GPRINT:memcachedBytes:AVERAGE:"Avg \\: %8.2lf %s" \ ! # GPRINT:memcachedBytes:MIN:"Min \\: %8.2lf %s" \ ! # GPRINT:memcachedBytes:MAX:"Max \\: %8.2lf %s\\n" \ ! # LINE2:memsharedBytes#000a44:"Shared Mem " \ ! # GPRINT:memsharedBytes:AVERAGE:" Avg \\: %8.2lf %s" \ ! # GPRINT:memsharedBytes:MIN:"Min \\: %8.2lf %s" \ ! # GPRINT:memsharedBytes:MAX:"Max \\: %8.2lf %s\\n" ! ! report.netsnmp.memStats.name=System Memory Stats ! 
report.netsnmp.memStats.columns=memAvailSwap,memTotalReal,memAvailReal,memBuffer,memCached,memShared ! report.netsnmp.memStats.type=nodeSnmp ! report.netsnmp.memStats.width=565 ! report.netsnmp.memStats.height=200 ! report.netsnmp.memStats.command=--title="System Memory Stats" \ ! --width 565 \ ! --height 200 \ ! --lower-limit 0 \ ! --base=1024 \ ! --vertical-label="Bytes" \ ! DEF:memavailswap={rrd1}:memAvailSwap:AVERAGE \ ! DEF:memtotalreal={rrd2}:memTotalReal:AVERAGE \ ! DEF:memavailreal={rrd3}:memAvailReal:AVERAGE \ ! DEF:membuffer={rrd4}:memBuffer:AVERAGE \ ! DEF:memcached={rrd5}:memCached:AVERAGE \ ! DEF:memshared={rrd6}:memShared:AVERAGE \ ! CDEF:memavailswapBytes=memavailswap,1024,* \ ! CDEF:memtotalrealBytes=memtotalreal,1024,* \ ! CDEF:memavailrealBytes=memavailreal,1024,* \ ! CDEF:membufferBytes=membuffer,1024,* \ ! CDEF:memcachedBytes=memcached,1024,* \ ! CDEF:memsharedBytes=memshared,1024,* \ ! CDEF:usedBytes=memtotalrealBytes,membufferBytes,-,memcachedBytes,-,memsharedBytes,-,memavailrealBytes,- \ ! AREA:usedBytes#dd4400:"Used (Other)" \ ! GPRINT:usedBytes:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:usedBytes:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:usedBytes:MAX:"Max \\: %8.2lf %s\\n" \ ! STACK:membufferBytes#00ffff:"IO Buff Ram " \ ! GPRINT:membufferBytes:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:membufferBytes:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:membufferBytes:MAX:"Max \\: %8.2lf %s\\n" \ ! STACK:memsharedBytes#000a44:"Shared Mem " \ ! GPRINT:memsharedBytes:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:memsharedBytes:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:memsharedBytes:MAX:"Max \\: %8.2lf %s\\n" \ ! STACK:memcachedBytes#00aa00:"Filesystem Cache" \ ! GPRINT:memcachedBytes:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:memcachedBytes:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:memcachedBytes:MAX:"Max \\: %8.2lf %s\\n" \ ! STACK:memavailrealBytes#00ff00:"Avail Real Mem" \ ! GPRINT:memavailrealBytes:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:memavailrealBytes:MIN:"Min \\: %8.2lf %s" \ ! 
GPRINT:memavailrealBytes:MAX:"Max \\: %8.2lf %s\\n" \ ! STACK:memavailswapBytes#ff0000:"Total Swap " \ ! GPRINT:memavailswapBytes:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:memavailswapBytes:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:memavailswapBytes:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:memtotalrealBytes#0000ff:"Total Real Mem" \ ! GPRINT:memtotalrealBytes:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:memtotalrealBytes:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:memtotalrealBytes:MAX:"Max \\: %8.2lf %s\\n" \ ! ! report.netsnmp.swapinout.name=Swap ! report.netsnmp.swapinout.columns=SwapIn, SwapOut ! report.netsnmp.swapinout.type=nodeSnmp ! report.netsnmp.swapinout.command=--title="Swap" \ ! DEF:swapin={rrd1}:SwapIn:AVERAGE \ ! DEF:swapout={rrd2}:SwapOut:AVERAGE \ ! CDEF:floatin=swapin,100,/ \ ! CDEF:floatout=swapout,100,/ \ ! LINE2:floatin#0000ff:"In " \ ! GPRINT:floatin:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:floatin:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:floatin:MAX:"Max \\: %10.2lf %s\\n" \ ! LINE2:floatout#00ff00:"Out" \ ! GPRINT:floatout:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:floatout:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:floatout:MAX:"Max \\: %10.2lf %s\\n" \ ! ! ###### ! ###### NetBotz Reports ! ###### ! ! report.netBotz.air.name=NetBotz Air Report ! report.netBotz.air.columns=netBotz-airflow, netBotz-air-min ! report.netBotz.air.type=nodeSnmp ! report.netBotz.air.command=--title="NetBotz Air" \ ! DEF:air={rrd1}:netBotz-airflow:AVERAGE \ ! DEF:airmin={rrd2}:netBotz-air-min:AVERAGE \ ! LINE2:air#0000ff:"Air" \ ! LINE2:airmin#ff0000:"Air (min)" \ ! GPRINT:air:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:air:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:air:MAX:"Max \\: %8.2lf %s\\n" ! ! report.netBotz.humid.name=NetBotz Humidity Report ! report.netBotz.humid.columns=netBotz-humidity, netBotz-humid-min, netBotz-humid-max ! report.netBotz.humid.type=nodeSnmp ! report.netBotz.humid.command=--title="NetBotz Humidity" \ ! DEF:humid={rrd1}:netBotz-humidity:AVERAGE \ ! 
DEF:humidmin={rrd2}:netBotz-humid-min:AVERAGE \ ! DEF:humidmax={rrd3}:netBotz-humid-max:AVERAGE \ ! LINE2:humid#0000ff:"Humidity" \ ! LINE2:humidmin#ff0000:"Humidity (min)" \ ! LINE2:humidmax#ff0000:"Humidity (max)" \ ! GPRINT:humid:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:humid:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:humid:MAX:"Max \\: %8.2lf %s\\n" ! ! report.netBotz.temp.name=NetBotz Temp Report ! report.netBotz.temp.columns=netBotz-temp, netBotz-temp-min, netBotz-temp-max ! report.netBotz.temp.type=nodeSnmp ! report.netBotz.temp.command=--title="NetBotz Temp" \ ! DEF:temp={rrd1}:netBotz-temp:AVERAGE \ ! DEF:tempmin={rrd2}:netBotz-temp-min:AVERAGE \ ! DEF:tempmax={rrd3}:netBotz-temp-max:AVERAGE \ ! LINE2:temp#0000ff:"Temperature" \ ! LINE2:tempmin#ff0000:"Temp (min)" \ ! LINE2:tempmax#ff0000:"Temp (max)" \ ! GPRINT:temp:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:temp:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:temp:MAX:"Max \\: %8.2lf %s\\n" ! ! ###### ! ###### Reports Generated From NORTEL OIDs ! ###### ! ! report.nortel.kernelmem.name=Kernel Memory (NORTEL) ! report.nortel.kernelmem.columns=wfkernelBuffersFree,wfkernelMemoryFree ! report.nortel.kernelmem.type=nodeSnmp ! report.nortel.kernelmem.command=--title="Kernel Memory" \ ! DEF:buffers={rrd1}:wfkernelBuffersFree:AVERAGE \ ! DEF:memory={rrd2}:wfkernelMemoryFree:AVERAGE \ ! LINE2:buffers#0000ff:"Buffers Free" \ ! GPRINT:buffers:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:buffers:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:buffers:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:memory#ff0000:"Memory Free" \ ! GPRINT:memory:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:memory:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:memory:MAX:"Max \\: %8.2lf %s\\n" ! ! report.nortel.kerneltasks.name=Kernel Tasks (NORTEL) ! report.nortel.kerneltasks.columns=wfkernelTasksTotal,wfkernelTasksInQueue ! report.nortel.kerneltasks.type=nodeSnmp ! report.nortel.kerneltasks.command=--title="Kernel Tasks" \ ! DEF:total={rrd1}:wfkernelTasksTotal:AVERAGE \ ! 
DEF:inQueue={rrd2}:wfkernelTasksInQueue:AVERAGE \ ! LINE2:total#0000ff:"Kernel Tasks (Total)" \ ! GPRINT:total:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:total:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:total:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:inQueue#ff0000:"Kernel Tasks (In Queue)" \ ! GPRINT:inQueue:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:inQueue:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:inQueue:MAX:"Max \\: %8.2lf %s\\n" ! ! ###### ! ###### Reports Generated for Novell Devices ! ###### ! ! report.novell.cacheBuffers.name=Cache Buffers (Novell) ! report.novell.cacheBuffers.columns=cacheBuffersSize,cacheBuffersUsed,cacheBuffersUnits ! report.novell.cacheBuffers.type=nodeSnmp ! report.novell.cacheBuffers.command=--title="Cache Buffers" \ ! --vertical-label="Bytes" \ ! --base=1024 \ ! DEF:cacheSize={rrd1}:cacheBuffersSize:AVERAGE \ ! DEF:cacheUsed={rrd2}:cacheBuffersUsed:AVERAGE \ ! DEF:cacheUnits={rrd3}:cacheBuffersUnits:AVERAGE \ ! CDEF:sizeBytes=cacheSize,cacheUnits,* \ ! CDEF:usedBytes=cacheUsed,cacheUnits,* \ ! LINE2:sizeBytes#0000ff:"Available" \ ! GPRINT:sizeBytes:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:sizeBytes:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:sizeBytes:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:usedBytes#ff0000:"Used " \ ! GPRINT:usedBytes:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:usedBytes:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:usedBytes:MAX:"Max \\: %8.2lf %s\\n" ! ! report.novell.codeDataMemory.name=Code and Data Memory (Novell) ! report.novell.codeDataMemory.columns=codeDataMemorySize,codeDataMemoryUsed,codeDataMemoryUnits ! report.novell.codeDataMemory.type=nodeSnmp ! report.novell.codeDataMemory.command=--title="Code and Data Memory" \ ! --vertical-label="Bytes" \ ! --base=1024 \ ! DEF:memSize={rrd1}:codeDataMemorySize:AVERAGE \ ! DEF:memUsed={rrd2}:codeDataMemoryUsed:AVERAGE \ ! DEF:memUnits={rrd3}:codeDataMemoryUnits:AVERAGE \ ! CDEF:sizeBytes=memSize,memUnits,* \ ! CDEF:usedBytes=memUsed,memUnits,* \ ! LINE2:sizeBytes#0000ff:"Available" \ ! 
GPRINT:sizeBytes:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:sizeBytes:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:sizeBytes:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:usedBytes#ff0000:"Used" \ ! GPRINT:usedBytes:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:usedBytes:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:usedBytes:MAX:"Max \\: %8.2lf %s\\n" ! ! report.novell.diskSpaceSys.name=Disk Space on SYS (Novell) ! report.novell.diskSpaceSys.columns=freeSpaceOnSys,freeableSpaceOnSys ! report.novell.diskSpaceSys.type=nodeSnmp ! report.novell.diskSpaceSys.command=--title="Disk Space on SYS" \ ! --vertical-label="Bytes" \ ! --base=1024 \ ! DEF:free={rrd1}:freeSpaceOnSys:AVERAGE \ ! CDEF:freeBytes=free,1024,* \ ! DEF:freeable={rrd2}:freeableSpaceOnSys:AVERAGE \ ! CDEF:freeableBytes=freeable,1024,* \ ! LINE2:freeBytes#0000ff:"Free Space" \ ! GPRINT:freeBytes:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:freeBytes:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:freeBytes:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:freeableBytes#ff0000:"Freeable Space" \ ! GPRINT:freeableBytes:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:freeableBytes:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:freeableBytes:MAX:"Max \\: %8.2lf %s\\n" ! ! report.novell.diskSpaceVol2.name=Disk Space on Second Volume (Novell) ! report.novell.diskSpaceVol2.columns=freeSpaceOnVol2,freeableSpaceOnVol2 ! report.novell.diskSpaceVol2.type=nodeSnmp ! report.novell.diskSpaceVol2.command=--title="Disk Space on Second Volume" \ ! --vertical-label="Bytes" \ ! --base=1024 \ ! DEF:free={rrd1}:freeSpaceOnVol2:AVERAGE \ ! CDEF:freeBytes=free,1024,* \ ! DEF:freeable={rrd2}:freeableSpaceOnVol2:AVERAGE \ ! CDEF:freeableBytes=freeable,1024,* \ ! LINE2:freeBytes#0000ff:"Free Space" \ ! GPRINT:freeBytes:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:freeBytes:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:freeBytes:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:freeableBytes#ff0000:"Freeable Space" \ ! GPRINT:freeableBytes:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:freeableBytes:MIN:"Min \\: %8.2lf %s" \ ! 
GPRINT:freeableBytes:MAX:"Max \\: %8.2lf %s\\n" ! ! report.novell.licensedConnections.name=Licensed Connections (Novell) ! report.novell.licensedConnections.columns=licensedConnections ! report.novell.licensedConnections.type=nodeSnmp ! report.novell.licensedConnections.command=--title="Licensed Connections" \ ! DEF:licensedConnections={rrd1}:licensedConnections:AVERAGE \ ! LINE2:licensedConnections#0000ff:"Licensed Connections" \ ! GPRINT:licensedConnections:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:licensedConnections:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:licensedConnections:MAX:"Max \\: %8.2lf %s\\n" ! ! report.novell.numberOfNLMsLoaded.name=Number of NLMs Loaded (Novell) ! report.novell.numberOfNLMsLoaded.columns=numberOfNLMsLoaded ! report.novell.numberOfNLMsLoaded.type=nodeSnmp ! report.novell.numberOfNLMsLoaded.command=--title="Number of NLMs Loaded" \ ! DEF:numberOfNLMsLoaded={rrd1}:numberOfNLMsLoaded:AVERAGE \ ! LINE2:numberOfNLMsLoaded#0000ff:"Loaded NLMs" \ ! GPRINT:numberOfNLMsLoaded:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:numberOfNLMsLoaded:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:numberOfNLMsLoaded:MAX:"Max \\: %8.2lf %s\\n" ! ! report.novell.openFiles.name=Open Files (Novell) ! report.novell.openFiles.columns=currentOpenFiles ! report.novell.openFiles.type=nodeSnmp ! report.novell.openFiles.command=--title="Open Files" \ ! DEF:currentOpenFiles={rrd1}:currentOpenFiles:AVERAGE \ ! LINE2:currentOpenFiles#0000ff:"Open Files" \ ! GPRINT:currentOpenFiles:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:currentOpenFiles:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:currentOpenFiles:MAX:"Max \\: %8.2lf %s\\n" ! ! ###### ! ###### Reports for SNMP Informant Agents ! ###### ! ! report.sinf.availmem.name=Available Memory (SNMP-Inf) ! report.sinf.availmem.columns=sinfMemAvailMB ! report.sinf.availmem.type=nodeSnmp ! report.sinf.availmem.command=--title="Windows Available Memory (SNMP-Informant)" \ ! --vertical-label="Bytes" \ ! --base=1024 \ ! DEF:availmem={rrd1}:sinfMemAvailMB:AVERAGE \ ! 
CDEF:availmemBytes=availmem,1024,*,1024,* \ ! LINE2:availmemBytes#ff0000:"Available Memory" \ ! GPRINT:availmemBytes:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:availmemBytes:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:availmemBytes:MAX:"Max \\: %10.2lf %s\\n" ! ! # This report should go away in favor of report.sinf.cpupercent ! report.sinf.cpu0percent.name=Windows CPU 0 Percent Processor Time (SNMP-Inf) ! report.sinf.cpu0percent.columns=sinfCpuPtProcTime0 ! report.sinf.cpu0percent.type=nodeSnmp ! report.sinf.cpu0percent.command=--title="Windows CPU 0 Utilization (SNMP-Informant) - DEPRECATED" \ ! DEF:utilization={rrd1}:sinfCpuPtProcTime0:AVERAGE \ ! LINE2:utilization#ff0000:"% util." \ ! GPRINT:utilization:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:utilization:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:utilization:MAX:"Max \\: %10.2lf %s\\n" ! ! # This report should go away in favor of report.sinf.cpupercent ! report.sinf.cpu1percent.name=Windows CPU 1 Percent Processor Time (SNMP-Inf) ! report.sinf.cpu1percent.columns=sinfCpuPtProcTime1 ! report.sinf.cpu1percent.type=nodeSnmp ! report.sinf.cpu1percent.command=--title="Windows CPU 1 Utilization (SNMP-Informant) - DEPRECATED" \ ! DEF:utilization={rrd1}:sinfCpuPtProcTime1:AVERAGE \ ! LINE2:utilization#ff0000:"% util." \ ! GPRINT:utilization:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:utilization:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:utilization:MAX:"Max \\: %10.2lf %s\\n" ! ! # This report should go away in favor of report.sinf.cpupercent ! report.sinf.cpu2percent.name=Windows CPU 2 Percent Processor Time (SNMP-Inf) ! report.sinf.cpu2percent.columns=sinfCpuPtProcTime2 ! report.sinf.cpu2percent.type=nodeSnmp ! report.sinf.cpu2percent.command=--title="Windows CPU 2 Utilization (SNMP-Informant) - DEPRECATED" \ ! DEF:utilization={rrd1}:sinfCpuPtProcTime2:AVERAGE \ ! LINE2:utilization#ff0000:"% util." \ ! GPRINT:utilization:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:utilization:MIN:"Min \\: %10.2lf %s" \ ! 
GPRINT:utilization:MAX:"Max \\: %10.2lf %s\\n" ! ! # This report should go away in favor of report.sinf.cpupercent ! report.sinf.cpu3percent.name=Windows CPU 3 Percent Processor Time (SNMP-Inf) ! report.sinf.cpu3percent.columns=sinfCpuPtProcTime3 ! report.sinf.cpu3percent.type=nodeSnmp ! report.sinf.cpu3percent.command=--title="Windows CPU 3 Utilization (SNMP-Informant) - DEPRECATED" \ ! DEF:utilization={rrd1}:sinfCpuPtProcTime3:AVERAGE \ ! LINE2:utilization#ff0000:"% util." \ ! GPRINT:utilization:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:utilization:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:utilization:MAX:"Max \\: %10.2lf %s\\n" ! ! # This report should go away in favor of report.sinf.cpupercent ! report.sinf.cputotalpercent.name=Windows CPU Total Percent Processor Time (SNMP-Inf) ! report.sinf.cputotalpercent.columns=sinfCpuPtProcTimeTl ! report.sinf.cputotalpercent.type=nodeSnmp ! report.sinf.cputotalpercent.command=--title="Windows Total CPU Utilization (SNMP-Informant) - DEPRECATED" \ ! DEF:utilization={rrd1}:sinfCpuPtProcTimeTl:AVERAGE \ ! LINE2:utilization#ff0000:"% util." \ ! GPRINT:utilization:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:utilization:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:utilization:MAX:"Max \\: %10.2lf %s\\n" ! ! report.sinf.cpupercent.name=Windows CPU Percent Processor Time (SNMP-Inf) ! report.sinf.cpupercent.columns=sinfCpuPctProcTime ! report.sinf.cpupercent.type=sinfCpuInstance ! report.sinf.cpupercent.propertiesValues=sinfCpuInstance ! report.sinf.cpupercent.command=--title="Windows CPU {sinfCpuInstance} Utilization (SNMP-Informant)" \ ! DEF:utilization={rrd1}:sinfCpuPctProcTime:AVERAGE \ ! LINE2:utilization#ff0000:"% util." \ ! GPRINT:utilization:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:utilization:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:utilization:MAX:"Max \\: %10.2lf %s\\n" ! ! # This report should go away in favor of report.sinf.diskfree ! report.sinf.diskfreeC.name=Available Disk Space (Drive C) (SNMP-Inf) ! 
report.sinf.diskfreeC.columns=sinfDskPtFreeSpcC ! report.sinf.diskfreeC.type=nodeSnmp ! report.sinf.diskfreeC.command=--title="Windows Available Space Disk Drive C (SNMP-Informant) - DEPRECATED" \ ! DEF:availspace={rrd1}:sinfDskPtFreeSpcC:AVERAGE \ ! LINE2:availspace#ff0000:"% Avail." \ ! GPRINT:availspace:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:availspace:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:availspace:MAX:"Max \\: %10.2lf %s\\n" ! ! # This report should go away in favor of report.sinf.diskfree ! report.sinf.diskfreeD.name=Available Disk Space (Drive D) (SNMP-Inf) ! report.sinf.diskfreeD.columns=sinfDskPtFreeSpcD ! report.sinf.diskfreeD.type=nodeSnmp ! report.sinf.diskfreeD.command=--title="Windows Available Space Disk Drive D (SNMP-Informant) - DEPRECATED" \ ! DEF:availspace={rrd1}:sinfDskPtFreeSpcD:AVERAGE \ ! LINE2:availspace#ff0000:"% Avail." \ ! GPRINT:availspace:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:availspace:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:availspace:MAX:"Max \\: %10.2lf %s\\n" ! ! # This report should go away in favor of report.sinf.diskfree ! report.sinf.diskfreeE.name=Available Disk Space (Drive E) (SNMP-Inf) ! report.sinf.diskfreeE.columns=sinfDskPtFreeSpcE ! report.sinf.diskfreeE.type=nodeSnmp ! report.sinf.diskfreeE.command=--title="Windows Available Space Disk Drive E (SNMP-Informant) - DEPRECATED" \ ! DEF:availspace={rrd1}:sinfDskPtFreeSpcE:AVERAGE \ ! LINE2:availspace#ff0000:"% Avail." \ ! GPRINT:availspace:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:availspace:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:availspace:MAX:"Max \\: %10.2lf %s\\n" ! ! # This report should go away in favor of report.sinf.diskfree ! report.sinf.diskfreetotal.name=Available Disk Space (SNMP-Inf) ! report.sinf.diskfreetotal.columns=sinfDskPtFreeSpcTl ! report.sinf.diskfreetotal.type=nodeSnmp ! report.sinf.diskfreetotal.command=--title="Windows Available Total Disk Space (SNMP-Informant) - DEPRECATED" \ ! DEF:availspace={rrd1}:sinfDskPtFreeSpcTl:AVERAGE \ ! 
LINE2:availspace#ff0000:"% Avail." \ ! GPRINT:availspace:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:availspace:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:availspace:MAX:"Max \\: %10.2lf %s\\n" ! ! report.sinf.diskfree.name=Available Disk Space (SNMP-Inf) ! report.sinf.diskfree.columns=sinfLDskPctFreeSpc ! report.sinf.diskfree.type=sinfLDskInstance ! report.sinf.diskfree.propertiesValues=sinfLDskInstance ! report.sinf.diskfree.command=--title="Windows Available Disk Space {sinfLDskInstance} (SNMP-Informant)" \ ! DEF:availspace={rrd1}:sinfLDskPctFreeSpc:AVERAGE \ ! LINE2:availspace#ff0000:"% Avail." \ ! GPRINT:availspace:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:availspace:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:availspace:MAX:"Max \\: %10.2lf %s\\n" ! ! report.sinf.exchange-mta.diskfile.name=Exchange MTA Disk File Stats (SNMP-Informant) ! report.sinf.exchange-mta.diskfile.columns=mtaDiskFileDelPS,mtaDiskFileOpensPS,mtaDiskFileReadsPS,mtaDiskFileSyncsPS,mtaDiskFileWritesPS ! report.sinf.exchange-mta.diskfile.type=nodeSnmp ! report.sinf.exchange-mta.diskfile.command=--title="Exchange MTA Disk File Stats (SNMP-Informant)" \ ! DEF:mtafiledels={rrd1}:mtaDiskFileDelPS:AVERAGE \ ! DEF:mtafileopens={rrd2}:mtaDiskFileOpensPS:AVERAGE \ ! DEF:mtafilereads={rrd3}:mtaDiskFileReadsPS:AVERAGE \ ! DEF:mtafilesyncs={rrd4}:mtaDiskFileSyncsPS:AVERAGE \ ! DEF:mtafilewrites={rrd5}:mtaDiskFileWritesPS:AVERAGE \ ! LINE2:mtafilereads#0000ff:"File Reads :" \ ! GPRINT:mtafilereads:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:mtafilereads:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:mtafilereads:MAX:"Max \\: %10.2lf %s\\n" \ ! LINE2:mtafilewrites#00ff00:"File Writes :" \ ! GPRINT:mtafilewrites:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:mtafilewrites:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:mtafilewrites:MAX:"Max \\: %10.2lf %s\\n" \ ! LINE2:mtafileopens#ffff00:"File Opens :" \ ! GPRINT:mtafileopens:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:mtafileopens:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:mtafileopens:MAX:"Max \\: %10.2lf %s\\n" \ ! 
LINE2:mtafilesyncs#00ffff:"File Syncs :" \ ! GPRINT:mtafilesyncs:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:mtafilesyncs:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:mtafilesyncs:MAX:"Max \\: %10.2lf %s\\n" \ ! LINE2:mtafiledels#ff0000:"File Deletes :" \ ! GPRINT:mtafiledels:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:mtafiledels:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:mtafiledels:MAX:"Max \\: %10.2lf %s\\n" ! ! report.sinf.exchange-is.vmfreeblks.name=Exchange VM Free Blocks Stats (SNMP-Informant) ! report.sinf.exchange-is.vmfreeblks.columns=isVMTotalFreeBlocks, isVMTotal16MBFrBlk ! report.sinf.exchange-is.vmfreeblks.type=nodeSnmp ! report.sinf.exchange-is.vmfreeblks.command=--title="Exchange VM Free Blocks Stats (SNMP-Informant)" \ ! DEF:freeblocks={rrd1}:isVMTotalFreeBlocks:AVERAGE \ ! DEF:free16mb={rrd2}:isVMTotal16MBFrBlk:AVERAGE \ ! LINE2:freeblocks#0000ff:"Free Blocks :" \ ! GPRINT:freeblocks:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:freeblocks:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:freeblocks:MAX:"Max \\: %10.2lf %s\\n" \ ! LINE2:free16mb#ff0000:"Free 16MB Blocks :" \ ! GPRINT:free16mb:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:free16mb:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:free16mb:MAX:"Max \\: %10.2lf %s\\n" ! ! report.sinf.exchange-is.users.name=Exchange User Stats (SNMP-Informant) ! report.sinf.exchange-is.users.columns=isUserCount,isActUserCount,isAnonUserCount,isActAnonUserCount ! report.sinf.exchange-is.users.type=nodeSnmp ! report.sinf.exchange-is.users.command=--title="Exchange User Stats (SNMP-Informant)" \ ! DEF:users={rrd1}:isUserCount:AVERAGE \ ! DEF:actusers={rrd2}:isActUserCount:AVERAGE \ ! DEF:anonusers={rrd3}:isAnonUserCount:AVERAGE \ ! DEF:actanonusers={rrd4}:isActAnonUserCount:AVERAGE \ ! LINE2:users#0000ff:"Users :" \ ! GPRINT:users:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:users:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:users:MAX:"Max \\: %10.2lf %s\\n" \ ! LINE2:actusers#00ffff:"Act. Users :" \ ! GPRINT:actusers:AVERAGE:"Avg \\: %10.2lf %s" \ ! 
GPRINT:actusers:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:actusers:MAX:"Max \\: %10.2lf %s\\n" \ ! LINE2:anonusers#00ff00:"Anon. Users :" \ ! GPRINT:anonusers:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:anonusers:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:anonusers:MAX:"Max \\: %10.2lf %s\\n" \ ! LINE2:actanonusers#ffff00:"Act. Anon. Users :" \ ! GPRINT:actanonusers:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:actanonusers:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:actanonusers:MAX:"Max \\: %10.2lf %s\\n" ! ! report.sinf.mempool.name=Memory Pool (SNMP-Inf) ! report.sinf.mempool.columns=sinfMemPNonpagedByt, sinfMemPPagedBytes, sinfMemPPagedResByt ! report.sinf.mempool.type=nodeSnmp ! report.sinf.mempool.command=--title="Windows Memory Pool (SNMP-Informant)" \ ! DEF:mempoolnonpage={rrd1}:sinfMemPNonpagedByt:AVERAGE \ ! DEF:mempoolpaged={rrd2}:sinfMemPPagedBytes:AVERAGE \ ! DEF:mempoolpageres={rrd3}:sinfMemPPagedResByt:AVERAGE \ ! LINE2:mempoolnonpage#0000ff:"Non-Paged (Bytes)" \ ! GPRINT:mempoolnonpage:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:mempoolnonpage:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:mempoolnonpage:MAX:"Max \\: %10.2lf %s\\n" \ ! LINE2:mempoolpaged#00ff00:"Paged (Bytes)" \ ! GPRINT:mempoolpaged:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:mempoolpaged:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:mempoolpaged:MAX:"Max \\: %10.2lf %s\\n" \ ! LINE2:mempoolpageres#ff0000:"Page Res. (Bytes)" \ ! GPRINT:mempoolpageres:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:mempoolpageres:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:mempoolpageres:MAX:"Max \\: %10.2lf %s\\n" ! ! report.sinf.paging.name=Memory Paging (SNMP-Inf) ! report.sinf.paging.columns=sinfMemPageFaultsPS, sinfMemPagesInputPS, sinfMemPagesOutPS, sinfMemPagesPerSec ! report.sinf.paging.type=nodeSnmp ! report.sinf.paging.command=--title="Windows Memory Paging (SNMP-Informant)" \ ! DEF:pfaults={rrd1}:sinfMemPageFaultsPS:AVERAGE \ ! DEF:pinput={rrd2}:sinfMemPagesInputPS:AVERAGE \ ! DEF:poutput={rrd3}:sinfMemPagesOutPS:AVERAGE \ ! 
DEF:pages={rrd4}:sinfMemPagesPerSec:AVERAGE \ ! LINE2:pfaults#0000ff:"Faults (per sec)" \ ! GPRINT:pfaults:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:pfaults:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:pfaults:MAX:"Max \\: %10.2lf %s\\n" \ ! LINE2:pinput#00ff00:"Input (per sec)" \ ! GPRINT:pinput:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:pinput:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:pinput:MAX:"Max \\: %10.2lf %s\\n" \ ! LINE2:poutput#ff0000:"Output (per sec)" \ ! GPRINT:poutput:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:poutput:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:poutput:MAX:"Max \\: %10.2lf %s\\n" \ ! LINE2:pages#ff00ff:"Pages (per sec)" \ ! GPRINT:pages:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:pages:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:pages:MAX:"Max \\: %10.2lf %s\\n" ! ! report.sinf.processes.name=Windows Processes (SNMP-Inf) ! report.sinf.processes.columns=sinfProcesses, sinfThreads ! report.sinf.processes.type=nodeSnmp ! report.sinf.processes.command=--title="Windows Processes (SNMP-Informant)" \ ! DEF:processes={rrd1}:sinfProcesses:AVERAGE \ ! DEF:threads={rrd2}:sinfThreads:AVERAGE \ ! LINE2:processes#0000ff:"Processes" \ ! GPRINT:processes:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:processes:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:processes:MAX:"Max \\: %10.2lf %s\\n" \ ! LINE2:threads#00ff00:"Threads " \ ! GPRINT:threads:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:threads:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:threads:MAX:"Max \\: %10.2lf %s\\n" ! ! report.sinf.sysmem.name=System Memory (SNMP-Inf) ! report.sinf.sysmem.columns=sinfMemSysCacheResB, sinfMemSysCodeResB, sinfMemSysCodeTotB, sinfMemSysDrvResB, sinfMemSysDrvTotB ! report.sinf.sysmem.type=nodeSnmp ! report.sinf.sysmem.command=--title="Windows System Memory (SNMP-Informant)" \ ! DEF:syscacheres={rrd1}:sinfMemSysCacheResB:AVERAGE \ ! DEF:syscoderes={rrd2}:sinfMemSysCodeResB:AVERAGE \ ! DEF:syscodetot={rrd3}:sinfMemSysCodeTotB:AVERAGE \ ! DEF:sysdrvres={rrd4}:sinfMemSysDrvResB:AVERAGE \ ! 
DEF:sysdrvtot={rrd5}:sinfMemSysDrvTotB:AVERAGE \ ! LINE2:syscacheres#0000ff:"Resident Cache " \ ! GPRINT:syscacheres:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:syscacheres:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:syscacheres:MAX:"Max \\: %10.2lf %s\\n" \ ! LINE2:syscoderes#00ff00:"Resident Code " \ ! GPRINT:syscoderes:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:syscoderes:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:syscoderes:MAX:"Max \\: %10.2lf %s\\n" \ ! LINE2:syscodetot#ff0000:"Total Code " \ ! GPRINT:syscodetot:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:syscodetot:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:syscodetot:MAX:"Max \\: %10.2lf %s\\n" \ ! LINE2:sysdrvres#ffff00:"Resident Driver " \ ! GPRINT:sysdrvres:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:sysdrvres:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:sysdrvres:MAX:"Max \\: %10.2lf %s\\n" \ ! LINE2:sysdrvtot#ff00ff:"Resident Total " \ ! GPRINT:sysdrvtot:AVERAGE:"Avg \\: %10.2lf %s" \ ! GPRINT:sysdrvtot:MIN:"Min \\: %10.2lf %s" \ ! GPRINT:sysdrvtot:MAX:"Max \\: %10.2lf %s\\n" ! ! report.sinf.net.bits.name=Bits In/Out (SNMP Informant) ! report.sinf.net.bits.columns=netBytesRcvdPerSec,netBytesSentPerSec ! report.sinf.net.bits.type=sinfNetInstance ! report.sinf.net.bits.propertiesValues=sinfNetInstance ! report.sinf.net.bits.command=--title="Bits Per Second (SNMP Informant)" \ ! --vertical-label="Bytes per second" \ ! DEF:octIn={rrd1}:netBytesRcvdPerSec:AVERAGE \ ! DEF:octOut={rrd2}:netBytesSentPerSec:AVERAGE \ ! CDEF:rawbitsIn=octIn,8,* \ ! CDEF:rawbitsOut=octOut,8,* \ ! CDEF:rawbitsOutNeg=0,rawbitsOut,- \ ! CDEF:rawtotBits=octIn,octOut,+,8,* \ ! CDEF:bitsIn=rawbitsIn,UN,0,rawbitsIn,IF \ ! CDEF:bitsOut=rawbitsOut,UN,0,rawbitsOut,IF \ ! CDEF:totBits=rawtotBits,UN,0,rawtotBits,IF \ ! CDEF:outSum=bitsOut,{diffTime},* \ ! CDEF:inSum=bitsIn,{diffTime},* \ ! CDEF:totSum=totBits,{diffTime},* \ ! AREA:rawbitsIn#00ff00:"In " \ ! GPRINT:rawbitsIn:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:rawbitsIn:MIN:"Min \\: %8.2lf %s" \ ! 
GPRINT:rawbitsIn:MAX:"Max \\: %8.2lf %s\\n" \ ! AREA:rawbitsOutNeg#0000ff:"Out" \ ! GPRINT:rawbitsOut:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:rawbitsOut:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:rawbitsOut:MAX:"Max \\: %8.2lf %s\\n" \ ! GPRINT:inSum:AVERAGE:" Tot In \\: %8.2lf %s" \ ! GPRINT:outSum:AVERAGE:" Tot Out \\: %8.2lf %s" \ ! GPRINT:totSum:AVERAGE:" Tot \\: %8.2lf %s\\n" ! ! report.sinf.net.utilization.name=InOut Traffic (SNMP Informant) ! report.sinf.net.utilization.columns=netBytesRcvdPerSec,netBytesSentPerSec,netCurrentBandwidth ! report.sinf.net.utilization.type=sinfNetInstance ! report.sinf.net.utilization.propertiesValues=sinfNetInstance ! report.sinf.net.utilization.command=--title="In/Out Traffic Utilization (SNMP Informant)" \ ! --vertical-label="Percent utilization" \ ! DEF:octIn={rrd1}:netBytesRcvdPerSec:AVERAGE \ ! DEF:octOut={rrd2}:netBytesSentPerSec:AVERAGE \ ! DEF:curBw={rrd3}:netCurrentBandwidth:AVERAGE \ ! CDEF:percentIn=octIn,8,*,curBw,/,100,* \ ! CDEF:percentOut=octOut,8,*,curBw,/,100,* \ ! CDEF:percentOutNeg=0,percentOut,- \ ! AREA:percentIn#00ff00:"In " \ ! GPRINT:percentIn:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:percentIn:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:percentIn:MAX:"Max \\: %8.2lf %s\\n" \ ! AREA:percentOutNeg#0000ff:"Out" \ ! GPRINT:percentOut:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:percentOut:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:percentOut:MAX:"Max \\: %8.2lf %s\\n" ! ! report.sinf.net.discards.name=Discards In/Out (SNMP Informant) ! report.sinf.net.discards.columns=netPktsRcvdDiscard,netPktsOutDiscard ! report.sinf.net.discards.type=sinfNetInstance ! report.sinf.net.discards.propertiesValues=sinfNetInstance ! report.sinf.net.discards.command=--title="Discards In/Out (SNMP Informant)" \ ! --vertical-label="Discarded packets" \ ! DEF:octIn={rrd1}:netPktsRcvdDiscard:AVERAGE \ ! DEF:octOut={rrd2}:netPktsOutDiscard:AVERAGE \ ! CDEF:octOutNeg=0,octOut,- \ ! AREA:octIn#00ff00:"In " \ ! GPRINT:octIn:AVERAGE:"Avg \\: %8.2lf %s" \ ! 
GPRINT:octIn:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:octIn:MAX:"Max \\: %8.2lf %s\\n" \ ! AREA:octOutNeg#0000ff:"Out" \ ! GPRINT:octOut:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:octOut:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:octOut:MAX:"Max \\: %8.2lf %s\\n" ! ! report.sinf.net.errors.name=Errors In/Out (SNMP Informant) ! report.sinf.net.errors.columns=netPktsRcvdErrors,netPktsOutErrors ! report.sinf.net.errors.type=sinfNetInstance ! report.sinf.net.errors.propertiesValues=sinfNetInstance ! report.sinf.net.errors.command=--title="Errors In/Out (SNMP Informant)" \ ! --vertical-label="Errant packets" \ ! DEF:octIn={rrd1}:netPktsRcvdErrors:AVERAGE \ ! DEF:octOut={rrd2}:netPktsOutErrors:AVERAGE \ ! CDEF:octOutNeg=0,octOut,- \ ! AREA:octIn#00ff00:"In " \ ! GPRINT:octIn:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:octIn:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:octIn:MAX:"Max \\: %8.2lf %s\\n" \ ! AREA:octOutNeg#0000ff:"Out" \ ! GPRINT:octOut:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:octOut:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:octOut:MAX:"Max \\: %8.2lf %s\\n" ! ! report.sinf.net.unknown.name=Unknown Packets (SNMP Informant) ! report.sinf.net.unknown.columns=netPktsRcvdUnknown ! report.sinf.net.unknown.type=sinfNetInstance ! report.sinf.net.unknown.propertiesValues=sinfNetInstance ! report.sinf.net.unknown.command=--title="Unknown Packets (SNMP Informant)" \ ! --vertical-label="Unknown packets" \ ! DEF:octIn={rrd1}:netPktsRcvdUnknown:AVERAGE \ ! AREA:octIn#00ff00:"In " \ ! GPRINT:octIn:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:octIn:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:octIn:MAX:"Max \\: %8.2lf %s\\n" ! ! report.sinf.net.packets.name=Packets In/Out (SNMP Informant) ! report.sinf.net.packets.columns=netPktsRcvdPerSec,netPktsSentPerSec ! report.sinf.net.packets.type=sinfNetInstance ! report.sinf.net.packets.propertiesValues=sinfNetInstance ! report.sinf.net.packets.command=--title="Packets In/Out (SNMP Informant)" \ ! --vertical-label="Packets" \ ! DEF:octIn={rrd1}:netPktsRcvdPerSec:AVERAGE \ ! 
DEF:octOut={rrd2}:netPktsSentPerSec:AVERAGE \ ! CDEF:octOutNeg=0,octOut,- \ ! AREA:octIn#00ff00:"In " \ ! GPRINT:octIn:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:octIn:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:octIn:MAX:"Max \\: %8.2lf %s\\n" \ ! AREA:octOutNeg#0000ff:"Out" \ ! GPRINT:octOut:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:octOut:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:octOut:MAX:"Max \\: %8.2lf %s\\n" ! ! ###### ! ###### Reports for SUN devices ! ###### ! ! report.sun.cpuTime.name=CPU Time ! report.sun.cpuTime.columns=rsSystemProcessTime, rsUserProcessTime, rsNiceModeTime, rsIdleModeTime ! report.sun.cpuTime.type=nodeSnmp ! report.sun.cpuTime.command=--title="CPU Time Distribution" \ ! --units-exponent 0 --lower-limit 0 \ ! DEF:system={rrd1}:rsSystemProcessTime:AVERAGE \ ! DEF:user={rrd2}:rsUserProcessTime:AVERAGE \ ! DEF:nice={rrd3}:rsNiceModeTime:AVERAGE \ ! DEF:idle={rrd4}:rsIdleModeTime:AVERAGE \ ! AREA:system#aa3366:"System Process" \ ! GPRINT:system:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:system:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:system:MAX:"Max \\: %8.2lf %s\\n" \ ! STACK:user#77b0f0:"User Process " \ ! GPRINT:user:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:user:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:user:MAX:"Max \\: %8.2lf %s\\n" \ ! STACK:nice#ffd660:"Nice Mode " \ ! GPRINT:nice:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:nice:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:nice:MAX:"Max \\: %8.2lf %s\\n" \ ! STACK:idle#77ff00:"Idle Mode " \ ! GPRINT:idle:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:idle:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:idle:MAX:"Max \\: %8.2lf %s\\n" ! ! report.sun.diskxfer.name=Disk Transfer Rate (sun) ! report.sun.diskxfer.columns=rsDiskXfer1, rsDiskXfer2, rsDiskXfer3, rsDiskXfer4 ! report.sun.diskxfer.type=nodeSnmp ! report.sun.diskxfer.command=--title="Disk Transfer Rate" \ ! DEF:disk1={rrd1}:rsDiskXfer1:AVERAGE \ ! DEF:disk2={rrd2}:rsDiskXfer2:AVERAGE \ ! DEF:disk3={rrd3}:rsDiskXfer3:AVERAGE \ ! DEF:disk4={rrd4}:rsDiskXfer4:AVERAGE \ ! LINE1:disk1#aaaa00:"Disk 1" \ ! 
GPRINT:disk1:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:disk1:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:disk1:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE1:disk2#aa0000:"Disk 2" \ ! GPRINT:disk2:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:disk2:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:disk2:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE1:disk3#00aa00:"Disk 3" \ ! GPRINT:disk3:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:disk3:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:disk3:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE1:disk4#0000aa:"Disk 4" \ ! GPRINT:disk4:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:disk4:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:disk4:MAX:"Max \\: %8.2lf %s\\n" ! ! report.sun.interrupts.name=Interrupts (sun) ! report.sun.interrupts.columns=rsVIntr ! report.sun.interrupts.type=nodeSnmp ! report.sun.interrupts.command=--title="Interrupts" \ ! DEF:interrupts={rrd1}:rsVIntr:AVERAGE \ ! LINE2:interrupts#7f0000:"Interrupts" \ ! GPRINT:interrupts:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:interrupts:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:interrupts:MAX:"Max \\: %8.2lf %s\\n" ! ! report.sun.packets.name=Packet Statistics (sun) ! report.sun.packets.columns=rsIfInPackets, rsIfOutPackets, rsIfInErrors, rsIfOutErrors, rsIfCollisions ! report.sun.packets.type=nodeSnmp ! report.sun.packets.command=--title="Packet Statistics" \ ! DEF:inpkt={rrd1}:rsIfInPackets:AVERAGE \ ! DEF:outpkt={rrd2}:rsIfOutPackets:AVERAGE \ ! DEF:inerr={rrd3}:rsIfInErrors:AVERAGE \ ! DEF:outerr={rrd4}:rsIfOutErrors:AVERAGE \ ! DEF:coll={rrd5}:rsIfCollisions:AVERAGE \ ! LINE2:inpkt#889900:"Packets In " \ ! GPRINT:inpkt:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:inpkt:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:inpkt:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:outpkt#990000:"Packets Out " \ ! GPRINT:outpkt:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:outpkt:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:outpkt:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE1:inerr#bbcc00:"Input Errors " \ ! GPRINT:inerr:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:inerr:MIN:"Min \\: %8.2lf %s" \ ! 
GPRINT:inerr:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE1:outerr#dd0000:"Output Errors" \ ! GPRINT:outerr:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:outerr:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:outerr:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE1:coll#0000dd:"Collisions " \ ! GPRINT:coll:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:coll:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:coll:MAX:"Max \\: %8.2lf %s\\n" ! ! report.sun.paging.name=Paging (sun) ! report.sun.paging.columns=rsVPagesIn, rsVPagesOut ! report.sun.paging.type=nodeSnmp ! report.sun.paging.command=--title="Paging" \ ! DEF:pagesin={rrd1}:rsVPagesIn:AVERAGE \ ! DEF:pagesout={rrd2}:rsVPagesOut:AVERAGE \ ! LINE2:pagesin#00aa00:"Pages In " \ ! GPRINT:pagesin:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:pagesin:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:pagesin:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:pagesout#ff0000:"Pages Out " \ ! GPRINT:pagesout:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:pagesout:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:pagesout:MAX:"Max \\: %8.2lf %s\\n" ! ! report.sun.procs.name=System Performance (sun) ! report.sun.procs.columns=sunProcsInRunQueue, sunProcsBlocked, sunProcsInSwap ! report.sun.procs.type=nodeSnmp ! report.sun.procs.command=--title="System Performance" \ ! DEF:inQueue={rrd1}:sunProcsInRunQueue:AVERAGE \ ! DEF:blocked={rrd2}:sunProcsBlocked:AVERAGE \ ! DEF:inSwap={rrd3}:sunProcsInSwap:AVERAGE \ ! LINE2:inQueue#0000ff:"Procs in Run Queue" \ ! GPRINT:inQueue:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:inQueue:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:inQueue:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:blocked#ff0000:"Procs Blocked" \ ! GPRINT:blocked:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:blocked:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:blocked:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:inSwap#00ff00:"Procs In Swap" \ ! GPRINT:inSwap:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:inSwap:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:inSwap:MAX:"Max \\: %8.2lf %s\\n" ! ! report.sun.swap.name=Swap (sun) ! report.sun.swap.columns=sunAvailableSwap, sunAllocatedSwap ! 
report.sun.swap.type=nodeSnmp ! report.sun.swap.command=--title="Swap" \ ! --vertical-label="Bytes" \ ! --base=1024 \ ! DEF:availKB={rrd1}:sunAvailableSwap:AVERAGE \ ! DEF:allocatedKB={rrd2}:sunAllocatedSwap:AVERAGE \ ! CDEF:availBytes=availKB,1024,* \ ! CDEF:allocatedBytes=allocatedKB,1024,* \ ! LINE2:availBytes#0000ff:"Swap Available" \ ! GPRINT:availBytes:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:availBytes:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:availBytes:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:allocatedBytes#ff0000:"Swap Allocated" \ ! GPRINT:allocatedBytes:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:allocatedBytes:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:allocatedBytes:MAX:"Max \\: %8.2lf %s\\n" ! ! report.sun.swapping.name=Swapping (sun) ! report.sun.swapping.columns=rsVSwapIn, rsVSwapOut ! report.sun.swapping.type=nodeSnmp ! report.sun.swapping.command=--title="Swapping" \ ! DEF:swapin={rrd1}:rsVSwapIn:AVERAGE \ ! DEF:swapout={rrd2}:rsVSwapOut:AVERAGE \ ! LINE2:swapin#00aa00:"Swap In " \ ! GPRINT:swapin:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:swapin:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:swapin:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:swapout#ff0000:"Swap Out" \ ! GPRINT:swapout:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:swapout:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:swapout:MAX:"Max \\: %8.2lf %s\\n" ! ! ### ! ## JVM ! ### ! report.free.memory.name=Free Memory ! report.free.memory.columns=FreeMemory, TotalMemory ! report.free.memory.type=interfaceSnmp ! report.free.memory.command=--title="Memory Usage" \ ! DEF:freemem={rrd1}:FreeMemory:AVERAGE \ ! DEF:totalmem={rrd2}:TotalMemory:AVERAGE \ ! CDEF:percentfree=freemem,totalmem,/,100,* \ ! LINE2:percentfree#0000ff:"%FreeMemory" \ ! GPRINT:percentfree:AVERAGE:" Avg \\: %5.2lf %s" \ ! GPRINT:percentfree:MIN:"Min \\: %5.2lf %s" \ ! GPRINT:percentfree:MAX:"Max \\: %5.2lf %s\\n" ! ! report.jvm.thread.count.name=JVM Threading ! report.jvm.thread.count.columns=ThreadCount ! report.jvm.thread.count.type=interfaceSnmp ! 
report.jvm.thread.count.command=--title="JVM Thread Count" \ ! DEF:threads={rrd1}:ThreadCount:AVERAGE \ ! LINE2:threads#0000ff:"Threads" \ ! GPRINT:threads:AVERAGE:" Avg \\: %8.2lf %s\\n" ! ! report.jvm.thread.daemon.name=JVM Daemon Threads ! report.jvm.thread.daemon.columns=DaemonThreadCount ! report.jvm.thread.daemon.type=interfaceSnmp ! report.jvm.thread.daemon.command=--title="JVM Daemon Thread Count" \ ! DEF:dthreads={rrd1}:DaemonThreadCount:AVERAGE \ ! LINE2:dthreads#0000ff:"Daemon Threads" \ ! GPRINT:dthreads:AVERAGE:" Avg \\: %5.2lf %s\\n" ! ! report.jvm.thread.peak.name=JVM Peak Thread Count ! report.jvm.thread.peak.columns=PeakThreadCount ! report.jvm.thread.peak.type=interfaceSnmp ! report.jvm.thread.peak.command=--title="JVM Peak Thread Count" \ ! DEF:threads={rrd1}:PeakThreadCount:AVERAGE \ ! LINE2:threads#0000ff:"Threads" \ ! GPRINT:threads:AVERAGE:" Avg \\: %5.2lf %s\\n" ! ! ### ! ## JBoss ! ### ! report.jboss.defaultDS.name=Default DS ! report.jboss.defaultDS.columns=DefDS_AvailConns, DefDS_Conns, DefDS_InUseConns, DefDS_CreatedConns, DefDS_DestroyConns ! report.jboss.defaultDS.type=interfaceSnmp ! report.jboss.defaultDS.command=--title="DS Connection Pool" \ ! DEF:available={rrd1}:DefDS_AvailConns:AVERAGE \ ! DEF:total={rrd2}:DefDS_Conns:AVERAGE \ ! DEF:inuse={rrd3}:DefDS_InUseConns:AVERAGE \ ! DEF:created={rrd4}:DefDS_CreatedConns:AVERAGE \ ! DEF:destroyed={rrd5}:DefDS_DestroyConns:AVERAGE \ ! LINE2:available#0000ff:"Available" \ ! GPRINT:available:AVERAGE:" Avg \\: %5.2lf %s" \ ! GPRINT:available:MIN:"Min \\: %5.2lf %s" \ ! GPRINT:available:MAX:"Max \\: %5.2lf %s\\n" \ ! LINE2:total#00ff00:"Current " \ ! GPRINT:total:AVERAGE:" Avg \\: %5.2lf %s" \ ! GPRINT:total:MIN:"Min \\: %5.2lf %s" \ ! GPRINT:total:MAX:"Max \\: %5.2lf %s\\n" \ ! LINE2:inuse#ff0000:"InUse " \ ! GPRINT:inuse:AVERAGE:" Avg \\: %5.2lf %s" \ ! GPRINT:inuse:MIN:"Min \\: %5.2lf %s" \ ! GPRINT:inuse:MAX:"Max \\: %5.2lf %s\\n" \ ! LINE2:created#00fff0:"Created " \ ! 
GPRINT:created:AVERAGE:" Avg \\: %5.2lf %s" \ ! GPRINT:created:MIN:"Min \\: %5.2lf %s" \ ! GPRINT:created:MAX:"Max \\: %5.2lf %s\\n" \ ! LINE2:destroyed#c0a000:"Destroyed" \ ! GPRINT:destroyed:AVERAGE:" Avg \\: %5.2lf %s" \ ! GPRINT:destroyed:MIN:"Min \\: %5.2lf %s" \ ! GPRINT:destroyed:MAX:"Max \\: %5.2lf %s\\n" ! ! report.jboss.servletstatus.time.name=Servlet Status Time ! report.jboss.servletstatus.time.columns=SS_processTime, SS_minTime, SS_maxTime ! report.jboss.servletstatus.time.type=interfaceSnmp ! report.jboss.servletstatus.time.command=--title="Servlet Status - Time" \ ! DEF:process={rrd1}:SS_processTime:AVERAGE \ ! DEF:minTime={rrd2}:SS_minTime:AVERAGE \ ! DEF:maxTime={rrd3}:SS_maxTime:AVERAGE \ ! LINE2:process#0000ff:"ProcessTime" \ ! GPRINT:process:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:process:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:process:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:minTime#000ff0:"MinTime " \ ! GPRINT:minTime:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:minTime:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:minTime:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:maxTime#000ff0:"MaxTime " \ ! GPRINT:maxTime:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:maxTime:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:maxTime:MAX:"Max \\: %8.2lf %s\\n" ! ! report.jboss.servletstatus.cnt.name=Servlet Status Counts ! report.jboss.servletstatus.cnt.columns=SS_requests, SS_errors ! report.jboss.servletstatus.cnt.type=interfaceSnmp ! report.jboss.servletstatus.cnt.command=--title="Servlet Status - Counts" \ ! DEF:requests={rrd1}:SS_requests:AVERAGE \ ! DEF:errors={rrd2}:SS_errors:AVERAGE \ ! LINE2:errors#0000ff:"Errors " \ ! GPRINT:errors:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:errors:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:errors:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:requests#00ff00:"Requests " \ ! GPRINT:requests:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:requests:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:requests:MAX:"Max \\: %8.2lf %s\\n" ! ! report.jboss.grp.tp.name=GRP_throughput ! 
report.jboss.grp.tp.columns=GRP_bytesSent, GRP_bytesRec ! report.jboss.grp.tp.type=interfaceSnmp ! report.jboss.grp.tp.command=--title="HTTP Global Request Processor - Throughput" \ ! DEF:sent={rrd1}:GRP_bytesSent:AVERAGE \ ! DEF:rec={rrd2}:GRP_bytesRec:AVERAGE \ ! LINE2:sent#0000ff:"BytesSent" \ ! GPRINT:sent:AVERAGE:" Avg \\: %7.2lf %s" \ ! GPRINT:sent:MIN:"Min \\: %7.2lf %s" \ ! GPRINT:sent:MAX:"Max \\: %7.2lf %s\\n" \ ! LINE2:rec#00ff00:"BytesRec " \ ! GPRINT:rec:AVERAGE:" Avg \\: %7.2lf %s" \ ! GPRINT:rec:MIN:"Min \\: %7.2lf %s" \ ! GPRINT:rec:MAX:"Max \\: %7.2lf %s\\n" ! ! report.jboss.grp.cnt.name=GRP_counts ! report.jboss.grp.cnt.columns=GRP_requests, GRP_errors ! report.jboss.grp.cnt.type=interfaceSnmp ! report.jboss.grp.cnt.command=--title="HTTP Global Request Processor - Counts" \ ! DEF:req={rrd1}:GRP_requests:AVERAGE \ ! DEF:errors={rrd2}:GRP_errors:AVERAGE \ ! LINE2:req#0000ff:"Requests" \ ! GPRINT:req:AVERAGE:" Avg \\: %7.2lf %s" \ ! GPRINT:req:MIN:"Min \\: %7.2lf %s" \ ! GPRINT:req:MAX:"Max \\: %7.2lf %s\\n" \ ! LINE2:errors#ff0000:"Errors " \ ! GPRINT:errors:AVERAGE:" Avg \\: %7.2lf %s" \ ! GPRINT:errors:MIN:"Min \\: %7.2lf %s" \ ! GPRINT:errors:MAX:"Max \\: %7.2lf %s\\n" ! ! report.jboss.grp.time.name=GRP_time ! report.jboss.grp.time.columns=GRP_procTime ! report.jboss.grp.time.type=interfaceSnmp ! report.jboss.grp.time.command=--title="HTTP Global Request Processor - Time" \ ! DEF:proc={rrd1}:GRP_procTime:AVERAGE \ ! LINE2:proc#0000ff:"ProcessTime" \ ! GPRINT:proc:AVERAGE:" Avg \\: %6.2lf %s" \ ! GPRINT:proc:MIN:"Min \\: %6.2lf %s" \ ! GPRINT:proc:MAX:"Max \\: %6.2lf %s\\n" \ ! ! report.jboss.http.tp.name=Http Thread Pool ! report.jboss.http.tp.columns=BusyThreads, Threads ! report.jboss.http.tp.type=interfaceSnmp ! report.jboss.http.tp.command=--title="HTTP Thread Pool" \ ! DEF:busy={rrd1}:BusyThreads:AVERAGE \ ! DEF:threads={rrd2}:Threads:AVERAGE \ ! LINE2:busy#00ff00:"BusyThreads" \ ! GPRINT:busy:AVERAGE:" Avg \\: %5.2lf %s" \ ! 
GPRINT:busy:MIN:"Min \\: %5.2lf %s" \ ! GPRINT:busy:MAX:"Max \\: %5.2lf %s\\n" \ ! LINE2:threads#0000ff:"ThreadLimit" \ ! GPRINT:threads:AVERAGE:" Avg \\: %5.2lf %s" \ ! GPRINT:threads:MIN:"Min \\: %5.2lf %s" \ ! GPRINT:threads:MAX:"Max \\: %5.2lf %s\\n" ! ! report.airport.numClients.name=Airport Clients ! report.airport.numClients.columns=airportClients ! report.airport.numClients.type=nodeSnmp ! report.airport.numClients.command=--title="Airport Clients" \ ! DEF:airportClients={rrd1}:airportClients:AVERAGE \ ! LINE2:airportClients#00ff00:"Number of clients " \ ! GPRINT:airportClients:AVERAGE:" Avg \\: %5.2lf %s" \ ! GPRINT:airportClients:MIN:"Min \\: %5.2lf %s" \ ! GPRINT:airportClients:MAX:"Max \\: %5.2lf %s\\n" \ ! ! report.adsl.attenuation.name=ADSL Attenuation ! report.adsl.attenuation.columns=AtucCurAtn,AturCurAtn ! report.adsl.attenuation.type=interfaceSnmp ! report.adsl.attenuation.command=--title="ADSL Line Attenuation" \ ! --vertical-label dB \ ! DEF:AtucCurAtn={rrd1}:AtucCurAtn:AVERAGE \ ! DEF:AturCurAtn={rrd2}:AturCurAtn:AVERAGE \ ! CDEF:nearEndAttdB=AtucCurAtn,10,/ \ ! CDEF:remoteEndAttdB=AturCurAtn,10,/ \ ! LINE2:nearEndAttdB#0000ff:"near end " \ ! GPRINT:nearEndAttdB:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:nearEndAttdB:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:nearEndAttdB:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:remoteEndAttdB#ff0000:"remote end" \ ! GPRINT:remoteEndAttdB:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:remoteEndAttdB:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:remoteEndAttdB:MAX:"Max \\: %8.2lf %s\\n" ! ! report.adsl.power.name=ADSL Output Power ! report.adsl.power.columns=AtucCurOPwr,AturCurOPwr ! report.adsl.power.type=interfaceSnmp ! report.adsl.power.command=--title="ADSL Output Power" \ ! --vertical-label dBm \ ! DEF:AtucCurOPwr={rrd1}:AtucCurOPwr:AVERAGE \ ! DEF:AturCurOPwr={rrd2}:AturCurOPwr:AVERAGE \ ! CDEF:nearEndOPwrdB=AtucCurOPwr,10,/ \ ! CDEF:remoteEndOPwrdB=AturCurOPwr,10,/ \ ! LINE2:nearEndOPwrdB#0000ff:"near end " \ ! 
GPRINT:nearEndOPwrdB:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:nearEndOPwrdB:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:nearEndOPwrdB:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:remoteEndOPwrdB#ff0000:"remote end" \ ! GPRINT:remoteEndOPwrdB:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:remoteEndOPwrdB:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:remoteEndOPwrdB:MAX:"Max \\: %8.2lf %s\\n" ! ! report.adsl.snr.name=ADSL Noise Margin ! report.adsl.snr.columns=AtucCurSnr,AturCurSnr ! report.adsl.snr.type=interfaceSnmp ! report.adsl.snr.command=--title="ADSL Noise Margin" \ ! --vertical-label dB \ ! DEF:AtucCurSnr={rrd1}:AtucCurSnr:AVERAGE \ ! DEF:AturCurSnr={rrd2}:AturCurSnr:AVERAGE \ ! CDEF:nearEndSnrdB=AtucCurSnr,10,/ \ ! CDEF:remoteEndSnrdB=AturCurSnr,10,/ \ ! LINE2:nearEndSnrdB#0000ff:"near end " \ ! GPRINT:nearEndSnrdB:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:nearEndSnrdB:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:nearEndSnrdB:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:remoteEndSnrdB#ff0000:"remote end" \ ! GPRINT:remoteEndSnrdB:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:remoteEndSnrdB:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:remoteEndSnrdB:MAX:"Max \\: %8.2lf %s\\n" ! ! report.iisRequests.name=IIS Requests ! report.iisRequests.columns=iisgetreq,iispostreq,iisheadreq,iiscgireq ! report.iisRequests.type=nodeSnmp ! report.iisRequests.command=--title="IIS Requests" \ ! --vertical-label "Requests per second" \ ! --lower-limit 0 \ ! DEF:gets={rrd1}:iisgetreq:AVERAGE \ ! DEF:posts={rrd2}:iispostreq:AVERAGE \ ! DEF:heads={rrd3}:iisheadreq:AVERAGE \ ! DEF:cgis={rrd4}:iiscgireq:AVERAGE \ ! LINE1:gets#0000FF:"GETs " \ ! GPRINT:gets:AVERAGE:"Avg:%4.2lf" \ ! GPRINT:gets:MAX:"Max:%4.2lf" \ ! GPRINT:gets:MIN:"Min:%4.2lf\\n" \ ! LINE1:posts#00FF00:"POSTs" \ ! GPRINT:posts:AVERAGE:"Avg:%4.2lf" \ ! GPRINT:posts:MAX:"Max:%4.2lf" \ ! GPRINT:posts:MIN:"Min:%4.2lf \\n" \ ! LINE1:heads#FF0000:"HEADs" \ ! GPRINT:heads:AVERAGE:"Avg:%4.2lf" \ ! GPRINT:heads:MAX:"Max:%4.2lf" \ ! GPRINT:heads:MIN:"Min:%4.2lf \\n" \ ! LINE1:cgis#000000:"CGIs " \ ! 
GPRINT:cgis:AVERAGE:"Avg:%4.2lf" \ ! GPRINT:cgis:MAX:"Max:%4.2lf" \ ! GPRINT:cgis:MIN:"Min:%4.2lf" ! ! ! report.iisTraffic.name=IIS Traffic Stats ! report.iisTraffic.columns=iisbytes ! report.iisTraffic.type=nodeSnmp ! report.iisTraffic.command=--title="IIS Traffic Stats" \ ! --vertical-label Bytes \ ! --lower-limit 0 \ ! DEF:bytesTrans={rrd1}:iisbytes:AVERAGE \ ! LINE2:bytesTrans#0000FF:"Bytes " \ ! GPRINT:bytesTrans:AVERAGE:"Avg: %3.2lf" \ ! GPRINT:bytesTrans:MAX:"Max: %3.2lf" \ ! GPRINT:bytesTrans:MIN:"Min: %3.2lf" ! ! report.exchangeMessages.name=Exchange Message Transfer Volumes ! report.exchangeMessages.columns=exchangemsgsent,exchangemsgrecv,exchangemsgdeliv ! #report.exchangeMessages.columns=exchangemsgsent,exchangemsgdeliv ! report.exchangeMessages.type=nodeSnmp ! report.exchangeMessages.command=--title="Exchange Message Transfer Volumes" \ ! --vertical-label "Messages per second" \ ! --lower-limit 0 \ ! DEF:outmsgs={rrd1}:exchangemsgsent:AVERAGE \ ! DEF:inmsgs={rrd2}:exchangemsgrecv:AVERAGE \ ! DEF:localdel={rrd3}:exchangemsgdeliv:AVERAGE \ ! LINE2:outmsgs#00FF00:"Out:" \ ! GPRINT:outmsgs:AVERAGE:"Avg:%4.2lf" \ ! GPRINT:outmsgs:MAX:"Max:%4.2lf" \ ! GPRINT:outmsgs:MIN:"Min:%4.2lf\\n" \ ! LINE2:inmsgs#0000FF:"In :" \ ! GPRINT:inmsgs:AVERAGE:"Avg:%4.2lf" \ ! GPRINT:inmsgs:MAX:"Max:%4.2lf" \ ! GPRINT:inmsgs:MIN:"Min:%4.2lf\\n" \ ! LINE2:localdel#FF0000:"Local :" \ ! GPRINT:localdel:AVERAGE:"Avg:%4.2lf" \ ! GPRINT:localdel:MAX:"Max:%4.2lf" \ ! GPRINT:localdel:MIN:"Min:%4.2lf\\n" ! ! report.exchangeBytes.name=Exchange Byte Volumes ! report.exchangeBytes.columns=exchangebytesrecv,exchangebytessent ! report.exchangeBytes.type=nodeSnmp ! report.exchangeBytes.command=--title="Exchange Byte Volumes" \ ! --vertical-label "Bytes per second" \ ! --lower-limit 0 \ ! DEF:inbytes={rrd1}:exchangebytesrecv:AVERAGE \ ! DEF:outbytes={rrd2}:exchangebytessent:AVERAGE \ ! LINE2:inbytes#0000FF:"In: " \ ! GPRINT:inbytes:AVERAGE:"Avg:%4.2lf" \ ! GPRINT:inbytes:MAX:"Max: %4.2lf" \ ! 
GPRINT:inbytes:MIN:"Min: %4.2lf \\n" \ ! LINE2:outbytes#00FF00:"Out:" \ ! GPRINT:outbytes:AVERAGE:"Avg:%4.2lf" \ ! GPRINT:outbytes:MAX:"Max: %4.2lf" \ ! GPRINT:outbytes:MIN:"Min: %4.2lf\\n" ! ! report.exchangeRecipPartitions.name=Exchange Recipient Partitions ! report.exchangeRecipPartitions.columns=exchangereciplocal,exchangerecipremote ! report.exchangeRecipPartitions.type=nodeSnmp ! report.exchangeRecipPartitions.command=--title="Exchange Recipients" \ ! --vertical-label "Recipient Percentage" \ ! --lower-limit 0 --upper-limit 100 \ ! DEF:local={rrd1}:exchangereciplocal:AVERAGE \ ! DEF:remote={rrd2}:exchangerecipremote:AVERAGE \ ! LINE2:local#0000FF:"Local: " \ ! GPRINT:local:AVERAGE:"Avg:%4.1lf" \ ! GPRINT:local:MAX:"Max:%4.1lf" \ ! GPRINT:local:MIN:"Min:%4.1lf \\n" \ ! LINE2:remote#00FF00:"Remote:" \ ! GPRINT:remote:AVERAGE:"Avg:%4.1lf" \ ! GPRINT:remote:MAX:"Max:%4.1lf" \ ! GPRINT:remote:MIN:"Min:%4.1lf\\n" ! ! report.dnsThroughput.name=DNS Throughput ! report.dnsThroughput.columns=dnsqueryrecv,dnsresponsesent ! report.dnsThroughput.type=nodeSnmp ! report.dnsThroughput.command=--title="DNS Throughput" \ ! --vertical-label "Messages per second" \ ! DEF:query={rrd1}:dnsqueryrecv:AVERAGE \ ! DEF:response={rrd2}:dnsresponsesent:AVERAGE \ ! LINE2:query#0000FF:"Queries: " \ ! GPRINT:query:AVERAGE:"Avg:%4.1lf" \ ! GPRINT:query:MAX:"Max: %4.1lf" \ ! GPRINT:query:MIN:"Min: %4.1lf \\n" \ ! LINE2:response#00FF00:"Responses:" \ ! GPRINT:response:AVERAGE:"Avg:%4.1lf" \ ! GPRINT:response:MAX:"Max: %4.1lf" \ ! GPRINT:response:MIN:"Min: %4.1lf \\n" ! ! report.mssqlmemory.name=MSSQL Memory ! report.mssqlmemory.columns=sqlconnmemory,sqllockmemory,sqloptmemory,sqlsqlcachememory,sqltargetmemory,sqltotalmemory ! report.mssqlmemory.type=nodeSnmp ! report.mssqlmemory.command=--title="MSSQL Memory" \ ! --vertical-label "Memory" \ ! --lower-limit 0 \ ! DEF:conn={rrd1}:sqlconnmemory:AVERAGE \ ! DEF:lock={rrd2}:sqllockmemory:AVERAGE \ ! DEF:opt={rrd3}:sqloptmemory:AVERAGE \ ! 
DEF:sqlcache={rrd4}:sqlsqlcachememory:AVERAGE \ ! DEF:target={rrd5}:sqltargetmemory:AVERAGE \ ! DEF:total={rrd6}:sqltotalmemory:AVERAGE \ ! AREA:lock#00FF00:"Lock :" \ ! GPRINT:lock:AVERAGE:"Avg:%4.1lf" \ ! STACK:opt#0000FF:"Optimizer :" \ ! GPRINT:opt:AVERAGE:"Avg:%4.1lf" \ ! STACK:sqlcache#FFFF00:"SQL Cache :" \ ! GPRINT:sqlcache:AVERAGE:"Avg:%4.1lf\\n" \ ! STACK:conn#FF0000:"Conn :" \ ! GPRINT:conn:AVERAGE:"Avg:%4.1lf" \ ! LINE2:target#00FFFF:"Target :" \ ! GPRINT:target:AVERAGE:"Avg:%4.1lf" \ ! LINE2:total#000000:"Actual:" \ ! GPRINT:total:AVERAGE:"Avg:%4.1lf\\n" ! ! report.mssqlusage.name=MSSQL Usage ! report.mssqlusage.columns=sqluserconn,sqlactivetrans ! report.mssqlusage.type=nodeSnmp ! report.mssqlusage.command=--title="MSSQL Usage" \ ! --vertical-label "Current Count" \ ! DEF:conns={rrd1}:sqluserconn:AVERAGE \ ! DEF:trans={rrd2}:sqlactivetrans:AVERAGE \ ! LINE2:conns#0000FF:"Connections :" \ ! GPRINT:conns:AVERAGE:"Avg:%4.1lf" \ ! GPRINT:conns:MAX:"Max:%4.1lf" \ ! GPRINT:conns:MIN:"Min:%4.1lf \\n" \ ! LINE2:trans#00FF00:"Transactions :" \ ! GPRINT:trans:AVERAGE:"Avg:%4.1lf" \ ! GPRINT:trans:MAX:"Max:%4.1lf" \ ! GPRINT:trans:MIN:"Min:%4.1lf" ! ! report.mssqlhitratios.name=MSSQL Hit Ratios ! report.mssqlhitratios.columns=sqlbuffhitratio,sqlcachehitratio,sqllogcachehitratio ! report.mssqlhitratios.type=nodeSnmp ! report.mssqlhitratios.command=--title="MSSQL Hit Ratios" \ ! --vertical-label "Hit Ratio" \ ! --lower-limit 0 \ ! DEF:buff={rrd1}:sqlbuffhitratio:AVERAGE \ ! DEF:cache={rrd2}:sqlcachehitratio:AVERAGE \ ! DEF:logcache={rrd3}:sqllogcachehitratio:AVERAGE \ ! LINE2:buff#0000FF:"Buffer :" \ ! GPRINT:buff:AVERAGE:"Avg:%4.1lf" \ ! GPRINT:buff:MAX:"Max:%4.1lf" \ ! GPRINT:buff:MIN:"Min:%4.1lf" \ ! LINE2:cache#00FF00:"Cache :" \ ! GPRINT:cache:AVERAGE:"Avg:%4.1lf" \ ! GPRINT:cache:MAX:"Max:%4.1lf" \ ! GPRINT:cache:MIN:"Min:%4.1lf" \ ! LINE2:logcache#FF0000:"Log Cache :" \ ! GPRINT:logcache:AVERAGE:"Avg:%4.1lf" \ ! GPRINT:logcache:MAX:"Max:%4.1lf" \ ! 
GPRINT:logcache:MIN:"Min:%4.1lf" ! ! report.mssqllockwaittime.name=MSSQL Lock Wait Time ! report.mssqllockwaittime.columns=sqllockavgwaittime ! report.mssqllockwaittime.type=nodeSnmp ! report.mssqllockwaittime.command=--title="MSSQL Lock Avg Wait Time" \ ! --vertical-label "Seconds" \ ! DEF:wait={rrd1}:sqllockavgwaittime:AVERAGE \ ! LINE2:wait#0000FF:"Lock Average Wait :" \ ! GPRINT:wait:AVERAGE:"Avg:%4.1lf" ! ! report.windowsCPU.name=Windows CPU stats ! report.windowsCPU.columns=cpuProcTime,cpuIntrTime,cpuPrivTime,cpuUserTime ! report.windowsCPU.type=nodeSnmp ! report.windowsCPU.command=--title="Windows CPU stats" \ ! --vertical-label "% Time" \ ! --lower-limit 0 --upper-limit 100 \ ! DEF:proc={rrd1}:cpuProcTime:AVERAGE \ ! DEF:intr={rrd2}:cpuIntrTime:AVERAGE \ ! DEF:priv={rrd3}:cpuPrivTime:AVERAGE \ ! DEF:user={rrd4}:cpuUserTime:AVERAGE \ ! LINE2:intr#FF00FF:"Interrupt :" \ ! GPRINT:intr:AVERAGE:"Avg:%3.1lf" \ ! GPRINT:intr:MAX:"Max:%3.1lf" \ ! GPRINT:intr:MIN:"Min:%3.1lf \\n" \ ! LINE2:priv#FF0000:"Privileged:" \ ! GPRINT:priv:AVERAGE:"Avg:%3.1lf" \ ! GPRINT:priv:MAX:"Max:%3.1lf" \ ! GPRINT:priv:MIN:"Min:%3.1lf \\n" \ ! LINE2:user#00FF00:"User :" \ ! GPRINT:user:AVERAGE:"Avg:%3.1lf" \ ! GPRINT:user:MAX:"Max:%3.1lf" \ ! GPRINT:user:MIN:"Min:%3.1lf \\n" \ ! LINE1:proc#000000:"Total :" \ ! GPRINT:proc:AVERAGE:"Avg:%3.1lf" \ ! GPRINT:proc:MAX:"Max:%3.1lf" \ ! GPRINT:proc:MIN:"Min:%3.1lf" ! ! report.windowsProc.name=Windows Procs and Threads ! report.windowsProc.columns=winProcQueueLength,winProcesses,winThreads ! report.windowsProc.type=nodeSnmp ! report.windowsProc.command=--title="Windows Processes & Threads" \ ! --vertical-label "Count" \ ! --lower-limit 0 \ ! DEF:queue={rrd1}:winProcQueueLength:AVERAGE \ ! DEF:proc={rrd2}:winProcesses:AVERAGE \ ! DEF:thread={rrd3}:winThreads:AVERAGE \ ! CDEF:lqueue=queue,10,* \ ! CDEF:sthread=thread,10,/ \ ! LINE2:lqueue#0000FF:"Processor queue length:" \ ! GPRINT:queue:AVERAGE:"Avg:%3.1lf" \ ! 
GPRINT:queue:MAX:"Max:%3.1lf" \ ! GPRINT:queue:MIN:"Min:%3.1lf" \ ! LINE2:sthread#0000FF:"Threads:" \ ! GPRINT:thread:AVERAGE:"Avg:%3.1lf" \ ! GPRINT:thread:MAX:"Max:%3.1lf" \ ! GPRINT:thread:MIN:"Min:%3.1lf" \ ! LINE2:proc#0000FF:"Processes:" \ ! GPRINT:proc:AVERAGE:"Avg:%3.1lf" \ ! GPRINT:proc:MAX:"Max:%3.1lf" \ ! GPRINT:proc:MIN:"Min:%3.1lf" ! ! report.livecommsusers.name=Live Communications Server Users ! report.livecommsusers.columns=lcsusers ! report.livecommsusers.type=nodeSnmp ! report.livecommsusers.command=--title="Live Communications Server users" \ ! --vertical-label "Users" \ ! --lower-limit 0 \ ! DEF:users={rrd1}:lcsusers:AVERAGE \ ! LINE2:users#0000FF:"Connected:" \ ! GPRINT:users:AVERAGE:"Avg:%3.0lf" \ ! GPRINT:users:MAX:"Max:%3.0lf" \ ! GPRINT:users:MIN:"Min:%3.0lf" ! ! report.livecommsmessages.name=Live Communications Server Messages ! report.livecommsmessages.columns=lcsmessages ! report.livecommsmessages.type=nodeSnmp ! report.livecommsmessages.command=--title="Live Communications Server Messages" \ ! --vertical-label "Messages per second" \ ! --lower-limit 0 \ ! DEF:messages={rrd1}:lcsmessages:AVERAGE \ ! LINE2:messages#0000FF:"Messages:" \ ! GPRINT:messages:AVERAGE:"Avg:%3.4lf" \ ! GPRINT:messages:MAX:"Max:%3.4lf" \ ! GPRINT:messages:MIN:"Min:%3.4lf" ! ! report.mailmarshal.name=Mail Marshal Stats ! report.mailmarshal.columns=mmruleimposed,mmvirusdetect,relayattempts ! report.mailmarshal.type=nodeSnmp ! report.mailmarshal.command=--title="MailMarshal Stats" \ ! --vertical-label "Count per second" \ ! --lower-limit 0 \ ! DEF:blocked={rrd1}:mmruleimposed:AVERAGE \ ! DEF:virus={rrd2}:mmvirusdetect:AVERAGE \ ! DEF:relay={rrd3}:relayattempts:AVERAGE \ ! LINE2:blocked#0000FF:"Messages blocked:" \ ! GPRINT:blocked:AVERAGE:"Avg:%3.2lf" \ ! GPRINT:blocked:MAX:"Max:%3.2lf" \ ! GPRINT:blocked:MIN:"Min:%3.2lf" \ ! LINE2:virus#00FF00:"Viruses blocked:" \ ! GPRINT:virus:AVERAGE:"Avg:%3.2lf" \ ! GPRINT:virus:MAX:"Max:%3.2lf" \ ! 
GPRINT:virus:MIN:"Min:%3.2lf" \ ! LINE2:relay#FF0000:"Relay Attempts:" \ ! GPRINT:relay:AVERAGE:"Avg:%3.2lf" \ ! GPRINT:relay:MAX:"Max:%3.2lf" \ ! GPRINT:relay:MIN:"Min:%3.2lf" ! ! report.asterisk.activechan.name=Asterisk Active Channels ! report.asterisk.activechan.columns=astNumChannels ! report.asterisk.activechan.type=nodeSnmp ! report.asterisk.activechan.command=--title="Asterisk Active Channels" \ ! --lower-limit 0 \ ! DEF:active={rrd1}:astNumChannels:AVERAGE \ ! AREA:active#0000ff:"Active:" \ ! GPRINT:active:AVERAGE:"Avg:%3.2lf" \ ! GPRINT:active:MAX:"Max:%3.2lf" \ ! GPRINT:active:MIN:"Min:%3.2lf" ! ! report.asterisk.actbrchan.name=Asterisk Active & Bridged Channels ! report.asterisk.actbrchan.columns=astNumChannels,astNumChanBridge ! report.asterisk.actbrchan.type=nodeSnmp ! report.asterisk.actbrchan.command=--title="Asterisk Active & Bridged Channels" \ ! --lower-limit 0 \ ! DEF:active={rrd1}:astNumChannels:AVERAGE \ ! DEF:bridged={rrd2}:astNumChanBridge:AVERAGE \ ! CDEF:bridgedinv=bridged,-1,* \ ! AREA:active#00ff00:"Active:" \ ! GPRINT:active:AVERAGE:"Avg:%3.2lf" \ ! GPRINT:active:MAX:"Max:%3.2lf" \ ! GPRINT:active:MIN:"Min:%3.2lf\\n" \ ! AREA:bridgedinv#0000ff:"Bridged:" \ ! GPRINT:bridged:AVERAGE:"Avg:%3.2lf" \ ! GPRINT:bridged:MAX:"Max:%3.2lf" \ ! GPRINT:bridged:MIN:"Min:%3.2lf" ! ! report.asterisk.callsactive.name=Asterisk Calls Active ! report.asterisk.callsactive.columns=astCfgCallsActive ! report.asterisk.callsactive.type=nodeSnmp ! report.asterisk.callsactive.command=--title="Asterisk Calls Active" \ ! --lower-limit 0 \ ! DEF:active={rrd1}:astCfgCallsActive:AVERAGE \ ! AREA:active#0000ff:"Active:" \ ! GPRINT:active:AVERAGE:"Avg:%3.2lf" \ ! GPRINT:active:MAX:"Max:%3.2lf" \ ! GPRINT:active:MIN:"Min:%3.2lf" ! ! report.asterisk.callsprocessed.name=Asterisk Calls Processed ! report.asterisk.callsprocessed.columns=astCfgCallsPrcessed ! report.asterisk.callsprocessed.type=nodeSnmp ! 
report.asterisk.callsprocessed.command=--title="Asterisk Calls Processed" \ ! --lower-limit 0 \ ! DEF:active={rrd1}:astCfgCallsPrcessed:AVERAGE \ ! AREA:active#0000ff:"Processed:" \ ! GPRINT:active:AVERAGE:"Avg:%3.2lf" \ ! GPRINT:active:MAX:"Max:%3.2lf" \ ! GPRINT:active:MIN:"Min:%3.2lf" ! ! report.asterisk.chanbytype.name=Asterisk Channels by Type ! report.asterisk.chanbytype.columns=astChanTypeChannels ! report.asterisk.chanbytype.type=astChanType ! report.asterisk.chanbytype.propertiesValues=astChanTypeName ! report.asterisk.chanbytype.command=--title="Asterisk {astChanTypeName} Channels" \ ! --lower-limit 0 \ ! DEF:chan={rrd1}:astChanTypeChannels:AVERAGE \ ! AREA:chan#0000ff:"Channels" \ ! GPRINT:chan:AVERAGE:"Avg \\: %8.2lf" \ ! GPRINT:chan:MAX:"Max \\: %8.2lf" \ ! GPRINT:chan:MIN:"Min \\: %8.2lf" ! ! report.cisco.rttmon.jitter.rtt.name=RTT (Cisco IP SLA Jitter Monitor) ! report.cisco.rttmon.jitter.rtt.columns=jitterRTTSum,jitterNumRTT ! report.cisco.rttmon.jitter.rtt.type=rttMonCtrlAdminIndex ! report.cisco.rttmon.jitter.rtt.command=--title="Round-Trip Time (Cisco IP SLA Jitter Monitor)" \ ! --vertical-label ms \ ! DEF:sumRTT={rrd1}:jitterRTTSum:AVERAGE \ ! DEF:numRTT={rrd2}:jitterNumRTT:AVERAGE \ ! CDEF:avgRTT=sumRTT,numRTT,/ \ ! LINE2:avgRTT#0000ff:"Round-Trip Time (ms) " \ ! GPRINT:avgRTT:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:avgRTT:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:avgRTT:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.rttmon.jitter.icpif.name=ICPIF (Cisco IP SLA Jitter Monitor) ! report.cisco.rttmon.jitter.icpif.columns=jitterICPIF ! report.cisco.rttmon.jitter.icpif.type=rttMonCtrlAdminIndex ! report.cisco.rttmon.jitter.icpif.command=--title="ICPIF (Cisco IP SLA Jitter Monitor)" \ ! --vertical-label ICPIF \ ! DEF:ICPIF={rrd1}:jitterICPIF:AVERAGE \ ! LINE2:ICPIF#0000ff:"Actual " \ ! GPRINT:ICPIF:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:ICPIF:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:ICPIF:MAX:"Max \\: %8.2lf %s\\n" \ ! 
COMMENT:"[Guideline: Good 10; Adequate 20; Limiting 30]\\n" ! ! report.cisco.rttmon.jitter.mos.name=MOS (Cisco IP SLA Jitter Monitor) ! report.cisco.rttmon.jitter.mos.columns=jitterMOS ! report.cisco.rttmon.jitter.mos.type=rttMonCtrlAdminIndex ! report.cisco.rttmon.jitter.mos.command=--title="Estimated MOS (Cisco IP SLA Jitter Monitor)" \ ! --vertical-label "Estimated MOS" \ ! DEF:mosRaw={rrd1}:jitterMOS:AVERAGE \ ! CDEF:MOS=mosRaw,100,/ \ ! AREA:MOS#0000ff:"MOS " \ ! GPRINT:MOS:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:MOS:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:MOS:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.rttmon.jitter.owavg.name=Avg One-Way Time (Cisco IP SLA Jitter Monitor) ! report.cisco.rttmon.jitter.owavg.columns=jitterOWSumDS,jitterOWSumSD,jitterNumOW ! report.cisco.rttmon.jitter.owavg.type=rttMonCtrlAdminIndex ! report.cisco.rttmon.jitter.owavg.command=--title="Avg One-Way Time (Cisco IP SLA Jitter Monitor)" \ ! --vertical-label ms \ ! DEF:sumDS={rrd1}:jitterOWSumDS:AVERAGE \ ! DEF:sumSD={rrd2}:jitterOWSumSD:AVERAGE \ ! DEF:numOW={rrd3}:jitterNumOW:AVERAGE \ ! CDEF:timeDS=sumDS,numOW,/ \ ! CDEF:timeSD=sumSD,numOW,/,-1,* \ ! AREA:timeDS#00ff00:"In " \ ! GPRINT:timeDS:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:timeDS:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:timeDS:MAX:"Max \\: %8.2lf %s\\n" \ ! AREA:timeSD#0000ff:"Out " \ ! GPRINT:timeSD:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:timeSD:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:timeSD:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.rttmon.jitter.owmax.name=Max One-Way Time (Cisco IP SLA Jitter Monitor) ! report.cisco.rttmon.jitter.owmax.columns=jitterOWMaxDS,jitterOWMaxSD,jitterNumOW ! report.cisco.rttmon.jitter.owmax.type=rttMonCtrlAdminIndex ! report.cisco.rttmon.jitter.owmax.command=--title="Max One-Way Time (Cisco IP SLA Jitter Monitor)" \ ! --vertical-label ms \ ! DEF:maxDS={rrd1}:jitterOWMaxDS:AVERAGE \ ! DEF:maxSDRaw={rrd2}:jitterOWMaxSD:AVERAGE \ ! DEF:numOW={rrd3}:jitterNumOW:AVERAGE \ ! CDEF:maxSD=maxSDRaw,-1,* \ ! 
AREA:maxDS#00ff00:"In " \ ! GPRINT:maxDS:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:maxDS:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:maxDS:MAX:"Max \\: %8.2lf %s\\n" \ ! AREA:maxSD#0000ff:"Out " \ ! GPRINT:maxSD:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:maxSD:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:maxSD:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.rttmon.jitter.owmin.name=Min One-Way Time (Cisco IP SLA Jitter Monitor) ! report.cisco.rttmon.jitter.owmin.columns=jitterOWMinDS,jitterOWMinSD,jitterNumOW ! report.cisco.rttmon.jitter.owmin.type=rttMonCtrlAdminIndex ! report.cisco.rttmon.jitter.owmin.command=--title="Min One-Way Time (Cisco IP SLA Jitter Monitor)" \ ! --vertical-label ms \ ! DEF:minDS={rrd1}:jitterOWMinDS:AVERAGE \ ! DEF:minSDRaw={rrd2}:jitterOWMinSD:AVERAGE \ ! DEF:numOW={rrd3}:jitterNumOW:AVERAGE \ ! CDEF:minSD=minSDRaw,-1,* \ ! AREA:minDS#00ff00:"In " \ ! GPRINT:minDS:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:minDS:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:minDS:MAX:"Max \\: %8.2lf %s\\n" \ ! AREA:minSD#0000ff:"Out " \ ! GPRINT:minSD:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:minSD:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:minSD:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.rttmon.jitter.owstddev.name=One-Way Time StdDev (Cisco IP SLA Jitter Monitor) ! report.cisco.rttmon.jitter.owstddev.columns=jitterOWSum2DS,jitterOWSumDS,jitterOWSum2SD,jitterOWSumSD,jitterNumOW ! report.cisco.rttmon.jitter.owstddev.type=rttMonCtrlAdminIndex ! report.cisco.rttmon.jitter.owstddev.command=--title="One-Way Time StdDev (Cisco IP SLA Jitter Monitor)" \ ! --vertical-label="StdDev (ms)" \ ! --units-exponent 0 \ ! DEF:sum2DS={rrd1}:jitterOWSum2DS:AVERAGE \ ! DEF:sumDS={rrd2}:jitterOWSumDS:AVERAGE \ ! DEF:sum2SD={rrd3}:jitterOWSum2SD:AVERAGE \ ! DEF:sumSD={rrd4}:jitterOWSumSD:AVERAGE \ ! DEF:numOW={rrd5}:jitterNumOW:AVERAGE \ ! CDEF:stddevIn=sum2DS,numOW,/,sumDS,numOW,/,sumDS,numOW,/,*,-,SQRT \ ! CDEF:stddevOut=sum2SD,numOW,/,sumSD,numOW,/,sumSD,numOW,/,*,-,SQRT,-1,* \ ! AREA:stddevIn#00ff00:"In " \ ! 
GPRINT:stddevIn:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:stddevIn:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:stddevIn:MAX:"Max \\: %8.2lf %s\\n" \ ! AREA:stddevOut#0000ff:"Out " \ ! GPRINT:stddevOut:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:stddevOut:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:stddevOut:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.rttmon.jitter.avgj.name=Average Jitter (Cisco IP SLA Jitter Monitor) ! report.cisco.rttmon.jitter.avgj.columns=jitterSumPosDS,jitterSumNegDS,jitterSumPosSD,jitterSumNegSD,jitterNumRTT ! report.cisco.rttmon.jitter.avgj.type=rttMonCtrlAdminIndex ! report.cisco.rttmon.jitter.avgj.command=--title="Average Jitter (Cisco IP SLA Jitter Monitor)" \ ! --vertical-label ms \ ! --units-exponent 0 \ ! DEF:sumPosDS={rrd1}:jitterSumPosDS:AVERAGE \ ! DEF:sumNegDS={rrd2}:jitterSumNegDS:AVERAGE \ ! DEF:sumPosSD={rrd3}:jitterSumPosSD:AVERAGE \ ! DEF:sumNegSD={rrd4}:jitterSumNegSD:AVERAGE \ ! DEF:numRTT={rrd5}:jitterNumRTT:AVERAGE \ ! CDEF:avgIn=sumPosDS,sumNegDS,+,numRTT,/ \ ! CDEF:avgOut=sumPosSD,sumNegSD,+,numRTT,/ \ ! LINE2:avgIn#00ff00:"In " \ ! GPRINT:avgIn:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:avgIn:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:avgIn:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:avgOut#0000ff:"Out " \ ! GPRINT:avgOut:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:avgOut:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:avgOut:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.rttmon.jitter.pktloss.name=Packet Loss (Cisco IP SLA Jitter Monitor) ! report.cisco.rttmon.jitter.pktloss.columns=jitterPktLossSD,jitterPktLossDS,echoNumPackets ! report.cisco.rttmon.jitter.pktloss.type=rttMonCtrlAdminIndex ! report.cisco.rttmon.jitter.pktloss.command=--title="Packet Loss (Cisco IP SLA Jitter Monitor)" \ ! --vertical-label percent \ ! --lower-limit -100 --upper-limit 100 --rigid \ ! --units-exponent 0 \ ! DEF:pktLossSD={rrd1}:jitterPktLossSD:AVERAGE \ ! DEF:pktLossDS={rrd2}:jitterPktLossDS:AVERAGE \ ! DEF:numPackets={rrd3}:echoNumPackets:AVERAGE \ ! CDEF:pctLossIn=pktLossDS,numPackets,/,100,* \ ! 
CDEF:pctLossOut=pktLossSD,numPackets,/,100,*,-1,* \ ! AREA:pctLossIn#00ff00:"In " \ ! GPRINT:pctLossIn:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:pctLossIn:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:pctLossIn:MAX:"Max \\: %8.2lf %s\\n" \ ! AREA:pctLossOut#0000ff:"Out " \ ! GPRINT:pctLossOut:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:pctLossOut:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:pctLossOut:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.rttmon.jitter.maxjds.name=Max Jitter Dest-Src (Cisco IP SLA Jitter Monitor) ! report.cisco.rttmon.jitter.maxjds.columns=jitterMaxNegDS,jitterMaxPosDS,jitterNumRTT ! report.cisco.rttmon.jitter.maxjds.type=rttMonCtrlAdminIndex ! report.cisco.rttmon.jitter.maxjds.command=--title="Max Jitter Dest-Src (Cisco IP SLA Jitter Monitor)" \ ! --vertical-label ms \ ! --units-exponent 0 \ ! DEF:maxNegDSRaw={rrd1}:jitterMaxNegDS:AVERAGE \ ! DEF:maxPosDS={rrd2}:jitterMaxPosDS:AVERAGE \ ! DEF:numRTT={rrd3}:jitterNumRTT:AVERAGE \ ! CDEF:maxNegDS=maxNegDSRaw,-1,* \ ! AREA:maxPosDS#00ff00:"Positive " \ ! GPRINT:maxPosDS:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:maxPosDS:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:maxPosDS:MAX:"Max \\: %8.2lf %s\\n" \ ! AREA:maxNegDS#0000ff:"Negative " \ ! GPRINT:maxNegDS:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:maxNegDS:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:maxNegDS:MAX:"Max \\: %8.2lf %s\\n" ! ! report.cisco.rttmon.jitter.maxjsd.name=Max Jitter Src-Dest (Cisco IP SLA Jitter Monitor) ! report.cisco.rttmon.jitter.maxjsd.columns=jitterMaxNegSD,jitterMaxPosSD,jitterNumRTT ! report.cisco.rttmon.jitter.maxjsd.type=rttMonCtrlAdminIndex ! report.cisco.rttmon.jitter.maxjsd.command=--title="Max Jitter Src-Dest (Cisco IP SLA Jitter Monitor)" \ ! --vertical-label ms \ ! --units-exponent 0 \ ! DEF:maxNegSDRaw={rrd1}:jitterMaxNegSD:AVERAGE \ ! DEF:maxPosSD={rrd2}:jitterMaxPosSD:AVERAGE \ ! DEF:numRTT={rrd3}:jitterNumRTT:AVERAGE \ ! CDEF:maxNegSD=maxNegSDRaw,-1,* \ ! AREA:maxPosSD#00ff00:"Positive " \ ! GPRINT:maxPosSD:AVERAGE:"Avg \\: %8.2lf %s" \ ! 
GPRINT:maxPosSD:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:maxPosSD:MAX:"Max \\: %8.2lf %s\\n" \ ! AREA:maxNegSD#0000ff:"Negative " \ ! GPRINT:maxNegSD:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:maxNegSD:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:maxNegSD:MAX:"Max \\: %8.2lf %s\\n" ! ! ###### ! ###### Reports Generated from Mikrotik agents ! ###### ! ! report.mikrotik.temp.name=Temperature (Mikrotik) ! report.mikrotik.temp.columns=mtxrHlSensorTemp,mtxrHlCpuTemp,mtxrHlBoardTemp ! report.mikrotik.temp.type=nodeSnmp ! report.mikrotik.temp.command=--title="Mikrotik Temperature" \ ! DEF:val1={rrd1}:mtxrHlSensorTemp:AVERAGE \ ! DEF:val2={rrd2}:mtxrHlCpuTemp:AVERAGE \ ! DEF:val3={rrd3}:mtxrHlBoardTemp:AVERAGE \ ! LINE2:val1#0000ff:"Sensor Temperature" \ ! GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:val1:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:val1:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:val2#ff0000:"CPU Temperature" \ ! GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:val2:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:val2:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:val3#00aa00:"Board Temperature" \ ! GPRINT:val3:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:val3:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:val3:MAX:"Max \\: %8.2lf %s\\n" ! ! report.mikrotik.volt.name=Voltage (Mikrotik) ! report.mikrotik.volt.columns=mtxrHlCoreVolt,mtxrHlThreePtThreeV,mtxrHlFiveVolt,mtxrHlTwelveVolt ! report.mikrotik.volt.type=nodeSnmp ! report.mikrotik.volt.command=--title="Mikrotik Voltage" \ ! DEF:val1={rrd1}:mtxrHlCoreVolt:AVERAGE \ ! DEF:val2={rrd2}:mtxrHlThreePtThreeV:AVERAGE \ ! DEF:val3={rrd3}:mtxrHlFiveVolt:AVERAGE \ ! DEF:val4={rrd4}:mtxrHlTwelveVolt:AVERAGE \ ! LINE2:val1#0000ff:"Core Voltage" \ ! GPRINT:val1:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:val1:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:val1:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:val2#ff0000:"3.3V Supply" \ ! GPRINT:val2:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:val2:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:val2:MAX:"Max \\: %8.2lf %s\\n" \ ! 
LINE2:val3#00aa00:"5V Supply" \ ! GPRINT:val3:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:val3:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:val3:MAX:"Max \\: %8.2lf %s\\n" \ ! LINE2:val4#000000:"12V Supply" \ ! GPRINT:val4:AVERAGE:" Avg \\: %8.2lf %s" \ ! GPRINT:val4:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:val4:MAX:"Max \\: %8.2lf %s\\n" ! ! report.mikrotik.wlstatbps.name=Base Station Link Rate ! report.mikrotik.wlstatbps.columns=mtxrWlStatRxRate,mtxrWlStatTxRate ! report.mikrotik.wlstatbps.type=mtxrWlStatIndex ! report.mikrotik.wlstatbps.command=--title="Wireless Link Rate In/Out" \ ! --vertical-label="Megabits per second" \ ! DEF:bitsIn={rrd1}:mtxrWlStatRxRate:AVERAGE \ ! DEF:bitsOut={rrd2}:mtxrWlStatTxRate:AVERAGE \ ! CDEF:MbitsIn=bitsIn,1000000,/ \ ! CDEF:MbitsOut=bitsOut,1000000,/ \ ! CDEF:MbitsOutNeg=0,MbitsOut,- \ ! AREA:MbitsIn#00ff00:"In " \ ! GPRINT:MbitsIn:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:MbitsIn:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:MbitsIn:MAX:"Max \\: %8.2lf %s\\n" \ ! AREA:MbitsOutNeg#0000ff:"Out" \ ! GPRINT:MbitsOut:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:MbitsOut:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:MbitsOut:MAX:"Max \\: %8.2lf %s\\n" ! ! report.mikrotik.wlstatrssi.name=Mikrotik Wls Station Signal Level ! report.mikrotik.wlstatrssi.columns=mtxrWlStatStrength ! report.mikrotik.wlstatrssi.type=mtxrWlStatIndex ! report.mikrotik.wlstatrssi.command=--title="Wireless Station RSSI" \ ! --vertical-label="dBm" \ ! DEF:rssi={rrd1}:mtxrWlStatStrength:AVERAGE \ ! AREA:rssi#00ff00:"SigLevel " \ ! GPRINT:rssi:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:rssi:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:rssi:MAX:"Max \\: %8.2lf %s\\n" ! ! report.mikrotik.wlrtabrssi.name=Mikrotik Remote Station Signal Level ! report.mikrotik.wlrtabrssi.columns=mtxrWlRtabStrength ! report.mikrotik.wlrtabrssi.type=mtxrWlRtabAddr ! report.mikrotik.wlrtabrssi.command=--title="Wireless Station RSSI" \ ! --vertical-label="dBm" \ ! DEF:rssi={rrd1}:mtxrWlRtabStrength:AVERAGE \ ! 
AREA:rssi#00ff00:"SigLevel " \ ! GPRINT:rssi:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:rssi:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:rssi:MAX:"Max \\: %8.2lf %s\\n" ! ! report.mikrotik.wlrtabbit.name=Mikrotik Remote Wls Link Rate ! report.mikrotik.wlrtabbit.columns=mtxrWlRtabRxRate,mtxrWlRtabTxRate ! report.mikrotik.wlrtabbit.type=mtxrWlRtabAddr ! report.mikrotik.wlrtabbit.command=--title="Wireless Link Rate In/Out" \ ! --vertical-label="Megabits/Sec" \ ! DEF:bitsIn={rrd1}:mtxrWlRtabRxRate:AVERAGE \ ! DEF:bitsOut={rrd2}:mtxrWlRtabTxRate:AVERAGE \ ! CDEF:MbitsIn=bitsIn,1000000,/ \ ! CDEF:MbitsOut=bitsOut,1000000,/ \ ! CDEF:MbitsOutNeg=0,MbitsOut,- \ ! AREA:MbitsIn#00ff00:"In " \ ! GPRINT:MbitsIn:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:MbitsIn:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:MbitsIn:MAX:"Max \\: %8.2lf %s\\n" \ ! AREA:MbitsOutNeg#0000ff:"Out" \ ! GPRINT:MbitsOut:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:MbitsOut:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:MbitsOut:MAX:"Max \\: %8.2lf %s\\n" ! ! report.mikrotik.wlrtabbytes.name=Mikrotik Remote Wls Station Bytes ! report.mikrotik.wlrtabbytes.columns=mtxrWlRtabRxBytes,mtxrWlRtabTxBytes ! report.mikrotik.wlrtabbytes.type=mtxrWlRtabAddr ! report.mikrotik.wlrtabbytes.command=--title="Wireless Station Bytes In/Out (Mktk)" \ ! --vertical-label="Bytes/Sec" \ ! DEF:bytesIn={rrd1}:mtxrWlRtabRxBytes:AVERAGE \ ! DEF:bytesOut={rrd2}:mtxrWlRtabTxBytes:AVERAGE \ ! CDEF:bytesOutNeg=0,bytesOut,- \ ! AREA:bytesIn#00ff00:"In " \ ! GPRINT:bytesIn:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:bytesIn:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:bytesIn:MAX:"Max \\: %8.2lf %s\\n" \ ! AREA:bytesOutNeg#0000ff:"Out" \ ! GPRINT:bytesOut:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:bytesOut:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:bytesOut:MAX:"Max \\: %8.2lf %s\\n" ! ! report.mikrotik.wlrtabpkts.name=Mikrotik Remote Wls Station Packets ! report.mikrotik.wlrtabpkts.columns=mtxrWlRtabRxPackets,mtxrWlRtabTxPackets ! report.mikrotik.wlrtabpkts.type=mtxrWlRtabAddr ! 
report.mikrotik.wlrtabpkts.command=--title="Wireless Station Packets In/Out (Mktk)" \ ! --vertical-label="Packets/Sec" \ ! DEF:pktsIn={rrd1}:mtxrWlRtabRxPackets:AVERAGE \ ! DEF:pktsOut={rrd2}:mtxrWlRtabTxPackets:AVERAGE \ ! CDEF:pktsOutNeg=0,pktsOut,- \ ! AREA:pktsIn#00ff00:"In " \ ! GPRINT:pktsIn:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:pktsIn:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:pktsIn:MAX:"Max \\: %8.2lf %s\\n" \ ! AREA:pktsOutNeg#0000ff:"Out" \ ! GPRINT:pktsOut:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:pktsOut:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:pktsOut:MAX:"Max \\: %8.2lf %s\\n" ! ! report.apache.workers.name=Apache HTTP Workers ! report.apache.workers.columns=BusyWorkers,IdleWorkers ! report.apache.workers.type=nodeSnmp ! report.apache.workers.command=--title="Apache HTTP Workers" \ ! --vertical-label="workers" \ ! DEF:BusyWorkers={rrd1}:BusyWorkers:AVERAGE \ ! DEF:IdleWorkers={rrd2}:IdleWorkers:AVERAGE \ ! AREA:BusyWorkers#ff0000:"busy workers" \ ! GPRINT:BusyWorkers:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:BusyWorkers:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:BusyWorkers:MAX:"Max \\: %8.2lf %s\\n" \ ! STACK:IdleWorkers#0000ff:"idle workers" \ ! GPRINT:IdleWorkers:AVERAGE:"Avg \\: %8.2lf %s" \ ! GPRINT:IdleWorkers:MIN:"Min \\: %8.2lf %s" \ ! GPRINT:IdleWorkers:MAX:"Max \\: %8.2lf %s\\n" ! ! ## EOF