Commit 82799e49 by root

Initial commit

#!/bin/bash
#dell & hp
#0 = fault, 1 = warning, 2 = OK
PATH=$PATH:/usr/sbin:/sbin
step=$(echo $0|grep -Po '\d+(?=_)')
endpoint=$(ifconfig `route|grep '^default'|awk '{print $NF}'`|grep inet|awk '{print $2}'|awk -F ':' '{print $NF}'|head -n 1)
base_dir=$(cd $(dirname $0);pwd)
cd $base_dir
vendor=$(dmidecode|grep Vendor|awk -F'[: ]' '{print tolower($3)}')
tags="vendor=$vendor"
Json_join(){
metric=$1
value=$2
tags=${3:-""}
countertype=${4:-GAUGE}
jstr=$jstr"{\"endpoint\": \"${endpoint}\", \"metric\": \"${metric}\", \"value\": $value,\"step\": ${step}, \"tags\": \"${tags}\",\"counterType\":\"${countertype}\",\"timestamp\": $(date +%s)},"
}
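# One record appended by Json_join looks like this (endpoint/step/timestamp are illustrative):
# {"endpoint": "10.0.0.1", "metric": "hw.status", "value": 2,"step": 60, "tags": "vendor=dell","counterType":"GAUGE","timestamp": 1600000000},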
if [ "X$vendor" == "Xdell" ];then
test -f /usr/bin/hwinfo && value=2 || value=0
Json_join hw.status $value "$tags"
if [ $value -eq 2 ];then
PATH=/sbin:/bin:/usr/sbin:/usr/bin:/opt/dell/srvadmin/sbin:/opt/dell/srvadmin/bin
SHELL=/bin/bash
./hwcheck_dell.py -p -s $step
exit 0
fi
elif [ "X$vendor" == "Xhp" ];then
test -f /usr/sbin/hpacucli && value=2 || value=0
Json_join hw.status $value "$tags"
if [ $value -eq 2 ];then
hpacucli_log='/tmp/hpacucli_log'
temp_log='/tmp/temp_log'
hpacucli ctrl all show config > $hpacucli_log
vdisk=$(grep logicaldrive ${hpacucli_log}|grep -v OK|wc -l|awk '{print $1==0?2:0}')
pdisk=$(grep physicaldrive ${hpacucli_log}|grep -v OK|wc -l|awk '{print $1==0?2:0}')
memory=$(hpasmcli -s 'SHOW DIMM'|grep 'Status' |grep -v Ok|wc -l|awk '{print $1==0?2:0}')
fan=$(hpasmcli -s 'SHOW FANS'|grep '#'|grep -v Yes|wc -l|awk '{print $1==0?2:0}')
power=$(hpasmcli -s 'SHOW POWERSUPPLY'|grep Present|grep -v Yes|wc -l|awk '{print $1==0?2:0}')
cpu=$(hpasmcli -s 'SHOW SERVER' |grep Status|grep -v Ok|wc -l|awk '{print $1==0?2:0}')
raidcard=$(hpssacli ctrl all show status|grep Status|grep -v OK|wc -l|awk '{print $1==0?2:0}')
hpasmcli -s 'SHOW TEMP' >$temp_log
#power_temp=$(awk '/POWER_SUPPLY_BAY/{print $3}' $temp_log|awk -F"C" '{print $1}'|awk 'BEGIN {max = 0} {if ($1>max) max=$1 fi} END {print max}')
#system_temp=$(awk '/SYSTEM_BD/{print $3}' $temp_log|awk -F"C" '{print $1}' |awk 'BEGIN {max = 0} {if ($1>max) max=$1 fi} END {print max}')
ambient_temp=$(awk '/AMBIENT/{print $3}' $temp_log|awk -F"C" '{print $1}')
cpu_temp=$(awk '/PROCESSOR_ZONE/{print $3}' $temp_log|awk -F"C" '{print $1}' |awk 'BEGIN {max = 0} {if ($1>max) max=$1} END {print max}')
memory_temp=$(awk '/MEMORY_BD/{print $3}' $temp_log|awk -F"C" '{print $1}' |awk 'BEGIN {max = 0} {if ($1>max) max=$1} END {print max}')
Json_join hw.raidcard $raidcard $tags
Json_join hw.vdisk $vdisk $tags
Json_join hw.pdisk $pdisk $tags
Json_join hw.memory $memory $tags
Json_join hw.fan $fan $tags
Json_join hw.power $power $tags
Json_join hw.cpu $cpu $tags
Json_join hw.ambient_temp $ambient_temp $tags
Json_join hw.cpu_temp $cpu_temp $tags
Json_join hw.memory_temp $memory_temp $tags
#Json_join hw.power_temp $power_temp $tags
#Json_join hw.system_temp $system_temp $tags
fi
fi
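# Wrap the comma-separated records in brackets to form a JSON array; an empty $jstr becomes "[]".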
jstr=$(echo $jstr|sed 's/^/[/;s/,$/]/;s/\[$/[]/')
echo $jstr
#!/bin/bash
service=mysqld
step=$(echo $0|grep -Po '\d+(?=_)')
dirname=$(cd $(dirname $0);pwd|awk -F\/ '$0=$NF')
base_dir=$(cd $(dirname $0);pwd)
cd $base_dir
mysqld_max_con=13684
user="monitor"
pass="3IPSkSxDpiPUtlF"
host="127.0.0.1"
endpoint=$(ifconfig `route|grep '^default'|awk '{print $NF}'`|grep inet|awk '{print $2}'|awk -F ':' '{print $NF}'|head -n 1)
Json_join(){
metric=$1
value=$2
tags=${3:-""}
countertype=${4:-GAUGE}
jstr=$jstr"{\"endpoint\": \"${endpoint}\", \"metric\": \"${metric}\", \"value\": $value,\"step\": ${step}, \"tags\": \"${tags}\",\"counterType\":\"${countertype}\",\"timestamp\": $(date +%s)},"
}
metric_arrays=(metric_global_status metric_slave_status metric_global_variables)
metric_global_status=(Aborted_clients:compute Aborted_connects:compute Bytes_received:compute Bytes_sent:compute Com_lock_tables:compute Com_rollback:compute Com_delete:compute Com_insert:compute Com_insert_select:compute Com_load:compute Com_replace:compute Com_select:compute Com_update:compute Qcache_hits:compute Slow_queries:compute Threads_connected:undefined Threads_running:undefined Uptime:undefined Queries:compute)
metric_slave_status=(slave_status:undefined Seconds_Behind_Master:undefined)
#metric_global_variables=(auto_increment_increment:undefined auto_increment_offset:undefined autocommit:undefined binlog_format:undefined general_log:undefined gtid_mode:undefined query_cache_size:undefined query_cache_type:undefined read_only:undefined report_host:undefined report_port:undefined server_id:undefined server_uuid:undefined skip_name_resolve:undefined slave_skip_errors:undefined slow_query_log:undefined sql_mode:undefined time_zone:undefined tx_isolation:undefined version:undefined)
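# Metrics suffixed ":compute" are pushed with counterType=COUNTER; ":undefined" ones default to GAUGE (see Push_n9e).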
Get_current_value(){
flag=$1
case $flag in
global_status)
sql="show global status"
eval $(mysql -u$user -p$pass -h$host -P$port -e "$sql" 2>/dev/null|awk '{printf("mysqld_%s=\"%s\"\n",$1,$2)}')
;;
slave_status)
sql="show slave status\G"
eval $(mysql -u$user -p$pass -h$host -P$port -e "$sql" 2>/dev/null |grep -v row |grep -v '_Gtid_Set'| grep -v ':\w' | awk -F'[: ]+' 'NR>1&&$0="mysqld_"$2"="$3')
#mysqld_slave_status
if [ -n "$mysqld_Master_Host" ];then
[ "$mysqld_Slave_IO_Running" == 'Yes' -a "$mysqld_Slave_SQL_Running" == 'Yes' ] && mysqld_slave_status=1 || mysqld_slave_status=0
fi
;;
# global_variables)
# sql="show global variables"
# eval $(mysql -u$user -p$pass -h$host -P$port -e "$sql" 2>/dev/null|awk '{printf("mysqld_%s=\"%s\"\n",$1,$2)}')
# ;;
esac
}
Push_n9e(){
for metric_array in ${metric_arrays[@]};do
{
for pre_metric in $(eval echo \${$metric_array[@]});do
{
[[ "$pre_metric" =~ ':compute' ]] \
&& countertype="COUNTER" \
|| countertype="GAUGE"
key="${service}_${pre_metric%%:*}"
value=$(eval echo \$$key)
metric="mysql.${pre_metric%%:*}"
[ "X"$value == "X" -o "X"$value == "XNULL" ] && continue
Json_join $metric $value "port=$port" $countertype
}
done
}
done
}
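# Probe the instance with a no-op login and report mysql.alive, mysql.monitor_auth
# and mysql.connection_status (1 = problem, 0 = OK); a non-zero return lets Main skip the instance.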
Test_connection_status(){
tags="port=$port"
ret=$(/usr/bin/mysql -u$user -p$pass -h$host -P$port -e 'quit' 2>&1)
#alive
echo "$ret"|grep -qi "Can't connect" && value=1 || value=0
metric="mysql.alive"
Json_join $metric $value "port=$port"
[ $value -eq 1 ] && return $value
#monitor auth
echo "$ret"|grep -qi 'Access denied' && value=1 || value=0
metric="mysql.monitor_auth"
Json_join $metric $value "port=$port"
[ $value -eq 1 ] && return $value
# connection status
echo "$ret"|grep -qi 'Too many connections' && value=1 || value=0
metric="mysql.connection_status"
Json_join $metric $value "port=$port"
return $value
}
Main(){
for port in $(grep $service ../service_port 2>/dev/null|awk '$0=$2');do
{
Test_connection_status || continue
Get_current_value global_status
Get_current_value slave_status
#Get_current_value global_variables
Push_n9e
}
done
jstr=$(echo $jstr|sed 's/^/[/;s/,$/]/;s/\[$/[]/')
echo $jstr
}
Main
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
oracle_auth.conf 配置文件内容格式
items:
- {host: 127.0.0.1, port: 1521, user: monitor, passwd: 3IPSkSxDpiPUtlF, dbname: orcl11g}
"""
import os
import sys
import urllib2
import base64
import json
import re
import time
import yaml
import commands
step = int(os.path.basename(__file__).split('_')[0])
ts = int(time.time())
metric_list = ["check_active","rcachehit","dsksortratio","activeusercount","dbsize","dbfilesize","uptime","commits","rollbacks","deadlocks","redowrites","tblscans","tblrowsscans","indexffs","hparsratio","netsent","netresv","netroundtrips","logonscurrent","lastarclog","lastapplarclog","freebufwaits","bufbusywaits","logswcompletion","logfilesync","logprllwrite","enqueue","dbseqread","dbscattread","dbsnglwrite","dbprllwrite","directread","directwrite","latchfree","query_lock","query_redologs","query_rollbacks","query_sessions","query_sysmetrics","fra_use"]
counter_metric_list = ["commits","dbseqread","indexffs","logfilesync","logprllwrite","netresv","netroundtrips","netsent","redowrites","rollbacks","tblrowsscans","tblscans"]
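# Each name in metric_list corresponds to a method of the same name on OracleTool;
# values are collected below via getattr(odb, metric)().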
conf_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'oracle_auth.conf')
if not os.path.exists(conf_file): sys.exit(0)
code, endpoint = commands.getstatusoutput("ifconfig `route|grep '^default'|awk '{print $NF}'`|grep inet|awk '{print $2}'|awk -F ':' '{print $NF}'|head -n 1")
if code != 0: sys.exit(0)
f = open(conf_file)
y = yaml.safe_load(f)
f.close()
items = y["items"]
from oracle_pyora import OracleTool
data = []
for item in items:
    odb = OracleTool(item)
    tablespaces = odb.show_tablespaces()
    tablespaces_temp = odb.show_tablespaces_temp()
    volumes = odb.show_asm_volumes()
    for metric in metric_list:
        t = {}
        t['metric'] = 'oracle.%s' % metric
        t['endpoint'] = endpoint
        t['timestamp'] = ts
        t['step'] = step
        t['counterType'] = 'GAUGE'
        if metric in counter_metric_list: t['counterType'] = 'COUNTER'
        try:
            t['value'] = getattr(odb, metric)()
        except:
            t['value'] = -1
        data.append(t)
    for tablespace in tablespaces:
        t = {}
        t['metric'] = 'oracle.tablespace'
        t['endpoint'] = endpoint
        t['timestamp'] = ts
        t['step'] = step
        t['counterType'] = 'GAUGE'
        t['tags'] = 'name=%s' % tablespace
        try:
            t['value'] = odb.tablespace(tablespace)
        except:
            t['value'] = -1
        data.append(t)
    for tablespace_temp in tablespaces_temp:
        t = {}
        t['metric'] = 'oracle.tablespace_temp'
        t['endpoint'] = endpoint
        t['timestamp'] = ts
        t['step'] = step
        t['counterType'] = 'GAUGE'
        t['tags'] = 'name=%s' % tablespace_temp
        try:
            t['value'] = odb.tablespace_temp(tablespace_temp)
        except:
            t['value'] = -1
        data.append(t)
    for volume in volumes:
        t = {}
        t['metric'] = 'oracle.volume'
        t['endpoint'] = endpoint
        t['timestamp'] = ts
        t['step'] = step
        t['counterType'] = 'GAUGE'
        t['tags'] = 'name=%s' % volume
        try:
            t['value'] = odb.asm_volume_use(volume)
        except:
            t['value'] = -1
        data.append(t)
print(json.dumps(data))
#!/usr/bin/env python
#-*- coding:utf-8 -*-
# alarm value: =-1 or =1024
import os
import sys
import urllib2
import base64
import json
import time
import commands
step = int(os.path.basename(__file__).split('_')[0])
ts = int(time.time())
keys = ('messages_ready', 'messages_unacknowledged')
rates = ('ack', 'deliver', 'deliver_get', 'publish')
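# Per-queue fields read from the management API response (/api/queues):
# queue depth gauges for `keys`, and message_stats.<name>_details.rate for `rates`.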
code, num = commands.getstatusoutput("grep 'beam.smp 15672' /home/n9e/service_port 2>/dev/null|grep -v grep|wc -l")
if code != 0 or int(num) == 0: sys.exit(0)
code, endpoint = commands.getstatusoutput("ifconfig `route|grep '^default'|awk '{print $NF}'`|grep inet|awk '{print $2}'|awk -F ':' '{print $NF}'|head -n 1")
if code != 0: sys.exit(0)
timeout = 10
p = []
# see #issue4
base64string = base64.b64encode('monitor:monitor')
try:
    request = urllib2.Request("http://{}:15672/api/queues".format(endpoint))
    request.add_header("Authorization", "Basic %s" % base64string)
    result = urllib2.urlopen(request, timeout=timeout)
    data = json.loads(result.read())
except:
    # management API unreachable: report rabbitmq.alive=1024 and stop
    q = {}
    q["endpoint"] = endpoint
    q['timestamp'] = ts
    q['step'] = step
    q['counterType'] = "GAUGE"
    q['metric'] = 'rabbitmq.alive'
    q['value'] = 1024
    q['tags'] = ''
    p.append(q)
    print(json.dumps(p))
    sys.exit(0)
for queue in data:
    # ready and unack
    msg_total = 0
    for key in keys:
        if key not in queue: continue
        q = {}
        q["endpoint"] = endpoint
        q['timestamp'] = ts
        q['step'] = step
        q['counterType'] = "GAUGE"
        q['metric'] = 'rabbitmq.%s' % key
        q['tags'] = 'name={}'.format(queue['name'])
        q['value'] = int(queue[key])
        msg_total += q['value']
        p.append(q)
    # total
    q = {}
    q["endpoint"] = endpoint
    q['timestamp'] = ts
    q['step'] = step
    q['counterType'] = "GAUGE"
    q['metric'] = 'rabbitmq.messages_total'
    q['tags'] = 'name={}'.format(queue['name'])
    q['value'] = msg_total
    p.append(q)
    # rates
    for rate in rates:
        q = {}
        q["endpoint"] = endpoint
        q['timestamp'] = ts
        q['step'] = step
        q['counterType'] = "GAUGE"
        q['metric'] = 'rabbitmq.%s_rate' % rate
        q['tags'] = 'name={}'.format(queue['name'])
        try:
            q['value'] = int(queue['message_stats']["%s_details" % rate]['rate'])
        except:
            q['value'] = 0
        p.append(q)
q = {}
q["endpoint"] = endpoint
q['timestamp'] = ts
q['step'] = step
q['counterType'] = "GAUGE"
q['metric'] = 'rabbitmq.alive'
if p:
    q['value'] = 1
else:
    q['value'] = -1
q['tags'] = ''
p.append(q)
print(json.dumps(p))
#!/bin/bash
# If a password is required, create a redis_auth.conf file in this directory; each line is "port:password", one line per instance.
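# Example redis_auth.conf (illustrative values):
#   6379:myRedisPassword
#   6380:anotherPassword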
service="redis"
step=$(echo $0|grep -Po '\d+(?=_)')
dirname=$(cd $(dirname $0);pwd|awk -F\/ '$0=$NF')
base_dir=$(cd $(dirname $0);pwd)
cd $base_dir
ip=127.0.0.1
metrics_counter=(total_connections_received rejected_connections keyspace_hits keyspace_misses total_commands_processed total_net_input_bytes total_net_output_bytes expired_keys evicted_keys used_cpu_sys used_cpu_user)
endpoint=$(ifconfig `route|grep '^default'|awk '{print $NF}'`|grep inet|awk '{print $2}'|awk -F ':' '{print $NF}'|head -n 1)
Json_join(){
metric=$1
value=$2
tags=${3:-""}
countertype=${4:-GAUGE}
jstr=$jstr"{\"endpoint\": \"${endpoint}\", \"metric\": \"${metric}\", \"value\": $value,\"step\": ${step}, \"tags\": \"${tags}\",\"counterType\":\"${countertype}\",\"timestamp\": $(date +%s)},"
}
Get_current_value(){
cluster_enabled=0
eval $($redis_cli_cmd -h $ip -p $port info 2>/dev/null|tr -d "\r"|egrep ':'|awk -F: '{printf("%s=\"%s\"\n",$1,$2)}')
[ $cluster_enabled -eq 1 ] && eval $($redis_cli_cmd -h $ip -p $port cluster info 2>/dev/null|tr -d "\r"|sed 's/-/_/g'|egrep ':'|awk -F: '{printf("%s=\"%s\"\n",$1,$2)}')
eval $($redis_cli_cmd -h $ip -p $port info commandstats 2>/dev/null|tr -d "\r"|egrep ':'|sed 's/-/_/g'|awk -F[:=,] '{printf("%s_%s=%s\n%s_%s=%s\n%s_%s=%s\n",$1,$2,$3,$1,$4,$5,$1,$6,$7)}')
maxmemory=$($redis_cli_cmd -h $ip -p $port config get maxmemory 2>/dev/null|sed -n '2p')
test -z $maxmemory && maxmemory=$($redis_cli_cmd -h $ip -p $port sc_config get maxmemory 2>/dev/null|sed -n '2p')
maxclients=$($redis_cli_cmd -h $ip -p $port config get maxclients 2>/dev/null|sed -n '2p')
test -z $maxclients && maxclients=$($redis_cli_cmd -h $ip -p $port sc_config get maxclients 2>/dev/null|sed -n '2p')
}
Push_n9e(){
for metric in $(cat redis_metrics);do
countertype=GAUGE
[[ "${metrics_counter[@]}" =~ "$metric" ]] && countertype=COUNTER
[[ "$metric" =~ "cmdstat_" ]] && countertype=COUNTER
[[ "$metric" =~ "cluster_" ]] && [ $cluster_enabled -eq 0 ] && continue
value=$(eval echo \$$metric)
if [ "X"$metric == 'Xrole' ];then
if [ "X"$value == 'Xmaster' ];then
value=1
else
value=0
master_link_status_value=$(eval echo \$master_link_status)
[ "X"$master_link_status_value == 'Xup' ] && master_link_status_value=1 || master_link_status_value=0
Json_join redis.master_link_status ${master_link_status_value} "port=$port" $countertype
fi
fi
if [ "X"$metric == 'Xrdb_last_bgsave_status' ];then
[ "X"$value == 'Xok' ] && value=1 || value=0
fi
if [ "X"$metric == 'Xaof_last_bgrewrite_status' ];then
[ "X"$value == 'Xok' ] && value=1 || value=0
fi
if [ "X"$metric == 'Xaof_last_write_status' ];then
[ "X"$value == 'Xok' ] && value=1 || value=0
fi
if [ "X"$metric == 'Xcluster_state' ];then
[ "X"$value == 'Xok' ] && value=1 || value=0
fi
[ "X"$value == "X" ] && continue
Json_join redis.$metric $value "port=$port" $countertype
done
}
Test_alive(){
r=$($redis_cli_cmd -h $ip -p $port ping 2>/dev/null) && value=0 || value=1
Json_join redis.alive $value "port=$port" $countertype
[ "X$r" == 'XPONG' ] && value=0 || value=1
Json_join redis.auth_passwd $value "port=$port" $countertype
return $value
}
Test_slowlog(){
$redis_cli_cmd -h $ip -p $port slowlog get 1024 >/tmp/redis_slowlog_$port 2>/dev/null|| return 1
grep -v ^$ /tmp/redis_slowlog_$port >>/opt/redis_slowlog_$port
value=$($redis_cli_cmd -h $ip -p $port slowlog len 2>/dev/null)
$redis_cli_cmd -h $ip -p $port slowlog reset &>/dev/null
Json_join redis.slowlog_len $value "port=$port" $countertype
if [ $value -gt 0 ];then
timestamp=$(date '+%s')
time_str=${timestamp:0:8}
max_time=$(sed -n "/^$time_str/ {n;p}" /tmp/redis_slowlog_$port|sort -n|sed -n '$p')
fi
test -z $max_time && max_time=0
Json_join redis.slowlog_max_time $max_time "port=$port" $countertype
return 0
}
Test_keyspace_hit_ratio(){
value=0
keyspace_total=$((keyspace_hits + keyspace_misses))
[ $keyspace_total -ne 0 ] && value=$((100 * keyspace_hits / keyspace_total))
Json_join redis.keyspace_hit_ratio $value "port=$port" $countertype
return $value
}
Test_use_memory(){
value=0
[ $maxmemory -ne 0 ] && value=$((100 * used_memory / maxmemory))
Json_join redis.used_memory_percent $value "port=$port" $countertype
return $value
}
Test_use_connected_clients(){
value=0
[ $maxclients -ne 0 ] && value=$((100 * connected_clients / maxclients))
Json_join redis.used_connected_clients_percent $value "port=$port" $countertype
return $value
}
Test_use_cpu(){
pid=$(ps aux|grep redis-server|grep :${port}|awk '{print $2}')
value=$(top -b -n1| grep redis-server| grep ${pid}|awk '{print $9}')
Json_join redis.used_cpu_percent $value "port=$port" $countertype
}
Main(){
for port in $(grep $service ../service_port 2>/dev/null| grep -v redis-shake 2>/dev/null|awk '$0=$2');do
#which redis-cli &>/dev/null || exit 1
[ $port -gt 10000 ] && continue
test -f redis_auth.conf && passwd=$(awk -F: '/^'$port':/{print $2}' redis_auth.conf)
test -z $passwd && redis_cli_cmd='redis-cli' || redis_cli_cmd="redis-cli -a $passwd"
Test_alive || continue
Test_slowlog || continue
Get_current_value
Test_keyspace_hit_ratio
Test_use_connected_clients
Test_use_memory
#Test_use_cpu
Push_n9e
done
jstr=$(echo $jstr|sed 's/^/[/;s/,$/]/;s/\[$/[]/')
echo $jstr
}
Main
#!/bin/bash
step=$(echo $0|grep -Po '\d+(?=_)')
service_port=/home/n9e/service_port
base_dir=$(cd $(dirname $0);pwd)
cd $base_dir
endpoint=$(ifconfig `route|grep '^default'|awk '{print $NF}'`|grep inet|awk '{print $2}'|awk -F ':' '{print $NF}'|head -n 1)
Json_join(){
metric=$1
value=$2
tags=${3:-""}
countertype=${4:-GAUGE}
jstr=$jstr"{\"endpoint\": \"${endpoint}\", \"metric\": \"${metric}\", \"value\": $value,\"step\": ${step}, \"tags\": \"${tags}\",\"counterType\":\"${countertype}\",\"timestamp\": $(date +%s)},"
}
Check_plugin(){
Json_join plugin.myself.status 1
}
Check_ntpd(){
#alarm value: >30 or =1024
value=$(ntpq -pn 2>/dev/null|grep '^\*'|awk '{print $9}')
test -z "$value" && value=1024
Json_join sys.ntp.offset $value
}
Check_passwd(){
#alarm value: diff(#1)!=0
value=$(stat -c %Y /etc/shadow)
Json_join sys.passwd.modify $value
}
Check_uptime(){
#alarm value: diff(#1)<0
value=$(awk '$0=$1' /proc/uptime)
Json_join sys.uptime.duration $value
}
Service_port() {
service_port_tmp=/tmp/service_port_tmp
if [ ! -f $service_port ];then
ss -tnlp|gawk 'match($0,"[^ ]+:([0-9]+).+users:\\(\\(\"([^\"]+)",a)&&$0=a[2]" "a[1]' |sort |uniq > $service_port
else
ss -tnlp|gawk 'match($0,"[^ ]+:([0-9]+).+users:\\(\\(\"([^\"]+)",a)&&$0=a[2]" "a[1]' |sort |uniq > $service_port_tmp
grep -v -f $service_port $service_port_tmp >> $service_port
fi
}
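# Each line of $service_port is "<process> <listen_port>" (e.g. "mysqld 3306", illustrative);
# the mysql and redis collectors read the port from the second column.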
Check_plugin
Service_port
Check_ntpd
Check_passwd
Check_uptime
jstr=$(echo $jstr|sed 's/^/[/;s/,$/]/;s/\[$/[]/')
echo $jstr
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import time
import json
import commands
metric = ['usr', 'nice', 'sys', 'idle', 'iowait', 'irq', 'soft', 'steal', 'guest']
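# Per-CPU columns in /proc/stat, in order: user nice system idle iowait irq softirq steal guest
# (a trailing guest_nice column, when present, is skipped below).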
def get_cpu_core_stat(num):
    data = []
    for x in range(num):
        try:
            # anchor the pattern so cpu1 does not also match cpu10..cpu19
            handler = os.popen("cat /proc/stat | grep '^cpu%d '" % x)
        except:
            continue
        output = handler.read().strip().split()[1:]
        if len(output) < 9:
            continue
        index = 0
        for m in output:
            if len(metric) == index:
                break
            t = {}
            t['endpoint'] = endpoint
            t['metric'] = 'cpu.core.%s' % metric[index]
            t['timestamp'] = int(time.time())
            t['step'] = 60
            t['counterType'] = 'COUNTER'
            t['tags'] = 'core=%s' % str(x)
            t['value'] = int(m)
            index += 1
            data.append(t)
    return data
if __name__ == "__main__":
    code, endpoint = commands.getstatusoutput("ifconfig `route|grep '^default'|awk '{print $NF}'`|grep inet|awk '{print $2}'|awk -F ':' '{print $NF}'|head -n 1")
    if code != 0:
        sys.stderr.write('cannot get local ip')
        sys.exit(0)
    core_total = int(os.popen("cat /proc/cpuinfo | grep processor | tail -1 | cut -d' ' -f2").read().strip()) + 1
    print(json.dumps(get_cpu_core_stat(core_total)))
#!/bin/env python
#-*- coding:utf8 -*-
"""
mongodb_auth.conf 配置文件内容格式
items:
- {port: 20000, user: "",password: ""}
"""
import os
import sys
import time
import yaml
import json
import commands
# all falcon counter type metrics list
mongodb_counter_metric = ["asserts_msg",
"asserts_regular",
"asserts_rollovers",
"asserts_user",
"asserts_warning",
"page_faults",
"connections_totalCreated",
"locks_Global_acquireCount_ISlock",
"locks_Global_acquireCount_IXlock",
"locks_Global_acquireCount_Slock",
"locks_Global_acquireCount_Xlock",
"locks_Global_acquireWaitCount_ISlock",
"locks_Global_acquireWaitCount_IXlock",
"locks_Global_timeAcquiringMicros_ISlock",
"locks_Global_timeAcquiringMicros_IXlock",
"locks_Database_acquireCount_ISlock",
"locks_Database_acquireCount_IXlock",
"locks_Database_acquireCount_Slock",
"locks_Database_acquireCount_Xlock",
"locks_Collection_acquireCount_ISlock",
"locks_Collection_acquireCount_IXlock",
"locks_Collection_acquireCount_Xlock",
"opcounters_command",
"opcounters_insert",
"opcounters_delete",
"opcounters_update",
"opcounters_query",
"opcounters_getmore",
"opcountersRepl_command",
"opcountersRepl_insert",
"opcountersRepl_delete",
"opcountersRepl_update",
"opcountersRepl_query",
"opcountersRepl_getmore",
"network_bytesIn",
"network_bytesOut",
"network_numRequests",
"backgroundFlushing_flushes",
"backgroundFlushing_last_ms",
"cursor_timedOut",
"wt_cache_readinto_bytes",
"wt_cache_writtenfrom_bytes",
"wt_bm_bytes_read",
"wt_bm_bytes_written",
"wt_bm_blocks_read",
"wt_bm_blocks_written"]
ts = int(time.time())
step = int(os.path.basename(__file__).split('_')[0])
conf_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'mongodb_auth.conf')
if not os.path.exists(conf_file): sys.exit(0)
from mongodb_server import mongodbMonitor
code, endpoint = commands.getstatusoutput("ifconfig `route|grep '^default'|awk '{print $NF}'`|grep inet|awk '{print $2}'|awk -F ':' '{print $NF}'|head -n 1")
if code != 0: sys.exit(0)
f = open(conf_file)
y = yaml.safe_load(f)
f.close()
mongodb_items = y["items"]
mongodb_upate_list = []
for mongodb_ins in mongodb_items:
    mongodb_monitor = mongodbMonitor()
    mongodb_tag = "port=" + str(mongodb_ins["port"])
    err, conn = mongodb_monitor.mongodb_connect(
        host=endpoint, port=mongodb_ins["port"],
        user=mongodb_ins["user"], password=mongodb_ins["password"])
    if err != 0:
        key_item_dict = {
            "endpoint": endpoint,
            "metric": "mongodb.local_alive",
            "tags": mongodb_tag,
            "timestamp": ts,
            "value": 0,
            "step": step,
            "counterType": "GAUGE"}
        mongodb_upate_list.append(key_item_dict)
        # The instance is dead. Upload the "mongo_alive_local=0" key, then
        # continue.
        continue
    mongodb_dict = mongodb_monitor.get_mongo_monitor_data(conn)
    mongodb_dict_keys = mongodb_dict.keys()
    for mongodb_metric in mongodb_dict_keys:
        if mongodb_metric in mongodb_counter_metric:
            key_item_dict = {
                "endpoint": endpoint,
                "metric": "mongodb." + mongodb_metric,
                "tags": mongodb_tag,
                "timestamp": ts,
                "value": mongodb_dict[mongodb_metric],
                "step": step,
                "counterType": "COUNTER"}
        else:
            if mongodb_metric == 'mem_supported': continue
            key_item_dict = {
                "endpoint": endpoint,
                "metric": "mongodb." + mongodb_metric,
                "tags": mongodb_tag,
                "timestamp": ts,
                "value": mongodb_dict[mongodb_metric],
                "step": step,
                "counterType": "GAUGE"}
        mongodb_upate_list.append(key_item_dict)
print(json.dumps(mongodb_upate_list))
[Two additional file diffs in this commit are collapsed and not shown.]
cluster_state
cluster_slots_assigned
cluster_slots_ok
cluster_slots_pfail
cluster_slots_fail
cluster_known_nodes
cluster_size
cluster_current_epoch
cluster_my_epoch
cluster_stats_messages_sent
cluster_stats_messages_received
cmdstat_get_calls
cmdstat_get_usec
cmdstat_get_usec_per_call
cmdstat_set_calls
cmdstat_set_usec
cmdstat_set_usec_per_call
cmdstat_setex_calls
cmdstat_setex_usec
cmdstat_setex_usec_per_call
cmdstat_del_calls
cmdstat_del_usec
cmdstat_del_usec_per_call
cmdstat_exists_calls
cmdstat_exists_usec
cmdstat_exists_usec_per_call
cmdstat_hset_calls
cmdstat_hset_usec
cmdstat_hset_usec_per_call
cmdstat_hgetall_calls
cmdstat_hgetall_usec
cmdstat_hgetall_usec_per_call
cmdstat_keys_calls
cmdstat_keys_usec
cmdstat_keys_usec_per_call
cmdstat_ping_calls
cmdstat_ping_usec
cmdstat_ping_usec_per_call
cmdstat_info_calls
cmdstat_info_usec
cmdstat_info_usec_per_call
cmdstat_ttl_calls
cmdstat_ttl_usec
cmdstat_ttl_usec_per_call
cmdstat_config_calls
cmdstat_config_usec
cmdstat_config_usec_per_call
cmdstat_cluster_calls
cmdstat_cluster_usec
cmdstat_cluster_usec_per_call
cmdstat_slowlog_calls
cmdstat_slowlog_usec
cmdstat_slowlog_usec_per_call
uptime_in_seconds
uptime_in_days
hz
lru_clock
connected_clients
client_longest_output_list
client_biggest_input_buf
blocked_clients
used_memory
used_memory_rss
used_memory_peak
total_system_memory
used_memory_lua
maxmemory
mem_fragmentation_ratio
loading
rdb_changes_since_last_save
rdb_bgsave_in_progress
rdb_last_save_time
rdb_last_bgsave_status
rdb_last_bgsave_time_sec
rdb_current_bgsave_time_sec
aof_enabled
aof_rewrite_in_progress
aof_rewrite_scheduled
aof_last_rewrite_time_sec
aof_current_rewrite_time_sec
aof_last_bgrewrite_status
aof_last_write_status
total_connections_received
total_commands_processed
instantaneous_ops_per_sec
total_net_input_bytes
total_net_output_bytes
instantaneous_input_kbps
instantaneous_output_kbps
rejected_connections
sync_full
sync_partial_ok
sync_partial_err
expired_keys
evicted_keys
keyspace_hits
keyspace_misses
pubsub_channels
pubsub_patterns
latest_fork_usec
migrate_cached_sockets
role
connected_slaves
master_repl_offset
repl_backlog_active
repl_backlog_size
repl_backlog_first_byte_offset
repl_backlog_histlen
used_cpu_sys
used_cpu_user
used_cpu_sys_children
used_cpu_user_children
cluster_enabled
slave_read_only
maxclients