diff --git a/cookbooks/ganglia/templates/default/python_modules/infobright/python_modules/infobright.py b/cookbooks/ganglia/templates/default/python_modules/infobright/python_modules/infobright.py index 5e60fee1..759a8a15 100644 --- a/cookbooks/ganglia/templates/default/python_modules/infobright/python_modules/infobright.py +++ b/cookbooks/ganglia/templates/default/python_modules/infobright/python_modules/infobright.py @@ -56,7 +56,7 @@ MAX_UPDATE_TIME = 15 -def update_stats(get_brighthouse=True, get_brighthouse_engine=True, get_master=True, get_slave=True): +def update_stats(get_brighthouse=True, get_brighthouse_engine=True, get_main=True, get_subordinate=True): """ """ @@ -114,28 +114,28 @@ def update_stats(get_brighthouse=True, get_brighthouse_engine=True, get_master=T # try not to fail ? # BRIGHTHOUSE ENGINE status variables are pretty obscure get_brighthouse_engine = get_brighthouse_engine and variables.has_key('brighthouse_ini_controlmessages') - get_master = get_master and variables['log_bin'].lower() == 'on' + get_main = get_main and variables['log_bin'].lower() == 'on' if get_brighthouse_engine: logging.warn('get_brighthouse_engine status not implemented') - master_logs = tuple - if get_master: + main_logs = tuple + if get_main: cursor = conn.cursor(MySQLdb.cursors.Cursor) cursor.execute("SHOW MASTER LOGS") - master_logs = cursor.fetchall() + main_logs = cursor.fetchall() cursor.close() - slave_status = {} - if get_slave: + subordinate_status = {} + if get_subordinate: cursor = conn.cursor(MySQLdb.cursors.DictCursor) cursor.execute("SHOW SLAVE STATUS") res = cursor.fetchone() if res: for (k,v) in res.items(): - slave_status[k.lower()] = v + subordinate_status[k.lower()] = v else: - get_slave = False + get_subordinate = False cursor.close() cursor = conn.cursor(MySQLdb.cursors.DictCursor) @@ -199,8 +199,8 @@ def update_stats(get_brighthouse=True, get_brighthouse_engine=True, get_master=T 'select_range', 'select_range_check', 'select_scan', - 
'slave_open_temp_tables', - 'slave_retried_transactions', + 'slave_open_temp_tables', + 'slave_retried_transactions', 'slow_launch_threads', 'slow_queries', 'sort_range', @@ -222,7 +222,7 @@ def update_stats(get_brighthouse=True, get_brighthouse_engine=True, get_master=T 'qcache_free_blocks', 'qcache_free_memory', 'qcache_total_blocks', - 'slave_open_temp_tables', + 'slave_open_temp_tables', 'threads_cached', 'threads_connected', 'threads_running', @@ -298,32 +298,32 @@ def update_stats(get_brighthouse=True, get_brighthouse_engine=True, get_master=T infobright_stats['open_files_used'] = int(global_status['open_files']) / int(variables['open_files_limit']) - # process master logs - if get_master: - infobright_stats['binlog_count'] = len(master_logs) - infobright_stats['binlog_space_current'] = master_logs[-1][1] - #infobright_stats['binlog_space_total'] = sum((long(s[1]) for s in master_logs)) + # process main logs + if get_main: + infobright_stats['binlog_count'] = len(main_logs) + infobright_stats['binlog_space_current'] = main_logs[-1][1] + #infobright_stats['binlog_space_total'] = sum((long(s[1]) for s in main_logs)) infobright_stats['binlog_space_total'] = 0 - for s in master_logs: + for s in main_logs: infobright_stats['binlog_space_total'] += int(s[1]) - infobright_stats['binlog_space_used'] = float(master_logs[-1][1]) / float(variables['max_binlog_size']) * 100 - - # process slave status - if get_slave: - infobright_stats['slave_exec_master_log_pos'] = slave_status['exec_master_log_pos'] - #infobright_stats['slave_io'] = 1 if slave_status['slave_io_running'].lower() == "yes" else 0 - if slave_status['slave_io_running'].lower() == "yes": - infobright_stats['slave_io'] = 1 + infobright_stats['binlog_space_used'] = float(main_logs[-1][1]) / float(variables['max_binlog_size']) * 100 + + # process subordinate status + if get_subordinate: + infobright_stats['subordinate_exec_main_log_pos'] = subordinate_status['exec_master_log_pos'] + 
#infobright_stats['subordinate_io'] = 1 if subordinate_status['slave_io_running'].lower() == "yes" else 0 + if subordinate_status['slave_io_running'].lower() == "yes": + infobright_stats['subordinate_io'] = 1 else: - infobright_stats['slave_io'] = 0 - #infobright_stats['slave_sql'] = 1 if slave_status['slave_sql_running'].lower() =="yes" else 0 - if slave_status['slave_sql_running'].lower() == "yes": - infobright_stats['slave_sql'] = 1 + infobright_stats['subordinate_io'] = 0 + #infobright_stats['subordinate_sql'] = 1 if subordinate_status['slave_sql_running'].lower() =="yes" else 0 + if subordinate_status['slave_sql_running'].lower() == "yes": + infobright_stats['subordinate_sql'] = 1 else: - infobright_stats['slave_sql'] = 0 - infobright_stats['slave_lag'] = slave_status['seconds_behind_master'] - infobright_stats['slave_relay_log_pos'] = slave_status['relay_log_pos'] - infobright_stats['slave_relay_log_space'] = slave_status['relay_log_space'] + infobright_stats['subordinate_sql'] = 0 + infobright_stats['subordinate_lag'] = subordinate_status['seconds_behind_master'] + infobright_stats['subordinate_relay_log_pos'] = subordinate_status['relay_log_pos'] + infobright_stats['subordinate_relay_log_space'] = subordinate_status['relay_log_space'] logging.debug('success updating stats') @@ -370,8 +370,8 @@ def metric_init(params): REPORT_BRIGHTHOUSE = str(params.get('get_brighthouse', True)) == "True" REPORT_BRIGHTHOUSE_ENGINE = str(params.get('get_brighthouse_engine', True)) == "True" - REPORT_MASTER = str(params.get('get_master', True)) == "True" - REPORT_SLAVE = str(params.get('get_slave', True)) == "True" + REPORT_MASTER = str(params.get('get_main', True)) == "True" + REPORT_SLAVE = str(params.get('get_subordinate', True)) == "True" logging.debug("init: " + str(params)) @@ -391,9 +391,9 @@ def metric_init(params): delta_per_second = True mysql_stats_descriptions = {} - master_stats_descriptions = {} + main_stats_descriptions = {} 
brighthouse_stats_descriptions = {} - slave_stats_descriptions = {} + subordinate_stats_descriptions = {} mysql_stats_descriptions = dict( aborted_clients = { @@ -652,15 +652,15 @@ def metric_init(params): 'units': 'joins', }, - slave_open_temp_tables = { - 'description': 'The number of temporary tables that the slave SQL thread currently has open', + subordinate_open_temp_tables = { + 'description': 'The number of temporary tables that the subordinate SQL thread currently has open', 'value_type': 'float', 'units': 'tables', 'slope': 'both', }, - slave_retried_transactions = { - 'description': 'The total number of times since startup that the replication slave SQL thread has retried transactions', + subordinate_retried_transactions = { + 'description': 'The total number of times since startup that the replication subordinate SQL thread has retried transactions', 'value_type': 'float', 'units': 'count', }, @@ -974,7 +974,7 @@ def metric_init(params): if REPORT_MASTER: - master_stats_descriptions = dict( + main_stats_descriptions = dict( binlog_count = { 'description': "Number of binary logs", 'units': 'logs', @@ -1002,34 +1002,34 @@ def metric_init(params): ) if REPORT_SLAVE: - slave_stats_descriptions = dict( - slave_exec_master_log_pos = { - 'description': "The position of the last event executed by the SQL thread from the master's binary log", + subordinate_stats_descriptions = dict( + subordinate_exec_main_log_pos = { + 'description': "The position of the last event executed by the SQL thread from the main's binary log", 'units': 'bytes', 'slope': 'both', }, - slave_io = { - 'description': "Whether the I/O thread is started and has connected successfully to the master", + subordinate_io = { + 'description': "Whether the I/O thread is started and has connected successfully to the main", 'value_type': 'uint8', 'units': 'True/False', 'slope': 'both', }, - slave_lag = { + subordinate_lag = { 'description': "Replication Lag", 'units': 'secs', 'slope': 'both', }, - 
slave_relay_log_pos = { + subordinate_relay_log_pos = { 'description': "The position up to which the SQL thread has read and executed in the current relay log", 'units': 'bytes', 'slope': 'both', }, - slave_sql = { - 'description': "Slave SQL Running", + subordinate_sql = { + 'description': "Subordinate SQL Running", 'value_type': 'uint8', 'units': 'True/False', 'slope': 'both', @@ -1042,7 +1042,7 @@ def metric_init(params): update_stats(REPORT_BRIGHTHOUSE, REPORT_BRIGHTHOUSE_ENGINE, REPORT_MASTER, REPORT_SLAVE) - for stats_descriptions in (brighthouse_stats_descriptions, master_stats_descriptions, mysql_stats_descriptions, slave_stats_descriptions): + for stats_descriptions in (brighthouse_stats_descriptions, main_stats_descriptions, mysql_stats_descriptions, subordinate_stats_descriptions): for label in stats_descriptions: if infobright_stats.has_key(label): format = '%u' @@ -1089,8 +1089,8 @@ def metric_cleanup(): parser.add_option("-S", "--socket", dest="unix_socket", help="unix_socket", default="") parser.add_option("--no-brighthouse", dest="get_brighthouse", action="store_false", default=True) parser.add_option("--no-brighthouse-engine", dest="get_brighthouse_engine", action="store_false", default=False) - parser.add_option("--no-master", dest="get_master", action="store_false", default=True) - parser.add_option("--no-slave", dest="get_slave", action="store_false", default=True) + parser.add_option("--no-main", dest="get_main", action="store_false", default=True) + parser.add_option("--no-subordinate", dest="get_subordinate", action="store_false", default=True) parser.add_option("-b", "--gmetric-bin", dest="gmetric_bin", help="path to gmetric binary", default="/usr/bin/gmetric") parser.add_option("-c", "--gmond-conf", dest="gmond_conf", help="path to gmond.conf", default="/etc/ganglia/gmond.conf") parser.add_option("-g", "--gmetric", dest="gmetric", help="submit via gmetric", action="store_true", default=False) @@ -1105,8 +1105,8 @@ def metric_cleanup(): 
'port': options.port, 'get_brighthouse': options.get_brighthouse, 'get_brighthouse_engine': options.get_brighthouse_engine, - 'get_master': options.get_master, - 'get_slave': options.get_slave, + 'get_main': options.get_main, + 'get_subordinate': options.get_subordinate, 'unix_socket': options.unix_socket, }) diff --git a/cookbooks/ganglia/templates/default/python_modules/jenkins/python_modules/jenkins.py b/cookbooks/ganglia/templates/default/python_modules/jenkins/python_modules/jenkins.py index ac4fffbf..bfbce47d 100644 --- a/cookbooks/ganglia/templates/default/python_modules/jenkins/python_modules/jenkins.py +++ b/cookbooks/ganglia/templates/default/python_modules/jenkins/python_modules/jenkins.py @@ -157,17 +157,17 @@ def metric_init(params): 'value_type': 'float', 'format': '%.3f', 'units': 'executors', - 'description': 'Number of busy executors (master and slaves)'}, + 'description': 'Number of busy executors (main and subordinates)'}, jenkins_overallload_queue_length = { 'value_type': 'float', 'format': '%.3f', 'units': 'queued items', - 'description': 'Length of the queue (master and slaves)'}, + 'description': 'Length of the queue (main and subordinates)'}, jenkins_overallload_total_executors = { 'value_type': 'float', 'format': '%.3f', 'units': 'executors', - 'description': 'Number of executors (master and slaves)'}, + 'description': 'Number of executors (main and subordinates)'}, jenkins_jobs_total = { 'description': 'Total number of jobs'}, jenkins_jobs_blue = { diff --git a/cookbooks/ganglia/templates/default/python_modules/mongodb/python_modules/mongodb.py b/cookbooks/ganglia/templates/default/python_modules/mongodb/python_modules/mongodb.py index 7ef34bed..b7168a4d 100755 --- a/cookbooks/ganglia/templates/default/python_modules/mongodb/python_modules/mongodb.py +++ b/cookbooks/ganglia/templates/default/python_modules/mongodb/python_modules/mongodb.py @@ -133,10 +133,10 @@ def get_rate(name): def get_opcounter_rate(name): """Return change over time 
for an opcounter metric""" - master_rate = get_rate(name) + main_rate = get_rate(name) repl_rate = get_rate(name.replace('opcounters_', 'opcountersRepl_')) - return master_rate + repl_rate + return main_rate + repl_rate def get_globalLock_ratio(name): @@ -175,8 +175,8 @@ def get_connections_current_ratio(name): return result -def get_slave_delay(name): - """Return the replica set slave delay""" +def get_subordinate_delay(name): + """Return the replica set subordinate delay""" # get metrics metrics = get_metrics()[0] @@ -185,17 +185,17 @@ def get_slave_delay(name): if 'rs_status_myState' not in metrics['data'] or metrics['data']['rs_status_myState'] != 2: result = 0 - # compare my optime with the master's + # compare my optime with the main's else: - master = {} - slave = {} + main = {} + subordinate = {} try: for member in metrics['data']['rs_status_members']: if member['state'] == 1: - master = member + main = member if member['name'].split(':')[0] == socket.getfqdn(): - slave = member - result = max(0, master['optime']['t'] - slave['optime']['t']) / 1000 + subordinate = member + result = max(0, main['optime']['t'] - subordinate['optime']['t']) / 1000 except KeyError: result = 0 @@ -454,14 +454,14 @@ def metric_init(lparams): 'groups': groups }, { - 'name': NAME_PREFIX + 'slave_delay', - 'call_back': get_slave_delay, + 'name': NAME_PREFIX + 'subordinate_delay', + 'call_back': get_subordinate_delay, 'time_max': time_max, 'value_type': 'uint', 'units': 'Seconds', 'slope': 'both', 'format': '%u', - 'description': 'Replica Set Slave Delay', + 'description': 'Replica Set Subordinate Delay', 'groups': groups }, { diff --git a/cookbooks/ganglia/templates/default/python_modules/mysqld/python_modules/mysql.py b/cookbooks/ganglia/templates/default/python_modules/mysqld/python_modules/mysql.py index f5f7954b..14a463d4 100644 --- a/cookbooks/ganglia/templates/default/python_modules/mysqld/python_modules/mysql.py +++ 
b/cookbooks/ganglia/templates/default/python_modules/mysqld/python_modules/mysql.py @@ -65,7 +65,7 @@ MAX_UPDATE_TIME = 15 -def update_stats(get_innodb=True, get_master=True, get_slave=True): +def update_stats(get_innodb=True, get_main=True, get_subordinate=True): """ """ @@ -130,7 +130,7 @@ def update_stats(get_innodb=True, get_master=True, get_slave=True): # try not to fail ? get_innodb = get_innodb and have_innodb - get_master = get_master and variables['log_bin'].lower() == 'on' + get_main = get_main and variables['log_bin'].lower() == 'on' innodb_status = defaultdict(int) if get_innodb: @@ -140,23 +140,23 @@ def update_stats(get_innodb=True, get_master=True, get_slave=True): cursor.close() logging.debug('innodb_status: ' + str(innodb_status)) - master_logs = tuple - if get_master: + main_logs = tuple + if get_main: cursor = conn.cursor(MySQLdb.cursors.Cursor) cursor.execute("SHOW MASTER LOGS") - master_logs = cursor.fetchall() + main_logs = cursor.fetchall() cursor.close() - slave_status = {} - if get_slave: + subordinate_status = {} + if get_subordinate: cursor = conn.cursor(MySQLdb.cursors.DictCursor) cursor.execute("SHOW SLAVE STATUS") res = cursor.fetchone() if res: for (k,v) in res.items(): - slave_status[k.lower()] = v + subordinate_status[k.lower()] = v else: - get_slave = False + get_subordinate = False cursor.close() cursor = conn.cursor(MySQLdb.cursors.DictCursor) @@ -220,8 +220,8 @@ def update_stats(get_innodb=True, get_master=True, get_slave=True): 'select_range', 'select_range_check', 'select_scan', - 'slave_open_temp_tables', - 'slave_retried_transactions', + 'slave_open_temp_tables', + 'slave_retried_transactions', 'slow_launch_threads', 'slow_queries', 'sort_range', @@ -243,7 +243,7 @@ def update_stats(get_innodb=True, get_master=True, get_slave=True): 'qcache_free_blocks', 'qcache_free_memory', 'qcache_total_blocks', - 'slave_open_temp_tables', + 'slave_open_temp_tables', 'threads_cached', 'threads_connected', 
'threads_running', @@ -301,32 +301,32 @@ def update_stats(get_innodb=True, get_master=True, get_slave=True): else: mysql_stats[key] = innodb_status[istat] - # process master logs - if get_master: - mysql_stats['binlog_count'] = len(master_logs) - mysql_stats['binlog_space_current'] = master_logs[-1][1] - #mysql_stats['binlog_space_total'] = sum((long(s[1]) for s in master_logs)) + # process main logs + if get_main: + mysql_stats['binlog_count'] = len(main_logs) + mysql_stats['binlog_space_current'] = main_logs[-1][1] + #mysql_stats['binlog_space_total'] = sum((long(s[1]) for s in main_logs)) mysql_stats['binlog_space_total'] = 0 - for s in master_logs: + for s in main_logs: mysql_stats['binlog_space_total'] += int(s[1]) - mysql_stats['binlog_space_used'] = float(master_logs[-1][1]) / float(variables['max_binlog_size']) * 100 - - # process slave status - if get_slave: - mysql_stats['slave_exec_master_log_pos'] = slave_status['exec_master_log_pos'] - #mysql_stats['slave_io'] = 1 if slave_status['slave_io_running'].lower() == "yes" else 0 - if slave_status['slave_io_running'].lower() == "yes": - mysql_stats['slave_io'] = 1 + mysql_stats['binlog_space_used'] = float(main_logs[-1][1]) / float(variables['max_binlog_size']) * 100 + + # process subordinate status + if get_subordinate: + mysql_stats['subordinate_exec_main_log_pos'] = subordinate_status['exec_master_log_pos'] + #mysql_stats['subordinate_io'] = 1 if subordinate_status['slave_io_running'].lower() == "yes" else 0 + if subordinate_status['slave_io_running'].lower() == "yes": + mysql_stats['subordinate_io'] = 1 else: - mysql_stats['slave_io'] = 0 - #mysql_stats['slave_sql'] = 1 if slave_status['slave_sql_running'].lower() =="yes" else 0 - if slave_status['slave_sql_running'].lower() == "yes": - mysql_stats['slave_sql'] = 1 + mysql_stats['subordinate_io'] = 0 + #mysql_stats['subordinate_sql'] = 1 if subordinate_status['slave_sql_running'].lower() =="yes" else 0 + if 
subordinate_status['slave_sql_running'].lower() == "yes": + mysql_stats['subordinate_sql'] = 1 else: - mysql_stats['slave_sql'] = 0 - mysql_stats['slave_lag'] = slave_status['seconds_behind_master'] - mysql_stats['slave_relay_log_pos'] = slave_status['relay_log_pos'] - mysql_stats['slave_relay_log_space'] = slave_status['relay_log_space'] + mysql_stats['subordinate_sql'] = 0 + mysql_stats['subordinate_lag'] = subordinate_status['seconds_behind_master'] + mysql_stats['subordinate_relay_log_pos'] = subordinate_status['relay_log_pos'] + mysql_stats['subordinate_relay_log_space'] = subordinate_status['relay_log_space'] logging.debug('success updating stats') @@ -369,8 +369,8 @@ def metric_init(params): global REPORT_SLAVE REPORT_INNODB = str(params.get('get_innodb', True)) == "True" - REPORT_MASTER = str(params.get('get_master', True)) == "True" - REPORT_SLAVE = str(params.get('get_slave', True)) == "True" + REPORT_MASTER = str(params.get('get_main', True)) == "True" + REPORT_SLAVE = str(params.get('get_subordinate', True)) == "True" logging.debug("init: " + str(params)) @@ -387,9 +387,9 @@ def metric_init(params): if params.get("delta_per_second", '') != '': delta_per_second = True - master_stats_descriptions = {} + main_stats_descriptions = {} innodb_stats_descriptions = {} - slave_stats_descriptions = {} + subordinate_stats_descriptions = {} misc_stats_descriptions = dict( aborted_clients = { @@ -648,15 +648,15 @@ def metric_init(params): 'units': 'joins', }, - slave_open_temp_tables = { - 'description': 'The number of temporary tables that the slave SQL thread currently has open', + subordinate_open_temp_tables = { + 'description': 'The number of temporary tables that the subordinate SQL thread currently has open', 'value_type': 'float', 'units': 'tables', 'slope': 'both', }, - slave_retried_transactions = { - 'description': 'The total number of times since startup that the replication slave SQL thread has retried transactions', + 
subordinate_retried_transactions = { + 'description': 'The total number of times since startup that the replication subordinate SQL thread has retried transactions', 'value_type': 'float', 'units': 'count', }, @@ -755,7 +755,7 @@ def metric_init(params): ) if REPORT_MASTER: - master_stats_descriptions = dict( + main_stats_descriptions = dict( binlog_count = { 'description': "Number of binary logs", 'units': 'logs', @@ -783,34 +783,34 @@ def metric_init(params): ) if REPORT_SLAVE: - slave_stats_descriptions = dict( - slave_exec_master_log_pos = { - 'description': "The position of the last event executed by the SQL thread from the master's binary log", + subordinate_stats_descriptions = dict( + subordinate_exec_main_log_pos = { + 'description': "The position of the last event executed by the SQL thread from the main's binary log", 'units': 'bytes', 'slope': 'both', }, - slave_io = { - 'description': "Whether the I/O thread is started and has connected successfully to the master", + subordinate_io = { + 'description': "Whether the I/O thread is started and has connected successfully to the main", 'value_type': 'uint8', 'units': 'True/False', 'slope': 'both', }, - slave_lag = { + subordinate_lag = { 'description': "Replication Lag", 'units': 'secs', 'slope': 'both', }, - slave_relay_log_pos = { + subordinate_relay_log_pos = { 'description': "The position up to which the SQL thread has read and executed in the current relay log", 'units': 'bytes', 'slope': 'both', }, - slave_sql = { - 'description': "Slave SQL Running", + subordinate_sql = { + 'description': "Subordinate SQL Running", 'value_type': 'uint8', 'units': 'True/False', 'slope': 'both', @@ -1087,7 +1087,7 @@ def metric_init(params): time.sleep(MAX_UPDATE_TIME) update_stats(REPORT_INNODB, REPORT_MASTER, REPORT_SLAVE) - for stats_descriptions in (innodb_stats_descriptions, master_stats_descriptions, misc_stats_descriptions, slave_stats_descriptions): + for stats_descriptions in (innodb_stats_descriptions, 
main_stats_descriptions, misc_stats_descriptions, subordinate_stats_descriptions): for label in stats_descriptions: if mysql_stats.has_key(label): format = '%u' @@ -1133,8 +1133,8 @@ def metric_cleanup(): parser.add_option("-P", "--port", dest="port", help="port", default=3306, type="int") parser.add_option("-S", "--socket", dest="unix_socket", help="unix_socket", default="") parser.add_option("--no-innodb", dest="get_innodb", action="store_false", default=True) - parser.add_option("--no-master", dest="get_master", action="store_false", default=True) - parser.add_option("--no-slave", dest="get_slave", action="store_false", default=True) + parser.add_option("--no-main", dest="get_main", action="store_false", default=True) + parser.add_option("--no-subordinate", dest="get_subordinate", action="store_false", default=True) parser.add_option("-b", "--gmetric-bin", dest="gmetric_bin", help="path to gmetric binary", default="/usr/bin/gmetric") parser.add_option("-c", "--gmond-conf", dest="gmond_conf", help="path to gmond.conf", default="/etc/ganglia/gmond.conf") parser.add_option("-g", "--gmetric", dest="gmetric", help="submit via gmetric", action="store_true", default=False) @@ -1148,8 +1148,8 @@ def metric_cleanup(): 'user': options.user, 'port': options.port, 'get_innodb': options.get_innodb, - 'get_master': options.get_master, - 'get_slave': options.get_slave, + 'get_main': options.get_main, + 'get_subordinate': options.get_subordinate, 'unix_socket': options.unix_socket, }) diff --git a/cookbooks/ganglia/templates/default/python_modules/procstat/python_modules/procstat.py b/cookbooks/ganglia/templates/default/python_modules/procstat/python_modules/procstat.py index e4054d16..19641546 100644 --- a/cookbooks/ganglia/templates/default/python_modules/procstat/python_modules/procstat.py +++ b/cookbooks/ganglia/templates/default/python_modules/procstat/python_modules/procstat.py @@ -68,12 +68,12 @@ ### Example Values: ### httpd: /var/run/httpd.pid or \/usr\/sbin\/httpd ### 
mysqld: /var/run/mysqld/mysqld.pid or /\/usr\/bin\/mysqld_safe/ -### postgresql: /var/run/postmaster.[port].pid or /\/usr\/bin\/postmaster.*[port]/ +### postgresql: /var/run/postmaster.[port].pid or /\/usr\/bin\/postmaster.*[port]/ ### splunk: /splunkd.*start/ ### splunk-web: /twistd.*SplunkWeb/ ### opennms: /opt/opennms/logs/daemon/opennms.pid or java.*Dopennms ### netflow: /java.*NetFlow/ -### postfix: /var/spool/postfix/pid/master.pid or /\/usr\/libexec\/postfix\/master/ +### postfix: /var/spool/postfix/pid/master.pid or /\/usr\/libexec\/postfix\/master/ ### ### Error Tests: ### python procstat.py -p test-more,test-none,test-pidfail -v '/java/','/javaw/','java.pid' -t diff --git a/cookbooks/ganglia/templates/default/python_modules/redis-gmond/python_modules/redis-gmond.py b/cookbooks/ganglia/templates/default/python_modules/redis-gmond/python_modules/redis-gmond.py index 96094ead..ca0dace6 100644 --- a/cookbooks/ganglia/templates/default/python_modules/redis-gmond/python_modules/redis-gmond.py +++ b/cookbooks/ganglia/templates/default/python_modules/redis-gmond/python_modules/redis-gmond.py @@ -30,7 +30,7 @@ def metric_handler(name): continue n, v = line.split(":") if n in metric_handler.descriptors: - if n == "master_sync_status": + if n == "master_sync_status": v = 1 if v == 'up' else 0 if n == "db0": v = v.split('=')[1].split(',')[0] @@ -75,13 +75,13 @@ def metric_init(params={}): metric_handler.prev_total_connections = 0 metrics = { "connected_clients": {"units": "clients"}, - "connected_slaves": {"units": "slaves"}, + "connected_slaves": {"units": "slaves"}, "blocked_clients": {"units": "clients"}, "used_memory": {"units": "KB"}, "rdb_changes_since_last_save": {"units": "changes"}, "rdb_bgsave_in_progress": {"units": "yes/no"}, - "master_sync_in_progress": {"units": "yes/no"}, - "master_link_status": {"units": "yes/no"}, + "master_sync_in_progress": {"units": "yes/no"}, + "master_link_status": {"units": "yes/no"}, #"aof_bgrewriteaof_in_progress": {"units": 
"yes/no"}, "total_connections_received": { "units": "connections/sec" }, "instantaneous_ops_per_sec": {"units": "ops"}, @@ -90,7 +90,7 @@ def metric_init(params={}): "pubsub_channels": {"units": "channels"}, "pubsub_patterns": {"units": "patterns"}, #"vm_enabled": {"units": "yes/no"}, - "master_last_io_seconds_ago": {"units": "seconds ago"}, + "master_last_io_seconds_ago": {"units": "seconds ago"}, "db0": {"units": "keys"}, } metric_handler.descriptors = {} diff --git a/cookbooks/nagios/files/default/plugins/check_mongodb.py b/cookbooks/nagios/files/default/plugins/check_mongodb.py index c2c1b0f8..d8be152a 100755 --- a/cookbooks/nagios/files/default/plugins/check_mongodb.py +++ b/cookbooks/nagios/files/default/plugins/check_mongodb.py @@ -252,10 +252,10 @@ def mongo_connect(host=None, port=None, ssl=False, user=None, passwd=None, repli con = pymongo.Connection(host, port, read_preference=pymongo.ReadPreference.SECONDARY, ssl=ssl, replicaSet=replica, network_timeout=10) else: if replica is None: - con = pymongo.Connection(host, port, slave_okay=True, network_timeout=10) + con = pymongo.Connection(host, port, slave_okay=True, network_timeout=10) else: - con = pymongo.Connection(host, port, slave_okay=True, network_timeout=10) - #con = pymongo.Connection(host, port, slave_okay=True, replicaSet=replica, network_timeout=10) + con = pymongo.Connection(host, port, slave_okay=True, network_timeout=10) + #con = pymongo.Connection(host, port, slave_okay=True, replicaSet=replica, network_timeout=10) if user and passwd: db = con["admin"] @@ -329,7 +329,7 @@ def check_rep_lag(con, host, warning, critical, percent, perf_data, max_lag, use warning = warning or 600 critical = critical or 3600 rs_status = {} - slaveDelays = {} + subordinateDelays = {} try: set_read_preference(con.admin) @@ -348,10 +348,10 @@ def check_rep_lag(con, host, warning, critical, percent, perf_data, max_lag, use # rs_conf = con.local.system.replset.find_one() for member in 
rs_conf['members']: - if member.get('slaveDelay') is not None: - slaveDelays[member['host']] = member.get('slaveDelay') + if member.get('slaveDelay') is not None: + subordinateDelays[member['host']] = member.get('slaveDelay') else: - slaveDelays[member['host']] = 0 + subordinateDelays[member['host']] = 0 # Find the primary and/or the current node primary_node = None @@ -385,8 +385,8 @@ def check_rep_lag(con, host, warning, critical, percent, perf_data, max_lag, use maximal_lag = 0 for member in rs_status['members']: if not member['stateStr'] == "ARBITER": - lastSlaveOpTime = member['optimeDate'] - replicationLag = abs(primary_node["optimeDate"] - lastSlaveOpTime).seconds - slaveDelays[member['name']] + lastSubordinateOpTime = member['optimeDate'] + replicationLag = abs(primary_node["optimeDate"] - lastSubordinateOpTime).seconds - subordinateDelays[member['name']] data = data + member['name'] + " lag=%d;" % replicationLag maximal_lag = max(maximal_lag, replicationLag) if percent: @@ -409,12 +409,12 @@ def check_rep_lag(con, host, warning, critical, percent, perf_data, max_lag, use optime_lag = abs(primary_node["optimeDate"] - host_node["optimeDate"]) - if host_node['name'] in slaveDelays: - slave_delay = slaveDelays[host_node['name']] - elif host_node['name'].endswith(':27017') and host_node['name'][:-len(":27017")] in slaveDelays: - slave_delay = slaveDelays[host_node['name'][:-len(":27017")]] + if host_node['name'] in subordinateDelays: + subordinate_delay = subordinateDelays[host_node['name']] + elif host_node['name'].endswith(':27017') and host_node['name'][:-len(":27017")] in subordinateDelays: + subordinate_delay = subordinateDelays[host_node['name'][:-len(":27017")]] else: - raise Exception("Unable to determine slave delay for {0}".format(host_node['name'])) + raise Exception("Unable to determine subordinate delay for {0}".format(host_node['name'])) try: # work starting from python2.7 lag = optime_lag.total_seconds() @@ -435,7 +435,7 @@ def 
check_rep_lag(con, host, warning, critical, percent, perf_data, max_lag, use else: message = "Lag is " + str(lag) + " seconds" message += performance_data(perf_data, [(lag, "replication_lag", warning, critical)]) - return check_levels(lag, warning + slaveDelays[host_node['name']], critical + slaveDelays[host_node['name']], message) + return check_levels(lag, warning + subordinateDelays[host_node['name']], critical + subordinateDelays[host_node['name']], message) else: # # less than 2.0 check @@ -857,7 +857,7 @@ def check_oplog(con, warning, critical, perf_data): if (db.system.namespaces.find_one({"name": "local.oplog.$main"}) != None): oplog = "oplog.$main" else: - message = "neither master/slave nor replica set replication detected" + message = "neither main/subordinate nor replica set replication detected" return check_levels(None, warning, critical, message) try: @@ -1174,11 +1174,11 @@ def check_connect_primary(con, warning, critical, perf_data): try: try: set_read_preference(con.admin) - data = con.admin.command(pymongo.son_manipulator.SON([('isMaster', 1)])) + data = con.admin.command(pymongo.son_manipulator.SON([('isMaster', 1)])) except: - data = con.admin.command(son.SON([('isMaster', 1)])) + data = con.admin.command(son.SON([('isMaster', 1)])) - if data['ismaster'] == True: + if data['ismaster'] == True: print "OK - This server is primary" return 0 diff --git a/cookbooks/zeus-zxtm/files/default/failover_check.py b/cookbooks/zeus-zxtm/files/default/failover_check.py index b0a59a33..3a60a813 100755 --- a/cookbooks/zeus-zxtm/files/default/failover_check.py +++ b/cookbooks/zeus-zxtm/files/default/failover_check.py @@ -23,18 +23,18 @@ addresses = [i['addr'] for i in ifaddresses(ifaceName).setdefault(AF_INET, [{'addr':'No IP addr'}] )] for address in addresses: try: - address_current_master_fh=open('/tmp/current_master_%s' % address, 'r') - address_current_master_w_newline=address_current_master_fh.readline() - address_current_master = 
address_current_master_w_newline.rstrip() - match = re.match(address_current_master, hostname) + address_current_main_fh=open('/tmp/current_main_%s' % address, 'r') + address_current_main_w_newline=address_current_main_fh.readline() + address_current_main = address_current_main_w_newline.rstrip() + match = re.match(address_current_main, hostname) if match is None: #/usr/sbin/arping -I bond1.150 -c1 -U $ip os.system('/usr/sbin/arping -I %s -c1 -U %s' % (ifaceName,address)) - address_current_master_fh=open('/tmp/current_master_%s' % address, 'w') - address_current_master_fh.write('%s' % hostname) + address_current_main_fh=open('/tmp/current_main_%s' % address, 'w') + address_current_main_fh.write('%s' % hostname) except: - address_current_master_fh=open('/tmp/current_master_%s' % address, 'w') - address_current_master_fh.write('%s' % hostname) + address_current_main_fh=open('/tmp/current_main_%s' % address, 'w') + address_current_main_fh.write('%s' % hostname) ips_on_this_box.append(address) ips_on_other_host=set(ips_arr) - set(ips_on_this_box) @@ -44,5 +44,5 @@ #print set(ips_on_other_host) for ip in ips_on_other_host: - address_current_master_fh=open('/tmp/current_master_%s' % ip, 'w') - address_current_master_fh.write('%s' % other_hostname) + address_current_main_fh=open('/tmp/current_main_%s' % ip, 'w') + address_current_main_fh.write('%s' % other_hostname)