+ (?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
+ (?P<pre> # pre-release
+ [-_\.]?
+ (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+ [-_\.]?
+ (?P<pre_n>[0-9]+)?
+ )?
+ (?P<post> # post release
+ (?:-(?P<post_n1>[0-9]+))
+ |
+ (?:
+ [-_\.]?
+ (?P<post_l>post|rev|r)
+ [-_\.]?
+ (?P<post_n2>[0-9]+)?
+ )
+ )?
+ (?P<dev> # dev release
+ [-_\.]?
+ (?P<dev_l>dev)
+ [-_\.]?
+ (?P<dev_n>[0-9]+)?
+ )?
+ )
+ (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
+"""
+
+
+class Version(_BaseVersion):
+
+ _regex = re.compile(
+ r"^\s*" + VERSION_PATTERN + r"\s*$",
+ re.VERBOSE | re.IGNORECASE,
+ )
+
+ def __init__(self, version):
+ # Validate the version and parse it into pieces
+ match = self._regex.search(version)
+ if not match:
+ raise InvalidVersion("Invalid version: '{0}'".format(version))
+
+ # Store the parsed out pieces of the version
+ self._version = _Version(
+ epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+ release=tuple(int(i) for i in match.group("release").split(".")),
+ pre=_parse_letter_version(
+ match.group("pre_l"),
+ match.group("pre_n"),
+ ),
+ post=_parse_letter_version(
+ match.group("post_l"),
+ match.group("post_n1") or match.group("post_n2"),
+ ),
+ dev=_parse_letter_version(
+ match.group("dev_l"),
+ match.group("dev_n"),
+ ),
+ local=_parse_local_version(match.group("local")),
+ )
+
+ # Generate a key which will be used for sorting
+ self._key = _cmpkey(
+ self._version.epoch,
+ self._version.release,
+ self._version.pre,
+ self._version.post,
+ self._version.dev,
+ self._version.local,
+ )
+
+ def __repr__(self):
+ return "".format(repr(str(self)))
+
+ def __str__(self):
+ parts = []
+
+ # Epoch
+ if self._version.epoch != 0:
+ parts.append("{0}!".format(self._version.epoch))
+
+ # Release segment
+ parts.append(".".join(str(x) for x in self._version.release))
+
+ # Pre-release
+ if self._version.pre is not None:
+ parts.append("".join(str(x) for x in self._version.pre))
+
+ # Post-release
+ if self._version.post is not None:
+ parts.append(".post{0}".format(self._version.post[1]))
+
+ # Development release
+ if self._version.dev is not None:
+ parts.append(".dev{0}".format(self._version.dev[1]))
+
+ # Local version segment
+ if self._version.local is not None:
+ parts.append(
+ "+{0}".format(".".join(str(x) for x in self._version.local))
+ )
+
+ return "".join(parts)
+
+ @property
+ def public(self):
+ return str(self).split("+", 1)[0]
+
+ @property
+ def base_version(self):
+ parts = []
+
+ # Epoch
+ if self._version.epoch != 0:
+ parts.append("{0}!".format(self._version.epoch))
+
+ # Release segment
+ parts.append(".".join(str(x) for x in self._version.release))
+
+ return "".join(parts)
+
+ @property
+ def local(self):
+ version_string = str(self)
+ if "+" in version_string:
+ return version_string.split("+", 1)[1]
+
+ @property
+ def is_prerelease(self):
+ return bool(self._version.dev or self._version.pre)
+
+ @property
+ def is_postrelease(self):
+ return bool(self._version.post)
+
+
+def _parse_letter_version(letter, number):
+ if letter:
+ # We consider there to be an implicit 0 in a pre-release if there is
+ # not a numeral associated with it.
+ if number is None:
+ number = 0
+
+ # We normalize any letters to their lower case form
+ letter = letter.lower()
+
+ # We consider some words to be alternate spellings of other words and
+ # in those cases we want to normalize the spellings to our preferred
+ # spelling.
+ if letter == "alpha":
+ letter = "a"
+ elif letter == "beta":
+ letter = "b"
+ elif letter in ["c", "pre", "preview"]:
+ letter = "rc"
+ elif letter in ["rev", "r"]:
+ letter = "post"
+
+ return letter, int(number)
+ if not letter and number:
+ # We assume if we are given a number, but we are not given a letter
+ # then this is using the implicit post release syntax (e.g. 1.0-1)
+ letter = "post"
+
+ return letter, int(number)
+
+
+_local_version_seperators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local):
+ """
+ Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+ """
+ if local is not None:
+ return tuple(
+ part.lower() if not part.isdigit() else int(part)
+ for part in _local_version_seperators.split(local)
+ )
+
+
+def _cmpkey(epoch, release, pre, post, dev, local):
+ # When we compare a release version, we want to compare it with all of the
+ # trailing zeros removed. So we'll reverse the list, drop all the now
+ # leading zeros until we come to something non-zero, then re-reverse the
+ # rest back into the correct order, make it a tuple, and use that as our
+ # sorting key.
+ release = tuple(
+ reversed(list(
+ itertools.dropwhile(
+ lambda x: x == 0,
+ reversed(release),
+ )
+ ))
+ )
+
+ # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+ # We'll do this by abusing the pre segment, but we _only_ want to do this
+ # if there is not a pre or a post segment. If we have one of those then
+ # the normal sorting rules will handle this case correctly.
+ if pre is None and post is None and dev is not None:
+ pre = -Infinity
+ # Versions without a pre-release (except as noted above) should sort after
+ # those with one.
+ elif pre is None:
+ pre = Infinity
+
+ # Versions without a post segment should sort before those with one.
+ if post is None:
+ post = -Infinity
+
+ # Versions without a development segment should sort after those with one.
+ if dev is None:
+ dev = Infinity
+
+ if local is None:
+ # Versions without a local segment should sort before those with one.
+ local = -Infinity
+ else:
+ # Versions with a local segment need that segment parsed to implement
+ # the sorting rules in PEP440.
+ # - Alpha numeric segments sort before numeric segments
+ # - Alpha numeric segments sort lexicographically
+ # - Numeric segments sort numerically
+ # - Shorter versions sort before longer versions when the prefixes
+ # match exactly
+ local = tuple(
+ (i, "") if isinstance(i, int) else (-Infinity, i)
+ for i in local
+ )
+
+ return epoch, release, pre, post, dev, local
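
A quick ordering sanity check (not part of the patch): assuming this module is importable as `mamonsu.lib.version` and exposes `parse()` the way the call sites below use it, the `_cmpkey` rules above produce standard PEP 440 ordering:

```python
# Hedged sketch: PEP 440 ordering produced by _cmpkey above, assuming the
# module is importable as mamonsu.lib.version with a parse() helper.
from mamonsu.lib.version import parse

assert parse("1.0.dev0") < parse("1.0a1")   # dev-only sorts before pre-releases
assert parse("1.0a1") < parse("1.0rc1")     # "a" < "rc" within pre-releases
assert parse("1.0rc1") < parse("1.0")       # pre-release < final
assert parse("1.0") < parse("1.0.post1")    # final < post-release
assert parse("1.0") < parse("1.0+local.1")  # local segment sorts after public
assert parse("1.0") == parse("1.0.0")       # trailing zeros are stripped
```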
diff --git a/mamonsu/plugins/pgsql/bgwriter.py b/mamonsu/plugins/pgsql/bgwriter.py
index 6d95013..9615b70 100644
--- a/mamonsu/plugins/pgsql/bgwriter.py
+++ b/mamonsu/plugins/pgsql/bgwriter.py
@@ -12,44 +12,69 @@ class BgWriter(Plugin):
SELECT {0}
FROM pg_catalog.pg_stat_bgwriter;
"""
- Items = [
- # key, zbx_key, description,
- # ('graph name', color, side), units, delta
-
- ("buffers_checkpoint", "bgwriter[buffers_checkpoint]",
- "Buffers Written During Checkpoints",
- ("PostgreSQL bgwriter", "006AAE", 1),
- Plugin.DELTA.simple_change),
-
- ("buffers_clean", "bgwriter[buffers_clean]",
- "Buffers Written",
- ("PostgreSQL bgwriter", "00CC00", 1),
- Plugin.DELTA.simple_change),
-
- ("maxwritten_clean", "bgwriter[maxwritten_clean]",
- "Number of bgwriter Stopped by Max Write Count",
- ("PostgreSQL bgwriter", "FF5656", 0),
- Plugin.DELTA.simple_change),
-
- ("buffers_backend", "bgwriter[buffers_backend]",
- "Buffers Written Directly by a Backend",
- ("PostgreSQL bgwriter", "9C8A4E", 1),
- Plugin.DELTA.simple_change),
-
- ("buffers_backend_fsync", "bgwriter[buffers_backend_fsync]",
- "Times a Backend Execute Its Own Fsync",
- ("PostgreSQL bgwriter", "00CC00", 0),
- Plugin.DELTA.simple_change),
-
- ("buffers_alloc", "bgwriter[buffers_alloc]",
- "Buffers Allocated",
- ("PostgreSQL bgwriter", "FF5656", 1),
- Plugin.DELTA.simple_change)
- ]
graph_name_buffers = "PostgreSQL bgwriter: Buffers"
graph_name_ws = "PostgreSQL bgwriter: Write/Sync"
+ def __init__(self, config):
+ super(BgWriter, self).__init__(config)
+ if self.is_enabled():
+ if Pooler.server_version_less("17"):
+ self.Items = [
+ # key, zbx_key, description,
+ # ('graph name', color, side), units, delta
+
+ ("buffers_checkpoint", "bgwriter[buffers_checkpoint]",
+ "Buffers Written During Checkpoints",
+ ("PostgreSQL bgwriter", "006AAE", 1),
+ Plugin.DELTA.simple_change),
+
+ ("buffers_clean", "bgwriter[buffers_clean]",
+ "Buffers Written",
+ ("PostgreSQL bgwriter", "00CC00", 1),
+ Plugin.DELTA.simple_change),
+
+ ("maxwritten_clean", "bgwriter[maxwritten_clean]",
+ "Number of bgwriter Stopped by Max Write Count",
+ ("PostgreSQL bgwriter", "FF5656", 0),
+ Plugin.DELTA.simple_change),
+
+ ("buffers_backend", "bgwriter[buffers_backend]",
+ "Buffers Written Directly by a Backend",
+ ("PostgreSQL bgwriter", "9C8A4E", 1),
+ Plugin.DELTA.simple_change),
+
+ ("buffers_backend_fsync", "bgwriter[buffers_backend_fsync]",
+ "Times a Backend Execute Its Own Fsync",
+ ("PostgreSQL bgwriter", "00CC00", 0),
+ Plugin.DELTA.simple_change),
+
+ ("buffers_alloc", "bgwriter[buffers_alloc]",
+ "Buffers Allocated",
+ ("PostgreSQL bgwriter", "FF5656", 1),
+ Plugin.DELTA.simple_change)
+ ]
+ else:
+ self.Items = [
+ # key, zbx_key, description,
+ # ('graph name', color, side), units, delta
+
+ ("buffers_clean", "bgwriter[buffers_clean]",
+ "Buffers Written",
+ ("PostgreSQL bgwriter", "00CC00", 1),
+ Plugin.DELTA.simple_change),
+
+ ("maxwritten_clean", "bgwriter[maxwritten_clean]",
+ "Number of bgwriter Stopped by Max Write Count",
+ ("PostgreSQL bgwriter", "FF5656", 0),
+ Plugin.DELTA.simple_change),
+
+ ("buffers_alloc", "bgwriter[buffers_alloc]",
+ "Buffers Allocated",
+ ("PostgreSQL bgwriter", "FF5656", 1),
+ Plugin.DELTA.simple_change)
+ ]
+
def run(self, zbx):
columns = [x[0] for x in self.Items]
result = Pooler.query(self.query.format(", ".join(columns)))
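
Context for the split above: PostgreSQL 17 removed the checkpoint- and backend-write counters from `pg_stat_bgwriter` (checkpoint statistics moved to `pg_stat_checkpointer`), so only three columns remain safe to select on 17+. A sketch of the query `run()` ends up issuing in each branch:

```python
# Sketch: run() builds its SELECT from self.Items, so the two branches of
# __init__ above yield these column lists against pg_stat_bgwriter.
PRE_17 = ["buffers_checkpoint", "buffers_clean", "maxwritten_clean",
          "buffers_backend", "buffers_backend_fsync", "buffers_alloc"]
PG_17_PLUS = ["buffers_clean", "maxwritten_clean", "buffers_alloc"]

query = "SELECT {0} FROM pg_catalog.pg_stat_bgwriter;"
print(query.format(", ".join(PRE_17)))      # what run() sends on PG < 17
print(query.format(", ".join(PG_17_PLUS)))  # what run() sends on PG >= 17
```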
diff --git a/mamonsu/plugins/pgsql/checkpoint.py b/mamonsu/plugins/pgsql/checkpoint.py
index f4a5324..c1ca9ac 100644
--- a/mamonsu/plugins/pgsql/checkpoint.py
+++ b/mamonsu/plugins/pgsql/checkpoint.py
@@ -9,14 +9,6 @@ class Checkpoint(Plugin):
AgentPluginType = "pg"
Interval = 60 * 5
- query = """
- SELECT {0}
- FROM pg_catalog.pg_stat_bgwriter;
- """ # for mamonsu and agent
- query_interval = """
- SELECT {0}*3600
- FROM pg_catalog.pg_stat_bgwriter;
- """ # for mamonsu and agent checkpoints in hour
key = "pgsql.checkpoint{0}"
# key: (macro, value)
@@ -24,34 +16,77 @@ class Checkpoint(Plugin):
"max_checkpoint_by_wal_in_hour": [("macro", "{$MAX_CHECKPOINT_BY_WAL_IN_HOUR}"), ("value", 12)]
}
- Items = [
- # key, zbx_key, description,
- # ('graph name', color, side), units, delta, factor
-
- ("checkpoints_timed", "count_timed",
- "by Timeout (in hour)",
- ("PostgreSQL Checkpoints: Count (in hour)", "00CC00", 0),
- Plugin.UNITS.none, Plugin.DELTA.speed_per_second, 60 * 60),
-
- ("checkpoints_req", "count_wal",
- "by WAL (in hour)",
- ("PostgreSQL Checkpoints: Count (in hour)", "FF5656", 0),
- Plugin.UNITS.none, Plugin.DELTA.speed_per_second, 60 * 60),
-
- ("checkpoint_write_time", "write_time",
- "Write Time",
- ("PostgreSQL Checkpoints: Write/Sync", "00CC00", 1),
- Plugin.UNITS.ms, Plugin.DELTA.speed_per_second, 1),
-
- ("checkpoint_sync_time", "checkpoint_sync_time",
- "Sync Time",
- ("PostgreSQL Checkpoints: Write/Sync", "FF5656", 1),
- Plugin.UNITS.ms, Plugin.DELTA.speed_per_second, 1)
- ]
-
graph_name_count = "PostgreSQL Checkpoints: Count (in hour)"
graph_name_ws = "PostgreSQL Checkpoints: Write/Sync"
+ def __init__(self, config):
+ super(Checkpoint, self).__init__(config)
+ if self.is_enabled():
+ if Pooler.server_version_less("17"):
+ self.query = """
+ SELECT {0}
+ FROM pg_catalog.pg_stat_bgwriter;
+ """ # for mamonsu and agent
+ self.query_interval = """
+ SELECT {0}*3600
+ FROM pg_catalog.pg_stat_bgwriter;
+ """ # for mamonsu and agent checkpoints in hour
+ self.Items = [
+ # key, zbx_key, description,
+ # ('graph name', color, side), units, delta, factor
+ ("checkpoints_timed", "count_timed",
+ "by Timeout (in hour)",
+ ("PostgreSQL Checkpoints: Count (in hour)", "00CC00", 0),
+ Plugin.UNITS.none, Plugin.DELTA.speed_per_second, 60 * 60),
+
+ ("checkpoints_req", "count_wal",
+ "by WAL (in hour)",
+ ("PostgreSQL Checkpoints: Count (in hour)", "FF5656", 0),
+ Plugin.UNITS.none, Plugin.DELTA.speed_per_second, 60 * 60),
+
+ ("checkpoint_write_time", "write_time",
+ "Write Time",
+ ("PostgreSQL Checkpoints: Write/Sync", "00CC00", 1),
+ Plugin.UNITS.ms, Plugin.DELTA.speed_per_second, 1),
+
+ ("checkpoint_sync_time", "checkpoint_sync_time",
+ "Sync Time",
+ ("PostgreSQL Checkpoints: Write/Sync", "FF5656", 1),
+ Plugin.UNITS.ms, Plugin.DELTA.speed_per_second, 1)
+ ]
+ else:
+ self.query = """
+ SELECT {0}
+ FROM pg_catalog.pg_stat_checkpointer;
+ """ # for mamonsu and agent
+ self.query_interval = """
+ SELECT {0}*3600
+ FROM pg_catalog.pg_stat_checkpointer;
+ """ # for mamonsu and agent checkpoints in hour
+ self.Items = [
+ # key, zbx_key, description,
+ # ('graph name', color, side), units, delta, factor
+ ("num_timed", "count_timed",
+ "by Timeout (in hour)",
+ ("PostgreSQL Checkpoints: Count (in hour)", "00CC00", 0),
+ Plugin.UNITS.none, Plugin.DELTA.speed_per_second, 60 * 60),
+
+ ("num_requested", "count_wal",
+ "by WAL (in hour)",
+ ("PostgreSQL Checkpoints: Count (in hour)", "FF5656", 0),
+ Plugin.UNITS.none, Plugin.DELTA.speed_per_second, 60 * 60),
+
+ ("write_time", "write_time",
+ "Write Time",
+ ("PostgreSQL Checkpoints: Write/Sync", "00CC00", 1),
+ Plugin.UNITS.ms, Plugin.DELTA.speed_per_second, 1),
+
+ ("sync_time", "checkpoint_sync_time",
+ "Sync Time",
+ ("PostgreSQL Checkpoints: Write/Sync", "FF5656", 1),
+ Plugin.UNITS.ms, Plugin.DELTA.speed_per_second, 1)
+ ]
+
def run(self, zbx):
columns = [x[0] for x in self.Items]
result = Pooler.query(self.query.format(", ".join(columns)))
@@ -146,5 +181,5 @@ def keys_and_queries(self, template_zabbix):
else:
result.append(
"{0}[*],$2 $1 -c \"{1}\"".format(self.key.format("." + item[1]),
- self.query_interval.format(item[0])))
+ self.query_interval.format(item[0])))
return template_zabbix.key_and_query(result)
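
The checkpoint plugin keeps its Zabbix keys (`count_timed`, `count_wal`, `write_time`, `checkpoint_sync_time`) stable while swapping both the source view and the column names on 17+. The mapping, as read off the two Items lists above:

```python
# Old pg_stat_bgwriter column -> pg_stat_checkpointer column on PG 17+.
# The Zabbix item keys stay the same, so history keeps accumulating under
# the existing keys across the upgrade.
COLUMN_RENAMES = {
    "checkpoints_timed": "num_timed",
    "checkpoints_req": "num_requested",
    "checkpoint_write_time": "write_time",
    "checkpoint_sync_time": "sync_time",
}
```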
diff --git a/mamonsu/plugins/pgsql/driver/pg8000/core.py b/mamonsu/plugins/pgsql/driver/pg8000/core.py
index 7816bff..b050af6 100755
--- a/mamonsu/plugins/pgsql/driver/pg8000/core.py
+++ b/mamonsu/plugins/pgsql/driver/pg8000/core.py
@@ -2,12 +2,12 @@
from collections import defaultdict, deque
from datetime import datetime as Datetime
from decimal import Decimal
-from pkg_resources import packaging
from hashlib import md5
from itertools import count, islice
from struct import Struct
from warnings import warn
+import mamonsu.lib.version as version
from mamonsu.plugins.pgsql.driver.pg8000 import converters
from .exceptions import (
ArrayContentNotSupportedError, DatabaseError, Error, IntegrityError,
@@ -1421,11 +1421,11 @@ def handle_PARAMETER_STATUS(self, data, ps):
# since distutils became deprecated we need this hack hoping that
# postgres package maintainers won't come up with something more exotic
string_version = value.decode('ascii').split(' ')[0]
- self._server_version = packaging.version.parse(string_version)
- if self._server_version < packaging.version.parse('8.2.0'):
+ self._server_version = version.parse(string_version)
+ if self._server_version < version.parse('8.2.0'):
self._commands_with_count = (
b"INSERT", b"DELETE", b"UPDATE", b"MOVE")
- elif self._server_version < packaging.version.parse('9.0.0'):
+ elif self._server_version < version.parse('9.0.0'):
self._commands_with_count = (
b"INSERT", b"DELETE", b"UPDATE", b"MOVE", b"FETCH",
b"COPY")
diff --git a/mamonsu/plugins/pgsql/driver/pool.py b/mamonsu/plugins/pgsql/driver/pool.py
index b7ca620..a8433d9 100644
--- a/mamonsu/plugins/pgsql/driver/pool.py
+++ b/mamonsu/plugins/pgsql/driver/pool.py
@@ -1,6 +1,8 @@
-from pkg_resources import packaging
from .connection import Connection, ConnectionInfo
+from mamonsu.lib.version import parse
+import threading
+
class Pool(object):
ExcludeDBs = ["template0", "template1"]
@@ -84,7 +86,7 @@ class Pool(object):
"""
SELECT application_name,
{0}
- coalesce((pg_{1}_{2}_diff(pg_current_{1}_{2}(), replay_lsn))::int, 0) AS total_lag
+ coalesce((pg_{1}_{2}_diff(pg_current_{1}_{2}(), replay_{2}))::int, 0) AS total_lag
FROM pg_stat_replication;
""",
"""
@@ -93,6 +95,30 @@ class Pool(object):
total_lag
FROM mamonsu.count_{1}_lag_lsn();
"""
+ ),
+ "wal_held_bytes_master": (
+ """
+ SELECT slot_name,
+ coalesce((pg_wal_lsn_diff(pg_current_wal_lsn(), restart_lsn))::int, 0) AS wal_held_bytes
+ FROM pg_replication_slots;
+ """,
+ """
+ SELECT slot_name,
+ wal_held_bytes
+ FROM mamonsu.bytes_held_by_inactive_slot_on_master();
+ """
+ ),
+ "wal_held_bytes_replica": (
+ """
+ SELECT slot_name,
+ coalesce((pg_wal_lsn_diff(pg_last_wal_replay_lsn(), restart_lsn))::int, 0) AS wal_held_bytes
+ FROM pg_replication_slots;
+ """,
+ """
+ SELECT slot_name,
+ wal_held_bytes
+ FROM mamonsu.bytes_held_by_inactive_slot_on_replica();
+ """
)
}
@@ -107,9 +133,11 @@ def __init__(self, params=None):
"bootstrap": {"storage": {}, "counter": 0, "cache": 10, "version": False},
"recovery": {"storage": {}, "counter": 0, "cache": 10},
"extension_schema": {"pg_buffercache": {}, "pg_stat_statements": {}, "pg_wait_sampling": {}, "pgpro_stats": {}},
+ "extension_versions": {},
"pgpro": {"storage": {}},
"pgproee": {"storage": {}}
}
+ self._lock = threading.RLock()
def connection_string(self, db=None):
db = self._normalize_db(db)
@@ -121,71 +149,88 @@ def query(self, query, db=None):
return self._connections[db].query(query)
def server_version(self, db=None):
- db = self._normalize_db(db)
- if db in self._cache["server_version"]["storage"]:
+ with self._lock:
+ db = self._normalize_db(db)
+ if db in self._cache["server_version"]["storage"]:
+ return self._cache["server_version"]["storage"][db]
+
+ version_string = self.query("show server_version", db)[0][0]
+ result = bytes(
+ version_string.split(" ")[0], "utf-8")
+ self._cache["server_version"]["storage"][db] = "{0}".format(
+ result.decode("ascii"))
return self._cache["server_version"]["storage"][db]
- version_string = self.query("show server_version", db)[0][0]
- result = bytes(
- version_string.split(" ")[0], "utf-8")
- self._cache["server_version"]["storage"][db] = "{0}".format(
- result.decode("ascii"))
- return self._cache["server_version"]["storage"][db]
+ def extension_version(self, extension, db=None):
+ with self._lock:
+ db = self._normalize_db(db)
+ if extension in self._cache["extension_versions"] and db in self._cache["extension_versions"][extension]:
+ return self._cache["extension_versions"][extension][db]
+
+ version_string = self.query("select extversion from pg_catalog.pg_extension where lower(extname) = lower('{0}');".format(extension), db)[0][0]
+ result = bytes(
+ version_string.split(" ")[0], "utf-8")
+ self._cache["extension_versions"][extension] = {}
+ self._cache["extension_versions"][extension][db] = "{0}".format(
+ result.decode("ascii"))
+ return self._cache["extension_versions"][extension][db]
def server_version_greater(self, version, db=None):
db = self._normalize_db(db)
- return packaging.version.parse(self.server_version(db)) >= packaging.version.parse(version)
+ return parse(self.server_version(db)) >= parse(version)
def server_version_less(self, version, db=None):
db = self._normalize_db(db)
- return packaging.version.parse(self.server_version(db)) <= packaging.version.parse(version)
+ return parse(self.server_version(db)) <= parse(version)
def bootstrap_version_greater(self, version):
- return packaging.version.parse(
- str(self._cache["bootstrap"]["version"])) >= packaging.version.parse(version)
+ with self._lock:
+ return parse(str(self._cache["bootstrap"]["version"])) >= parse(version)
def bootstrap_version_less(self, version):
- return packaging.version.parse(
- str(self._cache["bootstrap"]["version"])) <= packaging.version.parse(version)
+ with self._lock:
+ return parse(str(self._cache["bootstrap"]["version"])) <= parse(version)
def in_recovery(self, db=None):
- db = self._normalize_db(db)
- if db in self._cache["recovery"]["storage"]:
- if self._cache["recovery"]["counter"] < self._cache["recovery"]["cache"]:
- self._cache["recovery"]["counter"] += 1
- return self._cache["recovery"]["storage"][db]
- self._cache["recovery"]["counter"] = 0
- self._cache["recovery"]["storage"][db] = self.query(
- "select pg_catalog.pg_is_in_recovery()", db)[0][0]
- return self._cache["recovery"]["storage"][db]
+ with self._lock:
+ db = self._normalize_db(db)
+ if db in self._cache["recovery"]["storage"]:
+ if self._cache["recovery"]["counter"] < self._cache["recovery"]["cache"]:
+ self._cache["recovery"]["counter"] += 1
+ return self._cache["recovery"]["storage"][db]
+ self._cache["recovery"]["counter"] = 0
+ self._cache["recovery"]["storage"][db] = self.query(
+ "select pg_catalog.pg_is_in_recovery()", db)[0][0]
+ return self._cache["recovery"]["storage"][db]
def is_bootstraped(self, db=None):
- db = self._normalize_db(db)
- if db in self._cache["bootstrap"]["storage"]:
- if self._cache["bootstrap"]["counter"] < self._cache["bootstrap"]["cache"]:
- self._cache["bootstrap"]["counter"] += 1
- return self._cache["bootstrap"]["storage"][db]
- self._cache["bootstrap"]["counter"] = 0
- # TODO: rename to something more descriptive; 'config' is too generic
- sql = """
- SELECT count(*)
- FROM pg_catalog.pg_class
- WHERE relname = 'config';
- """
- result = int(self.query(sql, db)[0][0])
- self._cache["bootstrap"]["storage"][db] = (result == 1)
- if self._cache["bootstrap"]["storage"][db]:
- self._connections[db].log.info("Found mamonsu bootstrap")
+ with self._lock:
+ db = self._normalize_db(db)
+ if db in self._cache["bootstrap"]["storage"]:
+ if self._cache["bootstrap"]["counter"] < self._cache["bootstrap"]["cache"]:
+ self._cache["bootstrap"]["counter"] += 1
+ return self._cache["bootstrap"]["storage"][db]
+ self._cache["bootstrap"]["counter"] = 0
+ # TODO: rename to something more descriptive; 'config' is too generic
sql = """
- SELECT max(version)
- FROM mamonsu.config;
+ SELECT count(*)
+ FROM pg_catalog.pg_class
+ WHERE relname = 'config';
"""
- self._cache["bootstrap"]["version"] = self.query(sql, db)[0][0]
- else:
- self._connections[db].log.info("Mamonsu bootstrap is not found")
- self._connections[db].log.info(
- "hint: run `mamonsu bootstrap` if you want to run without superuser rights")
- return self._cache["bootstrap"]["storage"][db]
+ result = int(self.query(sql, db)[0][0])
+ self._cache["bootstrap"]["storage"][db] = (result == 1)
+ if self._cache["bootstrap"]["storage"][db]:
+ self._connections[db].log.info("Found mamonsu bootstrap")
+ sql = """
+ SELECT max(version)
+ FROM mamonsu.config;
+ """
+ self._cache["bootstrap"]["version"] = self.query(sql, db)[0][0]
+ else:
+ self._connections[db].log.info("Mamonsu bootstrap is not found")
+ self._connections[db].log.info(
+ "hint: run `mamonsu bootstrap` if you want to run without superuser rights")
+ return self._cache["bootstrap"]["storage"][db]
def is_superuser(self, db=None):
_ = self._normalize_db(db)
@@ -197,34 +242,44 @@ def is_superuser(self, db=None):
return False
def is_pgpro(self, db=None):
- db = self._normalize_db(db)
- if db in self._cache["pgpro"]:
+ with self._lock:
+ db = self._normalize_db(db)
+ if db in self._cache["pgpro"]:
+ return self._cache["pgpro"][db]
+ try:
+ self.query("""
+ SELECT pgpro_version();
+ """)
+ self._cache["pgpro"][db] = True
+ except:
+ self._cache["pgpro"][db] = False
return self._cache["pgpro"][db]
- try:
- self.query("""
- SELECT pgpro_version();
- """)
- self._cache["pgpro"][db] = True
- except:
- self._cache["pgpro"][db] = False
- return self._cache["pgpro"][db]
def is_pgpro_ee(self, db=None):
- db = self._normalize_db(db)
- if not self.is_pgpro(db):
- return False
- if db in self._cache["pgproee"]:
+ with self._lock:
+ db = self._normalize_db(db)
+ if not self.is_pgpro(db):
+ return False
+ if db in self._cache["pgproee"]:
+ return self._cache["pgproee"][db]
+ try:
+ ed = self.query("""
+ SELECT pgpro_edition();
+ """)[0][0]
+ self._connections[db].log.info("pgpro_edition is {}".format(ed))
+ self._cache["pgproee"][db] = (ed.lower() == "enterprise")
+ except:
+ self._connections[db].log.info("pgpro_edition() is not defined")
+ self._cache["pgproee"][db] = False
return self._cache["pgproee"][db]
- try:
- ed = self.query("""
- SELECT pgpro_edition();
- """)[0][0]
- self._connections[db].log.info("pgpro_edition is {}".format(ed))
- self._cache["pgproee"][db] = (ed.lower() == "enterprise")
- except:
- self._connections[db].log.info("pgpro_edition() is not defined")
- self._cache["pgproee"][db] = False
- return self._cache["pgproee"][db]
+
+ def extension_version_greater(self, extension, version, db=None):
+ db = self._normalize_db(db)
+ return parse(self.extension_version(extension, db)) >= parse(version)
+
+ def extension_version_less(self, extension, version, db=None):
+ db = self._normalize_db(db)
+ return parse(self.extension_version(extension, db)) <= parse(version)
def extension_installed(self, ext, db=None):
db = self._normalize_db(db)
@@ -236,19 +291,20 @@ def extension_installed(self, ext, db=None):
return (int(result[0][0])) == 1
def extension_schema(self, extension, db=None):
- db = self._normalize_db(db)
- if db in self._cache["extension_schema"][extension]:
- return self._cache["extension_schema"][extension][db]
- try:
- self._cache["extension_schema"][extension][db] = self.query("""
- SELECT n.nspname
- FROM pg_extension e
- JOIN pg_namespace n ON e.extnamespace = n.oid
- WHERE e.extname = '{0}'
- """.format(extension), db)[0][0]
- return self._cache["extension_schema"][extension][db]
- except:
- self._connections[db].log.info("{0} is not installed".format(extension))
+ with self._lock:
+ db = self._normalize_db(db)
+ if db in self._cache["extension_schema"][extension]:
+ return self._cache["extension_schema"][extension][db]
+ try:
+ self._cache["extension_schema"][extension][db] = self.query("""
+ SELECT n.nspname
+ FROM pg_extension e
+ JOIN pg_namespace n ON e.extnamespace = n.oid
+ WHERE e.extname = '{0}'
+ """.format(extension), db)[0][0]
+ return self._cache["extension_schema"][extension][db]
+ except:
+ self._connections[db].log.info("{0} is not installed".format(extension))
def databases(self):
result, databases = self.query("""
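
All of the converted cache accessors above follow the same shape: take the re-entrant lock, return the cached value if present, otherwise query once and populate the cache. A condensed, hypothetical sketch of the idiom (names are illustrative, not from the patch):

```python
import threading

class CacheSketch(object):
    """Condensed sketch of the locking idiom Pool uses above."""

    def __init__(self):
        self._cache = {}
        # RLock, not Lock: cached methods may call other cached methods
        # (e.g. is_pgpro_ee() -> is_pgpro()) on the same thread.
        self._lock = threading.RLock()

    def cached_value(self, key, compute):
        with self._lock:
            if key in self._cache:           # fast path: cache hit
                return self._cache[key]
            self._cache[key] = compute(key)  # slow path: query once
            return self._cache[key]
```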
diff --git a/mamonsu/plugins/pgsql/memory_leak_diagnostic.py b/mamonsu/plugins/pgsql/memory_leak_diagnostic.py
index cb368a9..14c0749 100644
--- a/mamonsu/plugins/pgsql/memory_leak_diagnostic.py
+++ b/mamonsu/plugins/pgsql/memory_leak_diagnostic.py
@@ -4,10 +4,11 @@
import os
from .pool import Pooler
import re
-from pkg_resources import packaging
import mamonsu.lib.platform as platform
import posix
+import mamonsu.lib.version as version
+
class MemoryLeakDiagnostic(Plugin):
DEFAULT_CONFIG = {
@@ -91,7 +92,7 @@ def run(self, zbx):
for row in Pooler.query(query=self.query):
pids.append(row[0])
- if (packaging.version.parse(self.os_release) < packaging.version.parse("4.5")
+ if (version.parse(self.os_release) < version.parse("4.5")
and not (self.os_name == "centos" and self.os_version == "7")) \
or (not self.os_name and not self.os_version):
for pid in pids:
diff --git a/mamonsu/plugins/pgsql/replication.py b/mamonsu/plugins/pgsql/replication.py
index 0c53f5b..7ed701c 100644
--- a/mamonsu/plugins/pgsql/replication.py
+++ b/mamonsu/plugins/pgsql/replication.py
@@ -1,10 +1,11 @@
# -*- coding: utf-8 -*-
from mamonsu.plugins.pgsql.plugin import PgsqlPlugin as Plugin
-from pkg_resources import packaging
from .pool import Pooler
from mamonsu.lib.zbx_template import ZbxTemplate
+import mamonsu.lib.version as version
+
NUMBER_NON_ACTIVE_SLOTS = 0
@@ -12,7 +13,8 @@ class Replication(Plugin):
AgentPluginType = "pg"
# key: (macro, value)
plugin_macros = {
- "critical_lag_seconds": [("macro", "{$CRITICAL_LAG_SECONDS}"), ("value", 60 * 5)]
+ "critical_lag_seconds": [("macro", "{$CRITICAL_LAG_SECONDS}"), ("value", 60 * 5)],
+ "critical_bytes_held_by_none_active_slot": [("macro", "{$CRITICAL_BYTES_HELD_BY_NON_ACTIVE_SLOT}"), ("value", 1024 * 1024 * 1024)]
}
# get time of replication lag
@@ -29,8 +31,15 @@ class Replication(Plugin):
WHERE active = 'false';
"""
+ query_bytes_held_by_non_active_slot = """
+ SELECT slot_name, coalesce(pg_wal_lsn_diff(pg_current_wal_lsn(), restart_lsn)::bigint, 0) AS wal_size_bytes
+ FROM pg_replication_slots
+ WHERE active = 'false';
+ """
+
# for discovery rule for name of each replica
key_lsn_replication_discovery = "pgsql.replication.discovery{0}"
+ key_replication_non_active_slots_discovery = "pgsql.replication.non_active_slots_discovery{0}"
key_total_lag = "pgsql.replication.total_lag{0}"
# for PG 10 and higher
key_flush = "pgsql.replication.flush_lag{0}"
@@ -41,6 +50,7 @@ class Replication(Plugin):
key_replication = "pgsql.replication_lag{0}"
key_non_active_slots = "pgsql.replication.non_active_slots{0}"
+ key_non_active_slots_held_bytes = "pgsql.replication.non_active_slots_held_bytes{0}"
def run(self, zbx):
@@ -78,6 +88,14 @@ def run(self, zbx):
zbx.send("pgsql.replication.replay_lag[{0}]".format(info[0]), float(info[5]))
zbx.send("pgsql.replication.discovery[]", zbx.json({"data": lags}))
del lags
+ bytes_held_by_non_active_slot = Pooler.run_sql_type("wal_held_bytes_master", args=[])
+ if bytes_held_by_non_active_slot:
+ discovery = []
+ for info in bytes_held_by_non_active_slot:
+ discovery.append({"{#NON_ACTIVE_SLOT_NAME}": info[0]})
+ zbx.send("pgsql.replication.non_active_slots_held_bytes[{0}]".format(info[0]), int(info[1]))
+ zbx.send("pgsql.replication.non_active_slots_discovery[]", zbx.json({"data": discovery}))
+ del discovery
elif Pooler.is_superuser() or Pooler.is_bootstraped():
result_lags = Pooler.run_sql_type("wal_lag_lsn", args=[" ", "xlog", "location"])
if result_lags:
@@ -89,7 +107,15 @@ def run(self, zbx):
del lags
else:
self.disable_and_exit_if_not_superuser()
-
+ else:
+ bytes_held_by_non_active_slot = Pooler.run_sql_type("wal_held_bytes_replica", args=[])
+ if bytes_held_by_non_active_slot:
+ discovery = []
+ for info in bytes_held_by_non_active_slot:
+ discovery.append({"{#NON_ACTIVE_SLOT_NAME}": info[0]})
+ zbx.send("pgsql.replication.non_active_slots_held_bytes[{0}]".format(info[0]), int(info[1]))
+ zbx.send("pgsql.replication.non_active_slots_discovery[]", zbx.json({"data": discovery}))
+ del discovery
non_active_slots = Pooler.query(self.query_non_active_slots)
zbx.send(self.key_non_active_slots.format("[]"), int(non_active_slots[0][0]))
@@ -131,7 +157,8 @@ def triggers(self, template, dashboard=False):
}) + template.trigger({
"name": "PostgreSQL Replication: number of non-active replication slots on {HOSTNAME} (value={ITEM.LASTVALUE})",
"expression": "{#TEMPLATE:" + self.right_type(self.key_non_active_slots) + ".last()}>" + str(
- NUMBER_NON_ACTIVE_SLOTS)
+ NUMBER_NON_ACTIVE_SLOTS),
+ "status": 1
})
return triggers
@@ -197,11 +224,46 @@ def discovery_rules(self, template, dashboard=False):
]
}
]
- return template.discovery_rule(rule=rule, conditions=conditions, items=items, graphs=graphs)
+ active_slots_discovery_rule = template.discovery_rule(rule=rule, conditions=conditions, items=items, graphs=graphs)
+
+ rule = {
+ "name": "PostgreSQL Replication: Non Active Slots Discovery",
+ "key": self.key_replication_non_active_slots_discovery.format("[{0}]".format(self.Macros[self.Type]))
+ }
+ if Plugin.old_zabbix:
+ conditions = []
+ rule["filter"] = "{#NON_ACTIVE_SLOT_NAME}:.*"
+ else:
+ conditions = [{
+ "condition": [
+ {"macro": "{#NON_ACTIVE_SLOT_NAME}",
+ "value": ".*",
+ "operator": 8,
+ "formulaid": "A"}
+ ]
+ }]
+ items = [
+ {"key": self.right_type(self.key_non_active_slots_held_bytes, var_discovery="{#NON_ACTIVE_SLOT_NAME},"),
+ "name": "PostgreSQL Replication: Bytes held by non-active slot {#NON_ACTIVE_SLOT_NAME}",
+ "value_type": Plugin.VALUE_TYPE.numeric_float,
+ "delay": self.plugin_config("interval"),
+ "drawtype": 2}
+ ]
+ graphs = []
+ triggers = [
+ {
+ "name": "PostgreSQL Replication: bytes held by slot {#NON_ACTIVE_SLOT_NAME} is too high (value={ITEM.LASTVALUE})",
+ "expression": "{#TEMPLATE:" + self.right_type(self.key_non_active_slots_held_bytes, var_discovery="{#NON_ACTIVE_SLOT_NAME},") + ".last()}>" +
+ self.plugin_macros["critical_bytes_held_by_none_active_slot"][0][1]
+ }
+ ]
+ non_active_slots_discovery_rule = template.discovery_rule(rule=rule, conditions=conditions, items=items, graphs=graphs, triggers=triggers)
+
+ return active_slots_discovery_rule + non_active_slots_discovery_rule
def keys_and_queries(self, template_zabbix):
result = []
- if packaging.version.parse(self.VersionPG) < packaging.version.parse("10"):
+ if version.parse(self.VersionPG) < version.parse("10"):
result.append("{0},$2 $1 -c \"{1}\"".format("pgsql.replication_lag.sec[*]",
self.query_agent_replication_lag.format(
self.plugin_config("interval"), "xlog_receive_location",
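
For reference, both new branches above emit one trap per inactive slot plus one low-level discovery (LLD) message naming the slots. A hypothetical example of what Zabbix receives for a single inactive slot:

```python
import json

# Hypothetical example: one inactive slot "standby_slot" holding 2048 bytes.
# Per-slot item, as sent by zbx.send(...) in the branches above:
item_key = "pgsql.replication.non_active_slots_held_bytes[standby_slot]"
item_value = 2048

# LLD payload consumed by the "Non Active Slots Discovery" rule:
discovery = {"data": [{"{#NON_ACTIVE_SLOT_NAME}": "standby_slot"}]}
print(item_key, item_value)
print(json.dumps(discovery))
```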
diff --git a/mamonsu/plugins/pgsql/statements.py b/mamonsu/plugins/pgsql/statements.py
index 4cebcd6..784f226 100644
--- a/mamonsu/plugins/pgsql/statements.py
+++ b/mamonsu/plugins/pgsql/statements.py
@@ -27,7 +27,7 @@ class Statements(Plugin):
query_info = """
SELECT {metrics}
- FROM {extension_schema}.pg_stat_statements_info;
+ FROM {extension_schema}.{info_view_name};
"""
key = "pgsql."
# zbx_key, sql, desc, unit, delta, (Graph, color, side)
@@ -88,6 +88,32 @@ class Statements(Plugin):
("PostgreSQL Statements Info: Last Statistics Reset Time", "9C8A4E", 0))
]
+ Items_pgpro_stats_1_8 = [
+ ("stat[read_bytes]",
+ "(sum(shared_blks_read+local_blks_read+temp_blks_read)*8*1024)::bigint",
+ "Read bytes/s", Plugin.UNITS.bytes_per_second, Plugin.DELTA.speed_per_second,
+ ("PostgreSQL Statements: Bytes", "87C2B9", 0)),
+ ("stat[write_bytes]",
+ "(sum(shared_blks_written+local_blks_written+temp_blks_written)*8*1024)::bigint",
+ "Write bytes/s", Plugin.UNITS.bytes_per_second, Plugin.DELTA.speed_per_second,
+ ("PostgreSQL Statements: Bytes", "793F5D", 0)),
+ ("stat[dirty_bytes]",
+ "(sum(shared_blks_dirtied+local_blks_dirtied)*8*1024)::bigint",
+ "Dirty bytes/s", Plugin.UNITS.bytes_per_second, Plugin.DELTA.speed_per_second,
+ ("PostgreSQL Statements: Bytes", "9C8A4E", 0)),
+ ("stat[read_time]",
+ "(sum(shared_blk_read_time+local_blk_read_time+temp_blk_read_time)/float4(100))::bigint",
+ "Read IO Time", Plugin.UNITS.s, Plugin.DELTA.speed_per_second,
+ ("PostgreSQL Statements: Spent Time", "87C2B9", 0)),
+ ("stat[write_time]",
+ "(sum(shared_blk_write_time+local_blk_write_time+temp_blk_write_time)/float4(100))::bigint",
+ "Write IO Time", Plugin.UNITS.s, Plugin.DELTA.speed_per_second,
+ ("PostgreSQL Statements: Spent Time", "793F5D", 0)),
+ ["stat[other_time]",
+ "(sum(total_exec_time+total_plan_time-shared_blk_read_time-local_blk_read_time-temp_blk_read_time-shared_blk_write_time-local_blk_write_time-temp_blk_write_time)/float4(100))::bigint",
+ "Other (mostly CPU) Time", Plugin.UNITS.s, Plugin.DELTA.speed_per_second,
+ ("PostgreSQL Statements: Spent Time", "9C8A4E", 0)]]
+
all_graphs = [
("PostgreSQL Statements: Bytes", None),
("PostgreSQL Statements: Spent Time", 1),
@@ -115,21 +141,45 @@ def run(self, zbx):
# TODO: add 13 and 14 items when pgpro_stats added new WAL metrics
all_items = self.Items.copy()
- if Pooler.server_version_greater("14"):
+
+ if Pooler.extension_installed("pgpro_stats") and Pooler.extension_version_greater("pgpro_stats", "1.8"):
+ info_view = 'pg_stat_statements_info'
+ if self.extension == "pgpro_stats":
+ info_view = 'pgpro_stats_info'
+
+ info_items = self.Items_pg_14
+ info_params = [x[1] for x in info_items]
+ info_result = Pooler.query(
+ self.query_info.format(metrics=(", ".join(info_params)), extension_schema=extension_schema, info_view_name=info_view))
+ for key, value in enumerate(info_result[0]):
+ zbx_key, value = "pgsql.{0}".format(
+ info_items[key][0]), int(value)
+ zbx.send(zbx_key, value, info_items[key][4])
+
+ all_items = self.Items_pgpro_stats_1_8.copy()
+ all_items += self.Items_pg_13
+
+ elif Pooler.server_version_greater("14"):
self.Items[5][1] = self.Items[5][1].format("total_exec_time+total_plan_time")
all_items += self.Items_pg_13
+ info_view = 'pgpro_stats_info'
if self.extension == "pg_stat_statements":
- info_items = self.Items_pg_14
- info_params = [x[1] for x in info_items]
- info_result = Pooler.query(
- self.query_info.format(metrics=(", ".join(info_params)), extension_schema=extension_schema))
- for key, value in enumerate(info_result[0]):
- zbx_key, value = "pgsql.{0}".format(
- info_items[key][0]), int(value)
- zbx.send(zbx_key, value, info_items[key][4])
+ info_view = 'pg_stat_statements_info'
+ info_items = self.Items_pg_14
+ info_params = [x[1] for x in info_items]
+ info_result = Pooler.query(
+ self.query_info.format(metrics=(", ".join(info_params)),
+ extension_schema=extension_schema,
+ info_view_name=info_view))
+ for key, value in enumerate(info_result[0]):
+ zbx_key, value = "pgsql.{0}".format(
+ info_items[key][0]), int(value)
+ zbx.send(zbx_key, value, info_items[key][4])
+
elif Pooler.server_version_greater("13"):
self.Items[5][1] = self.Items[5][1].format("total_exec_time+total_plan_time")
all_items += self.Items_pg_13
+
else:
self.Items[5][1] = self.Items[5][1].format("total_time")
columns = [x[1] for x in all_items]
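
The net effect of the statements changes: `query_info` is now parameterized by view name, since pgpro_stats 1.8 exposes its counterpart of `pg_stat_statements_info` as `pgpro_stats_info`. A sketch of how the query renders for each view; `dealloc, stats_reset` stands in for the real `Items_pg_14` metric expressions:

```python
# Sketch: rendering query_info for both info views handled above.
# "dealloc, stats_reset" stands in for the Items_pg_14 metric list;
# "public" stands in for the resolved extension schema.
query_info = """
    SELECT {metrics}
    FROM {extension_schema}.{info_view_name};
"""
for view in ("pg_stat_statements_info", "pgpro_stats_info"):
    print(query_info.format(metrics="dealloc, stats_reset",
                            extension_schema="public",
                            info_view_name=view))
```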
diff --git a/mamonsu/plugins/system/linux/disk_sizes.py b/mamonsu/plugins/system/linux/disk_sizes.py
index 898c2c0..d461812 100644
--- a/mamonsu/plugins/system/linux/disk_sizes.py
+++ b/mamonsu/plugins/system/linux/disk_sizes.py
@@ -20,7 +20,7 @@ class DiskSizes(Plugin):
ExcludeFsTypes = [
"none", "unknown", "rootfs", "iso9660", "squashfs", "udf", "romfs", "ramfs", "debugfs", "cgroup", "cgroup_root",
- "pstore", "devtmpfs", "autofs", "cgroup", "configfs", "devpts", "efivarfs", "fusectl", "fuse.gvfsd-fuse",
+ "pstore", "devtmpfs", "autofs", "cgroup2", "configfs", "devpts", "efivarfs", "fusectl", "fuse.gvfsd-fuse",
"hugetlbfs", "mqueue", "binfmt_misc", "nfsd", "proc", "pstore", "selinuxfs", "rpc_pipefs", "securityfs",
"sysfs", "nsfs", "tmpfs", "tracefs"
]
diff --git a/mamonsu/tools/bootstrap/sql.py b/mamonsu/tools/bootstrap/sql.py
index fb56a9d..bf99442 100644
--- a/mamonsu/tools/bootstrap/sql.py
+++ b/mamonsu/tools/bootstrap/sql.py
@@ -7,7 +7,7 @@
$do$
BEGIN
IF NOT EXISTS (
- SELECT FROM pg_catalog.pg_roles
+ SELECT FROM pg_catalog.pg_roles
WHERE rolname = '{0}') THEN
CREATE ROLE {0} LOGIN PASSWORD '{0}';
IF EXISTS (
@@ -166,15 +166,15 @@
DROP FUNCTION IF EXISTS mamonsu.get_oldest_transaction();
CREATE or REPLACE FUNCTION mamonsu.get_oldest_transaction()
RETURNS DOUBLE PRECISION AS $$
- SELECT
- CASE WHEN extract(epoch from max(now() - xact_start)) IS NOT null
+ SELECT
+ CASE WHEN extract(epoch from max(now() - xact_start)) IS NOT null
AND extract(epoch from max(now() - xact_start))>0
- THEN extract(epoch from max(now() - xact_start))
- ELSE 0
- END
- FROM pg_catalog.pg_stat_activity
- WHERE
- pid NOT IN(select pid from pg_stat_replication) AND
+ THEN extract(epoch from max(now() - xact_start))
+ ELSE 0
+ END
+ FROM pg_catalog.pg_stat_activity
+ WHERE
+ pid NOT IN(select pid from pg_stat_replication) AND
pid <> pg_backend_pid()
$$ LANGUAGE SQL SECURITY DEFINER;
@@ -225,17 +225,34 @@
CREATE OR REPLACE FUNCTION mamonsu.prepared_transaction()
RETURNS TABLE(count_prepared BIGINT, oldest_prepared BIGINT) AS $$
SELECT COUNT(*) AS count_prepared,
-coalesce (ROUND(MAX(EXTRACT (EPOCH FROM (now() - prepared)))),0)::bigint AS oldest_prepared
+coalesce (ROUND(MAX(EXTRACT (EPOCH FROM (now() - prepared)))),0)::bigint AS oldest_prepared
FROM pg_catalog.pg_prepared_xacts$$ LANGUAGE SQL SECURITY DEFINER;
DROP FUNCTION IF EXISTS mamonsu.count_{3}_lag_lsn();
CREATE OR REPLACE FUNCTION mamonsu.count_{3}_lag_lsn()
-RETURNS TABLE(application_name TEXT, {8} total_lag INTEGER) AS $$
+RETURNS TABLE(application_name TEXT, {8} total_lag BIGINT) AS $$
SELECT application_name,
- {6}
- coalesce((pg_{7}_diff(pg_current_{7}(), replay_{9}))::int, 0) AS total_lag
+ {6}
+ coalesce((pg_{7}_diff(pg_current_{7}(), replay_{9}))::bigint, 0) AS total_lag
FROM pg_stat_replication
$$ LANGUAGE SQL SECURITY DEFINER;
+
+DROP FUNCTION IF EXISTS mamonsu.bytes_held_by_inactive_slot_on_master();
+CREATE OR REPLACE FUNCTION mamonsu.bytes_held_by_inactive_slot_on_master()
+RETURNS TABLE(slot_name TEXT, wal_held_bytes BIGINT) AS $$
+SELECT slot_name::TEXT, coalesce((pg_{7}_diff(pg_current_wal_lsn(), restart_lsn))::bigint, 0) AS wal_held_bytes
+FROM pg_replication_slots
+WHERE active = 'false'
+$$ LANGUAGE SQL SECURITY DEFINER;
+
+DROP FUNCTION IF EXISTS mamonsu.bytes_held_by_inactive_slot_on_replica();
+CREATE OR REPLACE FUNCTION mamonsu.bytes_held_by_inactive_slot_on_replica()
+RETURNS TABLE(slot_name TEXT, wal_held_bytes BIGINT) AS $$
+SELECT slot_name::TEXT, coalesce((pg_{7}_diff(pg_last_wal_replay_lsn(), restart_lsn))::bigint, 0) AS wal_held_bytes
+FROM pg_replication_slots
+WHERE active = 'false'
+$$ LANGUAGE SQL SECURITY DEFINER;
+
"""
CreatePgBuffercacheFunctionsSQL = """
@@ -287,7 +304,7 @@
FROM pg_extension e
JOIN pg_namespace n
ON e.extnamespace = n.oid
- WHERE e.extname = 'pgpro_stats';
+ WHERE e.extname = 'pgpro_stats';
EXECUTE 'DROP FUNCTION IF EXISTS mamonsu.wait_sampling_all_locks();
CREATE OR REPLACE FUNCTION mamonsu.wait_sampling_all_locks()
RETURNS TABLE(lock_type text, count bigint) AS $$
@@ -298,7 +315,7 @@
FROM (SELECT key, value AS locktuple
FROM jsonb_each((SELECT wait_stats
FROM ' || extension_schema || '.pgpro_stats_totals()
- WHERE object_type = ''cluster''))) setoflocks,
+ WHERE object_type = ''cluster''))) setoflocks,
jsonb_each(setoflocks.locktuple) AS json_data)
SELECT
CASE
@@ -327,7 +344,7 @@
FROM (SELECT key, value AS locktuple
FROM jsonb_each((SELECT wait_stats
FROM ' || extension_schema || '.pgpro_stats_totals()
- WHERE object_type = ''cluster''))) setoflocks,
+ WHERE object_type = ''cluster''))) setoflocks,
jsonb_each(setoflocks.locktuple) AS json_data)
SELECT
lock_type,
@@ -347,7 +364,7 @@
FROM (SELECT key, value AS locktuple
FROM jsonb_each((SELECT wait_stats
FROM ' || extension_schema || '.pgpro_stats_totals()
- WHERE object_type = ''cluster''))) setoflocks,
+ WHERE object_type = ''cluster''))) setoflocks,
jsonb_each(setoflocks.locktuple) AS json_data
WHERE setoflocks.key IN (''Lock'', ''LWLock'', ''LWLockTranche'', ''LWLockNamed''))
SELECT
@@ -415,13 +432,13 @@
FROM pg_extension e
JOIN pg_namespace n
ON e.extnamespace = n.oid
- WHERE e.extname = 'pgpro_stats';
+ WHERE e.extname = 'pgpro_stats';
EXECUTE 'DROP FUNCTION IF EXISTS mamonsu.statements_pro();
CREATE OR REPLACE FUNCTION mamonsu.statements_pro()
RETURNS TABLE({columns}) AS $$
SELECT {metrics}
FROM ' || extension_schema || '.pgpro_stats_totals()
- WHERE object_type = ''cluster'';
+ WHERE object_type = ''cluster'';
$$ LANGUAGE SQL SECURITY DEFINER;';
ELSE
EXIT functions_creation;
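
The two new SECURITY DEFINER functions let the mamonsu role read slot retention without superuser rights; the `wal_held_bytes_*` query pairs in pool.py select them when bootstrap is detected. A hedged usage sketch (the import path for `Pooler` is assumed; plugins in this patch use `from .pool import Pooler`):

```python
# Hedged sketch: reading the bootstrap function directly through the pooler.
# Import path assumed from this patch's plugin code; adjust to your tree.
from mamonsu.plugins.pgsql.driver.pool import Pooler

rows = Pooler.query(
    "SELECT slot_name, wal_held_bytes "
    "FROM mamonsu.bytes_held_by_inactive_slot_on_master();"
)
for slot_name, held_bytes in rows:
    print(slot_name, held_bytes)  # bytes retained by each inactive slot
```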
diff --git a/mamonsu/tools/bootstrap/start.py b/mamonsu/tools/bootstrap/start.py
index 3976de1..6fbc582 100644
--- a/mamonsu/tools/bootstrap/start.py
+++ b/mamonsu/tools/bootstrap/start.py
@@ -237,7 +237,13 @@ def run_deploy():
if Pooler.is_pgpro() or Pooler.is_pgpro_ee():
bootstrap_extension_queries = fill_query_params(CreateWaitSamplingFunctionsSQL)
Pooler.query(bootstrap_extension_queries)
- if Pooler.server_version_greater("12"):
+ if Pooler.extension_installed("pgpro_stats") and Pooler.extension_version_greater("pgpro_stats", "1.8"):
+ statements_items = [x[1] for x in Statements.Items_pgpro_stats_1_8] + [x[1] for x in Statements.Items_pg_13]
+ statements_columns = [x[0][x[0].find("[")+1:x[0].find("]")] for x in Statements.Items_pgpro_stats_1_8] + [x[0][x[0].find("[")+1:x[0].find("]")] for x in Statements.Items_pg_13]
+ bootstrap_extension_queries = CreateStatementsFunctionsSQL.format(
+ columns=" bigint, ".join(statements_columns) + " bigint", metrics=(", ".join(statements_items)))
+ Pooler.query(bootstrap_extension_queries)
+ elif Pooler.server_version_greater("12"):
statements_items = [x[1] for x in Statements.Items] + ([x[1] for x in Statements.Items_pg_13] if Pooler.server_version_greater("13") else [])
statements_items[5] = statements_items[5].format("total_exec_time+total_plan_time")
statements_columns = [x[0][x[0].find("[")+1:x[0].find("]")] for x in Statements.Items] + ([x[0][x[0].find("[")+1:x[0].find("]")] for x in Statements.Items_pg_13] if Pooler.server_version_greater("13") else [])
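
The bracket-slicing expression repeated above extracts the column alias from a Zabbix item key; a worked example of what it yields:

```python
# The slicing used above turns a zbx key such as "stat[read_bytes]" into
# the bare column name used in the generated SQL function signature.
x = ("stat[read_bytes]", "(sum(shared_blks_read)*8*1024)::bigint")
column = x[0][x[0].find("[") + 1:x[0].find("]")]
assert column == "read_bytes"
```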
diff --git a/mamonsu/tools/zabbix_cli/operations.py b/mamonsu/tools/zabbix_cli/operations.py
index d3b25e4..811ab0b 100644
--- a/mamonsu/tools/zabbix_cli/operations.py
+++ b/mamonsu/tools/zabbix_cli/operations.py
@@ -3,9 +3,10 @@
from __future__ import print_function
import sys
import json
+import mamonsu.lib.version as version
from mamonsu.tools.zabbix_cli.request import Request
from mamonsu.lib.parser import zabbix_msg
-from pkg_resources import packaging
+
from mamonsu.tools.zabbix_cli.dashboard import generate_dashboard
@@ -171,10 +172,10 @@ def template(self, args):
}
},
'source': open(file).read()}
- if packaging.version.parse(zabbix_version) < packaging.version.parse('5.4'):
+ if version.parse(zabbix_version) < version.parse('5.4'):
params['rules']['applications'] = {'createMissing': True,
'deleteMissing': True}
- if packaging.version.parse(zabbix_version) < packaging.version.parse('5.2'):
+ if version.parse(zabbix_version) < version.parse('5.2'):
params['rules']['templateScreens'] = {'createMissing': True,
'updateExisting': False,
'deleteMissing': True}
@@ -329,7 +330,7 @@ def dashboard(self, args):
if not len(args) == 2:
return self._print_help()
zabbix_version = str(self.req.post(method='apiinfo.version', params=[]))
- if packaging.version.parse(zabbix_version) < packaging.version.parse('6.0'):
+ if version.parse(zabbix_version) < version.parse('6.0'):
print("You can import Mamonsu dashboard only on Zabbix 6.0+.")
return
else:
diff --git a/mamonsu/tools/zabbix_cli/request.py b/mamonsu/tools/zabbix_cli/request.py
index 566868f..0ccd78c 100644
--- a/mamonsu/tools/zabbix_cli/request.py
+++ b/mamonsu/tools/zabbix_cli/request.py
@@ -4,8 +4,8 @@
import logging
from collections import OrderedDict
+import mamonsu.lib.version as version
-from pkg_resources import packaging
import urllib.request as urllib2
@@ -26,7 +26,7 @@ def _auth(self):
if self._auth_tocken is None:
if not self._user:
return None
- user_field = 'user' if packaging.version.parse(self._api_version) < packaging.version.parse('6.4') else 'username'
+ user_field = 'user' if version.parse(self._api_version) < version.parse('6.4') else 'username'
self._auth_tocken = self.post(
'user.login',
{user_field: self._user, 'password': self._passwd})
diff --git a/packaging/debian/changelog b/packaging/debian/changelog
index 0a0c778..6efa097 100644
--- a/packaging/debian/changelog
+++ b/packaging/debian/changelog
@@ -1,3 +1,27 @@
+mamonsu (3.5.13-1) stable; urgency=low
+ * Added a new metric that displays the bytes held by non-active replication slots, along with the corresponding trigger;
+ * Set the trigger for 'number of non-active replication slots' to be disabled by default;
+ * Fixed the Linux plugin to ensure compatibility with recent Linux versions that use cgroups2;
+ * Resolved a deadlock issue in the send queue that caused Mamonsu to hang after network problems;
+
+mamonsu (3.5.12-1) stable; urgency=low
+ * Port version parser code from public archive of pypa/pkg_resources;
+ * Thread-safe implementation of connection cache;
+ * Skip BGwriter and Checkpoint plugins initialization if Postgres metrics collection was explicitly disabled;
+
+mamonsu (3.5.11-1) stable; urgency=low
+ * Updated statements plugin: added support for pgpro_stats 1.8;
+ * Fixed types for count_wal_lag_lsn() function (int to bigint);
+
+mamonsu (3.5.10-1) stable; urgency=low
+ * Updated checkpoint plugin: added support for new view pg_stat_checkpointer;
+ * Updated bgwriter plugin: consider updated view pg_stat_bgwriter in postgres 17;
+ * Run zabbix cli tools with latest setuptools installed (>67.7.2);
+
+mamonsu (3.5.9-1) stable; urgency=low
+ * Run on systems with latest setuptools installed (>67.7.2);
+ * Drop using dotted user:group specification in RPM pre-install stage;
+
mamonsu (3.5.8-1) stable; urgency=low
* Prepare for python 3.12: remove deprecated distutils imports;
diff --git a/packaging/rpm/SPECS/mamonsu.spec b/packaging/rpm/SPECS/mamonsu.spec
index f35fbd9..dcfd2bd 100644
--- a/packaging/rpm/SPECS/mamonsu.spec
+++ b/packaging/rpm/SPECS/mamonsu.spec
@@ -1,5 +1,5 @@
Name: mamonsu
-Version: 3.5.8
+Version: 3.5.13
Release: 1%{?dist}
Summary: Monitoring agent for PostgreSQL
Group: Applications/Internet
@@ -57,22 +57,46 @@ getent passwd mamonsu > /dev/null || \
-c "mamonsu monitoring user" mamonsu
mkdir -p /var/run/mamonsu
-chown -R mamonsu.mamonsu /var/run/mamonsu
+chown -R mamonsu:mamonsu /var/run/mamonsu
mkdir -p /etc/mamonsu/plugins
touch /etc/mamonsu/plugins/__init__.py
mkdir -p /var/log/mamonsu
-chown -R mamonsu.mamonsu /var/log/mamonsu
+chown -R mamonsu:mamonsu /var/log/mamonsu
%preun
/sbin/service mamonsu stop >/dev/null 2>&1
/sbin/chkconfig --del mamonsu
%post
-chown -R mamonsu.mamonsu /etc/mamonsu
+chown -R mamonsu:mamonsu /etc/mamonsu
%changelog
+* Thu May 29 2025 Andrey Papsuyko - 3.5.13-1
+ - Added a new metric that displays the bytes held by non-active replication slots, along with the corresponding trigger;
+ - Set the trigger for 'number of non-active replication slots' to be disabled by default;
+ - Fixed the Linux plugin to ensure compatibility with recent Linux versions that use cgroups2;
+ - Resolved a deadlock issue in the send queue that caused Mamonsu to hang after network problems;
+
+* Wed Mar 5 2025 Maxim Styushin - 3.5.12-1
+ - Port version parser code from public archive of pypa/pkg_resources;
+ - Thread-safe implementation of connection cache;
+ - Skip BGwriter and Checkpoint plugins initialization if Postgres metrics collection was explicitly disabled;
+
+* Wed Jan 15 2025 Maxim Styushin - 3.5.11-1
+ - Updated statements plugin: added support for pgpro_stats 1.8;
+ - Fixed types for count_wal_lag_lsn() function (int to bigint);
+
+* Sat Dec 14 2024 Maxim Styushin - 3.5.10-1
+ - Updated checkpoint plugin: added support for new view pg_stat_checkpointer;
+ - Updated bgwriter plugin: consider updated view pg_stat_bgwriter in postgres 17;
+ - Run zabbix cli tools with latest setuptools installed (>67.7.2);
+
+* Mon Aug 19 2024 Maxim Styushin - 3.5.9-1
+ - Run on systems with latest setuptools installed (>67.7.2);
+ - Drop using dotted user:group specification in RPM pre-install stage;
+
* Thu Apr 18 2024 Maxim Styushin - 3.5.8-1
- Prepare for python 3.12: remove deprecated distutils imports;
diff --git a/packaging/win/mamonsu.def.nsh b/packaging/win/mamonsu.def.nsh
index 1e32c88..5afbfdc 100644
--- a/packaging/win/mamonsu.def.nsh
+++ b/packaging/win/mamonsu.def.nsh
@@ -1,5 +1,5 @@
!define NAME Mamonsu
-!define VERSION 3.5.8
+!define VERSION 3.5.13
!define MAMONSU_REG_PATH "Software\PostgresPro\Mamonsu"
!define MAMONSU_REG_UNINSTALLER_PATH "Software\Microsoft\Windows\CurrentVersion\Uninstall"
!define EDB_REG "SOFTWARE\Postgresql"