From a5abb464d41bc7d43725a522c6a02064214c3b61 Mon Sep 17 00:00:00 2001 From: Antony Lee Date: Thu, 16 Dec 2021 23:45:16 +0100 Subject: [PATCH] Autoinfer norm bounds. Instead of special-casing lognorm to only autoscale from positive values, perform autoscaling from values that map to finite transformed values. This ensures e.g. that make_norm_from_scale(LogitScale) automatically does the right thing (autoscaling from values in (0, 1)). This means that autoscale() and autoscale_None() are now slightly more expensive (because the transform needs to be applied), so skip the call to autoscale_None if not needed. However, note that these should typically only be called once per norm anyways, so hopefully this isn't a bottleneck. (Another idea would be to use `trf.inverse().transform([-np.inf, np.inf])` as clipping bounds, but there are some tests using `x->x**2` / `x->sqrt(x)` as a test for FuncNorm, which 1. doesn't go all the way to -inf, and 2. isn't even increasing for negative x's, so that idea doesn't work.) --- lib/matplotlib/colors.py | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/lib/matplotlib/colors.py b/lib/matplotlib/colors.py index 23f6e71189fc..d138443cc7ea 100644 --- a/lib/matplotlib/colors.py +++ b/lib/matplotlib/colors.py @@ -1229,7 +1229,8 @@ def __call__(self, value, clip=None): result, is_scalar = self.process_value(value) - self.autoscale_None(result) + if self.vmin is None or self.vmax is None: + self.autoscale_None(result) # Convert at least to float, without losing precision. 
(vmin,), _ = self.process_value(self.vmin) (vmax,), _ = self.process_value(self.vmax) @@ -1520,7 +1521,8 @@ def __init__(self, *args, **kwargs): def __call__(self, value, clip=None): value, is_scalar = self.process_value(value) - self.autoscale_None(value) + if self.vmin is None or self.vmax is None: + self.autoscale_None(value) if self.vmin > self.vmax: raise ValueError("vmin must be less or equal to vmax") if self.vmin == self.vmax: @@ -1555,6 +1557,15 @@ def inverse(self, value): .reshape(np.shape(value))) return value[0] if is_scalar else value + def autoscale(self, A): + # i.e. A[np.isfinite(...)], but also for non-array A's + in_trf_domain = np.extract(np.isfinite(self._trf.transform(A)), A) + return super().autoscale(in_trf_domain) + + def autoscale_None(self, A): + in_trf_domain = np.extract(np.isfinite(self._trf.transform(A)), A) + return super().autoscale_None(in_trf_domain) + Norm.__name__ = (f"{scale_cls.__name__}Norm" if base_norm_cls is Normalize else base_norm_cls.__name__) Norm.__qualname__ = base_norm_cls.__qualname__ @@ -1603,14 +1614,6 @@ def forward(values: array-like) -> array-like class LogNorm(Normalize): """Normalize a given value to the 0-1 range on a log scale.""" - def autoscale(self, A): - # docstring inherited. - super().autoscale(np.ma.array(A, mask=(A <= 0))) - - def autoscale_None(self, A): - # docstring inherited. - super().autoscale_None(np.ma.array(A, mask=(A <= 0))) - @make_norm_from_scale( scale.SymmetricalLogScale,