diff --git a/doc/modules/model_evaluation.rst b/doc/modules/model_evaluation.rst
index eeb058e1440c1..e55dc1cc14762 100644
--- a/doc/modules/model_evaluation.rst
+++ b/doc/modules/model_evaluation.rst
@@ -176,7 +176,7 @@ Here is an example of building custom scorers, and of using the
     >>> import numpy as np
     >>> def my_custom_loss_func(y_true, y_pred):
     ...     diff = np.abs(y_true - y_pred).max()
-    ...     return np.log(1 + diff)
+    ...     return np.log1p(diff)
     ...
     >>> # score will negate the return value of my_custom_loss_func,
     >>> # which will be np.log(2), 0.693, given the values for X
diff --git a/examples/plot_isotonic_regression.py b/examples/plot_isotonic_regression.py
index fd076b5afad62..2411aa1d95124 100644
--- a/examples/plot_isotonic_regression.py
+++ b/examples/plot_isotonic_regression.py
@@ -28,7 +28,7 @@
 n = 100
 x = np.arange(n)
 rs = check_random_state(0)
-y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
+y = rs.randint(-50, 50, size=(n,)) + 50. * np.log1p(np.arange(n))

 # #############################################################################
 # Fit IsotonicRegression and LinearRegression models
diff --git a/sklearn/gaussian_process/gpc.py b/sklearn/gaussian_process/gpc.py
index e1f37617b6d1e..51da24fccc77b 100644
--- a/sklearn/gaussian_process/gpc.py
+++ b/sklearn/gaussian_process/gpc.py
@@ -409,7 +409,7 @@ def _posterior_mode(self, K, return_temporaries=False):
             # Line 10: Compute log marginal likelihood in loop and use as
             #          convergence criterion
             lml = -0.5 * a.T.dot(f) \
-                - np.log(1 + np.exp(-(self.y_train_ * 2 - 1) * f)).sum() \
+                - np.log1p(np.exp(-(self.y_train_ * 2 - 1) * f)).sum() \
                 - np.log(np.diag(L)).sum()
             # Check if we have converged (log marginal likelihood does
             # not decrease)
diff --git a/sklearn/metrics/regression.py b/sklearn/metrics/regression.py
index 4bc88561a73fd..e9084a4276e18 100644
--- a/sklearn/metrics/regression.py
+++ b/sklearn/metrics/regression.py
@@ -314,7 +314,7 @@ def mean_squared_log_error(y_true, y_pred,
         raise ValueError("Mean Squared Logarithmic Error cannot be used when "
                          "targets contain negative values.")

-    return mean_squared_error(np.log(y_true + 1), np.log(y_pred + 1),
+    return mean_squared_error(np.log1p(y_true), np.log1p(y_pred),
                               sample_weight, multioutput)


diff --git a/sklearn/neighbors/binary_tree.pxi b/sklearn/neighbors/binary_tree.pxi
index 6b736d39fab82..3e17f1b93d6c0 100755
--- a/sklearn/neighbors/binary_tree.pxi
+++ b/sklearn/neighbors/binary_tree.pxi
@@ -486,7 +486,7 @@ cdef DTYPE_t _log_kernel_norm(DTYPE_t h, ITYPE_t d,
     elif kernel == EXPONENTIAL_KERNEL:
         factor = logSn(d - 1) + lgamma(d)
     elif kernel == LINEAR_KERNEL:
-        factor = logVn(d) - log(d + 1.)
+        factor = logVn(d) - np.log1p(d)
     elif kernel == COSINE_KERNEL:
         # this is derived from a chain rule integration
         factor = 0
diff --git a/sklearn/utils/_logistic_sigmoid.pyx b/sklearn/utils/_logistic_sigmoid.pyx
index 58809eb7c1b7b..9c7b8d0a20843 100644
--- a/sklearn/utils/_logistic_sigmoid.pyx
+++ b/sklearn/utils/_logistic_sigmoid.pyx
@@ -13,9 +13,9 @@ ctypedef np.float64_t DTYPE_t
 cdef DTYPE_t _inner_log_logistic_sigmoid(DTYPE_t x):
     """Log of the logistic sigmoid function log(1 / (1 + e ** -x))"""
     if x > 0:
-        return -log(1 + exp(-x))
+        return -np.log1p(exp(-x))
     else:
-        return x - log(1 + exp(x))
+        return x - np.log1p(exp(x))


 def _log_logistic_sigmoid(int n_samples, int n_features,
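
For reference, the rationale for preferring np.log1p over np.log(1 + x) is numerical: for very small x, forming 1 + x in double precision rounds away the low-order bits before the logarithm is taken, while log1p evaluates log(1 + x) without that intermediate loss. The snippet below is a minimal, illustrative sketch (not code from the files touched above); the values in the comments are what NumPy's double-precision routines are expected to produce.

import numpy as np

x = 1e-17
# 1.0 + 1e-17 rounds to exactly 1.0 in float64, so the naive form
# loses the entire signal and returns 0.0.
naive = np.log(1 + x)
# np.log1p computes log(1 + x) without forming 1 + x explicitly,
# preserving full relative accuracy for tiny x.
accurate = np.log1p(x)

print(naive)     # 0.0
print(accurate)  # ~1e-17, since log(1 + x) is approximately x for small x

# For moderate arguments, as in the call sites changed here, the two
# forms agree to rounding error; the change is about robustness and clarity.
print(np.allclose(np.log(1 + 0.5), np.log1p(0.5)))  # True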