
Replacing log with log1p where applicable. #11428

Merged: 7 commits, Jul 14, 2018
2 changes: 1 addition & 1 deletion doc/modules/model_evaluation.rst
@@ -176,7 +176,7 @@ Here is an example of building custom scorers, and of using the
     >>> import numpy as np
     >>> def my_custom_loss_func(y_true, y_pred):
     ...     diff = np.abs(y_true - y_pred).max()
-    ...     return np.log(1 + diff)
+    ...     return np.log1p(diff)
     ...
     >>> # score will negate the return value of my_custom_loss_func,
     >>> # which will be np.log(2), 0.693, given the values for X
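For this doctest the two forms print the same value (the maximum absolute difference is 1, and log1p(1) == log(2)), but np.log1p is the safer idiom: forming 1 + x in floating point discards most of x when x is tiny. A minimal illustration of the difference, not part of the diff:

import numpy as np

x = 1e-15
# 1 + x is rounded to the nearest double before the log is taken,
# so the naive form carries roughly an 11% relative error here.
print(np.log(1 + x))   # ~1.11e-15
# log1p evaluates log(1 + x) without forming 1 + x explicitly.
print(np.log1p(x))     # ~1.00e-15, accurate to machine precision
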
2 changes: 1 addition & 1 deletion examples/plot_isotonic_regression.py
@@ -28,7 +28,7 @@
 n = 100
 x = np.arange(n)
 rs = check_random_state(0)
-y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
+y = rs.randint(-50, 50, size=(n,)) + 50. * np.log1p(np.arange(n))

 # #############################################################################
 # Fit IsotonicRegression and LinearRegression models
2 changes: 1 addition & 1 deletion sklearn/gaussian_process/gpc.py
@@ -409,7 +409,7 @@ def _posterior_mode(self, K, return_temporaries=False):
             # Line 10: Compute log marginal likelihood in loop and use as
             # convergence criterion
             lml = -0.5 * a.T.dot(f) \
-                - np.log(1 + np.exp(-(self.y_train_ * 2 - 1) * f)).sum() \
+                - np.log1p(np.exp(-(self.y_train_ * 2 - 1) * f)).sum() \
                 - np.log(np.diag(L)).sum()
             # Check if we have converged (log marginal likelihood does
             # not decrease)
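The rewritten term is a softplus, log(1 + exp(-z)) with z = (self.y_train_ * 2 - 1) * f. np.log1p preserves the tail when exp(-z) is tiny; it does not help when exp(-z) overflows, for which np.logaddexp is the usual remedy (mentioned only for context, it is not part of this change). A rough sketch:

import numpy as np

z = np.array([-800.0, 0.0, 40.0])

naive = np.log(1 + np.exp(-z))       # at z = 40, 1 + exp(-40) rounds to 1.0, giving 0.0
via_log1p = np.log1p(np.exp(-z))     # keeps the ~4.2e-18 tail at z = 40
via_logaddexp = np.logaddexp(0, -z)  # log(exp(0) + exp(-z)); also finite at z = -800

print(naive)          # [inf    0.6931...  0.0      ]
print(via_log1p)      # [inf    0.6931...  4.25e-18 ]
print(via_logaddexp)  # [800.   0.6931...  4.25e-18 ]
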
2 changes: 1 addition & 1 deletion sklearn/metrics/regression.py
@@ -314,7 +314,7 @@ def mean_squared_log_error(y_true, y_pred,
         raise ValueError("Mean Squared Logarithmic Error cannot be used when "
                          "targets contain negative values.")

-    return mean_squared_error(np.log(y_true + 1), np.log(y_pred + 1),
+    return mean_squared_error(np.log1p(y_true), np.log1p(y_pred),
                               sample_weight, multioutput)
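The rewritten return line makes explicit that MSLE is simply MSE computed in log1p space. A quick sanity check with illustrative values, assuming scikit-learn is installed:

import numpy as np
from sklearn.metrics import mean_squared_error, mean_squared_log_error

y_true = np.array([3.0, 5.0, 2.5, 7.0])
y_pred = np.array([2.5, 5.0, 4.0, 8.0])

# MSE of the log1p-transformed targets matches the public metric.
manual = mean_squared_error(np.log1p(y_true), np.log1p(y_pred))
print(np.isclose(manual, mean_squared_log_error(y_true, y_pred)))  # True
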
2 changes: 1 addition & 1 deletion sklearn/neighbors/binary_tree.pxi
@@ -486,7 +486,7 @@ cdef DTYPE_t _log_kernel_norm(DTYPE_t h, ITYPE_t d,
     elif kernel == EXPONENTIAL_KERNEL:
         factor = logSn(d - 1) + lgamma(d)
     elif kernel == LINEAR_KERNEL:
-        factor = logVn(d) - log(d + 1.)
+        factor = logVn(d) - np.log1p(d)
     elif kernel == COSINE_KERNEL:
         # this is derived from a chain rule integration
         factor = 0
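Here the argument is an integer dimension d >= 1, so 1 + d is exact in floating point and the two forms agree to within rounding; log1p's precision advantage only shows up for arguments near zero. A small check:

import numpy as np

d = np.arange(1, 20)
# Both compute the log of the exactly representable value d + 1,
# so they agree to within floating-point rounding for d >= 1.
print(np.allclose(np.log(d + 1.0), np.log1p(d)))  # True
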
4 changes: 2 additions & 2 deletions sklearn/utils/_logistic_sigmoid.pyx
@@ -13,9 +13,9 @@ ctypedef np.float64_t DTYPE_t

 cdef DTYPE_t _inner_log_logistic_sigmoid(DTYPE_t x):
     """Log of the logistic sigmoid function log(1 / (1 + e ** -x))"""
     if x > 0:
-        return -log(1 + exp(-x))
+        return -np.log1p(exp(-x))
     else:
-        return x - log(1 + exp(x))
+        return x - np.log1p(exp(x))


 def _log_logistic_sigmoid(int n_samples, int n_features,
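A pure-NumPy sketch of the same branch trick (a translation for illustration, not the Cython helper itself): splitting on the sign of x keeps the argument of exp non-positive, so exp never overflows, and log1p preserves the result when exp(...) is tiny.

import numpy as np

def log_sigmoid(x):
    """Stable log(1 / (1 + exp(-x))), mirroring the branch in the diff above."""
    x = np.asarray(x, dtype=float)
    out = np.empty_like(x)
    pos = x > 0
    # x > 0: exp(-x) <= 1, so -log1p(exp(-x)) cannot overflow.
    out[pos] = -np.log1p(np.exp(-x[pos]))
    # x <= 0: use log(sigmoid(x)) = x - log(1 + exp(x)), where exp(x) <= 1.
    out[~pos] = x[~pos] - np.log1p(np.exp(x[~pos]))
    return out

print(log_sigmoid([-1000.0, 0.0, 1000.0]))  # [-1000.  -0.6931...  -0.]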