Deprecations 1.6 #54

Closed · wants to merge 8 commits
2 changes: 1 addition & 1 deletion doc/TESTS.rst.txt
@@ -46,7 +46,7 @@ can be turned on (recommended after installation and before upgrading) by
starting the interpreter with the -Wd switch, or by::

>>> import warnings
>>> wwarnings.simplefilter('always', DeprecationWarning)
>>> warnings.simplefilter('always', DeprecationWarning)

The test method may take two or more arguments; the first is a string
label specifying what should be tested and the second is an integer
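Context for the doc fix above: with this filter in place, every deprecated call made while the tests run emits a visible DeprecationWarning. A minimal, self-contained sketch of the same setup:

```python
import warnings

# Show every DeprecationWarning instead of suppressing repeats, so
# deprecated call sites show up each time the test suite hits them.
warnings.simplefilter('always', DeprecationWarning)

import numpy as np  # any deprecated NumPy call after this point warns visibly
```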
21 changes: 10 additions & 11 deletions doc/summarize.py
@@ -42,7 +42,6 @@

# --- numpy.fft:
fft fft.Tester fft.bench fft.fftpack fft.fftpack_lite fft.helper
fft.refft fft.refft2 fft.refftn fft.irefft fft.irefft2 fft.irefftn
fft.info fft.test

# --- numpy.linalg:
@@ -67,10 +66,10 @@ def main():
fn = os.path.join(CUR_DIR, 'dump.xml')
if os.path.isfile(fn):
import_phantom_module(fn)

# check
documented, undocumented = check_numpy()

# report
in_sections = {}
for name, locations in documented.iteritems():
@@ -108,7 +107,7 @@ def check_numpy():
if k.startswith('numpy.'):
d[k[6:]] = d[k]
del d[k]

return documented, undocumented

def get_undocumented(documented, module, module_name=None, skip=[]):
@@ -123,24 +122,24 @@ def get_undocumented(documented, module, module_name=None, skip=[]):

"""
undocumented = {}

if module_name is None:
module_name = module.__name__

for name in dir(module):
obj = getattr(module, name)
if name.startswith('_'): continue

full_name = '.'.join([module_name, name])

if full_name in skip: continue
if full_name.startswith('numpy.') and full_name[6:] in skip: continue
if not (inspect.ismodule(obj) or callable(obj) or inspect.isclass(obj)):
continue

if full_name not in documented:
undocumented[full_name] = True

return undocumented

def format_in_columns(lst, max_columns):
@@ -160,9 +159,9 @@ def format_in_columns(lst, max_columns):
nrows = len(lst)//ncols
else:
nrows = 1 + len(lst)//ncols

fmt = ' %%-%ds ' % (col_len-2)

lines = []
for n in range(nrows):
lines.append("".join([fmt % x for x in lst[n::nrows]]))
7 changes: 2 additions & 5 deletions doc/swig/test/setup.py
@@ -7,11 +7,8 @@
# Third-party modules - we depend on numpy for everything
import numpy

# Obtain the numpy include directory. This logic works across numpy versions.
try:
numpy_include = numpy.get_include()
except AttributeError:
numpy_include = numpy.get_numpy_include()
# Obtain the numpy include directory.
numpy_include = numpy.get_include()

# Array extension module
_Array = Extension("_Array",
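For readers updating their own build scripts, here is a minimal sketch of a setup.py that relies only on numpy.get_include(), matching the simplification above; the extension and source file names are hypothetical, not taken from this repository.

```python
# Hypothetical minimal setup script; only the get_include() call mirrors the diff.
from distutils.core import setup, Extension
import numpy

numpy_include = numpy.get_include()  # the old get_numpy_include() fallback is gone

_example = Extension(
    "_example",
    sources=["example_wrap.c", "example.c"],  # placeholder SWIG-generated sources
    include_dirs=[numpy_include],
)

setup(name="example", version="0.1", ext_modules=[_example])
```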
10 changes: 0 additions & 10 deletions numpy/core/memmap.py
@@ -278,11 +278,6 @@ def flush(self):
if self._mmap is not None:
self._mmap.flush()

def sync(self):
"""This method is deprecated, use `flush`."""
warnings.warn("Use ``flush``.", DeprecationWarning)
self.flush()

def _close(self):
"""Close the memmap file. Only do this when deleting the object."""
if self.base is self._mmap:
@@ -292,11 +287,6 @@ def _close(self):
self._mmap.close()
self._mmap = None

def close(self):
"""Close the memmap file. Does nothing."""
warnings.warn("``close`` is deprecated on memmap arrays. Use del",
DeprecationWarning)

def __del__(self):
# We first check if we are the owner of the mmap, rather than
# a view, so deleting a view does not call _close
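Migration note for the two removals above, as a hedged sketch (the file name is a placeholder): flush() takes over from the deprecated sync(), and simply deleting the memmap object replaces the no-op close().

```python
import numpy as np

fp = np.memmap('data.bin', dtype='float32', mode='w+', shape=(100,))
fp[:] = 1.0

fp.flush()   # previously fp.sync(), removed in this branch
del fp       # previously fp.close(); deleting the owner releases the mmap
```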
8 changes: 2 additions & 6 deletions numpy/core/tests/test_memmap.py
@@ -4,7 +4,7 @@

from numpy import memmap
from numpy import arange, allclose
from numpy.testing import *
from numpy.testing import TestCase, assert_, assert_array_equal

class TestMemmap(TestCase):
def setUp(self):
@@ -27,7 +27,7 @@ def test_roundtrip(self):
# Read data back from file
newfp = memmap(self.tmpfp, dtype=self.dtype, mode='r',
shape=self.shape)
assert allclose(self.data, newfp)
assert_(allclose(self.data, newfp))
assert_array_equal(self.data, newfp)

def test_open_with_filename(self):
@@ -71,10 +71,6 @@ def test_flush(self):
fp[:] = self.data[:]
fp.flush()

warnings.simplefilter('ignore', DeprecationWarning)
fp.sync()
warnings.simplefilter('default', DeprecationWarning)

def test_del(self):
# Make sure a view does not delete the underlying mmap
fp_base = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
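The test change above replaces a bare assert, which vanishes under python -O, with numpy.testing.assert_; a small sketch of the pattern used here:

```python
from numpy import arange, allclose
from numpy.testing import assert_, assert_array_equal

a = arange(4)
b = arange(4)

assert_(allclose(a, b))      # survives python -O, unlike a plain assert
assert_array_equal(a, b)     # clearer failure message for array mismatches
```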
12 changes: 1 addition & 11 deletions numpy/fft/fftpack.py
@@ -31,8 +31,7 @@

"""
__all__ = ['fft','ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn',
'refft', 'irefft','refftn','irefftn', 'refft2', 'irefft2']
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']

from numpy.core import asarray, zeros, swapaxes, shape, conjugate, \
take
@@ -1114,12 +1113,3 @@ def irfft2(a, s=None, axes=(-2,-1)):
"""

return irfftn(a, s, axes)

# Deprecated names
from numpy import deprecate
refft = deprecate(rfft, 'refft', 'rfft')
irefft = deprecate(irfft, 'irefft', 'irfft')
refft2 = deprecate(rfft2, 'refft2', 'rfft2')
irefft2 = deprecate(irfft2, 'irefft2', 'irfft2')
refftn = deprecate(rfftn, 'refftn', 'rfftn')
irefftn = deprecate(irfftn, 'irefftn', 'irfftn')
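The refft* names removed above were thin deprecate() wrappers around the corresponding rfft* functions, so callers can switch by renaming only; a minimal sketch:

```python
import numpy as np

x = np.array([0.0, 1.0, 0.0, -1.0])

# old (removed):  np.fft.refft(x), np.fft.irefft(spectrum)
spectrum = np.fft.rfft(x)            # FFT of real input
roundtrip = np.fft.irfft(spectrum)   # back to the real signal
```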
78 changes: 2 additions & 76 deletions numpy/lib/arraysetops.py
@@ -10,11 +10,6 @@
union1d,
setdiff1d

:Deprecated:
unique1d,
intersect1d_nu,
setmember1d

:Notes:

For floating point arrays, inaccurate results may appear due to usual round-off
@@ -28,8 +23,8 @@

:Author: Robert Cimrman
"""
__all__ = ['ediff1d', 'unique1d', 'intersect1d', 'intersect1d_nu', 'setxor1d',
'setmember1d', 'union1d', 'setdiff1d', 'unique', 'in1d']
__all__ = ['ediff1d', 'intersect1d', 'setxor1d', 'union1d', 'setdiff1d',
'unique', 'in1d']

import numpy as np
from numpy.lib.utils import deprecate
@@ -417,72 +412,3 @@ def setdiff1d(ar1, ar2, assume_unique=False):
return aux
else:
return np.asarray(ar1)[aux == 0]

@deprecate
def unique1d(ar1, return_index=False, return_inverse=False):
"""
This function is deprecated. Use unique() instead.
"""
if return_index:
import warnings
warnings.warn("The order of the output arguments for "
"`return_index` has changed. Before, "
"the output was (indices, unique_arr), but "
"has now been reversed to be more consistent.")

ar = np.asanyarray(ar1).flatten()
if ar.size == 0:
if return_inverse and return_index:
return ar, np.empty(0, np.bool), np.empty(0, np.bool)
elif return_inverse or return_index:
return ar, np.empty(0, np.bool)
else:
return ar

if return_inverse or return_index:
perm = ar.argsort()
aux = ar[perm]
flag = np.concatenate(([True], aux[1:] != aux[:-1]))
if return_inverse:
iflag = np.cumsum(flag) - 1
iperm = perm.argsort()
if return_index:
return aux[flag], perm[flag], iflag[iperm]
else:
return aux[flag], iflag[iperm]
else:
return aux[flag], perm[flag]

else:
ar.sort()
flag = np.concatenate(([True], ar[1:] != ar[:-1]))
return ar[flag]

@deprecate
def intersect1d_nu(ar1, ar2):
"""
This function is deprecated. Use intersect1d()
instead.
"""
# Might be faster than unique1d( intersect1d( ar1, ar2 ) )?
aux = np.concatenate((unique1d(ar1), unique1d(ar2)))
aux.sort()
return aux[aux[1:] == aux[:-1]]

@deprecate
def setmember1d(ar1, ar2):
"""
This function is deprecated. Use in1d(assume_unique=True)
instead.
"""
# We need this to be a stable sort, so always use 'mergesort' here. The
# values from the first array should always come before the values from the
# second array.
ar = np.concatenate( (ar1, ar2 ) )
order = ar.argsort(kind='mergesort')
sar = ar[order]
equal_adj = (sar[1:] == sar[:-1])
flag = np.concatenate( (equal_adj, [False] ) )

indx = order.argsort(kind='mergesort')[:len( ar1 )]
return flag[indx]
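Replacement cheat sheet for the three functions deleted above, as a small sketch: unique() for unique1d(), intersect1d() for intersect1d_nu(), and in1d(..., assume_unique=True) for setmember1d().

```python
import numpy as np

a = np.array([3, 1, 2, 3, 1])
b = np.array([2, 3, 4])

np.unique(a)                          # was unique1d(a)          -> [1 2 3]
np.intersect1d(a, b)                  # was intersect1d_nu(a, b) -> [2 3]

ua, ub = np.unique(a), np.unique(b)
np.in1d(ua, ub, assume_unique=True)   # was setmember1d(ua, ub)  -> [False True True]
```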
65 changes: 0 additions & 65 deletions numpy/lib/benchmarks/bench_arraysetops.py

This file was deleted.

26 changes: 14 additions & 12 deletions numpy/lib/npyio.py
@@ -1049,7 +1049,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
"""
Load data from a text file, with missing values handled as specified.

Each line past the first `skiprows` lines is split at the `delimiter`
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.

Parameters
@@ -1072,20 +1072,20 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
The numbers of lines to skip at the beginning of the file.
skip_footer : int, optional
The numbers of lines to skip at the end of the file
converters : variable or None, optional
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing_values : variable or None, optional
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable or None, optional
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence or None, optional
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skiprows` lines.
after the first `skip_header` lines.
If `names` is a sequence or a single-string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
@@ -1218,9 +1218,10 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,

# Get the first valid lines after the first skiprows ones ..
if skiprows:
warnings.warn("The use of `skiprows` is deprecated.\n"\
"Please use `skip_header` instead.",
DeprecationWarning)
warnings.warn(\
"The use of `skiprows` is deprecated, it will be removed in numpy 2.0.\n" \
"Please use `skip_header` instead.",
DeprecationWarning)
skip_header = skiprows
# Skip the first `skip_header` rows
for i in xrange(skip_header):
@@ -1343,9 +1344,10 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,

# Process the deprecated `missing`
if missing != asbytes(''):
warnings.warn("The use of `missing` is deprecated.\n"\
"Please use `missing_values` instead.",
DeprecationWarning)
warnings.warn(\
"The use of `missing` is deprecated, it will be removed in Numpy 2.0.\n" \
"Please use `missing_values` instead.",
DeprecationWarning)
values = [str(_) for _ in missing.split(asbytes(","))]
for entry in missing_values:
entry.extend(values)
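Both warnings above point callers at the replacement keywords; a hedged sketch of a migrated genfromtxt call (the file name and column layout are hypothetical):

```python
import numpy as np

data = np.genfromtxt(
    'data.txt',              # placeholder path
    delimiter=',',
    skip_header=2,           # was: skiprows=2
    missing_values='???',    # was: missing='???'
    filling_values=0.0,
)
```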