diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index d137bed49..492bbd562 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -15,11 +15,11 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: ["3.12"] + python-version: ["3"] steps: - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} allow-prereleases: true @@ -41,6 +41,8 @@ jobs: pwd ls -lR .. - name: Upload to codecov - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 with: files: tmp/coverage.xml + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/doc-build.yml b/.github/workflows/doc-build.yml index 9ef58daff..cf97653ff 100644 --- a/.github/workflows/doc-build.yml +++ b/.github/workflows/doc-build.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: ["3.12"] + python-version: ["3"] steps: - name: Apt update run: sudo apt update @@ -24,7 +24,7 @@ jobs: sudo apt install -y graphviz - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} allow-prereleases: true diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index cf1694125..ed79b1870 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -17,7 +17,7 @@ jobs: - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 9e20fa079..e00b38e0c 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -23,12 +23,12 @@ jobs: test: strategy: matrix: - python_version: ["3.8", "3.9", "3.10", "3.11", "3.12"] + python_version: ["3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] os: [ubuntu-latest, windows-latest, macos-latest] runs-on: ${{ matrix.os }} steps: - uses: actions/checkout@v4 - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: ${{ matrix.python_version }} allow-prereleases: true @@ -47,7 +47,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install diff --git a/.mailmap b/.mailmap index 7bd8bf237..d219f757e 100644 --- a/.mailmap +++ b/.mailmap @@ -12,6 +12,7 @@ Cindee Madison Cindee Madison Cindee Madison cindee.madison <> Cindee Madison cindeem <> Cindee Madison cindeem +Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Eleftherios Garyfallidis Erik Ziegler erikz Fabian Pedregosa @@ -34,6 +35,7 @@ Matthew Brett mb312 Matthieu Brucher Merlin Keller Merlin KELLER Merlin Keller keller +Nicholas Tolley <> Nicholas Tolley <55253912+ntolley@users.noreply.github.com> Tom Waite twaite Virgile Fritsch VirgileFritsch Virgile Fritsch Fritsch diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 29f737461..11b0b51b6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,7 +2,7 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: f71fa2c1f9cf5cb705f73dffe4b21f7c61470ba9 # v4.4.0 + rev: cef0300fd0fc4d2a87a85fa2093c6b283ea36f4b # v5.0.0 hooks: - id: trailing-whitespace 
- id: end-of-file-fixer @@ -13,15 +13,15 @@ repos: args: [--allow-multiple-documents] - id: check-added-large-files - - repo: https://github.com/pre-commit/mirrors-prettier - rev: fc260393cc4ec09f8fc0a5ba4437f481c8b55dc1 # frozen: v3.0.3 + - repo: https://github.com/rbubley/mirrors-prettier + rev: 1463d990e0801964764a375260dca598513f3be5 # frozen: v3.3.3 hooks: - id: prettier files: \.(md|rst|toml|yml|yaml) args: [--prose-wrap=preserve] - repo: https://github.com/astral-sh/ruff-pre-commit - rev: 7bcc70ca475b87e0fdee2511300c74b25babe0b3 # frozen: v0.1.9 + rev: 8b76f04e7e5a9cd259e9d1db7799599355f97cdf # frozen: v0.8.2 hooks: - id: ruff args: [--fix, --exit-non-zero-on-fix] diff --git a/Changelog b/Changelog index 438527522..9e99405e9 100644 --- a/Changelog +++ b/Changelog @@ -36,6 +36,19 @@ Abbreviated authors are: * JM - Jarrod Millman * SvdW - Stéfan van der Walt +* 0.6.1 (Saturday 5 October 2024) + + Compatibility release for Numpy 2.0 + + * Port code for Numpy 2.0 compatibility (MB) + * Update for test precision on Sympy 1.13 (MB) + * Clean up consts and casts in C code (BB) + * Refactoring to functools.cached_property, style and CI updates (CM) + * CI and automated style check updates (Dimitri Papadopoulos Orfanos) + * Fix for Viz example (Nicholas Tolley) + * Add spin tooling for working with repository checkout (SvdW) + * Fix shebangs for some development scripts (Étienne Mollier) + * 0.6.0 (Thursday 21 December 2023) Bugfix, refactoring and compatibility release. diff --git a/LICENSE b/LICENSE index 3f593692b..0bee217a6 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2006-2023, NIPY Developers +Copyright (c) 2006-2024, NIPY Developers All rights reserved. Redistribution and use in source and binary forms, with or without diff --git a/doc/conf.py b/doc/conf.py index 0eca1457c..6a9fda002 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -65,7 +65,7 @@ # copyright = ':ref:`2005-2018, Neuroimaging in Python team. # `' -copyright = '2005-2023, Neuroimaging in Python team' +copyright = '2005-2024, Neuroimaging in Python team' # The default replacements for |version| and |release|, also used in various # other places throughout the built documents. 
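The first hunk of the next file rewrites a doctest to assert on values rather than match printed output. That is the usual way to keep doctests stable across the NumPy 2.0 scalar-repr change (this is the NumPy 2.0 compatibility series, per the Changelog above): tuples of np.float64 now print as np.float64(-92.0) rather than -92.0, so an exact-output doctest can only pass on one NumPy major version. A minimal sketch of the pattern, assuming only NumPy; the values are the ones from the doctest below::

    import numpy as np

    # Under NumPy >= 2.0 this prints "(np.float64(-92.0), np.float64(92.0))";
    # under 1.x it prints "(-92.0, 92.0)", so a doctest matching either string
    # fails on the other major version.
    x_lims = (np.float64(-92.0), np.float64(92.0))
    print(x_lims)

    # Comparing values instead of printed text passes on both versions.
    assert np.all(np.asarray(x_lims) == (-92, 92))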
diff --git a/doc/devel/code_discussions/coordmap_notes.rst b/doc/devel/code_discussions/coordmap_notes.rst index f43542e4d..b8dc2f628 100644 --- a/doc/devel/code_discussions/coordmap_notes.rst +++ b/doc/devel/code_discussions/coordmap_notes.rst @@ -495,8 +495,10 @@ AffineTransform( [ 0., 0., 1.]]) ) ->>> bounding_box(y70, (x_spec[1], z_spec[1])) -((-92.0, 92.0), (70.0, 70.0), (-70.0, 100.0)) +>>> x_lims, y_lims, z_lims = bounding_box(y70, (x_spec[1], z_spec[1])) +>>> assert np.all(x_lims == (-92, 92)) +>>> assert np.all(y_lims == (70, 70)) +>>> assert np.all(z_lims == (-70, 100)) Maybe these aren't things that "normal human beings" (to steal a quote from Gael) can use, but they're explicit and they are tied to precise mathematical diff --git a/doc/labs/viz.rst b/doc/labs/viz.rst index 102d2c97c..4a179b9c4 100644 --- a/doc/labs/viz.rst +++ b/doc/labs/viz.rst @@ -36,7 +36,7 @@ An example mni_sform_inv = np.linalg.inv(mni_sform) # Color an asymmetric rectangle around Broca area: x, y, z = -52, 10, 22 - x_map, y_map, z_map = coord_transform(x, y, z, mni_sform_inv) + x_map, y_map, z_map = [int(coord) for coord in coord_transform(x, y, z, mni_sform_inv)] map = np.zeros((182, 218, 182)) map[x_map-30:x_map+30, y_map-3:y_map+3, z_map-10:z_map+10] = 1 diff --git a/doc/sphinxext/autosummary_generate.py b/doc/sphinxext/autosummary_generate.py index 812bde607..4fae0e55e 100755 --- a/doc/sphinxext/autosummary_generate.py +++ b/doc/sphinxext/autosummary_generate.py @@ -70,7 +70,7 @@ def main(): print(f"Failed to import '{name}': {e}") continue - fn = os.path.join(path, '%s.rst' % name) + fn = os.path.join(path, f'{name}.rst') if os.path.exists(fn): # skip diff --git a/doc/users/coordinate_map.rst b/doc/users/coordinate_map.rst index 76ba5ef7e..ba6388a14 100644 --- a/doc/users/coordinate_map.rst +++ b/doc/users/coordinate_map.rst @@ -170,8 +170,8 @@ We resample the 'subject' image to the 'atlas image True >>> normalized_subject_im.coordmap == atlas_im.coordmap True ->>> np.all(normalized_subject_im.affine == atlas_im.affine) -True +>>> # Normalized image now has atlas affine. +>>> assert np.all(normalized_subject_im.affine == atlas_im.affine) *********************** Mathematical definition diff --git a/doc/users/install_data.rst b/doc/users/install_data.rst index 3877162c6..dd181d458 100644 --- a/doc/users/install_data.rst +++ b/doc/users/install_data.rst @@ -18,17 +18,22 @@ Data package installation as an administrator The installation procedure, for now, is very basic. For example, let us say that you need the 'nipy-templates' package at -http://nipy.org/data-packages/nipy-templates-0.2.tar.gz +http://nipy.org/data-packages/nipy-templates-0.3.tar.gz . You simply download this archive, unpack it, and then run the standard ``python setup.py install`` on it. On a unix system this might look like:: # curl -L flag to follow redirect; can also use wget - curl -OL http://nipy.org/data-packages/nipy-templates-0.2.tar.gz - tar zxvf nipy-templates-0.2.tar.gz - cd nipy-templates-0.2 + curl -OL http://nipy.org/data-packages/nipy-templates-0.3.tar.gz + tar zxvf nipy-templates-0.3.tar.gz + cd nipy-templates-0.3 sudo python setup.py install +This is for the ``nipy-templates`` package; there is also a ``nipy-data`` package, +for which the equivalent would be:: + + curl -OL http://nipy.org/data-packages/nipy-data-0.3.tar.gz + On windows, download the file, extract the archive to a folder using the GUI, and then, using the windows shell or similar:: @@ -63,25 +68,25 @@ things from Windows powershell. directory.
You can do this via the GUI, or on the command line (in Unix):: cd ~/Downloads - curl -OL http://nipy.org/data-packages/nipy-templates-0.2.tar.gz - curl -OL http://nipy.org/data-packages/nipy-data-0.2.tar.gz + curl -OL http://nipy.org/data-packages/nipy-templates-0.3.tar.gz + curl -OL http://nipy.org/data-packages/nipy-data-0.3.tar.gz * Unpack both of these:: - tar zxvf nipy-data-0.2.tar.gz - tar zxvf nipy-templates-0.2.tar.gz + tar zxvf nipy-data-0.3.tar.gz + tar zxvf nipy-templates-0.3.tar.gz * After you have unpacked the templates, you will have a directory called - something like ``nipy-templates-0.2``. In that directory you should see a + something like ``nipy-templates-0.3``. In that directory you should see a subdirectory called ``templates``. Copy / move / link the ``templates`` subdirectory into ``/nipy``, so you now have a directory ``/nipy/templates``. From unpacking the data, you should also - have a directory like ``nipy-data-0.2`` with a subdirectory ``data``. Copy + have a directory like ``nipy-data-0.3`` with a subdirectory ``data``. Copy / move / link that ``data`` directory into ``/nipy`` as well. For example:: - cp -r nipy-data-0.2/data ~/.nipy/nipy - cp -r nipy-templates-0.2/templates ~/.nipy/nipy + cp -r nipy-data-0.3/data ~/.nipy/nipy + cp -r nipy-templates-0.3/templates ~/.nipy/nipy * Check whether that worked. Run the following command from the shell:: @@ -110,7 +115,7 @@ On unix For example, say you installed with:: - cd nipy-templates-0.2 + cd nipy-templates-0.3 python setup.py install --prefix=/home/my-user/some-dir Then you may want to do make a file ``~/.nipy/config.ini`` with the @@ -124,7 +129,7 @@ On windows Say you installed with (windows shell):: - cd nipy-templates-0.2 + cd nipy-templates-0.3 python setup.py install --prefix=c:\some\path Then first, find out your home directory:: diff --git a/doc/users/plots/hrf_different.py b/doc/users/plots/hrf_different.py index b47d79edf..527d047d2 100644 --- a/doc/users/plots/hrf_different.py +++ b/doc/users/plots/hrf_different.py @@ -16,8 +16,8 @@ ta = [0,4,8,12,16]; tb = [2,6,10,14,18] ba = 1; bb = -2 -na = ba * sum([glover.subs(T, T - t) for t in ta]) -nb = bb * sum([afni.subs(T, T - t) for t in tb]) +na = ba * sum(glover.subs(T, T - t) for t in ta) +nb = bb * sum(afni.subs(T, T - t) for t in tb) nav = lambdify_t(na) nbv = lambdify_t(nb) diff --git a/doc/users/plots/neuronal_block.py b/doc/users/plots/neuronal_block.py index f30061598..367544642 100644 --- a/doc/users/plots/neuronal_block.py +++ b/doc/users/plots/neuronal_block.py @@ -14,8 +14,8 @@ ta = [0,4,8,12,16]; tb = [2,6,10,14,18] ba = Symbol('ba'); bb = Symbol('bb'); t = Symbol('t') -fa = sum([Piecewise((0, (t<_t)), ((t-_t)/0.5, (t<_t+0.5)), (1, (t >= _t+0.5))) for _t in ta])*ba -fb = sum([Piecewise((0, (t<_t)), ((t-_t)/0.5, (t<_t+0.5)), (1, (t >= _t+0.5))) for _t in tb])*bb +fa = sum(Piecewise((0, (t<_t)), ((t-_t)/0.5, (t<_t+0.5)), (1, (t >= _t+0.5))) for _t in ta)*ba +fb = sum(Piecewise((0, (t<_t)), ((t-_t)/0.5, (t<_t+0.5)), (1, (t >= _t+0.5))) for _t in tb)*bb N = fa+fb Nn = N.subs(ba,1) diff --git a/doc/users/plots/neuronal_event.py b/doc/users/plots/neuronal_event.py index a16f59e0b..ef2e50b90 100644 --- a/doc/users/plots/neuronal_event.py +++ b/doc/users/plots/neuronal_event.py @@ -14,8 +14,8 @@ ba = Symbol('ba') bb = Symbol('bb') t = Symbol('t') -fa = sum([Heaviside(t - _t) for _t in ta]) * ba -fb = sum([Heaviside(t - _t) for _t in tb]) * bb +fa = sum(Heaviside(t - _t) for _t in ta) * ba +fb = sum(Heaviside(t - _t) for _t in tb) * bb N = fa + fb Nn 
= N.subs(ba, 1) diff --git a/examples/interfaces/process_ds105.py b/examples/interfaces/process_ds105.py index 168729b98..199d03521 100755 --- a/examples/interfaces/process_ds105.py +++ b/examples/interfaces/process_ds105.py @@ -58,7 +58,7 @@ def _sorted_prefer_nii(file_list): - """ Strip any filanames ending nii.gz if matching .nii filename in list + """ Strip any filenames ending nii.gz if matching .nii filename in list """ preferred = [] for fname in file_list: @@ -308,10 +308,7 @@ def process_subject(ddef, study_def, ana_def): def get_subjects(data_path, subj_ids, study_def, ana_def): - ddefs = [] - for subj_id in subj_ids: - ddefs.append(get_fdata(data_path, subj_id)) - return ddefs + return [get_fdata(data_path, subj_id) for subj_id in subj_ids] def main(): diff --git a/examples/labs/write_paradigm_file.py b/examples/labs/write_paradigm_file.py index a063cc620..b05cef98a 100755 --- a/examples/labs/write_paradigm_file.py +++ b/examples/labs/write_paradigm_file.py @@ -45,11 +45,7 @@ sess = np.zeros(np.size(time)).astype('int8') pdata = np.vstack((sess, cid, time)).T csvfile = 'localizer_paradigm.csv' -# Opening files for CSV writing differs between Python 2 and 3 -if sys.version_info[0] >= 3: # Python 3 - fid = open(csvfile, "w", newline = '') -else: # Python 2 - fid = open(csvfile, "wb") +fid = open(csvfile, "w", newline = '') writer = csv.writer(fid, delimiter=' ') for row in pdata: writer.writerow(row) diff --git a/lib/fff_python_wrapper/fffpy.c b/lib/fff_python_wrapper/fffpy.c index 90a3d475d..718c358ed 100644 --- a/lib/fff_python_wrapper/fffpy.c +++ b/lib/fff_python_wrapper/fffpy.c @@ -9,11 +9,7 @@ because PyArray_API is defined static, in order not to share that symbol within the dso. (import_array() asks the pointer value to the python process) */ -/* - * deal with differences in macro return result between Python 2 and 3 - * http://mail.scipy.org/pipermail/numpy-discussion/2010-December/054350.html - */ -IMP_OUT fffpy_import_array(void) { +void* fffpy_import_array(void) { import_array(); } @@ -47,7 +43,7 @@ void fff_vector_fetch_using_NumPy(fff_vector* y, const char* x, npy_intp stride, PyArrayObject* X = (PyArrayObject*) PyArray_New(&PyArray_Type, 1, dim, type, strides, (void*)x, itemsize, NPY_BEHAVED, NULL); PyArrayObject* Y = (PyArrayObject*) PyArray_SimpleNewFromData(1, dim, NPY_DOUBLE, (void*)y->data); - PyArray_CastTo(Y, X); + PyArray_CopyInto(Y, X); Py_XDECREF(Y); Py_XDECREF(X); return; @@ -203,7 +199,7 @@ fff_matrix* fff_matrix_fromPyArray(const PyArrayObject* x) y->size1 = (size_t) PyArray_DIM(x,0); y->size2 = (size_t) PyArray_DIM(x,1); y->tda = y->size2; - y->data = (double*) PyArray_DATA(x); + y->data = PyArray_DATA(x); y->owner = 0; } /* Otherwise, output a owner (contiguous) matrix with copied @@ -215,7 +211,7 @@ fff_matrix* fff_matrix_fromPyArray(const PyArrayObject* x) dim[1] = dim1; xd = (PyArrayObject*) PyArray_SimpleNewFromData(2, dim, NPY_DOUBLE, (void*)y->data); - PyArray_CastTo(xd, (PyArrayObject*)x); + PyArray_CopyInto(xd, (PyArrayObject*)x); Py_XDECREF(xd); } @@ -427,7 +423,7 @@ fff_array* fff_array_fromPyArray(const PyArrayObject* x) /* Create array (not owner) */ y = (fff_array*)malloc(sizeof(fff_array)); *y = fff_array_view(datatype, - (void*) PyArray_DATA(x), + PyArray_DATA(x), dimX, dimY, dimZ, dimT, offX, offY, offZ, offT); @@ -527,7 +523,11 @@ fffpy_multi_iterator* fffpy_multi_iterator_new(int narr, int axis, ...) 
/* Create new instance */ thisone = (fffpy_multi_iterator*)malloc(sizeof(fffpy_multi_iterator)); - multi = PyArray_malloc(sizeof(PyArrayMultiIterObject)); + /* Static size of PyArrayMultiIterObject. + * + * https://github.com/numpy/numpy/issues/26765#issuecomment-2391737671 + */ + multi = PyArray_malloc(PyArrayMultiIter_Type.tp_basicsize); vector = (fff_vector**)malloc(narr*sizeof(fff_vector*)); /* Initialize the PyArrayMultiIterObject instance from the variadic arguments */ diff --git a/lib/fff_python_wrapper/fffpy.h b/lib/fff_python_wrapper/fffpy.h index 0ba8f4fa7..216f920b7 100644 --- a/lib/fff_python_wrapper/fffpy.h +++ b/lib/fff_python_wrapper/fffpy.h @@ -31,17 +31,8 @@ static, in order not to share that symbol within the dso. (import_array() asks the pointer value to the python process) */ -/* - * deal with differences in macro return result between Python 2 and 3 - * http://mail.scipy.org/pipermail/numpy-discussion/2010-December/054350.html - */ -#if PY_MAJOR_VERSION >= 3 -typedef int IMP_OUT; -#else -typedef void IMP_OUT; -#endif -extern IMP_OUT fffpy_import_array(void); +extern void* fffpy_import_array(void); /*! \brief Convert \c PyArrayObject to \c fff_vector diff --git a/meson.build b/meson.build index ad7982b8e..1c1cef8f3 100644 --- a/meson.build +++ b/meson.build @@ -2,13 +2,16 @@ project( 'nipy', 'c', # Update also in __init__.py - version: '0.6.1.dev1', + # For development. + version: '0.6.2.dev1', + # For release. + # version: '0.6.1', license: 'BSD-3', meson_version: '>= 1.1.1', default_options: [ 'buildtype=debugoptimized', 'b_ndebug=if-release', - 'c_std=c99' + 'c_std=c17', ], ) diff --git a/nipy/__init__.py b/nipy/__init__.py index d9100486f..7534c47a5 100644 --- a/nipy/__init__.py +++ b/nipy/__init__.py @@ -7,9 +7,9 @@ # When updating here, also update meson.build file. # Form for development. -__version__ = "0.6.1.dev1" +__version__ = "0.6.2.dev1" # Form for release. -# __version__ = "0.6.0" +# __version__ = "0.6.1" def _test_local_install(): diff --git a/nipy/_build_utils/cythoner.py b/nipy/_build_utils/cythoner.py index 36ad4058a..10d4c06e5 100755 --- a/nipy/_build_utils/cythoner.py +++ b/nipy/_build_utils/cythoner.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """ Scipy variant of Cython command Cython, as applied to single pyx file. diff --git a/nipy/_build_utils/gcc_build_bitness.py b/nipy/_build_utils/gcc_build_bitness.py index 66a3f341b..96fdefc30 100755 --- a/nipy/_build_utils/gcc_build_bitness.py +++ b/nipy/_build_utils/gcc_build_bitness.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """ Detect bitness (32 or 64) of Mingw-w64 gcc build target on Windows. 
""" @@ -8,7 +8,7 @@ def main(): res = run(['gcc', '-v'], check=True, text=True, capture_output=True) - target = re.search(r'^Target: (.*)$', res.stderr, flags=re.M).groups()[0] + target = re.search(r'^Target: (.*)$', res.stderr, flags=re.MULTILINE).groups()[0] if target.startswith('i686'): print('32') elif target.startswith('x86_64'): diff --git a/nipy/algorithms/clustering/bgmm.py b/nipy/algorithms/clustering/bgmm.py index 02a0ed658..cbfbfa4b3 100644 --- a/nipy/algorithms/clustering/bgmm.py +++ b/nipy/algorithms/clustering/bgmm.py @@ -38,7 +38,7 @@ def detsh(H): Parameters ---------- H array of shape(n,n) - the input matrix, assumed symmmetric and positive + the input matrix, assumed symmetric and positive Returns ------- @@ -222,8 +222,8 @@ def multinomial(probabilities): Parameters ---------- probabilities: array of shape (nelements, nclasses): - likelihood of each element belongin to each class - each row is assumedt to sum to 1 + likelihood of each element belonging to each class + each row is assumed to sum to 1 One sample is draw from each row, resulting in Returns diff --git a/nipy/algorithms/clustering/gmm.py b/nipy/algorithms/clustering/gmm.py index ba1fbeee7..90e58aafc 100644 --- a/nipy/algorithms/clustering/gmm.py +++ b/nipy/algorithms/clustering/gmm.py @@ -827,7 +827,7 @@ def show_components(self, x, gd, density=None, mpaxes=None): bins = max(10, int((x.max() - x.min()) / step)) xmin = 1.1 * x.min() - 0.1 * x.max() xmax = 1.1 * x.max() - 0.1 * x.min() - h, c = np.histogram(x, bins, [xmin, xmax], normed=True) + h, c = np.histogram(x, bins, [xmin, xmax], density=True) # Make code robust to new and old behavior of np.histogram c = c[:len(h)] @@ -848,8 +848,7 @@ def show_components(self, x, gd, density=None, mpaxes=None): fontsize=12) legend = ['data'] - for k in range(self.k): - legend.append('component %d' % (k + 1)) + legend.extend(f'component {k}' for k in range(1, self.k + 1)) l = ax.legend(tuple(legend)) for t in l.get_texts(): t.set_fontsize(12) diff --git a/nipy/algorithms/clustering/hierarchical_clustering.py b/nipy/algorithms/clustering/hierarchical_clustering.py index 0813568a7..fd997f400 100644 --- a/nipy/algorithms/clustering/hierarchical_clustering.py +++ b/nipy/algorithms/clustering/hierarchical_clustering.py @@ -51,7 +51,7 @@ class WeightedForest(Forest): parents: array of shape (self.V) the parent array edges: array of shape (self.E,2) reprensenting pairwise neighbors weights, array of shape (self.E), +1/-1 for scending/descending links - children: list of arrays that represents the childs of any node + children: list of arrays that represents the children of any node height: array of shape(self.V) """ @@ -221,10 +221,8 @@ def list_of_subtrees(self): way such that parent[i]>i for all i Only the leaves are listeed, not the subtrees themselves """ - lst = [] + lst = [np.array([], np.int_) for i in range(self.V)] n = np.sum(self.isleaf()) - for i in range(self.V): - lst.append(np.array([], np.int_)) for i in range(n): lst[i] = np.array([i], np.int_) for i in range(self.V - 1): @@ -630,8 +628,8 @@ def ward_quick(G, feature, verbose=False): Caveat : only approximate """ - warn('Function ward_quick from ' + - 'nipy.algorithms.clustering.hierrachical_clustering ' + + warn('Function ward_quick from ' + 'nipy.algorithms.clustering.hierrachical_clustering ' 'deprecated, will be removed', FutureWarning, stacklevel=2) @@ -904,8 +902,8 @@ def ward(G, feature, verbose=False): When G has more than 1 connected component, t is no longer a tree. 
This case is handled cleanly now """ - warn('Function ward from ' + - 'nipy.algorithms.clustering.hierrachical_clustering ' + + warn('Function ward from ' + 'nipy.algorithms.clustering.hierarchical_clustering ' 'deprecated, will be removed', FutureWarning, stacklevel=2) diff --git a/nipy/algorithms/clustering/imm.py b/nipy/algorithms/clustering/imm.py index 9e8266a0a..24d8727d8 100644 --- a/nipy/algorithms/clustering/imm.py +++ b/nipy/algorithms/clustering/imm.py @@ -258,7 +258,7 @@ def cross_validated_update(self, x, z, plike, kfold=10): z[train] = self.reduce(z[train]) self.update(x[train], z[train]) - # draw the membership for the left-out datas + # draw the membership for the left-out data alike = self.likelihood(x[test], plike[test]) slike[test] = alike.sum(1) # standard + likelihood under the prior diff --git a/nipy/algorithms/clustering/von_mises_fisher_mixture.py b/nipy/algorithms/clustering/von_mises_fisher_mixture.py index 5958f3ea7..2aabd3916 100644 --- a/nipy/algorithms/clustering/von_mises_fisher_mixture.py +++ b/nipy/algorithms/clustering/von_mises_fisher_mixture.py @@ -9,8 +9,8 @@ import numpy as np -warn('Module nipy.algorithms.clustering.von_mises_fisher_mixture' + - 'deprecated, will be removed', +warn('Module nipy.algorithms.clustering.von_mises_fisher_mixture deprecated, ' + 'will be removed', FutureWarning, stacklevel=2) @@ -60,7 +60,7 @@ def log_density_per_component(self, x): Returns ------- - like: array of shape(n, self.k), with non-neagtive values + like: array of shape(n, self.k), with non-negative values the density """ n = x.shape[0] @@ -84,7 +84,7 @@ def density_per_component(self, x): Returns ------- - like: array of shape(n, self.k), with non-neagtive values + like: array of shape(n, self.k), with non-negative values the density """ return np.exp(self.log_density_per_component(x)) diff --git a/nipy/algorithms/diagnostics/timediff.py b/nipy/algorithms/diagnostics/timediff.py index 01b07475b..a59562258 100644 --- a/nipy/algorithms/diagnostics/timediff.py +++ b/nipy/algorithms/diagnostics/timediff.py @@ -186,12 +186,10 @@ def time_slice_diffs_image(img, time_axis='t', slice_axis='slice'): img_class = img.__class__ time_in_ax, time_out_ax = io_axis_indices(img.coordmap, time_axis) if None in (time_in_ax, time_out_ax): - raise AxisError('Cannot identify matching input output axes with "%s"' - % time_axis) + raise AxisError(f'Cannot identify matching input output axes with "{time_axis}"') slice_in_ax, slice_out_ax = io_axis_indices(img.coordmap, slice_axis) if None in (slice_in_ax, slice_out_ax): - raise AxisError('Cannot identify matching input output axes with "%s"' - % slice_axis) + raise AxisError(f'Cannot identify matching input output axes with "{slice_axis}"') vol_coordmap = drop_io_dim(img.coordmap, time_axis) results = time_slice_diffs(img.get_fdata(), time_in_ax, slice_in_ax) for key in ('slice_diff2_max_vol', 'diff2_mean_vol'): diff --git a/nipy/algorithms/diagnostics/tsdiffplot.py b/nipy/algorithms/diagnostics/tsdiffplot.py index 5d99ddea9..b2c9759c6 100644 --- a/nipy/algorithms/diagnostics/tsdiffplot.py +++ b/nipy/algorithms/diagnostics/tsdiffplot.py @@ -6,6 +6,7 @@ import nipy +from ...utils import deprecate_with_doc from .timediff import time_slice_diffs @@ -76,7 +77,7 @@ def xmax_labels(ax, val, xlabel, ylabel): return axes -@np.deprecate_with_doc('Please see docstring for alternative code') +@deprecate_with_doc('please see docstring for alternative code') def plot_tsdiffs_image(img, axes=None, show=True): ''' Plot time series diagnostics for image
diff --git a/nipy/algorithms/graph/_graph.pyx b/nipy/algorithms/graph/_graph.pyx index 01bc359a8..e0300bde3 100644 --- a/nipy/algorithms/graph/_graph.pyx +++ b/nipy/algorithms/graph/_graph.pyx @@ -1,22 +1,21 @@ -import numpy as np -cimport numpy as np +cimport numpy as cnp cimport cython -ctypedef np.float64_t DOUBLE -ctypedef np.int_t INT +ctypedef cnp.float64_t DOUBLE +ctypedef cnp.intp_t INT @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) -def dilation(np.ndarray[DOUBLE, ndim=2] field,\ - np.ndarray[INT, ndim=1] idx,\ - np.ndarray[INT, ndim=1] neighb): +def dilation(cnp.ndarray[DOUBLE, ndim=2] field,\ + cnp.ndarray[INT, ndim=1] idx,\ + cnp.ndarray[INT, ndim=1] neighb): cdef int size_max = field.shape[0] cdef int dim = field.shape[1] cdef int i, j, d cdef DOUBLE fmax - cdef np.ndarray[DOUBLE, ndim=1] res = 0 * field[:, 0] + cdef cnp.ndarray[DOUBLE, ndim=1] res = 0 * field[:, 0] for d in range(dim): for i in range(size_max): fmax = field[i, d] diff --git a/nipy/algorithms/graph/forest.py b/nipy/algorithms/graph/forest.py index b6ffd636d..74e4d2791 100644 --- a/nipy/algorithms/graph/forest.py +++ b/nipy/algorithms/graph/forest.py @@ -155,9 +155,7 @@ def get_descendants(self, v, exclude_self=False): else: desc = [v] for w in self.children[v]: - temp = self.get_descendants(w) - for q in temp: - desc.append(q) + desc.extend(self.get_descendants(w)) desc.sort() if exclude_self and v in desc: desc = [i for i in desc if i != v] diff --git a/nipy/algorithms/graph/graph.py b/nipy/algorithms/graph/graph.py index ffbcdbce4..46fca7441 100644 --- a/nipy/algorithms/graph/graph.py +++ b/nipy/algorithms/graph/graph.py @@ -291,15 +291,15 @@ def mst(X): the corresponding WeightedGraph instance """ n = X.shape[0] - label = np.arange(n).astype(np.int_) + label = np.arange(n).astype(np.intp) - edges = np.zeros((0, 2)).astype(np.int_) + edges = np.zeros((0, 2)).astype(np.intp) # upper bound on maxdist**2 maxdist = 4 * np.sum((X - X[0]) ** 2, 1).max() nbcc = n while nbcc > 1: mindist = maxdist * np.ones(nbcc) - link = - np.ones((nbcc, 2)).astype(np.int_) + link = - np.ones((nbcc, 2)).astype(np.intp) # find nearest neighbors for n1 in range(n): @@ -347,7 +347,7 @@ def knn(X, k=1): Notes ----- - The knn system is symmeterized: if (ab) is one of the edges then (ba) is + The knn system is symmetrized: if (ab) is one of the edges then (ba) is also included """ from ..utils.fast_distance import euclidean_distance @@ -427,8 +427,8 @@ def lil_cc(lil): Dramatically slow for non-sparse graphs """ n = len(lil) - visited = np.zeros(n).astype(np.int_) - label = - np.ones(n).astype(np.int_) + visited = np.zeros(n).astype(np.intp) + label = - np.ones(n).astype(np.intp) k = 0 while (visited == 0).any(): front = [np.argmin(visited)] @@ -500,7 +500,7 @@ def create_edges(lxyz, nn, l1dist=1, left=np.array([]), right=np.array([]), i, j, d = create_edges(lxyz, n18, 2, i, j, d) if k == 26: i, j, d = create_edges(lxyz, n26, 3, i, j, d) - i, j = i.astype(np.int_), j.astype(np.int_) + i, j = i.astype(np.intp), j.astype(np.intp) # reorder the edges to have a more standard order order = np.argsort(i + j * (len(i) + 1)) @@ -515,7 +515,7 @@ def wgraph_from_3d_grid(xyz, k=18): Parameters ---------- - xyz: array of shape (nsamples, 3) and type np.int_, + xyz: array of shape (nsamples, 3) and type np.intp, k = 18: the number of neighbours considered. 
(6, 18 or 26) Returns @@ -617,7 +617,7 @@ def from_3d_grid(self, xyz, k=18): Parameters ---------- - xyz: array of shape (self.V, 3) and type np.int_, + xyz: array of shape (self.V, 3) and type np.intp, k = 18: the number of neighbours considered. (6, 18 or 26) Returns @@ -637,7 +637,7 @@ def from_3d_grid(self, xyz, k=18): raise TypeError('Creating graph from grid failed. '\ 'Maybe the grid is too big') self.E = np.size(i) - self.edges = np.zeros((self.E, 2), np.int_) + self.edges = np.zeros((self.E, 2), np.intp) self.edges[:, 0] = i self.edges[:, 1] = j self.weights = np.array(d) @@ -719,10 +719,10 @@ def compact_neighb(self): weights: array of shape(self.E), concatenated list of weights """ order = np.argsort(self.edges[:, 0] * float(self.V) + self.edges[:, 1]) - neighb = self.edges[order, 1].astype(np.int_) + neighb = self.edges[order, 1].astype(np.intp) weights = self.weights[order] degree, _ = self.degrees() - idx = np.hstack((0, np.cumsum(degree))).astype(np.int_) + idx = np.hstack((0, np.cumsum(degree))).astype(np.intp) return idx, neighb, weights def floyd(self, seed=None): @@ -730,7 +730,7 @@ def floyd(self, seed=None): Parameters ---------- - seed= None: array of shape (nbseed), type np.int_ + seed: None or array of shape (nbseed), type np.intp vertex indexes from which the distances are computed if seed==None, then every edge is a seed point @@ -880,7 +880,7 @@ def voronoi_labelling(self, seed): Parameters ---------- - seed: array of shape (nseeds), type (np.int_), + seed: array of shape (nseeds), type (np.intp), vertices from which the cells are built Returns @@ -896,7 +896,7 @@ def voronoi_labelling(self, seed): except: raise ValueError('undefined weights') dist, active = np.inf * np.ones(self.V), np.ones(self.V) - label = - np.ones(self.V, np.int_) + label = - np.ones(self.V, np.intp) idx, neighb, weight = self.compact_neighb() dist[seed] = 0 label[seed] = np.arange(len(seed)) @@ -930,7 +930,7 @@ def cliques(self): Returns ------- - cliques: array of shape (self.V), type (np.int_) + cliques: array of shape (self.V), type (np.intp) labelling of the vertices according to the clique they belong to """ if (self.weights < 0).any(): @@ -1034,7 +1034,7 @@ def kruskal(self): k = self.cc().max() + 1 E = 2 * self.V - 2 V = self.V - Kedges = np.zeros((E, 2)).astype(np.int_) + Kedges = np.zeros((E, 2)).astype(np.intp) Kweights = np.zeros(E) iw = np.argsort(self.weights) label = np.arange(V) @@ -1221,9 +1221,7 @@ def left_incidence(self): list[[e.0.0, .., e.0.i(0)], .., [e.V.0, E.V.i(V)]] where e.i.j is the set of edge indexes so that e.i.j[0] = i """ - linc = [] - for i in range(self.V): - linc.append([]) + linc = [[] for i in range(self.V)] for e in range(self.E): i = self.edges[e, 0] a = linc[i] @@ -1240,9 +1238,7 @@ def right_incidence(self): list[[e.0.0, .., e.0.i(0)], .., [e.V.0, E.V.i(V)]] where e.i.j is the set of edge indexes so that e.i.j[1] = i """ - rinc = [] - for i in range(self.V): - rinc.append([]) + rinc = [[] for i in range(self.V)] for e in range(self.E): i = self.edges[e, 1] a = rinc[i] diff --git a/nipy/algorithms/registration/affine.py b/nipy/algorithms/registration/affine.py index 86f65d736..b97241ba6 100644 --- a/nipy/algorithms/registration/affine.py +++ b/nipy/algorithms/registration/affine.py @@ -345,10 +345,10 @@ def compose(self, other): return a def __str__(self): - string = f'translation : {str(self.translation)}\n' - string += f'rotation : {str(self.rotation)}\n' - string += f'scaling : {str(self.scaling)}\n' - string += f'pre-rotation: 
{str(self.pre_rotation)}' + string = f'translation : {self.translation}\n' + string += f'rotation : {self.rotation}\n' + string += f'scaling : {self.scaling}\n' + string += f'pre-rotation: {self.pre_rotation}' return string def inv(self): @@ -388,8 +388,8 @@ def from_matrix44(self, aff): self._vec12 = vec12 def __str__(self): - string = f'translation : {str(self.translation)}\n' - string += f'rotation : {str(self.rotation)}\n' + string = f'translation : {self.translation}\n' + string += f'rotation : {self.rotation}\n' return string @@ -430,9 +430,9 @@ def _set_param(self, p): param = property(Affine._get_param, _set_param) def __str__(self): - string = f'translation : {str(self.translation)}\n' - string += f'rotation : {str(self.rotation)}\n' - string += f'scaling : {str(self.scaling[0])}\n' + string = f'translation : {self.translation}\n' + string += f'rotation : {self.rotation}\n' + string += f'scaling : {self.scaling[0]}\n' return string diff --git a/nipy/algorithms/registration/cubic_spline.c b/nipy/algorithms/registration/cubic_spline.c index 64f3daff8..3d1a175f4 100644 --- a/nipy/algorithms/registration/cubic_spline.c +++ b/nipy/algorithms/registration/cubic_spline.c @@ -199,7 +199,7 @@ void cubic_spline_transform(PyArrayObject* res, const PyArrayObject* src) unsigned int axis, aux=0, dimmax=0; /* Copy src into res */ - PyArray_CastTo(res, (PyArrayObject*)src); + PyArray_CopyInto(res, (PyArrayObject*)src); /* Compute the maximum array dimension over axes */ for(axis=0; axis< [...] diff --git a/nipy/algorithms/segmentation/mrf.c b/nipy/algorithms/segmentation/mrf.c [...] while(iter->index < iter->size) { - buf_idx = (npy_intp*)PyArray_ITER_DATA(iter); + buf_idx = PyArray_ITER_DATA(iter); if (*buf_idx >= 0) mask_size ++; PyArray_ITER_NEXT(iter); @@ -244,7 +257,7 @@ PyArrayObject* make_edges(const PyArrayObject* idx, xi = iter->coordinates[0]; yi = iter->coordinates[1]; zi = iter->coordinates[2]; - buf_idx = (npy_intp*)PyArray_ITER_DATA(iter); + buf_idx = PyArray_ITER_DATA(iter); idx_i = *buf_idx; /* Loop over neighbors if current point is within the mask */ @@ -261,7 +274,10 @@ /* Store edge if neighbor is within the mask */ if ((pos < 0) || (pos >= u0)) continue; - buf_idx = (npy_intp*)PyArray_DATA(idx) + pos; + /* Since PyArray_DATA() is a simple accessor, it is OK to cast away + * const as long as we treat the result as const. + */ + buf_idx = (npy_intp*)PyArray_DATA((PyArrayObject*) idx) + pos; if (*buf_idx < 0) continue; buf_edges[0] = idx_i; @@ -314,20 +330,27 @@ double interaction_energy(PyArrayObject* ppm, npy_intp K = PyArray_DIMS(ppm)[3]; npy_intp u2 = PyArray_DIMS(ppm)[2]*K; npy_intp u1 = PyArray_DIMS(ppm)[1]*u2; - npy_intp* xyz; - const double* U_data = (double*)PyArray_DATA(U); + const npy_intp* xyz; + /* Since PyArray_DATA() is a simple accessor, it is OK to cast away const as + * long as we treat the result as const. + */ + const double* U_data = PyArray_DATA((PyArrayObject*) U); int* ngb; /* Neighborhood system */ ngb = _select_neighborhood_system(ngb_size); /* Pointer to ppm array */ - ppm_data = (double*)PyArray_DATA(ppm); + ppm_data = PyArray_DATA(ppm); /* Allocate auxiliary vector */ p = (double*)calloc(K, sizeof(double)); /* Loop over points */ + + /* We can convert idx to a non-const PyObject for iteration purposes as long + * as we treat any pointer values obtained via the iterator as const.
+ */ iter = (PyArrayIterObject*)PyArray_IterAllButAxis((PyObject*)XYZ, &axis); while(iter->index < iter->size) { diff --git a/nipy/algorithms/statistics/formula/formulae.py b/nipy/algorithms/statistics/formula/formulae.py index d93797498..ceb52a43c 100644 --- a/nipy/algorithms/statistics/formula/formulae.py +++ b/nipy/algorithms/statistics/formula/formulae.py @@ -123,14 +123,14 @@ from nipy.algorithms.utils.matrices import full_rank, matrix_rank # Legacy repr printing from numpy. -from nipy.utils import VisibleDeprecationWarning, _NoValue +from nipy.utils import VisibleDeprecationWarning, _NoValue, deprecate_with_doc def _to_str(s): return s.decode('latin1') if isinstance(s, bytes) else str(s) -@np.deprecate(message = "Please use sympy.Dummy instead of this function") +@deprecate_with_doc("please use sympy.Dummy instead") def make_dummy(name): """ Make dummy variable of given name @@ -302,11 +302,10 @@ def getparams(expression): expression = expression.reshape((np.prod(expression.shape),)) for term in expression: atoms = atoms.union(sympy.sympify(term).atoms()) - params = [] - for atom in atoms: - if isinstance(atom, sympy.Symbol) and not is_term(atom): - params.append(atom) - params.sort(key=default_sort_key) + params = sorted((atom + for atom in atoms + if isinstance(atom, sympy.Symbol) and not is_term(atom)), + key=default_sort_key) return params @@ -330,11 +329,8 @@ def getterms(expression): expression = expression.reshape((np.prod(expression.shape),)) for e in expression: atoms = atoms.union(e.atoms()) - terms = [] - for atom in atoms: - if is_term(atom): - terms.append(atom) - terms.sort(key=default_sort_key) + terms = sorted((atom for atom in atoms if is_term(atom)), + key=default_sort_key) return terms @@ -1061,7 +1057,7 @@ def get_term(self, level): """ if level not in self.levels: raise ValueError('level not found') - return self[f"{self.name}_{str(level)}"] + return self[f"{self.name}_{level}"] def _getmaineffect(self, ref=-1): v = list(self._terms.copy()) diff --git a/nipy/algorithms/statistics/formula/tests/test_formula.py b/nipy/algorithms/statistics/formula/tests/test_formula.py index 2539aec98..19294a0f8 100644 --- a/nipy/algorithms/statistics/formula/tests/test_formula.py +++ b/nipy/algorithms/statistics/formula/tests/test_formula.py @@ -9,7 +9,6 @@ import numpy as np import pytest import sympy -from numpy.core.records import fromrecords from numpy.testing import assert_almost_equal, assert_array_equal from sympy.utilities.lambdify import implemented_function @@ -219,6 +218,7 @@ def test_make_recarray(): # Test make_array # From list / sequence # 2D case + fromrecords = np.rec.fromrecords data_2d = [(3, 4), (4, 6), (7, 9)] m = F.make_recarray(data_2d, 'wv', [np.float64, np.int_]) assert_starr_equal(m, fromrecords( diff --git a/nipy/algorithms/statistics/intvol.pyx b/nipy/algorithms/statistics/intvol.pyx index 81b2e7e39..720d8ac58 100644 --- a/nipy/algorithms/statistics/intvol.pyx +++ b/nipy/algorithms/statistics/intvol.pyx @@ -455,7 +455,7 @@ def EC3d(mask): l0 = l0 - m # fpmask has the same sum as mask, but with predictable dtype - return l0 + fpmask.sum() + return l0 + fpmask.sum().astype(int) def Lips3d(coords, mask): @@ -690,7 +690,7 @@ def Lips3d(coords, mask): l0 = l0 - m # fpmask has the same sum as mask, but with predictable dtype - l0 += fpmask.sum() + l0 += fpmask.sum().astype(int) return np.array([l0, l1, l2, l3]) @@ -898,7 +898,7 @@ def Lips2d(coords, mask): l0 = l0 - m # fpmask has the same sum as mask, but with predictable dtype - l0 += fpmask.sum() + l0 += 
fpmask.sum().astype(int) return np.array([l0,l1,l2]) @@ -944,12 +944,12 @@ def EC2d(mask): np.ndarray[np.intp_t, ndim=2] d2 np.ndarray[np.intp_t, ndim=2] d3 # scalars - np.uint8_t m + np.uint64_t m np.npy_intp i, j, k, l, s0, s1, ds2, ds3, index np.ndarray[np.intp_t, ndim=1] strides np.npy_intp ss0, ss1 # strides np.npy_intp v0, v1 # vertices - long l0 = 0 + np.npy_intp l0 = 0 mask = check_cast_bin8(mask) @@ -999,7 +999,7 @@ def EC2d(mask): l0 = l0 - m # fpmask has the same sum as mask, but with predictable dtype - l0 += fpmask.sum() + l0 += fpmask.sum().astype(int) return l0 @@ -1082,7 +1082,7 @@ def Lips1d(coords, mask): l0 = l0 - m # mask_c has the same sum as mask, but with predictable dtype - l0 += mask_c.sum() + l0 += mask_c.sum().astype(int) return np.array([l0, l1]) @@ -1137,5 +1137,5 @@ def EC1d(mask): l0 = l0 - m # mask_c has the same sum as mask, but with predictable dtype - l0 += mask_c.sum() + l0 += mask_c.sum().astype(int) return l0 diff --git a/nipy/algorithms/statistics/models/model.py b/nipy/algorithms/statistics/models/model.py index 996c53055..ccfd6939d 100644 --- a/nipy/algorithms/statistics/models/model.py +++ b/nipy/algorithms/statistics/models/model.py @@ -1,7 +1,8 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: +from functools import cached_property + import numpy as np -from nibabel.onetime import auto_attr from numpy.linalg import inv from scipy.stats import t as t_distribution @@ -118,14 +119,14 @@ def __init__(self, theta, Y, model, cov=None, dispersion=1., nuisance=None, # put this as a parameter of LikelihoodModel self.df_resid = self.df_total - self.df_model - @auto_attr + @cached_property def logL(self): """ The maximized log-likelihood """ return self.model.logL(self.theta, self.Y, nuisance=self.nuisance) - @auto_attr + @cached_property def AIC(self): """ Akaike Information Criterion @@ -133,7 +134,7 @@ def AIC(self): p = self.theta.shape[0] return -2 * self.logL + 2 * p - @auto_attr + @cached_property def BIC(self): """ Schwarz's Bayesian Information Criterion @@ -189,7 +190,7 @@ def vcov(self, matrix=None, column=None, dispersion=None, other=None): or a matrix. 
""" if self.cov is None: - raise ValueError('need covariance of parameters for computing' +\ + raise ValueError('need covariance of parameters for computing ' '(unnormalized) covariances') if dispersion is None: diff --git a/nipy/algorithms/statistics/models/regression.py b/nipy/algorithms/statistics/models/regression.py index a4efae53d..660babb33 100644 --- a/nipy/algorithms/statistics/models/regression.py +++ b/nipy/algorithms/statistics/models/regression.py @@ -21,11 +21,11 @@ __docformat__ = 'restructuredtext en' import warnings +from functools import cached_property import numpy as np import numpy.linalg as npl import scipy.linalg as spl -from nibabel.onetime import auto_attr from scipy import stats from nipy.algorithms.utils.matrices import matrix_rank, pos_recipr @@ -256,7 +256,7 @@ def whiten(self, X): """ return X - @auto_attr + @cached_property def has_intercept(self): """ Check if column of 1s is in column space of design @@ -264,11 +264,9 @@ def has_intercept(self): o = np.ones(self.design.shape[0]) obeta = np.dot(self.calc_beta, o) ohat = np.dot(self.wdesign, obeta) - if np.allclose(ohat, o): - return True - return False + return np.allclose(ohat, o) - @auto_attr + @cached_property def rank(self): """ Compute rank of design matrix """ @@ -709,14 +707,14 @@ def __init__(self, theta, Y, model, wY, wresid, cov=None, dispersion=1., self.wY = wY self.wresid = wresid - @auto_attr + @cached_property def resid(self): """ Residuals from the fit. """ return self.Y - self.predicted - @auto_attr + @cached_property def norm_resid(self): """ Residuals, normalized to have unit length. @@ -736,7 +734,7 @@ def norm_resid(self): """ return self.resid * pos_recipr(np.sqrt(self.dispersion)) - @auto_attr + @cached_property def predicted(self): """ Return linear predictor values from a design matrix. """ @@ -745,7 +743,7 @@ def predicted(self): X = self.model.design return np.dot(X, beta) - @auto_attr + @cached_property def R2_adj(self): """Return the R^2 value for each row of the response Y. @@ -756,13 +754,13 @@ def R2_adj(self): See: Davidson and MacKinnon p 74 """ if not self.model.has_intercept: - warnings.warn("model does not have intercept term, " +\ + warnings.warn("model does not have intercept term, " "SST inappropriate") d = 1. - self.R2 d *= ((self.df_total - 1.) / self.df_resid) return 1 - d - @auto_attr + @cached_property def R2(self): """ Return the adjusted R^2 value for each row of the response Y. @@ -776,43 +774,43 @@ def R2(self): d = self.SSE / self.SST return 1 - d - @auto_attr + @cached_property def SST(self): """Total sum of squares. If not from an OLS model this is "pseudo"-SST. """ if not self.model.has_intercept: - warnings.warn("model does not have intercept term, " +\ + warnings.warn("model does not have intercept term, " "SST inappropriate") return ((self.wY - self.wY.mean(0)) ** 2).sum(0) - @auto_attr + @cached_property def SSE(self): """Error sum of squares. If not from an OLS model this is "pseudo"-SSE. 
""" return (self.wresid ** 2).sum(0) - @auto_attr + @cached_property def SSR(self): """ Regression sum of squares """ return self.SST - self.SSE - @auto_attr + @cached_property def MSR(self): """ Mean square (regression)""" return self.SSR / (self.df_model - 1) - @auto_attr + @cached_property def MSE(self): """ Mean square (error) """ return self.SSE / self.df_resid - @auto_attr + @cached_property def MST(self): """ Mean square (total) """ return self.SST / (self.df_total - 1) - @auto_attr + @cached_property def F_overall(self): """ Overall goodness of fit F test, comparing model to a model with just an intercept. @@ -872,6 +870,4 @@ def isestimable(C, D): if C.shape[1] != D.shape[1]: raise ValueError('Contrast should have %d columns' % D.shape[1]) new = np.vstack([C, D]) - if matrix_rank(new) != matrix_rank(D): - return False - return True + return matrix_rank(new) == matrix_rank(D) diff --git a/nipy/algorithms/statistics/models/tests/test_anova.py b/nipy/algorithms/statistics/models/tests/test_anova.py index bb5f02952..13ab55420 100644 --- a/nipy/algorithms/statistics/models/tests/test_anova.py +++ b/nipy/algorithms/statistics/models/tests/test_anova.py @@ -85,9 +85,7 @@ # # http://www-stat.stanford.edu/~jtaylo/courses/stats191/data/kidney.table -D = [] -for row in StringIO(data): - D.append([float(val) for val in row.split()]) +D = [[float(val) for val in row.split()] for row in StringIO(data)] D = make_recarray(D, ['Days', 'Duration', 'Weight', 'ID']) # Create the categorical regressors, known as Factors diff --git a/nipy/algorithms/statistics/rft.py b/nipy/algorithms/statistics/rft.py index d164cad4d..b7a783d37 100644 --- a/nipy/algorithms/statistics/rft.py +++ b/nipy/algorithms/statistics/rft.py @@ -343,7 +343,7 @@ def __repr__(self): m = repr(self.m) else: m = f'{self.m:f}' - return f"ECquasi({repr(self.coeffs)}, m={m}, exponent={self.exponent:f})" + return f"ECquasi({self.coeffs!r}, m={m}, exponent={self.exponent:f})" __str__ = __repr__ __rsub__ = __sub__ diff --git a/nipy/algorithms/statistics/tests/test_intrinsic_volumes.py b/nipy/algorithms/statistics/tests/test_intrinsic_volumes.py index 74bf2778b..d2ea35de7 100644 --- a/nipy/algorithms/statistics/tests/test_intrinsic_volumes.py +++ b/nipy/algorithms/statistics/tests/test_intrinsic_volumes.py @@ -1,13 +1,15 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from itertools import combinations +from itertools import chain, combinations import numpy as np import numpy.linalg as npl import pytest from numpy.testing import assert_almost_equal, assert_array_equal +from nipy.utils import SCTYPES + from .. import intvol @@ -26,9 +28,7 @@ def randorth(p=10): def box(shape, edges): data = np.zeros(shape) - sl = [] - for i in range(len(shape)): - sl.append(slice(edges[i][0], edges[i][1],1)) + sl = [slice(edges[i][0], edges[i][1],1) for i in range(len(shape))] data[tuple(sl)] = 1 return data.astype(np.int_) @@ -190,7 +190,7 @@ def test_ec(): assert_almost_equal(f(box1), 1) # While we're here, test we can use different dtypes, and that values # other than 0 or 1 raise an error. 
- for dtt in sum([np.sctypes[t] for t in ('int', 'uint', 'float')], []): + for dtt in chain.from_iterable(SCTYPES[t] for t in ('int', 'uint', 'float')): box1_again = box1.copy().astype(dtt) assert_almost_equal(f(box1_again), 1) box1_again[(10,) * i] = 2 diff --git a/nipy/algorithms/statistics/tests/test_quantile.py b/nipy/algorithms/statistics/tests/test_quantile.py index 59284bd78..d6304b659 100644 --- a/nipy/algorithms/statistics/tests/test_quantile.py +++ b/nipy/algorithms/statistics/tests/test_quantile.py @@ -1,14 +1,22 @@ +""" Test quantile functions +""" + +from itertools import chain import numpy as np from numpy import median as np_median from numpy.testing import assert_array_almost_equal, assert_array_equal from scipy.stats import scoreatpercentile as sp_percentile +from nipy.utils import SCTYPES + from .._quantile import _median, _quantile -NUMERIC_TYPES = sum([np.sctypes[t] - for t in ('int', 'uint', 'float', 'complex')], - []) +NUMERIC_TYPES = list( + chain.from_iterable( + SCTYPES[t] for t in ("int", "uint", "float", "complex") + ) +) def another_percentile(arr, pct, axis): diff --git a/nipy/algorithms/statistics/tests/test_utils.py b/nipy/algorithms/statistics/tests/test_utils.py index d1ffd6207..878a3e1e8 100644 @@ -9,6 +9,8 @@ ) from scipy.stats import norm +from nipy.utils import SCTYPES + from ..utils import check_cast_bin8, multiple_fast_inv, multiple_mahalanobis, z_score @@ -59,13 +61,13 @@ def assert_equal_bin8(actual, expected): def test_check_cast_bin8(): # Function to return np.uint8 array with check whether array is binary. - for in_dtype in np.sctypes['int'] + np.sctypes['uint']: + for in_dtype in SCTYPES['int'] + SCTYPES['uint']: assert_equal_bin8(np.array([0, 1, 1, 1], in_dtype), [0, 1, 1, 1]) assert_equal_bin8(np.array([[0, 1], [1, 1]], in_dtype), [[0, 1], [1, 1]]) pytest.raises(ValueError, check_cast_bin8, np.array([0, 1, 2], dtype=in_dtype)) - for in_dtype in np.sctypes['float']: + for in_dtype in SCTYPES['float']: assert_equal_bin8(np.array([0, 1, 1, -0], np.float64), [0, 1, 1, 0]) assert_equal_bin8(np.array([[0, 1], [1, -0]], np.float64), [[0, 1], [1, 0]]) diff --git a/nipy/algorithms/statistics/utils.py b/nipy/algorithms/statistics/utils.py index 99f5f57b6..96b3806ab 100644 --- a/nipy/algorithms/statistics/utils.py +++ b/nipy/algorithms/statistics/utils.py @@ -218,7 +218,7 @@ def cube_with_strides_center(center=[0,0,0], for m in maximal: nm = [vertices[j] for j in m] mm.append(nm) - maximal = [tuple([vertices[j] for j in m]) for m in maximal] + maximal = [tuple(vertices[j] for j in m) for m in maximal] return complex(maximal) diff --git a/nipy/algorithms/utils/matrices.py b/nipy/algorithms/utils/matrices.py index 446815bb9..cfedb7db8 100644 --- a/nipy/algorithms/utils/matrices.py +++ b/nipy/algorithms/utils/matrices.py @@ -105,9 +105,7 @@ def full_rank(X, r=None): V, D, U = spl.svd(X, full_matrices=0) order = np.argsort(D) order = order[::-1] - value = [] - for i in range(r): - value.append(V[:,order[i]]) + value = [V[:,order[i]] for i in range(r)] return np.asarray(np.transpose(value)).astype(np.float64) diff --git a/nipy/algorithms/utils/pca.py b/nipy/algorithms/utils/pca.py index 089f9d5d0..9e52304c3 100644 --- a/nipy/algorithms/utils/pca.py +++ b/nipy/algorithms/utils/pca.py @@ -24,6 +24,7 @@ io_axis_indices, orth_axes, ) +from ...utils import SCTYPES def pca(data, axis=0, mask=None, ncomp=None, standardize=True, @@ -204,8 +205,8 @@ def
_get_covariance(data, UX, rmse_scales_func, mask): C = np.zeros((rank, rank)) # nan_to_num only for floating point masks if mask is not None: - nan_to_num = mask.dtype.type in (np.sctypes['float'] + - np.sctypes['complex']) + nan_to_num = mask.dtype.type in (SCTYPES['float'] + + SCTYPES['complex']) # loop over next dimension to save memory if data.ndim == 2: # If we have 2D data, just do the covariance all in one shot, by using @@ -215,7 +216,7 @@ def _get_covariance(data, UX, rmse_scales_func, mask): # If we have more then 2D, then we iterate over slices in the second # dimension, in order to save memory slices = [slice(i,i+1) for i in range(data.shape[1])] - for i, s_slice in enumerate(slices): + for s_slice in slices: Y = data[:,s_slice].reshape((n_pts, -1)) # project data into required space YX = np.dot(UX, Y) @@ -334,14 +335,14 @@ def pca_image(img, axis='t', mask=None, ncomp=None, standardize=True, if None in (in_ax, out_ax): raise AxisError(f'Cannot identify matching input output axes with "{axis}"') if not orth_axes(in_ax, out_ax, img.coordmap.affine): - raise AxisError('Input and output axes found from "%s" not orthogonal ' - 'to rest of affine' % axis) + raise AxisError(f'Input and output axes found from "{axis}" not orthogonal ' + 'to rest of affine') # Roll the chosen axis to input position zero work_img = rollimg(img, axis) if mask is not None: if not mask.coordmap.similar_to(drop_io_dim(img.coordmap, axis)): raise ValueError("Mask should have matching coordmap to `img` " - "coordmap with dropped axis %s" % axis) + f"coordmap with dropped axis {axis}") data = work_img.get_fdata() if mask is not None: mask_data = mask.get_fdata() diff --git a/nipy/algorithms/utils/tests/test_pca.py b/nipy/algorithms/utils/tests/test_pca.py index 983f261a9..491d2d2b2 100644 --- a/nipy/algorithms/utils/tests/test_pca.py +++ b/nipy/algorithms/utils/tests/test_pca.py @@ -9,6 +9,7 @@ assert_array_almost_equal, funcfile, ) +from nipy.utils import SCTYPES from ..pca import pca @@ -169,9 +170,9 @@ def test_PCAMask(data): assert_almost_equal(p['pcnt_var'].sum(), 100.) 
# Any reasonable datatype for mask for dt in ([np.bool_] + - np.sctypes['int'] + - np.sctypes['uint'] + - np.sctypes['float']): + SCTYPES['int'] + + SCTYPES['uint'] + + SCTYPES['float']): p = pca(arr4d, -1, mask3d.astype(dt), ncomp=ncomp) assert p['basis_vectors'].shape == (data['nimages'], ntotal) assert p['basis_projections'].shape == mask3d.shape + (ncomp,) diff --git a/nipy/algorithms/utils/tests/test_pca_image.py b/nipy/algorithms/utils/tests/test_pca_image.py index 33519f129..350ba8b99 100644 --- a/nipy/algorithms/utils/tests/test_pca_image.py +++ b/nipy/algorithms/utils/tests/test_pca_image.py @@ -302,7 +302,7 @@ def test_other_axes(data_dict): assert_almost_equal(pos_dp['basis_vectors'], pos_p[bv_key]) assert_almost_equal(pos_dp['basis_projections'], img_bps.get_fdata()) # And we've replaced the expected axis - exp_coords = in_coords[:] + exp_coords = in_coords.copy() exp_coords[exp_coords.index(axis_name)] = 'PCA components' assert img_bps.axes.coord_names == tuple(exp_coords) # If the affine is not diagonal, we'll get an error @@ -323,6 +323,6 @@ def test_other_axes(data_dict): pytest.raises(AxisError, pca_image, nd_img, axis_name) for axis_name in 'kt': p = pca_image(img, axis_name, ncomp=ncomp) - exp_coords = in_coords[:] + exp_coords = in_coords.copy() exp_coords[exp_coords.index(axis_name)] = 'PCA components' assert p['basis_projections'].axes.coord_names == tuple(exp_coords) diff --git a/nipy/core/image/image.py b/nipy/core/image/image.py index 6f5ee49b6..6dc8b3590 100644 --- a/nipy/core/image/image.py +++ b/nipy/core/image/image.py @@ -13,10 +13,12 @@ import warnings from copy import copy +from functools import cached_property from itertools import chain import numpy as np -from nibabel.onetime import auto_attr + +from ...utils import deprecate_with_doc # Legacy repr printing from numpy. from ..reference.array_coords import ArrayCoordMap @@ -79,27 +81,27 @@ class Image: np.diag([3,5,7,1])) _doc['coordmap'] = "Affine transform mapping from axes coordinates to reference coordinates." - @auto_attr + @cached_property def shape(self): return self._data.shape _doc['shape'] = "Shape of data array." - @auto_attr + @cached_property def ndim(self): return len(self._data.shape) _doc['ndim'] = "Number of data dimensions." - @auto_attr + @cached_property def reference(self): return self.coordmap.function_range _doc['reference'] = "Reference coordinate system." - @auto_attr + @cached_property def axes(self): return self.coordmap.function_domain _doc['axes'] = "Axes of image." - @auto_attr + @cached_property def affine(self): if hasattr(self.coordmap, "affine"): return self.coordmap.affine @@ -581,7 +583,7 @@ def fromarray(data, innames, outnames): return Image(data, coordmap) -@np.deprecate_with_doc('Please use rollimg instead') +@deprecate_with_doc('please use rollimg instead') def rollaxis(img, axis, inverse=False): """ Roll `axis` backwards, until it lies in the first position. diff --git a/nipy/core/reference/array_coords.py b/nipy/core/reference/array_coords.py index eeb9116c8..b6182c858 100644 --- a/nipy/core/reference/array_coords.py +++ b/nipy/core/reference/array_coords.py @@ -183,12 +183,12 @@ def from_shape(coordmap, shape): are 'array' coordinates. 
""" - slices = tuple([slice(0,s,1) for s in shape]) + slices = tuple(slice(0,s,1) for s in shape) return Grid(coordmap)[slices] def __repr__(self): return "ArrayCoordMap(\n coordmap=" + \ - '\n '.join(repr(self.coordmap).split('\n')) + f',\n shape={repr(self.shape)}' + '\n)' + '\n '.join(repr(self.coordmap).split('\n')) + f',\n shape={self.shape!r}' + '\n)' def _slice(coordmap, shape, *slices): diff --git a/nipy/core/reference/coordinate_map.py b/nipy/core/reference/coordinate_map.py index 1d2ea8ad1..a4f5bbbb0 100644 --- a/nipy/core/reference/coordinate_map.py +++ b/nipy/core/reference/coordinate_map.py @@ -145,7 +145,7 @@ class CoordinateMap: _doc['function_range'] = 'The range of the function, a CoordinateSystem.' inverse_function = np.log - _doc['inverse_function'] = 'The inverse function from function_range' + \ + _doc['inverse_function'] = 'The inverse function from function_range ' \ 'to function_domain, if supplied.' ndims = (1,1) @@ -200,9 +200,9 @@ def __init__(self, function_domain, def __setattr__(self, key, value): if key in self.__dict__: - raise AttributeError('the value of %s has already been ' + raise AttributeError(f'the value of {key} has already been ' 'set and all attributes are ' - 'read-only' % key) + 'read-only') object.__setattr__(self, key, value) ################################################################### @@ -423,9 +423,9 @@ def __copy__(self): def __repr__(self): if self.inverse_function is None: - return f"CoordinateMap(\n function_domain={self.function_domain},\n function_range={self.function_range},\n function={repr(self.function)}\n )" + return f"CoordinateMap(\n function_domain={self.function_domain},\n function_range={self.function_range},\n function={self.function!r}\n )" else: - return f"CoordinateMap(\n function_domain={self.function_domain},\n function_range={self.function_range},\n function={repr(self.function)},\n inverse_function={repr(self.inverse_function)}\n )" + return f"CoordinateMap(\n function_domain={self.function_domain},\n function_range={self.function_range},\n function={self.function!r},\n inverse_function={self.inverse_function!r}\n )" def _checkfunction(self): @@ -502,15 +502,15 @@ class AffineTransform: _doc = {} affine = np.diag([3,4,5,1]) - _doc['affine'] = 'The matrix representing an affine transformation ' + \ + _doc['affine'] = 'The matrix representing an affine transformation ' \ 'homogeneous form.' function_domain = CoordinateSystem('x') - _doc['function_domain'] = 'The domain of the affine transformation, ' + \ + _doc['function_domain'] = 'The domain of the affine transformation, ' \ 'a CoordinateSystem.' function_range = CoordinateSystem('y') - _doc['function_range'] = 'The range of the affine transformation, ' + \ + _doc['function_range'] = 'The range of the affine transformation, ' \ 'a CoordinateSystem.' 
     ndims = (3,3)
@@ -558,10 +558,10 @@ def __init__(self, function_domain, function_range, affine):
                              'affine matrix shape')
         # Test that it is actually an affine mapping in homogeneous
         # form
-        bottom_row = np.array([0]*self.ndims[0] + [1])
-        if not np.all(affine[-1] == bottom_row):
-            raise ValueError('the homogeneous transform should have bottom=' + \
-                             f'row {repr(bottom_row)}')
+        bottom_row = np.array([0] * self.ndims[0] + [1])
+        if not np.allclose(affine[-1].astype(float), bottom_row):
+            raise ValueError('the homogeneous transform should have bottom '
+                             f'row {bottom_row!r}')
         self.affine = affine

    ###################################################################
@@ -1113,7 +1113,7 @@ def product(*cmaps, **kwargs):
     if allaffine:
         return _product_affines(*cmaps, **kwargs)
     else:
-        warnings.warn("product of non-affine CoordinateMaps is less robust than"+
+        warnings.warn("product of non-affine CoordinateMaps is less robust than "
                       "the AffineTransform")
     return _product_cmaps(*[_as_coordinate_map(cmap) for cmap in cmaps],
                           **kwargs)
@@ -1370,7 +1370,7 @@ def renamed_domain(mapping, newnames, name=''):

     for key in list(newnames):
         if key not in mapping.function_domain.coord_names:
-            raise ValueError(f'no domain coordinate named {str(key)}')
+            raise ValueError(f'no domain coordinate named {key}')

     new_coord_names = []
     for n in mapping.function_domain.coord_names:
@@ -1434,7 +1434,7 @@ def renamed_range(mapping, newnames):

     for key in list(newnames):
         if key not in mapping.function_range.coord_names:
-            raise ValueError(f'no range coordinate named {str(key)}')

     new_coord_names = []
     for n in mapping.function_range.coord_names:
@@ -2019,14 +2019,13 @@ def input_axis_index(coordmap, axis_id, fix0=True):
             return in_no
         out2in = axmap(coordmap, 'out2in', fix0=fix0)
         if not out2in[axis_id] == in_no:
-            raise AxisError('Name "%s" present in input and output but '
-                            'they do not appear to match' % axis_id)
+            raise AxisError(f'Name "{axis_id}" present in input and output but '
+                            'they do not appear to match')
         return in_no
     in_no = axmap(coordmap, 'out2in', fix0=fix0)[axis_id]
     if in_no is None:
-        raise AxisError('Name "%s" present in output but this output axis '
-                        'does not have the best match with any input axis'
-                        % axis_id)
+        raise AxisError(f'Name "{axis_id}" present in output but this output axis '
+                        'does not have the best match with any input axis')
     return in_no
diff --git a/nipy/core/reference/coordinate_system.py b/nipy/core/reference/coordinate_system.py
index 208cfcbf7..657b03743 100644
--- a/nipy/core/reference/coordinate_system.py
+++ b/nipy/core/reference/coordinate_system.py
@@ -15,6 +15,8 @@

 import numpy as np

+from ...utils import SCTYPES
+

 class CoordinateSystemError(Exception):
     pass
@@ -74,8 +76,8 @@ class CoordinateSystem:
     dtype = np.dtype([('x', np.float64),
                       ('y', np.float64),
                       ('z', np.float64)])
-    _doc['dtype'] = 'The composite dtype of the CoordinateSystem, ' + \
-                    'expresses the fact that there are three numbers, the' + \
+    _doc['dtype'] = 'The composite dtype of the CoordinateSystem, ' \
+                    'expresses the fact that there are three numbers, the ' \
                     'first one corresponds to "x" and the second to "y".'
     def __init__(self, coord_names, name='', coord_dtype=np.float64):
@@ -115,8 +117,8 @@ def __init__(self, coord_names, name='', coord_dtype=np.float64):
         if len(set(coord_names)) != len(coord_names):
             raise ValueError('coord_names must have distinct names')
         # verify that the dtype is coord_dtype for sanity
-        sctypes = (np.sctypes['int'] + np.sctypes['float'] +
-                   np.sctypes['complex'] + np.sctypes['uint'] + [object])
+        sctypes = (SCTYPES['int'] + SCTYPES['float'] +
+                   SCTYPES['complex'] + SCTYPES['uint'] + [object])
         coord_dtype = np.dtype(coord_dtype)
         if coord_dtype not in sctypes:
             raise ValueError(f'Coordinate dtype should be one of {sctypes}')
@@ -332,7 +334,7 @@ def safe_dtype(*dtypes):
     Examples
     --------
     >>> c1 = CoordinateSystem('ij', 'input', coord_dtype=np.float32)
-    >>> c2 = CoordinateSystem('kl', 'input', coord_dtype=np.complex_)
+    >>> c2 = CoordinateSystem('kl', 'input', coord_dtype=np.complex128)
     >>> safe_dtype(c1.coord_dtype, c2.coord_dtype)
     dtype('complex128')

@@ -383,7 +385,7 @@ def product(*coord_systems, **kwargs):
     Examples
     --------
     >>> c1 = CoordinateSystem('ij', 'input', coord_dtype=np.float32)
-    >>> c2 = CoordinateSystem('kl', 'input', coord_dtype=np.complex_)
+    >>> c2 = CoordinateSystem('kl', 'input', coord_dtype=np.complex128)
     >>> c3 = CoordinateSystem('ik', 'in3')

     >>> print(product(c1, c2))
diff --git a/nipy/core/reference/slices.py b/nipy/core/reference/slices.py
index cb775cecd..ac5d68454 100644
--- a/nipy/core/reference/slices.py
+++ b/nipy/core/reference/slices.py
@@ -192,8 +192,7 @@ def zslice(z, x_spec, y_spec, world):

 def bounding_box(coordmap, shape):
     """
-    Determine a valid bounding box from a CoordinateMap
-    and a shape.
+    Determine valid bounding box from CoordinateMap and shape.

     Parameters
     ----------
@@ -223,4 +222,4 @@ def bounding_box(coordmap, shape):
     ((2.0, 31.0), (4.0, 121.0), (6.0, 101.0))
     """
     e = ArrayCoordMap.from_shape(coordmap, shape)
-    return tuple([(r.min(), r.max()) for r in e.transposed_values])
+    return tuple((r.min(), r.max()) for r in e.transposed_values)
diff --git a/nipy/core/reference/spaces.py b/nipy/core/reference/spaces.py
index f56403744..04ff50992 100644
--- a/nipy/core/reference/spaces.py
+++ b/nipy/core/reference/spaces.py
@@ -299,7 +299,7 @@ def get_world_cs(world_id, ndim=3, extras='tuvw', spaces=None):
     if is_coordsys_maker(world_id):
         return world_id(ndim)
     raise ValueError('Expecting CoordinateSystem, CoordSysMaker, '
-                     'XYZSpace, or str, got %s' % world_id)
+                     f'XYZSpace, or str, got {world_id}')


 class SpaceError(Exception):
diff --git a/nipy/core/reference/tests/matrix_groups.py b/nipy/core/reference/tests/matrix_groups.py
index 54818ef41..f09a336eb 100644
--- a/nipy/core/reference/tests/matrix_groups.py
+++ b/nipy/core/reference/tests/matrix_groups.py
@@ -73,7 +73,7 @@ def inverse(self):

 class GLC(MatrixGroup):
-    dtype = np.complex_
+    dtype = np.complex128

     def validate(self, M=None):
         """
diff --git a/nipy/core/reference/tests/test_coordinate_map.py b/nipy/core/reference/tests/test_coordinate_map.py
index 4a10be2c0..0435c4651 100644
--- a/nipy/core/reference/tests/test_coordinate_map.py
+++ b/nipy/core/reference/tests/test_coordinate_map.py
@@ -5,6 +5,8 @@

 import numpy as np

+from nipy.utils import SCTYPES
+
 # this import line is a little ridiculous...
 from ..coordinate_map import (
     AffineTransform,
@@ -32,16 +34,20 @@
 CS = CoordinateSystem

 import pytest
-from numpy.testing import assert_almost_equal, assert_array_equal
+from numpy.testing import (
+    assert_almost_equal,
+    assert_array_almost_equal,
+    assert_array_equal,
+)

 # Dtypes for testing coordinate map creation / processing
-_SYMPY_SAFE_DTYPES = (np.sctypes['int'] + np.sctypes['uint'] +
-                      np.sctypes['float'] + np.sctypes['complex'] +
+_SYMPY_SAFE_DTYPES = (SCTYPES['int'] + SCTYPES['uint'] +
+                      SCTYPES['float'] + SCTYPES['complex'] +
                       [object])
 # Sympy <= 1.1 does not handle numpy longcomplex correctly. See:
 # https://github.com/sympy/sympy/pull/12901
-if np.longcomplex in _SYMPY_SAFE_DTYPES:  # Not present for Windows
-    _SYMPY_SAFE_DTYPES.remove(np.longcomplex)
+if np.clongdouble in _SYMPY_SAFE_DTYPES:  # Not present for Windows
+    _SYMPY_SAFE_DTYPES.remove(np.clongdouble)


 @pytest.fixture
@@ -965,42 +971,39 @@ def test_dtype_cmap_inverses():
         coord = np.array(in_list, dtype=dt)
         out_coord = np.array(out_list, dtype=dt)
         # Expected output type of inverse, not preserving
-        if dt in np.sctypes['int'] + np.sctypes['uint']:
+        if dt in SCTYPES['int'] + SCTYPES['uint']:
             exp_i_dt = np.float64
         else:
             exp_i_dt = dt
         # Default inverse cmap may alter coordinate types
-        try:
-            r_cmap = cmap.inverse()
-        except:
-            1/0
+        r_cmap = cmap.inverse()
         res = r_cmap(out_coord)
-        assert_array_equal(res, coord)
+        assert_array_almost_equal(res, coord)
         assert res.dtype == exp_i_dt
         # Default behavior is preserve_type=False
         r_cmap = cmap.inverse(preserve_dtype=False)
         res = r_cmap(out_coord)
-        assert_array_equal(res, coord)
+        assert_array_almost_equal(res, coord)
         assert res.dtype == exp_i_dt
         # Preserve_dtype=True - preserves dtype
         r_cmap = cmap.inverse(preserve_dtype=True)
         res = r_cmap(out_coord)
-        assert_array_equal(res, coord)
+        assert_array_almost_equal(res, coord)
         assert res.dtype == dt
         # Preserve_dtype=True is default for conversion to CoordinateMap
         cm_cmap = _as_coordinate_map(cmap)
-        assert_array_equal(cm_cmap(coord), out_list)
+        assert_array_almost_equal(cm_cmap(coord), out_list)
         rcm_cmap = cm_cmap.inverse()
-        assert_array_equal(rcm_cmap(coord), out_list)
+        assert_array_almost_equal(rcm_cmap(coord), out_list)
         res = rcm_cmap(out_coord)
-        assert_array_equal(res, coord)
+        assert_array_almost_equal(res, coord)
         assert res.dtype == dt
     # For integer types, where there is no integer inverse, return floatey
     # inverse by default, and None for inverse when preserve_dtype=True
     arr_p2 = arr_p1 * 2
     arr_p2[-1, -1] = 1
     out_list = [0, 4, 2]
-    for dt in np.sctypes['int'] + np.sctypes['uint']:
+    for dt in SCTYPES['int'] + SCTYPES['uint']:
         in_cs = CoordinateSystem('ijk', coord_dtype=dt)
         out_cs = CoordinateSystem('xyz', coord_dtype=dt)
         cmap = AffineTransform(in_cs, out_cs, arr_p2.astype(dt))
diff --git a/nipy/core/reference/tests/test_coordinate_system.py b/nipy/core/reference/tests/test_coordinate_system.py
index 59608f829..66e4736a1 100644
--- a/nipy/core/reference/tests/test_coordinate_system.py
+++ b/nipy/core/reference/tests/test_coordinate_system.py
@@ -6,6 +6,8 @@
 import numpy as np
 import pytest

+from nipy.utils import SCTYPES
+
 from ..coordinate_system import (
     CoordinateSystem,
     CoordinateSystemError,
@@ -60,7 +62,7 @@ def test_unique_coord_names():

 def test_dtypes():
     # invalid dtypes
-    dtypes = np.sctypes['others']
+    dtypes = SCTYPES['others'].copy()
     dtypes.remove(object)
     for dt in dtypes:
         pytest.raises(ValueError, CoordinateSystem, 'ijk', 'test', dt)
@@ -68,7+70,7 @@ def test_dtypes():
     dtype = np.dtype([('field1', 'Y, Z)
diff --git a/nipy/labs/datasets/converters.py b/nipy/labs/datasets/converters.py
index b7da1505e..65eb57e63 100644
--- a/nipy/labs/datasets/converters.py
+++ b/nipy/labs/datasets/converters.py
@@ -78,8 +78,8 @@ def as_volume_img(obj, copy=True, squeeze=True, world_space=None):
         if filename != '':
             header['filename'] = filename
     else:
-        raise ValueError('Invalid type ({}) passed in: cannot convert {} to '
-                         'VolumeImg'.format(type(obj), obj))
+        raise ValueError(f'Invalid type ({type(obj)}) passed in: cannot convert {obj} to '
+                         'VolumeImg')

     if world_space is None and header.get('sform_code', 0) == 4:
         world_space = 'mni152'
diff --git a/nipy/labs/datasets/transforms/transform.py b/nipy/labs/datasets/transforms/transform.py
index 64378d3b3..3451a52ec 100644
--- a/nipy/labs/datasets/transforms/transform.py
+++ b/nipy/labs/datasets/transforms/transform.py
@@ -102,8 +102,8 @@ def composed_with(self, transform):
         second_mapping = transform.mapping
         if first_mapping is not None and second_mapping is not None:
             def new_mapping(x, y, z):
-                """ Coordinate mapping from {} to {}.
-                """.format(self.input_space, transform.output_space)
+                f""" Coordinate mapping from {self.input_space} to {transform.output_space}.
+                """
                 return second_mapping(*first_mapping(x, y, z))
         else:
             new_mapping = None
@@ -113,8 +113,8 @@ def new_mapping(x, y, z):
         if (first_inverse_mapping is not None
                 and second_inverse_mapping is not None):
             def new_inverse_mapping(x, y, z):
-                """ Coordinate mapping from {} to {}.
-                """.format(transform.output_space, self.input_space)
+                f""" Coordinate mapping from {transform.output_space} to {self.input_space}.
+                """
                 return first_inverse_mapping(*second_inverse_mapping(x, y, z))
         else:
             new_inverse_mapping = None
@@ -153,8 +153,8 @@ def _check_composition(self, transform):
         """
         if not transform.input_space == self.output_space:
             raise CompositionError("The input space of the "
-                "second transform ({}) does not match the input space "
-                "of first transform ({})".format(transform.input_space, self.output_space)
+                f"second transform ({transform.input_space}) does not match the output space "
+                f"of the first transform ({self.output_space})"
                 )
diff --git a/nipy/labs/datasets/volumes/volume_grid.py b/nipy/labs/datasets/volumes/volume_grid.py
index 518b067c1..69ec0bac8 100644
--- a/nipy/labs/datasets/volumes/volume_grid.py
+++ b/nipy/labs/datasets/volumes/volume_grid.py
@@ -126,8 +126,7 @@ def as_volume_img(self, affine=None, shape=None,
             shape = [int(s) for s in shape]
         if not len(shape) == 3:
             raise ValueError('The shape specified should be the shape '
-                             'the 3D grid, and thus of length 3. %s was specified'
-                             % shape )
+                             f'of the 3D grid, and thus of length 3. {shape} was specified')
         x, y, z = np.indices(shape)
         x, y, z = apply_affine(x, y, z, affine)
         values = self.values_in_world(x, y, z)
diff --git a/nipy/labs/datasets/volumes/volume_img.py b/nipy/labs/datasets/volumes/volume_img.py
index 499a9cdb6..490a07662 100644
--- a/nipy/labs/datasets/volumes/volume_img.py
+++ b/nipy/labs/datasets/volumes/volume_img.py
@@ -184,8 +184,7 @@ def as_volume_img(self, affine=None, shape=None,
             shape = [int(s) for s in shape]
         if not len(shape) == 3:
             raise ValueError('The shape specified should be the shape '
-                             'the 3D grid, and thus of length 3. %s was specified'
-                             % shape )
+                             f'of the 3D grid, and thus of length 3. {shape} was specified')
         interpolation_order = self._get_interpolation_order(interpolation)
         if np.all(affine == self.affine):
             # Small trick to be more numericaly stable
diff --git a/nipy/labs/mask.py b/nipy/labs/mask.py
index 6ac955e72..5e1db8ec5 100644
--- a/nipy/labs/mask.py
+++ b/nipy/labs/mask.py
@@ -293,7 +293,7 @@ def compute_mask_sessions(session_images, m=0.2, M=0.9, cc=1, threshold=0.5,
         The mean image
     """
     mask, mean = None, None
-    for index, session in enumerate(session_images):
+    for session in session_images:
         if hasattr(session, 'get_fdata'):
             mean = session.get_fdata()
             if mean.ndim > 3:
diff --git a/nipy/labs/meson.build b/nipy/labs/meson.build
index 5ecaf94e8..7645fd9b6 100644
--- a/nipy/labs/meson.build
+++ b/nipy/labs/meson.build
@@ -6,8 +6,10 @@ python_sources = [
   'mask.py',
   'statistical_mapping.py',
   'viz3d.py',
-  'viz.py'
+  'viz.py',
+  'conftest.py'
 ]
+
 py.install_sources(
   python_sources,
   pure: false,
diff --git a/nipy/labs/spatial_models/hierarchical_parcellation.py b/nipy/labs/spatial_models/hierarchical_parcellation.py
index c3e3a34ef..0b761217a 100644
--- a/nipy/labs/spatial_models/hierarchical_parcellation.py
+++ b/nipy/labs/spatial_models/hierarchical_parcellation.py
@@ -17,8 +17,8 @@

 from .parcellation import MultiSubjectParcellation

-warn('Module nipy.labs.spatial_models.hierarchical_parcellation' +
-     'deprecated, will be removed',
+warn('Module nipy.labs.spatial_models.hierarchical_parcellation deprecated, '
+     'will be removed',
     FutureWarning,
     stacklevel=2)
diff --git a/nipy/labs/spatial_models/parcel_io.py b/nipy/labs/spatial_models/parcel_io.py
index ea0862165..62282a6ba 100644
--- a/nipy/labs/spatial_models/parcel_io.py
+++ b/nipy/labs/spatial_models/parcel_io.py
@@ -19,8 +19,7 @@

 from .discrete_domain import grid_domain_from_image
 from .mroi import SubDomains

-warn('Module nipy.labs.spatial_models.parcel_io' +
-     'deprecated, will be removed',
+warn('Module nipy.labs.spatial_models.parcel_io deprecated, will be removed',
     FutureWarning,
     stacklevel=2)
diff --git a/nipy/labs/spatial_models/parcellation.py b/nipy/labs/spatial_models/parcellation.py
index 7b3f03694..61a394943 100644
--- a/nipy/labs/spatial_models/parcellation.py
+++ b/nipy/labs/spatial_models/parcellation.py
@@ -15,8 +15,7 @@

 import numpy as np

-warn('Module nipy.labs.spatial_models.parcellation deprecated, ' +
-     'will be removed',
+warn('Module nipy.labs.spatial_models.parcellation deprecated, will be removed',
     FutureWarning,
     stacklevel=2)
diff --git a/nipy/labs/tests/test_mask.py b/nipy/labs/tests/test_mask.py
index de7fd9073..17ee4a63e 100644
--- a/nipy/labs/tests/test_mask.py
+++ b/nipy/labs/tests/test_mask.py
@@ -106,7 +106,7 @@ def test_series_from_mask(in_tmp_path):
         assert proj.sum() == 9/np.abs(affine[axis, axis])

     # Check that NaNs in the data do not propagate
-    data[10, 10, 10] = np.NaN
+    data[10, 10, 10] = np.nan
     img = nib.Nifti1Image(data, affine)
     nib.save(img, 'testing.nii')
     series, header = series_from_mask('testing.nii', mask, smooth=9)
diff --git a/nipy/labs/tests/test_statistical_mapping.py b/nipy/labs/tests/test_statistical_mapping.py
index 728525e7d..c97538d16 100644
--- a/nipy/labs/tests/test_statistical_mapping.py
+++ b/nipy/labs/tests/test_statistical_mapping.py
@@ -53,17 +53,17 @@ def test_6():
     img = make_surrogate_data()
     clusters, info = cluster_stats(img, img, height_th=.05, height_control='fdr', cluster_th=0, nulls={})
-    print(len(clusters), sum([c['size'] for c in clusters]))
+    print(len(clusters), sum(c['size'] for c in clusters))
     assert len(clusters)==4

 def test7():
     img = make_surrogate_data()
     clusters, info = cluster_stats(img, img, height_th=3., height_control='None', cluster_th=0, nulls={})
-    nstv = sum([c['size'] for c in clusters])
+    nstv = sum(c['size'] for c in clusters)
     assert nstv==36

 def test_8():
     img = make_surrogate_data()
     clusters, info = cluster_stats(img, img, height_th=.001, height_control='fpr', cluster_th=0, nulls={})
-    nstv = sum([c['size'] for c in clusters])
+    nstv = sum(c['size'] for c in clusters)
     assert nstv==36
diff --git a/nipy/labs/tests/test_viz.py b/nipy/labs/tests/test_viz.py
new file mode 100644
index 000000000..b960fe9e3
--- /dev/null
+++ b/nipy/labs/tests/test_viz.py
@@ -0,0 +1,26 @@
+""" Tests for visualization
+"""
+
+import numpy as np
+
+from nipy.labs.viz import coord_transform, mni_sform, plot_map
+
+
+def test_example():
+    # Example from tutorial.
+    # First, create a fake activation map: a 3D image in MNI space with
+    # a large rectangle of activation around Broca Area
+    mni_sform_inv = np.linalg.inv(mni_sform)
+    # Color an asymmetric rectangle around Broca area:
+    x, y, z = -52, 10, 22
+    x_map, y_map, z_map = (int(coord) for coord in coord_transform(x, y, z,
+                                                                   mni_sform_inv))
+    map = np.zeros((182, 218, 182))
+    map[x_map-30:x_map+30, y_map-3:y_map+3, z_map-10:z_map+10] = 1
+
+    # We use a masked array to add transparency to the parts that we are
+    # not interested in:
+    thresholded_map = np.ma.masked_less(map, 0.5)
+
+    # And now, visualize it:
+    plot_map(thresholded_map, mni_sform, cut_coords=(x, y, z), vmin=0.5)
diff --git a/nipy/labs/utils/reproducibility_measures.py b/nipy/labs/utils/reproducibility_measures.py
index 2a63db02c..3674b0ac5 100644
--- a/nipy/labs/utils/reproducibility_measures.py
+++ b/nipy/labs/utils/reproducibility_measures.py
@@ -128,10 +128,11 @@ def get_cluster_position_from_thresholded_map(stat_map, domain, thr=3.0,
     coord = thresholded_domain.get_coord()

     # get the barycenters
-    baryc = []
-    for i in range(label.max() + 1):
-        if np.sum(label == i) >= csize:
-            baryc.append(np.mean(coord[label == i], 0))
+    baryc = [
+        np.mean(coord[label == i], 0)
+        for i in range(label.max() + 1)
+        if np.sum(label == i) >= csize
+    ]

     if len(baryc) == 0:
         return None
diff --git a/nipy/labs/viz_tools/cm.py b/nipy/labs/viz_tools/cm.py
index 98034fe07..d67e6284a 100644
--- a/nipy/labs/viz_tools/cm.py
+++ b/nipy/labs/viz_tools/cm.py
@@ -182,8 +182,7 @@ def dim_cmap(cmap, factor=.3, to_white=True):
    """ Dim a colormap to white, or to black.
    """
    assert factor >= 0 and factor <=1, ValueError(
-            'Dimming factor must be larger than 0 and smaller than 1, %s was passed.'
-            % factor)
+        f'Dimming factor must be between 0 and 1, {factor} was passed.')
    if to_white:
        dimmer = lambda c: 1 - factor*(1-c)
    else:
diff --git a/nipy/modalities/fmri/design_matrix.py b/nipy/modalities/fmri/design_matrix.py
index d75a5f774..66653a1df 100644
--- a/nipy/modalities/fmri/design_matrix.py
+++ b/nipy/modalities/fmri/design_matrix.py
@@ -131,9 +131,7 @@ def _make_drift(drift_model, frametimes, order=1, hfcut=128.):
        drift = _blank_drift(frametimes)
    else:
        raise NotImplementedError(f"Unknown drift model {drift_model!r}")
-    names = []
-    for k in range(drift.shape[1] - 1):
-        names.append('drift_%d' % (k + 1))
+    names = [f'drift_{k}' for k in range(1, drift.shape[1])]
    names.append('constant')
    return drift, names
diff --git a/nipy/modalities/fmri/fmristat/hrf.py b/nipy/modalities/fmri/fmristat/hrf.py
index 8f46c87e5..42ada862b 100644
--- a/nipy/modalities/fmri/fmristat/hrf.py
+++ b/nipy/modalities/fmri/fmristat/hrf.py
@@ -188,7 +188,7 @@ def approx(time, delta):
     approx.dinverse,
     approx.forward,
     approx.dforward) = invertR(delta, approx.coef)
-    dhrf = implemented_function(f'd{str(hrf2decompose)}', dhrft)
+    dhrf = implemented_function(f'd{hrf2decompose}', dhrft)
    return [hrf2decompose, dhrf], approx
diff --git a/nipy/modalities/fmri/tests/test_glm.py b/nipy/modalities/fmri/tests/test_glm.py
index 058c49067..5a29dc477 100644
--- a/nipy/modalities/fmri/tests/test_glm.py
+++ b/nipy/modalities/fmri/tests/test_glm.py
@@ -37,7 +37,7 @@ def write_fake_fmri_data(shapes, rk=3, affine=np.eye(4)):
 def generate_fake_fmri_data(shapes, rk=3, affine=np.eye(4)):
     fmri_data = []
     design_matrices = []
-    for i, shape in enumerate(shapes):
+    for shape in shapes:
         data = 100 + np.random.randn(*shape)
         data[0] -= 10
         fmri_data.append(Nifti1Image(data, affine))
@@ -166,8 +166,8 @@ def test_glm_ar():
     mulm, n, p, q = ar1_glm()
     assert len(mulm.labels_) == n
     assert len(mulm.results_) > 1
-    tmp = sum([mulm.results_[key].theta.shape[1]
-               for key in mulm.results_])
+    tmp = sum(mulm.results_[key].theta.shape[1]
+              for key in mulm.results_)
     assert tmp == n
diff --git a/nipy/utils/__init__.py b/nipy/utils/__init__.py
index 0572c21e1..7f966c08d 100644
--- a/nipy/utils/__init__.py
+++ b/nipy/utils/__init__.py
@@ -7,6 +7,10 @@
 wrote, that we ship, go in the nipy.externals tree.
 """
+import functools
+import warnings
+
+import numpy as np
 from nibabel.data import DataError, datasource_or_bomber, make_datasource

 # Module level datasource instances for convenience
@@ -48,3 +52,28 @@ class _NoValue:
     This class may be used as the default value assigned to a deprecated
     keyword in order to check if it has been given a user defined value.
     """
+
+
+# Numpy sctypes (np.sctypes removed in Numpy 2.0).
+SCTYPES = {'int': [np.int8, np.int16, np.int32, np.int64],
+           'uint': [np.uint8, np.uint16, np.uint32, np.uint64],
+           'float': [np.float16, np.float32, np.float64],
+           'complex': [np.complex64, np.complex128],
+           'others': [bool, object, bytes, str, np.void]}
+
+
+def deprecate_with_doc(msg):
+    # Adapted from: https://stackoverflow.com/a/30253848/1939576
+
+    def dep(func):
+
+        @functools.wraps(func)
+        def new_func(*args, **kwargs):
+            warnings.warn(
+                f"{func.__name__} deprecated, {msg}",
+                category=DeprecationWarning, stacklevel=2)
+            return func(*args, **kwargs)
+
+        return new_func
+
+    return dep
diff --git a/nipy/utils/perlpie.py b/nipy/utils/perlpie.py
index 87d18f894..036f66fbe 100644
--- a/nipy/utils/perlpie.py
+++ b/nipy/utils/perlpie.py
@@ -96,7 +96,7 @@ def perl_dash_pie(oldstr, newstr, dry_run=None):
        Error while executing perl_dash_pie command: {cmd}

        Error:
-        {str(err)}
+        {err}
        """
        raise Exception(msg)
diff --git a/nipy/utils/tests/test_arrays.py b/nipy/utils/tests/test_arrays.py
index 91b293408..9b1ac82ad 100644
--- a/nipy/utils/tests/test_arrays.py
+++ b/nipy/utils/tests/test_arrays.py
@@ -1,17 +1,21 @@
 """ Testing arrays module
 """
+from itertools import chain
+
 import numpy as np
 import pytest
 from numpy.testing import assert_array_almost_equal, assert_array_equal

+from nipy.utils import SCTYPES
+
 from ..arrays import strides_from


 def test_strides_from():
     for shape in ((3,), (2,3), (2,3,4), (5,4,3,2)):
         for order in 'FC':
-            for dtype in sum(np.sctypes.values(), []):
+            for dtype in chain.from_iterable(SCTYPES.values()):
                 if dtype is bytes:
                     dtype = 'S3'
                 elif dtype is str:
diff --git a/pyproject.toml b/pyproject.toml
index 26112be84..5240aaea4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -15,6 +15,7 @@ classifiers = ["Development Status :: 3 - Alpha",
               "Programming Language :: Python :: 3.10",
               "Programming Language :: Python :: 3.11",
               "Programming Language :: Python :: 3.12",
+              "Programming Language :: Python :: 3.13",
               "Topic :: Scientific/Engineering"]
 dependencies = [
     'numpy>=1.22',
@@ -94,10 +95,12 @@ select = [
  'SIM101',
  'SIM109',
  'SIM110',
-  'SIM111',
  'SIM118',
  'SIM2'
 ]
+ignore = [
+  'UP031'
+]

 [tool.spin]
 package = 'nipy'
@@ -105,10 +108,15 @@ package = 'nipy'
 [tool.spin.commands]
 Build = [
     'spin.cmds.meson.build',
-    'spin.cmds.meson.test'
+    'spin.cmds.meson.test',
+    'spin.cmds.pip.install'
 ]
 Environments = [
     'spin.cmds.meson.ipython',
     'spin.cmds.meson.python',
     'spin.cmds.meson.run'
 ]
+Debug = [
+    'spin.cmds.meson.gdb',
+    'spin.cmds.meson.lldb'
+]
diff --git a/tools/apigen.py b/tools/apigen.py
index 578518f8b..c25636c93 100644
--- a/tools/apigen.py
+++ b/tools/apigen.py
@@ -217,7 +217,7 @@ def generate_api_doc(self, uri):

        # Make a shorter version of the uri that omits the package name for
        # titles
-        uri_short = re.sub(r'^%s\.' % self.package_name,'',uri)
+        uri_short = re.sub(rf'^{self.package_name}\.','',uri)

        ad = '.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n'
@@ -342,7 +342,7 @@ def discover_modules(self):
                 root_uri = self._path2uri(os.path.join(self.root_path, dirpath))
             for dirname in dirnames[:]:  # copy list - we modify inplace
-                package_uri = '.'.join((root_uri, dirname))
+                package_uri = f'{root_uri}.{dirname}'
                 if (self._uri2path(package_uri) and
                     self._survives_exclude(package_uri, 'package')):
                     modules.append(package_uri)
@@ -351,7 +351,7 @@ def discover_modules(self):
             # Check filenames for modules
             for filename in filenames:
                 module_name = filename[:-3]
-                module_uri = '.'.join((root_uri, module_name))
+                module_uri = f'{root_uri}.{module_name}'
                 if (self._uri2path(module_uri) and
                     self._survives_exclude(module_uri, 'module')):
                     modules.append(module_uri)
diff --git a/tools/fix_longtable.py b/tools/fix_longtable.py
index be7803b5b..181239a5d 100755
--- a/tools/fix_longtable.py
+++ b/tools/fix_longtable.py
@@ -10,7 +10,7 @@

 def replacer(match):
     args = '|' + 'l|' * len(match.groups()[0])
-    return "longtable}{%s}" % args
+    return f"longtable}}{{{args}}}"


 if len(sys.argv) != 2:
diff --git a/tools/gitwash_dumper.py b/tools/gitwash_dumper.py
index 515581c60..9f53cef23 100755
--- a/tools/gitwash_dumper.py
+++ b/tools/gitwash_dumper.py
@@ -125,15 +125,15 @@ def make_link_targets(proj_name,
     have_gh_url = None
     for line in link_contents:
         if not have_url:
-            match = re.match(r'..\s+_`%s`:\s+' % proj_name, line)
+            match = re.match(rf'..\s+_`{proj_name}`:\s+', line)
             if match:
                 have_url = True
         if not have_ml_url:
-            match = re.match(r'..\s+_`%s mailing list`:\s+' % proj_name, line)
+            match = re.match(rf'..\s+_`{proj_name} mailing list`:\s+', line)
             if match:
                 have_ml_url = True
         if not have_gh_url:
-            match = re.match(r'..\s+_`%s github`:\s+' % proj_name, line)
+            match = re.match(rf'..\s+_`{proj_name} github`:\s+', line)
             if match:
                 have_gh_url = True
     if not have_url or not have_ml_url:
diff --git a/tools/run_log_examples.py b/tools/run_log_examples.py
index 026a9bdd2..f637e9e84 100755
--- a/tools/run_log_examples.py
+++ b/tools/run_log_examples.py
@@ -93,7 +93,7 @@ def cmd_str_maker(self, cmd, args):
         Prepend some matplotlib setup to suppress figures
         """
         if len(args) != 0:
-            raise ValueError("Cannot use args with {8}".format(self.__class__))
+            raise ValueError(f"Cannot use args with {self.__class__}")
         return(f"""{PYTHON} -c "import matplotlib as mpl; mpl.use('agg'); """
                f"""exec(open('{cmd}', 'rt').read())" """)

@@ -157,7 +157,7 @@ def main():
             else:
                 fails += 1
                 _record('FAIL', fname, f)
-    sys.exit(fails if fails < 255 else 255)
+    sys.exit(min(255, fails))


 if __name__ == '__main__':
diff --git a/tools/touch_cython_cs.py b/tools/touch_cython_cs.py
index bf8eaad05..8fbfac053 100755
--- a/tools/touch_cython_cs.py
+++ b/tools/touch_cython_cs.py
@@ -12,16 +12,13 @@
 from os.path import isfile, splitext
 from os.path import join as pjoin

-# From http://stackoverflow.com/questions/1158076/implement-touch-using-python
-if sys.version_info[0] >= 3:
-    def touch(fname, times=None, ns=None, dir_fd=None):
-        with os.open(fname, os.O_APPEND, dir_fd=dir_fd) as f:
-            os.utime(f.fileno() if os.utime in os.supports_fd else fname,
-                     times=times, ns=ns, dir_fd=dir_fd)
-else:
-    def touch(fname, times=None):
-        with file(fname, 'a'):
-            os.utime(fname, times)
+
+def touch(fname, times=None, ns=None, dir_fd=None):
+    # os.open returns a bare file descriptor, not a context manager, so
+    # wrap it with os.fdopen before entering the `with` block.
+    with os.fdopen(os.open(fname, os.O_APPEND, dir_fd=dir_fd)) as f:
+        os.utime(f.fileno() if os.utime in os.supports_fd else fname,
+                 times=times, ns=ns, dir_fd=dir_fd)


 def main():
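
Note for reviewers: a minimal sketch of how the `deprecate_with_doc` decorator added to `nipy/utils/__init__.py` behaves, matching its use on `rollaxis` in `nipy/core/image/image.py` above. The `rollaxis_like` and `rollimg_like` functions are hypothetical stand-ins for illustration, not nipy APIs.

```python
import warnings

from nipy.utils import deprecate_with_doc


def rollimg_like(x):
    # Hypothetical replacement function, standing in for rollimg.
    return x


@deprecate_with_doc('please use rollimg_like instead')
def rollaxis_like(x):
    # Hypothetical deprecated wrapper, standing in for rollaxis.
    return rollimg_like(x)


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    rollaxis_like(1)

# functools.wraps keeps the wrapped function's name, so the message reads:
# "rollaxis_like deprecated, please use rollimg_like instead"
assert issubclass(caught[0].category, DeprecationWarning)
assert 'rollaxis_like deprecated, please use rollimg_like instead' \
    in str(caught[0].message)
```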
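Likewise, a sketch of the `np.sctypes` to `SCTYPES` migration that runs through most of these hunks; it assumes only the `SCTYPES` dict added to `nipy/utils/__init__.py` in this patch.

```python
from itertools import chain

import numpy as np

from nipy.utils import SCTYPES

# Old spelling, removed in NumPy 2.0:
#     np.sctypes['float'] + np.sctypes['complex']
inexact = SCTYPES['float'] + SCTYPES['complex']
assert np.float64 in inexact and np.complex128 in inexact

# Flattening all categories, as test_strides_from now does:
all_scalar_types = list(chain.from_iterable(SCTYPES.values()))
assert object in all_scalar_types

# Note that SCTYPES omits the platform-dependent np.longdouble /
# np.clongdouble entries that np.sctypes used to include, which is why
# the clongdouble check in test_coordinate_map.py is now a no-op.
```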